mirror of
https://github.com/kubenetworks/kubevpn.git
synced 2025-10-05 15:26:57 +08:00
refactor: update go mod library (#210)
refactor: update go mod library and refactor dev logic Co-authored-by: wencaiwulue <895703375@qq.com>
This commit is contained in:
2
.gitignore
vendored
2
.gitignore
vendored
@@ -1,8 +1,6 @@
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.exe~
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, built with `go test -c`
|
||||
|
@@ -693,10 +693,6 @@ kubectl delete -f https://raw.githubusercontent.com/kubenetworks/kubevpn/master/
|
||||
- Linux
|
||||
- Windows
|
||||
|
||||
on Windows platform, you need to
|
||||
install [PowerShell](https://docs.microsoft.com/en-us/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
|
||||
in advance
|
||||
|
||||
## FAQ
|
||||
|
||||
### 1, What should I do if the dependent image cannot be pulled, or the inner environment cannot access docker.io?
|
||||
|
@@ -581,9 +581,6 @@ d0b3dab8912a naison/kubevpn:v2.0.0 "/bin/bash" 5 minutes ago
|
||||
- Linux
|
||||
- Windows
|
||||
|
||||
Windows
|
||||
下需要安装 [PowerShell](https://docs.microsoft.com/zh-cn/powershell/scripting/install/installing-powershell-on-windows?view=powershell-7.2)
|
||||
|
||||
## 问答
|
||||
|
||||
### 1,依赖的镜像拉不下来,或者内网环境无法访问 docker.io 怎么办?
|
||||
|
@@ -86,7 +86,7 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
|
||||
// special empty string, eg: --target-registry ""
|
||||
options.IsChangeTargetRegistry = cmd.Flags().Changed("target-registry")
|
||||
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -101,7 +101,7 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
|
||||
Workloads: args,
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
UseLocalDNS: options.UseLocalDNS,
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
Engine: string(options.Engine),
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TargetKubeconfig: options.TargetKubeconfig,
|
||||
|
@@ -45,7 +45,7 @@ func cmdConfigAdd(f cmdutil.Factory) *cobra.Command {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -60,7 +60,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
||||
return nil
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -74,7 +74,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
|
||||
ExtraRoute: extraRoute.ToRPC(),
|
||||
UseLocalDNS: connect.UseLocalDNS,
|
||||
Engine: string(connect.Engine),
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
|
||||
SshJump: sshConf.ToRPC(),
|
||||
TransferImage: transferImage,
|
||||
|
@@ -6,7 +6,7 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/cli-runtime/pkg/genericiooptions"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/util/completion"
|
||||
"k8s.io/kubectl/pkg/util/i18n"
|
||||
@@ -59,7 +59,7 @@ var cpExample = templates.Examples(i18n.T(`
|
||||
))
|
||||
|
||||
func CmdCp(f cmdutil.Factory) *cobra.Command {
|
||||
o := cp.NewCopyOptions(genericclioptions.IOStreams{
|
||||
o := cp.NewCopyOptions(genericiooptions.IOStreams{
|
||||
In: os.Stdin,
|
||||
Out: os.Stdout,
|
||||
ErrOut: os.Stderr,
|
||||
|
@@ -4,7 +4,6 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
dockercomp "github.com/docker/cli/cli/command/completion"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -21,15 +20,15 @@ import (
|
||||
)
|
||||
|
||||
func CmdDev(f cmdutil.Factory) *cobra.Command {
|
||||
cli, dockerCli, err := util.GetClient()
|
||||
client, cli, err := util.GetClient()
|
||||
if err != nil {
|
||||
panic(err)
|
||||
log.Fatal(err)
|
||||
}
|
||||
var devOptions = &dev.Options{
|
||||
var options = &dev.Options{
|
||||
Factory: f,
|
||||
NoProxy: false,
|
||||
Cli: cli,
|
||||
DockerCli: dockerCli,
|
||||
Cli: client,
|
||||
DockerCli: cli,
|
||||
ExtraRouteInfo: handler.ExtraRouteInfo{},
|
||||
}
|
||||
var sshConf = &util.SshConfig{}
|
||||
@@ -96,7 +95,7 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
}
|
||||
util.InitLogger(false)
|
||||
// not support temporally
|
||||
if devOptions.Engine == config.EngineGvisor {
|
||||
if options.Engine == config.EngineGvisor {
|
||||
return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
|
||||
}
|
||||
|
||||
@@ -107,55 +106,42 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
return util.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
devOptions.Workload = args[0]
|
||||
options.Workload = args[0]
|
||||
for i, arg := range args {
|
||||
if arg == "--" && i != len(args)-1 {
|
||||
devOptions.Copts.Args = args[i+1:]
|
||||
options.Copts.Args = args[i+1:]
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
err = dev.DoDev(cmd.Context(), devOptions, sshConf, cmd.Flags(), f, transferImage)
|
||||
for _, fun := range devOptions.GetRollbackFuncList() {
|
||||
if fun != nil {
|
||||
if err = fun(); err != nil {
|
||||
log.Errorf("roll back failed, error: %s", err.Error())
|
||||
defer func() {
|
||||
for _, function := range options.GetRollbackFuncList() {
|
||||
if function != nil {
|
||||
if er := function(); er != nil {
|
||||
log.Errorf("roll back failed, error: %s", er.Error())
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
err = dev.DoDev(cmd.Context(), options, sshConf, cmd.Flags(), f, transferImage)
|
||||
return err
|
||||
},
|
||||
}
|
||||
cmd.Flags().SortFlags = false
|
||||
cmd.Flags().StringToStringVarP(&devOptions.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().StringToStringVarP(&options.Headers, "headers", "H", map[string]string{}, "Traffic with special headers with reverse it to local PC, you should startup your service after reverse workloads successfully, If not special, redirect all traffic to local PC, format is k=v, like: k1=v1,k2=v2")
|
||||
cmd.Flags().BoolVar(&config.Debug, "debug", false, "enable debug mode or not, true or false")
|
||||
cmd.Flags().StringVar(&config.Image, "image", config.Image, "use this image to startup container")
|
||||
cmd.Flags().BoolVar(&devOptions.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
|
||||
cmdutil.AddContainerVarFlags(cmd, &devOptions.ContainerName, devOptions.ContainerName)
|
||||
cmd.Flags().BoolVar(&options.NoProxy, "no-proxy", false, "Whether proxy remote workloads traffic into local or not, true: just startup container on local without inject containers to intercept traffic, false: intercept traffic and forward to local")
|
||||
cmdutil.AddContainerVarFlags(cmd, &options.ContainerName, options.ContainerName)
|
||||
cmdutil.CheckErr(cmd.RegisterFlagCompletionFunc("container", completion.ContainerCompletionFunc(f)))
|
||||
cmd.Flags().StringVar((*string)(&devOptions.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
|
||||
cmd.Flags().StringVar((*string)(&options.ConnectMode), "connect-mode", string(dev.ConnectModeHost), "Connect to kubernetes network in container or in host, eg: ["+string(dev.ConnectModeContainer)+"|"+string(dev.ConnectModeHost)+"]")
|
||||
cmd.Flags().BoolVar(&transferImage, "transfer-image", false, "transfer image to remote registry, it will transfer image "+config.OriginImage+" to flags `--image` special image, default: "+config.Image)
|
||||
cmd.Flags().StringVar((*string)(&devOptions.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
cmd.Flags().StringVar((*string)(&options.Engine), "engine", string(config.EngineRaw), fmt.Sprintf(`transport engine ("%s"|"%s") %s: use gvisor and raw both (both performance and stable), %s: use raw mode (best stable)`, config.EngineMix, config.EngineRaw, config.EngineMix, config.EngineRaw))
|
||||
|
||||
// diy docker options
|
||||
cmd.Flags().StringVar(&devOptions.DockerImage, "docker-image", "", "Overwrite the default K8s pod of the image")
|
||||
cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
|
||||
// origin docker options
|
||||
flags := cmd.Flags()
|
||||
flags.SetInterspersed(false)
|
||||
|
||||
// These are flags not stored in Config/HostConfig
|
||||
flags.BoolVarP(&devOptions.Options.Detach, "detach", "d", false, "Run container in background and print container ID")
|
||||
flags.StringVar(&devOptions.Options.Name, "name", "", "Assign a name to the container")
|
||||
flags.StringVar(&devOptions.Options.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
|
||||
flags.BoolVarP(&devOptions.Options.Quiet, "quiet", "q", false, "Suppress the pull output")
|
||||
|
||||
// Add an explicit help that doesn't have a `-h` to prevent the conflict
|
||||
// with hostname
|
||||
flags.Bool("help", false, "Print usage")
|
||||
|
||||
command.AddPlatformFlag(flags, &devOptions.Options.Platform)
|
||||
command.AddTrustVerificationFlags(flags, &devOptions.Options.Untrusted, dockerCli.ContentTrustEnabled())
|
||||
devOptions.Copts = dev.AddFlags(flags)
|
||||
dev.AddDockerFlags(options, cmd.Flags(), cli)
|
||||
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"env",
|
||||
@@ -171,10 +157,10 @@ Startup your kubernetes workloads in local Docker container with same volume、e
|
||||
)
|
||||
_ = cmd.RegisterFlagCompletionFunc(
|
||||
"network",
|
||||
dockercomp.NetworkNames(nil),
|
||||
dockercomp.NetworkNames(cli),
|
||||
)
|
||||
|
||||
addExtraRoute(cmd, &devOptions.ExtraRouteInfo)
|
||||
addExtraRoute(cmd, &options.ExtraRouteInfo)
|
||||
addSshFlags(cmd, sshConf)
|
||||
return cmd
|
||||
}
|
||||
|
@@ -97,7 +97,7 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
return cmdutil.UsageErrorf(cmd, usageString)
|
||||
}
|
||||
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -122,7 +122,7 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
|
||||
TransferImage: transferImage,
|
||||
Image: config.Image,
|
||||
Level: int32(logLevel),
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
},
|
||||
)
|
||||
if err != nil {
|
||||
|
@@ -50,7 +50,7 @@ func CmdReset(f cmdutil.Factory) *cobra.Command {
|
||||
return daemon.StartupDaemon(cmd.Context())
|
||||
},
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
bytes, ns, err := util.ConvertToKubeconfigBytes(f)
|
||||
bytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
145
go.mod
145
go.mod
@@ -1,21 +1,19 @@
|
||||
module github.com/wencaiwulue/kubevpn/v2
|
||||
|
||||
go 1.22.0
|
||||
|
||||
toolchain go1.22.1
|
||||
go 1.22.1
|
||||
|
||||
require (
|
||||
github.com/cilium/ipam v0.0.0-20220824141044-46ef3d556735
|
||||
github.com/containerd/containerd v1.7.13
|
||||
github.com/cilium/ipam v0.0.0-20230509084518-fd66eae7909b
|
||||
github.com/containerd/containerd v1.7.14
|
||||
github.com/containernetworking/cni v1.1.2
|
||||
github.com/coredns/caddy v1.1.1
|
||||
github.com/coredns/coredns v1.11.1
|
||||
github.com/docker/cli v23.0.3+incompatible
|
||||
github.com/docker/distribution v2.8.3+incompatible
|
||||
github.com/docker/docker v23.0.3+incompatible
|
||||
github.com/docker/go-connections v0.4.0
|
||||
github.com/coredns/coredns v1.11.2
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/docker/cli v26.0.0+incompatible
|
||||
github.com/docker/docker v26.0.0+incompatible
|
||||
github.com/docker/go-connections v0.5.0
|
||||
github.com/docker/libcontainer v2.2.1+incompatible
|
||||
github.com/envoyproxy/go-control-plane v0.11.1
|
||||
github.com/envoyproxy/go-control-plane v0.12.0
|
||||
github.com/fsnotify/fsnotify v1.7.0
|
||||
github.com/google/gopacket v1.1.19
|
||||
github.com/google/uuid v1.6.0
|
||||
@@ -24,17 +22,18 @@ require (
|
||||
github.com/jcmturner/gokrb5/v8 v8.4.4
|
||||
github.com/kevinburke/ssh_config v1.2.0
|
||||
github.com/libp2p/go-netroute v0.2.1
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
|
||||
github.com/miekg/dns v1.1.57
|
||||
github.com/moby/term v0.0.0-20221205130635-1aeaba878587
|
||||
github.com/opencontainers/image-spec v1.1.0-rc6
|
||||
github.com/mattbaird/jsonpatch v0.0.0-20240118010651-0ba75a80ca38
|
||||
github.com/miekg/dns v1.1.58
|
||||
github.com/moby/sys/signal v0.7.0
|
||||
github.com/moby/term v0.5.0
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/prometheus-community/pro-bing v0.1.0
|
||||
github.com/schollz/progressbar/v3 v3.13.0
|
||||
github.com/prometheus-community/pro-bing v0.4.0
|
||||
github.com/schollz/progressbar/v3 v3.14.2
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/spf13/cobra v1.8.0
|
||||
github.com/spf13/pflag v1.0.5
|
||||
go.uber.org/automaxprocs v1.5.1
|
||||
go.uber.org/automaxprocs v1.5.3
|
||||
golang.org/x/crypto v0.21.0
|
||||
golang.org/x/exp v0.0.0-20240325151524-a685a6edb6d8
|
||||
golang.org/x/net v0.22.0
|
||||
@@ -43,29 +42,32 @@ require (
|
||||
golang.org/x/sys v0.18.0
|
||||
golang.org/x/text v0.14.0
|
||||
golang.org/x/time v0.5.0
|
||||
golang.zx2c4.com/wintun v0.0.0-20211104114900-415007cec224
|
||||
golang.zx2c4.com/wireguard v0.0.0-20220920152132-bb719d3a6e2c
|
||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2
|
||||
golang.zx2c4.com/wireguard v0.0.0-20231211153847-12269c276173
|
||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||
google.golang.org/grpc v1.61.0
|
||||
google.golang.org/grpc v1.62.1
|
||||
google.golang.org/protobuf v1.33.0
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.2.1
|
||||
gvisor.dev/gvisor v0.0.0-20240331093445-9d995324d058
|
||||
gvisor.dev/gvisor v0.0.0-20240403010941-979aae3d2c21
|
||||
k8s.io/api v0.31.0-alpha.0
|
||||
k8s.io/apimachinery v0.31.0-alpha.0
|
||||
k8s.io/cli-runtime v0.29.0
|
||||
k8s.io/cli-runtime v0.29.3
|
||||
k8s.io/client-go v0.31.0-alpha.0
|
||||
k8s.io/kubectl v0.29.0
|
||||
k8s.io/kubectl v0.29.3
|
||||
k8s.io/utils v0.0.0-20240310230437-4693a0247e57
|
||||
sigs.k8s.io/controller-runtime v0.17.1-0.20240327193027-21368602d84b
|
||||
sigs.k8s.io/kustomize/api v0.13.5-0.20230601165947-6ce0bf390ce3
|
||||
sigs.k8s.io/kustomize/api v0.16.0
|
||||
sigs.k8s.io/yaml v1.4.0
|
||||
tags.cncf.io/container-device-interface v0.7.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go/compute v1.23.4 // indirect
|
||||
cel.dev/expr v0.15.0 // indirect
|
||||
cloud.google.com/go/compute v1.25.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
dario.cat/mergo v1.0.0 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.29 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.23 // indirect
|
||||
@@ -75,42 +77,41 @@ require (
|
||||
github.com/Azure/go-autorest/autorest/to v0.4.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/DataDog/appsec-internal-go v1.4.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.50.1 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.50.1 // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.4.0 // indirect
|
||||
github.com/DataDog/go-libddwaf/v2 v2.2.3 // indirect
|
||||
github.com/DataDog/go-sqllexer v0.0.10 // indirect
|
||||
github.com/DataDog/go-tuf v1.0.2-0.5.2 // indirect
|
||||
github.com/DataDog/appsec-internal-go v1.5.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/obfuscate v0.52.0 // indirect
|
||||
github.com/DataDog/datadog-agent/pkg/remoteconfig/state v0.52.0 // indirect
|
||||
github.com/DataDog/datadog-go/v5 v5.5.0 // indirect
|
||||
github.com/DataDog/go-libddwaf/v2 v2.4.2 // indirect
|
||||
github.com/DataDog/go-sqllexer v0.0.11 // indirect
|
||||
github.com/DataDog/go-tuf v1.1.0-0.5.2 // indirect
|
||||
github.com/DataDog/sketches-go v1.4.4 // indirect
|
||||
github.com/MakeNowJust/heredoc v1.0.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/Microsoft/hcsshim v0.11.4 // indirect
|
||||
github.com/Microsoft/hcsshim v0.12.2 // indirect
|
||||
github.com/antonmedv/expr v1.15.5 // indirect
|
||||
github.com/apparentlymart/go-cidr v1.1.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.49.13 // indirect
|
||||
github.com/aws/aws-sdk-go v1.51.12 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chai2010/gettext-go v1.0.2 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20231109132714-523115ebc101 // indirect
|
||||
github.com/containerd/continuity v0.4.3 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20240329184929-0c46c01016dc // indirect
|
||||
github.com/containerd/log v0.1.0 // indirect
|
||||
github.com/coreos/go-semver v0.3.1 // indirect
|
||||
github.com/coreos/go-systemd/v22 v22.5.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
|
||||
github.com/dimchansky/utfbom v1.1.1 // indirect
|
||||
github.com/distribution/reference v0.5.0 // indirect
|
||||
github.com/dnstap/golang-dnstap v0.4.0 // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/docker/distribution v2.8.3+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.8.1 // indirect
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
|
||||
github.com/docker/go-metrics v0.0.1 // indirect
|
||||
github.com/docker/go-units v0.5.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/ebitengine/purego v0.5.1 // indirect
|
||||
github.com/ebitengine/purego v0.7.0 // indirect
|
||||
github.com/emicklei/go-restful/v3 v3.12.0 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.2 // indirect
|
||||
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v1.0.4 // indirect
|
||||
github.com/evanphx/json-patch v5.9.0+incompatible // indirect
|
||||
github.com/evanphx/json-patch/v5 v5.9.0 // indirect
|
||||
github.com/exponent-io/jsonpath v0.0.0-20210407135951-1de76d718b3f // indirect
|
||||
github.com/farsightsec/golang-framestream v0.3.0 // indirect
|
||||
@@ -118,7 +119,7 @@ require (
|
||||
github.com/felixge/httpsnoop v1.0.4 // indirect
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
|
||||
github.com/fvbommel/sortorder v1.1.0 // indirect
|
||||
github.com/go-errors/errors v1.4.2 // indirect
|
||||
github.com/go-errors/errors v1.5.1 // indirect
|
||||
github.com/go-logr/logr v1.4.1 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/go-openapi/jsonpointer v0.21.0 // indirect
|
||||
@@ -133,13 +134,13 @@ require (
|
||||
github.com/google/gnostic-models v0.6.8 // indirect
|
||||
github.com/google/go-cmp v0.6.0 // indirect
|
||||
github.com/google/gofuzz v1.2.0 // indirect
|
||||
github.com/google/pprof v0.0.0-20231229205709-960ae82b1e42 // indirect
|
||||
github.com/google/pprof v0.0.0-20240327155427-868f304927ed // indirect
|
||||
github.com/google/s2a-go v0.1.7 // indirect
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.0 // indirect
|
||||
github.com/gorilla/mux v1.8.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.12.3 // indirect
|
||||
github.com/gorilla/mux v1.8.1 // indirect
|
||||
github.com/gorilla/websocket v1.5.1 // indirect
|
||||
github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect
|
||||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
@@ -156,7 +157,7 @@ require (
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/compress v1.17.6 // indirect
|
||||
github.com/klauspost/compress v1.17.7 // indirect
|
||||
github.com/liggitt/tabwriter v0.0.0-20181228230101-89fcab3d43de // indirect
|
||||
github.com/mailru/easyjson v0.7.7 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.15 // indirect
|
||||
@@ -166,12 +167,12 @@ require (
|
||||
github.com/mitchellh/go-homedir v1.1.0 // indirect
|
||||
github.com/mitchellh/go-wordwrap v1.0.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/moby/buildkit v0.9.0-rc1 // indirect
|
||||
github.com/moby/patternmatcher v0.5.0 // indirect
|
||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||
github.com/moby/patternmatcher v0.6.0 // indirect
|
||||
github.com/moby/spdystream v0.2.0 // indirect
|
||||
github.com/moby/sys/sequential v0.5.0 // indirect
|
||||
github.com/moby/sys/signal v0.7.0 // indirect
|
||||
github.com/moby/sys/symlink v0.2.0 // indirect
|
||||
github.com/moby/sys/user v0.1.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/monochromegane/go-gitignore v0.0.0-20200626010858-205db1a8cc00 // indirect
|
||||
@@ -180,7 +181,6 @@ require (
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/onsi/ginkgo/v2 v2.17.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/runc v1.1.12 // indirect
|
||||
github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492 // indirect
|
||||
github.com/opentracing/opentracing-go v1.2.0 // indirect
|
||||
github.com/openzipkin-contrib/zipkin-go-opentracing v0.5.0 // indirect
|
||||
@@ -194,9 +194,8 @@ require (
|
||||
github.com/prometheus/client_model v0.6.0 // indirect
|
||||
github.com/prometheus/common v0.51.1 // indirect
|
||||
github.com/prometheus/procfs v0.13.0 // indirect
|
||||
github.com/quic-go/qtls-go1-20 v0.4.1 // indirect
|
||||
github.com/quic-go/quic-go v0.40.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/quic-go/quic-go v0.42.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/secure-systems-lab/go-securesystemslib v0.8.0 // indirect
|
||||
github.com/theupdateframework/notary v0.7.0 // indirect
|
||||
@@ -205,43 +204,41 @@ require (
|
||||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
|
||||
github.com/xeipuuv/gojsonschema v1.2.0 // indirect
|
||||
github.com/xlab/treeprint v1.2.0 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.11 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.11 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.11 // indirect
|
||||
go.etcd.io/etcd/api/v3 v3.5.13 // indirect
|
||||
go.etcd.io/etcd/client/pkg/v3 v3.5.13 // indirect
|
||||
go.etcd.io/etcd/client/v3 v3.5.13 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.47.0 // indirect
|
||||
go.opentelemetry.io/otel v1.23.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.23.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.23.0 // indirect
|
||||
go.starlark.net v0.0.0-20230525235612-a134d8f9ddca // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
|
||||
go.opentelemetry.io/otel v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.24.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.24.0 // indirect
|
||||
go.starlark.net v0.0.0-20240329153429-e6e8e7ce1b7a // indirect
|
||||
go.uber.org/atomic v1.11.0 // indirect
|
||||
go.uber.org/mock v0.4.0 // indirect
|
||||
go.uber.org/multierr v1.11.0 // indirect
|
||||
go.uber.org/zap v1.27.0 // indirect
|
||||
go4.org/intern v0.0.0-20230525184215-6c62f75575cb // indirect
|
||||
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6 // indirect
|
||||
golang.org/x/mod v0.16.0 // indirect
|
||||
golang.org/x/term v0.18.0 // indirect
|
||||
golang.org/x/tools v0.19.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20231012003039-104605ab7028 // indirect
|
||||
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
|
||||
google.golang.org/api v0.160.0 // indirect
|
||||
google.golang.org/api v0.172.0 // indirect
|
||||
google.golang.org/appengine v1.6.8 // indirect
|
||||
google.golang.org/genproto v0.0.0-20240205150955-31a09d347014 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240125205218-1f4bbc51befe // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240205150955-31a09d347014 // indirect
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.58.1 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20240401170217-c3f982113cda // indirect
|
||||
gopkg.in/DataDog/dd-trace-go.v1 v1.62.0 // indirect
|
||||
gopkg.in/evanphx/json-patch.v5 v5.9.0 // indirect
|
||||
gopkg.in/fsnotify.v1 v1.4.7 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
inet.af/netaddr v0.0.0-20230525184311-b8eac61e914a // indirect
|
||||
k8s.io/apiextensions-apiserver v0.31.0-alpha.0 // indirect
|
||||
k8s.io/component-base v0.31.0-alpha.0 // indirect
|
||||
k8s.io/klog/v2 v2.120.1 // indirect
|
||||
k8s.io/kube-openapi v0.0.0-20240322212309-b815d8309940 // indirect
|
||||
sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.14.3-0.20230601165947-6ce0bf390ce3 // indirect
|
||||
sigs.k8s.io/kustomize/kyaml v0.16.0 // indirect
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
)
|
||||
|
12
pkg/cp/cp.go
12
pkg/cp/cp.go
@@ -12,7 +12,7 @@ import (
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/cli-runtime/pkg/genericiooptions"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/kubectl/pkg/cmd/exec"
|
||||
@@ -32,11 +32,11 @@ type CopyOptions struct {
|
||||
|
||||
args []string
|
||||
|
||||
genericclioptions.IOStreams
|
||||
genericiooptions.IOStreams
|
||||
}
|
||||
|
||||
// NewCopyOptions creates the options for copy
|
||||
func NewCopyOptions(ioStreams genericclioptions.IOStreams) *CopyOptions {
|
||||
func NewCopyOptions(ioStreams genericiooptions.IOStreams) *CopyOptions {
|
||||
return &CopyOptions{
|
||||
IOStreams: ioStreams,
|
||||
}
|
||||
@@ -149,7 +149,7 @@ func (o *CopyOptions) Run() error {
|
||||
func (o *CopyOptions) checkDestinationIsDir(dest fileSpec) error {
|
||||
options := &exec.ExecOptions{
|
||||
StreamOptions: exec.StreamOptions{
|
||||
IOStreams: genericclioptions.IOStreams{
|
||||
IOStreams: genericiooptions.IOStreams{
|
||||
Out: bytes.NewBuffer([]byte{}),
|
||||
ErrOut: bytes.NewBuffer([]byte{}),
|
||||
},
|
||||
@@ -199,7 +199,7 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) e
|
||||
}
|
||||
|
||||
options.StreamOptions = exec.StreamOptions{
|
||||
IOStreams: genericclioptions.IOStreams{
|
||||
IOStreams: genericiooptions.IOStreams{
|
||||
In: reader,
|
||||
Out: o.Out,
|
||||
ErrOut: o.ErrOut,
|
||||
@@ -246,7 +246,7 @@ func (t *TarPipe) initReadFrom(n uint64) {
|
||||
t.reader, t.outStream = io.Pipe()
|
||||
options := &exec.ExecOptions{
|
||||
StreamOptions: exec.StreamOptions{
|
||||
IOStreams: genericclioptions.IOStreams{
|
||||
IOStreams: genericiooptions.IOStreams{
|
||||
In: nil,
|
||||
Out: t.outStream,
|
||||
ErrOut: t.o.Out,
|
||||
|
@@ -86,7 +86,7 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f := InitFactoryByPath(path, req.Namespace)
|
||||
f := util.InitFactoryByPath(path, req.Namespace)
|
||||
err = options.InitClient(f)
|
||||
if err != nil {
|
||||
log.Errorf("init client failed: %v", err)
|
||||
|
@@ -71,7 +71,7 @@ func (svr *Server) ConnectFork(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectF
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -130,7 +130,7 @@ func (svr *Server) redirectConnectForkToSudoDaemon(req *rpc.ConnectRequest, resp
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -5,18 +5,12 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
defaultlog "log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/client-go/rest"
|
||||
restclient "k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
@@ -97,7 +91,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = svr.connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = svr.connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -157,7 +151,7 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -248,43 +242,3 @@ func (r *warp) Write(p []byte) (n int, err error) {
|
||||
func newWarp(server rpc.Daemon_ConnectServer) io.Writer {
|
||||
return &warp{server: server}
|
||||
}
|
||||
|
||||
func InitFactory(kubeconfigBytes string, ns string) cmdutil.Factory {
|
||||
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
|
||||
configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
|
||||
if path, ok := os.LookupEnv(config.EnvSSHJump); ok {
|
||||
bytes, err := os.ReadFile(path)
|
||||
cmdutil.CheckErr(err)
|
||||
var conf *restclient.Config
|
||||
conf, err = clientcmd.RESTConfigFromKubeConfig(bytes)
|
||||
cmdutil.CheckErr(err)
|
||||
return conf
|
||||
}
|
||||
return c
|
||||
}
|
||||
// todo optimize here
|
||||
temp, err := os.CreateTemp("", "*.json")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
err = temp.Close()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
err = os.WriteFile(temp.Name(), []byte(kubeconfigBytes), os.ModePerm)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
configFlags.KubeConfig = pointer.String(temp.Name())
|
||||
configFlags.Namespace = pointer.String(ns)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
return cmdutil.NewFactory(matchVersionFlags)
|
||||
}
|
||||
|
||||
func InitFactoryByPath(kubeconfig string, ns string) cmdutil.Factory {
|
||||
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
|
||||
configFlags.KubeConfig = pointer.String(kubeconfig)
|
||||
configFlags.Namespace = pointer.String(ns)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
return cmdutil.NewFactory(matchVersionFlags)
|
||||
}
|
||||
|
@@ -123,7 +123,7 @@ func disconnectByKubeConfig(ctx context.Context, svr *Server, kubeconfigBytes st
|
||||
connect := &handler.ConnectOptions{
|
||||
Namespace: ns,
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, ns))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, ns))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -58,7 +58,7 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) e
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -41,7 +41,7 @@ func (svr *Server) Reset(req *rpc.ResetRequest, resp rpc.Daemon_ResetServer) err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = connect.InitClient(InitFactoryByPath(path, req.Namespace))
|
||||
err = connect.InitClient(util.InitFactoryByPath(path, req.Namespace))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@@ -7,6 +7,7 @@ import (
|
||||
"text/tabwriter"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func (svr *Server) Status(ctx context.Context, request *rpc.StatusRequest) (*rpc.StatusResponse, error) {
|
||||
@@ -15,14 +16,14 @@ func (svr *Server) Status(ctx context.Context, request *rpc.StatusRequest) (*rpc
|
||||
_, _ = fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", "ID", "Mode", "Cluster", "Kubeconfig", "Namespace", "Status")
|
||||
if svr.connect != nil {
|
||||
status := "Connected"
|
||||
cluster := svr.connect.GetKubeconfigCluster()
|
||||
cluster := util.GetKubeconfigCluster(svr.connect.GetFactory())
|
||||
namespace := svr.connect.Namespace
|
||||
kubeconfig := svr.connect.OriginKubeconfigPath
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\n", 0, "full", cluster, kubeconfig, namespace, status)
|
||||
}
|
||||
|
||||
for i, options := range svr.secondaryConnect {
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\n", i+1, "lite", options.GetKubeconfigCluster(), options.OriginKubeconfigPath, options.Namespace, "Connected")
|
||||
_, _ = fmt.Fprintf(w, "%d\t%s\t%s\t%s\t%s\t%s\n", i+1, "lite", util.GetKubeconfigCluster(options.GetFactory()), options.OriginKubeconfigPath, options.Namespace, "Connected")
|
||||
}
|
||||
_ = w.Flush()
|
||||
return &rpc.StatusResponse{Message: sb.String()}, nil
|
||||
|
@@ -1,254 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unsafe"
|
||||
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/google/uuid"
|
||||
miekgdns "github.com/miekg/dns"
|
||||
"github.com/moby/term"
|
||||
v12 "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/api/core/v1"
|
||||
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/kubectl/pkg/cmd/util"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/cp"
|
||||
util2 "github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type RunConfig struct {
|
||||
containerName string
|
||||
k8sContainerName string
|
||||
|
||||
config *container.Config
|
||||
hostConfig *container.HostConfig
|
||||
networkingConfig *network.NetworkingConfig
|
||||
platform *v12.Platform
|
||||
|
||||
Options RunOptions
|
||||
Copts *ContainerOptions
|
||||
}
|
||||
|
||||
func ConvertKubeResourceToContainer(namespace string, temp v1.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *miekgdns.ClientConfig) (runConfigList ConfigList) {
|
||||
spec := temp.Spec
|
||||
for _, c := range spec.Containers {
|
||||
tmpConfig := &container.Config{
|
||||
Hostname: func() string {
|
||||
var hostname = spec.Hostname
|
||||
if hostname == "" {
|
||||
for _, envEntry := range envMap[c.Name] {
|
||||
env := strings.Split(envEntry, "=")
|
||||
if len(env) == 2 && env[0] == "HOSTNAME" {
|
||||
hostname = env[1]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return hostname
|
||||
}(),
|
||||
Domainname: spec.Subdomain,
|
||||
User: "root",
|
||||
AttachStdin: false,
|
||||
AttachStdout: false,
|
||||
AttachStderr: false,
|
||||
ExposedPorts: nil,
|
||||
Tty: c.TTY,
|
||||
OpenStdin: c.Stdin,
|
||||
StdinOnce: false,
|
||||
Env: envMap[c.Name],
|
||||
Cmd: c.Args,
|
||||
Healthcheck: nil,
|
||||
ArgsEscaped: false,
|
||||
Image: c.Image,
|
||||
Volumes: nil,
|
||||
WorkingDir: c.WorkingDir,
|
||||
Entrypoint: c.Command,
|
||||
NetworkDisabled: false,
|
||||
MacAddress: "",
|
||||
OnBuild: nil,
|
||||
Labels: temp.Labels,
|
||||
StopSignal: "",
|
||||
StopTimeout: nil,
|
||||
Shell: nil,
|
||||
}
|
||||
if temp.DeletionGracePeriodSeconds != nil {
|
||||
tmpConfig.StopTimeout = (*int)(unsafe.Pointer(temp.DeletionGracePeriodSeconds))
|
||||
}
|
||||
hostConfig := &container.HostConfig{
|
||||
Binds: []string{},
|
||||
ContainerIDFile: "",
|
||||
LogConfig: container.LogConfig{},
|
||||
//NetworkMode: "",
|
||||
PortBindings: nil,
|
||||
RestartPolicy: container.RestartPolicy{},
|
||||
AutoRemove: false,
|
||||
VolumeDriver: "",
|
||||
VolumesFrom: nil,
|
||||
ConsoleSize: [2]uint{},
|
||||
CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
|
||||
CgroupnsMode: "",
|
||||
DNS: dnsConfig.Servers,
|
||||
DNSOptions: []string{fmt.Sprintf("ndots=%d", dnsConfig.Ndots)},
|
||||
DNSSearch: dnsConfig.Search,
|
||||
ExtraHosts: nil,
|
||||
GroupAdd: nil,
|
||||
IpcMode: "",
|
||||
Cgroup: "",
|
||||
Links: nil,
|
||||
OomScoreAdj: 0,
|
||||
PidMode: "",
|
||||
Privileged: true,
|
||||
PublishAllPorts: false,
|
||||
ReadonlyRootfs: false,
|
||||
SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
|
||||
StorageOpt: nil,
|
||||
Tmpfs: nil,
|
||||
UTSMode: "",
|
||||
UsernsMode: "",
|
||||
ShmSize: 0,
|
||||
Sysctls: nil,
|
||||
Runtime: "",
|
||||
Isolation: "",
|
||||
Resources: container.Resources{},
|
||||
Mounts: mountVolume[c.Name],
|
||||
MaskedPaths: nil,
|
||||
ReadonlyPaths: nil,
|
||||
Init: nil,
|
||||
}
|
||||
var portmap = nat.PortMap{}
|
||||
var portset = nat.PortSet{}
|
||||
for _, port := range c.Ports {
|
||||
port1 := nat.Port(fmt.Sprintf("%d/%s", port.ContainerPort, strings.ToLower(string(port.Protocol))))
|
||||
if port.HostPort != 0 {
|
||||
binding := []nat.PortBinding{{HostPort: strconv.FormatInt(int64(port.HostPort), 10)}}
|
||||
portmap[port1] = binding
|
||||
} else {
|
||||
binding := []nat.PortBinding{{HostPort: strconv.FormatInt(int64(port.ContainerPort), 10)}}
|
||||
portmap[port1] = binding
|
||||
}
|
||||
portset[port1] = struct{}{}
|
||||
}
|
||||
hostConfig.PortBindings = portmap
|
||||
tmpConfig.ExposedPorts = portset
|
||||
if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
|
||||
hostConfig.CapAdd = append(hostConfig.CapAdd, *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Add))...)
|
||||
hostConfig.CapDrop = *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Drop))
|
||||
}
|
||||
var suffix string
|
||||
newUUID, err := uuid.NewUUID()
|
||||
if err == nil {
|
||||
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
|
||||
}
|
||||
var r RunConfig
|
||||
r.containerName = fmt.Sprintf("%s_%s_%s_%s", c.Name, namespace, "kubevpn", suffix)
|
||||
r.k8sContainerName = c.Name
|
||||
r.config = tmpConfig
|
||||
r.hostConfig = hostConfig
|
||||
r.networkingConfig = &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)}
|
||||
r.platform = nil
|
||||
|
||||
runConfigList = append(runConfigList, &r)
|
||||
}
|
||||
return runConfigList
|
||||
}
|
||||
|
||||
func GetDNS(ctx context.Context, f util.Factory, ns, pod string) (*miekgdns.ClientConfig, error) {
|
||||
clientSet, err := f.KubernetesClientSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = clientSet.CoreV1().Pods(ns).Get(ctx, pod, v13.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config, err := f.ToRESTConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client, err := f.RESTClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientConfig, err := util2.GetDNSServiceIPFromPod(clientSet, client, config, pod, ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return clientConfig, nil
|
||||
}
|
||||
|
||||
// GetVolume key format: [container name]-[volume mount name]
|
||||
func GetVolume(ctx context.Context, f util.Factory, ns, pod string, d *Options) (map[string][]mount.Mount, error) {
|
||||
clientSet, err := f.KubernetesClientSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var get *v1.Pod
|
||||
get, err = clientSet.CoreV1().Pods(ns).Get(ctx, pod, v13.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := map[string][]mount.Mount{}
|
||||
for _, c := range get.Spec.Containers {
|
||||
// if container name is vpn or envoy-proxy, not need to download volume
|
||||
if c.Name == config.ContainerSidecarVPN || c.Name == config.ContainerSidecarEnvoyProxy {
|
||||
continue
|
||||
}
|
||||
var m []mount.Mount
|
||||
for _, volumeMount := range c.VolumeMounts {
|
||||
if volumeMount.MountPath == "/tmp" {
|
||||
continue
|
||||
}
|
||||
join := filepath.Join(os.TempDir(), strconv.Itoa(rand.Int()))
|
||||
err = os.MkdirAll(join, 0755)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if volumeMount.SubPath != "" {
|
||||
join = filepath.Join(join, volumeMount.SubPath)
|
||||
}
|
||||
d.AddRollbackFunc(func() error {
|
||||
return os.RemoveAll(join)
|
||||
})
|
||||
// pod-namespace/pod-name:path
|
||||
remotePath := fmt.Sprintf("%s/%s:%s", ns, pod, volumeMount.MountPath)
|
||||
stdIn, stdOut, stdErr := term.StdStreams()
|
||||
copyOptions := cp.NewCopyOptions(genericclioptions.IOStreams{In: stdIn, Out: stdOut, ErrOut: stdErr})
|
||||
copyOptions.Container = c.Name
|
||||
copyOptions.MaxTries = 10
|
||||
err = copyOptions.Complete(f, &cobra.Command{}, []string{remotePath, join})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = copyOptions.Run()
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "failed to download volume %s path %s to %s, err: %v, ignore...\n", volumeMount.Name, remotePath, join, err)
|
||||
continue
|
||||
}
|
||||
m = append(m, mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
Source: join,
|
||||
Target: volumeMount.MountPath,
|
||||
})
|
||||
log.Infof("%s:%s", join, volumeMount.MountPath)
|
||||
}
|
||||
result[c.Name] = m
|
||||
}
|
||||
return result, nil
|
||||
}
|
@@ -1,4 +1,4 @@
|
||||
package container
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -8,18 +8,18 @@ import (
|
||||
"regexp"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/completion"
|
||||
"github.com/docker/cli/cli/command/image"
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
imagetypes "github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
apiclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/registry"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
@@ -55,7 +55,7 @@ func NewCreateCommand(dockerCli command.Cli) *cobra.Command {
|
||||
if len(args) > 1 {
|
||||
copts.Args = args[1:]
|
||||
}
|
||||
return runCreate(dockerCli, cmd.Flags(), &options, copts)
|
||||
return runCreate(cmd.Context(), dockerCli, cmd.Flags(), &options, copts)
|
||||
},
|
||||
Annotations: map[string]string{
|
||||
"aliases": "docker container create, docker create",
|
||||
@@ -80,7 +80,7 @@ func NewCreateCommand(dockerCli command.Cli) *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, options *createOptions, copts *containerOptions) error {
|
||||
func runCreate(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, options *createOptions, copts *containerOptions) error {
|
||||
if err := validatePullOpt(options.pull); err != nil {
|
||||
reportError(dockerCli.Err(), "create", err.Error(), true)
|
||||
return cli.StatusError{StatusCode: 125}
|
||||
@@ -91,62 +91,48 @@ func runCreate(dockerCli command.Cli, flags *pflag.FlagSet, options *createOptio
|
||||
if v == nil {
|
||||
newEnv = append(newEnv, k)
|
||||
} else {
|
||||
newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v))
|
||||
newEnv = append(newEnv, k+"="+*v)
|
||||
}
|
||||
}
|
||||
copts.env = *opts.NewListOptsRef(&newEnv, nil)
|
||||
containerConfig, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
|
||||
containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
|
||||
if err != nil {
|
||||
reportError(dockerCli.Err(), "create", err.Error(), true)
|
||||
return cli.StatusError{StatusCode: 125}
|
||||
}
|
||||
if err = validateAPIVersion(containerConfig, dockerCli.Client().ClientVersion()); err != nil {
|
||||
if err = validateAPIVersion(containerCfg, dockerCli.Client().ClientVersion()); err != nil {
|
||||
reportError(dockerCli.Err(), "create", err.Error(), true)
|
||||
return cli.StatusError{StatusCode: 125}
|
||||
}
|
||||
response, err := createContainer(context.Background(), dockerCli, containerConfig, options)
|
||||
id, err := createContainer(ctx, dockerCli, containerCfg, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), response.ID)
|
||||
_, _ = fmt.Fprintln(dockerCli.Out(), id)
|
||||
return nil
|
||||
}
|
||||
|
||||
func pullImage(ctx context.Context, dockerCli command.Cli, image string, platform string, out io.Writer) error {
|
||||
ref, err := reference.ParseNormalizedNamed(image)
|
||||
// FIXME(thaJeztah): this is the only code-path that uses APIClient.ImageCreate. Rewrite this to use the regular "pull" code (or vice-versa).
|
||||
func pullImage(ctx context.Context, dockerCli command.Cli, img string, options *createOptions) error {
|
||||
encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the Repository name from fqn to RepositoryInfo
|
||||
repoInfo, err := registry.ParseRepositoryInfo(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
|
||||
encodedAuth, err := command.EncodeAuthToBase64(authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := types.ImageCreateOptions{
|
||||
responseBody, err := dockerCli.Client().ImageCreate(ctx, img, imagetypes.CreateOptions{
|
||||
RegistryAuth: encodedAuth,
|
||||
Platform: platform,
|
||||
}
|
||||
|
||||
responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options)
|
||||
Platform: options.platform,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
return jsonmessage.DisplayJSONMessagesStream(
|
||||
responseBody,
|
||||
out,
|
||||
dockerCli.Out().FD(),
|
||||
dockerCli.Out().IsTerminal(),
|
||||
nil)
|
||||
out := dockerCli.Err()
|
||||
if options.quiet {
|
||||
out = io.Discard
|
||||
}
|
||||
return jsonmessage.DisplayJSONMessagesToStream(responseBody, streams.NewOut(out), nil)
|
||||
}
|
||||
|
||||
type cidFile struct {
|
||||
@@ -199,10 +185,10 @@ func newCIDFile(path string) (*cidFile, error) {
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, opts *createOptions) (*container.CreateResponse, error) {
|
||||
config := containerConfig.Config
|
||||
hostConfig := containerConfig.HostConfig
|
||||
networkingConfig := containerConfig.NetworkingConfig
|
||||
func createContainer(ctx context.Context, dockerCli command.Cli, containerCfg *containerConfig, options *createOptions) (containerID string, err error) {
|
||||
config := containerCfg.Config
|
||||
hostConfig := containerCfg.HostConfig
|
||||
networkingConfig := containerCfg.NetworkingConfig
|
||||
|
||||
warnOnOomKillDisable(*hostConfig, dockerCli.Err())
|
||||
warnOnLocalhostDNS(*hostConfig, dockerCli.Err())
|
||||
@@ -214,33 +200,29 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig
|
||||
|
||||
containerIDFile, err := newCIDFile(hostConfig.ContainerIDFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
defer containerIDFile.Close()
|
||||
|
||||
ref, err := reference.ParseAnyReference(config.Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
if named, ok := ref.(reference.Named); ok {
|
||||
namedRef = reference.TagNameOnly(named)
|
||||
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !opts.untrusted {
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !options.untrusted {
|
||||
var err error
|
||||
trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil)
|
||||
trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
config.Image = reference.FamiliarString(trustedRef)
|
||||
}
|
||||
}
|
||||
|
||||
pullAndTagImage := func() error {
|
||||
pullOut := dockerCli.Err()
|
||||
if opts.quiet {
|
||||
pullOut = io.Discard
|
||||
}
|
||||
if err := pullImage(ctx, dockerCli, config.Image, opts.platform, pullOut); err != nil {
|
||||
if err := pullImage(ctx, dockerCli, config.Image, options); err != nil {
|
||||
return err
|
||||
}
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
|
||||
@@ -254,50 +236,50 @@ func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig
|
||||
// create. It will produce an error if you try to set a platform on older API
|
||||
// versions, so check the API version here to maintain backwards
|
||||
// compatibility for CLI users.
|
||||
if opts.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
|
||||
p, err := platforms.Parse(opts.platform)
|
||||
if options.platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
|
||||
p, err := platforms.Parse(options.platform)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error parsing specified platform")
|
||||
return "", errors.Wrap(err, "error parsing specified platform")
|
||||
}
|
||||
platform = &p
|
||||
}
|
||||
|
||||
if opts.pull == PullImageAlways {
|
||||
if options.pull == PullImageAlways {
|
||||
if err := pullAndTagImage(); err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()
|
||||
|
||||
response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.name)
|
||||
response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
|
||||
if err != nil {
|
||||
// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
|
||||
if apiclient.IsErrNotFound(err) && namedRef != nil && opts.pull == PullImageMissing {
|
||||
if !opts.quiet {
|
||||
if errdefs.IsNotFound(err) && namedRef != nil && options.pull == PullImageMissing {
|
||||
if !options.quiet {
|
||||
// we don't want to write to stdout anything apart from container.ID
|
||||
fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
|
||||
}
|
||||
|
||||
if err := pullAndTagImage(); err != nil {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
|
||||
var retryErr error
|
||||
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.name)
|
||||
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, options.name)
|
||||
if retryErr != nil {
|
||||
return nil, retryErr
|
||||
return "", retryErr
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
for _, warning := range response.Warnings {
|
||||
fmt.Fprintf(dockerCli.Err(), "WARNING: %s\n", warning)
|
||||
for _, w := range response.Warnings {
|
||||
_, _ = fmt.Fprintf(dockerCli.Err(), "WARNING: %s\n", w)
|
||||
}
|
||||
err = containerIDFile.Write(response.ID)
|
||||
return &response, err
|
||||
return response.ID, err
|
||||
}
|
||||
|
||||
func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) {
|
@@ -87,7 +87,7 @@ func (h *hijackedIOStreamer) setupInput() (restore func(), err error) {
|
||||
var restoreOnce sync.Once
|
||||
restore = func() {
|
||||
restoreOnce.Do(func() {
|
||||
restoreTerminal(h.streams, h.inputStream)
|
||||
_ = restoreTerminal(h.streams, h.inputStream)
|
||||
})
|
||||
}
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package container
|
||||
package dev
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -13,18 +13,33 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/compose/loader"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
mounttypes "github.com/docker/docker/api/types/mount"
|
||||
networktypes "github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
cdi "tags.cncf.io/container-device-interface/pkg/parser"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO(thaJeztah): define these in the API-types, or query available defaults
|
||||
// from the daemon, or require "local" profiles to be an absolute path or
|
||||
// relative paths starting with "./". The daemon-config has consts for this
|
||||
// but we don't want to import that package:
|
||||
// https://github.com/moby/moby/blob/v23.0.0/daemon/config/config.go#L63-L67
|
||||
|
||||
// seccompProfileDefault is the built-in default seccomp profile.
|
||||
seccompProfileDefault = "builtin"
|
||||
// seccompProfileUnconfined is a special profile name for seccomp to use an
|
||||
// "unconfined" seccomp profile.
|
||||
seccompProfileUnconfined = "unconfined"
|
||||
)
|
||||
|
||||
var deviceCgroupRuleRegexp = regexp.MustCompile(`^[acb] ([0-9]+|\*):([0-9]+|\*) [rwm]{1,3}$`)
|
||||
@@ -119,10 +134,12 @@ type containerOptions struct {
|
||||
healthInterval time.Duration
|
||||
healthTimeout time.Duration
|
||||
healthStartPeriod time.Duration
|
||||
healthStartInterval time.Duration
|
||||
healthRetries int
|
||||
runtime string
|
||||
autoRemove bool
|
||||
init bool
|
||||
annotations *opts.MapOpts
|
||||
|
||||
Image string
|
||||
Args []string
|
||||
@@ -163,6 +180,7 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
|
||||
ulimits: opts.NewUlimitOpt(nil),
|
||||
volumes: opts.NewListOpts(nil),
|
||||
volumesFrom: opts.NewListOpts(nil),
|
||||
annotations: opts.NewMapOpts(nil, nil),
|
||||
}
|
||||
|
||||
// General purpose flags
|
||||
@@ -181,7 +199,7 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
|
||||
flags.VarP(&copts.labels, "label", "l", "Set meta data on a container")
|
||||
flags.Var(&copts.labelsFile, "label-file", "Read in a line delimited file of labels")
|
||||
flags.BoolVar(&copts.readonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only")
|
||||
flags.StringVar(&copts.restartPolicy, "restart", "no", "Restart policy to apply when a container exits")
|
||||
flags.StringVar(&copts.restartPolicy, "restart", string(container.RestartPolicyDisabled), "Restart policy to apply when a container exits")
|
||||
flags.StringVar(&copts.stopSignal, "stop-signal", "", "Signal to stop the container")
|
||||
flags.IntVar(&copts.stopTimeout, "stop-timeout", 0, "Timeout (in seconds) to stop a container")
|
||||
flags.SetAnnotation("stop-timeout", "version", []string{"1.25"})
|
||||
@@ -248,6 +266,8 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
|
||||
flags.DurationVar(&copts.healthTimeout, "health-timeout", 0, "Maximum time to allow one check to run (ms|s|m|h) (default 0s)")
|
||||
flags.DurationVar(&copts.healthStartPeriod, "health-start-period", 0, "Start period for the container to initialize before starting health-retries countdown (ms|s|m|h) (default 0s)")
|
||||
flags.SetAnnotation("health-start-period", "version", []string{"1.29"})
|
||||
flags.DurationVar(&copts.healthStartInterval, "health-start-interval", 0, "Time between running the check during the start period (ms|s|m|h) (default 0s)")
|
||||
flags.SetAnnotation("health-start-interval", "version", []string{"1.44"})
|
||||
flags.BoolVar(&copts.noHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK")
|
||||
|
||||
// Resource management
|
||||
@@ -266,7 +286,7 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
|
||||
flags.SetAnnotation("cpu-rt-period", "version", []string{"1.25"})
|
||||
flags.Int64Var(&copts.cpuRealtimeRuntime, "cpu-rt-runtime", 0, "Limit CPU real-time runtime in microseconds")
|
||||
flags.SetAnnotation("cpu-rt-runtime", "version", []string{"1.25"})
|
||||
flags.Int64VarP(&copts.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)")
|
||||
flags.Int64Var(&copts.cpuShares, "cpu-shares", 0, "CPU shares (relative weight)")
|
||||
flags.Var(&copts.cpus, "cpus", "Number of CPUs")
|
||||
flags.SetAnnotation("cpus", "version", []string{"1.25"})
|
||||
flags.Var(&copts.deviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device")
|
||||
@@ -297,6 +317,10 @@ func addFlags(flags *pflag.FlagSet) *containerOptions {
|
||||
|
||||
flags.BoolVar(&copts.init, "init", false, "Run an init inside the container that forwards signals and reaps processes")
|
||||
flags.SetAnnotation("init", "version", []string{"1.25"})
|
||||
|
||||
flags.Var(copts.annotations, "annotation", "Add an annotation to the container (passed through to the OCI runtime)")
|
||||
flags.SetAnnotation("annotation", "version", []string{"1.43"})
|
||||
|
||||
return copts
|
||||
}
|
||||
|
||||
@@ -348,7 +372,10 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
||||
volumes := copts.volumes.GetMap()
|
||||
// add any bind targets to the list of container volumes
|
||||
for bind := range copts.volumes.GetMap() {
|
||||
parsed, _ := loader.ParseVolume(bind)
|
||||
parsed, err := loader.ParseVolume(bind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if parsed.Source != "" {
|
||||
toBind := bind
|
||||
@@ -443,12 +470,17 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
||||
// parsing flags, we haven't yet sent a _ping to the daemon to determine
|
||||
// what operating system it is.
|
||||
deviceMappings := []container.DeviceMapping{}
|
||||
var cdiDeviceNames []string
|
||||
for _, device := range copts.devices.GetAll() {
|
||||
var (
|
||||
validated string
|
||||
deviceMapping container.DeviceMapping
|
||||
err error
|
||||
)
|
||||
if cdi.IsQualifiedName(device) {
|
||||
cdiDeviceNames = append(cdiDeviceNames, device)
|
||||
continue
|
||||
}
|
||||
validated, err = validateDevice(device, serverOS)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -520,7 +552,8 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
||||
copts.healthInterval != 0 ||
|
||||
copts.healthTimeout != 0 ||
|
||||
copts.healthStartPeriod != 0 ||
|
||||
copts.healthRetries != 0
|
||||
copts.healthRetries != 0 ||
|
||||
copts.healthStartInterval != 0
|
||||
if copts.noHealthcheck {
|
||||
if haveHealthSettings {
|
||||
return nil, errors.Errorf("--no-healthcheck conflicts with --health-* options")
|
||||
@@ -543,16 +576,29 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
||||
if copts.healthStartPeriod < 0 {
|
||||
return nil, fmt.Errorf("--health-start-period cannot be negative")
|
||||
}
|
||||
if copts.healthStartInterval < 0 {
|
||||
return nil, fmt.Errorf("--health-start-interval cannot be negative")
|
||||
}
|
||||
|
||||
healthConfig = &container.HealthConfig{
|
||||
Test: probe,
|
||||
Interval: copts.healthInterval,
|
||||
Timeout: copts.healthTimeout,
|
||||
StartPeriod: copts.healthStartPeriod,
|
||||
StartInterval: copts.healthStartInterval,
|
||||
Retries: copts.healthRetries,
|
||||
}
|
||||
}
|
||||
|
||||
deviceRequests := copts.gpus.Value()
|
||||
if len(cdiDeviceNames) > 0 {
|
||||
cdiDeviceRequest := container.DeviceRequest{
|
||||
Driver: "cdi",
|
||||
DeviceIDs: cdiDeviceNames,
|
||||
}
|
||||
deviceRequests = append(deviceRequests, cdiDeviceRequest)
|
||||
}
|
||||
|
||||
resources := container.Resources{
|
||||
CgroupParent: copts.cgroupParent,
|
||||
Memory: copts.memory.Value(),
|
||||
@@ -583,7 +629,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
||||
Ulimits: copts.ulimits.GetList(),
|
||||
DeviceCgroupRules: copts.deviceCgroupRules.GetAll(),
|
||||
Devices: deviceMappings,
|
||||
DeviceRequests: copts.gpus.Value(),
|
||||
DeviceRequests: deviceRequests,
|
||||
}
|
||||
|
||||
config := &container.Config{
|
||||
@@ -654,6 +700,7 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
||||
Mounts: mounts,
|
||||
MaskedPaths: maskedPaths,
|
||||
ReadonlyPaths: readonlyPaths,
|
||||
Annotations: copts.annotations.GetAll(),
|
||||
}
|
||||
|
||||
if copts.autoRemove && !hostConfig.RestartPolicy.IsNone() {
|
||||
@@ -679,6 +726,12 @@ func parse(flags *pflag.FlagSet, copts *containerOptions, serverOS string) (*con
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Put the endpoint-specific MacAddress of the "main" network attachment into the container Config for backward
|
||||
// compatibility with older daemons.
|
||||
if nw, ok := networkingConfig.EndpointsConfig[hostConfig.NetworkMode.NetworkName()]; ok {
|
||||
config.MacAddress = nw.MacAddress //nolint:staticcheck // ignore SA1019: field is deprecated, but still used on API < v1.44.
|
||||
}
|
||||
|
||||
return &containerConfig{
|
||||
Config: config,
|
||||
HostConfig: hostConfig,
|
||||
@@ -699,6 +752,20 @@ func parseNetworkOpts(copts *containerOptions) (map[string]*networktypes.Endpoin
|
||||
hasUserDefined, hasNonUserDefined bool
|
||||
)
|
||||
|
||||
if len(copts.netMode.Value()) == 0 {
|
||||
n := opts.NetworkAttachmentOpts{
|
||||
Target: "default",
|
||||
}
|
||||
if err := applyContainerOptions(&n, copts); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ep, err := parseNetworkAttachmentOpt(n)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
endpoints["default"] = ep
|
||||
}
|
||||
|
||||
for i, n := range copts.netMode.Value() {
|
||||
n := n
|
||||
if container.NetworkMode(n.Target).IsUserDefined() {
|
||||
@@ -740,8 +807,7 @@ func parseNetworkOpts(copts *containerOptions) (map[string]*networktypes.Endpoin
|
||||
return endpoints, nil
|
||||
}
|
||||
|
||||
func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOptions) error {
|
||||
// TODO should copts.MacAddress actually be set on the first network? (currently it's not)
|
||||
func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOptions) error { //nolint:gocyclo
|
||||
// TODO should we error if _any_ advanced option is used? (i.e. forbid to combine advanced notation with the "old" flags (`--network-alias`, `--link`, `--ip`, `--ip6`)?
|
||||
if len(n.Aliases) > 0 && copts.aliases.Len() > 0 {
|
||||
return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --network-alias and per-network alias"))
|
||||
@@ -755,11 +821,17 @@ func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOption
|
||||
if n.IPv6Address != "" && copts.ipv6Address != "" {
|
||||
return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --ip6 and per-network IPv6 address"))
|
||||
}
|
||||
if n.MacAddress != "" && copts.macAddress != "" {
|
||||
return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --mac-address and per-network MAC address"))
|
||||
}
|
||||
if len(n.LinkLocalIPs) > 0 && copts.linkLocalIPs.Len() > 0 {
|
||||
return errdefs.InvalidParameter(errors.New("conflicting options: cannot specify both --link-local-ip and per-network link-local IP addresses"))
|
||||
}
|
||||
if copts.aliases.Len() > 0 {
|
||||
n.Aliases = make([]string, copts.aliases.Len())
|
||||
copy(n.Aliases, copts.aliases.GetAll())
|
||||
}
|
||||
if copts.links.Len() > 0 {
|
||||
if n.Target != "default" && copts.links.Len() > 0 {
|
||||
n.Links = make([]string, copts.links.Len())
|
||||
copy(n.Links, copts.links.GetAll())
|
||||
}
|
||||
@@ -769,8 +841,9 @@ func applyContainerOptions(n *opts.NetworkAttachmentOpts, copts *containerOption
|
||||
if copts.ipv6Address != "" {
|
||||
n.IPv6Address = copts.ipv6Address
|
||||
}
|
||||
|
||||
// TODO should linkLocalIPs be added to the _first_ network only, or to _all_ networks? (should this be a per-network option as well?)
|
||||
if copts.macAddress != "" {
|
||||
n.MacAddress = copts.macAddress
|
||||
}
|
||||
if copts.linkLocalIPs.Len() > 0 {
|
||||
n.LinkLocalIPs = make([]string, copts.linkLocalIPs.Len())
|
||||
copy(n.LinkLocalIPs, copts.linkLocalIPs.GetAll())
|
||||
@@ -807,6 +880,12 @@ func parseNetworkAttachmentOpt(ep opts.NetworkAttachmentOpts) (*networktypes.End
|
||||
LinkLocalIPs: ep.LinkLocalIPs,
|
||||
}
|
||||
}
|
||||
if ep.MacAddress != "" {
|
||||
if _, err := opts.ValidateMACAddress(ep.MacAddress); err != nil {
|
||||
return nil, errors.Errorf("%s is not a valid mac address", ep.MacAddress)
|
||||
}
|
||||
epConfig.MacAddress = ep.MacAddress
|
||||
}
|
||||
return epConfig, nil
|
||||
}
|
||||
|
||||
@@ -849,7 +928,13 @@ func parseSecurityOpts(securityOpts []string) ([]string, error) {
|
||||
// "no-new-privileges" is the only option that does not require a value.
|
||||
return securityOpts, errors.Errorf("Invalid --security-opt: %q", opt)
|
||||
}
|
||||
if k == "seccomp" && v != "unconfined" {
|
||||
if k == "seccomp" {
|
||||
switch v {
|
||||
case seccompProfileDefault, seccompProfileUnconfined:
|
||||
// known special names for built-in profiles, nothing to do.
|
||||
default:
|
||||
// value may be a filename, in which case we send the profile's
|
||||
// content if it's valid JSON.
|
||||
f, err := os.ReadFile(v)
|
||||
if err != nil {
|
||||
return securityOpts, errors.Errorf("opening seccomp profile (%s) failed: %v", v, err)
|
||||
@@ -861,6 +946,7 @@ func parseSecurityOpts(securityOpts []string) ([]string, error) {
|
||||
securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return securityOpts, nil
|
||||
}
|
||||
@@ -1054,8 +1140,8 @@ func validateAttach(val string) (string, error) {
|
||||
|
||||
func validateAPIVersion(c *containerConfig, serverAPIVersion string) error {
|
||||
for _, m := range c.HostConfig.Mounts {
|
||||
if m.BindOptions != nil && m.BindOptions.NonRecursive && versions.LessThan(serverAPIVersion, "1.40") {
|
||||
return errors.Errorf("bind-nonrecursive requires API v1.40 or later")
|
||||
if err := command.ValidateMountWithAPIVersion(m, serverAPIVersion); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
@@ -1,4 +1,4 @@
|
||||
package container
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -12,7 +12,6 @@ import (
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/completion"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/moby/term"
|
||||
@@ -43,7 +42,7 @@ func NewRunCommand(dockerCli command.Cli) *cobra.Command {
|
||||
if len(args) > 1 {
|
||||
copts.Args = args[1:]
|
||||
}
|
||||
return runRun(dockerCli, cmd.Flags(), &options, copts)
|
||||
return runRun(cmd.Context(), dockerCli, cmd.Flags(), &options, copts)
|
||||
},
|
||||
ValidArgsFunction: completion.ImageNames(dockerCli),
|
||||
Annotations: map[string]string{
|
||||
@@ -90,7 +89,7 @@ func NewRunCommand(dockerCli command.Cli) *cobra.Command {
|
||||
return cmd
|
||||
}
|
||||
|
||||
func runRun(dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copts *containerOptions) error {
|
||||
func runRun(ctx context.Context, dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copts *containerOptions) error {
|
||||
if err := validatePullOpt(ropts.pull); err != nil {
|
||||
reportError(dockerCli.Err(), "run", err.Error(), true)
|
||||
return cli.StatusError{StatusCode: 125}
|
||||
@@ -101,32 +100,32 @@ func runRun(dockerCli command.Cli, flags *pflag.FlagSet, ropts *runOptions, copt
|
||||
if v == nil {
|
||||
newEnv = append(newEnv, k)
|
||||
} else {
|
||||
newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v))
|
||||
newEnv = append(newEnv, k+"="+*v)
|
||||
}
|
||||
}
|
||||
copts.env = *opts.NewListOptsRef(&newEnv, nil)
|
||||
containerConfig, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
|
||||
containerCfg, err := parse(flags, copts, dockerCli.ServerInfo().OSType)
|
||||
// just in case the parse does not exit
|
||||
if err != nil {
|
||||
reportError(dockerCli.Err(), "run", err.Error(), true)
|
||||
return cli.StatusError{StatusCode: 125}
|
||||
}
|
||||
if err = validateAPIVersion(containerConfig, dockerCli.CurrentVersion()); err != nil {
|
||||
if err = validateAPIVersion(containerCfg, dockerCli.CurrentVersion()); err != nil {
|
||||
reportError(dockerCli.Err(), "run", err.Error(), true)
|
||||
return cli.StatusError{StatusCode: 125}
|
||||
}
|
||||
return runContainer(dockerCli, ropts, copts, containerConfig)
|
||||
return runContainer(ctx, dockerCli, ropts, copts, containerCfg)
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptions, containerConfig *containerConfig) error {
|
||||
config := containerConfig.Config
|
||||
func runContainer(ctx context.Context, dockerCli command.Cli, runOpts *runOptions, copts *containerOptions, containerCfg *containerConfig) error {
|
||||
config := containerCfg.Config
|
||||
stdout, stderr := dockerCli.Out(), dockerCli.Err()
|
||||
client := dockerCli.Client()
|
||||
apiClient := dockerCli.Client()
|
||||
|
||||
config.ArgsEscaped = false
|
||||
|
||||
if !opts.detach {
|
||||
if !runOpts.detach {
|
||||
if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -141,17 +140,17 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
|
||||
config.StdinOnce = false
|
||||
}
|
||||
|
||||
ctx, cancelFun := context.WithCancel(context.Background())
|
||||
ctx, cancelFun := context.WithCancel(ctx)
|
||||
defer cancelFun()
|
||||
|
||||
createResponse, err := createContainer(ctx, dockerCli, containerConfig, &opts.createOptions)
|
||||
containerID, err := createContainer(ctx, dockerCli, containerCfg, &runOpts.createOptions)
|
||||
if err != nil {
|
||||
reportError(stderr, "run", err.Error(), true)
|
||||
return runStartContainerErr(err)
|
||||
}
|
||||
if opts.sigProxy {
|
||||
if runOpts.sigProxy {
|
||||
sigc := notifyAllSignals()
|
||||
go ForwardAllSignals(ctx, dockerCli, createResponse.ID, sigc)
|
||||
go ForwardAllSignals(ctx, apiClient, containerID, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
@@ -164,26 +163,33 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
|
||||
waitDisplayID = make(chan struct{})
|
||||
go func() {
|
||||
defer close(waitDisplayID)
|
||||
fmt.Fprintln(stdout, createResponse.ID)
|
||||
_, _ = fmt.Fprintln(stdout, containerID)
|
||||
}()
|
||||
}
|
||||
attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
|
||||
if attach {
|
||||
if opts.detachKeys != "" {
|
||||
dockerCli.ConfigFile().DetachKeys = opts.detachKeys
|
||||
detachKeys := dockerCli.ConfigFile().DetachKeys
|
||||
if runOpts.detachKeys != "" {
|
||||
detachKeys = runOpts.detachKeys
|
||||
}
|
||||
|
||||
close, err := attachContainer(ctx, dockerCli, &errCh, config, createResponse.ID)
|
||||
closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
|
||||
Stream: true,
|
||||
Stdin: config.AttachStdin,
|
||||
Stdout: config.AttachStdout,
|
||||
Stderr: config.AttachStderr,
|
||||
DetachKeys: detachKeys,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer close()
|
||||
defer closeFn()
|
||||
}
|
||||
|
||||
statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, copts.autoRemove)
|
||||
statusChan := waitExitOrRemoved(ctx, apiClient, containerID, copts.autoRemove)
|
||||
|
||||
// start the container
|
||||
if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
|
||||
if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
|
||||
// If we have hijackedIOStreamer, we should notify
|
||||
// hijackedIOStreamer we are going to exit and wait
|
||||
// to avoid leaving the terminal unrestored.
|
||||
@@ -201,8 +207,8 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
|
||||
}
|
||||
|
||||
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
|
||||
if err := MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil {
|
||||
fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
|
||||
if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil {
|
||||
_, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -232,15 +238,7 @@ func runContainer(dockerCli command.Cli, opts *runOptions, copts *containerOptio
|
||||
return nil
|
||||
}
|
||||
|
||||
func attachContainer(ctx context.Context, dockerCli command.Cli, errCh *chan error, config *container.Config, containerID string) (func(), error) {
|
||||
options := types.ContainerAttachOptions{
|
||||
Stream: true,
|
||||
Stdin: config.AttachStdin,
|
||||
Stdout: config.AttachStdout,
|
||||
Stderr: config.AttachStderr,
|
||||
DetachKeys: dockerCli.ConfigFile().DetachKeys,
|
||||
}
|
||||
|
||||
func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, errCh *chan error, config *container.Config, options container.AttachOptions) (func(), error) {
|
||||
resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
|
||||
if errAttach != nil {
|
||||
return nil, errAttach
|
||||
@@ -250,13 +248,13 @@ func attachContainer(ctx context.Context, dockerCli command.Cli, errCh *chan err
|
||||
out, cerr io.Writer
|
||||
in io.ReadCloser
|
||||
)
|
||||
if config.AttachStdin {
|
||||
if options.Stdin {
|
||||
in = dockerCli.In()
|
||||
}
|
||||
if config.AttachStdout {
|
||||
if options.Stdout {
|
||||
out = dockerCli.Out()
|
||||
}
|
||||
if config.AttachStderr {
|
||||
if options.Stderr {
|
||||
if config.Tty {
|
||||
cerr = dockerCli.Out()
|
||||
} else {
|
@@ -1,11 +1,11 @@
|
||||
package container
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
// ForwardAllSignals forwards signals to the container
|
||||
//
|
||||
// The channel you pass in must already be setup to receive any signals you want to forward.
|
||||
func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string, sigc <-chan os.Signal) {
|
||||
func ForwardAllSignals(ctx context.Context, apiClient client.ContainerAPIClient, cid string, sigc <-chan os.Signal) {
|
||||
var (
|
||||
s os.Signal
|
||||
ok bool
|
||||
@@ -48,7 +48,7 @@ func ForwardAllSignals(ctx context.Context, cli command.Cli, cid string, sigc <-
|
||||
continue
|
||||
}
|
||||
|
||||
if err := cli.Client().ContainerKill(ctx, cid, sig); err != nil {
|
||||
if err := apiClient.ContainerKill(ctx, cid, sig); err != nil {
|
||||
logrus.Debugf("Error sending signal: %s", err)
|
||||
}
|
||||
}
|
@@ -1,7 +1,6 @@
|
||||
//go:build !windows
|
||||
// +build !windows
|
||||
|
||||
package container
|
||||
package dev
|
||||
|
||||
import (
|
||||
"os"
|
@@ -1,4 +1,4 @@
|
||||
package container
|
||||
package dev
|
||||
|
||||
import "os"
|
||||
|
@@ -1,4 +1,4 @@
|
||||
package container
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -9,28 +9,28 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// resizeTtyTo resizes tty to specific height and width
|
||||
func resizeTtyTo(ctx context.Context, client client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
|
||||
func resizeTtyTo(ctx context.Context, apiClient client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
|
||||
if height == 0 && width == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
options := types.ResizeOptions{
|
||||
options := container.ResizeOptions{
|
||||
Height: height,
|
||||
Width: width,
|
||||
}
|
||||
|
||||
var err error
|
||||
if isExec {
|
||||
err = client.ContainerExecResize(ctx, id, options)
|
||||
err = apiClient.ContainerExecResize(ctx, id, options)
|
||||
} else {
|
||||
err = client.ContainerResize(ctx, id, options)
|
||||
err = apiClient.ContainerResize(ctx, id, options)
|
||||
}
|
||||
|
||||
if err != nil {
|
@@ -1,19 +1,19 @@
|
||||
package container
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int {
|
||||
func waitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containerID string, waitRemove bool) <-chan int {
|
||||
if len(containerID) == 0 {
|
||||
// containerID can never be empty
|
||||
panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
|
||||
@@ -22,8 +22,8 @@ func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID s
|
||||
// Older versions used the Events API, and even older versions did not
|
||||
// support server-side removal. This legacyWaitExitOrRemoved method
|
||||
// preserves that old behavior and any issues it may have.
|
||||
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") {
|
||||
return legacyWaitExitOrRemoved(ctx, dockerCli, containerID, waitRemove)
|
||||
if versions.LessThan(apiClient.ClientVersion(), "1.30") {
|
||||
return legacyWaitExitOrRemoved(ctx, apiClient, containerID, waitRemove)
|
||||
}
|
||||
|
||||
condition := container.WaitConditionNextExit
|
||||
@@ -31,7 +31,7 @@ func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID s
|
||||
condition = container.WaitConditionRemoved
|
||||
}
|
||||
|
||||
resultC, errC := dockerCli.Client().ContainerWait(ctx, containerID, condition)
|
||||
resultC, errC := apiClient.ContainerWait(ctx, containerID, condition)
|
||||
|
||||
statusC := make(chan int)
|
||||
go func() {
|
||||
@@ -52,7 +52,7 @@ func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID s
|
||||
return statusC
|
||||
}
|
||||
|
||||
func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int {
|
||||
func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containerID string, waitRemove bool) <-chan int {
|
||||
var removeErr error
|
||||
statusChan := make(chan int)
|
||||
exitCode := 125
|
||||
@@ -65,7 +65,7 @@ func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, contain
|
||||
Filters: f,
|
||||
}
|
||||
eventCtx, cancel := context.WithCancel(ctx)
|
||||
eventq, errq := dockerCli.Client().Events(eventCtx, options)
|
||||
eventq, errq := apiClient.Events(eventCtx, options)
|
||||
|
||||
eventProcessor := func(e events.Message) bool {
|
||||
stopProcessing := false
|
||||
@@ -81,19 +81,17 @@ func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, contain
|
||||
}
|
||||
if !waitRemove {
|
||||
stopProcessing = true
|
||||
} else {
|
||||
} else if versions.LessThan(apiClient.ClientVersion(), "1.25") {
|
||||
// If we are talking to an older daemon, `AutoRemove` is not supported.
|
||||
// We need to fall back to the old behavior, which is client-side removal
|
||||
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") {
|
||||
go func() {
|
||||
removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true})
|
||||
removeErr = apiClient.ContainerRemove(ctx, containerID, container.RemoveOptions{RemoveVolumes: true})
|
||||
if removeErr != nil {
|
||||
logrus.Errorf("error removing container: %v", removeErr)
|
||||
cancel() // cancel the event Q
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
case "detach":
|
||||
exitCode = 0
|
||||
stopProcessing = true
|
||||
@@ -129,7 +127,7 @@ func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, contain
|
||||
return statusChan
|
||||
}
|
||||
|
||||
func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error {
|
||||
func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, containerID string) error) chan error {
|
||||
if len(containers) == 0 {
|
||||
return nil
|
||||
}
|
@@ -1,271 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"regexp"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/image"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
apiclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/registry"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
// Pull constants
|
||||
const (
|
||||
PullImageAlways = "always"
|
||||
PullImageMissing = "missing" // Default (matches previous behavior)
|
||||
PullImageNever = "never"
|
||||
)
|
||||
|
||||
type createOptions struct {
|
||||
Name string
|
||||
Platform string
|
||||
Untrusted bool
|
||||
Pull string // always, missing, never
|
||||
Quiet bool
|
||||
}
|
||||
|
||||
func pullImage(ctx context.Context, dockerCli command.Cli, image string, platform string, out io.Writer) error {
|
||||
ref, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Resolve the Repository name from fqn to RepositoryInfo
|
||||
repoInfo, err := registry.ParseRepositoryInfo(ref)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
authConfig := command.ResolveAuthConfig(ctx, dockerCli, repoInfo.Index)
|
||||
encodedAuth, err := command.EncodeAuthToBase64(authConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := types.ImageCreateOptions{
|
||||
RegistryAuth: encodedAuth,
|
||||
Platform: platform,
|
||||
}
|
||||
|
||||
responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
return jsonmessage.DisplayJSONMessagesStream(
|
||||
responseBody,
|
||||
out,
|
||||
dockerCli.Out().FD(),
|
||||
dockerCli.Out().IsTerminal(),
|
||||
nil)
|
||||
}
|
||||
|
||||
type cidFile struct {
|
||||
path string
|
||||
file *os.File
|
||||
written bool
|
||||
}
|
||||
|
||||
func (cid *cidFile) Close() error {
|
||||
if cid.file == nil {
|
||||
return nil
|
||||
}
|
||||
cid.file.Close()
|
||||
|
||||
if cid.written {
|
||||
return nil
|
||||
}
|
||||
if err := os.Remove(cid.path); err != nil {
|
||||
return errors.Wrapf(err, "failed to remove the CID file '%s'", cid.path)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cid *cidFile) Write(id string) error {
|
||||
if cid.file == nil {
|
||||
return nil
|
||||
}
|
||||
if _, err := cid.file.Write([]byte(id)); err != nil {
|
||||
return errors.Wrap(err, "failed to write the container ID to the file")
|
||||
}
|
||||
cid.written = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func newCIDFile(path string) (*cidFile, error) {
|
||||
if path == "" {
|
||||
return &cidFile{}, nil
|
||||
}
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
return nil, errors.Errorf("container ID file found, make sure the other container isn't running or delete %s", path)
|
||||
}
|
||||
|
||||
f, err := os.Create(path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create the container ID file")
|
||||
}
|
||||
|
||||
return &cidFile{path: path, file: f}, nil
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func createContainer(ctx context.Context, dockerCli command.Cli, containerConfig *containerConfig, opts *createOptions) (*container.CreateResponse, error) {
|
||||
config := containerConfig.Config
|
||||
hostConfig := containerConfig.HostConfig
|
||||
networkingConfig := containerConfig.NetworkingConfig
|
||||
stderr := dockerCli.Err()
|
||||
|
||||
warnOnOomKillDisable(*hostConfig, stderr)
|
||||
warnOnLocalhostDNS(*hostConfig, stderr)
|
||||
|
||||
var (
|
||||
trustedRef reference.Canonical
|
||||
namedRef reference.Named
|
||||
)
|
||||
|
||||
containerIDFile, err := newCIDFile(hostConfig.ContainerIDFile)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer containerIDFile.Close()
|
||||
|
||||
ref, err := reference.ParseAnyReference(config.Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if named, ok := ref.(reference.Named); ok {
|
||||
namedRef = reference.TagNameOnly(named)
|
||||
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && !opts.Untrusted {
|
||||
var err error
|
||||
trustedRef, err = image.TrustedReference(ctx, dockerCli, taggedRef, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config.Image = reference.FamiliarString(trustedRef)
|
||||
}
|
||||
}
|
||||
|
||||
pullAndTagImage := func() error {
|
||||
pullOut := stderr
|
||||
if opts.Quiet {
|
||||
pullOut = io.Discard
|
||||
}
|
||||
if err := pullImage(ctx, dockerCli, config.Image, opts.Platform, pullOut); err != nil {
|
||||
return err
|
||||
}
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
|
||||
return image.TagTrusted(ctx, dockerCli, trustedRef, taggedRef)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var platform *specs.Platform
|
||||
// Engine API version 1.41 first introduced the option to specify platform on
|
||||
// create. It will produce an error if you try to set a platform on older API
|
||||
// versions, so check the API version here to maintain backwards
|
||||
// compatibility for CLI users.
|
||||
if opts.Platform != "" && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.41") {
|
||||
p, err := platforms.Parse(opts.Platform)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "error parsing specified platform")
|
||||
}
|
||||
platform = &p
|
||||
}
|
||||
|
||||
if opts.Pull == PullImageAlways {
|
||||
if err := pullAndTagImage(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()
|
||||
|
||||
response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.Name)
|
||||
if err != nil {
|
||||
// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
|
||||
if apiclient.IsErrNotFound(err) && namedRef != nil && opts.Pull == PullImageMissing {
|
||||
if !opts.Quiet {
|
||||
// we don't want to write to stdout anything apart from container.ID
|
||||
fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
|
||||
}
|
||||
|
||||
if err := pullAndTagImage(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var retryErr error
|
||||
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, platform, opts.Name)
|
||||
if retryErr != nil {
|
||||
return nil, retryErr
|
||||
}
|
||||
} else {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
for _, warning := range response.Warnings {
|
||||
fmt.Fprintf(stderr, "WARNING: %s\n", warning)
|
||||
}
|
||||
err = containerIDFile.Write(response.ID)
|
||||
return &response, err
|
||||
}
|
||||
|
||||
func warnOnOomKillDisable(hostConfig container.HostConfig, stderr io.Writer) {
|
||||
if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 {
|
||||
fmt.Fprintln(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.")
|
||||
}
|
||||
}
|
||||
|
||||
// check the DNS settings passed via --dns against localhost regexp to warn if
|
||||
// they are trying to set a DNS to a localhost address
|
||||
func warnOnLocalhostDNS(hostConfig container.HostConfig, stderr io.Writer) {
|
||||
for _, dnsIP := range hostConfig.DNS {
|
||||
if isLocalhost(dnsIP) {
|
||||
fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// IPLocalhost is a regex pattern for IPv4 or IPv6 loopback range.
|
||||
const ipLocalhost = `((127\.([0-9]{1,3}\.){2}[0-9]{1,3})|(::1)$)`
|
||||
|
||||
var localhostIPRegexp = regexp.MustCompile(ipLocalhost)
|
||||
|
||||
// IsLocalhost returns true if ip matches the localhost IP regular expression.
|
||||
// Used for determining if nameserver settings are being passed which are
|
||||
// localhost addresses
|
||||
func isLocalhost(ip string) bool {
|
||||
return localhostIPRegexp.MatchString(ip)
|
||||
}
|
||||
|
||||
func validatePullOpt(val string) error {
|
||||
switch val {
|
||||
case PullImageAlways, PullImageMissing, PullImageNever, "":
|
||||
// valid option, but nothing to do yet
|
||||
return nil
|
||||
default:
|
||||
return fmt.Errorf(
|
||||
"invalid pull option: '%s': must be one of %q, %q or %q",
|
||||
val,
|
||||
PullImageAlways,
|
||||
PullImageMissing,
|
||||
PullImageNever,
|
||||
)
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
@@ -1,110 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
type RunOptions struct {
|
||||
createOptions
|
||||
Detach bool
|
||||
sigProxy bool
|
||||
detachKeys string
|
||||
}
|
||||
|
||||
func attachContainer(ctx context.Context, dockerCli command.Cli, errCh *chan error, config *container.Config, containerID string) (func(), error) {
|
||||
options := types.ContainerAttachOptions{
|
||||
Stream: true,
|
||||
Stdin: config.AttachStdin,
|
||||
Stdout: config.AttachStdout,
|
||||
Stderr: config.AttachStderr,
|
||||
DetachKeys: dockerCli.ConfigFile().DetachKeys,
|
||||
}
|
||||
|
||||
resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
|
||||
if errAttach != nil {
|
||||
return nil, fmt.Errorf("failed to attach to container: %s, err: %v", containerID, errAttach)
|
||||
}
|
||||
|
||||
var (
|
||||
out, cerr io.Writer
|
||||
in io.ReadCloser
|
||||
)
|
||||
if config.AttachStdin {
|
||||
in = dockerCli.In()
|
||||
}
|
||||
if config.AttachStdout {
|
||||
out = dockerCli.Out()
|
||||
}
|
||||
if config.AttachStderr {
|
||||
if config.Tty {
|
||||
cerr = dockerCli.Out()
|
||||
} else {
|
||||
cerr = dockerCli.Err()
|
||||
}
|
||||
}
|
||||
|
||||
ch := make(chan error, 1)
|
||||
*errCh = ch
|
||||
|
||||
if in != nil && out != nil && cerr != nil {
|
||||
|
||||
}
|
||||
|
||||
go func() {
|
||||
ch <- func() error {
|
||||
streamer := hijackedIOStreamer{
|
||||
streams: dockerCli,
|
||||
inputStream: in,
|
||||
outputStream: out,
|
||||
errorStream: cerr,
|
||||
resp: resp,
|
||||
tty: config.Tty,
|
||||
detachKeys: options.DetachKeys,
|
||||
}
|
||||
|
||||
if errHijack := streamer.stream(ctx); errHijack != nil {
|
||||
return errHijack
|
||||
}
|
||||
return errAttach
|
||||
}()
|
||||
}()
|
||||
return resp.Close, nil
|
||||
}
|
||||
|
||||
// reportError is a utility method that prints a user-friendly message
|
||||
// containing the error that occurred during parsing and a suggestion to get help
|
||||
func reportError(stderr io.Writer, name string, str string, withHelp bool) {
|
||||
str = strings.TrimSuffix(str, ".") + "."
|
||||
if withHelp {
|
||||
str += "\nSee 'docker " + name + " --help'."
|
||||
}
|
||||
log.Error(str)
|
||||
_, _ = fmt.Fprintln(stderr, "docker:", str)
|
||||
}
|
||||
|
||||
// if container start fails with 'not found'/'no such' error, return 127
|
||||
// if container start fails with 'permission denied' error, return 126
|
||||
// return 125 for generic docker daemon failures
|
||||
func runStartContainerErr(err error) error {
|
||||
trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
|
||||
statusError := cli.StatusError{StatusCode: 125}
|
||||
if strings.Contains(trimmedErr, "executable file not found") ||
|
||||
strings.Contains(trimmedErr, "no such file or directory") ||
|
||||
strings.Contains(trimmedErr, "system cannot find the file specified") {
|
||||
statusError = cli.StatusError{StatusCode: 127}
|
||||
} else if strings.Contains(trimmedErr, syscall.EACCES.Error()) {
|
||||
statusError = cli.StatusError{StatusCode: 126}
|
||||
}
|
||||
|
||||
return statusError
|
||||
}
|
841
pkg/dev/main.go
@@ -1,399 +1,39 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
containertypes "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/google/uuid"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
pkgerr "github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/polymorphichelpers"
|
||||
"k8s.io/kubectl/pkg/util/interrupt"
|
||||
"k8s.io/kubectl/pkg/util/podutils"
|
||||
"k8s.io/utils/pointer"
|
||||
util2 "k8s.io/kubectl/pkg/cmd/util"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/mesh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type ConnectMode string
|
||||
|
||||
const (
|
||||
ConnectModeContainer ConnectMode = "container"
|
||||
ConnectModeHost ConnectMode = "host"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Headers map[string]string
|
||||
Namespace string
|
||||
Workload string
|
||||
Factory cmdutil.Factory
|
||||
ContainerName string
|
||||
NoProxy bool
|
||||
ExtraRouteInfo handler.ExtraRouteInfo
|
||||
ConnectMode ConnectMode
|
||||
Engine config.Engine
|
||||
|
||||
// docker options
|
||||
DockerImage string
|
||||
Options RunOptions
|
||||
Copts *ContainerOptions
|
||||
|
||||
// inner
|
||||
Cli *client.Client
|
||||
DockerCli *command.DockerCli
|
||||
|
||||
// rollback
|
||||
rollbackFuncList []func() error
|
||||
}
|
||||
|
||||
func (d *Options) Main(ctx context.Context, tempContainerConfig *containerConfig) error {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
object, err := util.GetUnstructuredObject(d.Factory, d.Namespace, d.Workload)
|
||||
func DoDev(ctx context.Context, option *Options, conf *util.SshConfig, flags *pflag.FlagSet, f util2.Factory, transferImage bool) error {
|
||||
if p := option.Options.platform; p != "" {
|
||||
_, err := platforms.Parse(p)
|
||||
if err != nil {
|
||||
log.Errorf("get unstructured object error: %v", err)
|
||||
return err
|
||||
return fmt.Errorf("error parsing specified platform: %v", err)
|
||||
}
|
||||
|
||||
u := object.Object.(*unstructured.Unstructured)
|
||||
var templateSpec *v1.PodTemplateSpec
|
||||
//var path []string
|
||||
templateSpec, _, err = util.GetPodTemplateSpecPath(u)
|
||||
}
|
||||
client, cli, err := util.GetClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
set, err := d.Factory.KubernetesClientSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sortBy := func(pods []*v1.Pod) sort.Interface {
|
||||
for i := 0; i < len(pods); i++ {
|
||||
if pods[i].DeletionTimestamp != nil {
|
||||
pods = append(pods[:i], pods[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
return sort.Reverse(podutils.ActivePods(pods))
|
||||
}
|
||||
lab := labels.SelectorFromSet(templateSpec.Labels).String()
|
||||
firstPod, _, err := polymorphichelpers.GetFirstPod(set.CoreV1(), d.Namespace, lab, time.Second*60, sortBy)
|
||||
if err != nil {
|
||||
log.Errorf("get first running pod from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
pod := firstPod.Name
|
||||
|
||||
env, err := util.GetEnv(ctx, d.Factory, d.Namespace, pod)
|
||||
if err != nil {
|
||||
log.Errorf("get env from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
volume, err := GetVolume(ctx, d.Factory, d.Namespace, pod, d)
|
||||
if err != nil {
|
||||
log.Errorf("get volume from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
dns, err := GetDNS(ctx, d.Factory, d.Namespace, pod)
|
||||
if err != nil {
|
||||
log.Errorf("get dns from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
mesh.RemoveContainers(templateSpec)
|
||||
runConfigList := ConvertKubeResourceToContainer(d.Namespace, *templateSpec, env, volume, dns)
|
||||
err = mergeDockerOptions(runConfigList, d, tempContainerConfig)
|
||||
if err != nil {
|
||||
log.Errorf("can not fill docker options, err: %v", err)
|
||||
return err
|
||||
}
|
||||
// check resource
|
||||
var oom bool
|
||||
oom, _ = checkOutOfMemory(templateSpec, d.Cli)
|
||||
if oom {
|
||||
return fmt.Errorf("your pod resource request is bigger than docker-desktop resource, please adjust your docker-desktop resource")
|
||||
}
|
||||
mode := container.NetworkMode(d.Copts.netMode.NetworkMode())
|
||||
if len(d.Copts.netMode.Value()) != 0 {
|
||||
log.Infof("network mode is %s", d.Copts.netMode.NetworkMode())
|
||||
for _, runConfig := range runConfigList[:] {
|
||||
// remove expose port
|
||||
runConfig.config.ExposedPorts = nil
|
||||
runConfig.hostConfig.NetworkMode = mode
|
||||
mode := container.NetworkMode(option.Copts.netMode.NetworkMode())
|
||||
if mode.IsContainer() {
|
||||
runConfig.hostConfig.PidMode = containertypes.PidMode(d.Copts.netMode.NetworkMode())
|
||||
}
|
||||
runConfig.hostConfig.PortBindings = nil
|
||||
|
||||
// remove dns
|
||||
runConfig.hostConfig.DNS = nil
|
||||
runConfig.hostConfig.DNSOptions = nil
|
||||
runConfig.hostConfig.DNSSearch = nil
|
||||
runConfig.hostConfig.PublishAllPorts = false
|
||||
runConfig.config.Hostname = ""
|
||||
}
|
||||
} else {
|
||||
var networkID string
|
||||
networkID, err = createKubevpnNetwork(ctx, d.Cli)
|
||||
if err != nil {
|
||||
log.Errorf("create network for %s: %v", d.Workload, err)
|
||||
return err
|
||||
}
|
||||
log.Infof("create docker network %s", networkID)
|
||||
|
||||
runConfigList[len(runConfigList)-1].networkingConfig.EndpointsConfig[runConfigList[len(runConfigList)-1].containerName] = &network.EndpointSettings{
|
||||
NetworkID: networkID,
|
||||
}
|
||||
var portmap = nat.PortMap{}
|
||||
var portset = nat.PortSet{}
|
||||
for _, runConfig := range runConfigList {
|
||||
for k, v := range runConfig.hostConfig.PortBindings {
|
||||
if oldValue, ok := portmap[k]; ok {
|
||||
portmap[k] = append(oldValue, v...)
|
||||
} else {
|
||||
portmap[k] = v
|
||||
}
|
||||
}
|
||||
for k, v := range runConfig.config.ExposedPorts {
|
||||
portset[k] = v
|
||||
}
|
||||
}
|
||||
runConfigList[len(runConfigList)-1].hostConfig.PortBindings = portmap
|
||||
runConfigList[len(runConfigList)-1].config.ExposedPorts = portset
|
||||
|
||||
// skip last, use last container network
|
||||
for _, runConfig := range runConfigList[:len(runConfigList)-1] {
|
||||
// remove expose port
|
||||
runConfig.config.ExposedPorts = nil
|
||||
runConfig.hostConfig.NetworkMode = containertypes.NetworkMode("container:" + runConfigList[len(runConfigList)-1].containerName)
|
||||
runConfig.hostConfig.PidMode = containertypes.PidMode("container:" + runConfigList[len(runConfigList)-1].containerName)
|
||||
runConfig.hostConfig.PortBindings = nil
|
||||
|
||||
// remove dns
|
||||
runConfig.hostConfig.DNS = nil
|
||||
runConfig.hostConfig.DNSOptions = nil
|
||||
runConfig.hostConfig.DNSSearch = nil
|
||||
runConfig.hostConfig.PublishAllPorts = false
|
||||
runConfig.config.Hostname = ""
|
||||
}
|
||||
}
|
||||
|
||||
d.AddRollbackFunc(func() error {
|
||||
return runConfigList.Remove(ctx, d.Cli)
|
||||
})
|
||||
err = runConfigList.Run(ctx, volume, d.Cli, d.DockerCli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return terminal(runConfigList[0].containerName, d.DockerCli)
|
||||
}
|
||||
|
||||
type ConfigList []*RunConfig
|
||||
|
||||
func (l ConfigList) Remove(ctx context.Context, cli *client.Client) error {
|
||||
var remove = false
|
||||
for _, runConfig := range l {
|
||||
if runConfig.hostConfig.AutoRemove {
|
||||
remove = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !remove {
|
||||
return nil
|
||||
}
|
||||
for _, runConfig := range l {
|
||||
err := cli.NetworkDisconnect(ctx, runConfig.containerName, runConfig.containerName, true)
|
||||
if err != nil {
|
||||
log.Debug(err)
|
||||
}
|
||||
err = cli.ContainerRemove(ctx, runConfig.containerName, types.ContainerRemoveOptions{Force: true})
|
||||
if err != nil {
|
||||
log.Debug(err)
|
||||
}
|
||||
}
|
||||
i, err := cli.NetworkInspect(ctx, config.ConfigMapPodTrafficManager, types.NetworkInspectOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(i.Containers) == 0 {
|
||||
return cli.NetworkRemove(ctx, config.ConfigMapPodTrafficManager)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cli *client.Client, dockerCli *command.DockerCli) error {
|
||||
for index := len(l) - 1; index >= 0; index-- {
|
||||
runConfig := l[index]
|
||||
if index == 0 {
|
||||
_, err := runFirst(ctx, runConfig, cli, dockerCli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
id, err := run(ctx, runConfig, cli, dockerCli)
|
||||
if err != nil {
|
||||
// try another way to startup container
|
||||
log.Infof("occur err: %v, try another way to startup container...", err)
|
||||
runConfig.hostConfig.Mounts = nil
|
||||
id, err = run(ctx, runConfig, cli, dockerCli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = l.copyToContainer(ctx, volume[runConfig.k8sContainerName], cli, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l ConfigList) copyToContainer(ctx context.Context, volume []mount.Mount, cli *client.Client, id string) error {
|
||||
// copy volume into container
|
||||
for _, v := range volume {
|
||||
target, err := createFolder(ctx, cli, id, v.Source, v.Target)
|
||||
if err != nil {
|
||||
log.Debugf("create folder %s previoully failed, err: %v", target, err)
|
||||
}
|
||||
log.Debugf("from %s to %s", v.Source, v.Target)
|
||||
srcInfo, err := archive.CopyInfoSourcePath(v.Source, true)
|
||||
if err != nil {
|
||||
log.Errorf("copy info source path, err: %v", err)
|
||||
return err
|
||||
}
|
||||
srcArchive, err := archive.TarResource(srcInfo)
|
||||
if err != nil {
|
||||
log.Errorf("tar resource failed, err: %v", err)
|
||||
return err
|
||||
}
|
||||
dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, archive.CopyInfo{Path: v.Target})
|
||||
if err != nil {
|
||||
log.Errorf("can not prepare archive copy, err: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = cli.CopyToContainer(ctx, id, dstDir, preparedArchive, types.CopyToContainerOptions{
|
||||
AllowOverwriteDirWithFile: true,
|
||||
CopyUIDGID: true,
|
||||
})
|
||||
if err != nil {
|
||||
log.Infof("can not copy %s to container %s:%s, err: %v", v.Source, id, v.Target, err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createFolder(ctx context.Context, cli *client.Client, id string, src string, target string) (string, error) {
|
||||
lstat, err := os.Lstat(src)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !lstat.IsDir() {
|
||||
target = filepath.Dir(target)
|
||||
}
|
||||
var create types.IDResponse
|
||||
create, err = cli.ContainerExecCreate(ctx, id, types.ExecConfig{
|
||||
AttachStdin: true,
|
||||
AttachStderr: true,
|
||||
AttachStdout: true,
|
||||
Cmd: []string{"mkdir", "-p", target},
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("create folder %s previoully failed, err: %v", target, err)
|
||||
return "", err
|
||||
}
|
||||
err = cli.ContainerExecStart(ctx, create.ID, types.ExecStartCheck{})
|
||||
if err != nil {
|
||||
log.Errorf("create folder %s previoully failed, err: %v", target, err)
|
||||
return "", err
|
||||
}
|
||||
log.Infof("wait create folder %s in container %s to be done...", target, id)
|
||||
chanStop := make(chan struct{})
|
||||
wait.Until(func() {
|
||||
inspect, err := cli.ContainerExecInspect(ctx, create.ID)
|
||||
if err != nil {
|
||||
log.Warningf("can not inspect container %s, err: %v", id, err)
|
||||
return
|
||||
}
|
||||
if !inspect.Running {
|
||||
close(chanStop)
|
||||
}
|
||||
}, time.Second, chanStop)
|
||||
return target, nil
|
||||
}
|
||||
|
||||
func checkOutOfMemory(spec *v1.PodTemplateSpec, cli *client.Client) (outOfMemory bool, err error) {
|
||||
var info types.Info
|
||||
info, err = cli.Info(context.Background())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
total := info.MemTotal
|
||||
var req int64
|
||||
for _, container := range spec.Spec.Containers {
|
||||
memory := container.Resources.Requests.Memory()
|
||||
if memory != nil {
|
||||
req += memory.Value()
|
||||
}
|
||||
}
|
||||
if req > total {
|
||||
outOfMemory = true
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func DoDev(ctx context.Context, devOption *Options, conf *util.SshConfig, flags *pflag.FlagSet, f cmdutil.Factory, transferImage bool) error {
|
||||
cli, dockerCli, err := util.GetClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mode := container.NetworkMode(devOption.Copts.netMode.NetworkMode())
|
||||
if mode.IsContainer() {
|
||||
log.Infof("network mode container is %s", mode.ConnectedContainer())
|
||||
logrus.Infof("network mode container is %s", mode.ConnectedContainer())
|
||||
var inspect types.ContainerJSON
|
||||
inspect, err = cli.ContainerInspect(ctx, mode.ConnectedContainer())
|
||||
inspect, err = client.ContainerInspect(ctx, mode.ConnectedContainer())
|
||||
if err != nil {
|
||||
log.Errorf("can not inspect container %s, err: %v", mode.ConnectedContainer(), err)
|
||||
logrus.Errorf("can not inspect container %s, err: %v", mode.ConnectedContainer(), err)
|
||||
return err
|
||||
}
|
||||
if inspect.State == nil {
|
||||
@@ -402,37 +42,35 @@ func DoDev(ctx context.Context, devOption *Options, conf *util.SshConfig, flags
|
||||
if !inspect.State.Running {
|
||||
return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
|
||||
}
|
||||
log.Infof("container %s is running", mode.ConnectedContainer())
|
||||
logrus.Infof("container %s is running", mode.ConnectedContainer())
|
||||
} else if mode.IsDefault() && util.RunningInContainer() {
|
||||
var hostname string
|
||||
if hostname, err = os.Hostname(); err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("hostname is %s", hostname)
|
||||
err = devOption.Copts.netMode.Set(fmt.Sprintf("container:%s", hostname))
|
||||
logrus.Infof("hostname is %s", hostname)
|
||||
err = option.Copts.netMode.Set(fmt.Sprintf("container:%s", hostname))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// connect to cluster, in container or host
|
||||
cancel, err := devOption.doConnect(ctx, f, conf, transferImage)
|
||||
if err != nil {
|
||||
log.Errorf("connect to cluster failed, err: %v", err)
|
||||
return err
|
||||
}
|
||||
cancel, err := option.connect(ctx, f, conf, transferImage)
|
||||
defer func() {
|
||||
if cancel != nil {
|
||||
cancel()
|
||||
}
|
||||
}()
|
||||
|
||||
var tempContainerConfig *containerConfig
|
||||
err = validatePullOpt(devOption.Options.Pull)
|
||||
if err != nil {
|
||||
logrus.Errorf("connect to cluster failed, err: %v", err)
|
||||
return err
|
||||
}
|
||||
err = validatePullOpt(option.Options.pull)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
proxyConfig := dockerCli.ConfigFile().ParseProxyConfig(dockerCli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(devOption.Copts.env.GetAll()))
|
||||
proxyConfig := cli.ConfigFile().ParseProxyConfig(cli.Client().DaemonHost(), opts.ConvertKVStringsToMapWithNil(option.Copts.env.GetAll()))
|
||||
var newEnv []string
|
||||
for k, v := range proxyConfig {
|
||||
if v == nil {
|
||||
@@ -441,442 +79,17 @@ func DoDev(ctx context.Context, devOption *Options, conf *util.SshConfig, flags
|
||||
newEnv = append(newEnv, fmt.Sprintf("%s=%s", k, *v))
|
||||
}
|
||||
}
|
||||
devOption.Copts.env = *opts.NewListOptsRef(&newEnv, nil)
|
||||
tempContainerConfig, err = parse(flags, devOption.Copts, dockerCli.ServerInfo().OSType)
|
||||
option.Copts.env = *opts.NewListOptsRef(&newEnv, nil)
|
||||
var c *containerConfig
|
||||
c, err = parse(flags, option.Copts, cli.ServerInfo().OSType)
|
||||
// just in case the parse does not exit
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = validateAPIVersion(tempContainerConfig, dockerCli.Client().ClientVersion())
|
||||
err = validateAPIVersion(c, cli.Client().ClientVersion())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return devOption.Main(ctx, tempContainerConfig)
|
||||
}
|
||||
|
||||
// connect to cluster network on docker container or host
|
||||
func (d *Options) doConnect(ctx context.Context, f cmdutil.Factory, conf *util.SshConfig, transferImage bool) (cancel func(), err error) {
|
||||
connect := &handler.ConnectOptions{
|
||||
Headers: d.Headers,
|
||||
Workloads: []string{d.Workload},
|
||||
ExtraRouteInfo: d.ExtraRouteInfo,
|
||||
Engine: d.Engine,
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
}
|
||||
if err = connect.InitClient(f); err != nil {
|
||||
return
|
||||
}
|
||||
d.Namespace = connect.Namespace
|
||||
|
||||
if err = connect.PreCheckResource(); err != nil {
|
||||
return
|
||||
}
|
||||
if len(connect.Workloads) > 1 {
|
||||
return nil, fmt.Errorf("can only dev one workloads at same time, workloads: %v", connect.Workloads)
|
||||
}
|
||||
if len(connect.Workloads) < 1 {
|
||||
return nil, fmt.Errorf("you must provide resource to dev, workloads : %v is invaild", connect.Workloads)
|
||||
}
|
||||
d.Workload = connect.Workloads[0]
|
||||
|
||||
// if no-proxy is true, there is no need to intercept traffic
|
||||
if d.NoProxy {
|
||||
if len(connect.Headers) != 0 {
|
||||
return nil, fmt.Errorf("not needs to provide headers if is no-proxy mode")
|
||||
}
|
||||
connect.Workloads = []string{}
|
||||
}
|
||||
|
||||
switch d.ConnectMode {
|
||||
case ConnectModeHost:
|
||||
daemonCli := daemon.GetClient(false)
|
||||
if daemonCli == nil {
|
||||
return nil, fmt.Errorf("get nil daemon client")
|
||||
}
|
||||
var kubeconfig []byte
|
||||
var ns string
|
||||
kubeconfig, ns, err = util.ConvertToKubeconfigBytes(f)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
logLevel := log.ErrorLevel
|
||||
if config.Debug {
|
||||
logLevel = log.DebugLevel
|
||||
}
|
||||
// no need to ssh jump in the daemon, because dev mode hangs until the user exits,
|
||||
// so ssh jumping in the client is enough
|
||||
req := &rpc.ConnectRequest{
|
||||
KubeconfigBytes: string(kubeconfig),
|
||||
Namespace: ns,
|
||||
Headers: connect.Headers,
|
||||
Workloads: connect.Workloads,
|
||||
ExtraRoute: connect.ExtraRouteInfo.ToRPC(),
|
||||
UseLocalDNS: connect.UseLocalDNS,
|
||||
Engine: string(connect.Engine),
|
||||
OriginKubeconfigPath: util.GetKubeconfigPath(f),
|
||||
TransferImage: transferImage,
|
||||
Image: config.Image,
|
||||
Level: int32(logLevel),
|
||||
SshJump: conf.ToRPC(),
|
||||
}
|
||||
cancel = disconnect(ctx, daemonCli)
|
||||
var resp rpc.Daemon_ConnectClient
|
||||
resp, err = daemonCli.Proxy(ctx, req)
|
||||
if err != nil {
|
||||
log.Errorf("connect to cluster error: %s", err.Error())
|
||||
return
|
||||
}
|
||||
for {
|
||||
recv, err := resp.Recv()
|
||||
if err == io.EOF {
|
||||
return cancel, nil
|
||||
} else if err != nil {
|
||||
return cancel, err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, recv.Message)
|
||||
}
|
||||
|
||||
case ConnectModeContainer:
|
||||
var path = os.Getenv(config.EnvSSHJump)
|
||||
if path != "" {
|
||||
path, err = ConvertHost(path)
|
||||
} else {
|
||||
path, err = connect.GetKubeconfigPath()
|
||||
}
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var platform specs.Platform
|
||||
if d.Options.Platform != "" {
|
||||
platform, err = platforms.Parse(d.Options.Platform)
|
||||
if err != nil {
|
||||
return nil, pkgerr.Wrap(err, "error parsing specified platform")
|
||||
}
|
||||
}
|
||||
|
||||
var connectContainer *RunConfig
|
||||
connectContainer, err = createConnectContainer(d.NoProxy, *connect, path, d.Cli, &platform)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
cancelCtx, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
var id string
|
||||
log.Infof("starting container connect to cluster")
|
||||
id, err = run(cancelCtx, connectContainer, d.Cli, d.DockerCli)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
h := interrupt.New(
|
||||
func(signal os.Signal) { return },
|
||||
func() {
|
||||
cancelFunc()
|
||||
_ = d.Cli.ContainerKill(context.Background(), id, "SIGTERM")
|
||||
_ = runLogsSinceNow(d.DockerCli, id, true)
|
||||
},
|
||||
)
|
||||
go h.Run(func() error { select {} })
|
||||
d.AddRollbackFunc(func() error {
|
||||
h.Close()
|
||||
return nil
|
||||
})
|
||||
err = runLogsWaitRunning(cancelCtx, d.DockerCli, id)
|
||||
if err != nil {
|
||||
// interrupt by signal KILL
|
||||
if errors.Is(err, context.Canceled) {
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
log.Infof("container connect to cluster successfully")
|
||||
err = d.Copts.netMode.Set(fmt.Sprintf("container:%s", id))
|
||||
return
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupport connect mode: %s", d.ConnectMode)
|
||||
}
|
||||
}
|
||||
|
||||
func disconnect(ctx context.Context, daemonClient rpc.DaemonClient) func() {
|
||||
return func() {
|
||||
resp, err := daemonClient.Disconnect(ctx, &rpc.DisconnectRequest{
|
||||
ID: pointer.Int32(0),
|
||||
})
|
||||
if err != nil {
|
||||
log.Errorf("disconnect error: %v", err)
|
||||
return
|
||||
}
|
||||
for {
|
||||
msg, err := resp.Recv()
|
||||
if err == io.EOF {
|
||||
return
|
||||
} else if err != nil {
|
||||
log.Errorf("disconnect error: %v", err)
|
||||
return
|
||||
}
|
||||
fmt.Fprint(os.Stdout, msg.Message)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func createConnectContainer(noProxy bool, connect handler.ConnectOptions, path string, cli *client.Client, platform *specs.Platform) (*RunConfig, error) {
|
||||
var entrypoint []string
|
||||
if noProxy {
|
||||
entrypoint = []string{"kubevpn", "connect", "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraCIDR {
|
||||
entrypoint = append(entrypoint, "--extra-cidr", v)
|
||||
}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraDomain {
|
||||
entrypoint = append(entrypoint, "--extra-domain", v)
|
||||
}
|
||||
if connect.ExtraRouteInfo.ExtraNodeIP {
|
||||
entrypoint = append(entrypoint, "--extra-node-ip")
|
||||
}
|
||||
} else {
|
||||
entrypoint = []string{"kubevpn", "proxy", connect.Workloads[0], "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)}
|
||||
for k, v := range connect.Headers {
|
||||
entrypoint = append(entrypoint, "--headers", fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraCIDR {
|
||||
entrypoint = append(entrypoint, "--extra-cidr", v)
|
||||
}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraDomain {
|
||||
entrypoint = append(entrypoint, "--extra-domain", v)
|
||||
}
|
||||
if connect.ExtraRouteInfo.ExtraNodeIP {
|
||||
entrypoint = append(entrypoint, "--extra-node-ip")
|
||||
}
|
||||
}
|
||||
|
||||
runConfig := &container.Config{
|
||||
User: "root",
|
||||
AttachStdin: false,
|
||||
AttachStdout: false,
|
||||
AttachStderr: false,
|
||||
ExposedPorts: nil,
|
||||
StdinOnce: false,
|
||||
Env: []string{fmt.Sprintf("%s=1", config.EnvStartSudoKubeVPNByKubeVPN)},
|
||||
Cmd: []string{},
|
||||
Healthcheck: nil,
|
||||
ArgsEscaped: false,
|
||||
Image: config.Image,
|
||||
Volumes: nil,
|
||||
Entrypoint: entrypoint,
|
||||
NetworkDisabled: false,
|
||||
MacAddress: "",
|
||||
OnBuild: nil,
|
||||
StopSignal: "",
|
||||
StopTimeout: nil,
|
||||
Shell: nil,
|
||||
}
|
||||
hostConfig := &container.HostConfig{
|
||||
Binds: []string{fmt.Sprintf("%s:%s", path, "/root/.kube/config")},
|
||||
LogConfig: container.LogConfig{},
|
||||
PortBindings: nil,
|
||||
RestartPolicy: container.RestartPolicy{},
|
||||
AutoRemove: false,
|
||||
VolumeDriver: "",
|
||||
VolumesFrom: nil,
|
||||
ConsoleSize: [2]uint{},
|
||||
CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
|
||||
CgroupnsMode: "",
|
||||
// https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach
|
||||
// couldn't get current server API group list: Get "https://host.docker.internal:62844/api?timeout=32s": tls: failed to verify certificate: x509: certificate is valid for kubernetes.default.svc.cluster.local, kubernetes.default.svc, kubernetes.default, kubernetes, istio-sidecar-injector.istio-system.svc, proxy-exporter.kube-system.svc, not host.docker.internal
|
||||
ExtraHosts: []string{"host.docker.internal:host-gateway", "kubernetes:host-gateway"},
|
||||
GroupAdd: nil,
|
||||
IpcMode: "",
|
||||
Cgroup: "",
|
||||
Links: nil,
|
||||
OomScoreAdj: 0,
|
||||
PidMode: "",
|
||||
Privileged: true,
|
||||
PublishAllPorts: false,
|
||||
ReadonlyRootfs: false,
|
||||
SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
|
||||
StorageOpt: nil,
|
||||
Tmpfs: nil,
|
||||
UTSMode: "",
|
||||
UsernsMode: "",
|
||||
ShmSize: 0,
|
||||
Sysctls: map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)},
|
||||
Runtime: "",
|
||||
Isolation: "",
|
||||
Resources: container.Resources{},
|
||||
MaskedPaths: nil,
|
||||
ReadonlyPaths: nil,
|
||||
Init: nil,
|
||||
}
|
||||
var suffix string
|
||||
if newUUID, err := uuid.NewUUID(); err == nil {
|
||||
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
|
||||
}
|
||||
kubevpnNetwork, err := createKubevpnNetwork(context.Background(), cli)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := fmt.Sprintf("%s_%s_%s", "kubevpn", "local", suffix)
|
||||
c := &RunConfig{
|
||||
config: runConfig,
|
||||
hostConfig: hostConfig,
|
||||
networkingConfig: &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{name: {
|
||||
NetworkID: kubevpnNetwork,
|
||||
}},
|
||||
},
|
||||
platform: platform,
|
||||
containerName: name,
|
||||
k8sContainerName: name,
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, container string) error {
|
||||
c, err := dockerCli.Client().ContainerInspect(ctx, container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: true,
|
||||
}
|
||||
logStream, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer logStream.Close()
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
w := io.MultiWriter(buf, dockerCli.Out())
|
||||
|
||||
cancel, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
|
||||
go func() {
|
||||
t := time.NewTicker(time.Second)
|
||||
defer t.Stop()
|
||||
for range t.C {
|
||||
// keyword match; maybe there is a more elegant way to detect readiness
|
||||
if strings.Contains(buf.String(), "enjoy it") {
|
||||
cancelFunc()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
var errChan = make(chan error)
|
||||
go func() {
|
||||
var err error
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(w, logStream)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(w, dockerCli.Err(), logStream)
|
||||
}
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case err = <-errChan:
|
||||
return err
|
||||
case <-cancel.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func runLogsSinceNow(dockerCli command.Cli, container string, follow bool) error {
|
||||
ctx := context.Background()
|
||||
|
||||
c, err := dockerCli.Client().ContainerInspect(ctx, container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := types.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Since: "0m",
|
||||
Follow: follow,
|
||||
}
|
||||
responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(dockerCli.Out(), responseBody)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func runKill(dockerCli command.Cli, containers ...string) error {
|
||||
var errs []string
|
||||
ctx := context.Background()
|
||||
errChan := parallelOperation(ctx, append([]string{}, containers...), func(ctx context.Context, container string) error {
|
||||
return dockerCli.Client().ContainerKill(ctx, container, "SIGTERM")
|
||||
})
|
||||
for _, name := range containers {
|
||||
if err := <-errChan; err != nil {
|
||||
errs = append(errs, err.Error())
|
||||
} else {
|
||||
fmt.Fprintln(dockerCli.Out(), name)
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return errors.New(strings.Join(errs, "\n"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createKubevpnNetwork(ctx context.Context, cli *client.Client) (string, error) {
|
||||
by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
|
||||
list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
for _, resource := range list {
|
||||
if reflect.DeepEqual(resource.Labels, by) {
|
||||
return resource.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
create, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
|
||||
Driver: "bridge",
|
||||
Scope: "local",
|
||||
IPAM: &network.IPAM{
|
||||
Driver: "",
|
||||
Options: nil,
|
||||
Config: []network.IPAMConfig{
|
||||
{
|
||||
Subnet: config.DockerCIDR.String(),
|
||||
Gateway: config.DockerRouterIP.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
//Options: map[string]string{"--icc": "", "--ip-masq": ""},
|
||||
Labels: by,
|
||||
})
|
||||
if err != nil {
|
||||
if errdefs.IsForbidden(err) {
|
||||
list, _ = cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
for _, resource := range list {
|
||||
if reflect.DeepEqual(resource.Labels, by) {
|
||||
return resource.ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
return create.ID, nil
|
||||
}
|
||||
|
||||
func (d *Options) AddRollbackFunc(f func() error) {
|
||||
d.rollbackFuncList = append(d.rollbackFuncList, f)
|
||||
}
|
||||
|
||||
func (d *Options) GetRollbackFuncList() []func() error {
|
||||
return d.rollbackFuncList
|
||||
return option.Main(ctx, c)
|
||||
}
|
||||
|
@@ -3,14 +3,12 @@ package dev
|
||||
import (
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/pkg/errors"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
// The logic here is to find the specified container, then treat the passed-in tempContainerConfig (i.e. the options the user specified on the command line) as authoritative,
|
||||
// and then append the declarations originally defined in the deployment.
|
||||
func mergeDockerOptions(r ConfigList, copts *Options, tempContainerConfig *containerConfig) error {
|
||||
func mergeDockerOptions(r ConfigList, copts *Options, tempContainerConfig *containerConfig) {
|
||||
if copts.ContainerName != "" {
|
||||
var index = -1
|
||||
for i, config := range r {
|
||||
@@ -28,19 +26,16 @@ func mergeDockerOptions(r ConfigList, copts *Options, tempContainerConfig *conta
|
||||
config.Options = copts.Options
|
||||
config.Copts = copts.Copts
|
||||
|
||||
if copts.DockerImage != "" {
|
||||
config.config.Image = copts.DockerImage
|
||||
if copts.DevImage != "" {
|
||||
config.config.Image = copts.DevImage
|
||||
}
|
||||
if copts.Options.Name != "" {
|
||||
config.containerName = copts.Options.Name
|
||||
if copts.Options.name != "" {
|
||||
config.containerName = copts.Options.name
|
||||
} else {
|
||||
config.Options.Name = config.containerName
|
||||
}
|
||||
if copts.Options.Platform != "" {
|
||||
p, err := platforms.Parse(copts.Options.Platform)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error parsing specified platform")
|
||||
config.Options.name = config.containerName
|
||||
}
|
||||
if copts.Options.platform != "" {
|
||||
p, _ := platforms.Parse(copts.Options.platform)
|
||||
config.platform = &p
|
||||
}
|
||||
|
||||
@@ -92,6 +87,4 @@ func mergeDockerOptions(r ConfigList, copts *Options, tempContainerConfig *conta
|
||||
}
|
||||
|
||||
config.config = c
|
||||
|
||||
return nil
|
||||
}
|
656
pkg/dev/options.go
Normal file
656
pkg/dev/options.go
Normal file
@@ -0,0 +1,656 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
typescontainer "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/google/uuid"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
pkgerr "github.com/pkg/errors"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/polymorphichelpers"
|
||||
"k8s.io/kubectl/pkg/util/interrupt"
|
||||
"k8s.io/kubectl/pkg/util/podutils"
|
||||
"k8s.io/utils/ptr"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/mesh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type ConnectMode string
|
||||
|
||||
const (
|
||||
ConnectModeContainer ConnectMode = "container"
|
||||
ConnectModeHost ConnectMode = "host"
|
||||
)
|
||||
|
||||
type Options struct {
|
||||
Headers map[string]string
|
||||
Namespace string
|
||||
Workload string
|
||||
Factory cmdutil.Factory
|
||||
ContainerName string
|
||||
NoProxy bool
|
||||
ExtraRouteInfo handler.ExtraRouteInfo
|
||||
ConnectMode ConnectMode
|
||||
Engine config.Engine
|
||||
|
||||
// docker options
|
||||
DevImage string
|
||||
Options runOptions
|
||||
Copts *containerOptions
|
||||
|
||||
// inner
|
||||
Cli *client.Client
|
||||
DockerCli *command.DockerCli
|
||||
|
||||
// rollback
|
||||
rollbackFuncList []func() error
|
||||
}
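// Main converts the target workload's pod template into local Docker containers: it reads env, volumes and DNS from the first running pod, merges them with the user-supplied docker options, wires up networking (shared net mode or a dedicated kubevpn bridge network), and finally runs the containers.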
|
||||
|
||||
func (option *Options) Main(ctx context.Context, c *containerConfig) error {
|
||||
rand.NewSource(time.Now().UnixNano())
|
||||
object, err := util.GetUnstructuredObject(option.Factory, option.Namespace, option.Workload)
|
||||
if err != nil {
|
||||
log.Errorf("get unstructured object error: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
u := object.Object.(*unstructured.Unstructured)
|
||||
var templateSpec *v1.PodTemplateSpec
|
||||
//var path []string
|
||||
templateSpec, _, err = util.GetPodTemplateSpecPath(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
clientSet, err := option.Factory.KubernetesClientSet()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sortBy := func(pods []*v1.Pod) sort.Interface {
|
||||
for i := 0; i < len(pods); i++ {
|
||||
if pods[i].DeletionTimestamp != nil {
|
||||
pods = append(pods[:i], pods[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
return sort.Reverse(podutils.ActivePods(pods))
|
||||
}
|
||||
label := labels.SelectorFromSet(templateSpec.Labels).String()
|
||||
firstPod, _, err := polymorphichelpers.GetFirstPod(clientSet.CoreV1(), option.Namespace, label, time.Second*60, sortBy)
|
||||
if err != nil {
|
||||
log.Errorf("get first running pod from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
pod := firstPod.Name
|
||||
|
||||
env, err := util.GetEnv(ctx, option.Factory, option.Namespace, pod)
|
||||
if err != nil {
|
||||
log.Errorf("get env from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
volume, err := util.GetVolume(ctx, option.Factory, option.Namespace, pod)
|
||||
if err != nil {
|
||||
log.Errorf("get volume from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
option.AddRollbackFunc(func() error {
|
||||
return util.RemoveDir(volume)
|
||||
})
|
||||
dns, err := util.GetDNS(ctx, option.Factory, option.Namespace, pod)
|
||||
if err != nil {
|
||||
log.Errorf("get dns from k8s: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
mesh.RemoveContainers(templateSpec)
|
||||
list := convertKubeResourceToContainer(option.Namespace, *templateSpec, env, volume, dns)
|
||||
mergeDockerOptions(list, option, c)
|
||||
mode := container.NetworkMode(option.Copts.netMode.NetworkMode())
|
||||
if len(option.Copts.netMode.Value()) != 0 {
|
||||
log.Infof("network mode is %s", option.Copts.netMode.NetworkMode())
|
||||
for _, runConfig := range list[:] {
|
||||
// remove expose port
|
||||
runConfig.config.ExposedPorts = nil
|
||||
runConfig.hostConfig.NetworkMode = mode
|
||||
if mode.IsContainer() {
|
||||
runConfig.hostConfig.PidMode = typescontainer.PidMode(option.Copts.netMode.NetworkMode())
|
||||
}
|
||||
runConfig.hostConfig.PortBindings = nil
|
||||
|
||||
// remove dns
|
||||
runConfig.hostConfig.DNS = nil
|
||||
runConfig.hostConfig.DNSOptions = nil
|
||||
runConfig.hostConfig.DNSSearch = nil
|
||||
runConfig.hostConfig.PublishAllPorts = false
|
||||
runConfig.config.Hostname = ""
|
||||
}
|
||||
} else {
|
||||
var networkID string
|
||||
networkID, err = createKubevpnNetwork(ctx, option.Cli)
|
||||
if err != nil {
|
||||
log.Errorf("create network for %s: %v", option.Workload, err)
|
||||
return err
|
||||
}
|
||||
log.Infof("create docker network %s", networkID)
|
||||
|
||||
list[len(list)-1].networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
|
||||
list[len(list)-1].containerName: {NetworkID: networkID},
|
||||
}
|
||||
var portMap = nat.PortMap{}
|
||||
var portSet = nat.PortSet{}
|
||||
for _, runConfig := range list {
|
||||
for k, v := range runConfig.hostConfig.PortBindings {
|
||||
if oldValue, ok := portMap[k]; ok {
|
||||
portMap[k] = append(oldValue, v...)
|
||||
} else {
|
||||
portMap[k] = v
|
||||
}
|
||||
}
|
||||
for k, v := range runConfig.config.ExposedPorts {
|
||||
portSet[k] = v
|
||||
}
|
||||
}
|
||||
list[len(list)-1].hostConfig.PortBindings = portMap
|
||||
list[len(list)-1].config.ExposedPorts = portSet
|
||||
|
||||
// skip the last one; the other containers reuse the last container's network
|
||||
for _, runConfig := range list[:len(list)-1] {
|
||||
// remove expose port
|
||||
runConfig.config.ExposedPorts = nil
|
||||
runConfig.hostConfig.NetworkMode = typescontainer.NetworkMode("container:" + list[len(list)-1].containerName)
|
||||
runConfig.hostConfig.PidMode = typescontainer.PidMode("container:" + list[len(list)-1].containerName)
|
||||
runConfig.hostConfig.PortBindings = nil
|
||||
|
||||
// remove dns
|
||||
runConfig.hostConfig.DNS = nil
|
||||
runConfig.hostConfig.DNSOptions = nil
|
||||
runConfig.hostConfig.DNSSearch = nil
|
||||
runConfig.hostConfig.PublishAllPorts = false
|
||||
runConfig.config.Hostname = ""
|
||||
}
|
||||
}
|
||||
|
||||
option.AddRollbackFunc(func() error {
|
||||
return list.Remove(ctx, option.Cli)
|
||||
})
|
||||
return list.Run(ctx, volume, option.Cli, option.DockerCli)
|
||||
}
|
||||
|
||||
// connect to cluster network on docker container or host
|
||||
func (option *Options) connect(ctx context.Context, f cmdutil.Factory, conf *util.SshConfig, transferImage bool) (func(), error) {
|
||||
connect := &handler.ConnectOptions{
|
||||
Headers: option.Headers,
|
||||
Workloads: []string{option.Workload},
|
||||
ExtraRouteInfo: option.ExtraRouteInfo,
|
||||
Engine: option.Engine,
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
}
|
||||
if err := connect.InitClient(f); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
option.Namespace = connect.Namespace
|
||||
|
||||
if err := connect.PreCheckResource(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if len(connect.Workloads) > 1 {
|
||||
return nil, fmt.Errorf("can only dev one workloads at same time, workloads: %v", connect.Workloads)
|
||||
}
|
||||
if len(connect.Workloads) < 1 {
|
||||
return nil, fmt.Errorf("you must provide resource to dev, workloads : %v is invaild", connect.Workloads)
|
||||
}
|
||||
option.Workload = connect.Workloads[0]
|
||||
|
||||
// if no-proxy is true, there is no need to intercept traffic
|
||||
if option.NoProxy {
|
||||
if len(connect.Headers) != 0 {
|
||||
return nil, fmt.Errorf("not needs to provide headers if is no-proxy mode")
|
||||
}
|
||||
connect.Workloads = []string{}
|
||||
}
|
||||
|
||||
switch option.ConnectMode {
|
||||
case ConnectModeHost:
|
||||
daemonCli := daemon.GetClient(false)
|
||||
if daemonCli == nil {
|
||||
return nil, fmt.Errorf("get nil daemon client")
|
||||
}
|
||||
kubeConfigBytes, ns, err := util.ConvertToKubeConfigBytes(f)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
logLevel := log.ErrorLevel
|
||||
if config.Debug {
|
||||
logLevel = log.DebugLevel
|
||||
}
|
||||
// no need to ssh jump in the daemon, because dev mode hangs until the user exits,
|
||||
// so ssh jumping in the client is enough
|
||||
req := &rpc.ConnectRequest{
|
||||
KubeconfigBytes: string(kubeConfigBytes),
|
||||
Namespace: ns,
|
||||
Headers: connect.Headers,
|
||||
Workloads: connect.Workloads,
|
||||
ExtraRoute: connect.ExtraRouteInfo.ToRPC(),
|
||||
UseLocalDNS: connect.UseLocalDNS,
|
||||
Engine: string(connect.Engine),
|
||||
OriginKubeconfigPath: util.GetKubeConfigPath(f),
|
||||
TransferImage: transferImage,
|
||||
Image: config.Image,
|
||||
Level: int32(logLevel),
|
||||
SshJump: conf.ToRPC(),
|
||||
}
|
||||
cancel := disconnect(ctx, daemonCli, &rpc.DisconnectRequest{
|
||||
KubeconfigBytes: ptr.To(string(kubeConfigBytes)),
|
||||
Namespace: ptr.To(ns),
|
||||
SshJump: conf.ToRPC(),
|
||||
})
|
||||
var resp rpc.Daemon_ConnectClient
|
||||
resp, err = daemonCli.Proxy(ctx, req)
|
||||
if err != nil {
|
||||
log.Errorf("connect to cluster error: %s", err.Error())
|
||||
return cancel, err
|
||||
}
|
||||
for {
|
||||
response, err := resp.Recv()
|
||||
if err == io.EOF {
|
||||
return cancel, nil
|
||||
} else if err != nil {
|
||||
return cancel, err
|
||||
}
|
||||
fmt.Fprint(os.Stdout, response.Message)
|
||||
}
|
||||
|
||||
case ConnectModeContainer:
|
||||
var path = os.Getenv(config.EnvSSHJump)
|
||||
var err error
|
||||
if path != "" {
|
||||
path, err = util.ConvertK8sApiServerToDomain(path)
|
||||
} else {
|
||||
path, err = util.GetKubeconfigPath(connect.GetFactory())
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var platform specs.Platform
|
||||
if option.Options.platform != "" {
|
||||
platform, err = platforms.Parse(option.Options.platform)
|
||||
if err != nil {
|
||||
return nil, pkgerr.Wrap(err, "error parsing specified platform")
|
||||
}
|
||||
}
|
||||
|
||||
var connectContainer *RunConfig
|
||||
connectContainer, err = createConnectContainer(option.NoProxy, *connect, path, option.Cli, &platform)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cancelCtx, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
var id string
|
||||
log.Infof("starting container connect to cluster")
|
||||
id, err = run(cancelCtx, connectContainer, option.Cli, option.DockerCli)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
h := interrupt.New(
|
||||
func(signal os.Signal) { return },
|
||||
func() {
|
||||
cancelFunc()
|
||||
_ = option.Cli.ContainerKill(context.Background(), id, "SIGTERM")
|
||||
_ = runLogsSinceNow(option.DockerCli, id, true)
|
||||
},
|
||||
)
|
||||
go h.Run(func() error { select {} })
|
||||
option.AddRollbackFunc(func() error {
|
||||
h.Close()
|
||||
return nil
|
||||
})
|
||||
err = runLogsWaitRunning(cancelCtx, option.DockerCli, id)
|
||||
if err != nil {
|
||||
// interrupt by signal KILL
|
||||
if errors.Is(err, context.Canceled) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
log.Infof("container connect to cluster successfully")
|
||||
err = option.Copts.netMode.Set(fmt.Sprintf("container:%s", id))
|
||||
return nil, err
|
||||
default:
|
||||
return nil, fmt.Errorf("unsupport connect mode: %s", option.ConnectMode)
|
||||
}
|
||||
}
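// disconnect returns a cleanup function that asks the daemon to tear down the connection described by req and streams the progress messages to stdout.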
|
||||
|
||||
func disconnect(ctx context.Context, daemonClient rpc.DaemonClient, req *rpc.DisconnectRequest) func() {
|
||||
return func() {
|
||||
resp, err := daemonClient.Disconnect(ctx, req)
|
||||
if err != nil {
|
||||
log.Errorf("disconnect error: %v", err)
|
||||
return
|
||||
}
|
||||
for {
|
||||
msg, err := resp.Recv()
|
||||
if err == io.EOF {
|
||||
return
|
||||
} else if err != nil {
|
||||
log.Errorf("disconnect error: %v", err)
|
||||
return
|
||||
}
|
||||
fmt.Fprint(os.Stdout, msg.Message)
|
||||
}
|
||||
}
|
||||
}
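// createConnectContainer builds the RunConfig for a helper container that runs "kubevpn connect" (no-proxy mode) or "kubevpn proxy" inside Docker, mounting the kubeconfig at /root/.kube/config and joining the kubevpn bridge network.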
|
||||
|
||||
func createConnectContainer(noProxy bool, connect handler.ConnectOptions, path string, cli *client.Client, platform *specs.Platform) (*RunConfig, error) {
|
||||
var entrypoint []string
|
||||
if noProxy {
|
||||
entrypoint = []string{"kubevpn", "connect", "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraCIDR {
|
||||
entrypoint = append(entrypoint, "--extra-cidr", v)
|
||||
}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraDomain {
|
||||
entrypoint = append(entrypoint, "--extra-domain", v)
|
||||
}
|
||||
if connect.ExtraRouteInfo.ExtraNodeIP {
|
||||
entrypoint = append(entrypoint, "--extra-node-ip")
|
||||
}
|
||||
} else {
|
||||
entrypoint = []string{"kubevpn", "proxy", connect.Workloads[0], "--foreground", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image, "--engine", string(connect.Engine)}
|
||||
for k, v := range connect.Headers {
|
||||
entrypoint = append(entrypoint, "--headers", fmt.Sprintf("%s=%s", k, v))
|
||||
}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraCIDR {
|
||||
entrypoint = append(entrypoint, "--extra-cidr", v)
|
||||
}
|
||||
for _, v := range connect.ExtraRouteInfo.ExtraDomain {
|
||||
entrypoint = append(entrypoint, "--extra-domain", v)
|
||||
}
|
||||
if connect.ExtraRouteInfo.ExtraNodeIP {
|
||||
entrypoint = append(entrypoint, "--extra-node-ip")
|
||||
}
|
||||
}
|
||||
|
||||
runConfig := &container.Config{
|
||||
User: "root",
|
||||
AttachStdin: false,
|
||||
AttachStdout: false,
|
||||
AttachStderr: false,
|
||||
ExposedPorts: nil,
|
||||
StdinOnce: false,
|
||||
Env: []string{fmt.Sprintf("%s=1", config.EnvStartSudoKubeVPNByKubeVPN)},
|
||||
Cmd: []string{},
|
||||
Healthcheck: nil,
|
||||
ArgsEscaped: false,
|
||||
Image: config.Image,
|
||||
Volumes: nil,
|
||||
Entrypoint: entrypoint,
|
||||
NetworkDisabled: false,
|
||||
MacAddress: "",
|
||||
OnBuild: nil,
|
||||
StopSignal: "",
|
||||
StopTimeout: nil,
|
||||
Shell: nil,
|
||||
}
|
||||
hostConfig := &container.HostConfig{
|
||||
Binds: []string{fmt.Sprintf("%s:%s", path, "/root/.kube/config")},
|
||||
LogConfig: container.LogConfig{},
|
||||
PortBindings: nil,
|
||||
RestartPolicy: container.RestartPolicy{},
|
||||
AutoRemove: false,
|
||||
VolumeDriver: "",
|
||||
VolumesFrom: nil,
|
||||
ConsoleSize: [2]uint{},
|
||||
CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
|
||||
CgroupnsMode: "",
|
||||
// https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach
|
||||
// couldn't get current server API group list: Get "https://host.docker.internal:62844/api?timeout=32s": tls: failed to verify certificate: x509: certificate is valid for kubernetes.default.svc.cluster.local, kubernetes.default.svc, kubernetes.default, kubernetes, istio-sidecar-injector.istio-system.svc, proxy-exporter.kube-system.svc, not host.docker.internal
|
||||
ExtraHosts: []string{"host.docker.internal:host-gateway", "kubernetes:host-gateway"},
|
||||
GroupAdd: nil,
|
||||
IpcMode: "",
|
||||
Cgroup: "",
|
||||
Links: nil,
|
||||
OomScoreAdj: 0,
|
||||
PidMode: "",
|
||||
Privileged: true,
|
||||
PublishAllPorts: false,
|
||||
ReadonlyRootfs: false,
|
||||
SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
|
||||
StorageOpt: nil,
|
||||
Tmpfs: nil,
|
||||
UTSMode: "",
|
||||
UsernsMode: "",
|
||||
ShmSize: 0,
|
||||
Sysctls: map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)},
|
||||
Runtime: "",
|
||||
Isolation: "",
|
||||
Resources: container.Resources{},
|
||||
MaskedPaths: nil,
|
||||
ReadonlyPaths: nil,
|
||||
Init: nil,
|
||||
}
|
||||
var suffix string
|
||||
if newUUID, err := uuid.NewUUID(); err == nil {
|
||||
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
|
||||
}
|
||||
networkID, err := createKubevpnNetwork(context.Background(), cli)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
name := fmt.Sprintf("%s_%s_%s", "kubevpn", "local", suffix)
|
||||
c := &RunConfig{
|
||||
config: runConfig,
|
||||
hostConfig: hostConfig,
|
||||
networkingConfig: &network.NetworkingConfig{
|
||||
EndpointsConfig: map[string]*network.EndpointSettings{name: {
|
||||
NetworkID: networkID,
|
||||
}},
|
||||
},
|
||||
platform: platform,
|
||||
containerName: name,
|
||||
k8sContainerName: name,
|
||||
}
|
||||
return c, nil
|
||||
}
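// runLogsWaitRunning follows the container logs and returns once the readiness keyword ("dns service ok") appears in the output, the context is cancelled, or the log stream fails.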
|
||||
|
||||
func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, container string) error {
|
||||
c, err := dockerCli.Client().ContainerInspect(ctx, container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := typescontainer.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: true,
|
||||
}
|
||||
logStream, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer logStream.Close()
|
||||
|
||||
buf := bytes.NewBuffer(nil)
|
||||
w := io.MultiWriter(buf, dockerCli.Out())
|
||||
|
||||
cancel, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
|
||||
go func() {
|
||||
t := time.NewTicker(time.Second)
|
||||
defer t.Stop()
|
||||
for range t.C {
|
||||
// keyword match; maybe there is a more elegant way to detect readiness
|
||||
if strings.Contains(buf.String(), "dns service ok") {
|
||||
cancelFunc()
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
var errChan = make(chan error)
|
||||
go func() {
|
||||
var err error
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(w, logStream)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(w, dockerCli.Err(), logStream)
|
||||
}
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case err = <-errChan:
|
||||
return err
|
||||
case <-cancel.Done():
|
||||
return nil
|
||||
}
|
||||
}
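// runLogsSinceNow prints the container logs starting from now, optionally following the stream.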
|
||||
|
||||
func runLogsSinceNow(dockerCli command.Cli, container string, follow bool) error {
|
||||
ctx := context.Background()
|
||||
|
||||
c, err := dockerCli.Client().ContainerInspect(ctx, container)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := typescontainer.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Since: "0m",
|
||||
Follow: follow,
|
||||
}
|
||||
responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(dockerCli.Out(), responseBody)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func runKill(dockerCli command.Cli, containers ...string) error {
|
||||
var errs []string
|
||||
ctx := context.Background()
|
||||
errChan := parallelOperation(ctx, append([]string{}, containers...), func(ctx context.Context, container string) error {
|
||||
return dockerCli.Client().ContainerKill(ctx, container, "SIGTERM")
|
||||
})
|
||||
for _, name := range containers {
|
||||
if err := <-errChan; err != nil {
|
||||
errs = append(errs, err.Error())
|
||||
} else {
|
||||
fmt.Fprintln(dockerCli.Out(), name)
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return errors.New(strings.Join(errs, "\n"))
|
||||
}
|
||||
return nil
|
||||
}
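// createKubevpnNetwork returns the ID of the existing kubevpn-owned bridge network, or creates one using the configured Docker CIDR and gateway if none exists yet.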
|
||||
|
||||
func createKubevpnNetwork(ctx context.Context, cli *client.Client) (string, error) {
|
||||
by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
|
||||
list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
for _, resource := range list {
|
||||
if reflect.DeepEqual(resource.Labels, by) {
|
||||
return resource.ID, nil
|
||||
}
|
||||
}
|
||||
|
||||
create, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
|
||||
Driver: "bridge",
|
||||
Scope: "local",
|
||||
IPAM: &network.IPAM{
|
||||
Driver: "",
|
||||
Options: nil,
|
||||
Config: []network.IPAMConfig{
|
||||
{
|
||||
Subnet: config.DockerCIDR.String(),
|
||||
Gateway: config.DockerRouterIP.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
//Options: map[string]string{"--icc": "", "--ip-masq": ""},
|
||||
Labels: by,
|
||||
})
|
||||
if err != nil {
|
||||
if errdefs.IsForbidden(err) {
|
||||
list, _ = cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
for _, resource := range list {
|
||||
if reflect.DeepEqual(resource.Labels, by) {
|
||||
return resource.ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
return create.ID, nil
|
||||
}
|
||||
|
||||
func (option *Options) AddRollbackFunc(f func() error) {
|
||||
option.rollbackFuncList = append(option.rollbackFuncList, f)
|
||||
}
|
||||
|
||||
func (option *Options) GetRollbackFuncList() []func() error {
|
||||
return option.rollbackFuncList
|
||||
}
|
||||
|
||||
func AddDockerFlags(options *Options, p *pflag.FlagSet, cli *command.DockerCli) {
|
||||
p.SetInterspersed(false)
|
||||
|
||||
// These are flags not stored in Config/HostConfig
|
||||
p.BoolVarP(&options.Options.detach, "detach", "d", false, "Run container in background and print container ID")
|
||||
p.StringVar(&options.Options.name, "name", "", "Assign a name to the container")
|
||||
p.StringVar(&options.Options.pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`"|"`+PullImageMissing+`"|"`+PullImageNever+`")`)
|
||||
p.BoolVarP(&options.Options.quiet, "quiet", "q", false, "Suppress the pull output")
|
||||
|
||||
// Add an explicit help that doesn't have a `-h` to prevent the conflict
|
||||
// with hostname
|
||||
p.Bool("help", false, "Print usage")
|
||||
|
||||
command.AddPlatformFlag(p, &options.Options.platform)
|
||||
command.AddTrustVerificationFlags(p, &options.Options.untrusted, cli.ContentTrustEnabled())
|
||||
|
||||
options.Copts = addFlags(p)
|
||||
}
|
304
pkg/dev/run.go
304
pkg/dev/run.go
@@ -1,304 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/container"
|
||||
"github.com/docker/docker/api/types"
|
||||
typescommand "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
apiclient "github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/moby/term"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *command.DockerCli) (id string, err error) {
|
||||
rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
var config = runConfig.config
|
||||
var hostConfig = runConfig.hostConfig
|
||||
var platform = runConfig.platform
|
||||
var networkConfig = runConfig.networkingConfig
|
||||
var name = runConfig.containerName
|
||||
|
||||
var needPull bool
|
||||
var img types.ImageInspect
|
||||
img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
|
||||
if errdefs.IsNotFound(err) {
|
||||
log.Infof("needs to pull image %s", config.Image)
|
||||
needPull = true
|
||||
err = nil
|
||||
} else if err != nil {
|
||||
log.Errorf("image inspect failed: %v", err)
|
||||
return
|
||||
}
|
||||
if platform != nil && platform.Architecture != "" && platform.OS != "" {
|
||||
if img.Os != platform.OS || img.Architecture != platform.Architecture {
|
||||
needPull = true
|
||||
}
|
||||
}
|
||||
if needPull {
|
||||
err = util.PullImage(ctx, runConfig.platform, cli, c, config.Image, nil)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to pull image: %s, err: %s", config.Image, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var create typescommand.CreateResponse
|
||||
create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create container: %s, err: %s", name, err)
|
||||
return
|
||||
}
|
||||
id = create.ID
|
||||
log.Infof("Created container: %s", name)
|
||||
defer func() {
|
||||
if err != nil && runConfig.hostConfig.AutoRemove {
|
||||
_ = cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
|
||||
}
|
||||
}()
|
||||
|
||||
err = cli.ContainerStart(ctx, create.ID, types.ContainerStartOptions{})
|
||||
if err != nil {
|
||||
log.Errorf("failed to startup container %s: %v", name, err)
|
||||
return
|
||||
}
|
||||
log.Infof("Wait container %s to be running...", name)
|
||||
var inspect types.ContainerJSON
|
||||
cancel, cancelFunc := context.WithCancel(ctx)
|
||||
wait.UntilWithContext(cancel, func(ctx2 context.Context) {
|
||||
inspect, err = cli.ContainerInspect(ctx2, create.ID)
|
||||
if errdefs.IsNotFound(err) {
|
||||
cancelFunc()
|
||||
return
|
||||
} else if err != nil {
|
||||
cancelFunc()
|
||||
return
|
||||
}
|
||||
if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
|
||||
cancelFunc()
|
||||
err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
|
||||
return
|
||||
}
|
||||
if inspect.State != nil && inspect.State.Running {
|
||||
cancelFunc()
|
||||
return
|
||||
}
|
||||
}, time.Second)
|
||||
if err != nil {
|
||||
log.Errorf("failed to wait container to be ready: %v", err)
|
||||
_ = runLogsSinceNow(c, id, false)
|
||||
return
|
||||
}
|
||||
|
||||
// print port mapping to host
|
||||
var empty = true
|
||||
var str string
|
||||
if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil {
|
||||
var list []string
|
||||
for port, bindings := range inspect.NetworkSettings.Ports {
|
||||
var p []string
|
||||
for _, binding := range bindings {
|
||||
if binding.HostPort != "" {
|
||||
p = append(p, binding.HostPort)
|
||||
empty = false
|
||||
}
|
||||
}
|
||||
list = append(list, fmt.Sprintf("%s:%s", port, strings.Join(p, ",")))
|
||||
}
|
||||
str = fmt.Sprintf("Container %s is running on port %s now", name, strings.Join(list, " "))
|
||||
}
|
||||
if !empty {
|
||||
log.Info(str)
|
||||
} else {
|
||||
log.Infof("Container %s is running now", name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func runFirst(ctx context.Context, runConfig *RunConfig, cli *apiclient.Client, dockerCli *command.DockerCli) (id string, err error) {
|
||||
rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
stdout, stderr := dockerCli.Out(), dockerCli.Err()
|
||||
client := dockerCli.Client()
|
||||
|
||||
runConfig.config.ArgsEscaped = false
|
||||
|
||||
if err := dockerCli.In().CheckTty(runConfig.config.AttachStdin, runConfig.config.Tty); err != nil {
|
||||
return id, err
|
||||
}
|
||||
|
||||
if !runConfig.Options.Detach {
|
||||
if err := dockerCli.In().CheckTty(runConfig.config.AttachStdin, runConfig.config.Tty); err != nil {
|
||||
return id, err
|
||||
}
|
||||
} else {
|
||||
if runConfig.Copts.attach.Len() != 0 {
|
||||
return id, errors.New("Conflicting options: -a and -d")
|
||||
}
|
||||
|
||||
runConfig.config.AttachStdin = false
|
||||
runConfig.config.AttachStdout = false
|
||||
runConfig.config.AttachStderr = false
|
||||
runConfig.config.StdinOnce = false
|
||||
}
|
||||
|
||||
ctx, cancelFun := context.WithCancel(context.Background())
|
||||
defer cancelFun()
|
||||
|
||||
createResponse, err := createContainer(ctx, dockerCli, &containerConfig{
|
||||
Config: runConfig.config,
|
||||
HostConfig: runConfig.hostConfig,
|
||||
NetworkingConfig: runConfig.networkingConfig,
|
||||
}, &runConfig.Options.createOptions)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create main container: %s", err)
|
||||
return "", err
|
||||
}
|
||||
log.Infof("Created main container: %s", runConfig.containerName)
|
||||
|
||||
var (
|
||||
waitDisplayID chan struct{}
|
||||
errCh chan error
|
||||
)
|
||||
if !runConfig.config.AttachStdout && !runConfig.config.AttachStderr {
|
||||
// Make this asynchronous to allow the client to write to stdin before having to read the ID
|
||||
waitDisplayID = make(chan struct{})
|
||||
go func() {
|
||||
defer close(waitDisplayID)
|
||||
fmt.Fprintln(stdout, createResponse.ID)
|
||||
}()
|
||||
}
|
||||
attach := runConfig.config.AttachStdin || runConfig.config.AttachStdout || runConfig.config.AttachStderr
|
||||
if attach {
|
||||
close, err := attachContainer(ctx, dockerCli, &errCh, runConfig.config, createResponse.ID)
|
||||
|
||||
if err != nil {
|
||||
return id, err
|
||||
}
|
||||
defer close()
|
||||
}
|
||||
statusChan := waitExitOrRemoved(ctx, dockerCli, createResponse.ID, runConfig.Copts.autoRemove)
|
||||
// start the container
|
||||
if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil {
|
||||
// If we have hijackedIOStreamer, we should notify
|
||||
// hijackedIOStreamer we are going to exit and wait
|
||||
// to avoid the terminal not being restored.
|
||||
if attach {
|
||||
cancelFun()
|
||||
<-errCh
|
||||
}
|
||||
|
||||
reportError(stderr, "run", err.Error(), false)
|
||||
if runConfig.Copts.autoRemove {
|
||||
// wait container to be removed
|
||||
<-statusChan
|
||||
}
|
||||
return id, runStartContainerErr(err)
|
||||
}
|
||||
|
||||
if (runConfig.config.AttachStdin || runConfig.config.AttachStdout || runConfig.config.AttachStderr) && runConfig.config.Tty && dockerCli.Out().IsTerminal() {
|
||||
if err := container.MonitorTtySize(ctx, dockerCli, createResponse.ID, false); err != nil {
|
||||
fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
|
||||
}
|
||||
}
|
||||
|
||||
if errCh != nil {
|
||||
if err := <-errCh; err != nil {
|
||||
if _, ok := err.(term.EscapeError); ok {
|
||||
// The user entered the detach escape sequence.
|
||||
return id, nil
|
||||
}
|
||||
|
||||
logrus.Debugf("Error hijack: %s", err)
|
||||
return id, err
|
||||
}
|
||||
}
|
||||
|
||||
// Detached mode: wait for the id to be displayed and return.
|
||||
if !runConfig.config.AttachStdout && !runConfig.config.AttachStderr {
|
||||
// Detached mode
|
||||
<-waitDisplayID
|
||||
return id, nil
|
||||
}
|
||||
|
||||
status := <-statusChan
|
||||
if status != 0 {
|
||||
return id, errors.New(strconv.Itoa(status))
|
||||
}
|
||||
/*log.Infof("Wait container %s to be running...", runConfig.containerName)
|
||||
chanStop := make(chan struct{})
|
||||
var inspect types.ContainerJSON
|
||||
var once = &sync.Once{}
|
||||
wait.Until(func() {
|
||||
inspect, err = cli.ContainerInspect(ctx, createResponse.ID)
|
||||
if err != nil && errdefs.IsNotFound(err) {
|
||||
once.Do(func() { close(chanStop) })
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("Error inspect container: %s", err)
|
||||
return
|
||||
}
|
||||
if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
|
||||
once.Do(func() { close(chanStop) })
|
||||
err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
|
||||
return
|
||||
}
|
||||
if inspect.State != nil && inspect.State.Running {
|
||||
once.Do(func() { close(chanStop) })
|
||||
return
|
||||
}
|
||||
}, time.Second, chanStop)
|
||||
if err != nil {
|
||||
log.Errorf("wait container to be ready: %v", err)
|
||||
return
|
||||
}*/
|
||||
|
||||
// print port mapping to host
|
||||
//var empty = true
|
||||
//var str string
|
||||
//if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil {
|
||||
// var list []string
|
||||
// for port, bindings := range inspect.NetworkSettings.Ports {
|
||||
// var p []string
|
||||
// for _, binding := range bindings {
|
||||
// if binding.HostPort != "" {
|
||||
// p = append(p, binding.HostPort)
|
||||
// empty = false
|
||||
// }
|
||||
// }
|
||||
// list = append(list, fmt.Sprintf("%s:%s", port, strings.Join(p, ",")))
|
||||
// }
|
||||
// str = fmt.Sprintf("Container %s is running on port %s now", runConfig.containerName, strings.Join(list, " "))
|
||||
//}
|
||||
//if !empty {
|
||||
// log.Infoln(str)
|
||||
//} else {
|
||||
// log.Infof("Container %s is running now", runConfig.containerName)
|
||||
//}
|
||||
|
||||
return
|
||||
}
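// terminal opens an interactive TTY shell in the given container, preferring bash and falling back to sh.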
|
||||
|
||||
func terminal(c string, cli *command.DockerCli) error {
|
||||
options := container.NewExecOptions()
|
||||
options.Interactive = true
|
||||
options.TTY = true
|
||||
options.Container = c
|
||||
options.Command = []string{"sh", "-c", `command -v bash >/dev/null && exec bash || exec sh`}
|
||||
return container.RunExec(cli, options)
|
||||
}
|
318
pkg/dev/runconfig.go
Normal file
318
pkg/dev/runconfig.go
Normal file
@@ -0,0 +1,318 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
typescommand "github.com/docker/docker/api/types/container"
|
||||
typescontainer "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/google/uuid"
|
||||
"github.com/miekg/dns"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
v12 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
|
||||
type RunConfig struct {
|
||||
containerName string
|
||||
k8sContainerName string
|
||||
|
||||
config *typescontainer.Config
|
||||
hostConfig *typescontainer.HostConfig
|
||||
networkingConfig *network.NetworkingConfig
|
||||
platform *v1.Platform
|
||||
|
||||
Options runOptions
|
||||
Copts *containerOptions
|
||||
}
|
||||
|
||||
type ConfigList []*RunConfig
|
||||
|
||||
func (c ConfigList) Remove(ctx context.Context, cli *client.Client) error {
|
||||
var remove = false
|
||||
for _, runConfig := range c {
|
||||
if runConfig.hostConfig.AutoRemove {
|
||||
remove = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !remove {
|
||||
return nil
|
||||
}
|
||||
for _, runConfig := range c {
|
||||
err := cli.NetworkDisconnect(ctx, runConfig.containerName, runConfig.containerName, true)
|
||||
if err != nil {
|
||||
log.Debug(err)
|
||||
}
|
||||
err = cli.ContainerRemove(ctx, runConfig.containerName, typescontainer.RemoveOptions{Force: true})
|
||||
if err != nil {
|
||||
log.Debug(err)
|
||||
}
|
||||
}
|
||||
inspect, err := cli.NetworkInspect(ctx, config.ConfigMapPodTrafficManager, types.NetworkInspectOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if len(inspect.Containers) == 0 {
|
||||
return cli.NetworkRemove(ctx, config.ConfigMapPodTrafficManager)
|
||||
}
|
||||
return nil
|
||||
}
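// Run starts the containers in reverse order; the first entry (the main container) is run attached, the others are started in the background, copying volumes into the container as a fallback when mounting fails.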
|
||||
|
||||
func (c ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cli *client.Client, dockerCli *command.DockerCli) error {
|
||||
for index := len(c) - 1; index >= 0; index-- {
|
||||
runConfig := c[index]
|
||||
if index == 0 {
|
||||
err := runAndAttach(ctx, runConfig, dockerCli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
id, err := run(ctx, runConfig, cli, dockerCli)
|
||||
if err != nil {
|
||||
// try another way to startup container
|
||||
log.Infof("occur err: %v, try to use copy to startup container...", err)
|
||||
runConfig.hostConfig.Mounts = nil
|
||||
id, err = run(ctx, runConfig, cli, dockerCli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.CopyVolumeIntoContainer(ctx, volume[runConfig.k8sContainerName], cli, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
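// convertKubeResourceToContainer translates each container of the pod template into a Docker RunConfig, carrying over env vars, volume mounts, DNS settings, ports and security capabilities.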
|
||||
|
||||
func convertKubeResourceToContainer(ns string, temp v12.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *dns.ClientConfig) (list ConfigList) {
|
||||
getHostname := func(containerName string) string {
|
||||
for _, envEntry := range envMap[containerName] {
|
||||
env := strings.Split(envEntry, "=")
|
||||
if len(env) == 2 && env[0] == "HOSTNAME" {
|
||||
return env[1]
|
||||
}
|
||||
}
|
||||
return temp.Spec.Hostname
|
||||
}
|
||||
|
||||
for _, c := range temp.Spec.Containers {
|
||||
containerConf := &typescontainer.Config{
|
||||
Hostname: getHostname(c.Name),
|
||||
Domainname: temp.Spec.Subdomain,
|
||||
User: "root",
|
||||
AttachStdin: false,
|
||||
AttachStdout: false,
|
||||
AttachStderr: false,
|
||||
ExposedPorts: nil,
|
||||
Tty: c.TTY,
|
||||
OpenStdin: c.Stdin,
|
||||
StdinOnce: false,
|
||||
Env: envMap[c.Name],
|
||||
Cmd: c.Args,
|
||||
Healthcheck: nil,
|
||||
ArgsEscaped: false,
|
||||
Image: c.Image,
|
||||
Volumes: nil,
|
||||
WorkingDir: c.WorkingDir,
|
||||
Entrypoint: c.Command,
|
||||
NetworkDisabled: false,
|
||||
OnBuild: nil,
|
||||
Labels: temp.Labels,
|
||||
StopSignal: "",
|
||||
StopTimeout: nil,
|
||||
Shell: nil,
|
||||
}
|
||||
if temp.DeletionGracePeriodSeconds != nil {
|
||||
containerConf.StopTimeout = (*int)(unsafe.Pointer(temp.DeletionGracePeriodSeconds))
|
||||
}
|
||||
hostConfig := &typescontainer.HostConfig{
|
||||
Binds: []string{},
|
||||
ContainerIDFile: "",
|
||||
LogConfig: typescontainer.LogConfig{},
|
||||
//NetworkMode: "",
|
||||
PortBindings: nil,
|
||||
RestartPolicy: typescontainer.RestartPolicy{},
|
||||
AutoRemove: false,
|
||||
VolumeDriver: "",
|
||||
VolumesFrom: nil,
|
||||
ConsoleSize: [2]uint{},
|
||||
CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
|
||||
CgroupnsMode: "",
|
||||
DNS: dnsConfig.Servers,
|
||||
DNSOptions: []string{fmt.Sprintf("ndots=%d", dnsConfig.Ndots)},
|
||||
DNSSearch: dnsConfig.Search,
|
||||
ExtraHosts: nil,
|
||||
GroupAdd: nil,
|
||||
IpcMode: "",
|
||||
Cgroup: "",
|
||||
Links: nil,
|
||||
OomScoreAdj: 0,
|
||||
PidMode: "",
|
||||
Privileged: true,
|
||||
PublishAllPorts: false,
|
||||
ReadonlyRootfs: false,
|
||||
SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
|
||||
StorageOpt: nil,
|
||||
Tmpfs: nil,
|
||||
UTSMode: "",
|
||||
UsernsMode: "",
|
||||
ShmSize: 0,
|
||||
Sysctls: nil,
|
||||
Runtime: "",
|
||||
Isolation: "",
|
||||
Resources: typescontainer.Resources{},
|
||||
Mounts: mountVolume[c.Name],
|
||||
MaskedPaths: nil,
|
||||
ReadonlyPaths: nil,
|
||||
Init: nil,
|
||||
}
|
||||
var portMap = nat.PortMap{}
|
||||
var portSet = nat.PortSet{}
|
||||
for _, port := range c.Ports {
|
||||
p := nat.Port(fmt.Sprintf("%d/%s", port.ContainerPort, strings.ToLower(string(port.Protocol))))
|
||||
if port.HostPort != 0 {
|
||||
binding := []nat.PortBinding{{HostPort: strconv.FormatInt(int64(port.HostPort), 10)}}
|
||||
portMap[p] = binding
|
||||
} else {
|
||||
binding := []nat.PortBinding{{HostPort: strconv.FormatInt(int64(port.ContainerPort), 10)}}
|
||||
portMap[p] = binding
|
||||
}
|
||||
portSet[p] = struct{}{}
|
||||
}
|
||||
hostConfig.PortBindings = portMap
|
||||
containerConf.ExposedPorts = portSet
|
||||
if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
|
||||
hostConfig.CapAdd = append(hostConfig.CapAdd, *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Add))...)
|
||||
hostConfig.CapDrop = *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Drop))
|
||||
}
|
||||
var suffix string
|
||||
newUUID, err := uuid.NewUUID()
|
||||
if err == nil {
|
||||
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
|
||||
}
|
||||
|
||||
var r RunConfig
|
||||
r.containerName = fmt.Sprintf("%s_%s_%s_%s", c.Name, ns, "kubevpn", suffix)
|
||||
r.k8sContainerName = c.Name
|
||||
r.config = containerConf
|
||||
r.hostConfig = hostConfig
|
||||
r.networkingConfig = &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)}
|
||||
r.platform = nil
|
||||
list = append(list, &r)
|
||||
}
|
||||
return list
|
||||
}
|
||||
|
||||
func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *command.DockerCli) (id string, err error) {
|
||||
rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
var config = runConfig.config
|
||||
var hostConfig = runConfig.hostConfig
|
||||
var platform = runConfig.platform
|
||||
var networkConfig = runConfig.networkingConfig
|
||||
var name = runConfig.containerName
|
||||
|
||||
var needPull bool
|
||||
var img types.ImageInspect
|
||||
img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
|
||||
if errdefs.IsNotFound(err) {
|
||||
log.Infof("needs to pull image %s", config.Image)
|
||||
needPull = true
|
||||
err = nil
|
||||
} else if err != nil {
|
||||
log.Errorf("image inspect failed: %v", err)
|
||||
return
|
||||
}
|
||||
if platform != nil && platform.Architecture != "" && platform.OS != "" {
|
||||
if img.Os != platform.OS || img.Architecture != platform.Architecture {
|
||||
needPull = true
|
||||
}
|
||||
}
|
||||
if needPull {
|
||||
err = util.PullImage(ctx, runConfig.platform, cli, c, config.Image, nil)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to pull image: %s, err: %s", config.Image, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var create typescommand.CreateResponse
|
||||
create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create container: %s, err: %s", name, err)
|
||||
return
|
||||
}
|
||||
id = create.ID
|
||||
log.Infof("Created container: %s", name)
|
||||
defer func() {
|
||||
if err != nil && runConfig.hostConfig.AutoRemove {
|
||||
_ = cli.ContainerRemove(ctx, id, typescontainer.RemoveOptions{Force: true})
|
||||
}
|
||||
}()
|
||||
|
||||
err = cli.ContainerStart(ctx, create.ID, typescontainer.StartOptions{})
|
||||
if err != nil {
|
||||
log.Errorf("failed to startup container %s: %v", name, err)
|
||||
return
|
||||
}
|
||||
log.Infof("Wait container %s to be running...", name)
|
||||
var inspect types.ContainerJSON
|
||||
ctx2, cancelFunc := context.WithTimeout(ctx, time.Minute*5)
|
||||
wait.UntilWithContext(ctx2, func(ctx context.Context) {
|
||||
inspect, err = cli.ContainerInspect(ctx, create.ID)
|
||||
if errdefs.IsNotFound(err) {
|
||||
cancelFunc()
|
||||
return
|
||||
} else if err != nil {
|
||||
cancelFunc()
|
||||
return
|
||||
}
|
||||
if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
|
||||
cancelFunc()
|
||||
err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
|
||||
return
|
||||
}
|
||||
if inspect.State != nil && inspect.State.Running {
|
||||
cancelFunc()
|
||||
return
|
||||
}
|
||||
}, time.Second)
|
||||
if err != nil {
|
||||
log.Errorf("failed to wait container to be ready: %v", err)
|
||||
_ = runLogsSinceNow(c, id, false)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Container %s is running now", name)
|
||||
return
|
||||
}
|
||||
|
||||
func runAndAttach(ctx context.Context, runConfig *RunConfig, cli *command.DockerCli) error {
|
||||
c := &containerConfig{
|
||||
Config: runConfig.config,
|
||||
HostConfig: runConfig.hostConfig,
|
||||
NetworkingConfig: runConfig.networkingConfig,
|
||||
}
|
||||
return runContainer(ctx, cli, &runConfig.Options, runConfig.Copts, c)
|
||||
}
|
249
pkg/dev/utils.go
@@ -1,249 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/clientcmd/api"
|
||||
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
|
||||
)
|
||||
|
||||
func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int {
|
||||
if len(containerID) == 0 {
|
||||
// containerID can never be empty
|
||||
panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
|
||||
}
|
||||
|
||||
// Older versions used the Events API, and even older versions did not
|
||||
// support server-side removal. This legacyWaitExitOrRemoved method
|
||||
// preserves that old behavior and any issues it may have.
|
||||
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.30") {
|
||||
return legacyWaitExitOrRemoved(ctx, dockerCli, containerID, waitRemove)
|
||||
}
|
||||
|
||||
condition := container.WaitConditionNextExit
|
||||
if waitRemove {
|
||||
condition = container.WaitConditionRemoved
|
||||
}
|
||||
|
||||
resultC, errC := dockerCli.Client().ContainerWait(ctx, containerID, condition)
|
||||
|
||||
statusC := make(chan int)
|
||||
go func() {
|
||||
select {
|
||||
case result := <-resultC:
|
||||
if result.Error != nil {
|
||||
logrus.Errorf("Error waiting for container: %v", result.Error.Message)
|
||||
statusC <- 125
|
||||
} else {
|
||||
statusC <- int(result.StatusCode)
|
||||
}
|
||||
case err := <-errC:
|
||||
if err != nil && err.Error() != "" {
|
||||
logrus.Errorf("error waiting for container: %v", err)
|
||||
}
|
||||
statusC <- 125
|
||||
}
|
||||
}()
|
||||
|
||||
return statusC
|
||||
}
|
||||
|
||||
func legacyWaitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID string, waitRemove bool) <-chan int {
|
||||
var removeErr error
|
||||
statusChan := make(chan int)
|
||||
exitCode := 125
|
||||
|
||||
// Get events via Events API
|
||||
f := filters.NewArgs()
|
||||
f.Add("type", "container")
|
||||
f.Add("container", containerID)
|
||||
options := types.EventsOptions{
|
||||
Filters: f,
|
||||
}
|
||||
eventCtx, cancel := context.WithCancel(ctx)
|
||||
eventq, errq := dockerCli.Client().Events(eventCtx, options)
|
||||
|
||||
eventProcessor := func(e events.Message) bool {
|
||||
stopProcessing := false
|
||||
switch e.Status {
|
||||
case "die":
|
||||
if v, ok := e.Actor.Attributes["exitCode"]; ok {
|
||||
code, cerr := strconv.Atoi(v)
|
||||
if cerr != nil {
|
||||
logrus.Errorf("failed to convert exitcode '%q' to int: %v", v, cerr)
|
||||
} else {
|
||||
exitCode = code
|
||||
}
|
||||
}
|
||||
if !waitRemove {
|
||||
stopProcessing = true
|
||||
} else {
|
||||
// If we are talking to an older daemon, `AutoRemove` is not supported.
|
||||
// We need to fall back to the old behavior, which is client-side removal
|
||||
if versions.LessThan(dockerCli.Client().ClientVersion(), "1.25") {
|
||||
go func() {
|
||||
removeErr = dockerCli.Client().ContainerRemove(ctx, containerID, types.ContainerRemoveOptions{RemoveVolumes: true})
|
||||
if removeErr != nil {
|
||||
logrus.Errorf("error removing container: %v", removeErr)
|
||||
cancel() // cancel the event Q
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
case "detach":
|
||||
exitCode = 0
|
||||
stopProcessing = true
|
||||
case "destroy":
|
||||
stopProcessing = true
|
||||
}
|
||||
return stopProcessing
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
statusChan <- exitCode // must always send an exit code or the caller will block
|
||||
cancel()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-eventCtx.Done():
|
||||
if removeErr != nil {
|
||||
return
|
||||
}
|
||||
case evt := <-eventq:
|
||||
if eventProcessor(evt) {
|
||||
return
|
||||
}
|
||||
case err := <-errq:
|
||||
logrus.Errorf("error getting events from daemon: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return statusChan
|
||||
}
|
||||
|
||||
func parallelOperation(ctx context.Context, containers []string, op func(ctx context.Context, container string) error) chan error {
|
||||
if len(containers) == 0 {
|
||||
return nil
|
||||
}
|
||||
const defaultParallel int = 50
|
||||
sem := make(chan struct{}, defaultParallel)
|
||||
errChan := make(chan error)
|
||||
|
||||
// make sure result is printed in correct order
|
||||
output := map[string]chan error{}
|
||||
for _, c := range containers {
|
||||
output[c] = make(chan error, 1)
|
||||
}
|
||||
go func() {
|
||||
for _, c := range containers {
|
||||
err := <-output[c]
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for _, c := range containers {
|
||||
sem <- struct{}{} // Wait for active queue sem to drain.
|
||||
go func(container string) {
|
||||
output[container] <- op(ctx, container)
|
||||
<-sem
|
||||
}(c)
|
||||
}
|
||||
}()
|
||||
return errChan
|
||||
}
|
||||
|
||||
func ConvertHost(kubeconfigPath string) (newPath string, err error) {
|
||||
var kubeconfigBytes []byte
|
||||
kubeconfigBytes, err = os.ReadFile(kubeconfigPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var conf clientcmd.ClientConfig
|
||||
conf, err = clientcmd.NewClientConfigFromBytes(kubeconfigBytes)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var rawConfig api.Config
|
||||
rawConfig, err = conf.RawConfig()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = api.FlattenConfig(&rawConfig); err != nil {
|
||||
return
|
||||
}
|
||||
if rawConfig.Contexts == nil {
|
||||
err = errors.New("kubeconfig is invalid")
|
||||
return
|
||||
}
|
||||
kubeContext := rawConfig.Contexts[rawConfig.CurrentContext]
|
||||
if kubeContext == nil {
|
||||
err = errors.New("kubeconfig is invalid")
|
||||
return
|
||||
}
|
||||
cluster := rawConfig.Clusters[kubeContext.Cluster]
|
||||
if cluster == nil {
|
||||
err = errors.New("kubeconfig is invalid")
|
||||
return
|
||||
}
|
||||
var u *url.URL
|
||||
u, err = url.Parse(cluster.Server)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var remote netip.AddrPort
|
||||
remote, err = netip.ParseAddrPort(u.Host)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
host := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort("kubernetes", strconv.Itoa(int(remote.Port()))))
|
||||
rawConfig.Clusters[rawConfig.Contexts[rawConfig.CurrentContext].Cluster].Server = host
|
||||
rawConfig.SetGroupVersionKind(schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"})
|
||||
|
||||
var convertedObj runtime.Object
|
||||
convertedObj, err = clientcmdlatest.Scheme.ConvertToVersion(&rawConfig, clientcmdlatest.ExternalVersion)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var marshal []byte
|
||||
marshal, err = json.Marshal(convertedObj)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var temp *os.File
|
||||
temp, err = os.CreateTemp("", "*.kubeconfig")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = temp.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
err = os.WriteFile(temp.Name(), marshal, 0644)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newPath = temp.Name()
|
||||
return
|
||||
}
|
@@ -39,7 +39,7 @@ func NewDNSServer(network, address string, forwardDNS *miekgdns.ClientConfig) er
|
||||
return miekgdns.ListenAndServe(address, network, &server{
|
||||
dnsCache: cache.NewLRUExpireCache(1000),
|
||||
forwardDNS: forwardDNS,
|
||||
client: &miekgdns.Client{Net: "udp", SingleInflight: true, Timeout: time.Second * 30},
|
||||
client: &miekgdns.Client{Net: "udp", Timeout: time.Second * 30},
|
||||
fwdSem: semaphore.NewWeighted(maxConcurrent),
|
||||
logInverval: rate.Sometimes{Interval: logInterval},
|
||||
})
|
||||
|
@@ -9,7 +9,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/google/uuid"
|
||||
log "github.com/sirupsen/logrus"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
@@ -422,7 +422,6 @@ func (d *CloneOptions) setVolume(u *unstructured.Unstructured) error {
|
||||
|
||||
// remove serviceAccount info
|
||||
temp.Spec.ServiceAccountName = ""
|
||||
temp.Spec.DeprecatedServiceAccount = ""
|
||||
temp.Spec.AutomountServiceAccountToken = pointer.Bool(false)
|
||||
|
||||
var volumeMap = make(map[string]v1.Volume)
|
||||
|
@@ -2,7 +2,6 @@ package handler
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"math"
|
||||
@@ -18,7 +17,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containernetworking/cni/pkg/types"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/distribution/reference"
|
||||
"github.com/google/gopacket/routing"
|
||||
goversion "github.com/hashicorp/go-version"
|
||||
netroute "github.com/libp2p/go-netroute"
|
||||
@@ -31,27 +30,20 @@ import (
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
pkgruntime "k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
pkgtypes "k8s.io/apimachinery/pkg/types"
|
||||
utilnet "k8s.io/apimachinery/pkg/util/net"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/cli-runtime/pkg/resource"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
v12 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/tools/clientcmd/api/latest"
|
||||
clientcmdlatest "k8s.io/client-go/tools/clientcmd/api/latest"
|
||||
"k8s.io/client-go/util/retry"
|
||||
"k8s.io/kubectl/pkg/cmd/set"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/polymorphichelpers"
|
||||
"k8s.io/kubectl/pkg/scheme"
|
||||
"k8s.io/utils/pointer"
|
||||
pkgclient "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/core"
|
||||
@@ -173,30 +165,6 @@ func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context) (err error)
|
||||
return
|
||||
}
|
||||
|
||||
func Rollback(f cmdutil.Factory, ns, workload string) {
|
||||
r := f.NewBuilder().
|
||||
WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
|
||||
NamespaceParam(ns).DefaultNamespace().
|
||||
ResourceTypeOrNameArgs(true, workload).
|
||||
ContinueOnError().
|
||||
Latest().
|
||||
Flatten().
|
||||
Do()
|
||||
if r.Err() == nil {
|
||||
_ = r.Visit(func(info *resource.Info, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rollbacker, err := polymorphichelpers.RollbackerFn(f, info.ResourceMapping())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = rollbacker.Rollback(info.Object, nil, 0, cmdutil.DryRunNone)
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) DoConnect(ctx context.Context, isLite bool) (err error) {
|
||||
c.ctx, c.cancel = context.WithCancel(ctx)
|
||||
|
||||
@@ -305,7 +273,7 @@ func (c *ConnectOptions) portForward(ctx context.Context, portPair []string) err
|
||||
}
|
||||
}}
|
||||
// try to detect pod is delete event, if pod is deleted, needs to redo port-forward
|
||||
go checkPodStatus(childCtx, cancelFunc, podName, podInterface)
|
||||
go util.CheckPodStatus(childCtx, cancelFunc, podName, podInterface)
|
||||
err = util.PortForwardPod(
|
||||
c.config,
|
||||
c.restclient,
|
||||
@@ -347,37 +315,6 @@ func (c *ConnectOptions) portForward(ctx context.Context, portPair []string) err
|
||||
}
|
||||
}
|
||||
|
||||
func checkPodStatus(cCtx context.Context, cFunc context.CancelFunc, podName string, podInterface v12.PodInterface) {
|
||||
w, err := podInterface.Watch(cCtx, metav1.ListOptions{
|
||||
FieldSelector: fields.OneTermEqualSelector("metadata.name", podName).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer w.Stop()
|
||||
for {
|
||||
select {
|
||||
case e, ok := <-w.ResultChan():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
switch e.Type {
|
||||
case watch.Deleted:
|
||||
cFunc()
|
||||
return
|
||||
case watch.Error:
|
||||
return
|
||||
case watch.Added, watch.Modified, watch.Bookmark:
|
||||
// do nothing
|
||||
default:
|
||||
return
|
||||
}
|
||||
case <-cCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) startLocalTunServe(ctx context.Context, forwardAddress string, lite bool) (err error) {
|
||||
// todo figure it out why
|
||||
if util.IsWindows() {
|
||||
@@ -816,22 +753,8 @@ func (c *ConnectOptions) PreCheckResource() error {
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) GetRunningPodList(ctx context.Context) ([]v1.Pod, error) {
|
||||
list, err := c.clientset.CoreV1().Pods(c.Namespace).List(ctx, metav1.ListOptions{
|
||||
LabelSelector: fields.OneTermEqualSelector("app", config.ConfigMapPodTrafficManager).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < len(list.Items); i++ {
|
||||
if list.Items[i].GetDeletionTimestamp() != nil || !util.AllContainerIsRunning(&list.Items[i]) {
|
||||
list.Items = append(list.Items[:i], list.Items[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if len(list.Items) == 0 {
|
||||
return nil, errors.New("can not found any running pod")
|
||||
}
|
||||
return list.Items, nil
|
||||
label := fields.OneTermEqualSelector("app", config.ConfigMapPodTrafficManager).String()
|
||||
return util.GetRunningPodList(ctx, c.clientset, c.Namespace, label)
|
||||
}
|
||||
|
||||
// getCIDR
|
||||
@@ -1012,7 +935,7 @@ RetryWithDNSClient:
|
||||
}()
|
||||
|
||||
// 4) query with dns client
|
||||
client := &miekgdns.Client{Net: "udp", Timeout: time.Second * 2, SingleInflight: true}
|
||||
client := &miekgdns.Client{Net: "udp", Timeout: time.Second * 2}
|
||||
for _, domain := range c.ExtraRouteInfo.ExtraDomain {
|
||||
var success = false
|
||||
for _, qType := range []uint16{miekgdns.TypeA /*, miekgdns.TypeAAAA*/} {
|
||||
@@ -1104,44 +1027,6 @@ func (c *ConnectOptions) addExtraNodeIP(ctx context.Context) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) GetKubeconfigPath() (string, error) {
|
||||
rawConfig, err := c.factory.ToRawKubeConfigLoader().RawConfig()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = api.FlattenConfig(&rawConfig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
rawConfig.SetGroupVersionKind(schema.GroupVersionKind{Version: clientcmdlatest.Version, Kind: "Config"})
|
||||
var convertedObj pkgruntime.Object
|
||||
convertedObj, err = latest.Scheme.ConvertToVersion(&rawConfig, latest.ExternalVersion)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var kubeconfigJsonBytes []byte
|
||||
kubeconfigJsonBytes, err = json.Marshal(convertedObj)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
temp, err := os.CreateTemp("", "*.kubeconfig")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
temp.Close()
|
||||
err = os.WriteFile(temp.Name(), kubeconfigJsonBytes, 0644)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = os.Chmod(temp.Name(), 0644)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return temp.Name(), nil
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) GetClientset() *kubernetes.Clientset {
|
||||
return c.clientset
|
||||
}
|
||||
@@ -1157,63 +1042,6 @@ func (c *ConnectOptions) GetLocalTunIPv4() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
// update to newer image
|
||||
func (c *ConnectOptions) UpdateImage(ctx context.Context) error {
|
||||
deployment, err := c.clientset.AppsV1().Deployments(c.Namespace).Get(ctx, config.ConfigMapPodTrafficManager, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
origin := deployment.DeepCopy()
|
||||
newImg, err := reference.ParseNormalizedNamed(config.Image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newTag, ok := newImg.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
oldImg, err := reference.ParseNormalizedNamed(deployment.Spec.Template.Spec.Containers[0].Image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var oldTag reference.NamedTagged
|
||||
oldTag, ok = oldImg.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if reference.Domain(newImg) != reference.Domain(oldImg) {
|
||||
return nil
|
||||
}
|
||||
var oldVersion, newVersion *goversion.Version
|
||||
oldVersion, err = goversion.NewVersion(oldTag.Tag())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
newVersion, err = goversion.NewVersion(newTag.Tag())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if oldVersion.GreaterThanOrEqual(newVersion) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Infof("found newer image %s, set image from %s to it...", config.Image, deployment.Spec.Template.Spec.Containers[0].Image)
|
||||
for i := range deployment.Spec.Template.Spec.Containers {
|
||||
deployment.Spec.Template.Spec.Containers[i].Image = config.Image
|
||||
}
|
||||
p := pkgclient.MergeFrom(deployment)
|
||||
data, err := pkgclient.MergeFrom(origin).Data(deployment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = c.clientset.AppsV1().Deployments(c.Namespace).Patch(ctx, config.ConfigMapPodTrafficManager, p.Type(), data, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = util.RolloutStatus(ctx, c.factory, c.Namespace, fmt.Sprintf("deployments/%s", config.ConfigMapPodTrafficManager), time.Minute*60)
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) setImage(ctx context.Context) error {
|
||||
deployment, err := c.clientset.AppsV1().Deployments(c.Namespace).Get(ctx, config.ConfigMapPodTrafficManager, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
@@ -1357,17 +1185,6 @@ func (c *ConnectOptions) GetTunDeviceName() (string, error) {
|
||||
return device.Name, nil
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) GetKubeconfigCluster() string {
|
||||
rawConfig, err := c.GetFactory().ToRawKubeConfigLoader().RawConfig()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if rawConfig.Contexts != nil && rawConfig.Contexts[rawConfig.CurrentContext] != nil {
|
||||
return rawConfig.Contexts[rawConfig.CurrentContext].Cluster
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (c *ConnectOptions) AddRolloutFunc(f func() error) {
|
||||
c.rollbackFuncList = append(c.rollbackFuncList, f)
|
||||
}
|
||||
|
@@ -373,7 +373,7 @@ func TestPatch(t *testing.T) {
|
||||
var p = corev1.Probe{
|
||||
ProbeHandler: corev1.ProbeHandler{HTTPGet: &corev1.HTTPGetAction{
|
||||
Path: "/health",
|
||||
Port: intstr.FromInt(9080),
|
||||
Port: intstr.FromInt32(9080),
|
||||
Scheme: "HTTP",
|
||||
}},
|
||||
}
|
||||
|
@@ -26,7 +26,7 @@ func RemoveContainers(spec *v1.PodTemplateSpec) {
|
||||
}
|
||||
}
|
||||
|
||||
// todo envoy support ipv6
|
||||
// AddMeshContainer todo envoy support ipv6
|
||||
func AddMeshContainer(spec *v1.PodTemplateSpec, nodeId string, c util.PodRouteConfig) {
|
||||
// remove envoy proxy containers if already exist
|
||||
RemoveContainers(spec)
|
||||
|
@@ -86,30 +86,39 @@ func (c *tunConn) Read(b []byte) (n int, err error) {
|
||||
bytes := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(bytes[:])
|
||||
|
||||
var size int
|
||||
size, err = c.ifce.Read(bytes[:], offset)
|
||||
if err != nil {
|
||||
var num int
|
||||
sizes := []int{1}
|
||||
num, err = c.ifce.Read([][]byte{bytes[:]}, sizes, offset)
|
||||
if err != nil || num == 0 {
|
||||
return 0, err
|
||||
}
|
||||
if size == 0 || size > device.MaxSegmentSize-device.MessageTransportHeaderSize {
|
||||
return 0, nil
|
||||
var size = sizes[0]
|
||||
if size == 0 {
|
||||
return 0, errors.New("tun packet is zero")
|
||||
}
|
||||
if size > device.MaxSegmentSize-device.MessageTransportHeaderSize {
|
||||
return 0, errors.New("tun packet is too large")
|
||||
}
|
||||
return copy(b, bytes[offset:offset+size]), nil
|
||||
}
|
||||
|
||||
func (c *tunConn) Write(b []byte) (n int, err error) {
|
||||
func (c *tunConn) Write(b []byte) (int, error) {
|
||||
if len(b) < device.MessageTransportHeaderSize {
|
||||
return 0, err
|
||||
return 0, errors.New("tun packet is too short")
|
||||
}
|
||||
bytes := config.LPool.Get().([]byte)[:]
|
||||
defer config.LPool.Put(bytes[:])
|
||||
|
||||
copy(bytes[device.MessageTransportOffsetContent:], b)
|
||||
|
||||
return c.ifce.Write(bytes[:device.MessageTransportOffsetContent+len(b)], device.MessageTransportOffsetContent)
|
||||
_, err := c.ifce.Write([][]byte{bytes[:device.MessageTransportOffsetContent+len(b)]}, device.MessageTransportOffsetContent)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *tunConn) Close() (err error) {
|
||||
func (c *tunConn) Close() error {
|
||||
return c.ifce.Close()
|
||||
}
|
||||
|
||||
|
@@ -152,12 +152,24 @@ func (c *winTunConn) Close() error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (c *winTunConn) Read(b []byte) (n int, err error) {
|
||||
return c.ifce.Read(b, 0)
|
||||
func (c *winTunConn) Read(b []byte) (int, error) {
|
||||
sizes := []int{1}
|
||||
num, err := c.ifce.Read([][]byte{b}, sizes, 0)
|
||||
if err != nil || num == 0 {
|
||||
return 0, err
|
||||
}
|
||||
return sizes[0], nil
|
||||
}
|
||||
|
||||
func (c *winTunConn) Write(b []byte) (n int, err error) {
|
||||
return c.ifce.Write(b, 0)
|
||||
func (c *winTunConn) Write(b []byte) (int, error) {
|
||||
n, err := c.ifce.Write([][]byte{b}, 0)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if n == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
return len(b), nil
|
||||
}
|
||||
|
||||
func (c *winTunConn) LocalAddr() net.Addr {
|
||||
|
@@ -11,6 +11,7 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/kubectl/pkg/cmd/util"
|
||||
)
|
||||
|
||||
func GetDNSServiceIPFromPod(clientset *kubernetes.Clientset, restclient *rest.RESTClient, config *rest.Config, podName, namespace string) (*dns.ClientConfig, error) {
|
||||
@@ -65,3 +66,29 @@ func GetDNSIPFromDnsPod(clientset *kubernetes.Clientset) (ips []string, err erro
|
||||
err = nil
|
||||
return
|
||||
}
|
||||
|
||||
func GetDNS(ctx context.Context, f util.Factory, ns, pod string) (*dns.ClientConfig, error) {
|
||||
clientSet, err := f.KubernetesClientSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
_, err = clientSet.CoreV1().Pods(ns).Get(ctx, pod, v12.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config, err := f.ToRESTConfig()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
client, err := f.RESTClient()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
clientConfig, err := GetDNSServiceIPFromPod(clientSet, client, config, pod, ns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return clientConfig, nil
|
||||
}
|
||||
|
@@ -6,61 +6,68 @@ import (
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"time"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/image"
|
||||
"github.com/docker/cli/cli/flags"
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/docker/cli/cli/trust"
|
||||
"github.com/docker/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
typesimage "github.com/docker/docker/api/types/image"
|
||||
registrytypes "github.com/docker/docker/api/types/registry"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/moby/term"
|
||||
"github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"golang.org/x/crypto/ssh"
|
||||
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/kubectl/pkg/cmd/util"
|
||||
client2 "sigs.k8s.io/controller-runtime/pkg/client"
|
||||
)
|
||||
|
||||
func GetClient() (*client.Client, *command.DockerCli, error) {
|
||||
cli, err := client.NewClientWithOpts(
|
||||
client, err := client.NewClientWithOpts(
|
||||
client.FromEnv,
|
||||
client.WithAPIVersionNegotiation(),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err)
|
||||
}
|
||||
var dockerCli *command.DockerCli
|
||||
dockerCli, err = command.NewDockerCli(command.WithAPIClient(cli))
|
||||
var cli *command.DockerCli
|
||||
cli, err = command.NewDockerCli(command.WithAPIClient(client))
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("can not create docker client from env, err: %v", err)
|
||||
}
|
||||
err = dockerCli.Initialize(flags.NewClientOptions())
|
||||
err = cli.Initialize(flags.NewClientOptions())
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("can not init docker client, err: %v", err)
|
||||
}
|
||||
return cli, dockerCli, nil
|
||||
return client, cli, nil
|
||||
}
|
||||
|
||||
// TransferImage
|
||||
// 1) if no special ssh config is given, just pull the image, tag it and push it
|
||||
// 2) if a special ssh config is given, pull and tag the image, save it and scp it to the remote host, then load and push it
|
||||
func TransferImage(ctx context.Context, conf *SshConfig, imageSource, imageTarget string, out io.Writer) error {
|
||||
cli, dockerCmdCli, err := GetClient()
|
||||
client, cli, err := GetClient()
|
||||
if err != nil {
|
||||
log.Errorf("failed to get docker client: %v", err)
|
||||
return err
|
||||
}
|
||||
// todo add flags? or detect k8s node runtime ?
|
||||
platform := &v1.Platform{Architecture: "amd64", OS: "linux"}
|
||||
err = PullImage(ctx, platform, cli, dockerCmdCli, imageSource, out)
|
||||
err = PullImage(ctx, platform, client, cli, imageSource, out)
|
||||
if err != nil {
|
||||
log.Errorf("failed to pull image: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = cli.ImageTag(ctx, imageSource, imageTarget)
|
||||
err = client.ImageTag(ctx, imageSource, imageTarget)
|
||||
if err != nil {
|
||||
log.Errorf("failed to tag image %s to %s: %v", imageSource, imageTarget, err)
|
||||
return err
|
||||
@@ -75,20 +82,20 @@ func TransferImage(ctx context.Context, conf *SshConfig, imageSource, imageTarge
|
||||
return err
|
||||
}
|
||||
var imgRefAndAuth trust.ImageRefAndAuth
|
||||
imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(dockerCmdCli), distributionRef.String())
|
||||
imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(cli), distributionRef.String())
|
||||
if err != nil {
|
||||
log.Errorf("can not get image auth: %v", err)
|
||||
return err
|
||||
}
|
||||
var encodedAuth string
|
||||
encodedAuth, err = command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig())
|
||||
encodedAuth, err = registrytypes.EncodeAuthConfig(*imgRefAndAuth.AuthConfig())
|
||||
if err != nil {
|
||||
log.Errorf("can not encode auth config to base64: %v", err)
|
||||
return err
|
||||
}
|
||||
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(dockerCmdCli, imgRefAndAuth.RepoInfo().Index, "push")
|
||||
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(cli, imgRefAndAuth.RepoInfo().Index, "push")
|
||||
var readCloser io.ReadCloser
|
||||
readCloser, err = cli.ImagePush(ctx, imageTarget, types.ImagePushOptions{
|
||||
readCloser, err = client.ImagePush(ctx, imageTarget, types.ImagePushOptions{
|
||||
RegistryAuth: encodedAuth,
|
||||
PrivilegeFunc: requestPrivilege,
|
||||
})
|
||||
@@ -117,7 +124,7 @@ func TransferImage(ctx context.Context, conf *SshConfig, imageSource, imageTarge
|
||||
}
|
||||
defer sshClient.Close()
|
||||
var responseReader io.ReadCloser
|
||||
responseReader, err = cli.ImageSave(ctx, []string{imageTarget})
|
||||
responseReader, err = client.ImageSave(ctx, []string{imageTarget})
|
||||
if err != nil {
|
||||
log.Errorf("can not save image %s: %v", imageTarget, err)
|
||||
return err
|
||||
@@ -152,6 +159,7 @@ func TransferImage(ctx context.Context, conf *SshConfig, imageSource, imageTarge
|
||||
return nil
|
||||
}
|
||||
|
||||
// PullImage image.RunPull(ctx, c, image.PullOptions{})
|
||||
func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c *command.DockerCli, img string, out io.Writer) error {
|
||||
var readCloser io.ReadCloser
|
||||
var plat string
|
||||
@@ -164,19 +172,19 @@ func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c
|
||||
return err
|
||||
}
|
||||
var imgRefAndAuth trust.ImageRefAndAuth
|
||||
imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, nil, image.AuthResolver(c), distributionRef.String())
|
||||
imgRefAndAuth, err = trust.GetImageReferencesAndAuth(ctx, image.AuthResolver(c), distributionRef.String())
|
||||
if err != nil {
|
||||
log.Errorf("can not get image auth: %v", err)
|
||||
return err
|
||||
}
|
||||
var encodedAuth string
|
||||
encodedAuth, err = command.EncodeAuthToBase64(*imgRefAndAuth.AuthConfig())
|
||||
encodedAuth, err = registrytypes.EncodeAuthConfig(*imgRefAndAuth.AuthConfig())
|
||||
if err != nil {
|
||||
log.Errorf("can not encode auth config to base64: %v", err)
|
||||
return err
|
||||
}
|
||||
requestPrivilege := command.RegistryAuthenticationPrivilegedFunc(c, imgRefAndAuth.RepoInfo().Index, "pull")
|
||||
readCloser, err = cli.ImagePull(ctx, img, types.ImagePullOptions{
|
||||
readCloser, err = cli.ImagePull(ctx, img, typesimage.PullOptions{
|
||||
All: false,
|
||||
RegistryAuth: encodedAuth,
|
||||
PrivilegeFunc: requestPrivilege,
|
||||
@@ -198,3 +206,64 @@ func PullImage(ctx context.Context, platform *v1.Platform, cli *client.Client, c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// UpdateImage update to newer image
|
||||
func UpdateImage(ctx context.Context, factory util.Factory, ns string, deployName string, image string) error {
|
||||
clientSet, err2 := factory.KubernetesClientSet()
|
||||
if err2 != nil {
|
||||
return err2
|
||||
}
|
||||
deployment, err := clientSet.AppsV1().Deployments(ns).Get(ctx, deployName, v12.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
origin := deployment.DeepCopy()
|
||||
newImg, err := reference.ParseNormalizedNamed(image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
newTag, ok := newImg.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
oldImg, err := reference.ParseNormalizedNamed(deployment.Spec.Template.Spec.Containers[0].Image)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var oldTag reference.NamedTagged
|
||||
oldTag, ok = oldImg.(reference.NamedTagged)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if reference.Domain(newImg) != reference.Domain(oldImg) {
|
||||
return nil
|
||||
}
|
||||
var oldVersion, newVersion *version.Version
|
||||
oldVersion, err = version.NewVersion(oldTag.Tag())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
newVersion, err = version.NewVersion(newTag.Tag())
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
if oldVersion.GreaterThanOrEqual(newVersion) {
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Infof("found newer image %s, set image from %s to it...", image, deployment.Spec.Template.Spec.Containers[0].Image)
|
||||
for i := range deployment.Spec.Template.Spec.Containers {
|
||||
deployment.Spec.Template.Spec.Containers[i].Image = image
|
||||
}
|
||||
p := client2.MergeFrom(deployment)
|
||||
data, err := client2.MergeFrom(origin).Data(deployment)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = clientSet.AppsV1().Deployments(ns).Patch(ctx, deployName, p.Type(), data, v12.PatchOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = RolloutStatus(ctx, factory, ns, fmt.Sprintf("deployments/%s", deployName), time.Minute*60)
|
||||
return err
|
||||
}
|
||||
|
@@ -1,8 +1,24 @@
|
||||
package util
|
||||
|
||||
import cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"net/netip"
|
||||
"net/url"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
func GetKubeconfigPath(f cmdutil.Factory) string {
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/tools/clientcmd/api/latest"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
)
|
||||
|
||||
func GetKubeConfigPath(f cmdutil.Factory) string {
|
||||
rawConfig := f.ToRawKubeConfigLoader()
|
||||
if rawConfig.ConfigAccess().IsExplicitFile() {
|
||||
return rawConfig.ConfigAccess().GetExplicitFile()
|
||||
@@ -10,3 +26,76 @@ func GetKubeconfigPath(f cmdutil.Factory) string {
|
||||
return rawConfig.ConfigAccess().GetDefaultFilename()
|
||||
}
|
||||
}
|
||||
|
||||
func ConvertK8sApiServerToDomain(kubeConfigPath string) (newPath string, err error) {
|
||||
var kubeConfigBytes []byte
|
||||
kubeConfigBytes, err = os.ReadFile(kubeConfigPath)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var config clientcmd.ClientConfig
|
||||
config, err = clientcmd.NewClientConfigFromBytes(kubeConfigBytes)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var rawConfig api.Config
|
||||
rawConfig, err = config.RawConfig()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = api.FlattenConfig(&rawConfig); err != nil {
|
||||
return
|
||||
}
|
||||
if rawConfig.Contexts == nil {
|
||||
err = errors.New("kubeconfig is invalid")
|
||||
return
|
||||
}
|
||||
kubeContext := rawConfig.Contexts[rawConfig.CurrentContext]
|
||||
if kubeContext == nil {
|
||||
err = errors.New("kubeconfig is invalid")
|
||||
return
|
||||
}
|
||||
cluster := rawConfig.Clusters[kubeContext.Cluster]
|
||||
if cluster == nil {
|
||||
err = errors.New("kubeconfig is invalid")
|
||||
return
|
||||
}
|
||||
var u *url.URL
|
||||
u, err = url.Parse(cluster.Server)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var remote netip.AddrPort
|
||||
remote, err = netip.ParseAddrPort(u.Host)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
host := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort("kubernetes", strconv.Itoa(int(remote.Port()))))
|
||||
rawConfig.Clusters[rawConfig.Contexts[rawConfig.CurrentContext].Cluster].Server = host
|
||||
rawConfig.SetGroupVersionKind(schema.GroupVersionKind{Version: latest.Version, Kind: "Config"})
|
||||
|
||||
var convertedObj runtime.Object
|
||||
convertedObj, err = latest.Scheme.ConvertToVersion(&rawConfig, latest.ExternalVersion)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var marshal []byte
|
||||
marshal, err = json.Marshal(convertedObj)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
var temp *os.File
|
||||
temp, err = os.CreateTemp("", "*.kubeconfig")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if err = temp.Close(); err != nil {
|
||||
return
|
||||
}
|
||||
err = os.WriteFile(temp.Name(), marshal, 0644)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
newPath = temp.Name()
|
||||
return
|
||||
}
|
||||
|
@@ -9,12 +9,17 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
v12 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/clientcmd"
|
||||
"k8s.io/client-go/tools/clientcmd/api"
|
||||
"k8s.io/client-go/tools/clientcmd/api/latest"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/utils/pointer"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
)
|
||||
@@ -44,7 +49,7 @@ func IsSameCluster(client v12.ConfigMapInterface, namespace string, clientB v12.
|
||||
return a.UID == b.UID, nil
|
||||
}
|
||||
|
||||
func ConvertToKubeconfigBytes(factory cmdutil.Factory) ([]byte, string, error) {
|
||||
func ConvertToKubeConfigBytes(factory cmdutil.Factory) ([]byte, string, error) {
|
||||
loader := factory.ToRawKubeConfigLoader()
|
||||
namespace, _, err := loader.Namespace()
|
||||
if err != nil {
|
||||
@@ -102,3 +107,92 @@ func ConvertToTempKubeconfigFile(kubeconfigBytes []byte) (string, error) {
|
||||
}
|
||||
return temp.Name(), nil
|
||||
}
|
||||
|
||||
func InitFactory(kubeconfigBytes string, ns string) cmdutil.Factory {
|
||||
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
|
||||
configFlags.WrapConfigFn = func(c *rest.Config) *rest.Config {
|
||||
if path, ok := os.LookupEnv(config.EnvSSHJump); ok {
|
||||
bytes, err := os.ReadFile(path)
|
||||
cmdutil.CheckErr(err)
|
||||
var conf *rest.Config
|
||||
conf, err = clientcmd.RESTConfigFromKubeConfig(bytes)
|
||||
cmdutil.CheckErr(err)
|
||||
return conf
|
||||
}
|
||||
return c
|
||||
}
|
||||
// todo optimize here
|
||||
temp, err := os.CreateTemp("", "*.json")
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
err = temp.Close()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
err = os.WriteFile(temp.Name(), []byte(kubeconfigBytes), os.ModePerm)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
configFlags.KubeConfig = pointer.String(temp.Name())
|
||||
configFlags.Namespace = pointer.String(ns)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
return cmdutil.NewFactory(matchVersionFlags)
|
||||
}
|
||||
|
||||
func InitFactoryByPath(kubeconfig string, ns string) cmdutil.Factory {
|
||||
configFlags := genericclioptions.NewConfigFlags(true).WithDeprecatedPasswordFlag()
|
||||
configFlags.KubeConfig = pointer.String(kubeconfig)
|
||||
configFlags.Namespace = pointer.String(ns)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
return cmdutil.NewFactory(matchVersionFlags)
|
||||
}
|
||||
|
||||
func GetKubeconfigCluster(f cmdutil.Factory) string {
|
||||
rawConfig, err := f.ToRawKubeConfigLoader().RawConfig()
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
if rawConfig.Contexts != nil && rawConfig.Contexts[rawConfig.CurrentContext] != nil {
|
||||
return rawConfig.Contexts[rawConfig.CurrentContext].Cluster
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func GetKubeconfigPath(factory cmdutil.Factory) (string, error) {
|
||||
rawConfig, err := factory.ToRawKubeConfigLoader().RawConfig()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = api.FlattenConfig(&rawConfig)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
rawConfig.SetGroupVersionKind(schema.GroupVersionKind{Version: latest.Version, Kind: "Config"})
|
||||
var convertedObj runtime.Object
|
||||
convertedObj, err = latest.Scheme.ConvertToVersion(&rawConfig, latest.ExternalVersion)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
var kubeconfigJsonBytes []byte
|
||||
kubeconfigJsonBytes, err = json.Marshal(convertedObj)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
temp, err := os.CreateTemp("", "*.kubeconfig")
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
temp.Close()
|
||||
err = os.WriteFile(temp.Name(), kubeconfigJsonBytes, 0644)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
err = os.Chmod(temp.Name(), 0644)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return temp.Name(), nil
|
||||
}
|
||||
|
92
pkg/util/ns_test.go
Normal file
@@ -0,0 +1,92 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
cmdutil "k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/utils/ptr"
|
||||
)
|
||||
|
||||
func TestMergeRawConfig(t *testing.T) {
|
||||
var kubeConfigBytes = `
|
||||
apiVersion: v1
|
||||
kind: Config
|
||||
clusters:
|
||||
- cluster:
|
||||
server: http://localhost:8001
|
||||
name: localhost
|
||||
- cluster:
|
||||
server: http://localhost:8002
|
||||
name: localhost2
|
||||
contexts:
|
||||
- name: localhost
|
||||
context:
|
||||
cluster: localhost
|
||||
namespace: test
|
||||
- name: localhost2
|
||||
context:
|
||||
cluster: localhost2
|
||||
namespace: test2
|
||||
current-context: localhost`
|
||||
|
||||
temp, err := os.CreateTemp("", "")
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
temp.Close()
|
||||
t.Cleanup(func() {
|
||||
_ = os.Remove(temp.Name())
|
||||
})
|
||||
err = os.WriteFile(temp.Name(), []byte(kubeConfigBytes), 0644)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
testData := []struct {
|
||||
Context string
|
||||
Namespace string
|
||||
ApiServer string
|
||||
}{
|
||||
{
|
||||
Context: "localhost2",
|
||||
ApiServer: "http://localhost:8002",
|
||||
Namespace: "test2",
|
||||
},
|
||||
{
|
||||
Context: "localhost",
|
||||
ApiServer: "http://localhost:8001",
|
||||
Namespace: "test",
|
||||
},
|
||||
{
|
||||
Context: "",
|
||||
ApiServer: "http://localhost:8001",
|
||||
Namespace: "test",
|
||||
},
|
||||
}
|
||||
|
||||
for _, data := range testData {
|
||||
configFlags := genericclioptions.NewConfigFlags(false)
|
||||
configFlags.KubeConfig = ptr.To(temp.Name())
|
||||
configFlags.Context = ptr.To(data.Context)
|
||||
matchVersionFlags := cmdutil.NewMatchVersionFlags(configFlags)
|
||||
factory := cmdutil.NewFactory(matchVersionFlags)
|
||||
var bytes []byte
|
||||
var ns string
|
||||
bytes, ns, err = ConvertToKubeConfigBytes(factory)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if ns != data.Namespace {
|
||||
t.Fatalf("not equal")
|
||||
}
|
||||
newFactory := InitFactory(string(bytes), ns)
|
||||
config, err := newFactory.ToRESTConfig()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if config.Host != data.ApiServer {
|
||||
t.Fatalf("not equal")
|
||||
}
|
||||
}
|
||||
}
|
@@ -23,7 +23,8 @@ import (
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"k8s.io/apimachinery/pkg/util/sets"
|
||||
"k8s.io/cli-runtime/pkg/genericclioptions"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/cli-runtime/pkg/genericiooptions"
|
||||
"k8s.io/cli-runtime/pkg/resource"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
@@ -34,6 +35,8 @@ import (
|
||||
"k8s.io/client-go/transport/spdy"
|
||||
"k8s.io/kubectl/pkg/cmd/exec"
|
||||
"k8s.io/kubectl/pkg/cmd/util"
|
||||
"k8s.io/kubectl/pkg/polymorphichelpers"
|
||||
scheme2 "k8s.io/kubectl/pkg/scheme"
|
||||
"k8s.io/kubectl/pkg/util/podutils"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
@@ -253,7 +256,7 @@ func Shell(clientset *kubernetes.Clientset, restclient *rest.RESTClient, config
|
||||
Namespace: namespace,
|
||||
PodName: podName,
|
||||
ContainerName: containerName,
|
||||
IOStreams: genericclioptions.IOStreams{In: stdin, Out: stdout, ErrOut: nil},
|
||||
IOStreams: genericiooptions.IOStreams{In: stdin, Out: stdout, ErrOut: nil},
|
||||
}
|
||||
Executor := &exec.DefaultRemoteExecutor{}
|
||||
// ensure we can recover the terminal while attached
|
||||
@@ -371,3 +374,77 @@ func FindContainerByName(pod *corev1.Pod, name string) (*corev1.Container, int)
|
||||
}
|
||||
return nil, -1
|
||||
}
|
||||
|
||||
func CheckPodStatus(cCtx context.Context, cFunc context.CancelFunc, podName string, podInterface v12.PodInterface) {
|
||||
w, err := podInterface.Watch(cCtx, v1.ListOptions{
|
||||
FieldSelector: fields.OneTermEqualSelector("metadata.name", podName).String(),
|
||||
})
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
defer w.Stop()
|
||||
for {
|
||||
select {
|
||||
case e, ok := <-w.ResultChan():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
switch e.Type {
|
||||
case watch.Deleted:
|
||||
cFunc()
|
||||
return
|
||||
case watch.Error:
|
||||
return
|
||||
case watch.Added, watch.Modified, watch.Bookmark:
|
||||
// do nothing
|
||||
default:
|
||||
return
|
||||
}
|
||||
case <-cCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func Rollback(f util.Factory, ns, workload string) {
|
||||
r := f.NewBuilder().
|
||||
WithScheme(scheme2.Scheme, scheme2.Scheme.PrioritizedVersionsAllGroups()...).
|
||||
NamespaceParam(ns).DefaultNamespace().
|
||||
ResourceTypeOrNameArgs(true, workload).
|
||||
ContinueOnError().
|
||||
Latest().
|
||||
Flatten().
|
||||
Do()
|
||||
if r.Err() == nil {
|
||||
_ = r.Visit(func(info *resource.Info, err error) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rollbacker, err := polymorphichelpers.RollbackerFn(f, info.ResourceMapping())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = rollbacker.Rollback(info.Object, nil, 0, util.DryRunNone)
|
||||
return err
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func GetRunningPodList(ctx context.Context, clientset *kubernetes.Clientset, ns string, labelSelector string) ([]corev1.Pod, error) {
|
||||
list, err := clientset.CoreV1().Pods(ns).List(ctx, v1.ListOptions{
|
||||
LabelSelector: labelSelector,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := 0; i < len(list.Items); i++ {
|
||||
if list.Items[i].GetDeletionTimestamp() != nil || !AllContainerIsRunning(&list.Items[i]) {
|
||||
list.Items = append(list.Items[:i], list.Items[i+1:]...)
|
||||
i--
|
||||
}
|
||||
}
|
||||
if len(list.Items) == 0 {
|
||||
return nil, errors.New("can not find any running pod")
|
||||
}
|
||||
return list.Items, nil
|
||||
}
|
||||
|
173
pkg/util/volume.go
Normal file
@@ -0,0 +1,173 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/mount"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/pkg/archive"
|
||||
"github.com/moby/term"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"k8s.io/api/core/v1"
|
||||
v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/cli-runtime/pkg/genericiooptions"
|
||||
"k8s.io/kubectl/pkg/cmd/util"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/cp"
|
||||
)
|
||||
|
||||
// GetVolume key format: [container name]-[volume mount name]
|
||||
func GetVolume(ctx context.Context, f util.Factory, ns, podName string) (map[string][]mount.Mount, error) {
|
||||
clientSet, err := f.KubernetesClientSet()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var pod *v1.Pod
|
||||
pod, err = clientSet.CoreV1().Pods(ns).Get(ctx, podName, v12.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
result := map[string][]mount.Mount{}
|
||||
for _, container := range pod.Spec.Containers {
|
||||
// if the container is the vpn or envoy-proxy sidecar, there is no need to download its volumes
|
||||
if container.Name == config.ContainerSidecarVPN || container.Name == config.ContainerSidecarEnvoyProxy {
|
||||
continue
|
||||
}
|
||||
var m []mount.Mount
|
||||
for _, volumeMount := range container.VolumeMounts {
|
||||
if volumeMount.MountPath == "/tmp" {
|
||||
continue
|
||||
}
|
||||
localPath := filepath.Join(os.TempDir(), strconv.Itoa(rand.Int()))
|
||||
err = os.MkdirAll(localPath, 0755)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if volumeMount.SubPath != "" {
|
||||
localPath = filepath.Join(localPath, volumeMount.SubPath)
|
||||
}
|
||||
// pod-namespace/pod-name:path
|
||||
remotePath := fmt.Sprintf("%s/%s:%s", ns, podName, volumeMount.MountPath)
|
||||
stdIn, stdOut, stdErr := term.StdStreams()
|
||||
copyOptions := cp.NewCopyOptions(genericiooptions.IOStreams{In: stdIn, Out: stdOut, ErrOut: stdErr})
|
||||
copyOptions.Container = container.Name
|
||||
copyOptions.MaxTries = 10
|
||||
err = copyOptions.Complete(f, &cobra.Command{}, []string{remotePath, localPath})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
err = copyOptions.Run()
|
||||
if err != nil {
|
||||
_, _ = fmt.Fprintf(os.Stderr, "failed to download volume %s path %s to %s, err: %v, ignore...\n", volumeMount.Name, remotePath, localPath, err)
|
||||
continue
|
||||
}
|
||||
m = append(m, mount.Mount{
|
||||
Type: mount.TypeBind,
|
||||
Source: localPath,
|
||||
Target: volumeMount.MountPath,
|
||||
})
|
||||
logrus.Infof("%s:%s", localPath, volumeMount.MountPath)
|
||||
}
|
||||
result[container.Name] = m
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
func RemoveDir(volume map[string][]mount.Mount) error {
|
||||
var errs []error
|
||||
for _, mounts := range volume {
|
||||
for _, m := range mounts {
|
||||
err := os.RemoveAll(m.Source)
|
||||
if err != nil {
|
||||
errs = append(errs, fmt.Errorf("failed to delete dir %s: %v", m.Source, err))
|
||||
}
|
||||
}
|
||||
}
|
||||
return errors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
func CopyVolumeIntoContainer(ctx context.Context, volume []mount.Mount, cli *client.Client, id string) error {
|
||||
// copy volume into container
|
||||
for _, v := range volume {
|
||||
target, err := CreateFolder(ctx, cli, id, v.Source, v.Target)
|
||||
if err != nil {
|
||||
logrus.Debugf("create folder %s previoully failed, err: %v", target, err)
|
||||
}
|
||||
logrus.Debugf("from %s to %s", v.Source, v.Target)
|
||||
srcInfo, err := archive.CopyInfoSourcePath(v.Source, true)
|
||||
if err != nil {
|
||||
logrus.Errorf("copy info source path, err: %v", err)
|
||||
return err
|
||||
}
|
||||
srcArchive, err := archive.TarResource(srcInfo)
|
||||
if err != nil {
|
||||
logrus.Errorf("tar resource failed, err: %v", err)
|
||||
return err
|
||||
}
|
||||
dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, archive.CopyInfo{Path: v.Target})
|
||||
if err != nil {
|
||||
logrus.Errorf("can not prepare archive copy, err: %v", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = cli.CopyToContainer(ctx, id, dstDir, preparedArchive, types.CopyToContainerOptions{
|
||||
AllowOverwriteDirWithFile: true,
|
||||
CopyUIDGID: true,
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Infof("can not copy %s to container %s:%s, err: %v", v.Source, id, v.Target, err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func CreateFolder(ctx context.Context, cli *client.Client, id string, src string, target string) (string, error) {
|
||||
lstat, err := os.Lstat(src)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if !lstat.IsDir() {
|
||||
target = filepath.Dir(target)
|
||||
}
|
||||
var create types.IDResponse
|
||||
create, err = cli.ContainerExecCreate(ctx, id, types.ExecConfig{
|
||||
AttachStdin: true,
|
||||
AttachStderr: true,
|
||||
AttachStdout: true,
|
||||
Cmd: []string{"mkdir", "-p", target},
|
||||
})
|
||||
if err != nil {
|
||||
logrus.Errorf("create folder %s previoully failed, err: %v", target, err)
|
||||
return "", err
|
||||
}
|
||||
err = cli.ContainerExecStart(ctx, create.ID, types.ExecStartCheck{})
|
||||
if err != nil {
|
||||
logrus.Errorf("create folder %s previoully failed, err: %v", target, err)
|
||||
return "", err
|
||||
}
|
||||
logrus.Infof("wait create folder %s in container %s to be done...", target, id)
|
||||
chanStop := make(chan struct{})
|
||||
wait.Until(func() {
|
||||
inspect, err := cli.ContainerExecInspect(ctx, create.ID)
|
||||
if err != nil {
|
||||
logrus.Warningf("can not inspect container %s, err: %v", id, err)
|
||||
return
|
||||
}
|
||||
if !inspect.Running {
|
||||
close(chanStop)
|
||||
}
|
||||
}, time.Second, chanStop)
|
||||
return target, nil
|
||||
}
|
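For orientation only (not part of this commit): a minimal sketch of how the helpers above might be chained by a caller, with the downloaded per-container mounts copied into a running Docker container and the temporary local directories removed afterwards. The function name `syncVolumes` and the cleanup-on-return policy are assumptions for illustration.

```go
// Hypothetical caller (illustrative only): copy every downloaded mount into a
// running Docker container, then remove the local copies regardless of outcome.
func syncVolumes(ctx context.Context, cli *client.Client, containerID string, volumes map[string][]mount.Mount) error {
	// always clean up the locally downloaded directories, even on failure
	defer func() {
		if err := RemoveDir(volumes); err != nil {
			logrus.Warnf("failed to clean up local volume dirs: %v", err)
		}
	}()
	for name, mounts := range volumes {
		logrus.Debugf("syncing %d mount(s) declared by container %s", len(mounts), name)
		if err := CopyVolumeIntoContainer(ctx, mounts, cli, containerID); err != nil {
			return err
		}
	}
	return nil
}
```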
2
vendor/cel.dev/expr/.bazelversion
vendored
Normal file
@@ -0,0 +1,2 @@
6.4.0
# Keep this pinned version in parity with cel-go
2
vendor/cel.dev/expr/.gitattributes
vendored
Normal file
@@ -0,0 +1,2 @@
*.pb.go linguist-generated=true
*.pb.go -diff -merge
1
vendor/cel.dev/expr/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
bazel-*
3
vendor/cel.dev/expr/BUILD.bazel
vendored
Normal file
@@ -0,0 +1,3 @@
package(default_visibility = ["//visibility:public"])

licenses(["notice"]) # Apache 2.0
25
vendor/cel.dev/expr/CODE_OF_CONDUCT.md
vendored
Normal file
@@ -0,0 +1,25 @@
# Contributor Code of Conduct
## Version 0.1.1 (adapted from 0.3b-angular)

As contributors and maintainers of the Common Expression Language
(CEL) project, we pledge to respect everyone who contributes by
posting issues, updating documentation, submitting pull requests,
providing feedback in comments, and any other activities.

Communication through any of CEL's channels (GitHub, Gitter, IRC,
mailing lists, Google+, Twitter, etc.) must be constructive and never
resort to personal attacks, trolling, public or private harassment,
insults, or other unprofessional conduct.

We promise to extend courtesy and respect to everyone involved in this
project regardless of gender, gender identity, sexual orientation,
disability, age, race, ethnicity, religion, or level of experience. We
expect anyone contributing to the project to do the same.

If any member of the community violates this code of conduct, the
maintainers of the CEL project may take action, removing issues,
comments, and PRs or blocking accounts as deemed appropriate.

If you are subject to or witness unacceptable behavior, or have any
other concerns, please email us at
[cel-conduct@google.com](mailto:cel-conduct@google.com).
32
vendor/cel.dev/expr/CONTRIBUTING.md
vendored
Normal file
@@ -0,0 +1,32 @@
# How to Contribute

We'd love to accept your patches and contributions to this project. There are a
few guidelines you need to follow.

## Contributor License Agreement

Contributions to this project must be accompanied by a Contributor License
Agreement. You (or your employer) retain the copyright to your contribution,
this simply gives us permission to use and redistribute your contributions as
part of the project. Head over to <https://cla.developers.google.com/> to see
your current agreements on file or to sign a new one.

You generally only need to submit a CLA once, so if you've already submitted one
(even if it was for a different project), you probably don't need to do it
again.

## Code reviews

All submissions, including submissions by project members, require review. We
use GitHub pull requests for this purpose. Consult
[GitHub Help](https://help.github.com/articles/about-pull-requests/) for more
information on using pull requests.

## What to expect from maintainers

Expect maintainers to respond to new issues or pull requests within a week.
For outstanding and ongoing issues and particularly for long-running
pull requests, expect the maintainers to review within a week of a
contributor asking for a new review. There is no commitment to resolution --
merging or closing a pull request, or fixing or closing an issue -- because some
issues will require more discussion than others.
43
vendor/cel.dev/expr/GOVERNANCE.md
vendored
Normal file
@@ -0,0 +1,43 @@
# Project Governance

This document defines the governance process for the CEL language. CEL is
Google-developed, but openly governed. Major contributors to the CEL
specification and its corresponding implementations constitute the CEL
Language Council. New members may be added by a unanimous vote of the
Council.

The MAINTAINERS.md file lists the members of the CEL Language Council, and
unofficially indicates the "areas of expertise" of each member with respect
to the publicly available CEL repos.

## Code Changes

Code changes must follow the standard pull request (PR) model documented in the
CONTRIBUTING.md for each CEL repo. All fixes and features must be reviewed by a
maintainer. The maintainer reserves the right to request that any feature
request (FR) or PR be reviewed by the language council.

## Syntax and Semantic Changes

Syntactic and semantic changes must be reviewed by the CEL Language Council.
Maintainers may also request language council review at their discretion.

The review process is as follows:

- Create a Feature Request in the CEL-Spec repo. The feature description will
  serve as an abstract for the detailed design document.
- Co-develop a design document with the Language Council.
- Once the proposer gives the design document approval, the document will be
  linked to the FR in the CEL-Spec repo and opened for comments to members of
  the cel-lang-discuss@googlegroups.com.
- The Language Council will review the design doc at the next council meeting
  (once every three weeks) and the council decision included in the document.

If the proposal is approved, the spec will be updated by a maintainer (if
applicable) and a rationale will be included in the CEL-Spec wiki to ensure
future developers may follow CEL's growth and direction over time.

Approved proposals may be implemented by the proposer or by the maintainers as
the parties see fit. At the discretion of the maintainer, changes from the
approved design are permitted during implementation if they improve the user
experience and clarity of the feature.
13
vendor/cel.dev/expr/MAINTAINERS.md
vendored
Normal file
@@ -0,0 +1,13 @@
# CEL Language Council

| Name            | Company      | Area of Expertise |
|-----------------|--------------|-------------------|
| Alfred Fuller   | Facebook     | cel-cpp, cel-spec |
| Jim Larson      | Google       | cel-go, cel-spec  |
| Matthais Blume  | Google       | cel-spec          |
| Tristan Swadell | Google       | cel-go, cel-spec  |

## Emeritus

* Sanjay Ghemawat (Google)
* Wolfgang Grieskamp (Facebook)
65
vendor/cel.dev/expr/README.md
vendored
Normal file
@@ -0,0 +1,65 @@
# Common Expression Language

The Common Expression Language (CEL) implements common semantics for expression
evaluation, enabling different applications to more easily interoperate.

Key Applications

* Security policy: organizations have complex infrastructure and need common
  tooling to reason about the system as a whole
* Protocols: expressions are a useful data type and require interoperability
  across programming languages and platforms.


Guiding philosophy:

1. Keep it small & fast.
   * CEL evaluates in linear time, is mutation free, and not Turing-complete.
     This limitation is a feature of the language design, which allows the
     implementation to evaluate orders of magnitude faster than equivalently
     sandboxed JavaScript.
2. Make it extensible.
   * CEL is designed to be embedded in applications, and allows for
     extensibility via its context which allows for functions and data to be
     provided by the software that embeds it.
3. Developer-friendly.
   * The language is approachable to developers. The initial spec was based
     on the experience of developing Firebase Rules and usability testing
     many prior iterations.
   * The library itself and accompanying toolings should be easy to adopt by
     teams that seek to integrate CEL into their platforms.

The required components of a system that supports CEL are:

* The textual representation of an expression as written by a developer. It is
  of similar syntax to expressions in C/C++/Java/JavaScript
* A binary representation of an expression. It is an abstract syntax tree
  (AST).
* A compiler library that converts the textual representation to the binary
  representation. This can be done ahead of time (in the control plane) or
  just before evaluation (in the data plane).
* A context containing one or more typed variables, often protobuf messages.
  Most use-cases will use `attribute_context.proto`
* An evaluator library that takes the binary format in the context and
  produces a result, usually a Boolean.

Example of boolean conditions and object construction:

``` c
// Condition
account.balance >= transaction.withdrawal
    || (account.overdraftProtection
    && account.overdraftLimit >= transaction.withdrawal - account.balance)

// Object construction
common.GeoPoint{ latitude: 10.0, longitude: -5.5 }
```

For more detail, see:

* [Introduction](doc/intro.md)
* [Language Definition](doc/langdef.md)

Released under the [Apache License](LICENSE).

Disclaimer: This is not an official Google product.
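The README above describes the compile-then-evaluate split only in the abstract. Below is a minimal, hedged sketch of that flow using the cel-go library; cel-go is not part of this vendored package or this commit, and the variable name and expression are purely illustrative.

```go
package main

import (
	"fmt"

	"github.com/google/cel-go/cel"
)

func main() {
	// Declare the typed context the README mentions: a single variable "balance".
	env, err := cel.NewEnv(cel.Variable("balance", cel.DoubleType))
	if err != nil {
		panic(err)
	}
	// Compile the textual expression into an AST (the "binary representation").
	ast, issues := env.Compile(`balance >= 100.0`)
	if issues != nil && issues.Err() != nil {
		panic(issues.Err())
	}
	// Build an evaluable program and run it against a concrete context.
	prg, err := env.Program(ast)
	if err != nil {
		panic(err)
	}
	out, _, err := prg.Eval(map[string]interface{}{"balance": 250.0})
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // true
}
```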
145
vendor/cel.dev/expr/WORKSPACE
vendored
Normal file
@@ -0,0 +1,145 @@
|
||||
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
|
||||
|
||||
http_archive(
|
||||
name = "io_bazel_rules_go",
|
||||
sha256 = "099a9fb96a376ccbbb7d291ed4ecbdfd42f6bc822ab77ae6f1b5cb9e914e94fa",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
|
||||
"https://github.com/bazelbuild/rules_go/releases/download/v0.35.0/rules_go-v0.35.0.zip",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "bazel_gazelle",
|
||||
sha256 = "ecba0f04f96b4960a5b250c8e8eeec42281035970aa8852dda73098274d14a1d",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.29.0/bazel-gazelle-v0.29.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
http_archive(
|
||||
name = "rules_proto",
|
||||
sha256 = "e017528fd1c91c5a33f15493e3a398181a9e821a804eb7ff5acdd1d2d6c2b18d",
|
||||
strip_prefix = "rules_proto-4.0.0-3.20.0",
|
||||
urls = [
|
||||
"https://github.com/bazelbuild/rules_proto/archive/refs/tags/4.0.0-3.20.0.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# googleapis as of 05/26/2023
|
||||
http_archive(
|
||||
name = "com_google_googleapis",
|
||||
strip_prefix = "googleapis-07c27163ac591955d736f3057b1619ece66f5b99",
|
||||
sha256 = "bd8e735d881fb829751ecb1a77038dda4a8d274c45490cb9fcf004583ee10571",
|
||||
urls = [
|
||||
"https://github.com/googleapis/googleapis/archive/07c27163ac591955d736f3057b1619ece66f5b99.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# protobuf
|
||||
http_archive(
|
||||
name = "com_google_protobuf",
|
||||
sha256 = "8242327e5df8c80ba49e4165250b8f79a76bd11765facefaaecfca7747dc8da2",
|
||||
strip_prefix = "protobuf-3.21.5",
|
||||
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.21.5.zip"],
|
||||
)
|
||||
|
||||
# googletest
|
||||
http_archive(
|
||||
name = "com_google_googletest",
|
||||
urls = ["https://github.com/google/googletest/archive/master.zip"],
|
||||
strip_prefix = "googletest-master",
|
||||
)
|
||||
|
||||
# gflags
|
||||
http_archive(
|
||||
name = "com_github_gflags_gflags",
|
||||
sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe",
|
||||
strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
|
||||
"https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# glog
|
||||
http_archive(
|
||||
name = "com_google_glog",
|
||||
sha256 = "1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21",
|
||||
strip_prefix = "glog-028d37889a1e80e8a07da1b8945ac706259e5fd8",
|
||||
urls = [
|
||||
"https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
|
||||
"https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz",
|
||||
],
|
||||
)
|
||||
|
||||
# absl
|
||||
http_archive(
|
||||
name = "com_google_absl",
|
||||
strip_prefix = "abseil-cpp-master",
|
||||
urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"],
|
||||
)
|
||||
|
||||
load("@io_bazel_rules_go//go:deps.bzl", "go_rules_dependencies", "go_register_toolchains")
|
||||
load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies", "go_repository")
|
||||
load("@com_google_googleapis//:repository_rules.bzl", "switched_rules_by_language")
|
||||
load("@rules_proto//proto:repositories.bzl", "rules_proto_dependencies", "rules_proto_toolchains")
|
||||
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
|
||||
|
||||
switched_rules_by_language(
|
||||
name = "com_google_googleapis_imports",
|
||||
cc = True,
|
||||
)
|
||||
|
||||
# Do *not* call *_dependencies(), etc, yet. See comment at the end.
|
||||
|
||||
# Generated Google APIs protos for Golang
|
||||
# Generated Google APIs protos for Golang 05/25/2023
|
||||
go_repository(
|
||||
name = "org_golang_google_genproto_googleapis_api",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/genproto/googleapis/api",
|
||||
sum = "h1:m8v1xLLLzMe1m5P+gCTF8nJB9epwZQUBERm20Oy1poQ=",
|
||||
version = "v0.0.0-20230525234035-dd9d682886f9",
|
||||
)
|
||||
|
||||
# Generated Google APIs protos for Golang 05/25/2023
|
||||
go_repository(
|
||||
name = "org_golang_google_genproto_googleapis_rpc",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/genproto/googleapis/rpc",
|
||||
sum = "h1:0nDDozoAU19Qb2HwhXadU8OcsiO/09cnTqhUtq2MEOM=",
|
||||
version = "v0.0.0-20230525234030-28d5490b6b19",
|
||||
)
|
||||
|
||||
# gRPC deps
|
||||
go_repository(
|
||||
name = "org_golang_google_grpc",
|
||||
build_file_proto_mode = "disable_global",
|
||||
importpath = "google.golang.org/grpc",
|
||||
tag = "v1.49.0",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_x_net",
|
||||
importpath = "golang.org/x/net",
|
||||
sum = "h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628=",
|
||||
version = "v0.0.0-20190311183353-d8887717615a",
|
||||
)
|
||||
|
||||
go_repository(
|
||||
name = "org_golang_x_text",
|
||||
importpath = "golang.org/x/text",
|
||||
sum = "h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=",
|
||||
version = "v0.3.2",
|
||||
)
|
||||
|
||||
# Run the dependencies at the end. These will silently try to import some
|
||||
# of the above repositories but at different versions, so ours must come first.
|
||||
go_rules_dependencies()
|
||||
go_register_toolchains(version = "1.19.1")
|
||||
gazelle_dependencies()
|
||||
rules_proto_dependencies()
|
||||
rules_proto_toolchains()
|
||||
protobuf_deps()
|
1432
vendor/cel.dev/expr/checked.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
9
vendor/cel.dev/expr/cloudbuild.yaml
vendored
Normal file
@@ -0,0 +1,9 @@
steps:
- name: 'gcr.io/cloud-builders/bazel:6.4.0'
  entrypoint: bazel
  args: ['test', '--test_output=errors', '...']
  id: bazel-test
  waitFor: ['-']
timeout: 15m
options:
  machineType: 'N1_HIGHCPU_32'
490
vendor/cel.dev/expr/eval.pb.go
generated
vendored
Normal file
@@ -0,0 +1,490 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.5
|
||||
// source: cel/expr/eval.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
status "google.golang.org/genproto/googleapis/rpc/status"
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type EvalState struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Values []*ExprValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
Results []*EvalState_Result `protobuf:"bytes,3,rep,name=results,proto3" json:"results,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EvalState) Reset() {
|
||||
*x = EvalState{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EvalState) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvalState) ProtoMessage() {}
|
||||
|
||||
func (x *EvalState) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvalState.ProtoReflect.Descriptor instead.
|
||||
func (*EvalState) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *EvalState) GetValues() []*ExprValue {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *EvalState) GetResults() []*EvalState_Result {
|
||||
if x != nil {
|
||||
return x.Results
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ExprValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Types that are assignable to Kind:
|
||||
//
|
||||
// *ExprValue_Value
|
||||
// *ExprValue_Error
|
||||
// *ExprValue_Unknown
|
||||
Kind isExprValue_Kind `protobuf_oneof:"kind"`
|
||||
}
|
||||
|
||||
func (x *ExprValue) Reset() {
|
||||
*x = ExprValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ExprValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ExprValue) ProtoMessage() {}
|
||||
|
||||
func (x *ExprValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ExprValue.ProtoReflect.Descriptor instead.
|
||||
func (*ExprValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (m *ExprValue) GetKind() isExprValue_Kind {
|
||||
if m != nil {
|
||||
return m.Kind
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetValue() *Value {
|
||||
if x, ok := x.GetKind().(*ExprValue_Value); ok {
|
||||
return x.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetError() *ErrorSet {
|
||||
if x, ok := x.GetKind().(*ExprValue_Error); ok {
|
||||
return x.Error
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *ExprValue) GetUnknown() *UnknownSet {
|
||||
if x, ok := x.GetKind().(*ExprValue_Unknown); ok {
|
||||
return x.Unknown
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type isExprValue_Kind interface {
|
||||
isExprValue_Kind()
|
||||
}
|
||||
|
||||
type ExprValue_Value struct {
|
||||
Value *Value `protobuf:"bytes,1,opt,name=value,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ExprValue_Error struct {
|
||||
Error *ErrorSet `protobuf:"bytes,2,opt,name=error,proto3,oneof"`
|
||||
}
|
||||
|
||||
type ExprValue_Unknown struct {
|
||||
Unknown *UnknownSet `protobuf:"bytes,3,opt,name=unknown,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*ExprValue_Value) isExprValue_Kind() {}
|
||||
|
||||
func (*ExprValue_Error) isExprValue_Kind() {}
|
||||
|
||||
func (*ExprValue_Unknown) isExprValue_Kind() {}
|
||||
|
||||
type ErrorSet struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Errors []*status.Status `protobuf:"bytes,1,rep,name=errors,proto3" json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ErrorSet) Reset() {
|
||||
*x = ErrorSet{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ErrorSet) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ErrorSet) ProtoMessage() {}
|
||||
|
||||
func (x *ErrorSet) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ErrorSet.ProtoReflect.Descriptor instead.
|
||||
func (*ErrorSet) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ErrorSet) GetErrors() []*status.Status {
|
||||
if x != nil {
|
||||
return x.Errors
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type UnknownSet struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Exprs []int64 `protobuf:"varint,1,rep,packed,name=exprs,proto3" json:"exprs,omitempty"`
|
||||
}
|
||||
|
||||
func (x *UnknownSet) Reset() {
|
||||
*x = UnknownSet{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *UnknownSet) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*UnknownSet) ProtoMessage() {}
|
||||
|
||||
func (x *UnknownSet) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use UnknownSet.ProtoReflect.Descriptor instead.
|
||||
func (*UnknownSet) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *UnknownSet) GetExprs() []int64 {
|
||||
if x != nil {
|
||||
return x.Exprs
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type EvalState_Result struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Expr int64 `protobuf:"varint,1,opt,name=expr,proto3" json:"expr,omitempty"`
|
||||
Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) Reset() {
|
||||
*x = EvalState_Result{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EvalState_Result) ProtoMessage() {}
|
||||
|
||||
func (x *EvalState_Result) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_eval_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EvalState_Result.ProtoReflect.Descriptor instead.
|
||||
func (*EvalState_Result) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_eval_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) GetExpr() int64 {
|
||||
if x != nil {
|
||||
return x.Expr
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *EvalState_Result) GetValue() int64 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_cel_expr_eval_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_eval_proto_rawDesc = []byte{
|
||||
0x0a, 0x13, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x76, 0x61, 0x6c, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x1a,
|
||||
0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x2e,
|
||||
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x17, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x72, 0x70,
|
||||
0x63, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xa2,
|
||||
0x01, 0x0a, 0x09, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x2b, 0x0a, 0x06,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63,
|
||||
0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x34, 0x0a, 0x07, 0x72, 0x65, 0x73,
|
||||
0x75, 0x6c, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c,
|
||||
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x76, 0x61, 0x6c, 0x53, 0x74, 0x61, 0x74, 0x65, 0x2e,
|
||||
0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x07, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x73, 0x1a,
|
||||
0x32, 0x0a, 0x06, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x65, 0x78, 0x70,
|
||||
0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x04, 0x65, 0x78, 0x70, 0x72, 0x12, 0x14, 0x0a,
|
||||
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x22, 0x9a, 0x01, 0x0a, 0x09, 0x45, 0x78, 0x70, 0x72, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x12, 0x27, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
|
||||
0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x48, 0x00, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x2a, 0x0a, 0x05, 0x65, 0x72,
|
||||
0x72, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c, 0x2e,
|
||||
0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
|
||||
0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x12, 0x30, 0x0a, 0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77,
|
||||
0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
|
||||
0x70, 0x72, 0x2e, 0x55, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x48, 0x00, 0x52,
|
||||
0x07, 0x75, 0x6e, 0x6b, 0x6e, 0x6f, 0x77, 0x6e, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64,
|
||||
0x22, 0x36, 0x0a, 0x08, 0x45, 0x72, 0x72, 0x6f, 0x72, 0x53, 0x65, 0x74, 0x12, 0x2a, 0x0a, 0x06,
|
||||
0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x72, 0x70, 0x63, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
|
||||
0x52, 0x06, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x73, 0x22, 0x22, 0x0a, 0x0a, 0x55, 0x6e, 0x6b, 0x6e,
|
||||
0x6f, 0x77, 0x6e, 0x53, 0x65, 0x74, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x18,
|
||||
0x01, 0x20, 0x03, 0x28, 0x03, 0x52, 0x05, 0x65, 0x78, 0x70, 0x72, 0x73, 0x42, 0x2c, 0x0a, 0x0c,
|
||||
0x64, 0x65, 0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x09, 0x45, 0x76,
|
||||
0x61, 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
|
||||
0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cel_expr_eval_proto_rawDescOnce sync.Once
|
||||
file_cel_expr_eval_proto_rawDescData = file_cel_expr_eval_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cel_expr_eval_proto_rawDescGZIP() []byte {
|
||||
file_cel_expr_eval_proto_rawDescOnce.Do(func() {
|
||||
file_cel_expr_eval_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_eval_proto_rawDescData)
|
||||
})
|
||||
return file_cel_expr_eval_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cel_expr_eval_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_cel_expr_eval_proto_goTypes = []interface{}{
|
||||
(*EvalState)(nil), // 0: cel.expr.EvalState
|
||||
(*ExprValue)(nil), // 1: cel.expr.ExprValue
|
||||
(*ErrorSet)(nil), // 2: cel.expr.ErrorSet
|
||||
(*UnknownSet)(nil), // 3: cel.expr.UnknownSet
|
||||
(*EvalState_Result)(nil), // 4: cel.expr.EvalState.Result
|
||||
(*Value)(nil), // 5: cel.expr.Value
|
||||
(*status.Status)(nil), // 6: google.rpc.Status
|
||||
}
|
||||
var file_cel_expr_eval_proto_depIdxs = []int32{
|
||||
1, // 0: cel.expr.EvalState.values:type_name -> cel.expr.ExprValue
|
||||
4, // 1: cel.expr.EvalState.results:type_name -> cel.expr.EvalState.Result
|
||||
5, // 2: cel.expr.ExprValue.value:type_name -> cel.expr.Value
|
||||
2, // 3: cel.expr.ExprValue.error:type_name -> cel.expr.ErrorSet
|
||||
3, // 4: cel.expr.ExprValue.unknown:type_name -> cel.expr.UnknownSet
|
||||
6, // 5: cel.expr.ErrorSet.errors:type_name -> google.rpc.Status
|
||||
6, // [6:6] is the sub-list for method output_type
|
||||
6, // [6:6] is the sub-list for method input_type
|
||||
6, // [6:6] is the sub-list for extension type_name
|
||||
6, // [6:6] is the sub-list for extension extendee
|
||||
0, // [0:6] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cel_expr_eval_proto_init() }
|
||||
func file_cel_expr_eval_proto_init() {
|
||||
if File_cel_expr_eval_proto != nil {
|
||||
return
|
||||
}
|
||||
file_cel_expr_value_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cel_expr_eval_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EvalState); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_eval_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ExprValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_eval_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ErrorSet); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_eval_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*UnknownSet); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_eval_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EvalState_Result); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_cel_expr_eval_proto_msgTypes[1].OneofWrappers = []interface{}{
|
||||
(*ExprValue_Value)(nil),
|
||||
(*ExprValue_Error)(nil),
|
||||
(*ExprValue_Unknown)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cel_expr_eval_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cel_expr_eval_proto_goTypes,
|
||||
DependencyIndexes: file_cel_expr_eval_proto_depIdxs,
|
||||
MessageInfos: file_cel_expr_eval_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cel_expr_eval_proto = out.File
|
||||
file_cel_expr_eval_proto_rawDesc = nil
|
||||
file_cel_expr_eval_proto_goTypes = nil
|
||||
file_cel_expr_eval_proto_depIdxs = nil
|
||||
}
|
236
vendor/cel.dev/expr/explain.pb.go
generated
vendored
Normal file
@@ -0,0 +1,236 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.5
|
||||
// source: cel/expr/explain.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
// Deprecated: Do not use.
|
||||
type Explain struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
ExprSteps []*Explain_ExprStep `protobuf:"bytes,2,rep,name=expr_steps,json=exprSteps,proto3" json:"expr_steps,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Explain) Reset() {
|
||||
*x = Explain{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Explain) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Explain) ProtoMessage() {}
|
||||
|
||||
func (x *Explain) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Explain.ProtoReflect.Descriptor instead.
|
||||
func (*Explain) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_explain_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (x *Explain) GetValues() []*Value {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Explain) GetExprSteps() []*Explain_ExprStep {
|
||||
if x != nil {
|
||||
return x.ExprSteps
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Explain_ExprStep struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Id int64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"`
|
||||
ValueIndex int32 `protobuf:"varint,2,opt,name=value_index,json=valueIndex,proto3" json:"value_index,omitempty"`
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) Reset() {
|
||||
*x = Explain_ExprStep{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Explain_ExprStep) ProtoMessage() {}
|
||||
|
||||
func (x *Explain_ExprStep) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_explain_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Explain_ExprStep.ProtoReflect.Descriptor instead.
|
||||
func (*Explain_ExprStep) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_explain_proto_rawDescGZIP(), []int{0, 0}
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) GetId() int64 {
|
||||
if x != nil {
|
||||
return x.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Explain_ExprStep) GetValueIndex() int32 {
|
||||
if x != nil {
|
||||
return x.ValueIndex
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
var File_cel_expr_explain_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_explain_proto_rawDesc = []byte{
|
||||
0x0a, 0x16, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x65, 0x78, 0x70, 0x6c, 0x61,
|
||||
0x69, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78,
|
||||
0x70, 0x72, 0x1a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x07, 0x45, 0x78, 0x70,
|
||||
0x6c, 0x61, 0x69, 0x6e, 0x12, 0x27, 0x0a, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
|
||||
0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x12, 0x39, 0x0a,
|
||||
0x0a, 0x65, 0x78, 0x70, 0x72, 0x5f, 0x73, 0x74, 0x65, 0x70, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28,
|
||||
0x0b, 0x32, 0x1a, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x78, 0x70,
|
||||
0x6c, 0x61, 0x69, 0x6e, 0x2e, 0x45, 0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x52, 0x09, 0x65,
|
||||
0x78, 0x70, 0x72, 0x53, 0x74, 0x65, 0x70, 0x73, 0x1a, 0x3b, 0x0a, 0x08, 0x45, 0x78, 0x70, 0x72,
|
||||
0x53, 0x74, 0x65, 0x70, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03,
|
||||
0x52, 0x02, 0x69, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x5f, 0x69, 0x6e,
|
||||
0x64, 0x65, 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x49, 0x6e, 0x64, 0x65, 0x78, 0x3a, 0x02, 0x18, 0x01, 0x42, 0x2f, 0x0a, 0x0c, 0x64, 0x65, 0x76,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0c, 0x45, 0x78, 0x70, 0x6c, 0x61,
|
||||
0x69, 0x6e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64,
|
||||
0x65, 0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cel_expr_explain_proto_rawDescOnce sync.Once
|
||||
file_cel_expr_explain_proto_rawDescData = file_cel_expr_explain_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cel_expr_explain_proto_rawDescGZIP() []byte {
|
||||
file_cel_expr_explain_proto_rawDescOnce.Do(func() {
|
||||
file_cel_expr_explain_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_explain_proto_rawDescData)
|
||||
})
|
||||
return file_cel_expr_explain_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cel_expr_explain_proto_msgTypes = make([]protoimpl.MessageInfo, 2)
|
||||
var file_cel_expr_explain_proto_goTypes = []interface{}{
|
||||
(*Explain)(nil), // 0: cel.expr.Explain
|
||||
(*Explain_ExprStep)(nil), // 1: cel.expr.Explain.ExprStep
|
||||
(*Value)(nil), // 2: cel.expr.Value
|
||||
}
|
||||
var file_cel_expr_explain_proto_depIdxs = []int32{
|
||||
2, // 0: cel.expr.Explain.values:type_name -> cel.expr.Value
|
||||
1, // 1: cel.expr.Explain.expr_steps:type_name -> cel.expr.Explain.ExprStep
|
||||
2, // [2:2] is the sub-list for method output_type
|
||||
2, // [2:2] is the sub-list for method input_type
|
||||
2, // [2:2] is the sub-list for extension type_name
|
||||
2, // [2:2] is the sub-list for extension extendee
|
||||
0, // [0:2] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cel_expr_explain_proto_init() }
|
||||
func file_cel_expr_explain_proto_init() {
|
||||
if File_cel_expr_explain_proto != nil {
|
||||
return
|
||||
}
|
||||
file_cel_expr_value_proto_init()
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cel_expr_explain_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Explain); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_explain_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Explain_ExprStep); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cel_expr_explain_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 2,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cel_expr_explain_proto_goTypes,
|
||||
DependencyIndexes: file_cel_expr_explain_proto_depIdxs,
|
||||
MessageInfos: file_cel_expr_explain_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cel_expr_explain_proto = out.File
|
||||
file_cel_expr_explain_proto_rawDesc = nil
|
||||
file_cel_expr_explain_proto_goTypes = nil
|
||||
file_cel_expr_explain_proto_depIdxs = nil
|
||||
}
|
9
vendor/cel.dev/expr/regen_go_proto.sh
vendored
Normal file
@@ -0,0 +1,9 @@
#!/bin/sh
bazel build //proto/test/...
files=($(bazel aquery 'kind(proto, //proto/...)' | grep Outputs | grep "[.]pb[.]go" | sed 's/Outputs: \[//' | sed 's/\]//' | tr "," "\n"))
for src in ${files[@]};
do
  dst=$(echo $src | sed 's/\(.*\%\/github.com\/google\/cel-spec\/\(.*\)\)/\2/')
  echo "copying $dst"
  $(cp $src $dst)
done
10
vendor/cel.dev/expr/regen_go_proto_canonical_protos.sh
vendored
Normal file
@@ -0,0 +1,10 @@
#!/usr/bin/env bash
bazel build //proto/cel/expr:all

rm -vf ./*.pb.go

files=( $(bazel cquery //proto/cel/expr:expr_go_proto --output=starlark --starlark:expr="'\n'.join([f.path for f in target.output_groups.go_generated_srcs.to_list()])") )
for src in "${files[@]}";
do
  cp -v "${src}" ./
done
1633
vendor/cel.dev/expr/syntax.pb.go
generated
vendored
Normal file
File diff suppressed because it is too large
653
vendor/cel.dev/expr/value.pb.go
generated
vendored
Normal file
@@ -0,0 +1,653 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.28.1
|
||||
// protoc v3.21.5
|
||||
// source: cel/expr/value.proto
|
||||
|
||||
package expr
|
||||
|
||||
import (
|
||||
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
|
||||
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
|
||||
anypb "google.golang.org/protobuf/types/known/anypb"
|
||||
structpb "google.golang.org/protobuf/types/known/structpb"
|
||||
reflect "reflect"
|
||||
sync "sync"
|
||||
)
|
||||
|
||||
const (
|
||||
// Verify that this generated code is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion)
|
||||
// Verify that runtime/protoimpl is sufficiently up-to-date.
|
||||
_ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20)
|
||||
)
|
||||
|
||||
type Value struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
// Types that are assignable to Kind:
|
||||
//
|
||||
// *Value_NullValue
|
||||
// *Value_BoolValue
|
||||
// *Value_Int64Value
|
||||
// *Value_Uint64Value
|
||||
// *Value_DoubleValue
|
||||
// *Value_StringValue
|
||||
// *Value_BytesValue
|
||||
// *Value_EnumValue
|
||||
// *Value_ObjectValue
|
||||
// *Value_MapValue
|
||||
// *Value_ListValue
|
||||
// *Value_TypeValue
|
||||
Kind isValue_Kind `protobuf_oneof:"kind"`
|
||||
}
|
||||
|
||||
func (x *Value) Reset() {
|
||||
*x = Value{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[0]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *Value) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*Value) ProtoMessage() {}
|
||||
|
||||
func (x *Value) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[0]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use Value.ProtoReflect.Descriptor instead.
|
||||
func (*Value) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{0}
|
||||
}
|
||||
|
||||
func (m *Value) GetKind() isValue_Kind {
|
||||
if m != nil {
|
||||
return m.Kind
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetNullValue() structpb.NullValue {
|
||||
if x, ok := x.GetKind().(*Value_NullValue); ok {
|
||||
return x.NullValue
|
||||
}
|
||||
return structpb.NullValue(0)
|
||||
}
|
||||
|
||||
func (x *Value) GetBoolValue() bool {
|
||||
if x, ok := x.GetKind().(*Value_BoolValue); ok {
|
||||
return x.BoolValue
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (x *Value) GetInt64Value() int64 {
|
||||
if x, ok := x.GetKind().(*Value_Int64Value); ok {
|
||||
return x.Int64Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Value) GetUint64Value() uint64 {
|
||||
if x, ok := x.GetKind().(*Value_Uint64Value); ok {
|
||||
return x.Uint64Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Value) GetDoubleValue() float64 {
|
||||
if x, ok := x.GetKind().(*Value_DoubleValue); ok {
|
||||
return x.DoubleValue
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (x *Value) GetStringValue() string {
|
||||
if x, ok := x.GetKind().(*Value_StringValue); ok {
|
||||
return x.StringValue
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *Value) GetBytesValue() []byte {
|
||||
if x, ok := x.GetKind().(*Value_BytesValue); ok {
|
||||
return x.BytesValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetEnumValue() *EnumValue {
|
||||
if x, ok := x.GetKind().(*Value_EnumValue); ok {
|
||||
return x.EnumValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetObjectValue() *anypb.Any {
|
||||
if x, ok := x.GetKind().(*Value_ObjectValue); ok {
|
||||
return x.ObjectValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetMapValue() *MapValue {
|
||||
if x, ok := x.GetKind().(*Value_MapValue); ok {
|
||||
return x.MapValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetListValue() *ListValue {
|
||||
if x, ok := x.GetKind().(*Value_ListValue); ok {
|
||||
return x.ListValue
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *Value) GetTypeValue() string {
|
||||
if x, ok := x.GetKind().(*Value_TypeValue); ok {
|
||||
return x.TypeValue
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
type isValue_Kind interface {
|
||||
isValue_Kind()
|
||||
}
|
||||
|
||||
type Value_NullValue struct {
|
||||
NullValue structpb.NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"`
|
||||
}
|
||||
|
||||
type Value_BoolValue struct {
|
||||
BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_Int64Value struct {
|
||||
Int64Value int64 `protobuf:"varint,3,opt,name=int64_value,json=int64Value,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_Uint64Value struct {
|
||||
Uint64Value uint64 `protobuf:"varint,4,opt,name=uint64_value,json=uint64Value,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_DoubleValue struct {
|
||||
DoubleValue float64 `protobuf:"fixed64,5,opt,name=double_value,json=doubleValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_StringValue struct {
|
||||
StringValue string `protobuf:"bytes,6,opt,name=string_value,json=stringValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_BytesValue struct {
|
||||
BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_EnumValue struct {
|
||||
EnumValue *EnumValue `protobuf:"bytes,9,opt,name=enum_value,json=enumValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_ObjectValue struct {
|
||||
ObjectValue *anypb.Any `protobuf:"bytes,10,opt,name=object_value,json=objectValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_MapValue struct {
|
||||
MapValue *MapValue `protobuf:"bytes,11,opt,name=map_value,json=mapValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_ListValue struct {
|
||||
ListValue *ListValue `protobuf:"bytes,12,opt,name=list_value,json=listValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
type Value_TypeValue struct {
|
||||
TypeValue string `protobuf:"bytes,15,opt,name=type_value,json=typeValue,proto3,oneof"`
|
||||
}
|
||||
|
||||
func (*Value_NullValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_BoolValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_Int64Value) isValue_Kind() {}
|
||||
|
||||
func (*Value_Uint64Value) isValue_Kind() {}
|
||||
|
||||
func (*Value_DoubleValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_StringValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_BytesValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_EnumValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_ObjectValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_MapValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_ListValue) isValue_Kind() {}
|
||||
|
||||
func (*Value_TypeValue) isValue_Kind() {}
|
||||
|
||||
type EnumValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
|
||||
Value int32 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *EnumValue) Reset() {
|
||||
*x = EnumValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[1]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *EnumValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*EnumValue) ProtoMessage() {}
|
||||
|
||||
func (x *EnumValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[1]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use EnumValue.ProtoReflect.Descriptor instead.
|
||||
func (*EnumValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{1}
|
||||
}
|
||||
|
||||
func (x *EnumValue) GetType() string {
|
||||
if x != nil {
|
||||
return x.Type
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (x *EnumValue) GetValue() int32 {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
type ListValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Values []*Value `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"`
|
||||
}
|
||||
|
||||
func (x *ListValue) Reset() {
|
||||
*x = ListValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[2]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *ListValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*ListValue) ProtoMessage() {}
|
||||
|
||||
func (x *ListValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[2]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use ListValue.ProtoReflect.Descriptor instead.
|
||||
func (*ListValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{2}
|
||||
}
|
||||
|
||||
func (x *ListValue) GetValues() []*Value {
|
||||
if x != nil {
|
||||
return x.Values
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type MapValue struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Entries []*MapValue_Entry `protobuf:"bytes,1,rep,name=entries,proto3" json:"entries,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MapValue) Reset() {
|
||||
*x = MapValue{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[3]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MapValue) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MapValue) ProtoMessage() {}
|
||||
|
||||
func (x *MapValue) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[3]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MapValue.ProtoReflect.Descriptor instead.
|
||||
func (*MapValue) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{3}
|
||||
}
|
||||
|
||||
func (x *MapValue) GetEntries() []*MapValue_Entry {
|
||||
if x != nil {
|
||||
return x.Entries
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type MapValue_Entry struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Key *Value `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
|
||||
Value *Value `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) Reset() {
|
||||
*x = MapValue_Entry{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[4]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*MapValue_Entry) ProtoMessage() {}
|
||||
|
||||
func (x *MapValue_Entry) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_cel_expr_value_proto_msgTypes[4]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use MapValue_Entry.ProtoReflect.Descriptor instead.
|
||||
func (*MapValue_Entry) Descriptor() ([]byte, []int) {
|
||||
return file_cel_expr_value_proto_rawDescGZIP(), []int{3, 0}
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) GetKey() *Value {
|
||||
if x != nil {
|
||||
return x.Key
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *MapValue_Entry) GetValue() *Value {
|
||||
if x != nil {
|
||||
return x.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_cel_expr_value_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_cel_expr_value_proto_rawDesc = []byte{
|
||||
0x0a, 0x14, 0x63, 0x65, 0x6c, 0x2f, 0x65, 0x78, 0x70, 0x72, 0x2f, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x08, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72,
|
||||
0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
|
||||
0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1c, 0x67, 0x6f, 0x6f,
|
||||
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72,
|
||||
0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x9d, 0x04, 0x0a, 0x05, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x3b, 0x0a, 0x0a, 0x6e, 0x75, 0x6c, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65,
|
||||
0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x4e, 0x75, 0x6c, 0x6c, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6e, 0x75, 0x6c, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x1f, 0x0a, 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02,
|
||||
0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x12, 0x21, 0x0a, 0x0b, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x48, 0x00, 0x52, 0x0a, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x56,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x75, 0x69, 0x6e, 0x74, 0x36, 0x34, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x48, 0x00, 0x52, 0x0b, 0x75, 0x69,
|
||||
0x6e, 0x74, 0x36, 0x34, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x64, 0x6f, 0x75,
|
||||
0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x01, 0x48,
|
||||
0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23,
|
||||
0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x06,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c,
|
||||
0x75, 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65,
|
||||
0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x6e, 0x75, 0x6d, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13, 0x2e, 0x63, 0x65, 0x6c,
|
||||
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x45, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48,
|
||||
0x00, 0x52, 0x09, 0x65, 0x6e, 0x75, 0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x39, 0x0a, 0x0c,
|
||||
0x6f, 0x62, 0x6a, 0x65, 0x63, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0a, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x48, 0x00, 0x52, 0x0b, 0x6f, 0x62, 0x6a, 0x65,
|
||||
0x63, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x31, 0x0a, 0x09, 0x6d, 0x61, 0x70, 0x5f, 0x76,
|
||||
0x61, 0x6c, 0x75, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x63, 0x65, 0x6c,
|
||||
0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x48, 0x00,
|
||||
0x52, 0x08, 0x6d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x0a, 0x6c, 0x69,
|
||||
0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x13,
|
||||
0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x48, 0x00, 0x52, 0x09, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x12, 0x1f, 0x0a, 0x0a, 0x74, 0x79, 0x70, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x0f,
|
||||
0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x09, 0x74, 0x79, 0x70, 0x65, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x42, 0x06, 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x22, 0x35, 0x0a, 0x09, 0x45, 0x6e, 0x75,
|
||||
0x6d, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01,
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x05, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65,
|
||||
0x22, 0x34, 0x0a, 0x09, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x27, 0x0a,
|
||||
0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06,
|
||||
0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x4d, 0x61, 0x70, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x01,
|
||||
0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e,
|
||||
0x4d, 0x61, 0x70, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x2e, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x07,
|
||||
0x65, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x1a, 0x51, 0x0a, 0x05, 0x45, 0x6e, 0x74, 0x72, 0x79,
|
||||
0x12, 0x21, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x0f, 0x2e,
|
||||
0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x03,
|
||||
0x6b, 0x65, 0x79, 0x12, 0x25, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
|
||||
0x28, 0x0b, 0x32, 0x0f, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x2e, 0x56, 0x61,
|
||||
0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x2d, 0x0a, 0x0c, 0x64, 0x65,
|
||||
0x76, 0x2e, 0x63, 0x65, 0x6c, 0x2e, 0x65, 0x78, 0x70, 0x72, 0x42, 0x0a, 0x56, 0x61, 0x6c, 0x75,
|
||||
0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x0c, 0x63, 0x65, 0x6c, 0x2e, 0x64, 0x65,
|
||||
0x76, 0x2f, 0x65, 0x78, 0x70, 0x72, 0xf8, 0x01, 0x01, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
file_cel_expr_value_proto_rawDescOnce sync.Once
|
||||
file_cel_expr_value_proto_rawDescData = file_cel_expr_value_proto_rawDesc
|
||||
)
|
||||
|
||||
func file_cel_expr_value_proto_rawDescGZIP() []byte {
|
||||
file_cel_expr_value_proto_rawDescOnce.Do(func() {
|
||||
file_cel_expr_value_proto_rawDescData = protoimpl.X.CompressGZIP(file_cel_expr_value_proto_rawDescData)
|
||||
})
|
||||
return file_cel_expr_value_proto_rawDescData
|
||||
}
|
||||
|
||||
var file_cel_expr_value_proto_msgTypes = make([]protoimpl.MessageInfo, 5)
|
||||
var file_cel_expr_value_proto_goTypes = []interface{}{
|
||||
(*Value)(nil), // 0: cel.expr.Value
|
||||
(*EnumValue)(nil), // 1: cel.expr.EnumValue
|
||||
(*ListValue)(nil), // 2: cel.expr.ListValue
|
||||
(*MapValue)(nil), // 3: cel.expr.MapValue
|
||||
(*MapValue_Entry)(nil), // 4: cel.expr.MapValue.Entry
|
||||
(structpb.NullValue)(0), // 5: google.protobuf.NullValue
|
||||
(*anypb.Any)(nil), // 6: google.protobuf.Any
|
||||
}
|
||||
var file_cel_expr_value_proto_depIdxs = []int32{
|
||||
5, // 0: cel.expr.Value.null_value:type_name -> google.protobuf.NullValue
|
||||
1, // 1: cel.expr.Value.enum_value:type_name -> cel.expr.EnumValue
|
||||
6, // 2: cel.expr.Value.object_value:type_name -> google.protobuf.Any
|
||||
3, // 3: cel.expr.Value.map_value:type_name -> cel.expr.MapValue
|
||||
2, // 4: cel.expr.Value.list_value:type_name -> cel.expr.ListValue
|
||||
0, // 5: cel.expr.ListValue.values:type_name -> cel.expr.Value
|
||||
4, // 6: cel.expr.MapValue.entries:type_name -> cel.expr.MapValue.Entry
|
||||
0, // 7: cel.expr.MapValue.Entry.key:type_name -> cel.expr.Value
|
||||
0, // 8: cel.expr.MapValue.Entry.value:type_name -> cel.expr.Value
|
||||
9, // [9:9] is the sub-list for method output_type
|
||||
9, // [9:9] is the sub-list for method input_type
|
||||
9, // [9:9] is the sub-list for extension type_name
|
||||
9, // [9:9] is the sub-list for extension extendee
|
||||
0, // [0:9] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_cel_expr_value_proto_init() }
|
||||
func file_cel_expr_value_proto_init() {
|
||||
if File_cel_expr_value_proto != nil {
|
||||
return
|
||||
}
|
||||
if !protoimpl.UnsafeEnabled {
|
||||
file_cel_expr_value_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*Value); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*EnumValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MapValue); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*MapValue_Entry); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
file_cel_expr_value_proto_msgTypes[0].OneofWrappers = []interface{}{
|
||||
(*Value_NullValue)(nil),
|
||||
(*Value_BoolValue)(nil),
|
||||
(*Value_Int64Value)(nil),
|
||||
(*Value_Uint64Value)(nil),
|
||||
(*Value_DoubleValue)(nil),
|
||||
(*Value_StringValue)(nil),
|
||||
(*Value_BytesValue)(nil),
|
||||
(*Value_EnumValue)(nil),
|
||||
(*Value_ObjectValue)(nil),
|
||||
(*Value_MapValue)(nil),
|
||||
(*Value_ListValue)(nil),
|
||||
(*Value_TypeValue)(nil),
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
File: protoimpl.DescBuilder{
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_cel_expr_value_proto_rawDesc,
|
||||
NumEnums: 0,
|
||||
NumMessages: 5,
|
||||
NumExtensions: 0,
|
||||
NumServices: 0,
|
||||
},
|
||||
GoTypes: file_cel_expr_value_proto_goTypes,
|
||||
DependencyIndexes: file_cel_expr_value_proto_depIdxs,
|
||||
MessageInfos: file_cel_expr_value_proto_msgTypes,
|
||||
}.Build()
|
||||
File_cel_expr_value_proto = out.File
|
||||
file_cel_expr_value_proto_rawDesc = nil
|
||||
file_cel_expr_value_proto_goTypes = nil
|
||||
file_cel_expr_value_proto_depIdxs = nil
|
||||
}
|
2
vendor/cloud.google.com/go/compute/internal/version.go
generated
vendored
2
vendor/cloud.google.com/go/compute/internal/version.go
generated
vendored
@@ -15,4 +15,4 @@
|
||||
package internal
|
||||
|
||||
// Version is the current tagged release of the library.
|
||||
const Version = "1.23.4"
|
||||
const Version = "1.25.1"
|
||||
|
12
vendor/dario.cat/mergo/.deepsource.toml
vendored
Normal file
12
vendor/dario.cat/mergo/.deepsource.toml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
version = 1
|
||||
|
||||
test_patterns = [
|
||||
"*_test.go"
|
||||
]
|
||||
|
||||
[[analyzers]]
|
||||
name = "go"
|
||||
enabled = true
|
||||
|
||||
[analyzers.meta]
|
||||
import_path = "dario.cat/mergo"
|
33
vendor/dario.cat/mergo/.gitignore
vendored
Normal file
33
vendor/dario.cat/mergo/.gitignore
vendored
Normal file
@@ -0,0 +1,33 @@
|
||||
#### joe made this: http://goel.io/joe
|
||||
|
||||
#### go ####
|
||||
# Binaries for programs and plugins
|
||||
*.exe
|
||||
*.dll
|
||||
*.so
|
||||
*.dylib
|
||||
|
||||
# Test binary, build with `go test -c`
|
||||
*.test
|
||||
|
||||
# Output of the go coverage tool, specifically when used with LiteIDE
|
||||
*.out
|
||||
|
||||
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
|
||||
.glide/
|
||||
|
||||
#### vim ####
|
||||
# Swap
|
||||
[._]*.s[a-v][a-z]
|
||||
[._]*.sw[a-p]
|
||||
[._]s[a-v][a-z]
|
||||
[._]sw[a-p]
|
||||
|
||||
# Session
|
||||
Session.vim
|
||||
|
||||
# Temporary
|
||||
.netrwhist
|
||||
*~
|
||||
# Auto-generated tag files
|
||||
tags
|
12
vendor/dario.cat/mergo/.travis.yml
vendored
Normal file
12
vendor/dario.cat/mergo/.travis.yml
vendored
Normal file
@@ -0,0 +1,12 @@
|
||||
language: go
|
||||
arch:
|
||||
- amd64
|
||||
- ppc64le
|
||||
install:
|
||||
- go get -t
|
||||
- go get golang.org/x/tools/cmd/cover
|
||||
- go get github.com/mattn/goveralls
|
||||
script:
|
||||
- go test -race -v ./...
|
||||
after_script:
|
||||
- $HOME/gopath/bin/goveralls -service=travis-ci -repotoken $COVERALLS_TOKEN
|
46
vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
vendored
Normal file
46
vendor/dario.cat/mergo/CODE_OF_CONDUCT.md
vendored
Normal file
@@ -0,0 +1,46 @@
|
||||
# Contributor Covenant Code of Conduct
|
||||
|
||||
## Our Pledge
|
||||
|
||||
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
|
||||
|
||||
## Our Standards
|
||||
|
||||
Examples of behavior that contributes to creating a positive environment include:
|
||||
|
||||
* Using welcoming and inclusive language
|
||||
* Being respectful of differing viewpoints and experiences
|
||||
* Gracefully accepting constructive criticism
|
||||
* Focusing on what is best for the community
|
||||
* Showing empathy towards other community members
|
||||
|
||||
Examples of unacceptable behavior by participants include:
|
||||
|
||||
* The use of sexualized language or imagery and unwelcome sexual attention or advances
|
||||
* Trolling, insulting/derogatory comments, and personal or political attacks
|
||||
* Public or private harassment
|
||||
* Publishing others' private information, such as a physical or electronic address, without explicit permission
|
||||
* Other conduct which could reasonably be considered inappropriate in a professional setting
|
||||
|
||||
## Our Responsibilities
|
||||
|
||||
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
|
||||
|
||||
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
|
||||
|
||||
## Scope
|
||||
|
||||
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
|
||||
|
||||
## Enforcement
|
||||
|
||||
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at i@dario.im. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
|
||||
|
||||
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
|
||||
|
||||
## Attribution
|
||||
|
||||
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
|
||||
|
||||
[homepage]: http://contributor-covenant.org
|
||||
[version]: http://contributor-covenant.org/version/1/4/
|
112
vendor/dario.cat/mergo/CONTRIBUTING.md
vendored
Normal file
112
vendor/dario.cat/mergo/CONTRIBUTING.md
vendored
Normal file
@@ -0,0 +1,112 @@
|
||||
<!-- omit in toc -->
|
||||
# Contributing to mergo
|
||||
|
||||
First off, thanks for taking the time to contribute! ❤️
|
||||
|
||||
All types of contributions are encouraged and valued. See the [Table of Contents](#table-of-contents) for different ways to help and details about how this project handles them. Please make sure to read the relevant section before making your contribution. It will make it a lot easier for us maintainers and smooth out the experience for all involved. The community looks forward to your contributions. 🎉
|
||||
|
||||
> And if you like the project, but just don't have time to contribute, that's fine. There are other easy ways to support the project and show your appreciation, which we would also be very happy about:
|
||||
> - Star the project
|
||||
> - Tweet about it
|
||||
> - Refer this project in your project's readme
|
||||
> - Mention the project at local meetups and tell your friends/colleagues
|
||||
|
||||
<!-- omit in toc -->
|
||||
## Table of Contents
|
||||
|
||||
- [Code of Conduct](#code-of-conduct)
|
||||
- [I Have a Question](#i-have-a-question)
|
||||
- [I Want To Contribute](#i-want-to-contribute)
|
||||
- [Reporting Bugs](#reporting-bugs)
|
||||
- [Suggesting Enhancements](#suggesting-enhancements)
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
This project and everyone participating in it is governed by the
|
||||
[mergo Code of Conduct](https://github.com/imdario/mergo/blob/master/CODE_OF_CONDUCT.md).
|
||||
By participating, you are expected to uphold this code. Please report unacceptable behavior
|
||||
to <>.
|
||||
|
||||
|
||||
## I Have a Question
|
||||
|
||||
> If you want to ask a question, we assume that you have read the available [Documentation](https://pkg.go.dev/github.com/imdario/mergo).
|
||||
|
||||
Before you ask a question, it is best to search for existing [Issues](https://github.com/imdario/mergo/issues) that might help you. In case you have found a suitable issue and still need clarification, you can write your question in this issue. It is also advisable to search the internet for answers first.
|
||||
|
||||
If you then still feel the need to ask a question and need clarification, we recommend the following:
|
||||
|
||||
- Open an [Issue](https://github.com/imdario/mergo/issues/new).
|
||||
- Provide as much context as you can about what you're running into.
|
||||
- Provide project and platform versions (nodejs, npm, etc), depending on what seems relevant.
|
||||
|
||||
We will then take care of the issue as soon as possible.
|
||||
|
||||
## I Want To Contribute
|
||||
|
||||
> ### Legal Notice <!-- omit in toc -->
|
||||
> When contributing to this project, you must agree that you have authored 100% of the content, that you have the necessary rights to the content and that the content you contribute may be provided under the project license.
|
||||
|
||||
### Reporting Bugs
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### Before Submitting a Bug Report
|
||||
|
||||
A good bug report shouldn't leave others needing to chase you up for more information. Therefore, we ask you to investigate carefully, collect information and describe the issue in detail in your report. Please complete the following steps in advance to help us fix any potential bug as fast as possible.
|
||||
|
||||
- Make sure that you are using the latest version.
|
||||
- Determine if your bug is really a bug and not an error on your side e.g. using incompatible environment components/versions (Make sure that you have read the [documentation](). If you are looking for support, you might want to check [this section](#i-have-a-question)).
|
||||
- To see if other users have experienced (and potentially already solved) the same issue you are having, check whether a bug report for your bug or error already exists in the [bug tracker](https://github.com/imdario/mergo/issues?q=label%3Abug).
|
||||
- Also make sure to search the internet (including Stack Overflow) to see if users outside of the GitHub community have discussed the issue.
|
||||
- Collect information about the bug:
|
||||
- Stack trace (Traceback)
|
||||
- OS, Platform and Version (Windows, Linux, macOS, x86, ARM)
|
||||
- Version of the interpreter, compiler, SDK, runtime environment, package manager, depending on what seems relevant.
|
||||
- Possibly your input and the output
|
||||
- Can you reliably reproduce the issue? And can you also reproduce it with older versions?
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### How Do I Submit a Good Bug Report?
|
||||
|
||||
> You must never report security-related issues, vulnerabilities or bugs including sensitive information to the issue tracker, or elsewhere in public. Instead, sensitive bugs must be sent by email to .
|
||||
<!-- You may add a PGP key to allow the messages to be sent encrypted as well. -->
|
||||
|
||||
We use GitHub issues to track bugs and errors. If you run into an issue with the project:
|
||||
|
||||
- Open an [Issue](https://github.com/imdario/mergo/issues/new). (Since we can't be sure at this point whether it is a bug or not, we ask you not to talk about a bug yet and not to label the issue.)
|
||||
- Explain the behavior you would expect and the actual behavior.
|
||||
- Please provide as much context as possible and describe the *reproduction steps* that someone else can follow to recreate the issue on their own. This usually includes your code. For good bug reports you should isolate the problem and create a reduced test case.
|
||||
- Provide the information you collected in the previous section.
|
||||
|
||||
Once it's filed:
|
||||
|
||||
- The project team will label the issue accordingly.
|
||||
- A team member will try to reproduce the issue with your provided steps. If there are no reproduction steps or no obvious way to reproduce the issue, the team will ask you for those steps and mark the issue as `needs-repro`. Bugs with the `needs-repro` tag will not be addressed until they are reproduced.
|
||||
- If the team is able to reproduce the issue, it will be marked `needs-fix`, as well as possibly other tags (such as `critical`), and the issue will be left to be implemented by someone.
|
||||
|
||||
### Suggesting Enhancements
|
||||
|
||||
This section guides you through submitting an enhancement suggestion for mergo, **including completely new features and minor improvements to existing functionality**. Following these guidelines will help maintainers and the community to understand your suggestion and find related suggestions.
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### Before Submitting an Enhancement
|
||||
|
||||
- Make sure that you are using the latest version.
|
||||
- Read the [documentation]() carefully and find out if the functionality is already covered, maybe by an individual configuration.
|
||||
- Perform a [search](https://github.com/imdario/mergo/issues) to see if the enhancement has already been suggested. If it has, add a comment to the existing issue instead of opening a new one.
|
||||
- Find out whether your idea fits with the scope and aims of the project. It's up to you to make a strong case to convince the project's developers of the merits of this feature. Keep in mind that we want features that will be useful to the majority of our users and not just a small subset. If you're just targeting a minority of users, consider writing an add-on/plugin library.
|
||||
|
||||
<!-- omit in toc -->
|
||||
#### How Do I Submit a Good Enhancement Suggestion?
|
||||
|
||||
Enhancement suggestions are tracked as [GitHub issues](https://github.com/imdario/mergo/issues).
|
||||
|
||||
- Use a **clear and descriptive title** for the issue to identify the suggestion.
|
||||
- Provide a **step-by-step description of the suggested enhancement** in as many details as possible.
|
||||
- **Describe the current behavior** and **explain which behavior you expected to see instead** and why. At this point you can also tell which alternatives do not work for you.
|
||||
- You may want to **include screenshots and animated GIFs** which help you demonstrate the steps or point out the part which the suggestion is related to. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux. <!-- this should only be included if the project has a GUI -->
|
||||
- **Explain why this enhancement would be useful** to most mergo users. You may also want to point out the other projects that solved it better and which could serve as inspiration.
|
||||
|
||||
<!-- omit in toc -->
|
||||
## Attribution
|
||||
This guide is based on the **contributing-gen**. [Make your own](https://github.com/bttger/contributing-gen)!
|
@@ -1,4 +1,5 @@
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
Copyright (c) 2013 Dario Castañé. All rights reserved.
|
||||
Copyright (c) 2012 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
248
vendor/dario.cat/mergo/README.md
vendored
Normal file
248
vendor/dario.cat/mergo/README.md
vendored
Normal file
@@ -0,0 +1,248 @@
|
||||
# Mergo
|
||||
|
||||
[![GitHub release][5]][6]
|
||||
[![GoCard][7]][8]
|
||||
[![Test status][1]][2]
|
||||
[![OpenSSF Scorecard][21]][22]
|
||||
[![OpenSSF Best Practices][19]][20]
|
||||
[![Coverage status][9]][10]
|
||||
[![Sourcegraph][11]][12]
|
||||
[![FOSSA status][13]][14]
|
||||
|
||||
[![GoDoc][3]][4]
|
||||
[![Become my sponsor][15]][16]
|
||||
[![Tidelift][17]][18]
|
||||
|
||||
[1]: https://github.com/imdario/mergo/workflows/tests/badge.svg?branch=master
|
||||
[2]: https://github.com/imdario/mergo/actions/workflows/tests.yml
|
||||
[3]: https://godoc.org/github.com/imdario/mergo?status.svg
|
||||
[4]: https://godoc.org/github.com/imdario/mergo
|
||||
[5]: https://img.shields.io/github/release/imdario/mergo.svg
|
||||
[6]: https://github.com/imdario/mergo/releases
|
||||
[7]: https://goreportcard.com/badge/imdario/mergo
|
||||
[8]: https://goreportcard.com/report/github.com/imdario/mergo
|
||||
[9]: https://coveralls.io/repos/github/imdario/mergo/badge.svg?branch=master
|
||||
[10]: https://coveralls.io/github/imdario/mergo?branch=master
|
||||
[11]: https://sourcegraph.com/github.com/imdario/mergo/-/badge.svg
|
||||
[12]: https://sourcegraph.com/github.com/imdario/mergo?badge
|
||||
[13]: https://app.fossa.io/api/projects/git%2Bgithub.com%2Fimdario%2Fmergo.svg?type=shield
|
||||
[14]: https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_shield
|
||||
[15]: https://img.shields.io/github/sponsors/imdario
|
||||
[16]: https://github.com/sponsors/imdario
|
||||
[17]: https://tidelift.com/badges/package/go/github.com%2Fimdario%2Fmergo
|
||||
[18]: https://tidelift.com/subscription/pkg/go-github.com-imdario-mergo
|
||||
[19]: https://bestpractices.coreinfrastructure.org/projects/7177/badge
|
||||
[20]: https://bestpractices.coreinfrastructure.org/projects/7177
|
||||
[21]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo/badge
|
||||
[22]: https://api.securityscorecards.dev/projects/github.com/imdario/mergo
|
||||
|
||||
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
|
||||
|
||||
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
Also a lovely [comune](http://en.wikipedia.org/wiki/Mergo) (municipality) in the Province of Ancona in the Italian region of Marche.
|
||||
|
||||
## Status
|
||||
|
||||
It is ready for production use. [It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, Microsoft, etc](https://github.com/imdario/mergo#mergo-in-the-wild).
|
||||
|
||||
### Important notes
|
||||
|
||||
#### 1.0.0
|
||||
|
||||
In [1.0.0](//github.com/imdario/mergo/releases/tag/1.0.0) Mergo moves to a vanity URL `dario.cat/mergo`.
|
||||
|
||||
#### 0.3.9
|
||||
|
||||
Please keep in mind that a problematic PR broke [0.3.9](//github.com/imdario/mergo/releases/tag/0.3.9). I reverted it in [0.3.10](//github.com/imdario/mergo/releases/tag/0.3.10), and I consider it stable but not bug-free. Also, this version adds support for go modules.
|
||||
|
||||
Keep in mind that in [0.3.2](//github.com/imdario/mergo/releases/tag/0.3.2), Mergo changed `Merge()`and `Map()` signatures to support [transformers](#transformers). I added an optional/variadic argument so that it won't break the existing code.
|
||||
|
||||
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with ```go get -u dario.cat/mergo```. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
|
||||
|
||||
### Donations
|
||||
|
||||
If Mergo is useful to you, consider buying me a coffee, a beer, or making a monthly donation to allow me to keep building great free software. :heart_eyes:
|
||||
|
||||
<a href='https://ko-fi.com/B0B58839' target='_blank'><img height='36' style='border:0px;height:36px;' src='https://az743702.vo.msecnd.net/cdn/kofi1.png?v=0' border='0' alt='Buy Me a Coffee at ko-fi.com' /></a>
|
||||
<a href="https://liberapay.com/dario/donate"><img alt="Donate using Liberapay" src="https://liberapay.com/assets/widgets/donate.svg"></a>
|
||||
<a href='https://github.com/sponsors/imdario' target='_blank'><img alt="Become my sponsor" src="https://img.shields.io/github/sponsors/imdario?style=for-the-badge" /></a>
|
||||
|
||||
### Mergo in the wild
|
||||
|
||||
- [moby/moby](https://github.com/moby/moby)
|
||||
- [kubernetes/kubernetes](https://github.com/kubernetes/kubernetes)
|
||||
- [vmware/dispatch](https://github.com/vmware/dispatch)
|
||||
- [Shopify/themekit](https://github.com/Shopify/themekit)
|
||||
- [imdario/zas](https://github.com/imdario/zas)
|
||||
- [matcornic/hermes](https://github.com/matcornic/hermes)
|
||||
- [OpenBazaar/openbazaar-go](https://github.com/OpenBazaar/openbazaar-go)
|
||||
- [kataras/iris](https://github.com/kataras/iris)
|
||||
- [michaelsauter/crane](https://github.com/michaelsauter/crane)
|
||||
- [go-task/task](https://github.com/go-task/task)
|
||||
- [sensu/uchiwa](https://github.com/sensu/uchiwa)
|
||||
- [ory/hydra](https://github.com/ory/hydra)
|
||||
- [sisatech/vcli](https://github.com/sisatech/vcli)
|
||||
- [dairycart/dairycart](https://github.com/dairycart/dairycart)
|
||||
- [projectcalico/felix](https://github.com/projectcalico/felix)
|
||||
- [resin-os/balena](https://github.com/resin-os/balena)
|
||||
- [go-kivik/kivik](https://github.com/go-kivik/kivik)
|
||||
- [Telefonica/govice](https://github.com/Telefonica/govice)
|
||||
- [supergiant/supergiant](https://github.com/supergiant/supergiant)
|
||||
- [SergeyTsalkov/brooce](https://github.com/SergeyTsalkov/brooce)
|
||||
- [soniah/dnsmadeeasy](https://github.com/soniah/dnsmadeeasy)
|
||||
- [ohsu-comp-bio/funnel](https://github.com/ohsu-comp-bio/funnel)
|
||||
- [EagerIO/Stout](https://github.com/EagerIO/Stout)
|
||||
- [lynndylanhurley/defsynth-api](https://github.com/lynndylanhurley/defsynth-api)
|
||||
- [russross/canvasassignments](https://github.com/russross/canvasassignments)
|
||||
- [rdegges/cryptly-api](https://github.com/rdegges/cryptly-api)
|
||||
- [casualjim/exeggutor](https://github.com/casualjim/exeggutor)
|
||||
- [divshot/gitling](https://github.com/divshot/gitling)
|
||||
- [RWJMurphy/gorl](https://github.com/RWJMurphy/gorl)
|
||||
- [andrerocker/deploy42](https://github.com/andrerocker/deploy42)
|
||||
- [elwinar/rambler](https://github.com/elwinar/rambler)
|
||||
- [tmaiaroto/gopartman](https://github.com/tmaiaroto/gopartman)
|
||||
- [jfbus/impressionist](https://github.com/jfbus/impressionist)
|
||||
- [Jmeyering/zealot](https://github.com/Jmeyering/zealot)
|
||||
- [godep-migrator/rigger-host](https://github.com/godep-migrator/rigger-host)
|
||||
- [Dronevery/MultiwaySwitch-Go](https://github.com/Dronevery/MultiwaySwitch-Go)
|
||||
- [thoas/picfit](https://github.com/thoas/picfit)
|
||||
- [mantasmatelis/whooplist-server](https://github.com/mantasmatelis/whooplist-server)
|
||||
- [jnuthong/item_search](https://github.com/jnuthong/item_search)
|
||||
- [bukalapak/snowboard](https://github.com/bukalapak/snowboard)
|
||||
- [containerssh/containerssh](https://github.com/containerssh/containerssh)
|
||||
- [goreleaser/goreleaser](https://github.com/goreleaser/goreleaser)
|
||||
- [tjpnz/structbot](https://github.com/tjpnz/structbot)
|
||||
|
||||
## Install
|
||||
|
||||
go get dario.cat/mergo
|
||||
|
||||
// use in your .go code
|
||||
import (
|
||||
"dario.cat/mergo"
|
||||
)
|
||||
|
||||
## Usage
|
||||
|
||||
You can only merge structs of the same type (filling in exported fields that hold their type's zero value) and maps of the same type. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It won't merge empty struct values, as [they are zero values](https://golang.org/ref/spec#The_zero_value) too. Also, maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
```go
|
||||
if err := mergo.Merge(&dst, src); err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Also, you can merge overwriting values using the option `WithOverride`.
|
||||
|
||||
```go
|
||||
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
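To make the difference concrete, here is a minimal sketch (the `Config` struct below is hypothetical and exists only for this illustration): by default only zero-value fields in `dst` are filled, while `WithOverride` also replaces fields that already have a value.

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

// Config is a hypothetical struct used only for this illustration.
type Config struct {
	Host string
	Port int
}

func main() {
	src := Config{Host: "example.com", Port: 8080}

	// Default behavior: only zero-value fields of dst are filled from src.
	dst := Config{Host: "localhost"}
	if err := mergo.Merge(&dst, src); err != nil {
		panic(err)
	}
	fmt.Println(dst) // {localhost 8080}

	// WithOverride: non-empty src values replace existing dst values.
	dst = Config{Host: "localhost"}
	if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
		panic(err)
	}
	fmt.Println(dst) // {example.com 8080}
}
```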
|
||||
Additionally, you can map a `map[string]interface{}` to a struct (and vice versa, from struct to map), following the same restrictions as in `Merge()`. Keys are capitalized to find each corresponding exported field.
|
||||
|
||||
```go
|
||||
if err := mergo.Map(&dst, srcMap); err != nil {
|
||||
// ...
|
||||
}
|
||||
```
|
||||
|
||||
Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as `map[string]interface{}`; they will simply be assigned as values (see the short sketch below).
|
||||
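As a rough sketch of that warning (the `Server` and `Options` types are hypothetical and exist only for this example), note how the nested struct is stored in the map as a plain struct value rather than as a nested `map[string]interface{}`:

```go
package main

import (
	"fmt"

	"dario.cat/mergo"
)

// Hypothetical types, used only to illustrate the warning above.
type Options struct {
	Timeout int
}

type Server struct {
	Name string
	Opts Options
}

func main() {
	src := Server{Name: "api", Opts: Options{Timeout: 30}}
	dst := map[string]interface{}{}

	if err := mergo.Map(&dst, src); err != nil {
		panic(err)
	}
	// Keys are the lower-cased field names; Opts remains an Options value,
	// it is not converted into a nested map[string]interface{}.
	fmt.Printf("%#v\n", dst) // roughly: map[string]interface {}{"name":"api", "opts":main.Options{Timeout:30}}
}
```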
|
||||
Here is a nice example:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"dario.cat/mergo"
|
||||
)
|
||||
|
||||
type Foo struct {
|
||||
A string
|
||||
B int64
|
||||
}
|
||||
|
||||
func main() {
|
||||
src := Foo{
|
||||
A: "one",
|
||||
B: 2,
|
||||
}
|
||||
dest := Foo{
|
||||
A: "two",
|
||||
}
|
||||
mergo.Merge(&dest, src)
|
||||
fmt.Println(dest)
|
||||
// Will print
|
||||
// {two 2}
|
||||
}
|
||||
```
|
||||
|
||||
Note: if tests are failing due to a missing package, please execute:
|
||||
|
||||
go get gopkg.in/yaml.v3
|
||||
|
||||
### Transformers
|
||||
|
||||
Transformers allow merging specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, `time.Time` is a struct; it doesn't have a zero value as such, but `IsZero` can return true because its fields hold zero values. How can we merge a non-zero `time.Time`?
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"dario.cat/mergo"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type timeTransformer struct {
|
||||
}
|
||||
|
||||
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
|
||||
if typ == reflect.TypeOf(time.Time{}) {
|
||||
return func(dst, src reflect.Value) error {
|
||||
if dst.CanSet() {
|
||||
isZero := dst.MethodByName("IsZero")
|
||||
result := isZero.Call([]reflect.Value{})
|
||||
if result[0].Bool() {
|
||||
dst.Set(src)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Snapshot struct {
|
||||
Time time.Time
|
||||
// ...
|
||||
}
|
||||
|
||||
func main() {
|
||||
src := Snapshot{time.Now()}
|
||||
dest := Snapshot{}
|
||||
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
|
||||
fmt.Println(dest)
|
||||
// Will print
|
||||
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
|
||||
}
|
||||
```
|
||||
|
||||
## Contact me
|
||||
|
||||
If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): [@im_dario](https://twitter.com/im_dario)
|
||||
|
||||
## About
|
||||
|
||||
Written by [Dario Castañé](http://dario.im).
|
||||
|
||||
## License
|
||||
|
||||
[BSD 3-Clause](http://opensource.org/licenses/BSD-3-Clause) license, like the [Go language](http://golang.org/LICENSE).
|
||||
|
||||
[FOSSA status](https://app.fossa.io/projects/git%2Bgithub.com%2Fimdario%2Fmergo?ref=badge_large)
|
14
vendor/dario.cat/mergo/SECURITY.md
vendored
Normal file
14
vendor/dario.cat/mergo/SECURITY.md
vendored
Normal file
@@ -0,0 +1,14 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
| Version | Supported |
|
||||
| ------- | ------------------ |
|
||||
| 0.3.x | :white_check_mark: |
|
||||
| < 0.3 | :x: |
|
||||
|
||||
## Security contact information
|
||||
|
||||
To report a security vulnerability, please use the
|
||||
[Tidelift security contact](https://tidelift.com/security).
|
||||
Tidelift will coordinate the fix and disclosure.
|
148
vendor/dario.cat/mergo/doc.go
vendored
Normal file
148
vendor/dario.cat/mergo/doc.go
vendored
Normal file
@@ -0,0 +1,148 @@
|
||||
// Copyright 2013 Dario Castañé. All rights reserved.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
/*
|
||||
A helper to merge structs and maps in Golang. Useful for configuration default values, avoiding messy if-statements.
|
||||
|
||||
Mergo merges same-type structs and maps by setting default values in zero-value fields. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It also won't merge structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
# Status
|
||||
|
||||
It is ready for production use. It is used in several projects by Docker, Google, The Linux Foundation, VMWare, Shopify, etc.
|
||||
|
||||
# Important notes
|
||||
|
||||
1.0.0
|
||||
|
||||
In 1.0.0 Mergo moves to a vanity URL `dario.cat/mergo`.
|
||||
|
||||
0.3.9
|
||||
|
||||
Please keep in mind that a problematic PR broke 0.3.9. We reverted it in 0.3.10. We consider 0.3.10 stable but not bug-free. Also, this version adds support for go modules.
|
||||
|
||||
Keep in mind that in 0.3.2, Mergo changed Merge() and Map() signatures to support transformers. We added an optional/variadic argument so that it won't break the existing code.
|
||||
|
||||
If you were using Mergo before April 6th, 2015, please check your project works as intended after updating your local copy with go get -u dario.cat/mergo. I apologize for any issue caused by its previous behavior and any future bug that Mergo could cause in existing projects after the change (release 0.2.0).
|
||||
|
||||
# Install
|
||||
|
||||
Do your usual installation procedure:
|
||||
|
||||
go get dario.cat/mergo
|
||||
|
||||
// use in your .go code
|
||||
import (
|
||||
"dario.cat/mergo"
|
||||
)
|
||||
|
||||
# Usage
|
||||
|
||||
You can only merge structs of the same type (filling in exported fields that hold their type's zero value) and maps of the same type. Mergo won't merge unexported (private) fields, but it merges exported fields recursively. It won't merge empty struct values, as they are zero values too. Also, maps are merged recursively, except for structs inside maps (because they are not addressable using Go reflection).
|
||||
|
||||
if err := mergo.Merge(&dst, src); err != nil {
|
||||
// ...
|
||||
}
|
||||
|
||||
Also, you can merge overwriting values using the option WithOverride.
|
||||
|
||||
if err := mergo.Merge(&dst, src, mergo.WithOverride); err != nil {
|
||||
// ...
|
||||
}
|
||||
|
||||
Additionally, you can map a map[string]interface{} to a struct (and vice versa, from struct to map), following the same restrictions as in Merge(). Keys are capitalized to find each corresponding exported field.
|
||||
|
||||
if err := mergo.Map(&dst, srcMap); err != nil {
|
||||
// ...
|
||||
}
|
||||
|
||||
Warning: if you map a struct to a map, it won't do so recursively. Don't expect Mergo to map struct members of your struct as map[string]interface{}; they will simply be assigned as values.
|
||||
|
||||
Here is a nice example:
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"dario.cat/mergo"
|
||||
)
|
||||
|
||||
type Foo struct {
|
||||
A string
|
||||
B int64
|
||||
}
|
||||
|
||||
func main() {
|
||||
src := Foo{
|
||||
A: "one",
|
||||
B: 2,
|
||||
}
|
||||
dest := Foo{
|
||||
A: "two",
|
||||
}
|
||||
mergo.Merge(&dest, src)
|
||||
fmt.Println(dest)
|
||||
// Will print
|
||||
// {two 2}
|
||||
}
|
||||
|
||||
# Transformers
|
||||
|
||||
Transformers allow merging specific types differently from the default behavior. In other words, you can customize how some types are merged. For example, time.Time is a struct; it doesn't have a zero value as such, but IsZero can return true because its fields hold zero values. How can we merge a non-zero time.Time?
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"dario.cat/mergo"
|
||||
"reflect"
|
||||
"time"
|
||||
)
|
||||
|
||||
type timeTransformer struct {
|
||||
}
|
||||
|
||||
func (t timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
|
||||
if typ == reflect.TypeOf(time.Time{}) {
|
||||
return func(dst, src reflect.Value) error {
|
||||
if dst.CanSet() {
|
||||
isZero := dst.MethodByName("IsZero")
|
||||
result := isZero.Call([]reflect.Value{})
|
||||
if result[0].Bool() {
|
||||
dst.Set(src)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type Snapshot struct {
|
||||
Time time.Time
|
||||
// ...
|
||||
}
|
||||
|
||||
func main() {
|
||||
src := Snapshot{time.Now()}
|
||||
dest := Snapshot{}
|
||||
mergo.Merge(&dest, src, mergo.WithTransformers(timeTransformer{}))
|
||||
fmt.Println(dest)
|
||||
// Will print
|
||||
// { 2018-01-12 01:15:00 +0000 UTC m=+0.000000001 }
|
||||
}
|
||||
|
||||
# Contact me
|
||||
|
||||
If I can help you, if you have an idea, or if you are using Mergo in your projects, don't hesitate to drop me a line (or a pull request): https://twitter.com/im_dario
|
||||
|
||||
# About
|
||||
|
||||
Written by Dario Castañé: https://da.rio.hn
|
||||
|
||||
# License
|
||||
|
||||
BSD 3-Clause license, like the Go language.
|
||||
*/
|
||||
package mergo
|
178
vendor/dario.cat/mergo/map.go
vendored
Normal file
178
vendor/dario.cat/mergo/map.go
vendored
Normal file
@@ -0,0 +1,178 @@
|
||||
// Copyright 2014 Dario Castañé. All rights reserved.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Based on src/pkg/reflect/deepequal.go from official
|
||||
// golang's stdlib.
|
||||
|
||||
package mergo
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
func changeInitialCase(s string, mapper func(rune) rune) string {
|
||||
if s == "" {
|
||||
return s
|
||||
}
|
||||
r, n := utf8.DecodeRuneInString(s)
|
||||
return string(mapper(r)) + s[n:]
|
||||
}
|
||||
|
||||
func isExported(field reflect.StructField) bool {
|
||||
r, _ := utf8.DecodeRuneInString(field.Name)
|
||||
return r >= 'A' && r <= 'Z'
|
||||
}
|
||||
|
||||
// Traverses recursively both values, assigning src's fields values to dst.
|
||||
// The map argument tracks comparisons that have already been seen, which allows
|
||||
// short circuiting on recursive types.
|
||||
func deepMap(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
|
||||
overwrite := config.Overwrite
|
||||
if dst.CanAddr() {
|
||||
addr := dst.UnsafeAddr()
|
||||
h := 17 * addr
|
||||
seen := visited[h]
|
||||
typ := dst.Type()
|
||||
for p := seen; p != nil; p = p.next {
|
||||
if p.ptr == addr && p.typ == typ {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
// Remember, remember...
|
||||
visited[h] = &visit{typ, seen, addr}
|
||||
}
|
||||
zeroValue := reflect.Value{}
|
||||
switch dst.Kind() {
|
||||
case reflect.Map:
|
||||
dstMap := dst.Interface().(map[string]interface{})
|
||||
for i, n := 0, src.NumField(); i < n; i++ {
|
||||
srcType := src.Type()
|
||||
field := srcType.Field(i)
|
||||
if !isExported(field) {
|
||||
continue
|
||||
}
|
||||
fieldName := field.Name
|
||||
fieldName = changeInitialCase(fieldName, unicode.ToLower)
|
||||
if v, ok := dstMap[fieldName]; !ok || (isEmptyValue(reflect.ValueOf(v), !config.ShouldNotDereference) || overwrite) {
|
||||
dstMap[fieldName] = src.Field(i).Interface()
|
||||
}
|
||||
}
|
||||
case reflect.Ptr:
|
||||
if dst.IsNil() {
|
||||
v := reflect.New(dst.Type().Elem())
|
||||
dst.Set(v)
|
||||
}
|
||||
dst = dst.Elem()
|
||||
fallthrough
|
||||
case reflect.Struct:
|
||||
srcMap := src.Interface().(map[string]interface{})
|
||||
for key := range srcMap {
|
||||
config.overwriteWithEmptyValue = true
|
||||
srcValue := srcMap[key]
|
||||
fieldName := changeInitialCase(key, unicode.ToUpper)
|
||||
dstElement := dst.FieldByName(fieldName)
|
||||
if dstElement == zeroValue {
|
||||
// We discard it because the field doesn't exist.
|
||||
continue
|
||||
}
|
||||
srcElement := reflect.ValueOf(srcValue)
|
||||
dstKind := dstElement.Kind()
|
||||
srcKind := srcElement.Kind()
|
||||
if srcKind == reflect.Ptr && dstKind != reflect.Ptr {
|
||||
srcElement = srcElement.Elem()
|
||||
srcKind = reflect.TypeOf(srcElement.Interface()).Kind()
|
||||
} else if dstKind == reflect.Ptr {
|
||||
// Can this work? I guess it can't.
|
||||
if srcKind != reflect.Ptr && srcElement.CanAddr() {
|
||||
srcPtr := srcElement.Addr()
|
||||
srcElement = reflect.ValueOf(srcPtr)
|
||||
srcKind = reflect.Ptr
|
||||
}
|
||||
}
|
||||
|
||||
if !srcElement.IsValid() {
|
||||
continue
|
||||
}
|
||||
if srcKind == dstKind {
|
||||
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
} else if dstKind == reflect.Interface && dstElement.Kind() == reflect.Interface {
|
||||
if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
} else if srcKind == reflect.Map {
|
||||
if err = deepMap(dstElement, srcElement, visited, depth+1, config); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return fmt.Errorf("type mismatch on %s field: found %v, expected %v", fieldName, srcKind, dstKind)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Map sets fields' values in dst from src.
|
||||
// src can be a map with string keys or a struct. dst must be the opposite:
|
||||
// if src is a map, dst must be a valid pointer to struct. If src is a struct,
|
||||
// dst must be map[string]interface{}.
|
||||
// It won't merge unexported (private) fields and will do recursively
|
||||
// any exported field.
|
||||
// If dst is a map, keys will be src fields' names in lower camel case.
|
||||
// Missing key in src that doesn't match a field in dst will be skipped. This
|
||||
// doesn't apply if dst is a map.
|
||||
// This is separated method from Merge because it is cleaner and it keeps sane
|
||||
// semantics: merging equal types, mapping different (restricted) types.
|
||||
func Map(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return _map(dst, src, opts...)
|
||||
}
|
||||
|
||||
// MapWithOverwrite will do the same as Map except that non-empty dst attributes will be overridden by
|
||||
// non-empty src attribute values.
|
||||
// Deprecated: Use Map(…) with WithOverride
|
||||
func MapWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
|
||||
return _map(dst, src, append(opts, WithOverride)...)
|
||||
}
|
||||
|
||||
func _map(dst, src interface{}, opts ...func(*Config)) error {
|
||||
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
|
||||
return ErrNonPointerArgument
|
||||
}
|
||||
var (
|
||||
vDst, vSrc reflect.Value
|
||||
err error
|
||||
)
|
||||
config := &Config{}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(config)
|
||||
}
|
||||
|
||||
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
|
||||
return err
|
||||
}
|
||||
// To be friction-less, we redirect equal-type arguments
|
||||
// to deepMerge. Only because arguments can be anything.
|
||||
if vSrc.Kind() == vDst.Kind() {
|
||||
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
|
||||
}
|
||||
switch vSrc.Kind() {
|
||||
case reflect.Struct:
|
||||
if vDst.Kind() != reflect.Map {
|
||||
return ErrExpectedMapAsDestination
|
||||
}
|
||||
case reflect.Map:
|
||||
if vDst.Kind() != reflect.Struct {
|
||||
return ErrExpectedStructAsDestination
|
||||
}
|
||||
default:
|
||||
return ErrNotSupported
|
||||
}
|
||||
return deepMap(vDst, vSrc, make(map[uintptr]*visit), 0, config)
|
||||
}
|
409
vendor/dario.cat/mergo/merge.go
vendored
Normal file
409
vendor/dario.cat/mergo/merge.go
vendored
Normal file
@@ -0,0 +1,409 @@
|
||||
// Copyright 2013 Dario Castañé. All rights reserved.
|
||||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Based on src/pkg/reflect/deepequal.go from official
|
||||
// golang's stdlib.
|
||||
|
||||
package mergo
|
import (
    "fmt"
    "reflect"
)

func hasMergeableFields(dst reflect.Value) (exported bool) {
    for i, n := 0, dst.NumField(); i < n; i++ {
        field := dst.Type().Field(i)
        if field.Anonymous && dst.Field(i).Kind() == reflect.Struct {
            exported = exported || hasMergeableFields(dst.Field(i))
        } else if isExportedComponent(&field) {
            exported = exported || len(field.PkgPath) == 0
        }
    }
    return
}

func isExportedComponent(field *reflect.StructField) bool {
    pkgPath := field.PkgPath
    if len(pkgPath) > 0 {
        return false
    }
    c := field.Name[0]
    if 'a' <= c && c <= 'z' || c == '_' {
        return false
    }
    return true
}

type Config struct {
    Transformers                 Transformers
    Overwrite                    bool
    ShouldNotDereference         bool
    AppendSlice                  bool
    TypeCheck                    bool
    overwriteWithEmptyValue      bool
    overwriteSliceWithEmptyValue bool
    sliceDeepCopy                bool
    debug                        bool
}

type Transformers interface {
    Transformer(reflect.Type) func(dst, src reflect.Value) error
}

// Traverses recursively both values, assigning src's fields values to dst.
// The map argument tracks comparisons that have already been seen, which allows
// short circuiting on recursive types.
func deepMerge(dst, src reflect.Value, visited map[uintptr]*visit, depth int, config *Config) (err error) {
    overwrite := config.Overwrite
    typeCheck := config.TypeCheck
    overwriteWithEmptySrc := config.overwriteWithEmptyValue
    overwriteSliceWithEmptySrc := config.overwriteSliceWithEmptyValue
    sliceDeepCopy := config.sliceDeepCopy

    if !src.IsValid() {
        return
    }
    if dst.CanAddr() {
        addr := dst.UnsafeAddr()
        h := 17 * addr
        seen := visited[h]
        typ := dst.Type()
        for p := seen; p != nil; p = p.next {
            if p.ptr == addr && p.typ == typ {
                return nil
            }
        }
        // Remember, remember...
        visited[h] = &visit{typ, seen, addr}
    }

    if config.Transformers != nil && !isReflectNil(dst) && dst.IsValid() {
        if fn := config.Transformers.Transformer(dst.Type()); fn != nil {
            err = fn(dst, src)
            return
        }
    }

    switch dst.Kind() {
    case reflect.Struct:
        if hasMergeableFields(dst) {
            for i, n := 0, dst.NumField(); i < n; i++ {
                if err = deepMerge(dst.Field(i), src.Field(i), visited, depth+1, config); err != nil {
                    return
                }
            }
        } else {
            if dst.CanSet() && (isReflectNil(dst) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc) {
                dst.Set(src)
            }
        }
    case reflect.Map:
        if dst.IsNil() && !src.IsNil() {
            if dst.CanSet() {
                dst.Set(reflect.MakeMap(dst.Type()))
            } else {
                dst = src
                return
            }
        }

        if src.Kind() != reflect.Map {
            if overwrite && dst.CanSet() {
                dst.Set(src)
            }
            return
        }

        for _, key := range src.MapKeys() {
            srcElement := src.MapIndex(key)
            if !srcElement.IsValid() {
                continue
            }
            dstElement := dst.MapIndex(key)
            switch srcElement.Kind() {
            case reflect.Chan, reflect.Func, reflect.Map, reflect.Interface, reflect.Slice:
                if srcElement.IsNil() {
                    if overwrite {
                        dst.SetMapIndex(key, srcElement)
                    }
                    continue
                }
                fallthrough
            default:
                if !srcElement.CanInterface() {
                    continue
                }
                switch reflect.TypeOf(srcElement.Interface()).Kind() {
                case reflect.Struct:
                    fallthrough
                case reflect.Ptr:
                    fallthrough
                case reflect.Map:
                    srcMapElm := srcElement
                    dstMapElm := dstElement
                    if srcMapElm.CanInterface() {
                        srcMapElm = reflect.ValueOf(srcMapElm.Interface())
                        if dstMapElm.IsValid() {
                            dstMapElm = reflect.ValueOf(dstMapElm.Interface())
                        }
                    }
                    if err = deepMerge(dstMapElm, srcMapElm, visited, depth+1, config); err != nil {
                        return
                    }
                case reflect.Slice:
                    srcSlice := reflect.ValueOf(srcElement.Interface())

                    var dstSlice reflect.Value
                    if !dstElement.IsValid() || dstElement.IsNil() {
                        dstSlice = reflect.MakeSlice(srcSlice.Type(), 0, srcSlice.Len())
                    } else {
                        dstSlice = reflect.ValueOf(dstElement.Interface())
                    }

                    if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
                        if typeCheck && srcSlice.Type() != dstSlice.Type() {
                            return fmt.Errorf("cannot override two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
                        }
                        dstSlice = srcSlice
                    } else if config.AppendSlice {
                        if srcSlice.Type() != dstSlice.Type() {
                            return fmt.Errorf("cannot append two slices with different type (%s, %s)", srcSlice.Type(), dstSlice.Type())
                        }
                        dstSlice = reflect.AppendSlice(dstSlice, srcSlice)
                    } else if sliceDeepCopy {
                        i := 0
                        for ; i < srcSlice.Len() && i < dstSlice.Len(); i++ {
                            srcElement := srcSlice.Index(i)
                            dstElement := dstSlice.Index(i)

                            if srcElement.CanInterface() {
                                srcElement = reflect.ValueOf(srcElement.Interface())
                            }
                            if dstElement.CanInterface() {
                                dstElement = reflect.ValueOf(dstElement.Interface())
                            }

                            if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
                                return
                            }
                        }

                    }
                    dst.SetMapIndex(key, dstSlice)
                }
            }

            if dstElement.IsValid() && !isEmptyValue(dstElement, !config.ShouldNotDereference) {
                if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Slice {
                    continue
                }
                if reflect.TypeOf(srcElement.Interface()).Kind() == reflect.Map && reflect.TypeOf(dstElement.Interface()).Kind() == reflect.Map {
                    continue
                }
            }

            if srcElement.IsValid() && ((srcElement.Kind() != reflect.Ptr && overwrite) || !dstElement.IsValid() || isEmptyValue(dstElement, !config.ShouldNotDereference)) {
                if dst.IsNil() {
                    dst.Set(reflect.MakeMap(dst.Type()))
                }
                dst.SetMapIndex(key, srcElement)
            }
        }

        // Ensure that all keys in dst are deleted if they are not in src.
        if overwriteWithEmptySrc {
            for _, key := range dst.MapKeys() {
                srcElement := src.MapIndex(key)
                if !srcElement.IsValid() {
                    dst.SetMapIndex(key, reflect.Value{})
                }
            }
        }
    case reflect.Slice:
        if !dst.CanSet() {
            break
        }
        if (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc || overwriteSliceWithEmptySrc) && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) && !config.AppendSlice && !sliceDeepCopy {
            dst.Set(src)
        } else if config.AppendSlice {
            if src.Type() != dst.Type() {
                return fmt.Errorf("cannot append two slice with different type (%s, %s)", src.Type(), dst.Type())
            }
            dst.Set(reflect.AppendSlice(dst, src))
        } else if sliceDeepCopy {
            for i := 0; i < src.Len() && i < dst.Len(); i++ {
                srcElement := src.Index(i)
                dstElement := dst.Index(i)
                if srcElement.CanInterface() {
                    srcElement = reflect.ValueOf(srcElement.Interface())
                }
                if dstElement.CanInterface() {
                    dstElement = reflect.ValueOf(dstElement.Interface())
                }

                if err = deepMerge(dstElement, srcElement, visited, depth+1, config); err != nil {
                    return
                }
            }
        }
    case reflect.Ptr:
        fallthrough
    case reflect.Interface:
        if isReflectNil(src) {
            if overwriteWithEmptySrc && dst.CanSet() && src.Type().AssignableTo(dst.Type()) {
                dst.Set(src)
            }
            break
        }

        if src.Kind() != reflect.Interface {
            if dst.IsNil() || (src.Kind() != reflect.Ptr && overwrite) {
                if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
                    dst.Set(src)
                }
            } else if src.Kind() == reflect.Ptr {
                if !config.ShouldNotDereference {
                    if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
                        return
                    }
                } else {
                    if overwriteWithEmptySrc || (overwrite && !src.IsNil()) || dst.IsNil() {
                        dst.Set(src)
                    }
                }
            } else if dst.Elem().Type() == src.Type() {
                if err = deepMerge(dst.Elem(), src, visited, depth+1, config); err != nil {
                    return
                }
            } else {
                return ErrDifferentArgumentsTypes
            }
            break
        }

        if dst.IsNil() || overwrite {
            if dst.CanSet() && (overwrite || isEmptyValue(dst, !config.ShouldNotDereference)) {
                dst.Set(src)
            }
            break
        }

        if dst.Elem().Kind() == src.Elem().Kind() {
            if err = deepMerge(dst.Elem(), src.Elem(), visited, depth+1, config); err != nil {
                return
            }
            break
        }
    default:
        mustSet := (isEmptyValue(dst, !config.ShouldNotDereference) || overwrite) && (!isEmptyValue(src, !config.ShouldNotDereference) || overwriteWithEmptySrc)
        if mustSet {
            if dst.CanSet() {
                dst.Set(src)
            } else {
                dst = src
            }
        }
    }

    return
}

// Merge will fill any empty for value type attributes on the dst struct using corresponding
// src attributes if they themselves are not empty. dst and src must be valid same-type structs
// and dst must be a pointer to struct.
// It won't merge unexported (private) fields and will do recursively any exported field.
func Merge(dst, src interface{}, opts ...func(*Config)) error {
    return merge(dst, src, opts...)
}
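Editor's usage sketch (not part of the vendored file; the Profile type and its field values are made up for illustration): how Merge fills only empty destination fields, and how WithOverride changes that.

// Minimal sketch of calling dario.cat/mergo from application code.
package main

import (
    "fmt"

    "dario.cat/mergo"
)

type Profile struct {
    Name    string
    Region  string
    Retries int
}

func main() {
    dst := Profile{Name: "kubevpn"}                       // partially filled destination
    defaults := Profile{Region: "us-east-1", Retries: 3}  // source with defaults

    // Merge only fills the fields of dst that are still empty.
    if err := mergo.Merge(&dst, defaults); err != nil {
        panic(err)
    }
    fmt.Printf("%+v\n", dst) // {Name:kubevpn Region:us-east-1 Retries:3}

    // With WithOverride, non-empty src fields replace non-empty dst fields.
    src := Profile{Region: "eu-west-1"}
    _ = mergo.Merge(&dst, src, mergo.WithOverride)
    fmt.Println(dst.Region) // eu-west-1
}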

// MergeWithOverwrite will do the same as Merge except that non-empty dst attributes will be overridden by
// non-empty src attribute values.
// Deprecated: use Merge(…) with WithOverride
func MergeWithOverwrite(dst, src interface{}, opts ...func(*Config)) error {
    return merge(dst, src, append(opts, WithOverride)...)
}

// WithTransformers adds transformers to merge, allowing to customize the merging of some types.
func WithTransformers(transformers Transformers) func(*Config) {
    return func(config *Config) {
        config.Transformers = transformers
    }
}
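Editor's sketch of a custom Transformers implementation (separate illustration, not part of the vendored file; timeTransformer and the Job type are hypothetical names): only overwrite a time.Time field when the destination value is still the zero time.

// A minimal transformer registered via WithTransformers.
package main

import (
    "fmt"
    "reflect"
    "time"

    "dario.cat/mergo"
)

type timeTransformer struct{}

func (timeTransformer) Transformer(typ reflect.Type) func(dst, src reflect.Value) error {
    if typ != reflect.TypeOf(time.Time{}) {
        return nil // fall back to the default merge behaviour for other types
    }
    return func(dst, src reflect.Value) error {
        if dst.CanSet() && dst.Interface().(time.Time).IsZero() {
            dst.Set(src) // fill the zero time from src
        }
        return nil
    }
}

func main() {
    type Job struct{ StartedAt time.Time }
    dst := Job{}
    src := Job{StartedAt: time.Now()}
    _ = mergo.Merge(&dst, src, mergo.WithTransformers(timeTransformer{}))
    fmt.Println(dst.StartedAt.IsZero()) // false: the zero value was filled in
}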
|
||||
// WithOverride will make merge override non-empty dst attributes with non-empty src attributes values.
|
||||
func WithOverride(config *Config) {
|
||||
config.Overwrite = true
|
||||
}
|
||||
|
||||
// WithOverwriteWithEmptyValue will make merge override non empty dst attributes with empty src attributes values.
|
||||
func WithOverwriteWithEmptyValue(config *Config) {
|
||||
config.Overwrite = true
|
||||
config.overwriteWithEmptyValue = true
|
||||
}
|
||||
|
||||
// WithOverrideEmptySlice will make merge override empty dst slice with empty src slice.
|
||||
func WithOverrideEmptySlice(config *Config) {
|
||||
config.overwriteSliceWithEmptyValue = true
|
||||
}
|
||||
|
||||
// WithoutDereference prevents dereferencing pointers when evaluating whether they are empty
|
||||
// (i.e. a non-nil pointer is never considered empty).
|
||||
func WithoutDereference(config *Config) {
|
||||
config.ShouldNotDereference = true
|
||||
}
|
||||
|
||||
// WithAppendSlice will make merge append slices instead of overwriting it.
|
||||
func WithAppendSlice(config *Config) {
|
||||
config.AppendSlice = true
|
||||
}
|
||||
|
||||
// WithTypeCheck will make merge check types while overwriting it (must be used with WithOverride).
|
||||
func WithTypeCheck(config *Config) {
|
||||
config.TypeCheck = true
|
||||
}
|
||||
|
||||
// WithSliceDeepCopy will merge slice element one by one with Overwrite flag.
|
||||
func WithSliceDeepCopy(config *Config) {
|
||||
config.sliceDeepCopy = true
|
||||
config.Overwrite = true
|
||||
}
|
||||
|
||||
func merge(dst, src interface{}, opts ...func(*Config)) error {
|
||||
if dst != nil && reflect.ValueOf(dst).Kind() != reflect.Ptr {
|
||||
return ErrNonPointerArgument
|
||||
}
|
||||
var (
|
||||
vDst, vSrc reflect.Value
|
||||
err error
|
||||
)
|
||||
|
||||
config := &Config{}
|
||||
|
||||
for _, opt := range opts {
|
||||
opt(config)
|
||||
}
|
||||
|
||||
if vDst, vSrc, err = resolveValues(dst, src); err != nil {
|
||||
return err
|
||||
}
|
||||
if vDst.Type() != vSrc.Type() {
|
||||
return ErrDifferentArgumentsTypes
|
||||
}
|
||||
return deepMerge(vDst, vSrc, make(map[uintptr]*visit), 0, config)
|
||||
}
|
||||
|
||||
// IsReflectNil is the reflect value provided nil
|
||||
func isReflectNil(v reflect.Value) bool {
|
||||
k := v.Kind()
|
||||
switch k {
|
||||
case reflect.Interface, reflect.Slice, reflect.Chan, reflect.Func, reflect.Map, reflect.Ptr:
|
||||
// Both interface and slice are nil if first word is 0.
|
||||
// Both are always bigger than a word; assume flagIndir.
|
||||
return v.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
81
vendor/dario.cat/mergo/mergo.go
vendored
Normal file
@@ -0,0 +1,81 @@
// Copyright 2013 Dario Castañé. All rights reserved.
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Based on src/pkg/reflect/deepequal.go from official
// golang's stdlib.

package mergo

import (
    "errors"
    "reflect"
)

// Errors reported by Mergo when it finds invalid arguments.
var (
    ErrNilArguments                = errors.New("src and dst must not be nil")
    ErrDifferentArgumentsTypes     = errors.New("src and dst must be of same type")
    ErrNotSupported                = errors.New("only structs, maps, and slices are supported")
    ErrExpectedMapAsDestination    = errors.New("dst was expected to be a map")
    ErrExpectedStructAsDestination = errors.New("dst was expected to be a struct")
    ErrNonPointerArgument          = errors.New("dst must be a pointer")
)

// During deepMerge, must keep track of checks that are
// in progress. The comparison algorithm assumes that all
// checks in progress are true when it reencounters them.
// Visited are stored in a map indexed by 17 * a1 + a2;
type visit struct {
    typ  reflect.Type
    next *visit
    ptr  uintptr
}

// From src/pkg/encoding/json/encode.go.
func isEmptyValue(v reflect.Value, shouldDereference bool) bool {
    switch v.Kind() {
    case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
        return v.Len() == 0
    case reflect.Bool:
        return !v.Bool()
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return v.Int() == 0
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return v.Uint() == 0
    case reflect.Float32, reflect.Float64:
        return v.Float() == 0
    case reflect.Interface, reflect.Ptr:
        if v.IsNil() {
            return true
        }
        if shouldDereference {
            return isEmptyValue(v.Elem(), shouldDereference)
        }
        return false
    case reflect.Func:
        return v.IsNil()
    case reflect.Invalid:
        return true
    }
    return false
}

func resolveValues(dst, src interface{}) (vDst, vSrc reflect.Value, err error) {
    if dst == nil || src == nil {
        err = ErrNilArguments
        return
    }
    vDst = reflect.ValueOf(dst).Elem()
    if vDst.Kind() != reflect.Struct && vDst.Kind() != reflect.Map && vDst.Kind() != reflect.Slice {
        err = ErrNotSupported
        return
    }
    vSrc = reflect.ValueOf(src)
    // We check if vSrc is a pointer to dereference it.
    if vSrc.Kind() == reflect.Ptr {
        vSrc = vSrc.Elem()
    }
    return
}
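Editor's sketch (not from the repository): how the argument checks above surface to callers; a non-pointer dst fails with ErrNonPointerArgument and mismatched types fail with ErrDifferentArgumentsTypes.

// Demonstrating the two validation errors returned by merge/resolveValues.
package main

import (
    "fmt"

    "dario.cat/mergo"
)

func main() {
    type A struct{ X int }
    type B struct{ Y string }

    var a A
    var b B

    fmt.Println(mergo.Merge(a, a))  // "dst must be a pointer"
    fmt.Println(mergo.Merge(&a, b)) // "src and dst must be of same type"
}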
41
vendor/github.com/Azure/go-ansiterm/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,41 @@
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.8 BLOCK -->

## Security

Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).

If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.

## Reporting Security Issues

**Please do not report security vulnerabilities through public GitHub issues.**

Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).

If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).

You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).

Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:

* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
* Full paths of source file(s) related to the manifestation of the issue
* The location of the affected source code (tag/branch/commit or direct URL)
* Any special configuration required to reproduce the issue
* Step-by-step instructions to reproduce the issue
* Proof-of-concept or exploit code (if possible)
* Impact of the issue, including how an attacker might exploit the issue

This information will help us triage your report more quickly.

If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.

## Preferred Languages

We prefer all communications to be in English.

## Policy

Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).

<!-- END MICROSOFT SECURITY.MD BLOCK -->
16
vendor/github.com/DataDog/appsec-internal-go/appsec/config.go
generated
vendored
@@ -19,7 +19,7 @@ import (
// Configuration environment variables
const (
    // EnvAPISecEnabled is the env var used to enable API Security
    EnvAPISecEnabled = "DD_EXPERIMENTAL_API_SECURITY_ENABLED"
    EnvAPISecEnabled = "DD_API_SECURITY_ENABLED"
    // EnvAPISecSampleRate is the env var used to set the sampling rate of API Security schema extraction
    EnvAPISecSampleRate = "DD_API_SECURITY_REQUEST_SAMPLE_RATE"
    // EnvObfuscatorKey is the env var used to provide the WAF key obfuscation regexp
@@ -42,8 +42,8 @@ const (
    DefaultObfuscatorKeyRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?)key)|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)|bearer|authorization`
    // DefaultObfuscatorValueRegex is the default regexp used to obfuscate values
    DefaultObfuscatorValueRegex = `(?i)(?:p(?:ass)?w(?:or)?d|pass(?:_?phrase)?|secret|(?:api_?|private_?|public_?|access_?|secret_?)key(?:_?id)?|token|consumer_?(?:id|key|secret)|sign(?:ed|ature)?|auth(?:entication|orization)?)(?:\s*=[^;]|"\s*:\s*"[^"]+")|bearer\s+[a-z0-9\._\-]+|token:[a-z0-9]{13}|gh[opsu]_[0-9a-zA-Z]{36}|ey[I-L][\w=-]+\.ey[I-L][\w=-]+(?:\.[\w.+\/=-]+)?|[\-]{5}BEGIN[a-z\s]+PRIVATE\sKEY[\-]{5}[^\-]+[\-]{5}END[a-z\s]+PRIVATE\sKEY|ssh-rsa\s*[a-z0-9\/\.+]{100,}`
    // DefaultWAFTimeout is the default time limit (ms) past which a WAF run will timeout
    DefaultWAFTimeout = 4 * time.Millisecond
    // DefaultWAFTimeout is the default time limit past which a WAF run will timeout
    DefaultWAFTimeout = time.Millisecond
    // DefaultTraceRate is the default limit (trace/sec) past which ASM traces are sampled out
    DefaultTraceRate uint = 100 // up to 100 appsec traces/s
)
@@ -71,7 +71,15 @@ func NewAPISecConfig() APISecConfig {
}

func apiSecurityEnabled() bool {
    enabled, _ := strconv.ParseBool(os.Getenv(EnvAPISecEnabled))
    enabled := true
    str, set := os.LookupEnv(EnvAPISecEnabled)
    if set {
        var err error
        enabled, err = strconv.ParseBool(str)
        if err != nil {
            logEnvVarParsingError(EnvAPISecEnabled, str, err, enabled)
        }
    }
    return enabled
}

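Editor's standalone sketch (stdlib only, not from the diff): the "default to true unless the env var explicitly parses to something else" pattern that the new apiSecurityEnabled uses; boolFromEnv is a hypothetical helper name.

package main

import (
    "fmt"
    "os"
    "strconv"
)

func boolFromEnv(name string, def bool) bool {
    val, ok := os.LookupEnv(name)
    if !ok {
        return def // unset: keep the default
    }
    parsed, err := strconv.ParseBool(val)
    if err != nil {
        return def // unparsable: keep the default (the library also logs the error)
    }
    return parsed
}

func main() {
    fmt.Println(boolFromEnv("DD_API_SECURITY_ENABLED", true))
}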
4
vendor/github.com/DataDog/appsec-internal-go/appsec/embed.go
generated
vendored
@@ -7,8 +7,8 @@ package appsec

import _ "embed" // Blank import comment for golint compliance

// StaticRecommendedRules holds the recommended AppSec security rules (v1.10.0)
// Source: https://github.com/DataDog/appsec-event-rules/blob/1.10.0/build/recommended.json
// StaticRecommendedRules holds the recommended AppSec security rules (v1.11.0)
// Source: https://github.com/DataDog/appsec-event-rules/blob/1.11.0/build/recommended.json
//
//go:embed rules.json
var StaticRecommendedRules string
94
vendor/github.com/DataDog/appsec-internal-go/appsec/rules.json
generated
vendored
File diff suppressed because one or more lines are too long
126
vendor/github.com/DataDog/appsec-internal-go/httpsec/client_ip.go
generated
vendored
@@ -1,126 +0,0 @@
package httpsec

import (
    "net"
    "net/textproto"
    "strings"

    "github.com/DataDog/appsec-internal-go/netip"
)

const (
    // RemoteIPTag is the tag name used for the remote HTTP request IP address.
    RemoteIPTag = "network.client.ip"
    // ClientIPTag is the tag name used for the client IP deduced from the HTTP
    // request headers with ClientIP().
    ClientIPTag = "http.client_ip"
)

// ClientIPTags returns the resulting Datadog span tags `http.client_ip`
// containing the client IP and `network.client.ip` containing the remote IP.
// The tags are present only if a valid ip address has been returned by
// ClientIP().
func ClientIPTags(remoteIP, clientIP netip.Addr) (tags map[string]string) {
    remoteIPValid := remoteIP.IsValid()
    clientIPValid := clientIP.IsValid()
    if !remoteIPValid && !clientIPValid {
        return nil
    }

    tags = make(map[string]string, 2)
    if remoteIPValid {
        tags[RemoteIPTag] = remoteIP.String()
    }
    if clientIPValid {
        tags[ClientIPTag] = clientIP.String()
    }
    return tags
}

// ClientIP returns the first public IP address found in the given headers. If
// none is present, it returns the first valid IP address present, possibly
// being a local IP address. The remote address, when valid, is used as fallback
// when no IP address has been found at all.
func ClientIP(hdrs map[string][]string, hasCanonicalHeaders bool, remoteAddr string, monitoredHeaders []string) (remoteIP, clientIP netip.Addr) {
    // Walk IP-related headers
    var foundIP netip.Addr
headersLoop:
    for _, headerName := range monitoredHeaders {
        if hasCanonicalHeaders {
            headerName = textproto.CanonicalMIMEHeaderKey(headerName)
        }

        headerValues, exists := hdrs[headerName]
        if !exists {
            continue // this monitored header is not present
        }

        // Assuming a list of comma-separated IP addresses, split them and build
        // the list of values to try to parse as IP addresses
        var ips []string
        for _, ip := range headerValues {
            ips = append(ips, strings.Split(ip, ",")...)
        }

        // Look for the first valid or global IP address in the comma-separated list
        for _, ipstr := range ips {
            ip := parseIP(strings.TrimSpace(ipstr))
            if !ip.IsValid() {
                continue
            }
            // Replace foundIP if still not valid in order to keep the oldest
            if !foundIP.IsValid() {
                foundIP = ip
            }
            if isGlobal(ip) {
                foundIP = ip
                break headersLoop
            }
        }
    }

    // Decide which IP address is the client one by starting with the remote IP
    if ip := parseIP(remoteAddr); ip.IsValid() {
        remoteIP = ip
        clientIP = ip
    }

    // The IP address found in the headers supersedes a private remote IP address.
    if foundIP.IsValid() && !isGlobal(remoteIP) || isGlobal(foundIP) {
        clientIP = foundIP
    }

    return remoteIP, clientIP
}

func parseIP(s string) netip.Addr {
    if ip, err := netip.ParseAddr(s); err == nil {
        return ip
    }
    if h, _, err := net.SplitHostPort(s); err == nil {
        if ip, err := netip.ParseAddr(h); err == nil {
            return ip
        }
    }
    return netip.Addr{}
}

var ipv6SpecialNetworks = [...]netip.Prefix{
    netip.MustParsePrefix("fec0::/10"), // site local
}

func isGlobal(ip netip.Addr) bool {
    // IsPrivate also checks for ipv6 ULA.
    // We care to check for these addresses are not considered public, hence not global.
    // See https://www.rfc-editor.org/rfc/rfc4193.txt for more details.
    isGlobal := ip.IsValid() && !ip.IsPrivate() && !ip.IsLoopback() && !ip.IsLinkLocalUnicast()
    if !isGlobal || !ip.Is6() {
        return isGlobal
    }
    for _, n := range ipv6SpecialNetworks {
        if n.Contains(ip) {
            return false
        }
    }
    return isGlobal
}
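Editor's standalone sketch (standard library only, not from the diff): the idea behind the removed ClientIP helper, picking the first public address from a comma-separated X-Forwarded-For value and falling back to the remote address; clientIP here is a hypothetical local function, not the vendored one.

package main

import (
    "fmt"
    "net/netip"
    "strings"
)

func clientIP(xff, remoteAddr string) netip.Addr {
    for _, part := range strings.Split(xff, ",") {
        ip, err := netip.ParseAddr(strings.TrimSpace(part))
        if err != nil {
            continue
        }
        if !ip.IsPrivate() && !ip.IsLoopback() && !ip.IsLinkLocalUnicast() {
            return ip // first global address wins
        }
    }
    ip, _ := netip.ParseAddr(remoteAddr)
    return ip // fallback: the (possibly private) remote address
}

func main() {
    fmt.Println(clientIP("10.0.0.2, 203.0.113.7", "10.0.0.1")) // 203.0.113.7
}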
@@ -1,16 +1,15 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2022 Datadog, Inc.
// Copyright 2022-present Datadog, Inc.

//go:build appsec
// +build appsec

package appsec
// Package limiter provides simple rate limiting primitives, and an implementation of a token bucket rate limiter.
package limiter

import (
    "sync/atomic"
    "time"

    "sync/atomic"
)

// Limiter is used to abstract the rate limiter implementation to only expose the needed function for rate limiting.
@@ -27,35 +26,43 @@ type Limiter interface {
// TokenTicker.Allow() and TokenTicker.Stop() *must* be called once done using. Note that calling TokenTicker.Allow()
// before TokenTicker.Start() is valid, but it means the bucket won't be refilling until the call to TokenTicker.Start() is made
type TokenTicker struct {
    tokens    int64
    maxTokens int64
    ticker    *time.Ticker
    stopChan  chan struct{}
    tokens    atomic.Int64  // The amount of tokens currently available
    maxTokens int64         // The maximum amount of tokens the bucket can hold
    ticker    *time.Ticker  // The ticker used to update the bucket (nil if not started yet)
    stopChan  chan struct{} // The channel to stop the ticker updater (nil if not started yet)
}

// NewTokenTicker is a utility function that allocates a token ticker, initializes necessary fields and returns it
func NewTokenTicker(tokens, maxTokens int64) *TokenTicker {
    return &TokenTicker{
        tokens: tokens,
    t := &TokenTicker{
        maxTokens: maxTokens,
    }
    t.tokens.Store(tokens)
    return t
}

// updateBucket performs a select loop to update the token amount in the bucket.
// Used in a goroutine by the rate limiter.
func (t *TokenTicker) updateBucket(ticksChan <-chan time.Time, startTime time.Time, syncChan chan struct{}) {
func (t *TokenTicker) updateBucket(startTime time.Time, ticksChan <-chan time.Time, stopChan <-chan struct{}, syncChan chan<- struct{}) {
    nsPerToken := time.Second.Nanoseconds() / t.maxTokens
    elapsedNs := int64(0)
    prevStamp := startTime

    for {
        select {
        case <-t.stopChan:
        case <-stopChan:
            if syncChan != nil {
                close(syncChan)
            }
            return
        case stamp := <-ticksChan:
        case stamp, ok := <-ticksChan:
            if !ok {
                // The ticker has been closed, stamp is a zero-value, we ignore that. We nil-out the
                // ticksChan so we don't get stuck endlessly reading from this closed channel again.
                ticksChan = nil
                continue
            }

            // Compute the time in nanoseconds that passed between the previous timestamp and this one
            // This will be used to know how many tokens can be added into the bucket depending on the limiter rate
            elapsedNs += stamp.Sub(prevStamp).Nanoseconds()
@@ -67,7 +74,7 @@ func (t *TokenTicker) updateBucket(ticksChan <-chan time.Time, startTime time.Ti
            if elapsedNs >= nsPerToken {
                // Atomic spin lock to make sure we don't race for `t.tokens`
                for {
                    tokens := atomic.LoadInt64(&t.tokens)
                    tokens := t.tokens.Load()
                    if tokens == t.maxTokens {
                        break // Bucket is already full, nothing to do
                    }
@@ -76,7 +83,7 @@ func (t *TokenTicker) updateBucket(ticksChan <-chan time.Time, startTime time.Ti
                    if tokens+inc > t.maxTokens {
                        inc -= (tokens + inc) % t.maxTokens
                    }
                    if atomic.CompareAndSwapInt64(&t.tokens, tokens, tokens+inc) {
                    if t.tokens.CompareAndSwap(tokens, tokens+inc) {
                        // Keep track of remaining elapsed ns that were not taken into account for this computation,
                        // so that increment computation remains precise over time
                        elapsedNs = elapsedNs % nsPerToken
@@ -98,22 +105,17 @@ func (t *TokenTicker) updateBucket(ticksChan <-chan time.Time, startTime time.Ti
func (t *TokenTicker) Start() {
    timeNow := time.Now()
    t.ticker = time.NewTicker(500 * time.Microsecond)
    t.start(t.ticker.C, timeNow, false)
    t.start(timeNow, t.ticker.C, nil)
}

// start is used for internal testing. Controlling the ticker means being able to test per-tick
// rather than per-duration, which is more reliable if the app is under a lot of stress.
// sync is used to decide whether the limiter should create a channel for synchronization with the testing app after a
// bucket update. The limiter is in charge of closing the channel in this case.
func (t *TokenTicker) start(ticksChan <-chan time.Time, startTime time.Time, sync bool) <-chan struct{} {
// rather than per-duration, which is more reliable if the app is under a lot of stress. The
// syncChan, if non-nil, will receive one message after each tick from the ticksChan has been
// processed, providing a strong synchronization primitive. The limiter will close the syncChan when
// it is stopped, signaling that no further ticks will be processed.
func (t *TokenTicker) start(startTime time.Time, ticksChan <-chan time.Time, syncChan chan<- struct{}) {
    t.stopChan = make(chan struct{})
    var syncChan chan struct{}

    if sync {
        syncChan = make(chan struct{})
    }
    go t.updateBucket(ticksChan, startTime, syncChan)
    return syncChan
    go t.updateBucket(startTime, ticksChan, t.stopChan, syncChan)
}

// Stop shuts down the rate limiter, taking care stopping the ticker and closing all channels
@@ -121,11 +123,13 @@ func (t *TokenTicker) Stop() {
    // Stop the ticker only if it has been instantiated (not the case when testing by calling start() directly)
    if t.ticker != nil {
        t.ticker.Stop()
        t.ticker = nil // Ensure stop can be called multiple times idempotently.
    }
    // Close the stop channel only if it has been created. This covers the case where Stop() is called without any prior
    // call to Start()
    if t.stopChan != nil {
        close(t.stopChan)
        t.stopChan = nil // Ensure stop can be called multiple times idempotently.
    }
}

@@ -133,10 +137,10 @@ func (t *TokenTicker) Stop() {
// Thread-safe.
func (t *TokenTicker) Allow() bool {
    for {
        tokens := atomic.LoadInt64(&t.tokens)
        tokens := t.tokens.Load()
        if tokens == 0 {
            return false
        } else if atomic.CompareAndSwapInt64(&t.tokens, tokens, tokens-1) {
        } else if t.tokens.CompareAndSwap(tokens, tokens-1) {
            return true
        }
    }
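Editor's sketch (not from the diff): driving the token-bucket limiter above through the exported calls shown in the hunks (NewTokenTicker, Start, Allow, Stop). The import path is an assumption inferred from the new package comment.

package main

import (
    "fmt"

    "github.com/DataDog/appsec-internal-go/limiter" // assumed path for the new package limiter
)

func main() {
    // Bucket holds at most 5 tokens and refills up to 5 tokens per second.
    t := limiter.NewTokenTicker(5, 5)
    t.Start()
    defer t.Stop()

    allowed := 0
    for i := 0; i < 10; i++ {
        if t.Allow() { // consumes one token when available
            allowed++
        }
    }
    fmt.Println("allowed requests:", allowed) // bounded by the tokens available
}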
32
vendor/github.com/DataDog/appsec-internal-go/netip/ip_default.go
generated
vendored
@@ -1,32 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2022 Datadog, Inc.

//go:build !go1.19
// +build !go1.19

package netip

import "inet.af/netaddr"

// Addr wraps an netaddr.IP value
type Addr = netaddr.IP

// Prefix wraps an netaddr.IPPrefix value
type Prefix = netaddr.IPPrefix

var (
    // ParseAddr wraps the netaddr.ParseIP function
    ParseAddr = netaddr.ParseIP
    // ParsePrefix wraps the netaddr.ParseIPPrefix function
    ParsePrefix = netaddr.ParseIPPrefix
    // MustParsePrefix wraps the netaddr.MustParseIPPrefix function
    MustParsePrefix = netaddr.MustParseIPPrefix
    // MustParseAddr wraps the netaddr.MustParseIP function
    MustParseAddr = netaddr.MustParseIP
    // IPv4 wraps the netaddr.IPv4 function
    IPv4 = netaddr.IPv4
    // AddrFrom16 wraps the netaddr.IPv6Raw function
    AddrFrom16 = netaddr.IPv6Raw
)
34
vendor/github.com/DataDog/appsec-internal-go/netip/ip_go119.go
generated
vendored
@@ -1,34 +0,0 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2022 Datadog, Inc.

//go:build go1.19
// +build go1.19

package netip

import "net/netip"

// Addr wraps a netip.Addr value
type Addr = netip.Addr

// Prefix wraps a netip.Prefix value
type Prefix = netip.Prefix

var (
    // ParseAddr wraps the netip.ParseAddr function
    ParseAddr = netip.ParseAddr
    // MustParsePrefix wraps the netip.MustParsePrefix function
    MustParsePrefix = netip.MustParsePrefix
    // MustParseAddr wraps the netip.MustParseAddr function
    MustParseAddr = netip.MustParseAddr
    // AddrFrom16 wraps the netIP.AddrFrom16 function
    AddrFrom16 = netip.AddrFrom16
)

// IPv4 wraps the netip.AddrFrom4 function
func IPv4(a, b, c, d byte) Addr {
    e := [4]byte{a, b, c, d}
    return netip.AddrFrom4(e)
}
4
vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/cache.go
generated
vendored
@@ -41,8 +41,8 @@ func (c *measuredCache) statsLoop() {
    for {
        select {
        case <-tick.C:
            c.statsd.Gauge("datadog.trace_agent.obfuscation.sql_cache.hits", float64(mx.Hits()), nil, 1)     //nolint:errcheck
            c.statsd.Gauge("datadog.trace_agent.obfuscation.sql_cache.misses", float64(mx.Misses()), nil, 1) //nolint:errcheck
            _ = c.statsd.Gauge("datadog.trace_agent.obfuscation.sql_cache.hits", float64(mx.Hits()), nil, 1)     //nolint:errcheck
            _ = c.statsd.Gauge("datadog.trace_agent.obfuscation.sql_cache.misses", float64(mx.Misses()), nil, 1) //nolint:errcheck
        case <-c.close:
            c.Cache.Close()
            return
2
vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/json_scanner.go
generated
vendored
@@ -533,6 +533,8 @@ func stateNul(s *scanner, c byte) int {

// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
//
//nolint:revive // TODO(APM) Fix revive linter
func stateError(s *scanner, c byte) int {
    return scanError
}
10
vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/obfuscate.go
generated
vendored
@@ -165,6 +165,16 @@ type SQLConfig struct {
    // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize".
    KeepPositionalParameter bool `json:"keep_positional_parameter" yaml:"keep_positional_parameter"`

    // KeepTrailingSemicolon specifies whether to keep trailing semicolon.
    // By default, trailing semicolon is removed during normalization.
    // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize".
    KeepTrailingSemicolon bool `json:"keep_trailing_semicolon" yaml:"keep_trailing_semicolon"`

    // KeepIdentifierQuotation specifies whether to keep identifier quotation, e.g. "my_table" or [my_table].
    // By default, identifier quotation is removed during normalization.
    // This option is only valid when ObfuscationMode is "obfuscate_only" or "obfuscate_and_normalize".
    KeepIdentifierQuotation bool `json:"keep_identifier_quotation" yaml:"keep_identifier_quotation"`

    // Cache reports whether the obfuscator should use a LRU look-up cache for SQL obfuscations.
    Cache bool
}
2
vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/redis.go
generated
vendored
@@ -247,6 +247,8 @@ func obfuscateRedisCmd(out *strings.Builder, cmd string, args ...string) {

// removeAllRedisArgs will take in a command and obfuscate all arguments following
// the command, regardless of if the command is valid Redis or not
//
//nolint:revive // TODO(APM) Fix revive linter
func (*Obfuscator) RemoveAllRedisArgs(rediscmd string) string {
    fullCmd := strings.Fields(rediscmd)
    if len(fullCmd) == 0 {
2
vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql.go
generated
vendored
@@ -458,6 +458,8 @@ func (o *Obfuscator) ObfuscateWithSQLLexer(in string, opts *SQLConfig) (*Obfusca
        sqllexer.WithCollectProcedures(opts.CollectProcedures),
        sqllexer.WithKeepSQLAlias(opts.KeepSQLAlias),
        sqllexer.WithRemoveSpaceBetweenParentheses(opts.RemoveSpaceBetweenParentheses),
        sqllexer.WithKeepTrailingSemicolon(opts.KeepTrailingSemicolon),
        sqllexer.WithKeepIdentifierQuotation(opts.KeepIdentifierQuotation),
    )
    out, statementMetadata, err := sqllexer.ObfuscateAndNormalize(
        in,
5
vendor/github.com/DataDog/datadog-agent/pkg/obfuscate/sql_tokenizer.go
generated
vendored
@@ -610,6 +610,7 @@ func (tkn *SQLTokenizer) scanIdentifier() (TokenKind, []byte) {
    return ID, t
}

//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte) {
    for tkn.advance(); tkn.lastChar != ')' && tkn.lastChar != EndChar; tkn.advance() {
    }
@@ -622,6 +623,7 @@ func (tkn *SQLTokenizer) scanVariableIdentifier(prefix rune) (TokenKind, []byte)
    return Variable, tkn.bytes()
}

//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanFormatParameter(prefix rune) (TokenKind, []byte) {
    tkn.advance()
    return Variable, tkn.bytes()
@@ -675,6 +677,7 @@ func (tkn *SQLTokenizer) scanDollarQuotedString() (TokenKind, []byte) {
    return DollarQuotedString, buf.Bytes()
}

//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte) {
    // a prepared statement expect a digit identifier like $1
    if !isDigit(tkn.lastChar) {
@@ -692,6 +695,7 @@ func (tkn *SQLTokenizer) scanPreparedStatement(prefix rune) (TokenKind, []byte)
    return PreparedStatement, buff
}

//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanEscapeSequence(braces rune) (TokenKind, []byte) {
    for tkn.lastChar != '}' && tkn.lastChar != EndChar {
        tkn.advance()
@@ -821,6 +825,7 @@ func (tkn *SQLTokenizer) scanString(delim rune, kind TokenKind) (TokenKind, []by
    return kind, buf.Bytes()
}

//nolint:revive // TODO(APM) Fix revive linter
func (tkn *SQLTokenizer) scanCommentType1(prefix string) (TokenKind, []byte) {
    for tkn.lastChar != EndChar {
        if tkn.lastChar == '\n' {
9
vendor/github.com/DataDog/datadog-agent/pkg/remoteconfig/state/configs_asm.go
generated
vendored
@@ -49,11 +49,14 @@ type ASMFeaturesConfig struct {
    Metadata Metadata
}

// ASMFeaturesData describes the enabled state of ASM features
// ASMFeaturesData describes the state of ASM and some of its features
type ASMFeaturesData struct {
    ASM struct {
        Enabled bool `json:"enabled"`
    } `json:"asm"`
    APISecurity struct {
        RequestSampleRate float64 `json:"request_sample_rate"`
    } `json:"api_security"`
}

func parseASMFeaturesConfig(data []byte, metadata Metadata) (ASMFeaturesConfig, error) {
@@ -95,9 +98,13 @@ func (r *Repository) ASMFeaturesConfigs() map[string]ASMFeaturesConfig {
type ApplyState uint64

const (
    //ApplyStateUnknown indicates that a client does not support the ApplyState feature
    ApplyStateUnknown ApplyState = iota
    // ApplyStateUnacknowledged indicates a client has received the config but has not specified success or failure
    ApplyStateUnacknowledged
    // ApplyStateAcknowledged indicates a client has successfully applied the config
    ApplyStateAcknowledged
    // ApplyStateError indicates that a client has failed to apply the config
    ApplyStateError
)
Some files were not shown because too many files have changed in this diff