feat: remove container only if auto-remove is true

Author: fengcaiwen
Date: 2023-09-02 08:42:13 +08:00
parent 7ee9925d5e
commit c1d1bacef0
12 changed files with 130 additions and 40 deletions
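The gist of the change: forced container removal is now guarded by the container's `AutoRemove` setting, so containers started without `--rm` are left in place for inspection. Below is a minimal sketch of that pattern, assuming the standard Docker Go SDK; the helper name `removeIfAutoRemove` is illustrative and not part of the repository.

```go
package example

import (
	"context"

	"github.com/docker/docker/api/types"
	"github.com/docker/docker/client"
	log "github.com/sirupsen/logrus"
)

// removeIfAutoRemove force-removes a container only when it was created
// with AutoRemove set, mirroring the guard this commit adds around cleanup.
func removeIfAutoRemove(ctx context.Context, cli *client.Client, id string) error {
	inspect, err := cli.ContainerInspect(ctx, id)
	if err != nil {
		return err
	}
	if inspect.HostConfig == nil || !inspect.HostConfig.AutoRemove {
		// Containers started without --rm are kept so they can be inspected later.
		log.Debugf("skip removing container %s: AutoRemove is false", id)
		return nil
	}
	return cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
}
```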

View File

@@ -281,7 +281,7 @@ Run the Kubernetes pod in the local Docker container, and cooperate with the ser
the specified header to your local machine, or all of the traffic to your local machine.
```shell
➜ ~ kubevpn -n kube-system --headers a=1 -p 9080:9080 -p 80:80 dev deployment/authors
➜ ~ kubevpn -n kube-system --headers a=1 -p 9080:9080 -p 80:80 -it --entrypoint sh dev deployment/authors
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
@@ -348,6 +348,41 @@ de9e2f8ab57d nginx:latest "/docker-entrypoint.…" 5 seconds
➜ ~
```
If you just want to start up a Docker image, you can use a simpler way, like this:
```shell
kubevpn --headers user=naison dev deployment/authors
```
Example
```shell
root@27b74bde78b6:/app# kubevpn --headers user=naison dev deployment/authors
hostname 27b74bde78b6
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
port forward ready
tunnel connected
dns service ok
tar: removing leading '/' from member names
/tmp/3795398593261835591:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/tmp/1432525228828829439:/var/run/secrets/kubernetes.io/serviceaccount
Created container: nginx_default_kubevpn_08aba
Wait container nginx_default_kubevpn_08aba to be running...
Container nginx_default_kubevpn_08aba is running now
WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested
Created container: authors_default_kubevpn_08ab9
2023/09/02 00:17:00 Start listening http port 9080 ...
```
Now the main process will block and stream the logs.
If you want to specify the image used to start the container locally, you can use the parameter `--docker-image`. When the
image does not exist locally, it will be pulled from the corresponding image registry. If you want to specify startup
parameters, you can use the `--entrypoint` parameter and replace it with the command you want to execute, such
@@ -362,14 +397,13 @@ need to special parameter `--network` (inner docker) for sharing network and pid
Example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/config:/root/.kube/config naison/kubevpn:v1.1.35
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config naison/kubevpn:v1.1.36
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -c authors -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/vke:/root/.kube/config -v /Users/naison/Desktop/kubevpn/bin:/app naison/kubevpn:v1.1.35
root@4d0c3c4eae2b:/# hostname
4d0c3c4eae2b
root@4d0c3c4eae2b:/# kubevpn -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --network container:4d0c3c4eae2b --entrypoint /bin/bash dev deployment/authors
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -c authors -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/vke:/root/.kube/config naison/kubevpn:v1.1.36
root@4d0c3c4eae2b:/#
root@4d0c3c4eae2b:/# kubevpn -n kube-system --image naison/kubevpn:v1.1.36 --headers user=naison -it --entrypoint sh dev deployment/authors
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
@@ -418,7 +452,7 @@ Container nginx_kube-system_kubevpn_c68e7 is running now
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 {bash} /usr/bin/qemu-x86_64 /bin/bash /bin/bash
60 root 0:07 {kubevpn} /usr/bin/qemu-x86_64 kubevpn kubevpn dev deployment/authors -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --parent
60 root 0:07 {kubevpn} /usr/bin/qemu-x86_64 kubevpn kubevpn dev deployment/authors -n kube-system --image naison/kubevpn:v1.1.36 --headers user=naison --parent
73 root 0:00 {tail} /usr/bin/qemu-x86_64 /usr/bin/tail tail -f /dev/null
80 root 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
92 root 0:00 {sh} /usr/bin/qemu-x86_64 /bin/sh /bin/sh
@@ -506,8 +540,8 @@ Answer: here are two solution to solve this problem
``` shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v1.1.35
Image: docker.io/naison/kubevpn:v1.1.35
Version: v1.1.36
Image: docker.io/naison/kubevpn:v1.1.36
Branch: master
Git commit: 87dac42dad3d8f472a9dcdfc2c6cd801551f23d1
Built time: 2023-01-15 04:19:45
@@ -516,11 +550,11 @@ Answer: here are two solution to solve this problem
➜ ~
```
The image is `docker.io/naison/kubevpn:v1.1.35`; transfer this image to your private docker registry
The image is `docker.io/naison/kubevpn:v1.1.36`; transfer this image to your private docker registry
```text
docker pull docker.io/naison/kubevpn:v1.1.35
docker tag docker.io/naison/kubevpn:v1.1.35 [docker registry]/[namespace]/[repo]:[tag]
docker pull docker.io/naison/kubevpn:v1.1.36
docker tag docker.io/naison/kubevpn:v1.1.36 [docker registry]/[namespace]/[repo]:[tag]
docker push [docker registry]/[namespace]/[repo]:[tag]
```

View File

@@ -278,7 +278,7 @@ Hello world!%
Run the Kubernetes pod in a local Docker container, and cooperate with the service mesh to intercept traffic with the specified header to your local machine, or all of the traffic to your local machine. This development mode depends on a local Docker runtime.
```shell
➜ ~ kubevpn -n kube-system --headers a=1 -p 9080:9080 -p 80:80 dev deployment/authors
➜ ~ kubevpn -n kube-system --headers a=1 -p 9080:9080 -p 80:80 -it --entrypoint sh dev deployment/authors
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
@@ -343,6 +343,41 @@ de9e2f8ab57d nginx:latest "/docker-entrypoint.…" 5 seconds
➜ ~
```
If you just want to start up the image locally, you can use a simpler way:
```shell
kubevpn --headers user=naison dev deployment/authors
```
Example:
```shell
root@27b74bde78b6:/app# kubevpn --headers user=naison dev deployment/authors
hostname 27b74bde78b6
got cidr from cache
update ref count successfully
traffic manager already exist, reuse it
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
Waiting for deployment "authors" rollout to finish: 1 old replicas are pending termination...
deployment "authors" successfully rolled out
port forward ready
tunnel connected
dns service ok
tar: removing leading '/' from member names
/tmp/3795398593261835591:/var/run/secrets/kubernetes.io/serviceaccount
tar: Removing leading `/' from member names
tar: Removing leading `/' from hard link targets
/tmp/1432525228828829439:/var/run/secrets/kubernetes.io/serviceaccount
Created container: nginx_default_kubevpn_08aba
Wait container nginx_default_kubevpn_08aba to be running...
Container nginx_default_kubevpn_08aba is running now
WARNING: The requested image's platform (linux/amd64) does not match the detected host platform (linux/arm64/v8) and no specific platform was requested
Created container: authors_default_kubevpn_08ab9
2023/09/02 00:17:00 Start listening http port 9080 ...
```
At this point the main process will block and show the logs by default.
If you want to specify the image used to start the container locally, you can use the `--docker-image` parameter; when the image does not exist locally, it will be pulled from the corresponding registry. If you want to specify startup
parameters, use the `--entrypoint` parameter and replace it with the command you want to execute, such as `--entrypoint /bin/bash`. For more parameters, see `kubevpn dev --help`.
@@ -354,14 +389,13 @@ de9e2f8ab57d nginx:latest "/docker-entrypoint.…" 5 seconds
Example:
```shell
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/config:/root/.kube/config naison/kubevpn:v1.1.35
docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config naison/kubevpn:v1.1.36
```
```shell
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -c authors -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v /Users/naison/.kube/config:/root/.kube/config naison/kubevpn:v1.1.35
root@4d0c3c4eae2b:/# hostname
4d0c3c4eae2b
root@4d0c3c4eae2b:/# kubevpn -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --network container:4d0c3c4eae2b --entrypoint /bin/bash dev deployment/authors
➜ ~ docker run -it --privileged --sysctl net.ipv6.conf.all.disable_ipv6=0 -c authors -v /var/run/docker.sock:/var/run/docker.sock -v /tmp:/tmp -v ~/.kube/config:/root/.kube/config naison/kubevpn:v1.1.36
root@4d0c3c4eae2b:/#
root@4d0c3c4eae2b:/# kubevpn -n kube-system --image naison/kubevpn:v1.1.36 --headers user=naison -it --entrypoint sh dev deployment/authors
----------------------------------------------------------------------------------
Warn: Use sudo to execute command kubevpn can not use user env KUBECONFIG.
@@ -410,7 +444,7 @@ Container nginx_kube-system_kubevpn_c68e7 is running now
/opt/microservices # ps -ef
PID USER TIME COMMAND
1 root 0:00 {bash} /usr/bin/qemu-x86_64 /bin/bash /bin/bash
60 root 0:07 {kubevpn} /usr/bin/qemu-x86_64 kubevpn kubevpn dev deployment/authors -n kube-system --image naison/kubevpn:v1.1.35 --headers user=naison --parent
60 root 0:07 {kubevpn} /usr/bin/qemu-x86_64 kubevpn kubevpn dev deployment/authors -n kube-system --image naison/kubevpn:v1.1.36 --headers user=naison --parent
73 root 0:00 {tail} /usr/bin/qemu-x86_64 /usr/bin/tail tail -f /dev/null
80 root 0:00 {nginx} /usr/bin/qemu-x86_64 /usr/sbin/nginx nginx -g daemon off;
92 root 0:00 {sh} /usr/bin/qemu-x86_64 /bin/sh /bin/sh
@@ -496,8 +530,8 @@ Windows
``` shell
➜ ~ kubevpn version
KubeVPN: CLI
Version: v1.1.35
Image: docker.io/naison/kubevpn:v1.1.35
Version: v1.1.36
Image: docker.io/naison/kubevpn:v1.1.36
Branch: master
Git commit: 87dac42dad3d8f472a9dcdfc2c6cd801551f23d1
Built time: 2023-01-15 04:19:45
@@ -506,18 +540,18 @@ Windows
➜ ~
```
The image is `docker.io/naison/kubevpn:v1.1.35`; transfer this image to your own registry.
The image is `docker.io/naison/kubevpn:v1.1.36`; transfer this image to your own registry.
```text
docker pull docker.io/naison/kubevpn:v1.1.35
docker tag docker.io/naison/kubevpn:v1.1.35 [docker registry]/[namespace]/[repo]:[tag]
docker pull docker.io/naison/kubevpn:v1.1.36
docker tag docker.io/naison/kubevpn:v1.1.36 [docker registry]/[namespace]/[repo]:[tag]
docker push [docker registry]/[namespace]/[repo]:[tag]
```
Then you can use this image, as follows:
```text
➜ ~ kubevpn connect --image docker.io/naison/kubevpn:v1.1.35
➜ ~ kubevpn connect --image docker.io/naison/kubevpn:v1.1.36
got cidr from cache
traffic manager not exist, try to create it...
pod [kubevpn-traffic-manager] status is Running

View File

@@ -33,7 +33,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
kubevpn connect
# Connect to api-server behind of bastion host or ssh jump host
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem
kubevpn connect --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐

View File

@@ -44,7 +44,7 @@ var cpExample = templates.Examples(i18n.T(`
kubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar
# copy reverse proxy api-server behind of bastion host or ssh jump host
kubevpn cp deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem
kubevpn cp deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐

View File

@@ -57,7 +57,7 @@ Startup your kubernetes workloads in local Docker container with same volume、e
kubevpn dev --no-proxy service/productpage
# Develop workloads which api-server behind of bastion host or ssh jump host
kubevpn dev --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem deployment/productpage
kubevpn dev --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem deployment/productpage
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐

View File

@@ -51,7 +51,7 @@ func CmdDuplicate(f cmdutil.Factory) *cobra.Command {
kubevpn duplicate deployment/productpage --headers a=1
# duplicate workloads which api-server behind of bastion host or ssh jump host
kubevpn duplicate deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem --headers a=1
kubevpn duplicate deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐

View File

@@ -45,7 +45,7 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
kubevpn proxy service/productpage --headers a=1
# Connect to api-server behind of bastion host or ssh jump host and proxy kubernetes resource traffic into local PC
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem --headers a=1
kubevpn proxy deployment/productpage --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem --headers a=1
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐

View File

@@ -29,7 +29,7 @@ func CmdReset(factory cmdutil.Factory) *cobra.Command {
kubevpn reset -n test
# Reset cluster api-server behind of bastion host or ssh jump host
kubevpn reset --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile /Users/naison/.ssh/ssh.pem
kubevpn reset --ssh-addr 192.168.1.100:22 --ssh-username root --ssh-keyfile ~/.ssh/ssh.pem
# it also support ProxyJump, like
┌──────┐ ┌──────┐ ┌──────┐ ┌──────┐ ┌────────────┐

View File

@@ -11,6 +11,7 @@ import (
"github.com/docker/cli/cli/command"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
log "github.com/sirupsen/logrus"
)
type RunOptions struct {
@@ -87,6 +88,7 @@ func reportError(stderr io.Writer, name string, str string, withHelp bool) {
if withHelp {
str += "\nSee 'docker " + name + " --help'."
}
log.Error(str)
_, _ = fmt.Fprintln(stderr, "docker:", str)
}

View File

@@ -218,9 +218,11 @@ func (l ConfigList) Remove(ctx context.Context, cli *client.Client) error {
if err != nil {
log.Debug(err)
}
err = cli.ContainerRemove(ctx, runConfig.containerName, types.ContainerRemoveOptions{Force: true})
if err != nil {
log.Debug(err)
if runConfig.hostConfig.AutoRemove {
err = cli.ContainerRemove(ctx, runConfig.containerName, types.ContainerRemoveOptions{Force: true})
if err != nil {
log.Debug(err)
}
}
}
i, err := cli.NetworkInspect(ctx, config.ConfigMapPodTrafficManager, types.NetworkInspectOptions{})
@@ -395,6 +397,16 @@ func DoDev(devOptions *Options, flags *pflag.FlagSet, f cmdutil.Factory) error {
if !inspect.State.Running {
return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
}
} else if mode.IsDefault() && util.RunningInContainer() {
var hostname string
if hostname, err = os.Hostname(); err != nil {
return err
}
log.Infof("hostname %s", hostname)
err = devOptions.Copts.netMode.Set(fmt.Sprintf("container:%s", hostname))
if err != nil {
return err
}
}
if err = connect.InitClient(f); err != nil {
@@ -444,7 +456,7 @@ func DoDev(devOptions *Options, flags *pflag.FlagSet, f cmdutil.Factory) error {
}
case ConnectModeContainer:
var connectContainer *RunConfig
connectContainer, err = createConnectContainer(*devOptions, connect, path, err, cli, platform)
connectContainer, err = createConnectContainer(*devOptions, connect, path, cli, platform)
if err != nil {
return err
}
@@ -510,7 +522,7 @@ func DoDev(devOptions *Options, flags *pflag.FlagSet, f cmdutil.Factory) error {
return err
}
func createConnectContainer(devOptions Options, connect handler.ConnectOptions, path string, err error, cli *client.Client, platform *specs.Platform) (*RunConfig, error) {
func createConnectContainer(devOptions Options, connect handler.ConnectOptions, path string, cli *client.Client, platform *specs.Platform) (*RunConfig, error) {
var entrypoint []string
if devOptions.NoProxy {
entrypoint = []string{"kubevpn", "connect", "-n", connect.Namespace, "--kubeconfig", "/root/.kube/config", "--image", config.Image}
@@ -593,8 +605,7 @@ func createConnectContainer(devOptions Options, connect handler.ConnectOptions,
if newUUID, err := uuid.NewUUID(); err == nil {
suffix = strings.ReplaceAll(newUUID.String(), "-", "")[:5]
}
var kubevpnNetwork string
kubevpnNetwork, err = createKubevpnNetwork(context.Background(), cli)
kubevpnNetwork, err := createKubevpnNetwork(context.Background(), cli)
if err != nil {
return nil, err
}
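The `DoDev` hunk above also teaches the default mode to detect when kubevpn itself is running inside a Docker container and, if so, to join that container's network namespace via `--network container:<hostname>` (Docker sets a container's hostname to its container ID by default). For reference, here is a standalone sketch of that detection path; the helper names are illustrative, not from the repository.

```go
package example

import (
	"fmt"
	"os"
)

// runningInContainer mirrors the new pkg/util helper: Docker creates
// /.dockerenv inside every container it starts.
func runningInContainer() bool {
	_, err := os.Stat("/.dockerenv")
	return err == nil
}

// containerNetMode builds the network mode used to share the current
// container's network namespace, e.g. "container:4d0c3c4eae2b".
func containerNetMode() (string, error) {
	if !runningInContainer() {
		return "", fmt.Errorf("not running inside a container")
	}
	hostname, err := os.Hostname()
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("container:%s", hostname), nil
}
```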

View File

@@ -72,7 +72,7 @@ func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *comma
id = create.ID
log.Infof("Created container: %s", name)
defer func() {
if err != nil {
if err != nil && runConfig.hostConfig.AutoRemove {
_ = cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
}
}()
@@ -139,7 +139,7 @@ func runFirst(ctx context.Context, runConfig *RunConfig, cli *apiclient.Client,
rand.New(rand.NewSource(time.Now().UnixNano()))
defer func() {
if err != nil {
if err != nil && runConfig.hostConfig.AutoRemove {
_ = cli.ContainerRemove(ctx, id, types.ContainerRemoveOptions{Force: true})
}
}()

pkg/util/container.go (new file, +9 lines)
View File

@@ -0,0 +1,9 @@
package util
import "os"
// RunningInContainer returns true if the current process runs from inside a docker container.
func RunningInContainer() bool {
_, err := os.Stat("/.dockerenv")
return err == nil
}