Mirror of https://github.com/kubenetworks/kubevpn.git (synced 2025-10-05 07:16:54 +08:00)

Commit: refactor: dev mode use exec command instead of library
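The hunks below replace calls into the Docker client SDK (github.com/docker/docker/client and docker/cli) with invocations of the docker CLI through os/exec, so dev mode no longer links the library. As a rough illustration of the pattern the commit adopts — a minimal sketch, not the project's actual code; the helper name and the trimmed-down struct are invented for this example — container inspection moves from client.ContainerInspect to parsing the output of `docker inspect`:

package main

import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"os/exec"
)

// containerState mirrors only the fields this sketch reads from `docker inspect` output.
type containerState struct {
	State struct {
		Status  string `json:"Status"`
		Running bool   `json:"Running"`
	} `json:"State"`
}

// inspectViaCLI shells out to `docker inspect` instead of calling the SDK's ContainerInspect.
func inspectViaCLI(ctx context.Context, name string) (containerState, error) {
	out, err := exec.CommandContext(ctx, "docker", "inspect", name).CombinedOutput()
	if err != nil {
		return containerState{}, fmt.Errorf("docker inspect %s: %v: %s", name, err, out)
	}
	// `docker inspect` always prints a JSON array, even for a single container.
	var results []containerState
	if err := json.NewDecoder(bytes.NewReader(out)).Decode(&results); err != nil {
		return containerState{}, err
	}
	if len(results) == 0 {
		return containerState{}, fmt.Errorf("container %s not found", name)
	}
	return results[0], nil
}

func main() {
	// Container name is illustrative.
	st, err := inspectViaCLI(context.Background(), "kubevpn-traffic-manager")
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("status:", st.State.Status, "running:", st.State.Running)
}

The trade-off, visible throughout the diff, is dropping the SDK and its API-version coupling in exchange for requiring a docker binary on PATH and parsing its output.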
@@ -4,7 +4,7 @@ import (
	"fmt"
	"os"

	"github.com/containerd/containerd/platforms"
	"github.com/docker/cli/cli/command"
	log "github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	cmdutil "k8s.io/kubectl/pkg/cmd/util"
@@ -90,26 +90,16 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
			return err
		}
		util.InitLoggerForClient(config.Debug)

		if p := options.RunOptions.Platform; p != "" {
			if _, err = platforms.Parse(p); err != nil {
				return fmt.Errorf("error parsing specified platform: %v", err)
			}
		}
		if err = validatePullOpt(options.RunOptions.Pull); err != nil {
			return err
		}

		err = daemon.StartupDaemon(cmd.Context())
		if err != nil {
			return err
		}
		if transferImage {
			err = regctl.TransferImageWithRegctl(cmd.Context(), config.OriginImage, config.Image)
		}
		if err != nil {
			return err
		}
		}
		return pkgssh.SshJumpAndSetEnv(cmd.Context(), sshConf, cmd.Flags(), false)
	},
	RunE: func(cmd *cobra.Command, args []string) error {
@@ -124,8 +114,8 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
		defer func() {
			for _, function := range options.GetRollbackFuncList() {
				if function != nil {
					if er := function(); er != nil {
						log.Errorf("Rollback failed, error: %s", er.Error())
					if err := function(); err != nil {
						log.Errorf("Rollback failed, error: %s", err.Error())
					}
				}
			}
@@ -135,8 +125,12 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {
			return err
		}

		err := options.Main(cmd.Context(), sshConf, cmd.Flags(), transferImage, imagePullSecretName)
		conf, hostConfig, err := dev.Parse(cmd.Flags(), options.ContainerOptions)
		if err != nil {
			return err
		}

		return options.Main(cmd.Context(), sshConf, conf, hostConfig, imagePullSecretName)
	},
}
cmd.Flags().SortFlags = false
@@ -149,26 +143,12 @@ func CmdDev(f cmdutil.Factory) *cobra.Command {

	// diy docker options
	cmd.Flags().StringVar(&options.DevImage, "dev-image", "", "Use to startup docker container, Default is pod image")
	// origin docker options
	dev.AddDockerFlags(options, cmd.Flags())

	// -- origin docker options -- start
	options.ContainerOptions = dev.AddFlags(cmd.Flags())
	cmd.Flags().StringVar(&options.RunOptions.Pull, "pull", dev.PullImageMissing, `Pull image before running ("`+dev.PullImageAlways+`"|"`+dev.PullImageMissing+`"|"`+dev.PullImageNever+`")`)
	command.AddPlatformFlag(cmd.Flags(), &options.RunOptions.Platform)
	// -- origin docker options -- end
	handler.AddExtraRoute(cmd.Flags(), &options.ExtraRouteInfo)
	pkgssh.AddSshFlags(cmd.Flags(), sshConf)
	return cmd
}

func validatePullOpt(val string) error {
	switch val {
	case dev.PullImageAlways, dev.PullImageMissing, dev.PullImageNever, "":
		// valid option, but nothing to do yet
		return nil
	default:
		return fmt.Errorf(
			"invalid pull option: '%s': must be one of %q, %q or %q",
			val,
			dev.PullImageAlways,
			dev.PullImageMissing,
			dev.PullImageNever,
		)
	}
}
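For orientation on the flag wiring above: --pull accepts "always", "missing" or "never" (checked by validatePullOpt), --platform is registered via command.AddPlatformFlag, and --dev-image overrides the pod image for the local container. A hypothetical invocation — the workload name and image are purely illustrative, not taken from this diff:

	kubevpn dev deployment/authors --dev-image ubuntu:22.04 --platform linux/amd64 --pull missing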
191	pkg/dev/LICENSE
@@ -1,191 +0,0 @@
(Deleted file: the full Apache License, Version 2.0 text, https://www.apache.org/licenses/LICENSE-2.0, ending with the notice "Copyright 2013-2017 Docker, Inc. Licensed under the Apache License, Version 2.0".)
@@ -1,207 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"runtime"
|
||||
"sync"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/moby/term"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// The default escape key sequence: ctrl-p, ctrl-q
|
||||
// TODO: This could be moved to `pkg/term`.
|
||||
var defaultEscapeKeys = []byte{16, 17}
|
||||
|
||||
// A hijackedIOStreamer handles copying input to and output from streams to the
|
||||
// connection.
|
||||
type hijackedIOStreamer struct {
|
||||
streams command.Streams
|
||||
inputStream io.ReadCloser
|
||||
outputStream io.Writer
|
||||
errorStream io.Writer
|
||||
|
||||
resp types.HijackedResponse
|
||||
|
||||
tty bool
|
||||
detachKeys string
|
||||
}
|
||||
|
||||
// stream handles setting up the IO and then begins streaming stdin/stdout
|
||||
// to/from the hijacked connection, blocking until it is either done reading
|
||||
// output, the user inputs the detach key sequence when in TTY mode, or when
|
||||
// the given context is cancelled.
|
||||
func (h *hijackedIOStreamer) stream(ctx context.Context) error {
|
||||
restoreInput, err := h.setupInput()
|
||||
if err != nil {
|
||||
return fmt.Errorf("unable to setup input stream: %s", err)
|
||||
}
|
||||
|
||||
defer restoreInput()
|
||||
|
||||
outputDone := h.beginOutputStream(restoreInput)
|
||||
inputDone, detached := h.beginInputStream(restoreInput)
|
||||
|
||||
select {
|
||||
case err := <-outputDone:
|
||||
return err
|
||||
case <-inputDone:
|
||||
// Input stream has closed.
|
||||
if h.outputStream != nil || h.errorStream != nil {
|
||||
// Wait for output to complete streaming.
|
||||
select {
|
||||
case err := <-outputDone:
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
case err := <-detached:
|
||||
// Got a detach key sequence.
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
func (h *hijackedIOStreamer) setupInput() (restore func(), err error) {
|
||||
if h.inputStream == nil || !h.tty {
|
||||
// No need to setup input TTY.
|
||||
// The restore func is a nop.
|
||||
return func() {}, nil
|
||||
}
|
||||
|
||||
if err := setRawTerminal(h.streams); err != nil {
|
||||
return nil, fmt.Errorf("unable to set IO streams as raw terminal: %s", err)
|
||||
}
|
||||
|
||||
// Use sync.Once so we may call restore multiple times but ensure we
|
||||
// only restore the terminal once.
|
||||
var restoreOnce sync.Once
|
||||
restore = func() {
|
||||
restoreOnce.Do(func() {
|
||||
_ = restoreTerminal(h.streams, h.inputStream)
|
||||
})
|
||||
}
|
||||
|
||||
// Wrap the input to detect detach escape sequence.
|
||||
// Use default escape keys if an invalid sequence is given.
|
||||
escapeKeys := defaultEscapeKeys
|
||||
if h.detachKeys != "" {
|
||||
customEscapeKeys, err := term.ToBytes(h.detachKeys)
|
||||
if err != nil {
|
||||
log.Warnf("Invalid detach escape keys, using default: %s", err)
|
||||
} else {
|
||||
escapeKeys = customEscapeKeys
|
||||
}
|
||||
}
|
||||
|
||||
h.inputStream = ioutils.NewReadCloserWrapper(term.NewEscapeProxy(h.inputStream, escapeKeys), h.inputStream.Close)
|
||||
|
||||
return restore, nil
|
||||
}
|
||||
|
||||
func (h *hijackedIOStreamer) beginOutputStream(restoreInput func()) <-chan error {
|
||||
if h.outputStream == nil && h.errorStream == nil {
|
||||
// There is no need to copy output.
|
||||
return nil
|
||||
}
|
||||
|
||||
outputDone := make(chan error)
|
||||
go func() {
|
||||
var err error
|
||||
|
||||
// When TTY is ON, use regular copy
|
||||
if h.outputStream != nil && h.tty {
|
||||
_, err = io.Copy(h.outputStream, h.resp.Reader)
|
||||
// We should restore the terminal as soon as possible
|
||||
// once the connection ends so any following print
|
||||
// messages will be in normal type.
|
||||
restoreInput()
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(h.outputStream, h.errorStream, h.resp.Reader)
|
||||
}
|
||||
|
||||
log.Debug("[hijack] End of stdout")
|
||||
|
||||
if err != nil {
|
||||
log.Debugf("Error receive stdout: %s", err)
|
||||
}
|
||||
|
||||
outputDone <- err
|
||||
}()
|
||||
|
||||
return outputDone
|
||||
}
|
||||
|
||||
func (h *hijackedIOStreamer) beginInputStream(restoreInput func()) (doneC <-chan struct{}, detachedC <-chan error) {
|
||||
inputDone := make(chan struct{})
|
||||
detached := make(chan error)
|
||||
|
||||
go func() {
|
||||
if h.inputStream != nil {
|
||||
_, err := io.Copy(h.resp.Conn, h.inputStream)
|
||||
// We should restore the terminal as soon as possible
|
||||
// once the connection ends so any following print
|
||||
// messages will be in normal type.
|
||||
restoreInput()
|
||||
|
||||
log.Debug("[hijack] End of stdin")
|
||||
|
||||
if _, ok := err.(term.EscapeError); ok {
|
||||
detached <- err
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// This error will also occur on the receive
|
||||
// side (from stdout) where it will be
|
||||
// propagated back to the caller.
|
||||
log.Debugf("Error send stdin: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := h.resp.CloseWrite(); err != nil {
|
||||
log.Debugf("Couldn't send EOF: %s", err)
|
||||
}
|
||||
|
||||
close(inputDone)
|
||||
}()
|
||||
|
||||
return inputDone, detached
|
||||
}
|
||||
|
||||
func setRawTerminal(streams command.Streams) error {
|
||||
if err := streams.In().SetRawTerminal(); err != nil {
|
||||
return err
|
||||
}
|
||||
return streams.Out().SetRawTerminal()
|
||||
}
|
||||
|
||||
func restoreTerminal(streams command.Streams, in io.Closer) error {
|
||||
streams.In().RestoreTerminal()
|
||||
streams.Out().RestoreTerminal()
|
||||
// WARNING: DO NOT REMOVE THE OS CHECKS !!!
|
||||
// For some reason this Close call blocks on darwin..
|
||||
// As the client exits right after, simply discard the close
|
||||
// until we find a better solution.
|
||||
//
|
||||
// This can also cause the client on Windows to get stuck in Win32 CloseHandle()
|
||||
// in some cases. See https://github.com/docker/docker/issues/28267#issuecomment-288237442
|
||||
// Tracked internally at Microsoft by VSO #11352156. In the
|
||||
// Windows case, you hit this if you are using the native/v2 console,
|
||||
// not the "legacy" console, and you start the client in a new window. eg
|
||||
// `start docker run --rm -it microsoft/nanoserver cmd /s /c echo foobar`
|
||||
// will hang. Remove start, and it won't repro.
|
||||
if in != nil && runtime.GOOS != "darwin" && runtime.GOOS != "windows" {
|
||||
return in.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -34,8 +34,8 @@ type ContainerOptions struct {
	Args []string
}

// addFlags adds all command line flags that will be used by Parse to the FlagSet
func addFlags(flags *pflag.FlagSet) *ContainerOptions {
// AddFlags adds all command line flags that will be used by Parse to the FlagSet
func AddFlags(flags *pflag.FlagSet) *ContainerOptions {
	copts := &ContainerOptions{
		attach: opts.NewListOpts(validateAttach),
		expose: opts.NewListOpts(nil),
@@ -51,8 +51,7 @@ func addFlags(flags *pflag.FlagSet) *ContainerOptions {
	_ = flags.MarkHidden("interactive")
	flags.BoolVarP(&copts.tty, "tty", "t", true, "Allocate a pseudo-TTY")
	_ = flags.MarkHidden("tty")
	flags.BoolVar(&copts.autoRemove, "rm", true, "Automatically remove the container when it exits")
	_ = flags.MarkHidden("rm")
	flags.BoolVar(&copts.autoRemove, "rm", false, "Automatically remove the container when it exits")

	// Security
	flags.BoolVar(&copts.privileged, "privileged", true, "Give extended privileges to this container")
@@ -257,8 +256,6 @@ type HostConfig struct {
}

type RunOptions struct {
	SigProxy bool
	DetachKeys string
	Platform string
	Pull string // always, missing, never
}
|
||||
|
@@ -1,61 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/moby/sys/signal"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// ForwardAllSignals forwards signals to the container
|
||||
//
|
||||
// The channel you pass in must already be setup to receive any signals you want to forward.
|
||||
func ForwardAllSignals(ctx context.Context, apiClient client.ContainerAPIClient, cid string, sigc <-chan os.Signal) {
|
||||
var (
|
||||
s os.Signal
|
||||
ok bool
|
||||
)
|
||||
for {
|
||||
select {
|
||||
case s, ok = <-sigc:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
}
|
||||
|
||||
if s == signal.SIGCHLD || s == signal.SIGPIPE {
|
||||
continue
|
||||
}
|
||||
|
||||
// In go1.14+, the go runtime issues SIGURG as an interrupt to support pre-emptable system calls on Linux.
|
||||
// Since we can't forward that along we'll check that here.
|
||||
if isRuntimeSig(s) {
|
||||
continue
|
||||
}
|
||||
var sig string
|
||||
for sigStr, sigN := range signal.SignalMap {
|
||||
if sigN == s {
|
||||
sig = sigStr
|
||||
break
|
||||
}
|
||||
}
|
||||
if sig == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := apiClient.ContainerKill(ctx, cid, sig); err != nil {
|
||||
log.Debugf("Error sending signal: %s", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func notifyAllSignals() chan os.Signal {
|
||||
sigc := make(chan os.Signal, 128)
|
||||
gosignal.Notify(sigc)
|
||||
return sigc
|
||||
}
|
@@ -1,13 +0,0 @@
//go:build !windows

package dev

import (
	"os"

	"golang.org/x/sys/unix"
)

func isRuntimeSig(s os.Signal) bool {
	return s == unix.SIGURG
}
@@ -1,7 +0,0 @@
package dev

import "os"

func isRuntimeSig(_ os.Signal) bool {
	return false
}
@@ -1,97 +0,0 @@
|
||||
package dev
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
gosignal "os/signal"
|
||||
"runtime"
|
||||
"time"
|
||||
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/moby/sys/signal"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// resizeTtyTo resizes tty to specific height and width
|
||||
func resizeTtyTo(ctx context.Context, apiClient client.ContainerAPIClient, id string, height, width uint, isExec bool) error {
|
||||
if height == 0 && width == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
options := container.ResizeOptions{
|
||||
Height: height,
|
||||
Width: width,
|
||||
}
|
||||
|
||||
var err error
|
||||
if isExec {
|
||||
err = apiClient.ContainerExecResize(ctx, id, options)
|
||||
} else {
|
||||
err = apiClient.ContainerResize(ctx, id, options)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.Debugf("Error resize: %s\r", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// resizeTty is to resize the tty with cli out's tty size
|
||||
func resizeTty(ctx context.Context, cli command.Cli, id string, isExec bool) error {
|
||||
height, width := cli.Out().GetTtySize()
|
||||
return resizeTtyTo(ctx, cli.Client(), id, height, width, isExec)
|
||||
}
|
||||
|
||||
// initTtySize is to init the tty's size to the same as the window, if there is an error, it will retry 10 times.
|
||||
func initTtySize(ctx context.Context, cli command.Cli, id string, isExec bool, resizeTtyFunc func(ctx context.Context, cli command.Cli, id string, isExec bool) error) {
|
||||
rttyFunc := resizeTtyFunc
|
||||
if rttyFunc == nil {
|
||||
rttyFunc = resizeTty
|
||||
}
|
||||
if err := rttyFunc(ctx, cli, id, isExec); err != nil {
|
||||
go func() {
|
||||
var err error
|
||||
for retry := 0; retry < 10; retry++ {
|
||||
time.Sleep(time.Duration(retry+1) * 10 * time.Millisecond)
|
||||
if err = rttyFunc(ctx, cli, id, isExec); err == nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
fmt.Fprintln(cli.Err(), "Failed to resize tty, using default size")
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
// MonitorTtySize updates the container tty size when the terminal tty changes size
|
||||
func MonitorTtySize(ctx context.Context, cli command.Cli, id string, isExec bool) error {
|
||||
initTtySize(ctx, cli, id, isExec, resizeTty)
|
||||
if runtime.GOOS == "windows" {
|
||||
go func() {
|
||||
prevH, prevW := cli.Out().GetTtySize()
|
||||
for {
|
||||
time.Sleep(time.Millisecond * 250)
|
||||
h, w := cli.Out().GetTtySize()
|
||||
|
||||
if prevW != w || prevH != h {
|
||||
resizeTty(ctx, cli, id, isExec)
|
||||
}
|
||||
prevH = h
|
||||
prevW = w
|
||||
}
|
||||
}()
|
||||
} else {
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
gosignal.Notify(sigchan, signal.SIGWINCH)
|
||||
go func() {
|
||||
for range sigchan {
|
||||
resizeTty(ctx, cli, id, isExec)
|
||||
}
|
||||
}()
|
||||
}
|
||||
return nil
|
||||
}
|
@@ -3,174 +3,49 @@ package dev
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
image2 "github.com/docker/cli/cli/command/image"
|
||||
"github.com/docker/cli/cli/streams"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/events"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/api/types/image"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/versions"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/docker/errdefs"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/docker/docker/pkg/stdcopy"
|
||||
"github.com/moby/sys/signal"
|
||||
"github.com/moby/term"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
)
|
||||
|
||||
func waitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containerID string, waitRemove bool) <-chan int {
|
||||
if len(containerID) == 0 {
|
||||
// containerID can never be empty
|
||||
panic("Internal Error: waitExitOrRemoved needs a containerID as parameter")
|
||||
// Pull constants
|
||||
const (
|
||||
PullImageAlways = "always"
|
||||
PullImageMissing = "missing" // Default (matches previous behavior)
|
||||
PullImageNever = "never"
|
||||
)
|
||||
|
||||
func ConvertK8sImagePullPolicyToDocker(policy corev1.PullPolicy) string {
|
||||
switch policy {
|
||||
case corev1.PullAlways:
|
||||
return PullImageAlways
|
||||
case corev1.PullNever:
|
||||
return PullImageNever
|
||||
default:
|
||||
return PullImageMissing
|
||||
}
|
||||
}
|
||||
|
||||
// Older versions used the Events API, and even older versions did not
|
||||
// support server-side removal. This legacyWaitExitOrRemoved method
|
||||
// preserves that old behavior and any issues it may have.
|
||||
if versions.LessThan(apiClient.ClientVersion(), "1.30") {
|
||||
return legacyWaitExitOrRemoved(ctx, apiClient, containerID, waitRemove)
|
||||
}
|
||||
|
||||
condition := container.WaitConditionNextExit
|
||||
if waitRemove {
|
||||
condition = container.WaitConditionRemoved
|
||||
}
|
||||
|
||||
resultC, errC := apiClient.ContainerWait(ctx, containerID, condition)
|
||||
|
||||
statusC := make(chan int)
|
||||
go func() {
|
||||
select {
|
||||
case result := <-resultC:
|
||||
if result.Error != nil {
|
||||
log.Errorf("Error waiting for container: %v", result.Error.Message)
|
||||
statusC <- 125
|
||||
} else {
|
||||
statusC <- int(result.StatusCode)
|
||||
}
|
||||
case err := <-errC:
|
||||
log.Errorf("Error waiting for container: %v", err)
|
||||
statusC <- 125
|
||||
}
|
||||
}()
|
||||
|
||||
return statusC
|
||||
}
|
||||
|
||||
func legacyWaitExitOrRemoved(ctx context.Context, apiClient client.APIClient, containerID string, waitRemove bool) <-chan int {
|
||||
var removeErr error
|
||||
statusChan := make(chan int)
|
||||
exitCode := 125
|
||||
|
||||
// Get events via Events API
|
||||
f := filters.NewArgs()
|
||||
f.Add("type", "container")
|
||||
f.Add("container", containerID)
|
||||
options := types.EventsOptions{
|
||||
Filters: f,
|
||||
}
|
||||
eventCtx, cancel := context.WithCancel(ctx)
|
||||
eventq, errq := apiClient.Events(eventCtx, options)
|
||||
|
||||
eventProcessor := func(e events.Message) bool {
|
||||
stopProcessing := false
|
||||
switch e.Status {
|
||||
case "die":
|
||||
if v, ok := e.Actor.Attributes["exitCode"]; ok {
|
||||
code, cerr := strconv.Atoi(v)
|
||||
if cerr != nil {
|
||||
log.Errorf("Failed to convert exitcode '%q' to int: %v", v, cerr)
|
||||
} else {
|
||||
exitCode = code
|
||||
}
|
||||
}
|
||||
if !waitRemove {
|
||||
stopProcessing = true
|
||||
} else if versions.LessThan(apiClient.ClientVersion(), "1.25") {
|
||||
// If we are talking to an older daemon, `AutoRemove` is not supported.
|
||||
// We need to fall back to the old behavior, which is client-side removal
|
||||
go func() {
|
||||
removeErr = apiClient.ContainerRemove(ctx, containerID, container.RemoveOptions{RemoveVolumes: true})
|
||||
if removeErr != nil {
|
||||
log.Errorf("Error removing container: %v", removeErr)
|
||||
cancel() // cancel the event Q
|
||||
}
|
||||
}()
|
||||
}
|
||||
case "detach":
|
||||
exitCode = 0
|
||||
stopProcessing = true
|
||||
case "destroy":
|
||||
stopProcessing = true
|
||||
}
|
||||
return stopProcessing
|
||||
}
|
||||
|
||||
go func() {
|
||||
defer func() {
|
||||
statusChan <- exitCode // must always send an exit code or the caller will block
|
||||
cancel()
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-eventCtx.Done():
|
||||
if removeErr != nil {
|
||||
return
|
||||
}
|
||||
case evt := <-eventq:
|
||||
if eventProcessor(evt) {
|
||||
return
|
||||
}
|
||||
case err := <-errq:
|
||||
log.Errorf("Error getting events from daemon: %v", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return statusChan
|
||||
}
|
||||
|
||||
func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, id string) error {
|
||||
c, err := dockerCli.Client().ContainerInspect(ctx, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
options := container.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Follow: true,
|
||||
}
|
||||
logStream, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer logStream.Close()
|
||||
|
||||
func RunLogsWaitRunning(ctx context.Context, name string) error {
|
||||
buf := bytes.NewBuffer(nil)
|
||||
w := io.MultiWriter(buf, dockerCli.Out())
|
||||
w := io.MultiWriter(buf, os.Stdout)
|
||||
|
||||
args := []string{"logs", name, "--since", "0m", "--details", "--follow"}
|
||||
cmd := exec.Command("docker", args...)
|
||||
cmd.Stdout = w
|
||||
go cmd.Start()
|
||||
|
||||
cancel, cancelFunc := context.WithCancel(ctx)
|
||||
defer cancelFunc()
|
||||
@@ -190,440 +65,181 @@ func runLogsWaitRunning(ctx context.Context, dockerCli command.Cli, id string) e
|
||||
var errChan = make(chan error)
|
||||
go func() {
|
||||
var err error
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(w, logStream)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(w, dockerCli.Err(), logStream)
|
||||
}
|
||||
_, err = stdcopy.StdCopy(w, os.Stdout, buf)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
}
|
||||
}()
|
||||
|
||||
select {
|
||||
case err = <-errChan:
|
||||
case err := <-errChan:
|
||||
return err
|
||||
case <-cancel.Done():
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func runLogsSinceNow(dockerCli command.Cli, id string, follow bool) error {
|
||||
ctx := context.Background()
|
||||
|
||||
c, err := dockerCli.Client().ContainerInspect(ctx, id)
|
||||
if err != nil {
|
||||
func RunLogsSinceNow(name string, follow bool) error {
|
||||
args := []string{"logs", name, "--since", "0m", "--details"}
|
||||
if follow {
|
||||
args = append(args, "--follow")
|
||||
}
|
||||
output, err := exec.Command("docker", args...).CombinedOutput()
|
||||
_, err = stdcopy.StdCopy(os.Stdout, os.Stderr, bytes.NewReader(output))
|
||||
return err
|
||||
}
|
||||
|
||||
options := container.LogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Since: "0m",
|
||||
Follow: follow,
|
||||
// CreateNetwork
|
||||
// docker create kubevpn-traffic-manager --labels owner=config.ConfigMapPodTrafficManager --subnet 223.255.0.0/16 --gateway 223.255.0.100
|
||||
func CreateNetwork(ctx context.Context, name string) (string, error) {
|
||||
args := []string{
|
||||
"network",
|
||||
"inspect",
|
||||
name,
|
||||
}
|
||||
responseBody, err := dockerCli.Client().ContainerLogs(ctx, c.ID, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
if c.Config.Tty {
|
||||
_, err = io.Copy(dockerCli.Out(), responseBody)
|
||||
} else {
|
||||
_, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody)
|
||||
}
|
||||
return err
|
||||
_, err := exec.CommandContext(ctx, "docker", args...).CombinedOutput()
|
||||
if err == nil {
|
||||
return name, nil
|
||||
}
|
||||
|
||||
func createNetwork(ctx context.Context, cli *client.Client) (string, error) {
|
||||
by := map[string]string{"owner": config.ConfigMapPodTrafficManager}
|
||||
list, _ := cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
for _, resource := range list {
|
||||
if reflect.DeepEqual(resource.Labels, by) {
|
||||
return resource.ID, nil
|
||||
}
|
||||
args = []string{
|
||||
"network",
|
||||
"create",
|
||||
name,
|
||||
"--label", "owner=" + name,
|
||||
"--subnet", config.DockerCIDR.String(),
|
||||
"--gateway", config.DockerRouterIP.String(),
|
||||
"--driver", "bridge",
|
||||
"--scope", "local",
|
||||
}
|
||||
|
||||
create, err := cli.NetworkCreate(ctx, config.ConfigMapPodTrafficManager, types.NetworkCreate{
|
||||
Driver: "bridge",
|
||||
Scope: "local",
|
||||
IPAM: &network.IPAM{
|
||||
Driver: "",
|
||||
Options: nil,
|
||||
Config: []network.IPAMConfig{
|
||||
{
|
||||
Subnet: config.DockerCIDR.String(),
|
||||
Gateway: config.DockerRouterIP.String(),
|
||||
},
|
||||
},
|
||||
},
|
||||
//Options: map[string]string{"--icc": "", "--ip-masq": ""},
|
||||
Labels: by,
|
||||
})
|
||||
if err != nil {
|
||||
if errdefs.IsForbidden(err) {
|
||||
list, _ = cli.NetworkList(ctx, types.NetworkListOptions{})
|
||||
for _, resource := range list {
|
||||
if reflect.DeepEqual(resource.Labels, by) {
|
||||
return resource.ID, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
return create.ID, nil
|
||||
}
|
||||
|
||||
// Pull constants
|
||||
const (
|
||||
PullImageAlways = "always"
|
||||
PullImageMissing = "missing" // Default (matches previous behavior)
|
||||
PullImageNever = "never"
|
||||
)
|
||||
|
||||
func pullImage(ctx context.Context, dockerCli command.Cli, img string, options RunOptions) error {
|
||||
encodedAuth, err := command.RetrieveAuthTokenFromImage(dockerCli.ConfigFile(), img)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
responseBody, err := dockerCli.Client().ImageCreate(ctx, img, image.CreateOptions{
|
||||
RegistryAuth: encodedAuth,
|
||||
Platform: options.Platform,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer responseBody.Close()
|
||||
|
||||
out := dockerCli.Err()
|
||||
return jsonmessage.DisplayJSONMessagesToStream(responseBody, streams.NewOut(out), nil)
|
||||
}
|
||||
|
||||
//nolint:gocyclo
|
||||
func createContainer(ctx context.Context, dockerCli command.Cli, runConfig *RunConfig) (string, error) {
|
||||
config := runConfig.config
|
||||
hostConfig := runConfig.hostConfig
|
||||
networkingConfig := runConfig.networkingConfig
|
||||
var (
|
||||
trustedRef reference.Canonical
|
||||
namedRef reference.Named
|
||||
)
|
||||
|
||||
ref, err := reference.ParseAnyReference(config.Image)
|
||||
id, err := exec.CommandContext(ctx, "docker", args...).CombinedOutput()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if named, ok := ref.(reference.Named); ok {
|
||||
namedRef = reference.TagNameOnly(named)
|
||||
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && dockerCli.ContentTrustEnabled() {
|
||||
var err error
|
||||
trustedRef, err = image2.TrustedReference(ctx, dockerCli, taggedRef)
|
||||
return string(id), nil
|
||||
}
|
||||
|
||||
func RunContainer(ctx context.Context, runConfig *RunConfig) error {
|
||||
var result []string
|
||||
result = append(result, "run")
|
||||
result = append(result, runConfig.options...)
|
||||
if len(runConfig.command) != 0 {
|
||||
result = append(result, "--entrypoint", strings.Join(runConfig.command, " "))
|
||||
}
|
||||
result = append(result, runConfig.image)
|
||||
result = append(result, runConfig.args...)
|
||||
|
||||
cmd := exec.CommandContext(ctx, "docker", result...)
|
||||
cmd.Stdin = os.Stdin
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
log.Debugf("Run container with cmd: %v", cmd.Args)
|
||||
err := cmd.Start()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
config.Image = reference.FamiliarString(trustedRef)
|
||||
}
|
||||
}
|
||||
|
||||
pullAndTagImage := func() error {
|
||||
if err = pullImage(ctx, dockerCli, config.Image, runConfig.Options); err != nil {
|
||||
log.Errorf("Failed to run container with cmd: %v: %v", cmd.Args, err)
|
||||
return err
|
||||
}
|
||||
if taggedRef, ok := namedRef.(reference.NamedTagged); ok && trustedRef != nil {
|
||||
return image2.TagTrusted(ctx, dockerCli, trustedRef, taggedRef)
|
||||
}
|
||||
return nil
|
||||
return cmd.Wait()
|
||||
}
|
||||
|
||||
if runConfig.Options.Pull == PullImageAlways {
|
||||
if err = pullAndTagImage(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.Out().GetTtySize()
|
||||
|
||||
response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, runConfig.platform, runConfig.name)
|
||||
if err != nil {
|
||||
// Pull image if it does not exist locally and we have the PullImageMissing option. Default behavior.
|
||||
if errdefs.IsNotFound(err) && namedRef != nil && runConfig.Options.Pull == PullImageMissing {
|
||||
// we don't want to write to stdout anything apart from container.ID
|
||||
_, _ = fmt.Fprintf(dockerCli.Err(), "Unable to find image '%s' locally\n", reference.FamiliarString(namedRef))
|
||||
|
||||
if err = pullAndTagImage(); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var retryErr error
|
||||
response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, runConfig.platform, runConfig.name)
|
||||
if retryErr != nil {
|
||||
return "", retryErr
|
||||
}
|
||||
} else {
|
||||
return "", err
|
||||
}
|
||||
}
|
||||
|
||||
for _, w := range response.Warnings {
|
||||
_, _ = fmt.Fprintf(dockerCli.Err(), "WARNING: %s\n", w)
|
||||
}
|
||||
return response.ID, err
|
||||
}
|
||||
|
||||
func runContainer(ctx context.Context, dockerCli command.Cli, runConfig *RunConfig) error {
|
||||
config := runConfig.config
|
||||
stdout, stderr := dockerCli.Out(), dockerCli.Err()
|
||||
apiClient := dockerCli.Client()
|
||||
|
||||
config.ArgsEscaped = false
|
||||
|
||||
if err := dockerCli.In().CheckTty(config.AttachStdin, config.Tty); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ctx, cancelFun := context.WithCancel(ctx)
|
||||
defer cancelFun()
|
||||
|
||||
containerID, err := createContainer(ctx, dockerCli, runConfig)
|
||||
if err != nil {
|
||||
reportError(stderr, err.Error())
|
||||
return runStartContainerErr(err)
|
||||
}
|
||||
if runConfig.Options.SigProxy {
|
||||
sigc := notifyAllSignals()
|
||||
go ForwardAllSignals(ctx, apiClient, containerID, sigc)
|
||||
defer signal.StopCatch(sigc)
|
||||
}
|
||||
|
||||
var (
|
||||
waitDisplayID chan struct{}
|
||||
errCh chan error
|
||||
)
|
||||
if !config.AttachStdout && !config.AttachStderr {
|
||||
// Make this asynchronous to allow the client to write to stdin before having to read the ID
|
||||
waitDisplayID = make(chan struct{})
|
||||
go func() {
|
||||
defer close(waitDisplayID)
|
||||
_, _ = fmt.Fprintln(stdout, containerID)
|
||||
}()
|
||||
}
|
||||
attach := config.AttachStdin || config.AttachStdout || config.AttachStderr
|
||||
if attach {
|
||||
closeFn, err := attachContainer(ctx, dockerCli, containerID, &errCh, config, container.AttachOptions{
|
||||
Stream: true,
|
||||
Stdin: config.AttachStdin,
|
||||
Stdout: config.AttachStdout,
|
||||
Stderr: config.AttachStderr,
|
||||
DetachKeys: dockerCli.ConfigFile().DetachKeys,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer closeFn()
|
||||
}
|
||||
|
||||
statusChan := waitExitOrRemoved(ctx, apiClient, containerID, runConfig.hostConfig.AutoRemove)
|
||||
|
||||
// start the container
|
||||
if err := apiClient.ContainerStart(ctx, containerID, container.StartOptions{}); err != nil {
|
||||
// If we have hijackedIOStreamer, we should notify
|
||||
// hijackedIOStreamer we are going to exit and wait
|
||||
// to avoid the terminal are not restored.
|
||||
if attach {
|
||||
cancelFun()
|
||||
<-errCh
|
||||
}
|
||||
|
||||
reportError(stderr, err.Error())
|
||||
if runConfig.hostConfig.AutoRemove {
|
||||
// wait container to be removed
|
||||
<-statusChan
|
||||
}
|
||||
return runStartContainerErr(err)
|
||||
}
|
||||
|
||||
if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.Out().IsTerminal() {
|
||||
if err := MonitorTtySize(ctx, dockerCli, containerID, false); err != nil {
|
||||
_, _ = fmt.Fprintln(stderr, "Error monitoring TTY size:", err)
|
||||
}
|
||||
}
|
||||
|
||||
if errCh != nil {
|
||||
if err := <-errCh; err != nil {
|
||||
if _, ok := err.(term.EscapeError); ok {
|
||||
// The user entered the detach escape sequence.
|
||||
return nil
|
||||
}
|
||||
|
||||
log.Debugf("Error hijack: %s", err)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
// Detached mode: wait for the id to be displayed and return.
|
||||
if !config.AttachStdout && !config.AttachStderr {
|
||||
// Detached mode
|
||||
<-waitDisplayID
|
||||
return nil
|
||||
}
|
||||
|
||||
status := <-statusChan
|
||||
if status != 0 {
|
||||
return cli.StatusError{StatusCode: status}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func attachContainer(ctx context.Context, dockerCli command.Cli, containerID string, errCh *chan error, config *container.Config, options container.AttachOptions) (func(), error) {
|
||||
resp, errAttach := dockerCli.Client().ContainerAttach(ctx, containerID, options)
|
||||
if errAttach != nil {
|
||||
return nil, errAttach
|
||||
}
|
||||
|
||||
var (
|
||||
out, cerr io.Writer
|
||||
in io.ReadCloser
|
||||
)
|
||||
if options.Stdin {
|
||||
in = dockerCli.In()
|
||||
}
|
||||
if options.Stdout {
|
||||
out = dockerCli.Out()
|
||||
}
|
||||
if options.Stderr {
|
||||
if config.Tty {
|
||||
cerr = dockerCli.Out()
|
||||
} else {
|
||||
cerr = dockerCli.Err()
|
||||
}
|
||||
}
|
||||
|
||||
ch := make(chan error, 1)
|
||||
*errCh = ch
|
||||
|
||||
go func() {
|
||||
ch <- func() error {
|
||||
streamer := hijackedIOStreamer{
|
||||
streams: dockerCli,
|
||||
inputStream: in,
|
||||
outputStream: out,
|
||||
errorStream: cerr,
|
||||
resp: resp,
|
||||
tty: config.Tty,
|
||||
detachKeys: options.DetachKeys,
|
||||
}
|
||||
|
||||
if errHijack := streamer.stream(ctx); errHijack != nil {
|
||||
return errHijack
|
||||
}
|
||||
return errAttach
|
||||
}()
|
||||
}()
|
||||
return resp.Close, nil
|
||||
}
|
||||
|
||||
// reportError is a utility method that prints a user-friendly message
|
||||
// containing the error that occurred during parsing and a suggestion to get help
|
||||
func reportError(stderr io.Writer, str string) {
|
||||
str = strings.TrimSuffix(str, ".") + "."
|
||||
_, _ = fmt.Fprintln(stderr, "docker:", str)
|
||||
}
|
||||
|
||||
// if container start fails with 'not found'/'no such' error, return 127
|
||||
// if container start fails with 'permission denied' error, return 126
|
||||
// return 125 for generic docker daemon failures
|
||||
func runStartContainerErr(err error) error {
|
||||
trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ")
|
||||
statusError := cli.StatusError{StatusCode: 125, Status: trimmedErr}
|
||||
if strings.Contains(trimmedErr, "executable file not found") ||
|
||||
strings.Contains(trimmedErr, "no such file or directory") ||
|
||||
strings.Contains(trimmedErr, "system cannot find the file specified") {
|
||||
statusError = cli.StatusError{StatusCode: 127, Status: trimmedErr}
|
||||
} else if strings.Contains(trimmedErr, syscall.EACCES.Error()) ||
|
||||
strings.Contains(trimmedErr, syscall.EISDIR.Error()) {
|
||||
statusError = cli.StatusError{StatusCode: 126, Status: trimmedErr}
|
||||
}
|
||||
|
||||
return statusError
|
||||
}
|
||||
|
||||
func run(ctx context.Context, cli *client.Client, dockerCli *command.DockerCli, runConfig *RunConfig) (id string, err error) {
|
||||
rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
var config = runConfig.config
|
||||
var hostConfig = runConfig.hostConfig
|
||||
var platform = runConfig.platform
|
||||
var networkConfig = runConfig.networkingConfig
|
||||
var name = runConfig.name
|
||||
|
||||
var needPull bool
|
||||
var img types.ImageInspect
|
||||
img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
|
||||
if errdefs.IsNotFound(err) {
|
||||
log.Infof("Needs to pull image %s", config.Image)
|
||||
needPull = true
|
||||
err = nil
|
||||
} else if err != nil {
|
||||
log.Errorf("Image inspect failed: %v", err)
|
||||
return
|
||||
}
|
||||
if platform != nil && platform.Architecture != "" && platform.OS != "" {
|
||||
if img.Os != platform.OS || img.Architecture != platform.Architecture {
|
||||
needPull = true
|
||||
}
|
||||
}
|
||||
if needPull {
|
||||
err = pkgssh.PullImage(ctx, runConfig.platform, cli, dockerCli, config.Image, nil)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to pull image: %s, err: %s", config.Image, err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var create container.CreateResponse
|
||||
create, err = cli.ContainerCreate(ctx, config, hostConfig, networkConfig, platform, name)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to create container: %s, err: %s", name, err)
|
||||
return
|
||||
}
|
||||
id = create.ID
|
||||
log.Infof("Created container: %s", name)
|
||||
err = cli.ContainerStart(ctx, create.ID, container.StartOptions{})
|
||||
if err != nil {
|
||||
log.Errorf("Failed to startup container %s: %v", name, err)
|
||||
return
|
||||
}
|
||||
func WaitDockerContainerRunning(ctx context.Context, name string) error {
|
||||
log.Infof("Wait container %s to be running...", name)
|
||||
var inspect types.ContainerJSON
|
||||
ctx2, cancelFunc := context.WithCancel(ctx)
|
||||
wait.UntilWithContext(ctx2, func(ctx context.Context) {
|
||||
inspect, err = cli.ContainerInspect(ctx, create.ID)
|
||||
if errdefs.IsNotFound(err) {
|
||||
cancelFunc()
|
||||
return
|
||||
} else if err != nil {
|
||||
cancelFunc()
|
||||
return
|
||||
|
||||
for ctx.Err() == nil {
|
||||
time.Sleep(time.Second * 1)
|
||||
inspect, err := ContainerInspect(ctx, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if inspect.State != nil && (inspect.State.Status == "exited" || inspect.State.Status == "dead" || inspect.State.Dead) {
|
||||
cancelFunc()
|
||||
err = errors.New(fmt.Sprintf("container status: %s", inspect.State.Status))
|
||||
return
|
||||
break
|
||||
}
|
||||
if inspect.State != nil && inspect.State.Running {
|
||||
cancelFunc()
|
||||
return
|
||||
break
|
||||
}
|
||||
}, time.Second)
|
||||
if err != nil {
|
||||
log.Errorf("Failed to wait container to be ready: %v", err)
|
||||
_ = runLogsSinceNow(dockerCli, id, false)
|
||||
return
|
||||
}
|
||||
|
||||
log.Infof("Container %s is running now", name)
|
||||
return
|
||||
return nil
|
||||
}
|
||||
|
||||
func ContainerInspect(ctx context.Context, name string) (types.ContainerJSON, error) {
	output, err := exec.CommandContext(ctx, "docker", "inspect", name).CombinedOutput()
	if err != nil {
		log.Errorf("Failed to wait container to be ready output: %s: %v", string(output), err)
		_ = RunLogsSinceNow(name, false)
		return types.ContainerJSON{}, err
	}
	var inspect []types.ContainerJSON
	rdr := bytes.NewReader(output)
	err = json.NewDecoder(rdr).Decode(&inspect)
	if err != nil {
		return types.ContainerJSON{}, err
	}
	if len(inspect) == 0 {
		return types.ContainerJSON{}, err
	}
	return inspect[0], nil
}

func NetworkInspect(ctx context.Context, name string) (types.NetworkResource, error) {
	//var cli *client.Client
	//var dockerCli *command.DockerCli
	//cli.NetworkInspect()
	output, err := exec.CommandContext(ctx, "docker", "network", "inspect", name).CombinedOutput()
	if err != nil {
		log.Errorf("Failed to wait container to be ready: %v", err)
		_ = RunLogsSinceNow(name, false)
		return types.NetworkResource{}, err
	}
	var inspect []types.NetworkResource
	rdr := bytes.NewReader(output)
	err = json.NewDecoder(rdr).Decode(&inspect)
	if err != nil {
		return types.NetworkResource{}, err
	}
	if len(inspect) == 0 {
		return types.NetworkResource{}, err
	}
	return inspect[0], nil
}

func NetworkRemove(ctx context.Context, name string) error {
	output, err := exec.CommandContext(ctx, "docker", "network", "remove", name).CombinedOutput()
	if err != nil && strings.Contains(string(output), "not found") {
		return nil
	}
	return err
}

// NetworkDisconnect
// docker network disconnect --force
func NetworkDisconnect(ctx context.Context, containerName string) ([]byte, error) {
	output, err := exec.CommandContext(ctx, "docker", "network", "disconnect", "--force", config.ConfigMapPodTrafficManager, containerName).CombinedOutput()
	if err != nil && strings.Contains(string(output), "not found") {
		return output, nil
	}
	return output, err
}

// ContainerRemove
// docker remove --force
func ContainerRemove(ctx context.Context, containerName string) ([]byte, error) {
	output, err := exec.CommandContext(ctx, "docker", "remove", "--force", containerName).CombinedOutput()
	if err != nil && strings.Contains(string(output), "not found") {
		return output, nil
	}
	return output, err
}

func ContainerKill(ctx context.Context, name *string) ([]byte, error) {
	output, err := exec.CommandContext(ctx, "docker", "kill", *name, "--signal", "SIGTERM").CombinedOutput()
	if err != nil && strings.Contains(string(output), "not found") {
		return output, nil
	}
	return output, err
}
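Taken together, these helpers let callers drive the container lifecycle entirely through the docker CLI. A hypothetical caller, sketched only to show how they compose — the import path is assumed from the module layout in the imports above, the container name is illustrative, and only the helper names and signatures come from this hunk:

package main

import (
	"context"
	"fmt"

	dev "github.com/wencaiwulue/kubevpn/v2/pkg/dev" // assumed location of the helpers above
)

func main() {
	ctx := context.Background()
	name := "kubevpn-local-container" // illustrative container name

	// Block until `docker inspect` reports the container as running.
	if err := dev.WaitDockerContainerRunning(ctx, name); err != nil {
		fmt.Println("container never reached running state:", err)
		// Surface whatever the container printed before it died.
		_ = dev.RunLogsSinceNow(name, false)
		return
	}

	// Tear down: SIGTERM via `docker kill`, then stream the final logs,
	// mirroring the rollback function registered in Connect below.
	defer func() {
		_, _ = dev.ContainerKill(context.Background(), &name)
		_ = dev.RunLogsSinceNow(name, true)
	}()

	fmt.Println("container is up:", name)
}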
@@ -5,21 +5,14 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/platforms"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
typescontainer "github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/api/types/network"
|
||||
"github.com/docker/docker/api/types/strslice"
|
||||
"github.com/docker/docker/client"
|
||||
"github.com/docker/go-connections/nat"
|
||||
"github.com/google/uuid"
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"github.com/spf13/pflag"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
@@ -32,7 +25,6 @@ import (
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/inject"
|
||||
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
|
||||
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
|
||||
)
|
||||
@@ -60,10 +52,6 @@ type Options struct {
|
||||
RunOptions RunOptions
|
||||
ContainerOptions *ContainerOptions
|
||||
|
||||
// inner
|
||||
cli *client.Client
|
||||
dockerCli *command.DockerCli
|
||||
|
||||
factory cmdutil.Factory
|
||||
clientset *kubernetes.Clientset
|
||||
restclient *rest.RESTClient
|
||||
@@ -73,22 +61,10 @@ type Options struct {
|
||||
rollbackFuncList []func() error
|
||||
}
|
||||
|
||||
func (option *Options) Main(ctx context.Context, sshConfig *pkgssh.SshConfig, flags *pflag.FlagSet, transferImage bool, imagePullSecretName string) error {
func (option *Options) Main(ctx context.Context, sshConfig *pkgssh.SshConfig, config *Config, hostConfig *HostConfig, imagePullSecretName string) error {
    mode := typescontainer.NetworkMode(option.ContainerOptions.netMode.NetworkMode())
    if mode.IsContainer() {
        log.Infof("Network mode container is %s", mode.ConnectedContainer())
        inspect, err := option.cli.ContainerInspect(ctx, mode.ConnectedContainer())
        if err != nil {
            log.Errorf("Failed to inspect container %s, err: %v", mode.ConnectedContainer(), err)
            return err
        }
        if inspect.State == nil {
            return fmt.Errorf("cannot get container status, please make sure the container name is valid")
        }
        if !inspect.State.Running {
            return fmt.Errorf("container %s status is %s, expected running, please make sure the outer docker container name is correct", mode.ConnectedContainer(), inspect.State.Status)
        }
        log.Infof("Container %s is running", mode.ConnectedContainer())
    } else if mode.IsDefault() && util.RunningInContainer() {
        hostname, err := os.Hostname()
        if err != nil {
@@ -101,14 +77,8 @@ func (option *Options) Main(ctx context.Context, sshConfig *pkgssh.SshConfig, fl
        }
    }

    config, hostConfig, err := Parse(flags, option.ContainerOptions)
    // just in case the Parse does not exit
    if err != nil {
        return err
    }

    // Connect to cluster, in container or host
    err = option.Connect(ctx, sshConfig, transferImage, imagePullSecretName, hostConfig.PortBindings)
    err := option.Connect(ctx, sshConfig, imagePullSecretName, hostConfig.PortBindings)
    if err != nil {
        log.Errorf("Connect to cluster failed, err: %v", err)
        return err
@@ -118,9 +88,8 @@ func (option *Options) Main(ctx context.Context, sshConfig *pkgssh.SshConfig, fl
}

// Connect to cluster network on docker container or host
func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig, transferImage bool, imagePullSecretName string, portBindings nat.PortMap) error {
    switch option.ConnectMode {
    case ConnectModeHost:
func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig, imagePullSecretName string, portBindings nat.PortMap) error {
    if option.ConnectMode == ConnectModeHost {
        daemonCli := daemon.GetClient(false)
        if daemonCli == nil {
            return fmt.Errorf("get nil daemon client")
@@ -144,19 +113,15 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
            KubeconfigBytes: string(kubeConfigBytes),
            Namespace: ns,
            Headers: option.Headers,
            Workloads: []string{option.Workload},
            Workloads: util.If(option.NoProxy, nil, []string{option.Workload}),
            ExtraRoute: option.ExtraRouteInfo.ToRPC(),
            Engine: string(option.Engine),
            OriginKubeconfigPath: util.GetKubeConfigPath(option.factory),
            TransferImage: transferImage,
            Image: config.Image,
            ImagePullSecretName: imagePullSecretName,
            Level: int32(logLevel),
            SshJump: sshConfig.ToRPC(),
        }
        if option.NoProxy {
            req.Workloads = nil
        }
        option.AddRollbackFunc(func() error {
            resp, err := daemonCli.Disconnect(ctx, &rpc.DisconnectRequest{
                KubeconfigBytes: ptr.To(string(kubeConfigBytes)),
@@ -177,24 +142,25 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
        }
        err = util.PrintGRPCStream[rpc.CloneResponse](resp)
        return err
    }

    case ConnectModeContainer:
        runConfig, err := option.CreateConnectContainer(portBindings)
    if option.ConnectMode == ConnectModeContainer {
        name, err := option.CreateConnectContainer(ctx, portBindings)
        if err != nil {
            return err
        }
        var id string
        log.Infof("Starting connect to cluster in container")
        id, err = run(ctx, option.cli, option.dockerCli, runConfig)
        err = WaitDockerContainerRunning(ctx, *name)
        if err != nil {
            return err
        }
        option.AddRollbackFunc(func() error {
            _ = option.cli.ContainerKill(context.Background(), id, "SIGTERM")
            _ = runLogsSinceNow(option.dockerCli, id, true)
            // docker kill --signal
            _, _ = ContainerKill(context.Background(), name)
            _ = RunLogsSinceNow(*name, true)
            return nil
        })
        err = runLogsWaitRunning(ctx, option.dockerCli, id)
        err = RunLogsWaitRunning(ctx, *name)
        if err != nil {
            // interrupted by signal KILL
            if errors.Is(err, context.Canceled) {
@@ -203,16 +169,17 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
            return err
        }
        log.Infof("Connected to cluster in container")
        err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", id))
        err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", *name))
        return err
    default:
        return fmt.Errorf("unsupported connect mode: %s", option.ConnectMode)
    }
}

func (option *Options) Dev(ctx context.Context, cConfig *Config, hostConfig *HostConfig) error {
    return fmt.Errorf("unsupported connect mode: %s", option.ConnectMode)
}

func (option *Options) Dev(ctx context.Context, config *Config, hostConfig *HostConfig) error {
    templateSpec, err := option.GetPodTemplateSpec()
    if err != nil {
        log.Errorf("Failed to get unstructured object error: %v", err)
        return err
    }

@@ -229,7 +196,13 @@ func (option *Options) Dev(ctx context.Context, cConfig *Config, hostConfig *Hos
        log.Errorf("Failed to get env from k8s: %v", err)
        return err
    }
    volume, err := util.GetVolume(ctx, option.factory, option.Namespace, list[0].Name)
    option.AddRollbackFunc(func() error {
        for _, s := range env {
            _ = os.RemoveAll(s)
        }
        return nil
    })
    volume, err := util.GetVolume(ctx, option.clientset, option.factory, option.Namespace, list[0].Name)
    if err != nil {
        log.Errorf("Failed to get volume from k8s: %v", err)
        return err
@@ -242,96 +215,20 @@ func (option *Options) Dev(ctx context.Context, cConfig *Config, hostConfig *Hos
        log.Errorf("Failed to get DNS from k8s: %v", err)
        return err
    }

    inject.RemoveContainers(templateSpec)
    if option.ContainerName != "" {
        var index = -1
        for i, c := range templateSpec.Spec.Containers {
            if option.ContainerName == c.Name {
                index = i
                break
            }
        }
        if index != -1 {
            templateSpec.Spec.Containers[0], templateSpec.Spec.Containers[index] = templateSpec.Spec.Containers[index], templateSpec.Spec.Containers[0]
        }
    }
    configList := ConvertPodToContainer(option.Namespace, *templateSpec, env, volume, dns)
    MergeDockerOptions(configList, option, cConfig, hostConfig)

    mode := container.NetworkMode(option.ContainerOptions.netMode.NetworkMode())
    if len(option.ContainerOptions.netMode.Value()) != 0 {
        log.Infof("Network mode is %s", option.ContainerOptions.netMode.NetworkMode())
        for _, runConfig := range configList[:] {
            // remove exposed ports
            runConfig.config.ExposedPorts = nil
            runConfig.hostConfig.NetworkMode = mode
            if mode.IsContainer() {
                runConfig.hostConfig.PidMode = typescontainer.PidMode(option.ContainerOptions.netMode.NetworkMode())
            }
            runConfig.hostConfig.PortBindings = nil

            // remove dns
            runConfig.hostConfig.DNS = nil
            runConfig.hostConfig.DNSOptions = nil
            runConfig.hostConfig.DNSSearch = nil
            runConfig.hostConfig.PublishAllPorts = false
            runConfig.config.Hostname = ""
        }
    } else {
        var networkID string
        networkID, err = createNetwork(ctx, option.cli)
    configList, err := option.ConvertPodToContainerConfigList(ctx, *templateSpec, config, hostConfig, env, volume, dns)
    if err != nil {
        log.Errorf("Failed to create network for %s: %v", option.Workload, err)
        return err
    }
        log.Infof("Create docker network %s", networkID)

        configList[len(configList)-1].networkingConfig.EndpointsConfig = map[string]*network.EndpointSettings{
            configList[len(configList)-1].name: {NetworkID: networkID},
        }
        var portMap = nat.PortMap{}
        var portSet = nat.PortSet{}
        for _, runConfig := range configList {
            for k, v := range runConfig.hostConfig.PortBindings {
                if oldValue, ok := portMap[k]; ok {
                    portMap[k] = append(oldValue, v...)
                } else {
                    portMap[k] = v
                }
            }
            for k, v := range runConfig.config.ExposedPorts {
                portSet[k] = v
            }
        }
        configList[len(configList)-1].hostConfig.PortBindings = portMap
        configList[len(configList)-1].config.ExposedPorts = portSet

        // skip the last container; the others join its network
        for _, runConfig := range configList[:len(configList)-1] {
            // remove exposed ports
            runConfig.config.ExposedPorts = nil
            runConfig.hostConfig.NetworkMode = typescontainer.NetworkMode("container:" + configList[len(configList)-1].name)
            runConfig.hostConfig.PidMode = typescontainer.PidMode("container:" + configList[len(configList)-1].name)
            runConfig.hostConfig.PortBindings = nil

            // remove dns
            runConfig.hostConfig.DNS = nil
            runConfig.hostConfig.DNSOptions = nil
            runConfig.hostConfig.DNSSearch = nil
            runConfig.hostConfig.PublishAllPorts = false
            runConfig.config.Hostname = ""
        }
    }

    option.AddRollbackFunc(func() error {
        _ = configList.Remove(ctx, option.cli)
        if hostConfig.AutoRemove {
            _ = configList.Remove(context.Background(), len(option.ContainerOptions.netMode.Value()) != 0)
        }
        return nil
    })
    return configList.Run(ctx, volume, option.cli, option.dockerCli)
    return configList.Run(ctx)
}

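// CreateConnectContainer assembles the `docker run` arguments for the kubevpn connect
// container (privileged, host-gateway host entries, the shared kubevpn docker network,
// plus any exposed/published ports), shells out to the docker CLI and returns the
// generated container name.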
func (option *Options) CreateConnectContainer(portBindings nat.PortMap) (*RunConfig, error) {
func (option *Options) CreateConnectContainer(ctx context.Context, portBindings nat.PortMap) (*string, error) {
    portMap, portSet, err := option.GetExposePort(portBindings)
    if err != nil {
        return nil, err
@@ -366,54 +263,49 @@ func (option *Options) CreateConnectContainer(portBindings nat.PortMap) (*RunCon
        entrypoint = append(entrypoint, "--extra-node-ip")
    }

    runConfig := &container.Config{
        User: "root",
        ExposedPorts: portSet,
        Env: []string{},
        Cmd: []string{},
        Healthcheck: nil,
        Image: config.Image,
        Entrypoint: entrypoint,
    }
    hostConfig := &container.HostConfig{
        Binds: []string{fmt.Sprintf("%s:%s", kubeconfigPath, "/root/.kube/config")},
        LogConfig: container.LogConfig{},
        PortBindings: portMap,
        AutoRemove: true,
        Privileged: true,
        RestartPolicy: container.RestartPolicy{},
        CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
        // https://stackoverflow.com/questions/24319662/from-inside-of-a-docker-container-how-do-i-connect-to-the-localhost-of-the-mach
        // couldn't get current server API group list: Get "https://host.docker.internal:62844/api?timeout=32s": tls: failed to verify certificate: x509: certificate is valid for kubernetes.default.svc.cluster.local, kubernetes.default.svc, kubernetes.default, kubernetes, istio-sidecar-injector.istio-system.svc, proxy-exporter.kube-system.svc, not host.docker.internal
        ExtraHosts: []string{"host.docker.internal:host-gateway", "kubernetes:host-gateway"},
        SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
        Sysctls: map[string]string{"net.ipv6.conf.all.disable_ipv6": strconv.Itoa(0)},
        Resources: container.Resources{},
    }
    newUUID, err := uuid.NewUUID()
    if err != nil {
        return nil, err
    }
    suffix := strings.ReplaceAll(newUUID.String(), "-", "")[:5]
    suffix := strings.ReplaceAll(uuid.New().String(), "-", "")[:5]
    name := util.Join(option.Namespace, "kubevpn", suffix)
    networkID, err := createNetwork(context.Background(), option.cli)
    _, err = CreateNetwork(ctx, config.ConfigMapPodTrafficManager)
    if err != nil {
        return nil, err
    }
    var platform *specs.Platform
    if option.RunOptions.Platform != "" {
        plat, _ := platforms.Parse(option.RunOptions.Platform)
        platform = &plat
    args := []string{
        "run",
        "--detach",
        "--volume", fmt.Sprintf("%s:%s", kubeconfigPath, "/root/.kube/config"),
        "--privileged",
        "--rm",
        "--cap-add", "SYS_PTRACE",
        "--cap-add", "SYS_ADMIN",
        "--security-opt", "apparmor=unconfined",
        "--security-opt", "seccomp=unconfined",
        "--sysctl", "net.ipv6.conf.all.disable_ipv6=0",
        "--add-host", "host.docker.internal:host-gateway",
        "--add-host", "kubernetes:host-gateway",
        "--network", config.ConfigMapPodTrafficManager,
        "--name", name,
    }
    c := &RunConfig{
        config: runConfig,
        hostConfig: hostConfig,
        networkingConfig: &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{name: {NetworkID: networkID}}},
        platform: platform,
        name: name,
        Options: RunOptions{Pull: PullImageMissing},
    for port := range portSet {
        args = append(args, "--expose", port.Port())
    }
    return c, nil
    for port, bindings := range portMap {
        args = append(args, "--publish", fmt.Sprintf("%s:%s", port.Port(), bindings[0].HostPort))
    }

    var result []string
    result = append(result, args...)
    result = append(result, config.Image)
    result = append(result, entrypoint...)
    err = ContainerRun(ctx, result...)
    if err != nil {
        return nil, err
    }
    return &name, nil
}

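// ContainerRun shells out to `docker <args...>`; output is not captured, only the exit error is returned.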
func ContainerRun(ctx context.Context, args ...string) error {
    err := exec.CommandContext(ctx, "docker", args...).Run()
    return err
}

func (option *Options) AddRollbackFunc(f func() error) {
@@ -424,24 +316,10 @@ func (option *Options) GetRollbackFuncList() []func() error {
    return option.rollbackFuncList
}

func AddDockerFlags(options *Options, p *pflag.FlagSet) {
    p.SetInterspersed(false)

    // These are flags not stored in Config/HostConfig
    p.StringVar(&options.RunOptions.Pull, "pull", PullImageMissing, `Pull image before running ("`+PullImageAlways+`"|"`+PullImageMissing+`"|"`+PullImageNever+`")`)
    p.BoolVar(&options.RunOptions.SigProxy, "sig-proxy", true, "Proxy received signals to the process")

    // Add an explicit help that doesn't have a `-h` to prevent the conflict
    // with hostname
    p.Bool("help", false, "Print usage")

    command.AddPlatformFlag(p, &options.RunOptions.Platform)
    options.ContainerOptions = addFlags(p)
}

func (option *Options) GetExposePort(portBinds nat.PortMap) (nat.PortMap, nat.PortSet, error) {
    templateSpec, err := option.GetPodTemplateSpec()
    if err != nil {
        log.Errorf("Failed to get unstructured object error: %v", err)
        return nil, nil, err
    }

@@ -483,16 +361,12 @@ func (option *Options) InitClient(f cmdutil.Factory) (err error) {
    if option.Namespace, _, err = option.factory.ToRawKubeConfigLoader().Namespace(); err != nil {
        return
    }
    if option.cli, option.dockerCli, err = pkgssh.GetClient(); err != nil {
        return err
    }
    return
}

func (option *Options) GetPodTemplateSpec() (*v1.PodTemplateSpec, error) {
    object, err := util.GetUnstructuredObject(option.factory, option.Namespace, option.Workload)
    if err != nil {
        log.Errorf("Failed to get unstructured object error: %v", err)
        return nil, err
    }

@@ -4,93 +4,74 @@ import (
    "context"
    "fmt"
    "net"
    "os"
    "strconv"
    "strings"
    "unsafe"

    "github.com/containerd/containerd/platforms"
    "github.com/docker/cli/cli/command"
    "github.com/docker/docker/api/types"
    typescontainer "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/mount"
    "github.com/docker/docker/api/types/network"
    "github.com/docker/docker/api/types/strslice"
    "github.com/docker/docker/client"
    "github.com/docker/go-connections/nat"
    "github.com/google/uuid"
    "github.com/miekg/dns"
    "github.com/opencontainers/image-spec/specs-go/v1"
    log "github.com/sirupsen/logrus"
    v12 "k8s.io/api/core/v1"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/utils/ptr"

    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
    "github.com/wencaiwulue/kubevpn/v2/pkg/inject"
    "github.com/wencaiwulue/kubevpn/v2/pkg/util"
)

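// RunConfig now describes a single `docker run` invocation for the exec-based flow:
// the CLI flags collected in options, plus the image, entrypoint (command) and args to pass.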
type RunConfig struct {
    name string

    config *typescontainer.Config
    hostConfig *typescontainer.HostConfig
    networkingConfig *network.NetworkingConfig
    platform *v1.Platform
    options []string
    image string
    args []string
    command []string

    Options RunOptions
    Copts ContainerOptions
    //platform *v1.Platform
    //Options RunOptions
    //Copts ContainerOptions
}

type ConfigList []*RunConfig

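// Remove force-removes every container in the list; for the last container the shared
// docker network is disconnected first (unless another container's network was used),
// and the kubevpn network itself is removed once no containers remain attached to it.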
func (c ConfigList) Remove(ctx context.Context, cli *client.Client) error {
    var remove = false
    for _, runConfig := range c {
        if runConfig.hostConfig.AutoRemove {
            remove = true
            break
        }
    }
    if !remove {
        return nil
    }
    for _, runConfig := range c {
        err := cli.NetworkDisconnect(ctx, runConfig.name, runConfig.name, true)
func (l ConfigList) Remove(ctx context.Context, useAnotherContainerNet bool) error {
    for index, runConfig := range l {
        if !useAnotherContainerNet && index == len(l)-1 {
            output, err := NetworkDisconnect(ctx, runConfig.name)
            if err != nil {
                log.Warnf("Failed to disconnect container network: %v", err)
                log.Warnf("Failed to disconnect container network: %s: %v", string(output), err)
            }
        err = cli.ContainerRemove(ctx, runConfig.name, typescontainer.RemoveOptions{Force: true})
        }
        output, err := ContainerRemove(ctx, runConfig.name)
        if err != nil {
            log.Warnf("Failed to remove container: %v", err)
            log.Warnf("Failed to remove container: %s: %v", string(output), err)
        }
    }
    inspect, err := cli.NetworkInspect(ctx, config.ConfigMapPodTrafficManager, types.NetworkInspectOptions{})
    name := config.ConfigMapPodTrafficManager
    inspect, err := NetworkInspect(ctx, name)
    if err != nil {
        return err
    }
    if len(inspect.Containers) == 0 {
        return cli.NetworkRemove(ctx, config.ConfigMapPodTrafficManager)
        return NetworkRemove(ctx, name)
    }
    return nil
}

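// Run starts the containers back to front so the last entry, which owns the shared
// network, is up before the others join it; every container except the dev container
// at index 0 is additionally waited on until it reports running.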
func (c ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cli *client.Client, dockerCli *command.DockerCli) error {
    for index := len(c) - 1; index >= 0; index-- {
        runConfig := c[index]
        if index == 0 {
            err := runContainer(ctx, dockerCli, runConfig)
func (l ConfigList) Run(ctx context.Context) error {
    for index := len(l) - 1; index >= 0; index-- {
        conf := l[index]

        err := RunContainer(ctx, conf)
        if err != nil {
            return err
        }
    }
        _, err := run(ctx, cli, dockerCli, runConfig)
        if err != nil {
            // try to copy volume into container, why?
            runConfig.hostConfig.Mounts = nil
            id, err1 := run(ctx, cli, dockerCli, runConfig)
            if err1 != nil {
                // return first error
                return err
            }
            err = util.CopyVolumeIntoContainer(ctx, volume[runConfig.name], cli, id)

        if index != 0 {
            err := WaitDockerContainerRunning(ctx, conf.name)
            if err != nil {
                return err
            }
@@ -99,164 +80,184 @@ func (c ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cl
    return nil
}

func ConvertPodToContainer(ns string, temp v12.PodTemplateSpec, envMap map[string][]string, mountVolume map[string][]mount.Mount, dnsConfig *dns.ClientConfig) (list ConfigList) {
    getHostname := func(containerName string) string {
        for _, envEntry := range envMap[containerName] {
            env := strings.Split(envEntry, "=")
            if len(env) == 2 && env[0] == "HOSTNAME" {
                return env[1]
// ConvertPodToContainerConfigList
/**
if len(option.ContainerOptions.netMode.Value()) is not empty
  --> all containers use that network, so DNS config and exposed-port config need to be cleared
if len(option.ContainerOptions.netMode.Value()) is empty
  --> containers other than the last (non-dev) one join the last container's network, so their DNS config and exposed-port config need to be cleared
*/
func (option *Options) ConvertPodToContainerConfigList(
    ctx context.Context,
    temp corev1.PodTemplateSpec,
    conf *Config,
    hostConfig *HostConfig,
    envMap map[string]string,
    mountVolume map[string][]mount.Mount,
    dnsConfig *dns.ClientConfig,
) (configList ConfigList, err error) {
    inject.RemoveContainers(&temp)
    // move the dev container to the first position
    for index, c := range temp.Spec.Containers {
        if option.ContainerName == c.Name {
            temp.Spec.Containers[0], temp.Spec.Containers[index] = temp.Spec.Containers[index], temp.Spec.Containers[0]
            break
        }
    }
        return temp.Spec.Hostname
    }

    for _, c := range temp.Spec.Containers {
        containerConf := &typescontainer.Config{
            Hostname: getHostname(util.Join(ns, c.Name)),
            Domainname: temp.Spec.Subdomain,
            User: "root",
            AttachStdin: c.Stdin,
            AttachStdout: false,
            AttachStderr: false,
            ExposedPorts: nil,
            Tty: c.TTY,
            OpenStdin: c.Stdin,
            StdinOnce: c.StdinOnce,
            Env: envMap[util.Join(ns, c.Name)],
            Cmd: c.Args,
            Healthcheck: nil,
            ArgsEscaped: false,
            Image: c.Image,
            Volumes: nil,
            WorkingDir: c.WorkingDir,
            Entrypoint: c.Command,
            NetworkDisabled: false,
            OnBuild: nil,
            Labels: temp.Labels,
            StopSignal: "",
            StopTimeout: nil,
            Shell: nil,
        }
        if temp.DeletionGracePeriodSeconds != nil {
            containerConf.StopTimeout = (*int)(unsafe.Pointer(temp.DeletionGracePeriodSeconds))
        }
        hostConfig := &typescontainer.HostConfig{
            Binds: []string{},
            ContainerIDFile: "",
            LogConfig: typescontainer.LogConfig{},
            //NetworkMode: "",
            PortBindings: nil,
            RestartPolicy: typescontainer.RestartPolicy{},
            AutoRemove: false,
            VolumeDriver: "",
            VolumesFrom: nil,
            ConsoleSize: [2]uint{},
            CapAdd: strslice.StrSlice{"SYS_PTRACE", "SYS_ADMIN"}, // for dlv
            CgroupnsMode: "",
            DNS: dnsConfig.Servers,
            DNSOptions: []string{fmt.Sprintf("ndots=%d", dnsConfig.Ndots)},
            DNSSearch: dnsConfig.Search,
            ExtraHosts: nil,
            GroupAdd: nil,
            IpcMode: "",
            Cgroup: "",
            Links: nil,
            OomScoreAdj: 0,
            PidMode: "",
            Privileged: ptr.Deref(ptr.Deref(c.SecurityContext, v12.SecurityContext{}).Privileged, false),
            PublishAllPorts: false,
            ReadonlyRootfs: false,
            SecurityOpt: []string{"apparmor=unconfined", "seccomp=unconfined"},
            StorageOpt: nil,
            Tmpfs: nil,
            UTSMode: "",
            UsernsMode: "",
            ShmSize: 0,
            Sysctls: nil,
            Runtime: "",
            Isolation: "",
            Resources: typescontainer.Resources{},
            Mounts: mountVolume[util.Join(ns, c.Name)],
            MaskedPaths: nil,
            ReadonlyPaths: nil,
            Init: nil,
        }
        var portMap = nat.PortMap{}
        var portSet = nat.PortSet{}
        for _, port := range c.Ports {
            p := nat.Port(fmt.Sprintf("%d/%s", port.ContainerPort, strings.ToLower(string(port.Protocol))))
            if port.HostPort != 0 {
                binding := []nat.PortBinding{{HostPort: strconv.FormatInt(int64(port.HostPort), 10)}}
                portMap[p] = binding
    var allPortMap = nat.PortMap{}
    var allPortSet = nat.PortSet{}
    for k, v := range hostConfig.PortBindings {
        if oldValue, ok := allPortMap[k]; ok {
            allPortMap[k] = append(oldValue, v...)
            } else {
                binding := []nat.PortBinding{{HostPort: strconv.FormatInt(int64(port.ContainerPort), 10)}}
                portMap[p] = binding
            allPortMap[k] = v
            }
            portSet[p] = struct{}{}
        }
        hostConfig.PortBindings = portMap
        containerConf.ExposedPorts = portSet
        if c.SecurityContext != nil && c.SecurityContext.Capabilities != nil {
            hostConfig.CapAdd = append(hostConfig.CapAdd, *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Add))...)
            hostConfig.CapDrop = *(*strslice.StrSlice)(unsafe.Pointer(&c.SecurityContext.Capabilities.Drop))
    for k, v := range conf.ExposedPorts {
        allPortSet[k] = v
    }

    lastContainerIdx := len(temp.Spec.Containers) - 1
    lastContainerRandomName := util.Join(option.Namespace, temp.Spec.Containers[lastContainerIdx].Name, strings.ReplaceAll(uuid.New().String(), "-", "")[:5])
    for index, container := range temp.Spec.Containers {
        name := util.Join(option.Namespace, container.Name)
        randomName := util.Join(name, strings.ReplaceAll(uuid.New().String(), "-", "")[:5])
        var options = []string{
            "--env-file", envMap[name],
            "--domainname", temp.Spec.Subdomain,
            "--workdir", container.WorkingDir,
            "--cap-add", "SYS_PTRACE",
            "--cap-add", "SYS_ADMIN",
            "--cap-add", "SYS_PTRACE",
            "--cap-add", "SYS_ADMIN",
            "--security-opt", "apparmor=unconfined",
            "--security-opt", "seccomp=unconfined",
            "--pull", ConvertK8sImagePullPolicyToDocker(container.ImagePullPolicy),
            "--name", util.If(index == lastContainerIdx, lastContainerRandomName, randomName),
            "--user", "root",
            "--env", "LC_ALL=C.UTF-8",
        }
        for k, v := range temp.Labels {
            options = append(options, "--label", fmt.Sprintf("%s=%s", k, v))
        }
        if container.TTY {
            options = append(options, "--tty")
        }
        if ptr.Deref(ptr.Deref(container.SecurityContext, corev1.SecurityContext{}).Privileged, false) {
            options = append(options, "--privileged")
        }
        for _, m := range mountVolume[name] {
            options = append(options, "--volume", fmt.Sprintf("%s:%s", m.Source, m.Target))
        }

        for _, port := range container.Ports {
            p := nat.Port(fmt.Sprintf("%d/%s", port.ContainerPort, strings.ToLower(string(port.Protocol))))
            var portBinding nat.PortBinding
            if port.HostPort != 0 {
                portBinding = nat.PortBinding{HostPort: strconv.FormatInt(int64(port.HostPort), 10)}
            } else {
                portBinding = nat.PortBinding{HostPort: strconv.FormatInt(int64(port.ContainerPort), 10)}
            }
            if oldValue, ok := allPortMap[p]; ok {
                allPortMap[p] = append(oldValue, portBinding)
            } else {
                allPortMap[p] = []nat.PortBinding{portBinding}
            }
            allPortSet[p] = struct{}{}
        }

        // if netMode is empty, containers 0..last-1 join the last container's network
        if len(option.ContainerOptions.netMode.Value()) == 0 {
            // configure the last container
            if lastContainerIdx == index {
                options = append(options,
                    "--dns-option", fmt.Sprintf("ndots=%d", dnsConfig.Ndots),
                    "--hostname", GetEnvByKey(envMap[name], "HOSTNAME", container.Name),
                )
                for _, server := range dnsConfig.Servers {
                    options = append(options, "--dns", server)
                }
                for _, search := range dnsConfig.Search {
                    options = append(options, "--dns-search", search)
                }
                for p, bindings := range allPortMap {
                    for _, binding := range bindings {
                        options = append(options, "--publish", fmt.Sprintf("%s:%s", p.Port(), binding.HostPort))
                    }
                    options = append(options, "--expose", p.Port())
                }
                if hostConfig.PublishAllPorts {
                    options = append(options, "--publish-all")
                }
                _, err = CreateNetwork(ctx, config.ConfigMapPodTrafficManager)
                if err != nil {
                    log.Errorf("Failed to create network: %v", err)
                    return nil, err
                }
                log.Infof("Create docker network %s", config.ConfigMapPodTrafficManager)
                options = append(options, "--network", config.ConfigMapPodTrafficManager)
            } else { // containers 0..last-1 use the last container's network
                options = append(options, "--network", util.ContainerNet(lastContainerRandomName))
                options = append(options, "--pid", util.ContainerNet(lastContainerRandomName))
            }
        } else { // all containers use the specified network mode
            log.Infof("Network mode is %s", option.ContainerOptions.netMode.NetworkMode())
            options = append(options, "--network", option.ContainerOptions.netMode.NetworkMode())
            if typescontainer.NetworkMode(option.ContainerOptions.netMode.NetworkMode()).IsContainer() {
                options = append(options, "--pid", option.ContainerOptions.netMode.NetworkMode())
            }
        }

        var r = RunConfig{
            name: util.Join(ns, c.Name),
            config: containerConf,
            hostConfig: hostConfig,
            networkingConfig: &network.NetworkingConfig{EndpointsConfig: make(map[string]*network.EndpointSettings)},
            platform: nil,
            Options: RunOptions{Pull: PullImageMissing},
            name: util.If(index == lastContainerIdx, lastContainerRandomName, randomName),
            options: util.If(index != 0, append(options, "--detach"), options),
            image: util.If(index == 0 && option.DevImage != "", option.DevImage, container.Image),
            args: util.If(index == 0 && len(conf.Cmd) != 0, conf.Cmd, container.Args),
            command: util.If(index == 0 && len(conf.Entrypoint) != 0, conf.Entrypoint, container.Command),
        }
        if index == 0 {
            MergeDockerOptions(&r, option, conf, hostConfig)
        }
        configList = append(configList, &r)
    }

        list = append(list, &r)
    if hostConfig.AutoRemove {
        for index := range configList {
            configList[index].options = append(configList[index].options, "--rm")
        }
    }
    return configList, nil
}

    return list
}

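// MergeDockerOptions layers the user-supplied `docker run` flags (the Config/HostConfig
// parsed from the kubevpn dev command line) onto the generated dev-container arguments:
// pull policy, platform, attach streams, tty/interactive, privileged, volume binds and
// extra hosts resolved from the extra-route domains.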
func MergeDockerOptions(list ConfigList, options *Options, config *Config, hostConfig *HostConfig) {
    conf := list[0]
    conf.Options = options.RunOptions
    conf.Copts = *options.ContainerOptions

func MergeDockerOptions(conf *RunConfig, options *Options, config *Config, hostConfig *HostConfig) {
    conf.options = append(conf.options, "--pull", options.RunOptions.Pull)
    if options.RunOptions.Platform != "" {
        p, _ := platforms.Parse(options.RunOptions.Platform)
        conf.platform = &p
        conf.options = append(conf.options, "--platform", options.RunOptions.Platform)
    }

    // container config
    var entrypoint = conf.config.Entrypoint
    var args = conf.config.Cmd
    // if --entrypoint was specified, use it
    if len(config.Entrypoint) != 0 {
        entrypoint = config.Entrypoint
        args = config.Cmd
    if config.AttachStdin {
        conf.options = append(conf.options, "--attach", "STDIN")
    }
    if len(config.Cmd) != 0 {
        args = config.Cmd
    if config.AttachStdout {
        conf.options = append(conf.options, "--attach", "STDOUT")
    }
    conf.config.Entrypoint = entrypoint
    conf.config.Cmd = args
    if options.DevImage != "" {
        conf.config.Image = options.DevImage
    if config.AttachStderr {
        conf.options = append(conf.options, "--attach", "STDERR")
    }
    conf.config.Volumes = util.Merge[string, struct{}](conf.config.Volumes, config.Volumes)
    for k, v := range config.ExposedPorts {
        if _, found := conf.config.ExposedPorts[k]; !found {
            conf.config.ExposedPorts[k] = v
    if config.Tty {
        conf.options = append(conf.options, "--tty")
    }
    if config.OpenStdin {
        conf.options = append(conf.options, "--interactive")
    }
    if hostConfig.Privileged {
        conf.options = append(conf.options, "--privileged")
    }
    for _, bind := range hostConfig.Binds {
        conf.options = append(conf.options, "--volume", bind)
    }
    conf.config.StdinOnce = config.StdinOnce
    conf.config.AttachStdin = config.AttachStdin
    conf.config.AttachStdout = config.AttachStdout
    conf.config.AttachStderr = config.AttachStderr
    conf.config.Tty = config.Tty
    conf.config.OpenStdin = config.OpenStdin

    // host config
    var hosts []string
    for _, domain := range options.ExtraRouteInfo.ExtraDomain {
        ips, err := net.LookupIP(domain)
        if err != nil {
@@ -264,22 +265,24 @@ func MergeDockerOptions(list ConfigList, options *Options, config *Config, hostC
        }
        for _, ip := range ips {
            if ip.To4() != nil {
                hosts = append(hosts, fmt.Sprintf("%s:%s", domain, ip.To4().String()))
                conf.options = append(conf.options, "--add-host", fmt.Sprintf("%s:%s", domain, ip.To4().String()))
                break
            }
        }
    }
    conf.hostConfig.ExtraHosts = hosts
    conf.hostConfig.AutoRemove = hostConfig.AutoRemove
    conf.hostConfig.Privileged = hostConfig.Privileged
    conf.hostConfig.PublishAllPorts = hostConfig.PublishAllPorts
    conf.hostConfig.Mounts = append(conf.hostConfig.Mounts, hostConfig.Mounts...)
    conf.hostConfig.Binds = append(conf.hostConfig.Binds, hostConfig.Binds...)
    for port, bindings := range hostConfig.PortBindings {
        if v, ok := conf.hostConfig.PortBindings[port]; ok {
            conf.hostConfig.PortBindings[port] = append(v, bindings...)
        } else {
            conf.hostConfig.PortBindings[port] = bindings
        }

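// GetEnvByKey looks up key in a KEY=VALUE env file (such as the ones written by GetEnv)
// and returns defaultValue when the file cannot be read or the key is missing.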
func GetEnvByKey(filepath string, key string, defaultValue string) string {
    content, err := os.ReadFile(filepath)
    if err != nil {
        return defaultValue
    }

    for _, kv := range strings.Split(string(content), "\n") {
        env := strings.Split(kv, "=")
        if len(env) == 2 && env[0] == key {
            return env[1]
        }
    }
    return defaultValue
}
@@ -18,7 +18,7 @@ import (
)

func GetDNSServiceIPFromPod(ctx context.Context, clientset *kubernetes.Clientset, conf *rest.Config, podName, namespace string) (*dns.ClientConfig, error) {
    str, err := Shell(ctx, clientset, conf, podName, config.ContainerSidecarVPN, namespace, []string{"cat", "/etc/resolv.conf"})
    str, err := Shell(ctx, clientset, conf, podName, "", namespace, []string{"cat", "/etc/resolv.conf"})
    if err != nil {
        return nil, err
    }
@@ -1,7 +1,14 @@
package util

import "strings"
import (
    "fmt"
    "strings"
)

func Join(names ...string) string {
    return strings.Join(names, "_")
}

func ContainerNet(name string) string {
    return fmt.Sprintf("container:%s", name)
}
@@ -100,19 +100,27 @@ func PrintStatusInline(pod *corev1.Pod) string {
    return sb.String()
}

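// GetEnv now dumps each container's environment into a temporary *.env file and returns a
// map from "<namespace>_<container>" to that file path, so it can be handed to `docker run --env-file`.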
func GetEnv(ctx context.Context, set *kubernetes.Clientset, config *rest.Config, ns, podName string) (map[string][]string, error) {
func GetEnv(ctx context.Context, set *kubernetes.Clientset, config *rest.Config, ns, podName string) (map[string]string, error) {
    pod, err := set.CoreV1().Pods(ns).Get(ctx, podName, v1.GetOptions{})
    if err != nil {
        return nil, err
    }
    result := map[string][]string{}
    result := map[string]string{}
    for _, c := range pod.Spec.Containers {
        env, err := Shell(ctx, set, config, podName, c.Name, ns, []string{"env"})
        if err != nil {
            return nil, err
        }
        split := strings.Split(env, "\n")
        result[Join(ns, c.Name)] = split
        temp, err := os.CreateTemp("", "*.env")
        if err != nil {
            return nil, err
        }
        _, err = temp.WriteString(env)
        if err != nil {
            return nil, err
        }
        _ = temp.Close()
        result[Join(ns, c.Name)] = temp.Name()
    }
    return result, nil
}
@@ -7,20 +7,16 @@ import (
    "os"
    "path/filepath"
    "strconv"
    "time"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/api/types/mount"
    "github.com/docker/docker/client"
    "github.com/docker/docker/pkg/archive"
    "github.com/moby/term"
    pkgerr "github.com/pkg/errors"
    log "github.com/sirupsen/logrus"
    "github.com/spf13/cobra"
    "k8s.io/api/core/v1"
    v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/util/errors"
    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/cli-runtime/pkg/genericiooptions"
    "k8s.io/client-go/kubernetes"
    "k8s.io/kubectl/pkg/cmd/util"

    "github.com/wencaiwulue/kubevpn/v2/pkg/config"
@@ -28,13 +24,8 @@ import (
)

// GetVolume key format: [container name]-[volume mount name]
func GetVolume(ctx context.Context, f util.Factory, ns, podName string) (map[string][]mount.Mount, error) {
    clientSet, err := f.KubernetesClientSet()
    if err != nil {
        return nil, err
    }
    var pod *v1.Pod
    pod, err = clientSet.CoreV1().Pods(ns).Get(ctx, podName, v12.GetOptions{})
func GetVolume(ctx context.Context, clientset *kubernetes.Clientset, f util.Factory, ns, podName string) (map[string][]mount.Mount, error) {
    pod, err := clientset.CoreV1().Pods(ns).Get(ctx, podName, v12.GetOptions{})
    if err != nil {
        return nil, err
    }
@@ -89,85 +80,10 @@ func RemoveDir(volume map[string][]mount.Mount) error {
    for _, mounts := range volume {
        for _, m := range mounts {
            err := os.RemoveAll(m.Source)
            if err != nil {
            if err != nil && !pkgerr.Is(err, os.ErrNotExist) {
                errs = append(errs, fmt.Errorf("failed to delete dir %s: %v", m.Source, err))
            }
        }
    }
    return errors.NewAggregate(errs)
}

func CopyVolumeIntoContainer(ctx context.Context, volume []mount.Mount, cli *client.Client, id string) error {
    // copy volume into container
    for _, v := range volume {
        target, err := CreateFolder(ctx, cli, id, v.Source, v.Target)
        if err != nil {
            log.Errorf("Failed to create folder %s: %v", target, err)
        }
        log.Debugf("From %s to %s", v.Source, v.Target)
        srcInfo, err := archive.CopyInfoSourcePath(v.Source, true)
        if err != nil {
            log.Errorf("Copy info source path, err: %v", err)
            return err
        }
        srcArchive, err := archive.TarResource(srcInfo)
        if err != nil {
            log.Errorf("Tar resource failed, err: %v", err)
            return err
        }
        dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, archive.CopyInfo{Path: v.Target})
        if err != nil {
            log.Errorf("Failed to prepare archive copy, err: %v", err)
            return err
        }

        err = cli.CopyToContainer(ctx, id, dstDir, preparedArchive, types.CopyToContainerOptions{
            AllowOverwriteDirWithFile: true,
            CopyUIDGID: true,
        })
        if err != nil {
            log.Infof("Failed to copy %s to container %s:%s, err: %v", v.Source, id, v.Target, err)
            return err
        }
    }
    return nil
}

func CreateFolder(ctx context.Context, cli *client.Client, id string, src string, target string) (string, error) {
    lstat, err := os.Lstat(src)
    if err != nil {
        return "", err
    }
    if !lstat.IsDir() {
        target = filepath.Dir(target)
    }
    var create types.IDResponse
    create, err = cli.ContainerExecCreate(ctx, id, types.ExecConfig{
        AttachStdin: true,
        AttachStderr: true,
        AttachStdout: true,
        Cmd: []string{"mkdir", "-p", target},
    })
    if err != nil {
        log.Errorf("Failed to create folder %s, err: %v", target, err)
        return "", err
    }
    err = cli.ContainerExecStart(ctx, create.ID, types.ExecStartCheck{})
    if err != nil {
        log.Errorf("Failed to create folder %s, err: %v", target, err)
        return "", err
    }
    log.Infof("Waiting for folder %s to be created in container %s...", target, id)
    chanStop := make(chan struct{})
    wait.Until(func() {
        inspect, err := cli.ContainerExecInspect(ctx, create.ID)
        if err != nil {
            log.Warnf("Failed to inspect container %s: %v", id, err)
            return
        }
        if !inspect.Running {
            close(chanStop)
        }
    }, time.Second, chanStop)
    return target, nil
}