feat: dev mode over ssh now works correctly

This commit is contained in:
fengcaiwen
2023-09-23 14:16:10 +08:00
committed by naison
parent 6b1ea53cd6
commit e801209349
9 changed files with 60 additions and 38 deletions

View File

@@ -19,6 +19,7 @@ import (
miekgdns "github.com/miekg/dns"
"github.com/moby/term"
v12 "github.com/opencontainers/image-spec/specs-go/v1"
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"k8s.io/api/core/v1"
v13 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -243,7 +244,7 @@ func GetVolume(ctx context.Context, f util.Factory, ns, pod string) (map[string]
Source: join,
Target: volumeMount.MountPath,
})
fmt.Printf("%s:%s\n", join, volumeMount.MountPath)
log.Infof("%s:%s", join, volumeMount.MountPath)
}
result[c.Name] = m
}

View File

@@ -84,6 +84,7 @@ func (d *Options) Main(ctx context.Context, tempContainerConfig *containerConfig
rand.Seed(time.Now().UnixNano())
object, err := util.GetUnstructuredObject(d.Factory, d.Namespace, d.Workload)
if err != nil {
log.Errorf("get unstructured object error: %v", err)
return err
}
@@ -112,6 +113,7 @@ func (d *Options) Main(ctx context.Context, tempContainerConfig *containerConfig
lab := labels.SelectorFromSet(templateSpec.Labels).String()
firstPod, _, err := polymorphichelpers.GetFirstPod(set.CoreV1(), d.Namespace, lab, time.Second*60, sortBy)
if err != nil {
log.Errorf("get first running pod from k8s: %v", err)
return err
}
@@ -141,13 +143,14 @@ func (d *Options) Main(ctx context.Context, tempContainerConfig *containerConfig
return err
}
// check resource
var outOfMemory bool
outOfMemory, _ = checkOutOfMemory(templateSpec, d.Cli)
if outOfMemory {
var oom bool
oom, _ = checkOutOfMemory(templateSpec, d.Cli)
if oom {
return fmt.Errorf("your pod resource request is bigger than docker-desktop resource, please adjust your docker-desktop resource")
}
mode := container.NetworkMode(d.Copts.netMode.NetworkMode())
if len(d.Copts.netMode.Value()) != 0 {
log.Infof("network mode is %s", d.Copts.netMode.NetworkMode())
for _, runConfig := range runConfigList[:] {
// remove expose port
runConfig.config.ExposedPorts = nil
@@ -171,6 +174,7 @@ func (d *Options) Main(ctx context.Context, tempContainerConfig *containerConfig
log.Errorf("create network for %s: %v", d.Workload, err)
return err
}
log.Infof("create docker network %s", networkID)
runConfigList[len(runConfigList)-1].networkingConfig.EndpointsConfig[runConfigList[len(runConfigList)-1].containerName] = &network.EndpointSettings{
NetworkID: networkID,
@@ -250,7 +254,6 @@ func (l ConfigList) Run(ctx context.Context, volume map[string][]mount.Mount, cl
if index == 0 {
_, err := runFirst(ctx, runConfig, cli, dockerCli)
if err != nil {
log.Errorf("run main container container failed: %v", err)
return err
}
} else {
@@ -375,9 +378,11 @@ func DoDev(ctx context.Context, devOption *Options, conf *util.SshConfig, flags
}
mode := container.NetworkMode(devOption.Copts.netMode.NetworkMode())
if mode.IsContainer() {
log.Infof("network mode container is %s", mode.ConnectedContainer())
var inspect types.ContainerJSON
inspect, err = cli.ContainerInspect(ctx, mode.ConnectedContainer())
if err != nil {
log.Errorf("can not inspect container %s, err: %v", mode.ConnectedContainer(), err)
return err
}
if inspect.State == nil {
@@ -386,12 +391,13 @@ func DoDev(ctx context.Context, devOption *Options, conf *util.SshConfig, flags
if !inspect.State.Running {
return fmt.Errorf("container %s status is %s, expect is running, please make sure your outer docker name is correct", mode.ConnectedContainer(), inspect.State.Status)
}
log.Infof("container %s is running", mode.ConnectedContainer())
} else if mode.IsDefault() && util.RunningInContainer() {
var hostname string
if hostname, err = os.Hostname(); err != nil {
return err
}
log.Infof("hostname %s", hostname)
log.Infof("hostname is %s", hostname)
err = devOption.Copts.netMode.Set(fmt.Sprintf("container:%s", hostname))
if err != nil {
return err
@@ -542,6 +548,7 @@ func (d *Options) doConnect(ctx context.Context, f cmdutil.Factory, conf *util.S
cancelCtx, cancelFunc := context.WithCancel(ctx)
defer cancelFunc()
var id string
log.Infof("starting container connect to cluster")
id, err = run(cancelCtx, connectContainer, d.Cli, d.DockerCli)
if err != nil {
return
@@ -567,6 +574,7 @@ func (d *Options) doConnect(ctx context.Context, f cmdutil.Factory, conf *util.S
}
return
}
log.Infof("container connect to cluster success")
err = d.Copts.netMode.Set(fmt.Sprintf("container:%s", id))
return
default:

View File

@@ -7,7 +7,6 @@ import (
"math/rand"
"strconv"
"strings"
"sync"
"time"
"github.com/docker/cli/cli/command"
@@ -38,6 +37,7 @@ func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *comma
var img types.ImageInspect
img, _, err = cli.ImageInspectWithRaw(ctx, config.Image)
if errdefs.IsNotFound(err) {
log.Infof("needs to pull image %s", config.Image)
needPull = true
err = nil
} else if err != nil {
@@ -122,7 +122,7 @@ func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *comma
str = fmt.Sprintf("Container %s is running on port %s now", name, strings.Join(list, " "))
}
if !empty {
log.Infoln(str)
log.Info(str)
} else {
log.Infof("Container %s is running now", name)
}
@@ -131,6 +131,12 @@ func run(ctx context.Context, runConfig *RunConfig, cli *client.Client, c *comma
func runFirst(ctx context.Context, runConfig *RunConfig, cli *apiclient.Client, dockerCli *command.DockerCli) (id string, err error) {
rand.New(rand.NewSource(time.Now().UnixNano()))
// default add -it
runConfig.Options.Detach = true
runConfig.config.AttachStdin = true
runConfig.config.AttachStdout = true
runConfig.config.AttachStderr = true
runConfig.config.Tty = true
defer func() {
if err != nil && runConfig.hostConfig.AutoRemove {
@@ -245,7 +251,7 @@ func runFirst(ctx context.Context, runConfig *RunConfig, cli *apiclient.Client,
if status != 0 {
return id, errors.New(strconv.Itoa(status))
}
log.Infof("Wait container %s to be running...", runConfig.containerName)
/*log.Infof("Wait container %s to be running...", runConfig.containerName)
chanStop := make(chan struct{})
var inspect types.ContainerJSON
var once = &sync.Once{}
@@ -271,32 +277,31 @@ func runFirst(ctx context.Context, runConfig *RunConfig, cli *apiclient.Client,
}, time.Second, chanStop)
if err != nil {
log.Errorf("wait container to be ready: %v", err)
err = fmt.Errorf("failed to wait container to be ready: %v", err)
return
}
}*/
// print port mapping to host
var empty = true
var str string
if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil {
var list []string
for port, bindings := range inspect.NetworkSettings.Ports {
var p []string
for _, binding := range bindings {
if binding.HostPort != "" {
p = append(p, binding.HostPort)
empty = false
}
}
list = append(list, fmt.Sprintf("%s:%s", port, strings.Join(p, ",")))
}
str = fmt.Sprintf("Container %s is running on port %s now", runConfig.containerName, strings.Join(list, " "))
}
if !empty {
log.Infoln(str)
} else {
log.Infof("Container %s is running now", runConfig.containerName)
}
//var empty = true
//var str string
//if inspect.NetworkSettings != nil && inspect.NetworkSettings.Ports != nil {
// var list []string
// for port, bindings := range inspect.NetworkSettings.Ports {
// var p []string
// for _, binding := range bindings {
// if binding.HostPort != "" {
// p = append(p, binding.HostPort)
// empty = false
// }
// }
// list = append(list, fmt.Sprintf("%s:%s", port, strings.Join(p, ",")))
// }
// str = fmt.Sprintf("Container %s is running on port %s now", runConfig.containerName, strings.Join(list, " "))
//}
//if !empty {
// log.Infoln(str)
//} else {
// log.Infof("Container %s is running now", runConfig.containerName)
//}
return
}

View File

@@ -56,7 +56,9 @@ func waitExitOrRemoved(ctx context.Context, dockerCli command.Cli, containerID s
statusC <- int(result.StatusCode)
}
case err := <-errC:
logrus.Errorf("error waiting for container: %v", err)
if err != nil && err.Error() != "" {
logrus.Errorf("error waiting for container: %v", err)
}
statusC <- 125
}
}()