refactor: divide log into session and backend (#487)

* refactor: divide log into session and backend
This commit is contained in:
naison
2025-03-23 13:59:10 +08:00
committed by GitHub
parent a5622b9439
commit b46f7a9877
107 changed files with 873 additions and 871 deletions

View File

@@ -14,10 +14,10 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/stdcopy"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
)
// Pull constants
@@ -136,17 +136,17 @@ func RunContainer(ctx context.Context, runConfig *RunConfig) error {
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
log.Debugf("Run container with cmd: %v", cmd.Args)
plog.G(ctx).Debugf("Run container with cmd: %v", cmd.Args)
err := cmd.Start()
if err != nil {
log.Errorf("Failed to run container with cmd: %v: %v", cmd.Args, err)
plog.G(ctx).Errorf("Failed to run container with cmd: %v: %v", cmd.Args, err)
return err
}
return cmd.Wait()
}
func WaitDockerContainerRunning(ctx context.Context, name string) error {
log.Infof("Wait container %s to be running...", name)
plog.G(ctx).Infof("Wait container %s to be running...", name)
for ctx.Err() == nil {
time.Sleep(time.Second * 1)
@@ -163,14 +163,14 @@ func WaitDockerContainerRunning(ctx context.Context, name string) error {
}
}
log.Infof("Container %s is running now", name)
plog.G(ctx).Infof("Container %s is running now", name)
return nil
}
func ContainerInspect(ctx context.Context, name string) (types.ContainerJSON, error) {
output, err := exec.CommandContext(ctx, "docker", "inspect", name).CombinedOutput()
if err != nil {
log.Errorf("Failed to wait container to be ready output: %s: %v", string(output), err)
plog.G(ctx).Errorf("Failed to wait container to be ready output: %s: %v", string(output), err)
_ = RunLogsSinceNow(name, false)
return types.ContainerJSON{}, err
}
@@ -192,7 +192,7 @@ func NetworkInspect(ctx context.Context, name string) (types.NetworkResource, er
//cli.NetworkInspect()
output, err := exec.CommandContext(ctx, "docker", "network", "inspect", name).CombinedOutput()
if err != nil {
log.Errorf("Failed to wait container to be ready: %v", err)
plog.G(ctx).Errorf("Failed to wait container to be ready: %v", err)
_ = RunLogsSinceNow(name, false)
return types.NetworkResource{}, err
}

View File

@@ -25,6 +25,7 @@ import (
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon"
"github.com/wencaiwulue/kubevpn/v2/pkg/daemon/rpc"
"github.com/wencaiwulue/kubevpn/v2/pkg/handler"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
pkgssh "github.com/wencaiwulue/kubevpn/v2/pkg/ssh"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -64,13 +65,13 @@ type Options struct {
func (option *Options) Main(ctx context.Context, sshConfig *pkgssh.SshConfig, config *Config, hostConfig *HostConfig, imagePullSecretName string) error {
mode := typescontainer.NetworkMode(option.ContainerOptions.netMode.NetworkMode())
if mode.IsContainer() {
log.Infof("Network mode container is %s", mode.ConnectedContainer())
plog.G(ctx).Infof("Network mode container is %s", mode.ConnectedContainer())
} else if mode.IsDefault() && util.RunningInContainer() {
hostname, err := os.Hostname()
if err != nil {
return err
}
log.Infof("Hostname is %s", hostname)
plog.G(ctx).Infof("Hostname is %s", hostname)
err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", hostname))
if err != nil {
return err
@@ -80,7 +81,7 @@ func (option *Options) Main(ctx context.Context, sshConfig *pkgssh.SshConfig, co
// Connect to cluster, in container or host
err := option.Connect(ctx, sshConfig, imagePullSecretName, hostConfig.PortBindings)
if err != nil {
log.Errorf("Connect to cluster failed, err: %v", err)
plog.G(ctx).Errorf("Connect to cluster failed, err: %v", err)
return err
}
@@ -103,10 +104,6 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
option.ExtraRouteInfo.ExtraCIDR = append(option.ExtraRouteInfo.ExtraCIDR, ip.String())
}
}
logLevel := log.InfoLevel
if config.Debug {
logLevel = log.DebugLevel
}
// not needs to ssh jump in daemon, because dev mode will hang up until user exit,
// so just ssh jump in client is enough
req := &rpc.ConnectRequest{
@@ -119,7 +116,7 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
OriginKubeconfigPath: util.GetKubeConfigPath(option.factory),
Image: config.Image,
ImagePullSecretName: imagePullSecretName,
Level: int32(logLevel),
Level: int32(util.If(config.Debug, log.DebugLevel, log.InfoLevel)),
SshJump: sshConfig.ToRPC(),
}
option.AddRollbackFunc(func() error {
@@ -137,7 +134,7 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
var resp rpc.Daemon_ConnectClient
resp, err = daemonCli.Proxy(ctx, req)
if err != nil {
log.Errorf("Connect to cluster error: %s", err.Error())
plog.G(ctx).Errorf("Connect to cluster error: %s", err.Error())
return err
}
err = util.PrintGRPCStream[rpc.CloneResponse](resp)
@@ -149,7 +146,7 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
if err != nil {
return err
}
log.Infof("Starting connect to cluster in container")
plog.G(ctx).Infof("Starting connect to cluster in container")
err = WaitDockerContainerRunning(ctx, *name)
if err != nil {
return err
@@ -168,7 +165,7 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
}
return err
}
log.Infof("Connected to cluster in container")
plog.G(ctx).Infof("Connected to cluster in container")
err = option.ContainerOptions.netMode.Set(fmt.Sprintf("container:%s", *name))
return err
}
@@ -179,7 +176,7 @@ func (option *Options) Connect(ctx context.Context, sshConfig *pkgssh.SshConfig,
func (option *Options) Dev(ctx context.Context, config *Config, hostConfig *HostConfig) error {
templateSpec, err := option.GetPodTemplateSpec()
if err != nil {
log.Errorf("Failed to get unstructured object error: %v", err)
plog.G(ctx).Errorf("Failed to get unstructured object error: %v", err)
return err
}
@@ -187,13 +184,13 @@ func (option *Options) Dev(ctx context.Context, config *Config, hostConfig *Host
var list []v1.Pod
list, err = util.GetRunningPodList(ctx, option.clientset, option.Namespace, label)
if err != nil {
log.Errorf("Failed to get first running pod from k8s: %v", err)
plog.G(ctx).Errorf("Failed to get first running pod from k8s: %v", err)
return err
}
env, err := util.GetEnv(ctx, option.clientset, option.config, option.Namespace, list[0].Name)
if err != nil {
log.Errorf("Failed to get env from k8s: %v", err)
plog.G(ctx).Errorf("Failed to get env from k8s: %v", err)
return err
}
option.AddRollbackFunc(func() error {
@@ -204,7 +201,7 @@ func (option *Options) Dev(ctx context.Context, config *Config, hostConfig *Host
})
volume, err := util.GetVolume(ctx, option.clientset, option.factory, option.Namespace, list[0].Name)
if err != nil {
log.Errorf("Failed to get volume from k8s: %v", err)
plog.G(ctx).Errorf("Failed to get volume from k8s: %v", err)
return err
}
option.AddRollbackFunc(func() error {
@@ -212,7 +209,7 @@ func (option *Options) Dev(ctx context.Context, config *Config, hostConfig *Host
})
dns, err := util.GetDNS(ctx, option.clientset, option.config, option.Namespace, list[0].Name)
if err != nil {
log.Errorf("Failed to get DNS from k8s: %v", err)
plog.G(ctx).Errorf("Failed to get DNS from k8s: %v", err)
return err
}
configList, err := option.ConvertPodToContainerConfigList(ctx, *templateSpec, config, hostConfig, env, volume, dns)
@@ -319,7 +316,7 @@ func (option *Options) GetRollbackFuncList() []func() error {
func (option *Options) GetExposePort(portBinds nat.PortMap) (nat.PortMap, nat.PortSet, error) {
templateSpec, err := option.GetPodTemplateSpec()
if err != nil {
log.Errorf("Failed to get unstructured object error: %v", err)
plog.G(context.Background()).Errorf("Failed to get unstructured object error: %v", err)
return nil, nil, err
}

View File

@@ -13,12 +13,12 @@ import (
"github.com/docker/go-connections/nat"
"github.com/google/uuid"
"github.com/miekg/dns"
log "github.com/sirupsen/logrus"
corev1 "k8s.io/api/core/v1"
"k8s.io/utils/ptr"
"github.com/wencaiwulue/kubevpn/v2/pkg/config"
"github.com/wencaiwulue/kubevpn/v2/pkg/inject"
plog "github.com/wencaiwulue/kubevpn/v2/pkg/log"
"github.com/wencaiwulue/kubevpn/v2/pkg/util"
)
@@ -42,12 +42,12 @@ func (l ConfigList) Remove(ctx context.Context, userAnotherContainerNet bool) er
if !userAnotherContainerNet && index == len(l)-1 {
output, err := NetworkDisconnect(ctx, runConfig.name)
if err != nil {
log.Warnf("Failed to disconnect container network: %s: %v", string(output), err)
plog.G(ctx).Warnf("Failed to disconnect container network: %s: %v", string(output), err)
}
}
output, err := ContainerRemove(ctx, runConfig.name)
if err != nil {
log.Warnf("Failed to remove container: %s: %v", string(output), err)
plog.G(ctx).Warnf("Failed to remove container: %s: %v", string(output), err)
}
}
name := config.ConfigMapPodTrafficManager
@@ -192,17 +192,17 @@ func (option *Options) ConvertPodToContainerConfigList(
}
_, err = CreateNetwork(ctx, config.ConfigMapPodTrafficManager)
if err != nil {
log.Errorf("Failed to create network: %v", err)
plog.G(ctx).Errorf("Failed to create network: %v", err)
return nil, err
}
log.Infof("Create docker network %s", config.ConfigMapPodTrafficManager)
plog.G(ctx).Infof("Create docker network %s", config.ConfigMapPodTrafficManager)
options = append(options, "--network", config.ConfigMapPodTrafficManager)
} else { // set 0 to last-1 container to use last container network
options = append(options, "--network", util.ContainerNet(lastContainerRandomName))
options = append(options, "--pid", util.ContainerNet(lastContainerRandomName))
}
} else { // set all containers to use network mode
log.Infof("Network mode is %s", option.ContainerOptions.netMode.NetworkMode())
plog.G(ctx).Infof("Network mode is %s", option.ContainerOptions.netMode.NetworkMode())
options = append(options, "--network", option.ContainerOptions.netMode.NetworkMode())
if typescontainer.NetworkMode(option.ContainerOptions.netMode.NetworkMode()).IsContainer() {
options = append(options, "--pid", option.ContainerOptions.netMode.NetworkMode())