diff --git a/Makefile b/Makefile
index 72d5a8ef..7f177fd4 100644
--- a/Makefile
+++ b/Makefile
@@ -93,7 +93,7 @@ container-local: kubevpn-linux-amd64
 
 .PHONY: container-test
 container-test: kubevpn-linux-amd64
-	docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:test -f $(BUILD_DIR)/test.Dockerfile --push .
+	docker buildx build --platform linux/amd64,linux/arm64 -t docker.io/naison/kubevpn:test2309172253 -f $(BUILD_DIR)/test.Dockerfile --push .
 
 .PHONY: version
 version:
diff --git a/cmd/kubevpn/cmds/clone.go b/cmd/kubevpn/cmds/clone.go
index db3ff981..dc4d91c7 100644
--- a/cmd/kubevpn/cmds/clone.go
+++ b/cmd/kubevpn/cmds/clone.go
@@ -119,7 +119,7 @@ func CmdClone(f cmdutil.Factory) *cobra.Command {
 				} else if err != nil {
 					return err
 				}
-				log.Print(recv.GetMessage())
+				fmt.Fprint(os.Stdout, recv.GetMessage())
 			}
 			util.Print(os.Stdout, "Now clone workloads running successfully on other cluster, enjoy it :)")
 			return nil
diff --git a/cmd/kubevpn/cmds/connect.go b/cmd/kubevpn/cmds/connect.go
index a794611a..8c92adb6 100644
--- a/cmd/kubevpn/cmds/connect.go
+++ b/cmd/kubevpn/cmds/connect.go
@@ -80,7 +80,7 @@ func CmdConnect(f cmdutil.Factory) *cobra.Command {
 				} else if err != nil {
 					return err
 				}
-				log.Print(recv.GetMessage())
+				fmt.Fprint(os.Stdout, recv.GetMessage())
 			}
 			util.Print(os.Stdout, "Now you can access resources in the kubernetes cluster, enjoy it :)")
 			// hangup
diff --git a/cmd/kubevpn/cmds/dev.go b/cmd/kubevpn/cmds/dev.go
index 0b3d7641..b95033b8 100644
--- a/cmd/kubevpn/cmds/dev.go
+++ b/cmd/kubevpn/cmds/dev.go
@@ -72,6 +72,7 @@ Startup your kubernetes workloads in local Docker container with same volume态e
 		Args:                  dockercli.RequiresMinArgs(1),
 		DisableFlagsInUseLine: true,
 		PreRunE: func(cmd *cobra.Command, args []string) error {
+			util.InitLogger(false)
 			// not support temporally
 			if devOptions.Engine == config.EngineGvisor {
 				return fmt.Errorf(`not support type engine: %s, support ("%s"|"%s")`, config.EngineGvisor, config.EngineMix, config.EngineRaw)
diff --git a/cmd/kubevpn/cmds/disconnect.go b/cmd/kubevpn/cmds/disconnect.go
index 8c69083c..ae4961e7 100644
--- a/cmd/kubevpn/cmds/disconnect.go
+++ b/cmd/kubevpn/cmds/disconnect.go
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"time"
 
 	"github.com/spf13/cobra"
 	"google.golang.org/grpc/codes"
@@ -24,34 +23,29 @@ func CmdDisconnect(f cmdutil.Factory) *cobra.Command {
 		Long:    templates.LongDesc(i18n.T(`Disconnect from kubernetes cluster network`)),
 		Example: templates.Examples(i18n.T(``)),
 		PreRunE: func(cmd *cobra.Command, args []string) (err error) {
-			t := time.Now()
 			err = daemon.StartupDaemon(cmd.Context())
-			fmt.Printf("exec prerun use %s\n", time.Now().Sub(t).String())
 			return err
 		},
 		RunE: func(cmd *cobra.Command, args []string) error {
-			now := time.Now()
 			client, err := daemon.GetClient(false).Disconnect(
 				cmd.Context(),
 				&rpc.DisconnectRequest{},
 			)
-			fmt.Printf("call api disconnect use %s\n", time.Now().Sub(now).String())
-			if err != nil {
-				return err
-			}
 			var resp *rpc.DisconnectResponse
 			for {
 				resp, err = client.Recv()
 				if err == io.EOF {
-					return nil
+					break
 				} else if err == nil {
 					fmt.Fprint(os.Stdout, resp.Message)
 				} else if code := status.Code(err); code == codes.DeadlineExceeded || code == codes.Canceled {
-					return nil
+					break
 				} else {
 					return err
 				}
 			}
+			fmt.Fprint(os.Stdout, "disconnect successfully")
+			return nil
 		},
 	}
 	return cmd
diff --git a/cmd/kubevpn/cmds/proxy.go b/cmd/kubevpn/cmds/proxy.go
index 7396ca1d..02acf8c8 100644
--- a/cmd/kubevpn/cmds/proxy.go
+++ b/cmd/kubevpn/cmds/proxy.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"time"
 
 	log "github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
@@ -122,14 +121,9 @@ func CmdProxy(f cmdutil.Factory) *cobra.Command {
 
 			// leave from cluster resources
 			<-cmd.Context().Done()
-			now := time.Now()
 			stream, err := cli.Leave(context.Background(), &rpc.LeaveRequest{
 				Workloads: args,
 			})
-			fmt.Printf("call api leave use %s\n", time.Now().Sub(now).String())
-			if err != nil {
-				return err
-			}
 			var resp *rpc.LeaveResponse
 			for {
 				resp, err = stream.Recv()
diff --git a/cmd/kubevpn/cmds/quit.go b/cmd/kubevpn/cmds/quit.go
index 6d5f2c19..42009d5c 100644
--- a/cmd/kubevpn/cmds/quit.go
+++ b/cmd/kubevpn/cmds/quit.go
@@ -29,6 +29,7 @@ func CmdQuit(f cmdutil.Factory) *cobra.Command {
 		RunE: func(cmd *cobra.Command, args []string) error {
 			_ = quit(cmd.Context(), true)
 			_ = quit(cmd.Context(), false)
+			fmt.Fprint(os.Stdout, "quit success")
 			return nil
 		},
 	}
diff --git a/cmd/kubevpn/cmds/reset.go b/cmd/kubevpn/cmds/reset.go
index f720515c..a3cee358 100644
--- a/cmd/kubevpn/cmds/reset.go
+++ b/cmd/kubevpn/cmds/reset.go
@@ -50,7 +50,7 @@ func CmdReset(factory cmdutil.Factory) *cobra.Command {
 			if err != nil {
 				log.Fatal(err)
 			}
-			fmt.Fprint(os.Stdout, "Done")
+			fmt.Fprint(os.Stdout, "done")
 		},
 	}
 
diff --git a/cmd/kubevpn/cmds/serve.go b/cmd/kubevpn/cmds/serve.go
index 54170292..f25c6b8e 100644
--- a/cmd/kubevpn/cmds/serve.go
+++ b/cmd/kubevpn/cmds/serve.go
@@ -38,7 +38,7 @@ func CmdServe(_ cmdutil.Factory) *cobra.Command {
 			defer func() {
 				err := handler.ReleaseIPIfNeeded()
 				if err != nil {
-					log.Error(err)
+					log.Errorf("release ip failed: %v", err)
 				}
 			}()
 			servers, err := handler.Parse(*route)
diff --git a/cmd/kubevpn/cmds/upgrade.go b/cmd/kubevpn/cmds/upgrade.go
index 8e811f81..5f0a85b6 100644
--- a/cmd/kubevpn/cmds/upgrade.go
+++ b/cmd/kubevpn/cmds/upgrade.go
@@ -34,7 +34,7 @@ func CmdUpgrade(_ cmdutil.Factory) *cobra.Command {
 			if err != nil {
 				log.Fatal(err)
 			}
-			fmt.Fprint(os.Stdout, "Done")
+			fmt.Fprint(os.Stdout, "done")
 		},
 	}
 	return cmd
diff --git a/pkg/core/tunendpoint.go b/pkg/core/tunendpoint.go
index 01f2a41b..196b2305 100755
--- a/pkg/core/tunendpoint.go
+++ b/pkg/core/tunendpoint.go
@@ -104,7 +104,12 @@ func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
 			bytes := config.LPool.Get().([]byte)[:]
 			read, err := e.tun.Read(bytes[:])
 			if err != nil {
-				log.Warningln(err)
+				// if context is still going
+				if e.ctx.Err() == nil {
+					log.Errorf("[TUN]: read from tun failed: %s", err.Error())
+				} else {
+					log.Info("tun device closed")
+				}
 				return
 			}
 			if read == 0 {
@@ -122,7 +127,7 @@ func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
 				protocol = header.IPv4ProtocolNumber
 				ipHeader, err := ipv4.ParseHeader(bytes[:read])
 				if err != nil {
-					log.Error(err)
+					log.Errorf("parse ipv4 header failed: %s", err.Error())
 					continue
 				}
 				ipProtocol = ipHeader.Protocol
@@ -132,7 +137,7 @@ func (e *tunEndpoint) Attach(dispatcher stack.NetworkDispatcher) {
 				protocol = header.IPv6ProtocolNumber
 				ipHeader, err := ipv6.ParseHeader(bytes[:read])
 				if err != nil {
-					log.Error(err)
+					log.Errorf("parse ipv6 header failed: %s", err.Error())
 					continue
 				}
 				ipProtocol = ipHeader.NextHeader
diff --git a/pkg/core/tunhandler.go b/pkg/core/tunhandler.go
index 8ac84d93..c9a99896 100644
--- a/pkg/core/tunhandler.go
+++ b/pkg/core/tunhandler.go
@@ -260,14 +260,14 @@ func heartbeats(in chan<- *DataElem) {
 		if bytes == nil {
 			bytes, err = genICMPPacket(srcIPv4, config.RouterIP)
 			if err != nil {
-				log.Error(err)
+				log.Errorf("generate ipv4 packet error: %s", err.Error())
 				continue
 			}
 		}
 		if bytes6 == nil {
 			bytes6, err = genICMPPacketIPv6(srcIPv6, config.RouterIP6)
 			if err != nil {
-				log.Error(err)
+				log.Errorf("generate ipv6 packet error: %s", err.Error())
 				continue
 			}
 		}
@@ -353,7 +353,7 @@ func (d *Device) Start(ctx context.Context) {
 
 	select {
 	case err := <-d.chExit:
-		log.Error(err)
+		log.Errorf("device exit: %s", err.Error())
 		return
 	case <-ctx.Done():
 		return
diff --git a/pkg/core/tunhandlerclient.go b/pkg/core/tunhandlerclient.go
index 5d70c401..30b0db7f 100644
--- a/pkg/core/tunhandlerclient.go
+++ b/pkg/core/tunhandlerclient.go
@@ -133,7 +133,7 @@ func (d *ClientDevice) Start(ctx context.Context) {
 
 	select {
 	case err := <-d.chExit:
-		log.Error(err)
+		log.Errorf("[tun-client]: %v", err)
 		return
 	case <-ctx.Done():
 		return
diff --git a/pkg/cp/cp.go b/pkg/cp/cp.go
index f41d3517..742343da 100644
--- a/pkg/cp/cp.go
+++ b/pkg/cp/cp.go
@@ -183,7 +183,7 @@ func (o *CopyOptions) copyToPod(src, dest fileSpec, options *exec.ExecOptions) e
 	go func(src localPath, dest remotePath, writer io.WriteCloser) {
 		defer writer.Close()
 		if err := makeTar(src, dest, writer); err != nil {
-			log.Error(err)
+			log.Errorf("Error making tar: %v", err)
 		}
 	}(srcFile, destFile, writer)
 	var cmdArr []string
@@ -266,7 +266,7 @@ func (t *TarPipe) initReadFrom(n uint64) {
 	go func() {
 		defer t.outStream.Close()
 		if err := t.o.execute(options); err != nil {
-			log.Error(err)
+			log.Errorf("Error executing command: %v", err)
 		}
 	}()
 }
diff --git a/pkg/daemon/action/clone.go b/pkg/daemon/action/clone.go
index e5687618..b3ed611b 100644
--- a/pkg/daemon/action/clone.go
+++ b/pkg/daemon/action/clone.go
@@ -87,11 +87,13 @@ func (svr *Server) Clone(req *rpc.CloneRequest, resp rpc.Daemon_CloneServer) err
 	f := InitFactory(req.KubeconfigBytes, req.Namespace)
 	err = options.InitClient(f)
 	if err != nil {
+		log.Errorf("init client failed: %v", err)
 		return err
 	}
 	config.Image = req.Image
 	err = options.DoClone(resp.Context())
 	if err != nil {
+		log.Errorf("clone workloads failed: %v", err)
 		return err
 	}
 	svr.clone = options
@@ -112,13 +114,3 @@ func (r *cloneWarp) Write(p []byte) (n int, err error) {
 func newCloneWarp(server rpc.Daemon_CloneServer) io.Writer {
 	return &cloneWarp{server: server}
 }
-
-//type daemonConnectServer struct {
-//	out io.Writer
-//	grpc.ServerStream
-//}
-//
-//func (d *daemonConnectServer) Send(response *rpc.ConnectResponse) error {
-//	_, err := d.out.Write([]byte(response.Message))
-//	return err
-//}
diff --git a/pkg/daemon/action/connect.go b/pkg/daemon/action/connect.go
index 55fd872f..a6d9d1af 100644
--- a/pkg/daemon/action/connect.go
+++ b/pkg/daemon/action/connect.go
@@ -96,7 +96,7 @@ func (svr *Server) Connect(req *rpc.ConnectRequest, resp rpc.Daemon_ConnectServe
 	config.Image = req.Image
 	err = svr.connect.DoConnect(sshCtx)
 	if err != nil {
-		log.Error(err)
+		log.Infof("do connect error: %v", err)
 		svr.connect.Cleanup()
 		return err
 	}
@@ -150,7 +150,7 @@ func (svr *Server) redirectToSudoDaemon(req *rpc.ConnectRequest, resp rpc.Daemon
 		)
 		if err == nil && isSameCluster && svr.connect.Equal(connect) {
 			// same cluster, do nothing
-			log.Debugf("already connect to cluster")
+			log.Infof("already connect to cluster")
 			return nil
 		}
 	}
diff --git a/pkg/daemon/action/disconnect.go b/pkg/daemon/action/disconnect.go
index 8ccbf2ba..b7c1e8c6 100644
--- a/pkg/daemon/action/disconnect.go
+++ b/pkg/daemon/action/disconnect.go
@@ -24,9 +24,7 @@ func (svr *Server) Disconnect(req *rpc.DisconnectRequest, resp rpc.Daemon_Discon
 	for {
 		recv, err = connResp.Recv()
 		if err == io.EOF {
-			svr.t = time.Time{}
-			svr.connect = nil
-			return nil
+			break
 		} else if err != nil {
 			return err
 		}
diff --git a/pkg/daemon/action/leave.go b/pkg/daemon/action/leave.go
index 18cc5934..db8f04f4 100644
--- a/pkg/daemon/action/leave.go
+++ b/pkg/daemon/action/leave.go
@@ -19,6 +19,7 @@ func (svr *Server) Leave(req *rpc.LeaveRequest, resp rpc.Daemon_LeaveServer) err
 	log.SetOutput(out)
 	log.SetLevel(log.InfoLevel)
 	if svr.connect == nil {
+		log.Infof("not proxy any resource in cluster")
 		return fmt.Errorf("not proxy any resource in cluster")
 	}
 
@@ -27,10 +28,13 @@ func (svr *Server) Leave(req *rpc.LeaveRequest, resp rpc.Daemon_LeaveServer) err
 	maps := svr.connect.GetClientset().CoreV1().ConfigMaps(namespace)
 	for _, workload := range req.GetWorkloads() {
 		// add rollback func to remove envoy config
+		log.Infof("leave workload %s", workload)
 		err := handler.UnPatchContainer(factory, maps, namespace, workload, svr.connect.GetLocalTunIPv4())
 		if err != nil {
-			log.Error(err)
+			log.Errorf("leave workload %s failed: %v", workload, err)
+			continue
 		}
+		log.Infof("leave workload %s success", workload)
 	}
 	return nil
 }
diff --git a/pkg/daemon/action/list.go b/pkg/daemon/action/list.go
index 493d0257..2e36684f 100644
--- a/pkg/daemon/action/list.go
+++ b/pkg/daemon/action/list.go
@@ -15,7 +15,7 @@ import (
 )
 
 func (svr *Server) List(ctx context.Context, req *rpc.ListRequest) (*rpc.ListResponse, error) {
-	if svr.connect == nil {
+	if svr.connect == nil || svr.connect.GetClientset() == nil {
 		return nil, fmt.Errorf("not connect to any cluster")
 	}
 	mapInterface := svr.connect.GetClientset().CoreV1().ConfigMaps(svr.connect.Namespace)
diff --git a/pkg/daemon/action/proxy.go b/pkg/daemon/action/proxy.go
index 22cc538b..42a5e62c 100644
--- a/pkg/daemon/action/proxy.go
+++ b/pkg/daemon/action/proxy.go
@@ -74,9 +74,9 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) e
 		)
 		if err == nil && isSameCluster && svr.connect.Equal(connect) {
 			// same cluster, do nothing
-			log.Debugf("already connect to cluster")
+			log.Infof("already connect to cluster")
 		} else {
-			log.Debugf("try to disconnect from another cluster")
+			log.Infof("try to disconnect from another cluster")
 			var disconnect rpc.Daemon_DisconnectClient
 			disconnect, err = daemonClient.Disconnect(ctx, &rpc.DisconnectRequest{})
 			if err != nil {
@@ -95,11 +95,12 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) e
 					return err
 				}
 			}
+			log.SetOutput(out)
 		}
 	}
 
 	if svr.connect == nil {
-		log.Debugf("connectting to cluster")
+		log.Infof("connectting to cluster")
 		var connResp rpc.Daemon_ConnectClient
 		connResp, err = daemonClient.Connect(ctx, req)
 		if err != nil {
@@ -118,14 +119,14 @@ func (svr *Server) Proxy(req *rpc.ConnectRequest, resp rpc.Daemon_ProxyServer) e
 				return err
 			}
 		}
+		log.SetOutput(out)
 	}
 
-	log.Debugf("proxy resource...")
 	err = svr.connect.CreateRemoteInboundPod(ctx)
 	if err != nil {
+		log.Errorf("create remote inbound pod failed: %s", err.Error())
 		return err
 	}
-	log.Debugf("proxy resource done")
 	util.Print(out, "Now you can access resources in the kubernetes cluster, enjoy it :)")
 	return nil
 }
diff --git a/pkg/daemon/action/quit.go b/pkg/daemon/action/quit.go
index e137170c..88eeb75a 100644
--- a/pkg/daemon/action/quit.go
+++ b/pkg/daemon/action/quit.go
@@ -16,12 +16,14 @@ func (svr *Server) Quit(req *rpc.QuitRequest, resp rpc.Daemon_QuitServer) error
 	log.SetOutput(io.MultiWriter(newQuitWarp(resp), svr.LogFile))
 	log.SetLevel(log.InfoLevel)
 	if svr.connect != nil {
+		log.Info("quit: cleanup connection")
 		svr.connect.Cleanup()
 	}
 	if svr.Cancel != nil {
 		svr.Cancel()
 	}
 	if svr.clone != nil {
+		log.Info("quit: cleanup clone")
 		err := svr.clone.Cleanup(nil)
 		if err != nil {
 			log.Errorf("quit: cleanup clone failed: %v", err)
diff --git a/pkg/daemon/action/stop.go b/pkg/daemon/action/stop.go
index c107925f..4a9c90b1 100644
--- a/pkg/daemon/action/stop.go
+++ b/pkg/daemon/action/stop.go
@@ -10,9 +10,6 @@ import (
 )
 
 func (svr *Server) Stop(req *rpc.QuitRequest, resp rpc.Daemon_QuitServer) error {
-	if svr.connect == nil {
-		return nil
-	}
 	defer func() {
 		log.SetOutput(svr.LogFile)
 		log.SetLevel(log.DebugLevel)
@@ -21,6 +18,11 @@ func (svr *Server) Stop(req *rpc.QuitRequest, resp rpc.Daemon_QuitServer) error
 	log.SetOutput(out)
 	log.SetLevel(log.InfoLevel)
 
+	if svr.connect == nil {
+		log.Info("stop: no connection")
+		return nil
+	}
+
 	svr.connect.Cleanup()
 	svr.t = time.Time{}
 	svr.connect = nil
diff --git a/pkg/daemon/client.go b/pkg/daemon/client.go
index 79be05d9..0fe5096c 100644
--- a/pkg/daemon/client.go
+++ b/pkg/daemon/client.go
@@ -35,41 +35,39 @@ func GetClient(isSudo bool) rpc.DaemonClient {
 		return daemonClient
 	}
 
-	sudo := ""
+	name := "daemon"
 	if isSudo {
-		sudo = "sudo"
+		name = "sudo daemon"
 	}
 	ctx := context.Background()
 	conn, err := grpc.DialContext(ctx, "unix:"+GetSockPath(isSudo), grpc.WithTransportCredentials(insecure.NewCredentials()))
 	if err != nil {
-		log.Errorf("cannot connect to %s server: %v", sudo, err)
-		fmt.Println(fmt.Errorf("cannot connect to %s server: %v", sudo, err))
+		log.Errorf("cannot connect to %s: %v", name, err)
 		return nil
 	}
-	c := rpc.NewDaemonClient(conn)
-	now := time.Now()
+	cli := rpc.NewDaemonClient(conn)
 	healthClient := grpc_health_v1.NewHealthClient(conn)
 	var response *grpc_health_v1.HealthCheckResponse
 	response, err = healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
 	if err != nil {
-		log.Printf("%v", err)
+		log.Errorf("%v", err)
 		return nil
 	}
-	fmt.Println(response.Status, sudo, time.Now().Sub(now).String())
-	now = time.Now()
-	_, err = c.Status(ctx, &rpc.StatusRequest{})
-	fmt.Printf("call %s api status use %s\n", sudo, time.Now().Sub(now))
+	if response.Status != grpc_health_v1.HealthCheckResponse_SERVING {
+		log.Error(fmt.Sprintf("%s is not health", name), "status", response.Status)
+		return nil
+	}
+	_, err = cli.Status(ctx, &rpc.StatusRequest{})
 	if err != nil {
-		fmt.Println(fmt.Errorf("cannot call %s api status: %v", sudo, err))
-		log.Error(err)
+		log.Error("cannot call api status", "err", err)
 		return nil
 	}
 	if isSudo {
-		sudoDaemonClient = c
+		sudoDaemonClient = cli
 	} else {
-		daemonClient = c
+		daemonClient = cli
 	}
-	return c
+	return cli
 }
 
 func GetSockPath(isSudo bool) string {
@@ -119,7 +117,7 @@ func runDaemon(ctx context.Context, isSudo bool) error {
 	var p *os.Process
 	if p, err = os.FindProcess(pid); err == nil {
 		if err = p.Kill(); err != nil && err != os.ErrProcessDone {
-			log.Error(err)
+			log.Error("kill process", "err", err)
 		}
 		_, _ = p.Wait()
 	}
diff --git a/pkg/daemon/daemon.go b/pkg/daemon/daemon.go
index ca51ce51..54f01ddd 100644
--- a/pkg/daemon/daemon.go
+++ b/pkg/daemon/daemon.go
@@ -32,7 +32,7 @@ type SvrOption struct {
 func (o *SvrOption) Start(ctx context.Context) error {
 	file, err := os.OpenFile(action.GetDaemonLogPath(), os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("open log file error: %v", err)
 		return err
 	}
 	defer file.Close()
diff --git a/pkg/dev/main.go b/pkg/dev/main.go
index 24507e3c..05080dfc 100644
--- a/pkg/dev/main.go
+++ b/pkg/dev/main.go
@@ -494,7 +494,7 @@ func (d *Options) doConnect(ctx context.Context, f cmdutil.Factory, transferImag
 			} else if err != nil {
 				return cancel, err
 			}
-			log.Print(recv.Message)
+			log.Infof(recv.Message)
 		}
 
 	case ConnectModeContainer:
@@ -553,7 +553,7 @@ func disconnect(ctx context.Context, daemonClient rpc.DaemonClient) func() {
 	return func() {
 		resp, err := daemonClient.Disconnect(ctx, &rpc.DisconnectRequest{})
 		if err != nil {
-			log.Error(err)
+			log.Errorf("disconnect error: %v", err)
 			return
 		}
 		for {
@@ -561,10 +561,10 @@ func disconnect(ctx context.Context, daemonClient rpc.DaemonClient) func() {
 			if err == io.EOF {
 				return
 			} else if err != nil {
-				log.Error(err)
+				log.Errorf("disconnect error: %v", err)
 				return
 			}
-			log.Print(msg.Message)
+			log.Info(msg.Message)
 		}
 	}
 }
diff --git a/pkg/dns/dns_unix.go b/pkg/dns/dns_unix.go
index e7ddbf64..3246c629 100644
--- a/pkg/dns/dns_unix.go
+++ b/pkg/dns/dns_unix.go
@@ -43,7 +43,7 @@ func usingResolver(clientConfig *miekgdns.ClientConfig, ns []string) {
 	var err error
 	_ = os.RemoveAll(filepath.Join("/", "etc", "resolver"))
 	if err = os.MkdirAll(filepath.Join("/", "etc", "resolver"), fs.ModePerm); err != nil {
-		log.Error(err)
+		log.Errorf("create resolver error: %v", err)
 	}
 	config := miekgdns.ClientConfig{
 		Servers: clientConfig.Servers,
@@ -59,7 +59,7 @@ func usingResolver(clientConfig *miekgdns.ClientConfig, ns []string) {
 		var port int
 		port, err = util.GetAvailableUDPPortOrDie()
 		if err != nil {
-			log.Error(err)
+			log.Errorf("get available port error: %v", err)
 			return
 		}
 		go func(port int, clientConfig *miekgdns.ClientConfig) {
diff --git a/pkg/handler/cleaner.go b/pkg/handler/cleaner.go
index d44785c1..74227d39 100644
--- a/pkg/handler/cleaner.go
+++ b/pkg/handler/cleaner.go
@@ -75,7 +75,7 @@ func (c *ConnectOptions) Cleanup() {
 	// leave proxy resources
 	err := c.LeaveProxyResources(context.Background())
 	if err != nil {
-		log.Error(err)
+		log.Errorf("leave proxy resources error: %v", err)
 	}
 	if c.cancel != nil {
 		c.cancel()
diff --git a/pkg/handler/clone.go b/pkg/handler/clone.go
index ca887de1..1926ba24 100644
--- a/pkg/handler/clone.go
+++ b/pkg/handler/clone.go
@@ -139,6 +139,7 @@ func (d *CloneOptions) DoClone(ctx context.Context) error {
 	}
 
 	for _, workload := range d.Workloads {
+		log.Infof("clone workload %s", workload)
 		var object *runtimeresource.Info
 		object, err = util.GetUnstructuredObject(d.factory, d.Namespace, workload)
 		if err != nil {
@@ -723,8 +724,10 @@ func (d *CloneOptions) Cleanup(workloads []string) error {
 		workloads = d.Workloads
 	}
 	for _, workload := range workloads {
+		log.Infof("start to clean up clone workload: %s", workload)
 		object, err := util.GetUnstructuredObject(d.factory, d.Namespace, workload)
 		if err != nil {
+			log.Errorf("get unstructured object error: %s", err.Error())
 			return err
 		}
 		labelsMap := map[string]string{
@@ -740,15 +743,18 @@ func (d *CloneOptions) Cleanup(workloads []string) error {
 		var client dynamic.Interface
 		client, err = d.targetFactory.DynamicClient()
 		if err != nil {
+			log.Errorf("get dynamic client error: %s", err.Error())
 			return err
 		}
 		for _, cloneName := range controller.UnsortedList() {
 			err = client.Resource(object.Mapping.Resource).Namespace(d.TargetNamespace).Delete(context.Background(), cloneName, metav1.DeleteOptions{})
 			if !apierrors.IsNotFound(err) {
+				log.Errorf("delete clone object error: %s", err.Error())
 				return err
 			}
 			log.Infof("delete clone object: %s", cloneName)
 		}
+		log.Infof("clean up clone workload: %s successfully", workload)
 	}
 	return nil
 }
diff --git a/pkg/handler/connect.go b/pkg/handler/connect.go
index 795b0e85..4238bdc7 100644
--- a/pkg/handler/connect.go
+++ b/pkg/handler/connect.go
@@ -145,6 +145,7 @@ func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context) (err error)
 	}
 
 	for _, workload := range c.Workloads {
+		log.Infof("start to create remote inbound pod for %s", workload)
 		configInfo := util.PodRouteConfig{
 			LocalTunIPv4: c.localTunIPv4.IP.String(),
 			LocalTunIPv6: c.localTunIPv6.IP.String(),
@@ -156,8 +157,10 @@ func (c *ConnectOptions) CreateRemoteInboundPod(ctx context.Context) (err error)
 			err = InjectVPNSidecar(ctx, c.factory, c.Namespace, workload, configInfo)
 		}
 		if err != nil {
+			log.Errorf("create remote inbound pod for %s failed: %s", workload, err.Error())
 			return err
 		}
+		log.Infof("create remote inbound pod for %s successfully", workload)
 	}
 	return
 }
@@ -190,13 +193,17 @@ func (c *ConnectOptions) DoConnect(ctx context.Context) (err error) {
 	c.ctx, c.cancel = context.WithCancel(ctx)
 
 	_ = os.Setenv(config.EnvKubeVPNTransportEngine, string(c.Engine))
+	log.Info("start to connect")
 	if err = c.InitDHCP(c.ctx); err != nil {
+		log.Errorf("init dhcp failed: %s", err.Error())
 		return
 	}
 	c.addCleanUpResourceHandler()
 	if err = c.getCIDR(c.ctx); err != nil {
+		log.Errorf("get cidr failed: %s", err.Error())
 		return
 	}
+	log.Info("get cidr successfully")
 	if err = createOutboundPod(c.ctx, c.factory, c.clientset, c.Namespace); err != nil {
 		return
 	}
@@ -233,16 +240,20 @@ func (c *ConnectOptions) DoConnect(ctx context.Context) (err error) {
 	core.GvisorTCPForwardAddr = fmt.Sprintf("tcp://127.0.0.1:%d", gvisorTCPForwardPort)
 	core.GvisorUDPForwardAddr = fmt.Sprintf("tcp://127.0.0.1:%d", gvisorUDPForwardPort)
 	if err = c.startLocalTunServe(c.ctx, forward); err != nil {
+		log.Errorf("start local tun service failed: %s", err.Error())
 		return
 	}
 	if err = c.addRouteDynamic(c.ctx); err != nil {
+		log.Errorf("add route dynamic failed: %s", err.Error())
 		return
 	}
 	c.deleteFirewallRule(c.ctx)
 	if err = c.addExtraRoute(c.ctx); err != nil {
+		log.Errorf("add extra route failed: %s", err.Error())
 		return
 	}
 	if err = c.setupDNS(c.ctx); err != nil {
+		log.Errorf("set up dns failed: %s", err.Error())
 		return
 	}
 	go c.heartbeats(c.ctx)
@@ -600,7 +611,7 @@ func (c *ConnectOptions) setupDNS(ctx context.Context) error {
 	const port = 53
 	pod, err := c.GetRunningPodList(ctx)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("get running pod list failed, err: %v", err)
 		return err
 	}
 	relovConf, err := dns.GetDNSServiceIPFromPod(c.clientset, c.restclient, c.config, pod[0].GetName(), c.Namespace)
diff --git a/pkg/handler/dhcp.go b/pkg/handler/dhcp.go
index d593a1bb..14b30515 100644
--- a/pkg/handler/dhcp.go
+++ b/pkg/handler/dhcp.go
@@ -109,6 +109,9 @@ func (d *DHCPManager) RentIPRandom(ctx context.Context) (*net.IPNet, *net.IPNet,
 }
 
 func (d *DHCPManager) ReleaseIP(ctx context.Context, ips ...net.IP) error {
+	if len(ips) == 0 {
+		return nil
+	}
 	return d.updateDHCPConfigMap(ctx, func(ipv4 *ipallocator.Range, ipv6 *ipallocator.Range) error {
 		for _, ip := range ips {
 			var use *ipallocator.Range
diff --git a/pkg/handler/envoy.go b/pkg/handler/envoy.go
index cc5c7a77..3280296e 100644
--- a/pkg/handler/envoy.go
+++ b/pkg/handler/envoy.go
@@ -56,7 +56,7 @@ func InjectVPNAndEnvoySidecar(ctx1 context.Context, factory cmdutil.Factory, cli
 
 	err = addEnvoyConfig(clientset, nodeID, c, headers, port)
 	if err != nil {
-		log.Warnln(err)
+		log.Errorf("add envoy config error: %v", err)
 		return err
 	}
 
@@ -73,6 +73,7 @@ func InjectVPNAndEnvoySidecar(ctx1 context.Context, factory cmdutil.Factory, cli
 		//	log.Error(err)
 		//	}
 		//})
+		log.Infof("workload %s/%s has already been injected with sidecar", namespace, workload)
 		return nil
 	}
 	// (1) add mesh container
@@ -80,6 +81,7 @@ func InjectVPNAndEnvoySidecar(ctx1 context.Context, factory cmdutil.Factory, cli
 	var b []byte
 	b, err = k8sjson.Marshal(restorePatch)
 	if err != nil {
+		log.Error("marshal patch error: %v", err)
 		return err
 	}
 
@@ -104,18 +106,10 @@ func InjectVPNAndEnvoySidecar(ctx1 context.Context, factory cmdutil.Factory, cli
 	}
 	_, err = helper.Patch(object.Namespace, object.Name, types.JSONPatchType, bytes, &metav1.PatchOptions{})
 	if err != nil {
-		log.Warnf("error while path resource: %s %s, err: %v", object.Mapping.GroupVersionKind.GroupKind().String(), object.Name, err)
-		return err
-	}
-
-	//RollbackFuncList = append(RollbackFuncList, func() {
-	//	if err := UnPatchContainer(factory, clientset, namespace, workload, c.LocalTunIPv4); err != nil {
-	//		log.Error(err)
-	//	}
-	//})
-	if err != nil {
+		log.Errorf("error while path resource: %s %s, err: %v", object.Mapping.GroupVersionKind.GroupKind().String(), object.Name, err)
 		return err
 	}
+	log.Infof("patch workload %s/%s with sidecar", namespace, workload)
 	err = util.RolloutStatus(ctx1, factory, namespace, workload, time.Minute*60)
 	return err
 }
@@ -123,12 +117,14 @@ func InjectVPNAndEnvoySidecar(ctx1 context.Context, factory cmdutil.Factory, cli
 func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterface, namespace, workload string, localTunIPv4 string) error {
 	object, err := util.GetUnstructuredObject(factory, namespace, workload)
 	if err != nil {
+		log.Errorf("get unstructured object error: %v", err)
 		return err
 	}
 
 	u := object.Object.(*unstructured.Unstructured)
 	templateSpec, depth, err := util.GetPodTemplateSpecPath(u)
 	if err != nil {
+		log.Errorf("get template spec path error: %v", err)
 		return err
 	}
 
@@ -137,7 +133,7 @@ func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterfa
 	var empty bool
 	empty, err = removeEnvoyConfig(mapInterface, nodeID, localTunIPv4)
 	if err != nil {
-		log.Warnln(err)
+		log.Errorf("remove envoy config error: %v", err)
 		return err
 	}
 
@@ -156,6 +152,7 @@ func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterfa
 	helper := pkgresource.NewHelper(object.Client, object.Mapping)
 	// pod without controller
 	if len(depth) == 0 {
+		log.Infof("workload %s/%s is not controlled by any controller", namespace, workload)
 		delete(templateSpec.ObjectMeta.GetAnnotations(), config.KubeVPNRestorePatchKey)
 		pod := &v1.Pod{ObjectMeta: templateSpec.ObjectMeta, Spec: templateSpec.Spec}
 		CleanupUselessInfo(pod)
@@ -163,6 +160,7 @@ func UnPatchContainer(factory cmdutil.Factory, mapInterface v12.ConfigMapInterfa
 		return err
 	}
 
+	log.Infof("workload %s/%s is controlled by a controller", namespace, workload)
 	// resource with controller, like deployment,statefulset
 	var bytes []byte
 	bytes, err = json.Marshal([]P{
diff --git a/pkg/handler/function_test.go b/pkg/handler/function_test.go
index 91b588e1..8a3d0679 100644
--- a/pkg/handler/function_test.go
+++ b/pkg/handler/function_test.go
@@ -208,7 +208,7 @@ func fullDomain(t *testing.T) {
 }
 
 func dialUDP(t *testing.T) {
-	port := util.GetAvailableUDPPortOrDie()
+	port, _ := util.GetAvailableUDPPortOrDie()
 	go server(port)
 
 	list, err := clientset.CoreV1().Pods(namespace).List(context.Background(), v1.ListOptions{
diff --git a/pkg/handler/remote.go b/pkg/handler/remote.go
index dd49e7b0..fb444776 100644
--- a/pkg/handler/remote.go
+++ b/pkg/handler/remote.go
@@ -73,8 +73,10 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 	log.Infoln("traffic manager not exist, try to create it...")
 
 	// 1) label namespace
+	log.Infof("label namespace %s", namespace)
 	ns, err := clientset.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
 	if err != nil {
+		log.Errorf("get namespace error: %s", err.Error())
 		return err
 	}
 	if ns.Labels == nil {
@@ -83,10 +85,12 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 	ns.Labels["ns"] = namespace
 	_, err = clientset.CoreV1().Namespaces().Update(ctx, ns, metav1.UpdateOptions{})
 	if err != nil {
+		log.Infof("label namespace error: %s", err.Error())
 		return err
 	}
 
 	// 2) create serviceAccount
+	log.Infof("create serviceAccount %s", config.ConfigMapPodTrafficManager)
 	_, err = clientset.CoreV1().ServiceAccounts(namespace).Create(ctx, &v1.ServiceAccount{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: config.ConfigMapPodTrafficManager,
@@ -95,10 +99,12 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 		AutomountServiceAccountToken: pointer.Bool(true),
 	}, metav1.CreateOptions{})
 	if err != nil {
+		log.Infof("create serviceAccount error: %s", err.Error())
 		return err
 	}
 
 	// 3) create roles
+	log.Infof("create roles %s", config.ConfigMapPodTrafficManager)
 	_, err = clientset.RbacV1().Roles(namespace).Create(ctx, &rbacv1.Role{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: config.ConfigMapPodTrafficManager,
@@ -112,10 +118,12 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 		}},
 	}, metav1.CreateOptions{})
 	if err != nil {
+		log.Errorf("create roles error: %s", err.Error())
 		return err
 	}
 
 	// 4) create roleBinding
+	log.Infof("create roleBinding %s", config.ConfigMapPodTrafficManager)
 	_, err = clientset.RbacV1().RoleBindings(namespace).Create(ctx, &rbacv1.RoleBinding{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: config.ConfigMapPodTrafficManager,
@@ -134,9 +142,12 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 		},
 	}, metav1.CreateOptions{})
 	if err != nil {
+		log.Errorf("create roleBinding error: %s", err.Error())
 		return err
 	}
 
+	// 5) create service
+	log.Infof("create service %s", config.ConfigMapPodTrafficManager)
 	udp8422 := "8422-for-udp"
 	tcp10800 := "10800-for-tcp"
 	tcp9002 := "9002-for-envoy"
@@ -173,6 +184,7 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 		},
 	}, metav1.CreateOptions{})
 	if err != nil {
+		log.Errorf("create service error: %s", err.Error())
 		return err
 	}
 
@@ -201,6 +213,7 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 	var crt, key []byte
 	crt, key, err = cert.GenerateSelfSignedCertKey(domain, nil, nil)
 	if err != nil {
+		log.Errorf("generate self signed cert and key error: %s", err.Error())
 		return err
 	}
 
@@ -221,9 +234,12 @@ func createOutboundPod(ctx context.Context, factory cmdutil.Factory, clientset *
 
 	_, err = clientset.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
 	if err != nil && !k8serrors.IsAlreadyExists(err) {
+		log.Errorf("create secret error: %s", err.Error())
 		return err
 	}
 
+	// 6) create deployment
+	log.Infof("create deployment %s", config.ConfigMapPodTrafficManager)
 	deployment := &appsv1.Deployment{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: config.ConfigMapPodTrafficManager,
@@ -376,10 +392,12 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:1080
 		LabelSelector: fields.OneTermEqualSelector("app", config.ConfigMapPodTrafficManager).String(),
 	})
 	if err != nil {
+		log.Errorf("Failed to create watch for %s: %v", config.ConfigMapPodTrafficManager, err)
 		return err
 	}
 	defer watchStream.Stop()
 	if _, err = clientset.AppsV1().Deployments(namespace).Create(ctx, deployment, metav1.CreateOptions{}); err != nil {
+		log.Errorf("Failed to create deployment for %s: %v", config.ConfigMapPodTrafficManager, err)
 		return err
 	}
 	var ok bool
@@ -390,6 +408,7 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:1080
 			LabelSelector: fields.OneTermEqualSelector("app", config.ConfigMapPodTrafficManager).String(),
 		})
 		if err != nil {
+			log.Errorf("Failed to list pods for %s: %v", config.ConfigMapPodTrafficManager, err)
 			return
 		}
 
@@ -423,9 +442,12 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:1080
 			}
 		}, time.Second*3)
 	if !ok {
+		log.Errorf("wait pod %s to be ready timeout", config.ConfigMapPodTrafficManager)
 		return errors.New(fmt.Sprintf("wait pod %s to be ready timeout", config.ConfigMapPodTrafficManager))
 	}
 
+	// 7) create mutatingWebhookConfigurations
+	log.Infof("Creating mutatingWebhook_configuration for %s", config.ConfigMapPodTrafficManager)
 	_, err = clientset.AdmissionregistrationV1().MutatingWebhookConfigurations().Create(ctx, &admissionv1.MutatingWebhookConfiguration{
 		ObjectMeta: metav1.ObjectMeta{
 			Name: config.ConfigMapPodTrafficManager + "." + namespace,
@@ -465,13 +487,14 @@ kubevpn serve -L "tcp://:10800" -L "tun://:8422?net=${TunIPv4}" -L "gtcp://:1080
 	}
 	_, err = updateRefCount(ctx, clientset.CoreV1().ConfigMaps(namespace), config.ConfigMapPodTrafficManager, 1)
 	if err != nil {
+		log.Errorf("Failed to update ref count for %s: %v", config.ConfigMapPodTrafficManager, err)
 		return
 	}
 	return
 }
 
-func InjectVPNSidecar(ctx1 context.Context, factory cmdutil.Factory, namespace, workloads string, c util.PodRouteConfig) error {
-	object, err := util.GetUnstructuredObject(factory, namespace, workloads)
+func InjectVPNSidecar(ctx1 context.Context, factory cmdutil.Factory, namespace, workload string, c util.PodRouteConfig) error {
+	object, err := util.GetUnstructuredObject(factory, namespace, workload)
 	if err != nil {
 		return err
 	}
@@ -491,6 +514,7 @@ func InjectVPNSidecar(ctx1 context.Context, factory cmdutil.Factory, namespace,
 
 	// pods without controller
 	if len(path) == 0 {
+		log.Infof("workload %s/%s is not controlled by any controller", namespace, workload)
 		podTempSpec.Spec.PriorityClassName = ""
 		for _, container := range podTempSpec.Spec.Containers {
 			container.LivenessProbe = nil
@@ -513,6 +537,7 @@ func InjectVPNSidecar(ctx1 context.Context, factory cmdutil.Factory, namespace,
 
 	} else // controllers
 	{
+		log.Infof("workload %s/%s is controlled by a controller", namespace, workload)
 		// remove probe
 		removePatch, restorePatch := patch(origin, path)
 		b, _ := json.Marshal(restorePatch)
@@ -536,7 +561,7 @@ func InjectVPNSidecar(ctx1 context.Context, factory cmdutil.Factory, namespace,
 		}
 
 		//RollbackFuncList = append(RollbackFuncList, func() {
-		//	if err = removeInboundContainer(factory, namespace, workloads); err != nil {
+		//	if err = removeInboundContainer(factory, namespace, workload); err != nil {
 		//		log.Error(err)
 		//	}
 		//	//b, _ := json.Marshal(restorePatch)
@@ -550,7 +575,7 @@ func InjectVPNSidecar(ctx1 context.Context, factory cmdutil.Factory, namespace,
 		return err
 	}
 	// todo not work?
-	err = util.RolloutStatus(ctx1, factory, namespace, workloads, time.Minute*60)
+	err = util.RolloutStatus(ctx1, factory, namespace, workload, time.Minute*60)
 	return err
 }
 
@@ -558,7 +583,7 @@ func CreateAfterDeletePod(factory cmdutil.Factory, p *v1.Pod, helper *pkgresourc
 	if _, err := helper.DeleteWithOptions(p.Namespace, p.Name, &metav1.DeleteOptions{
 		GracePeriodSeconds: pointer.Int64(0),
 	}); err != nil {
-		log.Error(err)
+		log.Errorf("error while delete resource: %s %s, ignore, err: %v", p.Namespace, p.Name, err)
 	}
 	if err := retry.OnError(wait.Backoff{
 		Steps: 10,
@@ -663,7 +688,7 @@ func patch(spec v1.PodTemplateSpec, path []string) (remove []P, restore []P) {
 		}
 		marshal, err := k8sjson.Marshal(p)
 		if err != nil {
-			log.Error(err)
+			log.Errorf("error while json marshal: %v", err)
 			return ""
 		}
 		return string(marshal)
@@ -720,7 +745,7 @@ func fromPatchToProbe(spec *v1.PodTemplateSpec, path []string, patch []P) {
 		if !ok {
 			marshal, err := k8sjson.Marshal(value)
 			if err != nil {
-				log.Error(err)
+				log.Errorf("error while json marshal: %v", err)
 				return nil
 			}
 			str = string(marshal)
@@ -728,7 +753,7 @@ func fromPatchToProbe(spec *v1.PodTemplateSpec, path []string, patch []P) {
 		var probe v1.Probe
 		err := k8sjson.Unmarshal([]byte(str), &probe)
 		if err != nil {
-			log.Error(err)
+			log.Errorf("error while json unmarsh: %v", err)
 			return nil
 		}
 		return &probe
diff --git a/pkg/handler/reset.go b/pkg/handler/reset.go
index 49cfcb3f..a2fc737c 100644
--- a/pkg/handler/reset.go
+++ b/pkg/handler/reset.go
@@ -2,13 +2,12 @@ package handler
 
 import (
 	"context"
-	"fmt"
-	corev1 "k8s.io/api/core/v1"
 	"strings"
 
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/client"
 	log "github.com/sirupsen/logrus"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"sigs.k8s.io/yaml"
@@ -23,7 +22,7 @@ import (
 func (c *ConnectOptions) Reset(ctx context.Context) error {
 	err := c.LeaveProxyResources(ctx)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("leave proxy resources error: %v", err)
 	}
 	cleanup(ctx, c.clientset, c.Namespace, config.ConfigMapPodTrafficManager, false)
 
@@ -55,13 +54,13 @@ func (c *ConnectOptions) LeaveProxyResources(ctx context.Context) (err error) {
 		return
 	}
 	if cm == nil || cm.Data == nil || len(cm.Data[config.KeyEnvoy]) == 0 {
-		err = fmt.Errorf("can not found proxy resources")
+		log.Infof("no proxy resources found")
 		return
 	}
 	var v = make([]*controlplane.Virtual, 0)
 	str := cm.Data[config.KeyEnvoy]
 	if err = yaml.Unmarshal([]byte(str), &v); err != nil {
-		log.Error(err)
+		log.Errorf("unmarshal envoy config error: %v", err)
 		return
 	}
 	localTunIPv4 := c.GetLocalTunIPv4()
@@ -69,11 +68,13 @@ func (c *ConnectOptions) LeaveProxyResources(ctx context.Context) (err error) {
 		// deployments.apps.ry-server --> deployments.apps/ry-server
 		lastIndex := strings.LastIndex(virtual.Uid, ".")
 		uid := virtual.Uid[:lastIndex] + "/" + virtual.Uid[lastIndex+1:]
+		log.Infof("leave resource: %s", uid)
 		err = UnPatchContainer(c.factory, c.clientset.CoreV1().ConfigMaps(c.Namespace), c.Namespace, uid, localTunIPv4)
 		if err != nil {
-			log.Error(err)
+			log.Errorf("unpatch container error: %v", err)
 			continue
 		}
+		log.Infof("leave resource: %s success", uid)
 	}
 	return err
 }
diff --git a/pkg/handler/serve.go b/pkg/handler/serve.go
index 56b9a785..ece27c55 100644
--- a/pkg/handler/serve.go
+++ b/pkg/handler/serve.go
@@ -29,7 +29,7 @@ func RentIPIfNeeded(route *core.Route) error {
 		var ip []byte
 		ip, err = util.DoReq(req)
 		if err != nil {
-			log.Error(err)
+			log.Errorf("can not get ip, err: %v", err)
 			return err
 		}
 		log.Infof("rent an ip %s", strings.TrimSpace(string(ip)))
@@ -38,11 +38,11 @@ func RentIPIfNeeded(route *core.Route) error {
 			return fmt.Errorf("can not get ip from %s", string(ip))
 		}
 		if err = os.Setenv(config.EnvInboundPodTunIPv4, ips[0]); err != nil {
-			log.Error(err)
+			log.Errorf("can not set ip, err: %v", err)
 			return err
 		}
		if err = os.Setenv(config.EnvInboundPodTunIPv6, ips[1]); err != nil {
-			log.Error(err)
+			log.Errorf("can not set ip, err: %v", err)
 			return err
 		}
 		for i := 0; i < len(route.ServeNodes); i++ {
diff --git a/pkg/mesh/controller.go b/pkg/mesh/controller.go
index 6ba2ec7b..df283eb0 100644
--- a/pkg/mesh/controller.go
+++ b/pkg/mesh/controller.go
@@ -153,7 +153,7 @@ kubevpn serve -L "tun:/localhost:8422?net=${TunIPv4}&route=${CIDR4}" -F "tcp://$
 func init() {
 	json, err := yaml.ToJSON(envoyConfig)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("Error converting json to bytes: %v", err)
 		return
 	}
 	envoyConfig = json
diff --git a/pkg/util/util.go b/pkg/util/util.go
index 775ae1ac..b2f05715 100644
--- a/pkg/util/util.go
+++ b/pkg/util/util.go
@@ -112,18 +112,18 @@ func PortForwardPod(config *rest.Config, clientset *rest.RESTClient, podName, na
 		URL()
 	transport, upgrader, err := spdy.RoundTripperFor(config)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("create spdy roundtripper error: %s", err.Error())
 		return err
 	}
 	dialer := spdy.NewDialer(upgrader, &http.Client{Transport: transport}, "POST", url)
 	forwarder, err := portforward.NewOnAddresses(dialer, []string{"localhost"}, portPair, stopChan, readyChan, nil, os.Stderr)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("create port forward error: %s", err.Error())
 		return err
 	}
 	if err = forwarder.ForwardPorts(); err != nil {
-		log.Error(err)
+		log.Errorf("forward port error: %s", err.Error())
 		return err
 	}
 	return nil
@@ -354,7 +354,15 @@ func Ping(targetIP string) (bool, error) {
 	return stat.PacketsRecv == stat.PacketsSent, err
 }
 
-func RolloutStatus(ctx1 context.Context, factory cmdutil.Factory, namespace, workloads string, timeout time.Duration) error {
+func RolloutStatus(ctx1 context.Context, factory cmdutil.Factory, namespace, workloads string, timeout time.Duration) (err error) {
+	log.Infof("rollout status for %s", workloads)
+	defer func() {
+		if err != nil {
+			log.Errorf("rollout status for %s failed: %s", workloads, err.Error())
+		} else {
+			log.Infof("rollout status for %s success", workloads)
+		}
+	}()
 	client, _ := factory.DynamicClient()
 	r := factory.NewBuilder().
 		WithScheme(scheme.Scheme, scheme.Scheme.PrioritizedVersionsAllGroups()...).
@@ -363,7 +371,7 @@ func RolloutStatus(ctx1 context.Context, factory cmdutil.Factory, namespace, wor
 		SingleResourceType().
 		Latest().
 		Do()
-	err := r.Err()
+	err = r.Err()
 	if err != nil {
 		return err
 	}
@@ -406,7 +414,7 @@ func RolloutStatus(ctx1 context.Context, factory cmdutil.Factory, namespace, wor
 			if err != nil {
 				return false, err
 			}
-			log.Infof("%s", status)
+			log.Info(strings.TrimSpace(status))
 			// Quit waiting if the rollout is done
 			if done {
 				return true, nil
diff --git a/pkg/webhook/dhcp.go b/pkg/webhook/dhcp.go
index 80f51acb..a1c0eab8 100644
--- a/pkg/webhook/dhcp.go
+++ b/pkg/webhook/dhcp.go
@@ -29,7 +29,7 @@ func (d *dhcpServer) rentIP(w http.ResponseWriter, r *http.Request) {
 	dhcp := handler.NewDHCPManager(cmi, namespace)
 	v4, v6, err := dhcp.RentIPRandom(ctx)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("rent ip failed, err: %v", err)
 		w.WriteHeader(http.StatusBadRequest)
 		return
 	}
@@ -37,7 +37,7 @@ func (d *dhcpServer) rentIP(w http.ResponseWriter, r *http.Request) {
 	// todo patch annotation
 	_, err = w.Write([]byte(fmt.Sprintf("%s,%s", v4.String(), v6.String())))
 	if err != nil {
-		log.Error(err)
+		log.Errorf("write response failed, err: %v", err)
 	}
 }
 
@@ -59,7 +59,7 @@ func (d *dhcpServer) releaseIP(w http.ResponseWriter, r *http.Request) {
 	cmi := d.clientset.CoreV1().ConfigMaps(namespace)
 	dhcp := handler.NewDHCPManager(cmi, namespace)
 	if err := dhcp.ReleaseIP(context.Background(), ips...); err != nil {
-		log.Error(err)
+		log.Errorf("release ip failed, err: %v", err)
 		w.WriteHeader(http.StatusBadRequest)
 		return
 	}
diff --git a/pkg/webhook/mutateadmissionwebhook.go b/pkg/webhook/mutateadmissionwebhook.go
index c7c28eb2..f49af64b 100644
--- a/pkg/webhook/mutateadmissionwebhook.go
+++ b/pkg/webhook/mutateadmissionwebhook.go
@@ -113,13 +113,13 @@ func serve(w http.ResponseWriter, r *http.Request, admit admitHandler) {
 	log.Infof("sending response: %v", responseObj)
 	respBytes, err := json.Marshal(responseObj)
 	if err != nil {
-		log.Error(err)
+		log.Errorf("Unable to encode response: %v", err)
 		http.Error(w, err.Error(), http.StatusInternalServerError)
 		return
 	}
 
 	w.Header().Set("Content-Type", "application/json")
 	if _, err := w.Write(respBytes); err != nil {
-		log.Error(err)
+		log.Errorf("Unable to write response: %v", err)
 	}
 }