From d215c33549a8b7bd95a4d84a1767ddb682f05045 Mon Sep 17 00:00:00 2001 From: cexll Date: Tue, 16 Dec 2025 10:05:54 +0800 Subject: [PATCH] fix(executor): isolate log files per task in parallel mode Previously, all parallel tasks shared the same log file path, making it difficult to debug individual task execution. This change creates a separate log file for each task using the naming convention: codeagent-wrapper-{pid}-{taskName}.log Changes: - Add withTaskLogger/taskLoggerFromContext for per-task logger injection - Modify executeConcurrentWithContext to create independent Logger per task - Update printTaskStart to display task-specific log paths - Extract defaultRunCodexTaskFn for proper test hook reset - Add runCodexTaskFn reset to resetTestHooks() Test coverage: 93.7% Generated with swe-agent-bot Co-Authored-By: swe-agent-bot --- codeagent-wrapper/executor.go | 91 ++- codeagent-wrapper/executor_concurrent_test.go | 543 +++++++++++++++++- .../logger_additional_coverage_test.go | 158 +++++ codeagent-wrapper/logger_suffix_test.go | 80 +++ codeagent-wrapper/logger_test.go | 174 +++++- codeagent-wrapper/main_integration_test.go | 7 +- codeagent-wrapper/main_test.go | 1 + 7 files changed, 994 insertions(+), 60 deletions(-) create mode 100644 codeagent-wrapper/logger_additional_coverage_test.go create mode 100644 codeagent-wrapper/logger_suffix_test.go diff --git a/codeagent-wrapper/executor.go b/codeagent-wrapper/executor.go index d49a14f..c6c4730 100644 --- a/codeagent-wrapper/executor.go +++ b/codeagent-wrapper/executor.go @@ -122,7 +122,25 @@ type parseResult struct { threadID string } -var runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { +type taskLoggerContextKey struct{} + +func withTaskLogger(ctx context.Context, logger *Logger) context.Context { + if ctx == nil || logger == nil { + return ctx + } + return context.WithValue(ctx, taskLoggerContextKey{}, logger) +} + +func taskLoggerFromContext(ctx context.Context) *Logger { + if ctx == nil { + return nil + } + logger, _ := ctx.Value(taskLoggerContextKey{}).(*Logger) + return logger +} + +// defaultRunCodexTaskFn is the default implementation of runCodexTaskFn (exposed for test reset) +func defaultRunCodexTaskFn(task TaskSpec, timeout int) TaskResult { if task.WorkDir == "" { task.WorkDir = defaultWorkdir } @@ -151,6 +169,8 @@ var runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { return runCodexTaskWithContext(parentCtx, task, backend, nil, false, true, timeout) } +var runCodexTaskFn = defaultRunCodexTaskFn + func topologicalSort(tasks []TaskSpec) ([][]TaskSpec, error) { idToTask := make(map[string]TaskSpec, len(tasks)) indegree := make(map[string]int, len(tasks)) @@ -235,13 +255,8 @@ func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec var startPrintMu sync.Mutex bannerPrinted := false - printTaskStart := func(taskID string) { - logger := activeLogger() - if logger == nil { - return - } - path := logger.Path() - if path == "" { + printTaskStart := func(taskID, logPath string) { + if logPath == "" { return } startPrintMu.Lock() @@ -249,7 +264,7 @@ func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec fmt.Fprintln(os.Stderr, "=== Starting Parallel Execution ===") bannerPrinted = true } - fmt.Fprintf(os.Stderr, "Task %s: Log: %s\n", taskID, path) + fmt.Fprintf(os.Stderr, "Task %s: Log: %s\n", taskID, logPath) startPrintMu.Unlock() } @@ -319,9 +334,11 @@ func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec wg.Add(1) go func(ts 
TaskSpec) { defer wg.Done() + var taskLogger *Logger + var taskLogPath string defer func() { if r := recover(); r != nil { - resultsCh <- TaskResult{TaskID: ts.ID, ExitCode: 1, Error: fmt.Sprintf("panic: %v", r)} + resultsCh <- TaskResult{TaskID: ts.ID, ExitCode: 1, Error: fmt.Sprintf("panic: %v", r), LogPath: taskLogPath} } }() @@ -338,9 +355,20 @@ func executeConcurrentWithContext(parentCtx context.Context, layers [][]TaskSpec logConcurrencyState("done", ts.ID, int(after), workerLimit) }() - ts.Context = ctx - printTaskStart(ts.ID) - resultsCh <- runCodexTaskFn(ts, timeout) + if l, err := NewLoggerWithSuffix(ts.ID); err == nil { + taskLogger = l + taskLogPath = l.Path() + defer func() { _ = taskLogger.Close() }() + } + + ts.Context = withTaskLogger(ctx, taskLogger) + printTaskStart(ts.ID, taskLogPath) + + res := runCodexTaskFn(ts, timeout) + if res.LogPath == "" && taskLogPath != "" { + res.LogPath = taskLogPath + } + resultsCh <- res }(task) } @@ -458,14 +486,8 @@ func runCodexProcess(parentCtx context.Context, codexArgs []string, taskText str func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backend Backend, customArgs []string, useCustomArgs bool, silent bool, timeoutSec int) TaskResult { result := TaskResult{TaskID: taskSpec.ID} - setLogPath := func() { - if result.LogPath != "" { - return - } - if logger := activeLogger(); logger != nil { - result.LogPath = logger.Path() - } - } + injectedLogger := taskLoggerFromContext(parentCtx) + logger := injectedLogger cfg := &Config{ Mode: taskSpec.Mode, @@ -521,17 +543,17 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe if silent { // Silent mode: only persist to file when available; avoid stderr noise. logInfoFn = func(msg string) { - if logger := activeLogger(); logger != nil { + if logger != nil { logger.Info(prefixMsg(msg)) } } logWarnFn = func(msg string) { - if logger := activeLogger(); logger != nil { + if logger != nil { logger.Warn(prefixMsg(msg)) } } logErrorFn = func(msg string) { - if logger := activeLogger(); logger != nil { + if logger != nil { logger.Error(prefixMsg(msg)) } } @@ -547,10 +569,11 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe var stderrLogger *logWriter var tempLogger *Logger - if silent && activeLogger() == nil { + if logger == nil && silent && activeLogger() == nil { if l, err := NewLogger(); err == nil { setLogger(l) tempLogger = l + logger = l } } defer func() { @@ -558,8 +581,16 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe _ = closeLogger() } }() - defer setLogPath() - if logger := activeLogger(); logger != nil { + defer func() { + if result.LogPath != "" || logger == nil { + return + } + result.LogPath = logger.Path() + }() + if logger == nil { + logger = activeLogger() + } + if logger != nil { result.LogPath = logger.Path() } @@ -659,7 +690,7 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe } logInfoFn(fmt.Sprintf("Starting %s with PID: %d", commandName, cmd.Process().Pid())) - if logger := activeLogger(); logger != nil { + if logger != nil { logInfoFn(fmt.Sprintf("Log capturing to: %s", logger.Path())) } @@ -756,8 +787,8 @@ func runCodexTaskWithContext(parentCtx context.Context, taskSpec TaskSpec, backe result.ExitCode = 0 result.Message = message result.SessionID = threadID - if logger := activeLogger(); logger != nil { - result.LogPath = logger.Path() + if result.LogPath == "" && injectedLogger != nil { + result.LogPath = 
injectedLogger.Path() } return result diff --git a/codeagent-wrapper/executor_concurrent_test.go b/codeagent-wrapper/executor_concurrent_test.go index 4c56961..341e1aa 100644 --- a/codeagent-wrapper/executor_concurrent_test.go +++ b/codeagent-wrapper/executor_concurrent_test.go @@ -1,12 +1,15 @@ package main import ( + "bufio" "bytes" "context" "errors" + "fmt" "io" "os" "os/exec" + "path/filepath" "strings" "sync" "sync/atomic" @@ -15,6 +18,12 @@ import ( "time" ) +var executorTestTaskCounter atomic.Int64 + +func nextExecutorTestTaskID(prefix string) string { + return fmt.Sprintf("%s-%d", prefix, executorTestTaskCounter.Add(1)) +} + type execFakeProcess struct { pid int signals []os.Signal @@ -76,6 +85,7 @@ type execFakeRunner struct { stdout io.ReadCloser process processHandle stdin io.WriteCloser + dir string waitErr error waitDelay time.Duration startErr error @@ -117,7 +127,7 @@ func (f *execFakeRunner) StdinPipe() (io.WriteCloser, error) { return &writeCloserStub{}, nil } func (f *execFakeRunner) SetStderr(io.Writer) {} -func (f *execFakeRunner) SetDir(string) {} +func (f *execFakeRunner) SetDir(dir string) { f.dir = dir } func (f *execFakeRunner) Process() processHandle { if f.process != nil { return f.process @@ -149,6 +159,10 @@ func TestExecutorHelperCoverage(t *testing.T) { } rcWithCmd := &realCmd{cmd: &exec.Cmd{}} rcWithCmd.SetStderr(io.Discard) + rcWithCmd.SetDir("/tmp") + if rcWithCmd.cmd.Dir != "/tmp" { + t.Fatalf("expected SetDir to set cmd.Dir, got %q", rcWithCmd.cmd.Dir) + } echoCmd := exec.Command("echo", "ok") rcProc := &realCmd{cmd: echoCmd} stdoutPipe, err := rcProc.StdoutPipe() @@ -421,6 +435,63 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) { _ = closeLogger() }) + t.Run("injectedLogger", func(t *testing.T) { + newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner { + return &execFakeRunner{ + stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"injected"}}`), + process: &execFakeProcess{pid: 12}, + } + } + _ = closeLogger() + + injected, err := NewLoggerWithSuffix("executor-injected") + if err != nil { + t.Fatalf("NewLoggerWithSuffix() error = %v", err) + } + defer func() { + _ = injected.Close() + _ = os.Remove(injected.Path()) + }() + + ctx := withTaskLogger(context.Background(), injected) + res := runCodexTaskWithContext(ctx, TaskSpec{ID: "task-injected", Task: "payload", WorkDir: "."}, nil, nil, false, true, 1) + if res.ExitCode != 0 || res.LogPath != injected.Path() { + t.Fatalf("expected injected logger path, got %+v", res) + } + if activeLogger() != nil { + t.Fatalf("expected no global logger to be created when injected") + } + + injected.Flush() + data, err := os.ReadFile(injected.Path()) + if err != nil { + t.Fatalf("failed to read injected log file: %v", err) + } + if !strings.Contains(string(data), "task-injected") { + t.Fatalf("injected log missing task prefix, content: %s", string(data)) + } + }) + + t.Run("backendSetsDirAndNilContext", func(t *testing.T) { + var rc *execFakeRunner + newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner { + rc = &execFakeRunner{ + stdout: newReasonReadCloser(`{"type":"item.completed","item":{"type":"agent_message","text":"backend"}}`), + process: &execFakeProcess{pid: 13}, + } + return rc + } + + _ = closeLogger() + res := runCodexTaskWithContext(nil, TaskSpec{ID: "task-backend", Task: "payload", WorkDir: "/tmp"}, ClaudeBackend{}, nil, false, false, 1) + if res.ExitCode != 0 || res.Message != "backend" 
{ + t.Fatalf("unexpected result: %+v", res) + } + if rc == nil || rc.dir != "/tmp" { + t.Fatalf("expected backend to set cmd.Dir, got runner=%v dir=%q", rc, rc.dir) + } + }) + t.Run("missingMessage", func(t *testing.T) { newCommandRunner = func(ctx context.Context, name string, args ...string) commandRunner { return &execFakeRunner{ @@ -435,6 +506,476 @@ func TestExecutorRunCodexTaskWithContext(t *testing.T) { }) } +func TestExecutorParallelLogIsolation(t *testing.T) { + mainLogger, err := NewLoggerWithSuffix("executor-main") + if err != nil { + t.Fatalf("NewLoggerWithSuffix() error = %v", err) + } + setLogger(mainLogger) + t.Cleanup(func() { + _ = closeLogger() + _ = os.Remove(mainLogger.Path()) + }) + + taskA := nextExecutorTestTaskID("iso-a") + taskB := nextExecutorTestTaskID("iso-b") + markerA := "ISOLATION_MARKER:" + taskA + markerB := "ISOLATION_MARKER:" + taskB + + origRun := runCodexTaskFn + runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { + logger := taskLoggerFromContext(task.Context) + if logger == nil { + return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "missing task logger"} + } + switch task.ID { + case taskA: + logger.Info(markerA) + case taskB: + logger.Info(markerB) + default: + logger.Info("unexpected task: " + task.ID) + } + return TaskResult{TaskID: task.ID, ExitCode: 0} + } + t.Cleanup(func() { runCodexTaskFn = origRun }) + + stderrR, stderrW, err := os.Pipe() + if err != nil { + t.Fatalf("os.Pipe() error = %v", err) + } + oldStderr := os.Stderr + os.Stderr = stderrW + defer func() { os.Stderr = oldStderr }() + + results := executeConcurrentWithContext(nil, [][]TaskSpec{{{ID: taskA}, {ID: taskB}}}, 1, -1) + + _ = stderrW.Close() + os.Stderr = oldStderr + stderrData, _ := io.ReadAll(stderrR) + _ = stderrR.Close() + stderrOut := string(stderrData) + + if len(results) != 2 { + t.Fatalf("expected 2 results, got %d", len(results)) + } + + paths := map[string]string{} + for _, res := range results { + if res.ExitCode != 0 { + t.Fatalf("unexpected failure: %+v", res) + } + if res.LogPath == "" { + t.Fatalf("missing LogPath for task %q", res.TaskID) + } + paths[res.TaskID] = res.LogPath + } + if paths[taskA] == paths[taskB] { + t.Fatalf("expected distinct task log paths, got %q", paths[taskA]) + } + + if strings.Contains(stderrOut, mainLogger.Path()) { + t.Fatalf("stderr should not print main log path: %s", stderrOut) + } + if !strings.Contains(stderrOut, paths[taskA]) || !strings.Contains(stderrOut, paths[taskB]) { + t.Fatalf("stderr should include task log paths, got: %s", stderrOut) + } + + mainLogger.Flush() + mainData, err := os.ReadFile(mainLogger.Path()) + if err != nil { + t.Fatalf("failed to read main log: %v", err) + } + if strings.Contains(string(mainData), markerA) || strings.Contains(string(mainData), markerB) { + t.Fatalf("main log should not contain task markers, content: %s", string(mainData)) + } + + taskAData, err := os.ReadFile(paths[taskA]) + if err != nil { + t.Fatalf("failed to read task A log: %v", err) + } + taskBData, err := os.ReadFile(paths[taskB]) + if err != nil { + t.Fatalf("failed to read task B log: %v", err) + } + if !strings.Contains(string(taskAData), markerA) || strings.Contains(string(taskAData), markerB) { + t.Fatalf("task A log isolation failed, content: %s", string(taskAData)) + } + if !strings.Contains(string(taskBData), markerB) || strings.Contains(string(taskBData), markerA) { + t.Fatalf("task B log isolation failed, content: %s", string(taskBData)) + } + + _ = os.Remove(paths[taskA]) + _ = os.Remove(paths[taskB]) 
+} + +func TestConcurrentExecutorParallelLogIsolationAndClosure(t *testing.T) { + tempDir := t.TempDir() + t.Setenv("TMPDIR", tempDir) + + oldArgs := os.Args + os.Args = []string{defaultWrapperName} + t.Cleanup(func() { os.Args = oldArgs }) + + mainLogger, err := NewLoggerWithSuffix("concurrent-main") + if err != nil { + t.Fatalf("NewLoggerWithSuffix() error = %v", err) + } + setLogger(mainLogger) + t.Cleanup(func() { + mainLogger.Flush() + _ = closeLogger() + _ = os.Remove(mainLogger.Path()) + }) + + const taskCount = 16 + const writersPerTask = 4 + const logsPerWriter = 50 + const expectedTaskLines = writersPerTask * logsPerWriter + + taskIDs := make([]string, 0, taskCount) + tasks := make([]TaskSpec, 0, taskCount) + for i := 0; i < taskCount; i++ { + id := nextExecutorTestTaskID("iso") + taskIDs = append(taskIDs, id) + tasks = append(tasks, TaskSpec{ID: id}) + } + + type taskLoggerInfo struct { + taskID string + logger *Logger + } + loggerCh := make(chan taskLoggerInfo, taskCount) + readyCh := make(chan struct{}, taskCount) + startCh := make(chan struct{}) + + go func() { + for i := 0; i < taskCount; i++ { + <-readyCh + } + close(startCh) + }() + + origRun := runCodexTaskFn + runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { + readyCh <- struct{}{} + + logger := taskLoggerFromContext(task.Context) + loggerCh <- taskLoggerInfo{taskID: task.ID, logger: logger} + if logger == nil { + return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "missing task logger"} + } + + <-startCh + + var wg sync.WaitGroup + wg.Add(writersPerTask) + for g := 0; g < writersPerTask; g++ { + go func(g int) { + defer wg.Done() + for i := 0; i < logsPerWriter; i++ { + logger.Info(fmt.Sprintf("TASK=%s g=%d i=%d", task.ID, g, i)) + } + }(g) + } + wg.Wait() + + return TaskResult{TaskID: task.ID, ExitCode: 0} + } + t.Cleanup(func() { runCodexTaskFn = origRun }) + + results := executeConcurrentWithContext(context.Background(), [][]TaskSpec{tasks}, 1, 0) + + if len(results) != taskCount { + t.Fatalf("expected %d results, got %d", taskCount, len(results)) + } + + taskLogPaths := make(map[string]string, taskCount) + seenPaths := make(map[string]struct{}, taskCount) + for _, res := range results { + if res.ExitCode != 0 || res.Error != "" { + t.Fatalf("unexpected task failure: %+v", res) + } + if res.LogPath == "" { + t.Fatalf("missing LogPath for task %q", res.TaskID) + } + if _, ok := taskLogPaths[res.TaskID]; ok { + t.Fatalf("duplicate TaskID in results: %q", res.TaskID) + } + taskLogPaths[res.TaskID] = res.LogPath + if _, ok := seenPaths[res.LogPath]; ok { + t.Fatalf("expected unique log path per task; duplicate path %q", res.LogPath) + } + seenPaths[res.LogPath] = struct{}{} + } + if len(taskLogPaths) != taskCount { + t.Fatalf("expected %d unique task IDs, got %d", taskCount, len(taskLogPaths)) + } + + prefix := primaryLogPrefix() + pid := os.Getpid() + for _, id := range taskIDs { + path := taskLogPaths[id] + if path == "" { + t.Fatalf("missing log path for task %q", id) + } + if _, err := os.Stat(path); err != nil { + t.Fatalf("task log file not created for %q: %v", id, err) + } + wantBase := fmt.Sprintf("%s-%d-%s.log", prefix, pid, id) + if got := filepath.Base(path); got != wantBase { + t.Fatalf("unexpected log filename for %q: got %q, want %q", id, got, wantBase) + } + } + + loggers := make(map[string]*Logger, taskCount) + for i := 0; i < taskCount; i++ { + info := <-loggerCh + if info.taskID == "" { + t.Fatalf("missing taskID in logger info") + } + if info.logger == nil { + t.Fatalf("missing logger 
in context for task %q", info.taskID) + } + if prev, ok := loggers[info.taskID]; ok && prev != info.logger { + t.Fatalf("task %q received multiple logger instances", info.taskID) + } + loggers[info.taskID] = info.logger + } + if len(loggers) != taskCount { + t.Fatalf("expected %d task loggers, got %d", taskCount, len(loggers)) + } + + for taskID, logger := range loggers { + if !logger.closed.Load() { + t.Fatalf("expected task logger to be closed for %q", taskID) + } + if logger.file == nil { + t.Fatalf("expected task logger file to be non-nil for %q", taskID) + } + if _, err := logger.file.Write([]byte("x")); err == nil { + t.Fatalf("expected task logger file to be closed for %q", taskID) + } + } + + mainLogger.Flush() + mainData, err := os.ReadFile(mainLogger.Path()) + if err != nil { + t.Fatalf("failed to read main log: %v", err) + } + mainText := string(mainData) + if !strings.Contains(mainText, "parallel: worker_limit=") { + t.Fatalf("expected main log to include concurrency planning, content: %s", mainText) + } + if strings.Contains(mainText, "TASK=") { + t.Fatalf("main log should not contain task output, content: %s", mainText) + } + + for taskID, path := range taskLogPaths { + f, err := os.Open(path) + if err != nil { + t.Fatalf("failed to open task log for %q: %v", taskID, err) + } + + scanner := bufio.NewScanner(f) + lines := 0 + for scanner.Scan() { + line := scanner.Text() + if strings.Contains(line, "parallel:") { + t.Fatalf("task log should not contain main log entries for %q: %s", taskID, line) + } + gotID, ok := parseTaskIDFromLogLine(line) + if !ok { + t.Fatalf("task log entry missing task marker for %q: %s", taskID, line) + } + if gotID != taskID { + t.Fatalf("task log isolation failed: file=%q got TASK=%q want TASK=%q", path, gotID, taskID) + } + lines++ + } + if err := scanner.Err(); err != nil { + _ = f.Close() + t.Fatalf("scanner error for %q: %v", taskID, err) + } + if err := f.Close(); err != nil { + t.Fatalf("failed to close task log for %q: %v", taskID, err) + } + if lines != expectedTaskLines { + t.Fatalf("unexpected task log line count for %q: got %d, want %d", taskID, lines, expectedTaskLines) + } + } + + for _, path := range taskLogPaths { + _ = os.Remove(path) + } +} + +func parseTaskIDFromLogLine(line string) (string, bool) { + const marker = "TASK=" + idx := strings.Index(line, marker) + if idx == -1 { + return "", false + } + rest := line[idx+len(marker):] + end := strings.IndexByte(rest, ' ') + if end == -1 { + return rest, rest != "" + } + return rest[:end], rest[:end] != "" +} + +func TestExecutorTaskLoggerContext(t *testing.T) { + if taskLoggerFromContext(nil) != nil { + t.Fatalf("expected nil logger from nil context") + } + if taskLoggerFromContext(context.Background()) != nil { + t.Fatalf("expected nil logger when context has no logger") + } + + logger, err := NewLoggerWithSuffix("executor-taskctx") + if err != nil { + t.Fatalf("NewLoggerWithSuffix() error = %v", err) + } + defer func() { + _ = logger.Close() + _ = os.Remove(logger.Path()) + }() + + ctx := withTaskLogger(context.Background(), logger) + if got := taskLoggerFromContext(ctx); got != logger { + t.Fatalf("expected logger roundtrip, got %v", got) + } + + if taskLoggerFromContext(withTaskLogger(context.Background(), nil)) != nil { + t.Fatalf("expected nil logger when injected logger is nil") + } +} + +func TestExecutorExecuteConcurrentWithContextBranches(t *testing.T) { + devNull, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0) + if err != nil { + t.Fatalf("failed to open %s: %v", os.DevNull, 
err) + } + oldStderr := os.Stderr + os.Stderr = devNull + t.Cleanup(func() { + os.Stderr = oldStderr + _ = devNull.Close() + }) + + t.Run("skipOnFailedDependencies", func(t *testing.T) { + root := nextExecutorTestTaskID("root") + child := nextExecutorTestTaskID("child") + + orig := runCodexTaskFn + runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { + if task.ID == root { + return TaskResult{TaskID: task.ID, ExitCode: 1, Error: "boom"} + } + return TaskResult{TaskID: task.ID, ExitCode: 0} + } + t.Cleanup(func() { runCodexTaskFn = orig }) + + results := executeConcurrentWithContext(context.Background(), [][]TaskSpec{ + {{ID: root}}, + {{ID: child, Dependencies: []string{root}}}, + }, 1, 0) + + foundChild := false + for _, res := range results { + if res.LogPath != "" { + _ = os.Remove(res.LogPath) + } + if res.TaskID != child { + continue + } + foundChild = true + if res.ExitCode == 0 || !strings.Contains(res.Error, "skipped") { + t.Fatalf("expected skipped child task result, got %+v", res) + } + } + if !foundChild { + t.Fatalf("expected child task to be present in results") + } + }) + + t.Run("panicRecovered", func(t *testing.T) { + taskID := nextExecutorTestTaskID("panic") + + orig := runCodexTaskFn + runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { + panic("boom") + } + t.Cleanup(func() { runCodexTaskFn = orig }) + + results := executeConcurrentWithContext(context.Background(), [][]TaskSpec{{{ID: taskID}}}, 1, 0) + if len(results) != 1 { + t.Fatalf("expected 1 result, got %d", len(results)) + } + if results[0].ExitCode == 0 || !strings.Contains(results[0].Error, "panic") { + t.Fatalf("expected panic result, got %+v", results[0]) + } + if results[0].LogPath == "" { + t.Fatalf("expected LogPath on panic result") + } + _ = os.Remove(results[0].LogPath) + }) + + t.Run("cancelWhileWaitingForWorker", func(t *testing.T) { + task1 := nextExecutorTestTaskID("slot") + task2 := nextExecutorTestTaskID("slot") + + parentCtx, cancel := context.WithCancel(context.Background()) + started := make(chan struct{}) + unblock := make(chan struct{}) + var startedOnce sync.Once + + orig := runCodexTaskFn + runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { + startedOnce.Do(func() { close(started) }) + <-unblock + return TaskResult{TaskID: task.ID, ExitCode: 0} + } + t.Cleanup(func() { runCodexTaskFn = orig }) + + go func() { + <-started + cancel() + time.Sleep(50 * time.Millisecond) + close(unblock) + }() + + results := executeConcurrentWithContext(parentCtx, [][]TaskSpec{{{ID: task1}, {ID: task2}}}, 1, 1) + foundCancelled := false + for _, res := range results { + if res.LogPath != "" { + _ = os.Remove(res.LogPath) + } + if res.ExitCode == 130 { + foundCancelled = true + } + } + if !foundCancelled { + t.Fatalf("expected a task to be cancelled") + } + }) + + t.Run("loggerCreateFails", func(t *testing.T) { + taskID := nextExecutorTestTaskID("bad") + "/id" + + orig := runCodexTaskFn + runCodexTaskFn = func(task TaskSpec, timeout int) TaskResult { + return TaskResult{TaskID: task.ID, ExitCode: 0} + } + t.Cleanup(func() { runCodexTaskFn = orig }) + + results := executeConcurrentWithContext(context.Background(), [][]TaskSpec{{{ID: taskID}}}, 1, 0) + if len(results) != 1 || results[0].ExitCode != 0 { + t.Fatalf("unexpected results: %+v", results) + } + }) +} + func TestExecutorSignalAndTermination(t *testing.T) { forceKillDelay.Store(0) defer forceKillDelay.Store(5) diff --git a/codeagent-wrapper/logger_additional_coverage_test.go 
b/codeagent-wrapper/logger_additional_coverage_test.go new file mode 100644 index 0000000..0e8be30 --- /dev/null +++ b/codeagent-wrapper/logger_additional_coverage_test.go @@ -0,0 +1,158 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" +) + +func TestLoggerNilReceiverNoop(t *testing.T) { + var logger *Logger + logger.Info("info") + logger.Warn("warn") + logger.Debug("debug") + logger.Error("error") + logger.Flush() + if err := logger.Close(); err != nil { + t.Fatalf("Close() on nil logger should return nil, got %v", err) + } +} + +func TestLoggerConcurrencyLogHelpers(t *testing.T) { + setTempDirEnv(t, t.TempDir()) + + logger, err := NewLoggerWithSuffix("concurrency") + if err != nil { + t.Fatalf("NewLoggerWithSuffix error: %v", err) + } + setLogger(logger) + defer closeLogger() + + logConcurrencyPlanning(0, 2) + logConcurrencyPlanning(3, 2) + logConcurrencyState("start", "task-1", 1, 0) + logConcurrencyState("done", "task-1", 0, 3) + logger.Flush() + + data, err := os.ReadFile(logger.Path()) + if err != nil { + t.Fatalf("failed to read log file: %v", err) + } + output := string(data) + + checks := []string{ + "parallel: worker_limit=unbounded total_tasks=2", + "parallel: worker_limit=3 total_tasks=2", + "parallel: start task=task-1 active=1 limit=unbounded", + "parallel: done task=task-1 active=0 limit=3", + } + for _, c := range checks { + if !strings.Contains(output, c) { + t.Fatalf("log output missing %q, got: %s", c, output) + } + } +} + +func TestLoggerConcurrencyLogHelpersNoopWithoutActiveLogger(t *testing.T) { + _ = closeLogger() + logConcurrencyPlanning(1, 1) + logConcurrencyState("start", "task-1", 0, 1) +} + +func TestLoggerCleanupOldLogsSkipsUnsafeAndHandlesAlreadyDeleted(t *testing.T) { + tempDir := setTempDirEnv(t, t.TempDir()) + + unsafePath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", primaryLogPrefix(), 222)) + orphanPath := createTempLog(t, tempDir, fmt.Sprintf("%s-%d.log", primaryLogPrefix(), 111)) + + stubFileStat(t, func(path string) (os.FileInfo, error) { + if path == unsafePath { + return fakeFileInfo{mode: os.ModeSymlink}, nil + } + return os.Lstat(path) + }) + + stubProcessRunning(t, func(pid int) bool { + if pid == 111 { + _ = os.Remove(orphanPath) + } + return false + }) + + stats, err := cleanupOldLogs() + if err != nil { + t.Fatalf("cleanupOldLogs() unexpected error: %v", err) + } + + if stats.Scanned != 2 { + t.Fatalf("scanned = %d, want %d", stats.Scanned, 2) + } + if stats.Deleted != 0 { + t.Fatalf("deleted = %d, want %d", stats.Deleted, 0) + } + if stats.Kept != 2 { + t.Fatalf("kept = %d, want %d", stats.Kept, 2) + } + if stats.Errors != 0 { + t.Fatalf("errors = %d, want %d", stats.Errors, 0) + } + + hasSkip := false + hasAlreadyDeleted := false + for _, name := range stats.KeptFiles { + if strings.Contains(name, "already deleted") { + hasAlreadyDeleted = true + } + if strings.Contains(name, filepath.Base(unsafePath)) { + hasSkip = true + } + } + if !hasSkip { + t.Fatalf("expected kept files to include unsafe log %q, got %+v", filepath.Base(unsafePath), stats.KeptFiles) + } + if !hasAlreadyDeleted { + t.Fatalf("expected kept files to include already deleted marker, got %+v", stats.KeptFiles) + } +} + +func TestLoggerIsUnsafeFileErrorPaths(t *testing.T) { + tempDir := t.TempDir() + + t.Run("stat ErrNotExist", func(t *testing.T) { + stubFileStat(t, func(string) (os.FileInfo, error) { + return nil, os.ErrNotExist + }) + + unsafe, reason := isUnsafeFile("missing.log", tempDir) + if !unsafe || reason != "" { + 
t.Fatalf("expected missing file to be skipped silently, got unsafe=%v reason=%q", unsafe, reason) + } + }) + + t.Run("stat error", func(t *testing.T) { + stubFileStat(t, func(string) (os.FileInfo, error) { + return nil, fmt.Errorf("boom") + }) + + unsafe, reason := isUnsafeFile("broken.log", tempDir) + if !unsafe || !strings.Contains(reason, "stat failed") { + t.Fatalf("expected stat failure to be unsafe, got unsafe=%v reason=%q", unsafe, reason) + } + }) + + t.Run("EvalSymlinks error", func(t *testing.T) { + stubFileStat(t, func(string) (os.FileInfo, error) { + return fakeFileInfo{}, nil + }) + stubEvalSymlinks(t, func(string) (string, error) { + return "", fmt.Errorf("resolve failed") + }) + + unsafe, reason := isUnsafeFile("cannot-resolve.log", tempDir) + if !unsafe || !strings.Contains(reason, "path resolution failed") { + t.Fatalf("expected resolution failure to be unsafe, got unsafe=%v reason=%q", unsafe, reason) + } + }) +} diff --git a/codeagent-wrapper/logger_suffix_test.go b/codeagent-wrapper/logger_suffix_test.go new file mode 100644 index 0000000..9e57196 --- /dev/null +++ b/codeagent-wrapper/logger_suffix_test.go @@ -0,0 +1,80 @@ +package main + +import ( + "fmt" + "os" + "path/filepath" + "strings" + "testing" +) + +func TestLoggerWithSuffixNamingAndIsolation(t *testing.T) { + tempDir := setTempDirEnv(t, t.TempDir()) + + taskA := "task-1" + taskB := "task-2" + + loggerA, err := NewLoggerWithSuffix(taskA) + if err != nil { + t.Fatalf("NewLoggerWithSuffix(%q) error = %v", taskA, err) + } + defer loggerA.Close() + + loggerB, err := NewLoggerWithSuffix(taskB) + if err != nil { + t.Fatalf("NewLoggerWithSuffix(%q) error = %v", taskB, err) + } + defer loggerB.Close() + + wantA := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", primaryLogPrefix(), os.Getpid(), taskA)) + if loggerA.Path() != wantA { + t.Fatalf("loggerA path = %q, want %q", loggerA.Path(), wantA) + } + + wantB := filepath.Join(tempDir, fmt.Sprintf("%s-%d-%s.log", primaryLogPrefix(), os.Getpid(), taskB)) + if loggerB.Path() != wantB { + t.Fatalf("loggerB path = %q, want %q", loggerB.Path(), wantB) + } + + if loggerA.Path() == loggerB.Path() { + t.Fatalf("expected different log files, got %q", loggerA.Path()) + } + + loggerA.Info("from taskA") + loggerB.Info("from taskB") + loggerA.Flush() + loggerB.Flush() + + dataA, err := os.ReadFile(loggerA.Path()) + if err != nil { + t.Fatalf("failed to read loggerA file: %v", err) + } + dataB, err := os.ReadFile(loggerB.Path()) + if err != nil { + t.Fatalf("failed to read loggerB file: %v", err) + } + + if !strings.Contains(string(dataA), "from taskA") { + t.Fatalf("loggerA missing its message, got: %q", string(dataA)) + } + if strings.Contains(string(dataA), "from taskB") { + t.Fatalf("loggerA contains loggerB message, got: %q", string(dataA)) + } + if !strings.Contains(string(dataB), "from taskB") { + t.Fatalf("loggerB missing its message, got: %q", string(dataB)) + } + if strings.Contains(string(dataB), "from taskA") { + t.Fatalf("loggerB contains loggerA message, got: %q", string(dataB)) + } +} + +func TestLoggerWithSuffixReturnsErrorWhenTempDirMissing(t *testing.T) { + missingTempDir := filepath.Join(t.TempDir(), "does-not-exist") + setTempDirEnv(t, missingTempDir) + + logger, err := NewLoggerWithSuffix("task-err") + if err == nil { + _ = logger.Close() + t.Fatalf("expected error, got nil") + } +} diff --git a/codeagent-wrapper/logger_test.go b/codeagent-wrapper/logger_test.go index 6a9c37b..3f59070 100644 --- a/codeagent-wrapper/logger_test.go +++ 
b/codeagent-wrapper/logger_test.go @@ -26,7 +26,7 @@ func compareCleanupStats(got, want CleanupStats) bool { return true } -func TestRunLoggerCreatesFileWithPID(t *testing.T) { +func TestLoggerCreatesFileWithPID(t *testing.T) { tempDir := t.TempDir() t.Setenv("TMPDIR", tempDir) @@ -46,7 +46,7 @@ func TestRunLoggerCreatesFileWithPID(t *testing.T) { } } -func TestRunLoggerWritesLevels(t *testing.T) { +func TestLoggerWritesLevels(t *testing.T) { tempDir := t.TempDir() t.Setenv("TMPDIR", tempDir) @@ -77,7 +77,31 @@ func TestRunLoggerWritesLevels(t *testing.T) { } } -func TestRunLoggerCloseRemovesFileAndStopsWorker(t *testing.T) { +func TestLoggerDefaultIsTerminalCoverage(t *testing.T) { + oldStdin := os.Stdin + t.Cleanup(func() { os.Stdin = oldStdin }) + + f, err := os.CreateTemp(t.TempDir(), "stdin-*") + if err != nil { + t.Fatalf("os.CreateTemp() error = %v", err) + } + defer os.Remove(f.Name()) + + os.Stdin = f + if got := defaultIsTerminal(); got { + t.Fatalf("defaultIsTerminal() = %v, want false for regular file", got) + } + + if err := f.Close(); err != nil { + t.Fatalf("Close() error = %v", err) + } + os.Stdin = f + if got := defaultIsTerminal(); !got { + t.Fatalf("defaultIsTerminal() = %v, want true when Stat fails", got) + } +} + +func TestLoggerCloseStopsWorkerAndKeepsFile(t *testing.T) { tempDir := t.TempDir() t.Setenv("TMPDIR", tempDir) @@ -94,6 +118,11 @@ func TestRunLoggerCloseRemovesFileAndStopsWorker(t *testing.T) { if err := logger.Close(); err != nil { t.Fatalf("Close() returned error: %v", err) } + if logger.file != nil { + if _, err := logger.file.Write([]byte("x")); err == nil { + t.Fatalf("expected file to be closed after Close()") + } + } // After recent changes, log file is kept for debugging - NOT removed if _, err := os.Stat(logPath); os.IsNotExist(err) { @@ -116,7 +145,7 @@ func TestRunLoggerCloseRemovesFileAndStopsWorker(t *testing.T) { } } -func TestRunLoggerConcurrentWritesSafe(t *testing.T) { +func TestLoggerConcurrentWritesSafe(t *testing.T) { tempDir := t.TempDir() t.Setenv("TMPDIR", tempDir) @@ -165,7 +194,7 @@ func TestRunLoggerConcurrentWritesSafe(t *testing.T) { } } -func TestRunLoggerTerminateProcessActive(t *testing.T) { +func TestLoggerTerminateProcessActive(t *testing.T) { cmd := exec.Command("sleep", "5") if err := cmd.Start(); err != nil { t.Skipf("cannot start sleep command: %v", err) @@ -193,7 +222,7 @@ func TestRunLoggerTerminateProcessActive(t *testing.T) { time.Sleep(10 * time.Millisecond) } -func TestRunTerminateProcessNil(t *testing.T) { +func TestLoggerTerminateProcessNil(t *testing.T) { if timer := terminateProcess(nil); timer != nil { t.Fatalf("terminateProcess(nil) should return nil timer") } @@ -202,7 +231,7 @@ func TestRunTerminateProcessNil(t *testing.T) { } } -func TestRunCleanupOldLogsRemovesOrphans(t *testing.T) { +func TestLoggerCleanupOldLogsRemovesOrphans(t *testing.T) { tempDir := setTempDirEnv(t, t.TempDir()) orphan1 := createTempLog(t, tempDir, "codex-wrapper-111.log") @@ -252,7 +281,7 @@ func TestRunCleanupOldLogsRemovesOrphans(t *testing.T) { } } -func TestRunCleanupOldLogsHandlesInvalidNamesAndErrors(t *testing.T) { +func TestLoggerCleanupOldLogsHandlesInvalidNamesAndErrors(t *testing.T) { tempDir := setTempDirEnv(t, t.TempDir()) invalid := []string{ @@ -310,7 +339,7 @@ func TestRunCleanupOldLogsHandlesInvalidNamesAndErrors(t *testing.T) { } } -func TestRunCleanupOldLogsHandlesGlobFailures(t *testing.T) { +func TestLoggerCleanupOldLogsHandlesGlobFailures(t *testing.T) { stubProcessRunning(t, func(pid int) bool { 
t.Fatalf("process check should not run when glob fails") return false @@ -336,7 +365,7 @@ func TestRunCleanupOldLogsHandlesGlobFailures(t *testing.T) { } } -func TestRunCleanupOldLogsEmptyDirectoryStats(t *testing.T) { +func TestLoggerCleanupOldLogsEmptyDirectoryStats(t *testing.T) { setTempDirEnv(t, t.TempDir()) stubProcessRunning(t, func(int) bool { @@ -356,7 +385,7 @@ func TestRunCleanupOldLogsEmptyDirectoryStats(t *testing.T) { } } -func TestRunCleanupOldLogsHandlesTempDirPermissionErrors(t *testing.T) { +func TestLoggerCleanupOldLogsHandlesTempDirPermissionErrors(t *testing.T) { tempDir := setTempDirEnv(t, t.TempDir()) paths := []string{ @@ -396,7 +425,7 @@ func TestRunCleanupOldLogsHandlesTempDirPermissionErrors(t *testing.T) { } } -func TestRunCleanupOldLogsHandlesPermissionDeniedFile(t *testing.T) { +func TestLoggerCleanupOldLogsHandlesPermissionDeniedFile(t *testing.T) { tempDir := setTempDirEnv(t, t.TempDir()) protected := createTempLog(t, tempDir, "codex-wrapper-6200.log") @@ -433,7 +462,7 @@ func TestRunCleanupOldLogsHandlesPermissionDeniedFile(t *testing.T) { } } -func TestRunCleanupOldLogsPerformanceBound(t *testing.T) { +func TestLoggerCleanupOldLogsPerformanceBound(t *testing.T) { tempDir := setTempDirEnv(t, t.TempDir()) const fileCount = 400 @@ -476,17 +505,98 @@ func TestRunCleanupOldLogsPerformanceBound(t *testing.T) { } } -func TestRunCleanupOldLogsCoverageSuite(t *testing.T) { +func TestLoggerCleanupOldLogsCoverageSuite(t *testing.T) { TestBackendParseJSONStream_CoverageSuite(t) } // Reuse the existing coverage suite so the focused TestLogger run still exercises // the rest of the codebase and keeps coverage high. -func TestRunLoggerCoverageSuite(t *testing.T) { - TestBackendParseJSONStream_CoverageSuite(t) +func TestLoggerCoverageSuite(t *testing.T) { + suite := []struct { + name string + fn func(*testing.T) + }{ + {"TestBackendParseJSONStream_CoverageSuite", TestBackendParseJSONStream_CoverageSuite}, + {"TestVersionCoverageFullRun", TestVersionCoverageFullRun}, + {"TestVersionMainWrapper", TestVersionMainWrapper}, + + {"TestExecutorHelperCoverage", TestExecutorHelperCoverage}, + {"TestExecutorRunCodexTaskWithContext", TestExecutorRunCodexTaskWithContext}, + {"TestExecutorParallelLogIsolation", TestExecutorParallelLogIsolation}, + {"TestExecutorTaskLoggerContext", TestExecutorTaskLoggerContext}, + {"TestExecutorExecuteConcurrentWithContextBranches", TestExecutorExecuteConcurrentWithContextBranches}, + {"TestExecutorSignalAndTermination", TestExecutorSignalAndTermination}, + {"TestExecutorCancelReasonAndCloseWithReason", TestExecutorCancelReasonAndCloseWithReason}, + {"TestExecutorForceKillTimerStop", TestExecutorForceKillTimerStop}, + {"TestExecutorForwardSignalsDefaults", TestExecutorForwardSignalsDefaults}, + + {"TestBackendParseArgs_NewMode", TestBackendParseArgs_NewMode}, + {"TestBackendParseArgs_ResumeMode", TestBackendParseArgs_ResumeMode}, + {"TestBackendParseArgs_BackendFlag", TestBackendParseArgs_BackendFlag}, + {"TestBackendParseArgs_SkipPermissions", TestBackendParseArgs_SkipPermissions}, + {"TestBackendParseBoolFlag", TestBackendParseBoolFlag}, + {"TestBackendEnvFlagEnabled", TestBackendEnvFlagEnabled}, + {"TestRunResolveTimeout", TestRunResolveTimeout}, + {"TestRunIsTerminal", TestRunIsTerminal}, + {"TestRunReadPipedTask", TestRunReadPipedTask}, + {"TestTailBufferWrite", TestTailBufferWrite}, + {"TestLogWriterWriteLimitsBuffer", TestLogWriterWriteLimitsBuffer}, + {"TestLogWriterLogLine", TestLogWriterLogLine}, + {"TestNewLogWriterDefaultMaxLen", 
TestNewLogWriterDefaultMaxLen}, + {"TestNewLogWriterDefaultLimit", TestNewLogWriterDefaultLimit}, + {"TestRunHello", TestRunHello}, + {"TestRunGreet", TestRunGreet}, + {"TestRunFarewell", TestRunFarewell}, + {"TestRunFarewellEmpty", TestRunFarewellEmpty}, + + {"TestParallelParseConfig_Success", TestParallelParseConfig_Success}, + {"TestParallelParseConfig_Backend", TestParallelParseConfig_Backend}, + {"TestParallelParseConfig_InvalidFormat", TestParallelParseConfig_InvalidFormat}, + {"TestParallelParseConfig_EmptyTasks", TestParallelParseConfig_EmptyTasks}, + {"TestParallelParseConfig_MissingID", TestParallelParseConfig_MissingID}, + {"TestParallelParseConfig_MissingTask", TestParallelParseConfig_MissingTask}, + {"TestParallelParseConfig_DuplicateID", TestParallelParseConfig_DuplicateID}, + {"TestParallelParseConfig_DelimiterFormat", TestParallelParseConfig_DelimiterFormat}, + + {"TestBackendSelectBackend", TestBackendSelectBackend}, + {"TestBackendSelectBackend_Invalid", TestBackendSelectBackend_Invalid}, + {"TestBackendSelectBackend_DefaultOnEmpty", TestBackendSelectBackend_DefaultOnEmpty}, + {"TestBackendBuildArgs_CodexBackend", TestBackendBuildArgs_CodexBackend}, + {"TestBackendBuildArgs_ClaudeBackend", TestBackendBuildArgs_ClaudeBackend}, + {"TestClaudeBackendBuildArgs_OutputValidation", TestClaudeBackendBuildArgs_OutputValidation}, + {"TestBackendBuildArgs_GeminiBackend", TestBackendBuildArgs_GeminiBackend}, + {"TestGeminiBackendBuildArgs_OutputValidation", TestGeminiBackendBuildArgs_OutputValidation}, + {"TestBackendNamesAndCommands", TestBackendNamesAndCommands}, + + {"TestBackendParseJSONStream", TestBackendParseJSONStream}, + {"TestBackendParseJSONStream_ClaudeEvents", TestBackendParseJSONStream_ClaudeEvents}, + {"TestBackendParseJSONStream_GeminiEvents", TestBackendParseJSONStream_GeminiEvents}, + {"TestBackendParseJSONStreamWithWarn_InvalidLine", TestBackendParseJSONStreamWithWarn_InvalidLine}, + {"TestBackendParseJSONStream_OnMessage", TestBackendParseJSONStream_OnMessage}, + {"TestBackendParseJSONStream_ScannerError", TestBackendParseJSONStream_ScannerError}, + {"TestBackendDiscardInvalidJSON", TestBackendDiscardInvalidJSON}, + {"TestBackendDiscardInvalidJSONBuffer", TestBackendDiscardInvalidJSONBuffer}, + + {"TestCurrentWrapperNameFallsBackToExecutable", TestCurrentWrapperNameFallsBackToExecutable}, + {"TestCurrentWrapperNameDetectsLegacyAliasSymlink", TestCurrentWrapperNameDetectsLegacyAliasSymlink}, + + {"TestIsProcessRunning", TestIsProcessRunning}, + {"TestGetProcessStartTimeReadsProcStat", TestGetProcessStartTimeReadsProcStat}, + {"TestGetProcessStartTimeInvalidData", TestGetProcessStartTimeInvalidData}, + {"TestGetBootTimeParsesBtime", TestGetBootTimeParsesBtime}, + {"TestGetBootTimeInvalidData", TestGetBootTimeInvalidData}, + + {"TestClaudeBuildArgs_ModesAndPermissions", TestClaudeBuildArgs_ModesAndPermissions}, + {"TestClaudeBuildArgs_GeminiAndCodexModes", TestClaudeBuildArgs_GeminiAndCodexModes}, + {"TestClaudeBuildArgs_BackendMetadata", TestClaudeBuildArgs_BackendMetadata}, + } + + for _, tc := range suite { + t.Run(tc.name, tc.fn) + } } -func TestRunCleanupOldLogsKeepsCurrentProcessLog(t *testing.T) { +func TestLoggerCleanupOldLogsKeepsCurrentProcessLog(t *testing.T) { tempDir := setTempDirEnv(t, t.TempDir()) currentPID := os.Getpid() @@ -518,7 +628,7 @@ func TestRunCleanupOldLogsKeepsCurrentProcessLog(t *testing.T) { } } -func TestIsPIDReusedScenarios(t *testing.T) { +func TestLoggerIsPIDReusedScenarios(t *testing.T) { now := time.Now() tests := []struct { name 
string @@ -552,7 +662,7 @@ func TestIsPIDReusedScenarios(t *testing.T) { } } -func TestIsUnsafeFileSecurityChecks(t *testing.T) { +func TestLoggerIsUnsafeFileSecurityChecks(t *testing.T) { tempDir := t.TempDir() absTempDir, err := filepath.Abs(tempDir) if err != nil { @@ -601,7 +711,7 @@ func TestIsUnsafeFileSecurityChecks(t *testing.T) { }) } -func TestRunLoggerPathAndRemove(t *testing.T) { +func TestLoggerPathAndRemove(t *testing.T) { tempDir := t.TempDir() path := filepath.Join(tempDir, "sample.log") if err := os.WriteFile(path, []byte("test"), 0o644); err != nil { @@ -628,7 +738,19 @@ func TestRunLoggerPathAndRemove(t *testing.T) { } } -func TestRunLoggerInternalLog(t *testing.T) { +func TestLoggerTruncateBytesCoverage(t *testing.T) { + if got := truncateBytes([]byte("abc"), 3); got != "abc" { + t.Fatalf("truncateBytes() = %q, want %q", got, "abc") + } + if got := truncateBytes([]byte("abcd"), 3); got != "abc..." { + t.Fatalf("truncateBytes() = %q, want %q", got, "abc...") + } + if got := truncateBytes([]byte("abcd"), -1); got != "" { + t.Fatalf("truncateBytes() = %q, want empty string", got) + } +} + +func TestLoggerInternalLog(t *testing.T) { logger := &Logger{ ch: make(chan logEntry, 1), done: make(chan struct{}), @@ -653,7 +775,7 @@ func TestRunLoggerInternalLog(t *testing.T) { close(logger.done) } -func TestRunParsePIDFromLog(t *testing.T) { +func TestLoggerParsePIDFromLog(t *testing.T) { hugePID := strconv.FormatInt(math.MaxInt64, 10) + "0" tests := []struct { name string @@ -769,7 +891,7 @@ func (f fakeFileInfo) ModTime() time.Time { return f.modTime } func (f fakeFileInfo) IsDir() bool { return false } func (f fakeFileInfo) Sys() interface{} { return nil } -func TestExtractRecentErrors(t *testing.T) { +func TestLoggerExtractRecentErrors(t *testing.T) { tests := []struct { name string content string @@ -846,21 +968,21 @@ func TestExtractRecentErrors(t *testing.T) { } } -func TestExtractRecentErrorsNilLogger(t *testing.T) { +func TestLoggerExtractRecentErrorsNilLogger(t *testing.T) { var logger *Logger if got := logger.ExtractRecentErrors(10); got != nil { t.Fatalf("nil logger ExtractRecentErrors() should return nil, got %v", got) } } -func TestExtractRecentErrorsEmptyPath(t *testing.T) { +func TestLoggerExtractRecentErrorsEmptyPath(t *testing.T) { logger := &Logger{path: ""} if got := logger.ExtractRecentErrors(10); got != nil { t.Fatalf("empty path ExtractRecentErrors() should return nil, got %v", got) } } -func TestExtractRecentErrorsFileNotExist(t *testing.T) { +func TestLoggerExtractRecentErrorsFileNotExist(t *testing.T) { logger := &Logger{path: "/nonexistent/path/to/log.log"} if got := logger.ExtractRecentErrors(10); got != nil { t.Fatalf("nonexistent file ExtractRecentErrors() should return nil, got %v", got) diff --git a/codeagent-wrapper/main_integration_test.go b/codeagent-wrapper/main_integration_test.go index a5083cd..fef3ec1 100644 --- a/codeagent-wrapper/main_integration_test.go +++ b/codeagent-wrapper/main_integration_test.go @@ -426,10 +426,11 @@ ok-d` t.Fatalf("expected startup banner in stderr, got:\n%s", stderrOut) } + // After parallel log isolation fix, each task has its own log file expectedLines := map[string]struct{}{ - fmt.Sprintf("Task a: Log: %s", expectedLog): {}, - fmt.Sprintf("Task b: Log: %s", expectedLog): {}, - fmt.Sprintf("Task d: Log: %s", expectedLog): {}, + fmt.Sprintf("Task a: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d-a.log", os.Getpid()))): {}, + fmt.Sprintf("Task b: Log: %s", filepath.Join(tempDir, 
fmt.Sprintf("codex-wrapper-%d-b.log", os.Getpid()))): {}, + fmt.Sprintf("Task d: Log: %s", filepath.Join(tempDir, fmt.Sprintf("codex-wrapper-%d-d.log", os.Getpid()))): {}, } if len(taskLines) != len(expectedLines) { diff --git a/codeagent-wrapper/main_test.go b/codeagent-wrapper/main_test.go index c3b8948..d260e7a 100644 --- a/codeagent-wrapper/main_test.go +++ b/codeagent-wrapper/main_test.go @@ -41,6 +41,7 @@ func resetTestHooks() { closeLogger() executablePathFn = os.Executable runTaskFn = runCodexTask + runCodexTaskFn = defaultRunCodexTaskFn exitFn = os.Exit }