golib/ioutils/aggregator/coverage_test.go
nabbar 3837f0b2bb Improvements, tests & documentations (2025-12 #1)
[file/bandwidth]
- ADD documentation: add enhanced README and TESTING guidelines
- ADD tests: complete test suites with benchmarks, concurrency, and edge cases

[file/perm]
- ADD documentation: add enhanced README and TESTING guidelines
- ADD tests: complete test suites with benchmarks, concurrency, and edge cases
- ADD function to parse permissions from the forms "rwx-wxr-x" or
  "-rwx-w-r-x" (see the sketch below)
- ADD function ParseFileMode to convert os.FileMode to file.Perm
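
  A minimal, hypothetical sketch of parsing such a symbolic permission string
  into mode bits (illustration only, not the golib implementation; the name
  parseSymbolic is invented):

    package main

    import (
    	"fmt"
    	"os"
    )

    // parseSymbolic converts a "rwxr-xr-x" style string (9 chars, optionally
    // prefixed with a file-type char such as '-') into permission bits.
    func parseSymbolic(s string) (os.FileMode, error) {
    	if len(s) == 10 {
    		s = s[1:] // drop leading type char, e.g. '-' or 'd'
    	}
    	if len(s) != 9 {
    		return 0, fmt.Errorf("invalid permission string %q", s)
    	}
    	var m os.FileMode
    	for i, c := range s {
    		if c == '-' {
    			continue
    		}
    		m |= os.FileMode(1) << uint(8-i)
    	}
    	return m, nil
    }

    func main() {
    	m, err := parseSymbolic("-rwxr-xr-x")
    	fmt.Println(m, err) // -rwxr-xr-x <nil> (i.e. 0755)
    }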

[file/progress]
- ADD documentation: add enhanced README and TESTING guidelines
- ADD tests: complete test suites with benchmarks, concurrency, and edge cases

[ioutils/...]
- UPDATE documentation: update enhanced README and TESTING guidelines
- UPDATE tests: complete test suites with benchmarks, concurrency, and edge cases

[logger/...]
- UPDATE documentation: update enhanced README and TESTING guidelines
- ADD documentation: add enhanced README and TESTING guidelines for sub
  packages
- UPDATE tests: complete test suites with benchmarks, concurrency, and edge cases
- UPDATE config: remove FileBufferSize from OptionFile (rework hookfile)
- UPDATE fields: expose Store function in interface
- REWORK hookfile: rework package, use aggregator to allow multi write and
  single file
- FIX hookstderr: fix bug with NonColorable
- FIX hookstdout: fix bug with NonColorable
- FIX hookwriter: fix bug with NonColorable
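
  For context on the NonColorable fixes above: NonColorable presumably refers
  to the go-colorable wrapper that strips ANSI escape sequences before
  writing. A minimal usage sketch, independent of the golib hooks:

    package main

    import (
    	"os"

    	colorable "github.com/mattn/go-colorable"
    )

    func main() {
    	// NewNonColorable wraps any io.Writer and removes ANSI color escape
    	// sequences from the stream before forwarding it.
    	w := colorable.NewNonColorable(os.Stderr)
    	_, _ = w.Write([]byte("\x1b[31mcolored input\x1b[0m becomes plain text\n"))
    }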

[network/protocol]
- ADD function IsTCP, IsUDP, IsUnixLike to check type of protocol
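
  Illustrative only (the actual golib protocol type and constants may differ):
  predicates of this kind typically inspect the protocol value, e.g.:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // Protocol is a stand-in for the library's protocol type; the real
    // definition in golib/network/protocol may differ.
    type Protocol string

    func (p Protocol) IsTCP() bool { return strings.HasPrefix(string(p), "tcp") } // tcp, tcp4, tcp6
    func (p Protocol) IsUDP() bool { return strings.HasPrefix(string(p), "udp") } // udp, udp4, udp6
    func (p Protocol) IsUnixLike() bool {
    	return strings.HasPrefix(string(p), "unix") // unix, unixgram, unixpacket
    }

    func main() {
    	fmt.Println(Protocol("tcp4").IsTCP(), Protocol("udp").IsUDP(), Protocol("unixgram").IsUnixLike())
    }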

[runner]
- FIX typo

[socket]
- UPDATE documentation: update enhanced README and TESTING guidelines
- ADD documentation: add enhanced README and TESTING guidelines for sub
  packages
- UPDATE tests: complete test suites with benchmarks, concurrency, and edge cases
- REWORK server: use context-compatible io.Reader, io.Writer, io.Closer
  instead of the previous reader / writer types
- REWORK server: simplify and optimize the server
- REMOVE reader and writer types
- ADD context: add new interface in the root socket interface to expose a
  context interface that extends context, io.Reader/io.Writer/io.Closer and
  dedicated server functions (IsConnected, ...); see the sketch below
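
  A sketch of the shape described above; the interface name and method set
  beyond IsConnected are assumptions, not the golib definition:

    package sketch

    import (
    	"context"
    	"io"
    )

    // ConnContext combines a context.Context with the connection's
    // reader/writer/closer plus server-side helpers (hypothetical name).
    type ConnContext interface {
    	context.Context
    	io.Reader
    	io.Writer
    	io.Closer

    	// IsConnected reports whether the underlying connection is still up.
    	IsConnected() bool
    }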
2025-12-02 02:56:20 +01:00

/*
* MIT License
*
* Copyright (c) 2025 Nicolas JUHEL
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package aggregator_test

import (
	"context"
	"sync"
	"sync/atomic"
	"time"

	"github.com/nabbar/golib/ioutils/aggregator"

	. "github.com/onsi/ginkgo/v2"
	. "github.com/onsi/gomega"
)

// Tests specifically for improving code coverage of low-coverage functions
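//
// These specs rely on the suite's shared helpers newTestWriter and
// startAndWait (defined elsewhere in this test package) and exercise the
// aggregator.Config callbacks: FctWriter receives the aggregated data,
// AsyncFct is invoked periodically every AsyncTimer with at most AsyncMax
// concurrent runs, and SyncFct is invoked periodically every SyncTimer.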
var _ = Describe("Coverage Improvements", func() {
	var (
		ctx    context.Context
		cancel context.CancelFunc
	)

	BeforeEach(func() {
		ctx, cancel = context.WithCancel(testCtx)
	})

	AfterEach(func() {
		cancel()
		time.Sleep(50 * time.Millisecond)
	})

	Describe("Context.Done() coverage", func() {
		It("should return closed channel when context is cancelled", func() {
			writer := newTestWriter()
			cfg := aggregator.Config{
				FctWriter: writer.Write,
			}

			agg, err := aggregator.New(ctx, cfg)
			Expect(err).ToNot(HaveOccurred())
			Expect(agg).ToNot(BeNil())
			Expect(startAndWait(agg, ctx)).To(Succeed())

			// Done channel should be available
			done := agg.Done()
			Expect(done).ToNot(BeNil())

			// Cancel context
			cancel()

			// Done should be closed
			Eventually(done, 2*time.Second).Should(BeClosed())

			agg.Close()
		})

		It("should return closed channel when aggregator is closed", func() {
			writer := newTestWriter()
			cfg := aggregator.Config{
				FctWriter: writer.Write,
			}

			agg, err := aggregator.New(ctx, cfg)
			Expect(err).ToNot(HaveOccurred())
			Expect(startAndWait(agg, ctx)).To(Succeed())

			done := agg.Done()

			// Close aggregator
			agg.Close()

			// Done should eventually be closed
			Eventually(done, 2*time.Second).Should(BeClosed())
		})

		It("should handle Done() call on uninitialized context", func() {
			// Create aggregator but don't start it
			writer := newTestWriter()
			cfg := aggregator.Config{
				FctWriter: writer.Write,
			}

			agg, err := aggregator.New(ctx, cfg)
			Expect(err).ToNot(HaveOccurred())

			// Done should still work
			done := agg.Done()
			Expect(done).ToNot(BeNil())

			agg.Close()
		})
	})
Describe("AsyncFct and SyncFct coverage", func() {
It("should call AsyncFct periodically when configured", func() {
var asyncCount atomic.Int32
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) { return len(p), nil },
AsyncTimer: 100 * time.Millisecond,
AsyncMax: 2,
AsyncFct: func(ctx context.Context) {
asyncCount.Add(1)
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Wait for async calls to happen - be more tolerant
Eventually(func() int32 {
return asyncCount.Load()
}, 2*time.Second, 50*time.Millisecond).Should(BeNumerically(">=", 1))
})
It("should call SyncFct periodically when configured", func() {
var syncCount atomic.Int32
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) { return len(p), nil },
SyncTimer: 100 * time.Millisecond,
SyncFct: func(ctx context.Context) {
syncCount.Add(1)
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Wait for sync calls to happen - be more tolerant
Eventually(func() int32 {
return syncCount.Load()
}, 2*time.Second, 50*time.Millisecond).Should(BeNumerically(">=", 1))
})
It("should respect AsyncMax limit", func() {
var (
runningCount atomic.Int32
maxRunning atomic.Int32
)
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) { return len(p), nil },
AsyncTimer: 20 * time.Millisecond,
AsyncMax: 2,
AsyncFct: func(ctx context.Context) {
current := runningCount.Add(1)
// Update max if needed
for {
currentMax := maxRunning.Load()
if current <= currentMax {
break
}
if maxRunning.CompareAndSwap(currentMax, current) {
break
}
}
// Simulate work
time.Sleep(50 * time.Millisecond)
runningCount.Add(-1)
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Let it run
time.Sleep(300 * time.Millisecond)
// Max running should not exceed AsyncMax
Expect(maxRunning.Load()).To(BeNumerically("<=", 2))
})
It("should not call AsyncFct when AsyncTimer is 0", func() {
var asyncCount atomic.Int32
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) { return len(p), nil },
AsyncTimer: 0,
AsyncFct: func(ctx context.Context) {
asyncCount.Add(1)
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
time.Sleep(200 * time.Millisecond)
// Should not be called
Expect(asyncCount.Load()).To(Equal(int32(0)))
})
It("should not call SyncFct when SyncTimer is 0", func() {
var syncCount atomic.Int32
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) { return len(p), nil },
SyncTimer: 0,
SyncFct: func(ctx context.Context) {
syncCount.Add(1)
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
time.Sleep(200 * time.Millisecond)
// Should not be called
Expect(syncCount.Load()).To(Equal(int32(0)))
})
It("should handle nil AsyncFct", func() {
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) { return len(p), nil },
AsyncTimer: 50 * time.Millisecond,
AsyncFct: nil,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Should not panic
time.Sleep(200 * time.Millisecond)
})
It("should handle nil SyncFct", func() {
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) { return len(p), nil },
SyncTimer: 50 * time.Millisecond,
SyncFct: nil,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Should not panic
time.Sleep(200 * time.Millisecond)
})
})
Describe("fctWrite edge cases", func() {
It("should handle empty data", func() {
var writeCount atomic.Int32
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) {
writeCount.Add(1)
return len(p), nil
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Write empty data (should be filtered before reaching fctWrite)
n, err := agg.Write([]byte{})
Expect(err).ToNot(HaveOccurred())
Expect(n).To(Equal(0))
// Write non-empty data
n, err = agg.Write([]byte("test"))
Expect(err).ToNot(HaveOccurred())
Expect(n).To(Equal(4))
// Wait for processing
Eventually(func() int32 {
return writeCount.Load()
}, 2*time.Second, 10*time.Millisecond).Should(Equal(int32(1)))
})
})
Describe("chanData edge cases", func() {
It("should handle channel transitions", func() {
writer := newTestWriter()
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: writer.Write,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
// Before start: write should fail
_, err = agg.Write([]byte("test"))
Expect(err).To(HaveOccurred())
// Start
Expect(startAndWait(agg, ctx)).To(Succeed())
// After start: write should succeed
_, err = agg.Write([]byte("test"))
Expect(err).ToNot(HaveOccurred())
// Close
agg.Close()
time.Sleep(100 * time.Millisecond)
// After close: write should fail
_, err = agg.Write([]byte("test"))
Expect(err).To(HaveOccurred())
})
})
Describe("ErrorsLast and ErrorsList coverage", func() {
It("should return nil when no errors and runner is nil", func() {
writer := newTestWriter()
cfg := aggregator.Config{
FctWriter: writer.Write,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
// Before start, runner might not be initialized
Expect(agg.ErrorsLast()).To(BeNil())
Expect(agg.ErrorsList()).To(BeNil())
agg.Close()
})
It("should return errors when they occur", func() {
var writeCount atomic.Int32
cfg := aggregator.Config{
BufWriter: 10,
FctWriter: func(p []byte) (int, error) {
count := writeCount.Add(1)
if count == 2 {
return 0, context.DeadlineExceeded
}
return len(p), nil
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Write data that will trigger error
agg.Write([]byte("first"))
agg.Write([]byte("second"))
agg.Write([]byte("third"))
// Wait for error to be logged
time.Sleep(200 * time.Millisecond)
// ErrorsLast should potentially have an error
// Note: errors are logged but not necessarily propagated to runner
// This tests the code path even if no error is returned
_ = agg.ErrorsLast()
_ = agg.ErrorsList()
})
})
Describe("setRunner edge cases", func() {
It("should create new runner when nil is provided", func() {
writer := newTestWriter()
cfg := aggregator.Config{
FctWriter: writer.Write,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
// Start to initialize runner
Expect(startAndWait(agg, ctx)).To(Succeed())
// IsRunning triggers setRunner internally
Expect(agg.IsRunning()).To(BeTrue())
})
})
Describe("Deadline coverage", func() {
It("should return zero time when no deadline", func() {
writer := newTestWriter()
cfg := aggregator.Config{
FctWriter: writer.Write,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
deadline, ok := agg.Deadline()
Expect(ok).To(BeFalse())
Expect(deadline.IsZero()).To(BeTrue())
})
})
Describe("Err() coverage", func() {
It("should return nil when no error", func() {
writer := newTestWriter()
cfg := aggregator.Config{
FctWriter: writer.Write,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
Expect(agg.Err()).To(BeNil())
})
It("should return error when context is cancelled", func() {
localCtx, localCancel := context.WithCancel(ctx)
writer := newTestWriter()
cfg := aggregator.Config{
FctWriter: writer.Write,
}
agg, err := aggregator.New(localCtx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, localCtx)).To(Succeed())
// Cancel context
localCancel()
// Err should eventually return error
Eventually(agg.Err, 2*time.Second, 10*time.Millisecond).ShouldNot(BeNil())
})
})
Describe("Value() coverage", func() {
It("should return nil for non-existent keys", func() {
writer := newTestWriter()
cfg := aggregator.Config{
FctWriter: writer.Write,
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
val := agg.Value("non-existent-key")
Expect(val).To(BeNil())
})
})
Describe("Counter edge cases", func() {
It("should handle counter decrements below zero", func() {
var mu sync.Mutex
var writes [][]byte
cfg := aggregator.Config{
BufWriter: 5,
FctWriter: func(p []byte) (int, error) {
mu.Lock()
writes = append(writes, p)
mu.Unlock()
return len(p), nil
},
}
agg, err := aggregator.New(ctx, cfg)
Expect(err).ToNot(HaveOccurred())
defer agg.Close()
Expect(startAndWait(agg, ctx)).To(Succeed())
// Write and immediately check metrics
for i := 0; i < 10; i++ {
agg.Write([]byte("data"))
}
// Metrics should never go negative
for i := 0; i < 20; i++ {
Expect(agg.NbWaiting()).To(BeNumerically(">=", 0))
Expect(agg.NbProcessing()).To(BeNumerically(">=", 0))
Expect(agg.SizeWaiting()).To(BeNumerically(">=", 0))
Expect(agg.SizeProcessing()).To(BeNumerically(">=", 0))
time.Sleep(10 * time.Millisecond)
}
})
})
})