chore: fix revive issues

Author: galargh
Date: 2025-06-11 10:00:47 +01:00
parent a0c9fdf2b3
commit 50f065b4df
80 changed files with 209 additions and 209 deletions
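
Every hunk below applies the same mechanical fix: a parameter that a function must declare to satisfy an interface or callback signature, but whose body never reads it, is renamed to the blank identifier _, which is what revive's unused-parameter rule expects. A minimal sketch of the before/after shape, using a hypothetical callback type that does not appear in this commit:

package main

import "fmt"

// handler is a hypothetical callback type: implementations must
// declare both parameters to match the signature, even when they
// only use one of them.
type handler func(name string, payload []byte) error

// Revive's unused-parameter rule flags "payload" if it is named but
// never read. Renaming it to the blank identifier keeps the
// signature (and therefore the handler type) intact while
// silencing the warning.
var logOnly handler = func(name string, _ []byte) error {
	fmt.Println("event:", name)
	return nil
}

func main() {
	_ = logOnly("connected", nil)
}

Because the blank identifier does not change a function's type, implementations such as NullConnMgr, NoopNotifiee, NullResourceManager and the many test callbacks below continue to satisfy their interfaces unchanged.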

View File

@@ -7,7 +7,7 @@ import (
func TestNilOption(t *testing.T) {
var cfg Config
optsRun := 0
opt := func(c *Config) error {
opt := func(_ *Config) error {
optsRun++
return nil
}

View File

@@ -16,10 +16,10 @@ func (NullConnMgr) TagPeer(peer.ID, string, int) {}
func (NullConnMgr) UntagPeer(peer.ID, string) {}
func (NullConnMgr) UpsertTag(peer.ID, string, func(int) int) {}
func (NullConnMgr) GetTagInfo(peer.ID) *TagInfo { return &TagInfo{} }
func (NullConnMgr) TrimOpenConns(ctx context.Context) {}
func (NullConnMgr) TrimOpenConns(_ context.Context) {}
func (NullConnMgr) Notifee() network.Notifiee { return network.GlobalNoopNotifiee }
func (NullConnMgr) Protect(peer.ID, string) {}
func (NullConnMgr) Unprotect(peer.ID, string) bool { return false }
func (NullConnMgr) IsProtected(peer.ID, string) bool { return false }
func (NullConnMgr) CheckLimit(l GetConnLimiter) error { return nil }
func (NullConnMgr) CheckLimit(_ GetConnLimiter) error { return nil }
func (NullConnMgr) Close() error { return nil }

View File

@@ -61,7 +61,7 @@ func BumpSumBounded(min, max int) BumpFn {
// BumpOverwrite replaces the current value of the tag with the incoming one.
func BumpOverwrite() BumpFn {
return func(value DecayingValue, delta int) (after int) {
return func(_ DecayingValue, delta int) (after int) {
return delta
}
}

View File

@@ -19,7 +19,7 @@ type Secp256k1PrivateKey secp256k1.PrivateKey
type Secp256k1PublicKey secp256k1.PublicKey
// GenerateSecp256k1Key generates a new Secp256k1 private and public key pair
func GenerateSecp256k1Key(src io.Reader) (PrivKey, PubKey, error) {
func GenerateSecp256k1Key(_ io.Reader) (PrivKey, PubKey, error) {
privk, err := secp256k1.GeneratePrivateKey()
if err != nil {
return nil, nil, err

View File

@@ -61,7 +61,7 @@ type NoopNotifiee struct{}
var _ Notifiee = (*NoopNotifiee)(nil)
func (nn *NoopNotifiee) Connected(n Network, c Conn) {}
func (nn *NoopNotifiee) Disconnected(n Network, c Conn) {}
func (nn *NoopNotifiee) Listen(n Network, addr ma.Multiaddr) {}
func (nn *NoopNotifiee) ListenClose(n Network, addr ma.Multiaddr) {}
func (nn *NoopNotifiee) Connected(_ Network, _ Conn) {}
func (nn *NoopNotifiee) Disconnected(_ Network, _ Conn) {}
func (nn *NoopNotifiee) Listen(_ Network, _ ma.Multiaddr) {}
func (nn *NoopNotifiee) ListenClose(_ Network, _ ma.Multiaddr) {}

View File

@@ -291,27 +291,27 @@ func (n *NullResourceManager) ViewSystem(f func(ResourceScope) error) error {
func (n *NullResourceManager) ViewTransient(f func(ResourceScope) error) error {
return f(&NullScope{})
}
func (n *NullResourceManager) ViewService(svc string, f func(ServiceScope) error) error {
func (n *NullResourceManager) ViewService(_ string, f func(ServiceScope) error) error {
return f(&NullScope{})
}
func (n *NullResourceManager) ViewProtocol(p protocol.ID, f func(ProtocolScope) error) error {
func (n *NullResourceManager) ViewProtocol(_ protocol.ID, f func(ProtocolScope) error) error {
return f(&NullScope{})
}
func (n *NullResourceManager) ViewPeer(p peer.ID, f func(PeerScope) error) error {
func (n *NullResourceManager) ViewPeer(_ peer.ID, f func(PeerScope) error) error {
return f(&NullScope{})
}
func (n *NullResourceManager) OpenConnection(dir Direction, usefd bool, endpoint multiaddr.Multiaddr) (ConnManagementScope, error) {
func (n *NullResourceManager) OpenConnection(_ Direction, _ bool, _ multiaddr.Multiaddr) (ConnManagementScope, error) {
return &NullScope{}, nil
}
func (n *NullResourceManager) OpenStream(p peer.ID, dir Direction) (StreamManagementScope, error) {
func (n *NullResourceManager) OpenStream(_ peer.ID, _ Direction) (StreamManagementScope, error) {
return &NullScope{}, nil
}
func (n *NullResourceManager) Close() error {
return nil
}
func (n *NullScope) ReserveMemory(size int, prio uint8) error { return nil }
func (n *NullScope) ReleaseMemory(size int) {}
func (n *NullScope) ReserveMemory(_ int, _ uint8) error { return nil }
func (n *NullScope) ReleaseMemory(_ int) {}
func (n *NullScope) Stat() ScopeStat { return ScopeStat{} }
func (n *NullScope) BeginSpan() (ResourceScopeSpan, error) { return &NullScope{}, nil }
func (n *NullScope) Done() {}
@@ -321,6 +321,6 @@ func (n *NullScope) Peer() peer.ID { return "" }
func (n *NullScope) PeerScope() PeerScope { return &NullScope{} }
func (n *NullScope) SetPeer(peer.ID) error { return nil }
func (n *NullScope) ProtocolScope() ProtocolScope { return &NullScope{} }
func (n *NullScope) SetProtocol(proto protocol.ID) error { return nil }
func (n *NullScope) SetProtocol(_ protocol.ID) error { return nil }
func (n *NullScope) ServiceScope() ServiceScope { return &NullScope{} }
func (n *NullScope) SetService(srv string) error { return nil }
func (n *NullScope) SetService(_ string) error { return nil }

View File

@@ -164,7 +164,7 @@ func (r failingRecord) MarshalRecord() ([]byte, error) {
}
return nil, errors.New("marshal failed")
}
func (r failingRecord) UnmarshalRecord(data []byte) error {
func (r failingRecord) UnmarshalRecord(_ []byte) error {
if r.allowUnmarshal {
return nil
}

View File

@@ -20,7 +20,7 @@ func (p *testPayload) MarshalRecord() ([]byte, error) {
return []byte("hello"), nil
}
func (p *testPayload) UnmarshalRecord(bytes []byte) error {
func (p *testPayload) UnmarshalRecord(_ []byte) error {
p.unmarshalPayloadCalled = true
return nil
}

View File

@@ -56,7 +56,7 @@ func TestNewHost(t *testing.T) {
func TestTransportConstructor(t *testing.T) {
ctor := func(
h host.Host,
_ host.Host,
_ connmgr.ConnectionGater,
upgrader transport.Upgrader,
) transport.Transport {
@@ -157,7 +157,7 @@ func TestChainOptions(t *testing.T) {
newOpt := func() Option {
index := optcount
optcount++
return func(c *Config) error {
return func(_ *Config) error {
optsRun = append(optsRun, index)
return nil
}
@@ -321,7 +321,7 @@ func TestTransportCustomAddressWebTransport(t *testing.T) {
Transport(webtransport.New),
ListenAddrs(customAddr),
DisableRelay(),
AddrsFactory(func(multiaddrs []ma.Multiaddr) []ma.Multiaddr {
AddrsFactory(func(_ []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{customAddr}
}),
)
@@ -351,7 +351,7 @@ func TestTransportCustomAddressWebTransportDoesNotStall(t *testing.T) {
// Purposely not listening on the custom address so that we make sure the node doesn't stall if it fails to add a certhash to the multiaddr
// ListenAddrs(customAddr),
DisableRelay(),
AddrsFactory(func(multiaddrs []ma.Multiaddr) []ma.Multiaddr {
AddrsFactory(func(_ []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{customAddr}
}),
)
@@ -478,7 +478,7 @@ func TestDialCircuitAddrWithWrappedResourceManager(t *testing.T) {
func TestHostAddrsFactoryAddsCerthashes(t *testing.T) {
addr := ma.StringCast("/ip4/1.2.3.4/udp/1/quic-v1/webtransport")
h, err := New(
AddrsFactory(func(m []ma.Multiaddr) []ma.Multiaddr {
AddrsFactory(func(_ []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{addr}
}),
)
@@ -786,7 +786,7 @@ func TestSharedTCPAddr(t *testing.T) {
func TestCustomTCPDialer(t *testing.T) {
expectedErr := errors.New("custom dialer called, but not implemented")
customDialer := func(raddr ma.Multiaddr) (tcp.ContextDialer, error) {
customDialer := func(_ ma.Multiaddr) (tcp.ContextDialer, error) {
// Normally a user would implement this by returning a custom dialer
// Here, we just test that this is called.
return nil, expectedErr

View File

@@ -19,7 +19,7 @@ func (f *filtersConnectionGater) InterceptAddrDial(_ peer.ID, addr ma.Multiaddr)
return !(*ma.Filters)(f).AddrBlocked(addr)
}
func (f *filtersConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) {
func (f *filtersConnectionGater) InterceptPeerDial(_ peer.ID) (allow bool) {
return true
}

View File

@@ -39,7 +39,7 @@ func FullJitter(duration, min, max time.Duration, rng *rand.Rand) time.Duration
}
// NoJitter returns the duration bounded between min and max
func NoJitter(duration, min, max time.Duration, rng *rand.Rand) time.Duration {
func NoJitter(duration, min, max time.Duration, _ *rand.Rand) time.Duration {
return boundedDuration(duration, min, max)
}

View File

@@ -127,17 +127,17 @@ func TestManyBackoffFactory(t *testing.T) {
rngSource := rand.NewSource(0)
concurrent := 10
t.Run("Exponential", func(t *testing.T) {
t.Run("Exponential", func(_ *testing.T) {
testManyBackoffFactoryHelper(concurrent,
NewExponentialBackoff(time.Millisecond*650, time.Second*7, FullJitter, time.Second, 1.5, -time.Millisecond*400, rngSource),
)
})
t.Run("Polynomial", func(t *testing.T) {
t.Run("Polynomial", func(_ *testing.T) {
testManyBackoffFactoryHelper(concurrent,
NewPolynomialBackoff(time.Second, time.Second*33, NoJitter, time.Second, []float64{0.5, 2, 3}, rngSource),
)
})
t.Run("Fixed", func(t *testing.T) {
t.Run("Fixed", func(_ *testing.T) {
testManyBackoffFactoryHelper(concurrent,
NewFixedBackoff(time.Second),
)

View File

@@ -93,7 +93,7 @@ func NewDiscoveryClient(h host.Host, server *MockDiscoveryServer) *MockDiscovery
}
}
func (d *MockDiscoveryClient) Advertise(ctx context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
func (d *MockDiscoveryClient) Advertise(_ context.Context, ns string, opts ...discovery.Option) (time.Duration, error) {
var options discovery.Options
err := options.Apply(opts...)
if err != nil {
@@ -103,7 +103,7 @@ func (d *MockDiscoveryClient) Advertise(ctx context.Context, ns string, opts ...
return d.server.Advertise(ns, *host.InfoFromHost(d.host), options.Ttl)
}
func (d *MockDiscoveryClient) FindPeers(ctx context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
func (d *MockDiscoveryClient) FindPeers(_ context.Context, ns string, opts ...discovery.Option) (<-chan peer.AddrInfo, error) {
var options discovery.Options
err := options.Apply(opts...)
if err != nil {

View File

@@ -36,7 +36,7 @@ func NewMockRouting(h host.Host, tab *mockRoutingTable) *mockRouting {
return &mockRouting{h: h, tab: tab}
}
func (m *mockRouting) Provide(ctx context.Context, cid cid.Cid, bcast bool) error {
func (m *mockRouting) Provide(_ context.Context, cid cid.Cid, _ bool) error {
m.tab.mx.Lock()
defer m.tab.mx.Unlock()
@@ -51,7 +51,7 @@ func (m *mockRouting) Provide(ctx context.Context, cid cid.Cid, bcast bool) erro
return nil
}
func (m *mockRouting) FindProvidersAsync(ctx context.Context, cid cid.Cid, limit int) <-chan peer.AddrInfo {
func (m *mockRouting) FindProvidersAsync(ctx context.Context, cid cid.Cid, _ int) <-chan peer.AddrInfo {
ch := make(chan peer.AddrInfo)
go func() {
defer close(ch)

View File

@@ -28,11 +28,11 @@ type mockT struct {
addr multiaddr.Multiaddr
}
func (m *mockT) Dial(ctx context.Context, a multiaddr.Multiaddr, p peer.ID) (transport.CapableConn, error) {
func (m *mockT) Dial(_ context.Context, _ multiaddr.Multiaddr, _ peer.ID) (transport.CapableConn, error) {
return nil, nil
}
func (m *mockT) CanDial(_ multiaddr.Multiaddr) bool { return true }
func (m *mockT) Listen(a multiaddr.Multiaddr) (transport.Listener, error) {
func (m *mockT) Listen(_ multiaddr.Multiaddr) (transport.Listener, error) {
return &mockL{m.ctx, m.cancel, m.addr}, nil
}
func (m *mockT) Protocols() []int { return []int{multiaddr.P_IP4} }

View File

@@ -10,13 +10,13 @@ import (
var _ network.Notifiee = (*AmbientAutoNAT)(nil)
// Listen is part of the network.Notifiee interface
func (as *AmbientAutoNAT) Listen(net network.Network, a ma.Multiaddr) {}
func (as *AmbientAutoNAT) Listen(_ network.Network, _ ma.Multiaddr) {}
// ListenClose is part of the network.Notifiee interface
func (as *AmbientAutoNAT) ListenClose(net network.Network, a ma.Multiaddr) {}
func (as *AmbientAutoNAT) ListenClose(_ network.Network, _ ma.Multiaddr) {}
// Connected is part of the network.Notifiee interface
func (as *AmbientAutoNAT) Connected(net network.Network, c network.Conn) {
func (as *AmbientAutoNAT) Connected(_ network.Network, c network.Conn) {
if c.Stat().Direction == network.DirInbound &&
manet.IsPublicAddr(c.RemoteMultiaddr()) {
select {
@@ -27,4 +27,4 @@ func (as *AmbientAutoNAT) Connected(net network.Network, c network.Conn) {
}
// Disconnected is part of the network.Notifiee interface
func (as *AmbientAutoNAT) Disconnected(net network.Network, c network.Conn) {}
func (as *AmbientAutoNAT) Disconnected(_ network.Network, _ network.Conn) {}

View File

@@ -67,7 +67,7 @@ func WithStaticRelays(static []peer.AddrInfo) Option {
return errAlreadyHavePeerSource
}
WithPeerSource(func(ctx context.Context, numPeers int) <-chan peer.AddrInfo {
WithPeerSource(func(_ context.Context, numPeers int) <-chan peer.AddrInfo {
if len(static) < numPeers {
numPeers = len(static)
}

View File

@@ -30,7 +30,7 @@ func TestAppendNATAddrs(t *testing.T) {
// nat mapping success, obsaddress ignored
Listen: ma.StringCast("/ip4/0.0.0.0/udp/1/quic-v1"),
Nat: ma.StringCast("/ip4/1.1.1.1/udp/10/quic-v1"),
ObsAddrFunc: func(m ma.Multiaddr) []ma.Multiaddr {
ObsAddrFunc: func(_ ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{ma.StringCast("/ip4/2.2.2.2/udp/100/quic-v1")}
},
Expected: []ma.Multiaddr{ma.StringCast("/ip4/1.1.1.1/udp/10/quic-v1")},
@@ -116,7 +116,7 @@ func TestAppendNATAddrs(t *testing.T) {
t.Run(tc.Name, func(t *testing.T) {
as := &addrsManager{
natManager: &mockNatManager{
GetMappingFunc: func(addr ma.Multiaddr) ma.Multiaddr {
GetMappingFunc: func(_ ma.Multiaddr) ma.Multiaddr {
return tc.Nat
},
},
@@ -326,7 +326,7 @@ func TestAddrsManager(t *testing.T) {
}
am := newAddrsManagerTestCase(t, addrsManagerArgs{
ObservedAddrsManager: &mockObservedAddrs{
ObservedAddrsForFunc: func(addr ma.Multiaddr) []ma.Multiaddr {
ObservedAddrsForFunc: func(_ ma.Multiaddr) []ma.Multiaddr {
return quicAddrs
},
},
@@ -342,7 +342,7 @@ func TestAddrsManager(t *testing.T) {
t.Run("public addrs removed when private", func(t *testing.T) {
am := newAddrsManagerTestCase(t, addrsManagerArgs{
ObservedAddrsManager: &mockObservedAddrs{
ObservedAddrsForFunc: func(addr ma.Multiaddr) []ma.Multiaddr {
ObservedAddrsForFunc: func(_ ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{publicQUIC}
},
},
@@ -384,7 +384,7 @@ func TestAddrsManager(t *testing.T) {
return nil
},
ObservedAddrsManager: &mockObservedAddrs{
ObservedAddrsForFunc: func(addr ma.Multiaddr) []ma.Multiaddr {
ObservedAddrsForFunc: func(_ ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{publicQUIC}
},
},
@@ -404,7 +404,7 @@ func TestAddrsManager(t *testing.T) {
t.Run("updates addresses on signaling", func(t *testing.T) {
updateChan := make(chan struct{})
am := newAddrsManagerTestCase(t, addrsManagerArgs{
AddrsFactory: func(addrs []ma.Multiaddr) []ma.Multiaddr {
AddrsFactory: func(_ []ma.Multiaddr) []ma.Multiaddr {
select {
case <-updateChan:
return []ma.Multiaddr{publicQUIC}

View File

@@ -541,7 +541,7 @@ func (h *BasicHost) EventBus() event.Bus {
//
// (Thread-safe)
func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHandler) {
h.Mux().AddHandler(pid, func(p protocol.ID, rwc io.ReadWriteCloser) error {
h.Mux().AddHandler(pid, func(_ protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
handler(is)
return nil
@@ -554,7 +554,7 @@ func (h *BasicHost) SetStreamHandler(pid protocol.ID, handler network.StreamHand
// SetStreamHandlerMatch sets the protocol handler on the Host's Mux
// using a matching function to do protocol comparisons
func (h *BasicHost) SetStreamHandlerMatch(pid protocol.ID, m func(protocol.ID) bool, handler network.StreamHandler) {
h.Mux().AddHandlerWithFunc(pid, m, func(p protocol.ID, rwc io.ReadWriteCloser) error {
h.Mux().AddHandlerWithFunc(pid, m, func(_ protocol.ID, rwc io.ReadWriteCloser) error {
is := rwc.(network.Stream)
handler(is)
return nil

View File

@@ -170,9 +170,9 @@ func TestProtocolHandlerEvents(t *testing.T) {
}
}
h.SetStreamHandler(protocol.TestingID, func(s network.Stream) {})
h.SetStreamHandler(protocol.TestingID, func(_ network.Stream) {})
assert([]protocol.ID{protocol.TestingID}, nil)
h.SetStreamHandler("foo", func(s network.Stream) {})
h.SetStreamHandler("foo", func(_ network.Stream) {})
assert([]protocol.ID{"foo"}, nil)
h.RemoveStreamHandler(protocol.TestingID)
assert(nil, []protocol.ID{protocol.TestingID})
@@ -180,7 +180,7 @@ func TestProtocolHandlerEvents(t *testing.T) {
func TestHostAddrsFactory(t *testing.T) {
maddr := ma.StringCast("/ip4/1.2.3.4/tcp/1234")
addrsFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr {
addrsFactory := func(_ []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{maddr}
}
@@ -240,7 +240,7 @@ func TestAllAddrsUnique(t *testing.T) {
}()
sendNewAddrs := make(chan struct{})
opts := HostOpts{
AddrsFactory: func(addrs []ma.Multiaddr) []ma.Multiaddr {
AddrsFactory: func(_ []ma.Multiaddr) []ma.Multiaddr {
select {
case <-sendNewAddrs:
return []ma.Multiaddr{
@@ -706,7 +706,7 @@ func TestHostAddrChangeDetection(t *testing.T) {
var lk sync.Mutex
currentAddrSet := 0
addrsFactory := func(addrs []ma.Multiaddr) []ma.Multiaddr {
addrsFactory := func(_ []ma.Multiaddr) []ma.Multiaddr {
lk.Lock()
defer lk.Unlock()
return addrSets[currentAddrSet]

View File

@@ -294,6 +294,6 @@ type nmgrNetNotifiee natManager
func (nn *nmgrNetNotifiee) natManager() *natManager { return (*natManager)(nn) }
func (nn *nmgrNetNotifiee) Listen(network.Network, ma.Multiaddr) { nn.natManager().sync() }
func (nn *nmgrNetNotifiee) ListenClose(n network.Network, addr ma.Multiaddr) { nn.natManager().sync() }
func (nn *nmgrNetNotifiee) ListenClose(_ network.Network, _ ma.Multiaddr) { nn.natManager().sync() }
func (nn *nmgrNetNotifiee) Connected(network.Network, network.Conn) {}
func (nn *nmgrNetNotifiee) Disconnected(network.Network, network.Conn) {}

View File

@@ -20,7 +20,7 @@ func setupMockNAT(t *testing.T) (mockNAT *MockNAT, reset func()) {
ctrl := gomock.NewController(t)
mockNAT = NewMockNAT(ctrl)
origDiscoverNAT := discoverNAT
discoverNAT = func(ctx context.Context) (nat, error) { return mockNAT, nil }
discoverNAT = func(_ context.Context) (nat, error) { return mockNAT, nil }
return mockNAT, func() {
discoverNAT = origDiscoverNAT
ctrl.Finish()

View File

@@ -470,7 +470,7 @@ func (ab *dsAddrBook) ClearAddrs(p peer.ID) {
}
}
func (ab *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, mode ttlWriteMode, signed bool) (err error) {
func (ab *dsAddrBook) setAddrs(p peer.ID, addrs []ma.Multiaddr, ttl time.Duration, mode ttlWriteMode, _ bool) (err error) {
if len(addrs) == 0 {
return nil
}

View File

@@ -17,21 +17,21 @@ type noopCache[K comparable, V any] struct {
var _ cache[int, int] = (*noopCache[int, int])(nil)
func (*noopCache[K, V]) Get(key K) (value V, ok bool) {
func (*noopCache[K, V]) Get(_ K) (value V, ok bool) {
return *new(V), false
}
func (*noopCache[K, V]) Add(key K, value V) {
func (*noopCache[K, V]) Add(_ K, _ V) {
}
func (*noopCache[K, V]) Remove(key K) {
func (*noopCache[K, V]) Remove(_ K) {
}
func (*noopCache[K, V]) Contains(key K) bool {
func (*noopCache[K, V]) Contains(_ K) bool {
return false
}
func (*noopCache[K, V]) Peek(key K) (value V, ok bool) {
func (*noopCache[K, V]) Peek(_ K) (value V, ok bool) {
return *new(V), false
}

View File

@@ -23,7 +23,7 @@ type cyclicBatch struct {
pending int
}
func newCyclicBatch(ds ds.Batching, threshold int) (ds.Batch, error) {
func newCyclicBatch(ds ds.Batching, _ int) (ds.Batch, error) {
batch, err := ds.Batch(context.TODO())
if err != nil {
return nil, err

View File

@@ -14,7 +14,7 @@ import (
"github.com/stretchr/testify/require"
)
func mapDBStore(tb testing.TB) (ds.Batching, func()) {
func mapDBStore(_ testing.TB) (ds.Batching, func()) {
store := ds.NewMapDatastore()
closer := func() {
store.Close()

View File

@@ -162,7 +162,7 @@ func testAddAddress(ab pstore.AddrBook, clk *mockClock.Mock) func(*testing.T) {
}
}
func testClearWorks(ab pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
func testClearWorks(ab pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
return func(t *testing.T) {
ids := GeneratePeerIDs(2)
addrs := GenerateAddrs(5)
@@ -183,7 +183,7 @@ func testClearWorks(ab pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T)
}
}
func testSetNegativeTTLClears(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
func testSetNegativeTTLClears(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
return func(t *testing.T) {
id := GeneratePeerIDs(1)[0]
addrs := GenerateAddrs(100)
@@ -229,7 +229,7 @@ func testSetNegativeTTLClears(m pstore.AddrBook, clk *mockClock.Mock) func(t *te
func testUpdateTTLs(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
return func(t *testing.T) {
t.Run("update ttl of peer with no addrs", func(t *testing.T) {
t.Run("update ttl of peer with no addrs", func(_ *testing.T) {
id := GeneratePeerIDs(1)[0]
// Shouldn't panic.
@@ -293,8 +293,8 @@ func testUpdateTTLs(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
}
}
func testNilAddrsDontBreak(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
return func(t *testing.T) {
func testNilAddrsDontBreak(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
return func(_ *testing.T) {
id := GeneratePeerIDs(1)[0]
m.SetAddr(id, nil, time.Hour)
@@ -347,7 +347,7 @@ func testAddressesExpire(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing
}
}
func testClearWithIterator(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
func testClearWithIterator(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
return func(t *testing.T) {
ids := GeneratePeerIDs(2)
addrs := GenerateAddrs(100)
@@ -374,7 +374,7 @@ func testClearWithIterator(m pstore.AddrBook, clk *mockClock.Mock) func(t *testi
}
}
func testPeersWithAddrs(m pstore.AddrBook, clk *mockClock.Mock) func(t *testing.T) {
func testPeersWithAddrs(m pstore.AddrBook, _ *mockClock.Mock) func(t *testing.T) {
return func(t *testing.T) {
// cannot run in parallel as the store is modified.
// go runs sequentially in the specified order

View File

@@ -7,7 +7,7 @@ import (
pstore "github.com/libp2p/go-libp2p/core/peerstore"
)
func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, variant string) {
func BenchmarkPeerstore(b *testing.B, factory PeerstoreFactory, _ string) {
for _, sz := range []int{1, 10, 100} {
const N = 10000
peers := getPeerPairs(b, N, sz)

View File

@@ -33,7 +33,7 @@ func TestGracePeriod(t *testing.T) {
require.NoError(t, err)
start := time.Now()
removed := make(chan struct{})
pstore.EXPECT().RemovePeer(peer.ID("foobar")).DoAndReturn(func(p peer.ID) {
pstore.EXPECT().RemovePeer(peer.ID("foobar")).DoAndReturn(func(_ peer.ID) {
defer close(removed)
// make sure the call happened after the grace period
require.GreaterOrEqual(t, time.Since(start), gracePeriod)

View File

@@ -250,7 +250,7 @@ func TestConcurrentAuth(t *testing.T) {
},
TokenTTL: time.Hour,
NoTLS: true,
Next: func(peer peer.ID, w http.ResponseWriter, r *http.Request) {
Next: func(_ peer.ID, w http.ResponseWriter, r *http.Request) {
reqBody, err := io.ReadAll(r.Body)
require.NoError(t, err)
_, err = w.Write(reqBody)

View File

@@ -318,7 +318,7 @@ func FuzzServerHandshakeNoPanic(f *testing.F) {
zeroBytes := [32]byte{}
hmac := hmac.New(sha256.New, zeroBytes[:])
f.Fuzz(func(t *testing.T, data []byte) {
f.Fuzz(func(_ *testing.T, data []byte) {
hmac.Reset()
h := PeerIDAuthHandshakeServer{
Hostname: "example.com",
@@ -380,7 +380,7 @@ func BenchmarkOpaqueStateRead(b *testing.B) {
func FuzzParsePeerIDAuthSchemeParamsNoPanic(f *testing.F) {
p := params{}
// Just check that we don't panic
f.Fuzz(func(t *testing.T, data []byte) {
f.Fuzz(func(_ *testing.T, data []byte) {
p.parsePeerIDAuthSchemeParams(data)
})
}

View File

@@ -225,7 +225,7 @@ func ExampleHost_SetHTTPHandler() {
ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
}
server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte("Hello World"))
}))
@@ -259,7 +259,7 @@ func ExampleHost_SetHTTPHandlerAtPath() {
ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
}
server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte("Hello World"))
}))
@@ -296,7 +296,7 @@ func ExampleHost_NamespacedClient() {
ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
}
server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server.SetHTTPHandlerAtPath("/hello/1", "/other-place/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte("Hello World"))
}))
@@ -334,7 +334,7 @@ func ExampleHost_NamespaceRoundTripper() {
ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
}
server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte("Hello World"))
}))
@@ -378,7 +378,7 @@ func ExampleHost_NewConstrainedRoundTripper() {
ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
}
server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server.SetHTTPHandler("/hello/1", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte("Hello World"))
}))
@@ -446,7 +446,7 @@ func ExampleHost_RoundTrip() {
}
go server.Serve()
defer server.Close()
server.SetHTTPHandlerAtPath("/hello/", "/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
server.SetHTTPHandlerAtPath("/hello/", "/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("Hello World"))
}))

View File

@@ -44,7 +44,7 @@ func TestHTTPOverStreams(t *testing.T) {
httpHost := libp2phttp.Host{StreamHost: serverHost}
httpHost.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpHost.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("hello"))
}))
@@ -124,7 +124,7 @@ func TestHTTPOverStreamsContextAndClientTimeout(t *testing.T) {
require.NoError(t, err)
httpHost := libp2phttp.Host{StreamHost: serverHost}
httpHost.SetHTTPHandler("/hello/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpHost.SetHTTPHandler("/hello/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
time.Sleep(2 * clientTimeout)
w.Write([]byte("hello"))
}))
@@ -180,7 +180,7 @@ func TestHTTPOverStreamsReturnsConnectionClose(t *testing.T) {
httpHost := libp2phttp.Host{StreamHost: serverHost}
httpHost.SetHTTPHandlerAtPath("/hello", "/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpHost.SetHTTPHandlerAtPath("/hello", "/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("hello"))
}))
@@ -222,7 +222,7 @@ func TestRoundTrippers(t *testing.T) {
ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
}
httpHost.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpHost.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("hello"))
}))
@@ -240,7 +240,7 @@ func TestRoundTrippers(t *testing.T) {
}{
{
name: "HTTP preferred",
setupRoundTripper: func(t *testing.T, clientStreamHost host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
ID: serverHost.ID(),
Addrs: serverMultiaddrs,
@@ -251,7 +251,7 @@ func TestRoundTrippers(t *testing.T) {
},
{
name: "HTTP first",
setupRoundTripper: func(t *testing.T, clientStreamHost host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
ID: serverHost.ID(),
Addrs: []ma.Multiaddr{serverHTTPAddr, serverHost.Addrs()[0]},
@@ -262,7 +262,7 @@ func TestRoundTrippers(t *testing.T) {
},
{
name: "No HTTP transport",
setupRoundTripper: func(t *testing.T, clientStreamHost host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
ID: serverHost.ID(),
Addrs: []ma.Multiaddr{serverHost.Addrs()[0]},
@@ -274,7 +274,7 @@ func TestRoundTrippers(t *testing.T) {
},
{
name: "Stream transport first",
setupRoundTripper: func(t *testing.T, clientStreamHost host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
setupRoundTripper: func(t *testing.T, _ host.Host, clientHTTPHost *libp2phttp.Host) http.RoundTripper {
rt, err := clientHTTPHost.NewConstrainedRoundTripper(peer.AddrInfo{
ID: serverHost.ID(),
Addrs: []ma.Multiaddr{serverHost.Addrs()[0], serverHTTPAddr},
@@ -402,7 +402,7 @@ func TestPlainOldHTTPServer(t *testing.T) {
},
{
name: "using stock http client",
do: func(t *testing.T, request *http.Request) (*http.Response, error) {
do: func(_ *testing.T, request *http.Request) (*http.Response, error) {
request.URL.Scheme = "http"
request.URL.Host = l.Addr().String()
request.Host = l.Addr().String()
@@ -456,7 +456,7 @@ func TestHostZeroValue(t *testing.T) {
InsecureAllowHTTP: true,
ListenAddrs: []ma.Multiaddr{ma.StringCast("/ip4/127.0.0.1/tcp/0/http")},
}
server.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { w.Write([]byte("hello")) }))
server.SetHTTPHandler("/hello", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) { w.Write([]byte("hello")) }))
go func() {
server.Serve()
}()
@@ -564,7 +564,7 @@ func TestCustomServeMux(t *testing.T) {
}
func TestSetHandlerAtPath(t *testing.T) {
hf := func(w http.ResponseWriter, r *http.Request) {
hf := func(w http.ResponseWriter, _ *http.Request) {
w.Header().Add("Content-Type", "text/plain")
w.Write([]byte("Hello World"))
}
@@ -733,7 +733,7 @@ func TestResponseWriterShouldNotHaveCancelledContext(t *testing.T) {
defer httpHost.Close()
closeNotifyCh := make(chan bool, 1)
httpHost.SetHTTPHandlerAtPath("/test", "/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
httpHost.SetHTTPHandlerAtPath("/test", "/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
// Legacy code uses this to check if the connection was closed
//lint:ignore SA1019 This is a test to assert we do the right thing since Go HTTP stdlib depends on this.
ch := w.(http.CloseNotifier).CloseNotify()
@@ -781,7 +781,7 @@ func TestHTTPHostAsRoundTripper(t *testing.T) {
}))
// Different protocol.ID and mounted at a different path
serverHttpHost.SetHTTPHandlerAtPath("/hello-again", "/hello2", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/hello-again", "/hello2", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("hello"))
}))
@@ -844,30 +844,30 @@ func TestRedirects(t *testing.T) {
go serverHttpHost.Serve()
defer serverHttpHost.Close()
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/a", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/a", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Location", "/b/")
w.WriteHeader(http.StatusMovedPermanently)
}))
serverHttpHost.SetHTTPHandlerAtPath("/redirect-2/0.0.1", "/b", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-2/0.0.1", "/b", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Location", "/c/")
w.WriteHeader(http.StatusMovedPermanently)
}))
serverHttpHost.SetHTTPHandlerAtPath("/redirect-3/0.0.1", "/c", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-3/0.0.1", "/c", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Location", "/d/")
w.WriteHeader(http.StatusMovedPermanently)
}))
serverHttpHost.SetHTTPHandlerAtPath("/redirect-4/0.0.1", "/d", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-4/0.0.1", "/d", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("hello"))
}))
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/foo/bar/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/foo/bar/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Location", "../baz/")
w.WriteHeader(http.StatusMovedPermanently)
}))
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/foo/baz/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/foo/baz/", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Write([]byte("hello"))
}))
@@ -945,12 +945,12 @@ func TestMultiaddrURIRedirect(t *testing.T) {
require.NotNil(t, streamMultiaddr)
// Redirect to a whole other transport!
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/a", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-1/0.0.1", "/a", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.Header().Set("Location", fmt.Sprintf("multiaddr:%s/p2p/%s/http-path/b", streamMultiaddr, serverHost.ID()))
w.WriteHeader(http.StatusMovedPermanently)
}))
serverHttpHost.SetHTTPHandlerAtPath("/redirect-2/0.0.1", "/b", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
serverHttpHost.SetHTTPHandlerAtPath("/redirect-2/0.0.1", "/b", http.HandlerFunc(func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
}))

View File

@@ -282,7 +282,7 @@ func (cg *BasicConnectionGater) InterceptPeerDial(p peer.ID) (allow bool) {
return !block
}
func (cg *BasicConnectionGater) InterceptAddrDial(p peer.ID, a ma.Multiaddr) (allow bool) {
func (cg *BasicConnectionGater) InterceptAddrDial(_ peer.ID, a ma.Multiaddr) (allow bool) {
// we have already filtered blocked peers in InterceptPeerDial, so we just check the IP
cg.RLock()
defer cg.RUnlock()
@@ -333,7 +333,7 @@ func (cg *BasicConnectionGater) InterceptAccept(cma network.ConnMultiaddrs) (all
return true
}
func (cg *BasicConnectionGater) InterceptSecured(dir network.Direction, p peer.ID, cma network.ConnMultiaddrs) (allow bool) {
func (cg *BasicConnectionGater) InterceptSecured(dir network.Direction, p peer.ID, _ network.ConnMultiaddrs) (allow bool) {
if dir == network.DirOutbound {
// we have already filtered those in InterceptPeerDial/InterceptAddrDial
return true

View File

@@ -660,7 +660,7 @@ func (nn *cmNotifee) cm() *BasicConnMgr {
// Connected is called by notifiers to inform that a new connection has been established.
// The notifee updates the BasicConnMgr to start tracking the connection. If the new connection
// count exceeds the high watermark, a trim may be triggered.
func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
func (nn *cmNotifee) Connected(_ network.Network, c network.Conn) {
cm := nn.cm()
p := c.RemotePeer()
@@ -699,7 +699,7 @@ func (nn *cmNotifee) Connected(n network.Network, c network.Conn) {
// Disconnected is called by notifiers to inform that an existing connection has been closed or terminated.
// The notifee updates the BasicConnMgr accordingly to stop tracking the connection, and performs housekeeping.
func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) {
func (nn *cmNotifee) Disconnected(_ network.Network, c network.Conn) {
cm := nn.cm()
p := c.RemotePeer()
@@ -727,7 +727,7 @@ func (nn *cmNotifee) Disconnected(n network.Network, c network.Conn) {
}
// Listen is no-op in this implementation.
func (nn *cmNotifee) Listen(n network.Network, addr ma.Multiaddr) {}
func (nn *cmNotifee) Listen(_ network.Network, _ ma.Multiaddr) {}
// ListenClose is no-op in this implementation.
func (nn *cmNotifee) ListenClose(n network.Network, addr ma.Multiaddr) {}
func (nn *cmNotifee) ListenClose(_ network.Network, _ ma.Multiaddr) {}

View File

@@ -36,7 +36,7 @@ func (c *tconn) Close() error {
return nil
}
func (c *tconn) CloseWithError(code network.ConnErrorCode) error {
func (c *tconn) CloseWithError(_ network.ConnErrorCode) error {
atomic.StoreUint32(&c.closed, 1)
if c.disconnectNotify != nil {
c.disconnectNotify(nil, c)
@@ -805,7 +805,7 @@ type mockConn struct {
}
func (m mockConn) Close() error { panic("implement me") }
func (m mockConn) CloseWithError(errCode network.ConnErrorCode) error { panic("implement me") }
func (m mockConn) CloseWithError(_ network.ConnErrorCode) error { panic("implement me") }
func (m mockConn) LocalPeer() peer.ID { panic("implement me") }
func (m mockConn) RemotePeer() peer.ID { panic("implement me") }
func (m mockConn) RemotePublicKey() crypto.PubKey { panic("implement me") }
@@ -814,7 +814,7 @@ func (m mockConn) RemoteMultiaddr() ma.Multiaddr { panic
func (m mockConn) Stat() network.ConnStats { return m.stats }
func (m mockConn) ID() string { panic("implement me") }
func (m mockConn) IsClosed() bool { panic("implement me") }
func (m mockConn) NewStream(ctx context.Context) (network.Stream, error) { panic("implement me") }
func (m mockConn) NewStream(_ context.Context) (network.Stream, error) { panic("implement me") }
func (m mockConn) GetStreams() []network.Stream { panic("implement me") }
func (m mockConn) Scope() network.ConnScope { panic("implement me") }
func (m mockConn) ConnState() network.ConnectionState { return network.ConnectionState{} }

View File

@@ -191,15 +191,15 @@ func newNetNotifiee(t *testing.T, buffer int) *netNotifiee {
return nn
}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {
func (nn *netNotifiee) Listen(_ network.Network, a ma.Multiaddr) {
nn.listen <- a
}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {
func (nn *netNotifiee) ListenClose(_ network.Network, a ma.Multiaddr) {
nn.listenClose <- a
}
func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
func (nn *netNotifiee) Connected(_ network.Network, v network.Conn) {
nn.connected <- v
}
func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
func (nn *netNotifiee) Disconnected(_ network.Network, v network.Conn) {
nn.disconnected <- v
}

View File

@@ -106,7 +106,7 @@ func (pn *peernet) handleNewStream(s network.Stream) {
// DialPeer attempts to establish a connection to a given peer.
// Respects the context.
func (pn *peernet) DialPeer(ctx context.Context, p peer.ID) (network.Conn, error) {
func (pn *peernet) DialPeer(_ context.Context, p peer.ID) (network.Conn, error) {
return pn.connect(p)
}
@@ -151,7 +151,7 @@ func (pn *peernet) connect(p peer.ID) (*conn, error) {
return pn.openConn(p, l.(*link))
}
func (pn *peernet) openConn(r peer.ID, l *link) (*conn, error) {
func (pn *peernet) openConn(_ peer.ID, l *link) (*conn, error) {
lc, rc := l.newConnPair(pn)
addConnPair(pn, rc.net, lc, rc)
log.Debugf("%s opening connection to %s", pn.LocalPeer(), lc.RemotePeer())
@@ -435,6 +435,6 @@ func (pn *peernet) ResourceManager() network.ResourceManager {
return &network.NullResourceManager{}
}
func (pn *peernet) CanDial(p peer.ID, addr ma.Multiaddr) bool {
func (pn *peernet) CanDial(_ peer.ID, _ ma.Multiaddr) bool {
return true
}

View File

@@ -174,15 +174,15 @@ func (s *stream) Conn() network.Conn {
return s.conn
}
func (s *stream) SetDeadline(t time.Time) error {
func (s *stream) SetDeadline(_ time.Time) error {
return &net.OpError{Op: "set", Net: "pipe", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
func (s *stream) SetReadDeadline(t time.Time) error {
func (s *stream) SetReadDeadline(_ time.Time) error {
return &net.OpError{Op: "set", Net: "pipe", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}
func (s *stream) SetWriteDeadline(t time.Time) error {
func (s *stream) SetWriteDeadline(_ time.Time) error {
return &net.OpError{Op: "set", Net: "pipe", Source: nil, Addr: nil, Err: errors.New("deadline not supported")}
}

View File

@@ -20,7 +20,7 @@ func setupMockNAT(t *testing.T) (mockNAT *MockNAT, reset func()) {
mockNAT = NewMockNAT(ctrl)
mockNAT.EXPECT().GetDeviceAddress().Return(nil, errors.New("nope")) // is only used for logging
origDiscoverGateway := discoverGateway
discoverGateway = func(ctx context.Context) (nat.NAT, error) { return mockNAT, nil }
discoverGateway = func(_ context.Context) (nat.NAT, error) { return mockNAT, nil }
return mockNAT, func() {
discoverGateway = origDiscoverGateway
ctrl.Finish()

View File

@@ -8,7 +8,7 @@ import (
"testing"
)
func setupPSKConns(ctx context.Context, t *testing.T) (net.Conn, net.Conn) {
func setupPSKConns(_ context.Context, t *testing.T) (net.Conn, net.Conn) {
testPSK := make([]byte, 32) // null bytes are as good test key as any other key
conn1, conn2 := net.Pipe()

View File

@@ -45,7 +45,7 @@ func (t *Transport) DialContext(ctx context.Context, raddr ma.Multiaddr) (manet.
return maconn, nil
}
func (n *network) getDialer(network string) *dialer {
func (n *network) getDialer(_ string) *dialer {
n.mu.RLock()
d := n.dialer
n.mu.RUnlock()

View File

@@ -207,7 +207,7 @@ func TestGlobalToGlobal(t *testing.T) {
testUseFirst(t, globalV6, globalV6, loopbackV6)
}
func testUseFirst(t *testing.T, listen, use, never ma.Multiaddr) {
func testUseFirst(t *testing.T, _, _, _ ma.Multiaddr) {
var trA Transport
var trB Transport
listenerA, err := trA.Listen(globalV4)

View File

@@ -17,7 +17,7 @@ func getMockDialFunc() (dialWorkerFunc, func(), context.Context, <-chan struct{}
dfcalls := make(chan struct{}, 512) // buffer it large enough that we won't care
dialctx, cancel := context.WithCancel(context.Background())
ch := make(chan struct{})
f := func(p peer.ID, reqch <-chan dialRequest) {
f := func(_ peer.ID, reqch <-chan dialRequest) {
defer cancel()
dfcalls <- struct{}{}
go func() {
@@ -164,7 +164,7 @@ func TestDialSyncAllCancel(t *testing.T) {
func TestFailFirst(t *testing.T) {
var handledFirst atomic.Bool
dialErr := fmt.Errorf("gophers ate the modem")
f := func(p peer.ID, reqch <-chan dialRequest) {
f := func(_ peer.ID, reqch <-chan dialRequest) {
go func() {
for {
req, ok := <-reqch
@@ -195,8 +195,8 @@ func TestFailFirst(t *testing.T) {
require.NotNil(t, c, "should have gotten a 'real' conn back")
}
func TestStressActiveDial(t *testing.T) {
ds := newDialSync(func(p peer.ID, reqch <-chan dialRequest) {
func TestStressActiveDial(_ *testing.T) {
ds := newDialSync(func(_ peer.ID, reqch <-chan dialRequest) {
go func() {
for {
req, ok := <-reqch

View File

@@ -743,7 +743,7 @@ loop:
// makeRanker takes a slice of timedDial objects and returns a DialRanker
// which will trigger dials to addresses at the specified delays in the timedDials
func makeRanker(tc []timedDial) network.DialRanker {
return func(addrs []ma.Multiaddr) []network.AddrDelay {
return func(_ []ma.Multiaddr) []network.AddrDelay {
res := make([]network.AddrDelay, len(tc))
for i := 0; i < len(tc); i++ {
res[i] = network.AddrDelay{Addr: tc[i].addr, Delay: tc[i].delay}
@@ -1104,7 +1104,7 @@ func TestDialWorkerLoopTCPConnUpgradeWait(t *testing.T) {
s1.Peerstore().AddAddrs(s2.LocalPeer(), []ma.Multiaddr{a1, a2}, peerstore.PermanentAddrTTL)
rankerCalled := make(chan struct{})
s1.dialRanker = func(addrs []ma.Multiaddr) []network.AddrDelay {
s1.dialRanker = func(_ []ma.Multiaddr) []network.AddrDelay {
defer close(rankerCalled)
return []network.AddrDelay{{Addr: a1, Delay: 0}, {Addr: a2, Delay: 100 * time.Millisecond}}
}

View File

@@ -51,7 +51,7 @@ func tryDialAddrs(ctx context.Context, l *dialLimiter, p peer.ID, addrs []ma.Mul
}
func hangDialFunc(hang chan struct{}) dialfunc {
return func(ctx context.Context, p peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
return func(_ context.Context, _ peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
if mafmt.UTP.Matches(a) {
return transport.CapableConn(nil), nil
}
@@ -188,7 +188,7 @@ func TestFDLimiting(t *testing.T) {
func TestTokenRedistribution(t *testing.T) {
var lk sync.Mutex
hangchs := make(map[peer.ID]chan struct{})
df := func(ctx context.Context, p peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
df := func(_ context.Context, p peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
if tcpPortOver(a, 10) {
return (transport.CapableConn)(nil), nil
}
@@ -281,7 +281,7 @@ func TestTokenRedistribution(t *testing.T) {
}
func TestStressLimiter(t *testing.T) {
df := func(ctx context.Context, p peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
df := func(_ context.Context, _ peer.ID, a ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
if tcpPortOver(a, 1000) {
return transport.CapableConn(nil), nil
}
@@ -335,7 +335,7 @@ func TestStressLimiter(t *testing.T) {
}
func TestFDLimitUnderflow(t *testing.T) {
df := func(ctx context.Context, p peer.ID, addr ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
df := func(ctx context.Context, _ peer.ID, _ ma.Multiaddr, _ chan<- transport.DialUpdate) (transport.CapableConn, error) {
select {
case <-ctx.Done():
case <-time.After(5 * time.Second):

View File

@@ -405,7 +405,7 @@ func (s *Swarm) resolveAddrs(ctx context.Context, pi peer.AddrInfo) []ma.Multiad
return ok
},
resolve: func(ctx context.Context, addr ma.Multiaddr, outputLimit int) ([]ma.Multiaddr, error) {
resolve: func(ctx context.Context, addr ma.Multiaddr, _ int) ([]ma.Multiaddr, error) {
tpt := s.TransportForDialing(addr)
resolver, ok := tpt.(transport.SkipResolver)
if !ok {

View File

@@ -171,15 +171,15 @@ func newNetNotifiee(buffer int) *netNotifiee {
}
}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {
func (nn *netNotifiee) Listen(_ network.Network, a ma.Multiaddr) {
nn.listen <- a
}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {
func (nn *netNotifiee) ListenClose(_ network.Network, a ma.Multiaddr) {
nn.listenClose <- a
}
func (nn *netNotifiee) Connected(n network.Network, v network.Conn) {
func (nn *netNotifiee) Connected(_ network.Network, v network.Conn) {
nn.connected <- v
}
func (nn *netNotifiee) Disconnected(n network.Network, v network.Conn) {
func (nn *netNotifiee) Disconnected(_ network.Network, v network.Conn) {
nn.disconnected <- v
}

View File

@@ -249,7 +249,7 @@ func TestConnectionGating(t *testing.T) {
},
"p1 gates outbound peer dial": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
c.PeerDial = func(_ peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
@@ -258,7 +258,7 @@ func TestConnectionGating(t *testing.T) {
},
"p1 gates outbound addr dialing": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Dial = func(p peer.ID, addr ma.Multiaddr) bool { return false }
c.Dial = func(_ peer.ID, _ ma.Multiaddr) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
@@ -276,7 +276,7 @@ func TestConnectionGating(t *testing.T) {
},
"p2 gates inbound peer dial before securing": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Accept = func(c network.ConnMultiaddrs) bool { return false }
c.Accept = func(_ network.ConnMultiaddrs) bool { return false }
return c
},
p1ConnectednessToP2: network.NotConnected,
@@ -296,7 +296,7 @@ func TestConnectionGating(t *testing.T) {
},
"p2 gates inbound peer dial after upgrading": {
p1Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) { return false, 0 }
c.Upgraded = func(_ network.Conn) (bool, control.DisconnectReason) { return false, 0 }
return c
},
p1ConnectednessToP2: network.NotConnected,
@@ -305,7 +305,7 @@ func TestConnectionGating(t *testing.T) {
},
"p2 gates outbound dials": {
p2Gater: func(c *MockConnectionGater) *MockConnectionGater {
c.PeerDial = func(p peer.ID) bool { return false }
c.PeerDial = func(_ peer.ID) bool { return false }
return c
},
p1ConnectednessToP2: network.Connected,
@@ -521,7 +521,7 @@ func TestResourceManagerAcceptStream(t *testing.T) {
rcmgr2 := mocknetwork.NewMockResourceManager(ctrl)
s2 := GenSwarm(t, WithSwarmOpts(swarm.WithResourceManager(rcmgr2)))
defer s2.Close()
s2.SetStreamHandler(func(str network.Stream) { t.Fatal("didn't expect to accept a stream") })
s2.SetStreamHandler(func(_ network.Stream) { t.Fatal("didn't expect to accept a stream") })
connectSwarms(t, context.Background(), []*swarm.Swarm{s1, s2})

View File

@@ -270,15 +270,15 @@ type MockConnectionGater struct {
func DefaultMockConnectionGater() *MockConnectionGater {
m := &MockConnectionGater{}
m.Dial = func(p peer.ID, addr ma.Multiaddr) bool {
m.Dial = func(_ peer.ID, _ ma.Multiaddr) bool {
return true
}
m.PeerDial = func(p peer.ID) bool {
m.PeerDial = func(_ peer.ID) bool {
return true
}
m.Accept = func(c network.ConnMultiaddrs) bool {
m.Accept = func(_ network.ConnMultiaddrs) bool {
return true
}
@@ -286,7 +286,7 @@ func DefaultMockConnectionGater() *MockConnectionGater {
return true
}
m.Upgraded = func(c network.Conn) (bool, control.DisconnectReason) {
m.Upgraded = func(_ network.Conn) (bool, control.DisconnectReason) {
return true, 0
}

View File

@@ -19,15 +19,15 @@ type dummyTransport struct {
closed bool
}
func (dt *dummyTransport) Dial(ctx context.Context, raddr ma.Multiaddr, p peer.ID) (transport.CapableConn, error) {
func (dt *dummyTransport) Dial(_ context.Context, _ ma.Multiaddr, _ peer.ID) (transport.CapableConn, error) {
panic("unimplemented")
}
func (dt *dummyTransport) CanDial(addr ma.Multiaddr) bool {
func (dt *dummyTransport) CanDial(_ ma.Multiaddr) bool {
panic("unimplemented")
}
func (dt *dummyTransport) Listen(laddr ma.Multiaddr) (transport.Listener, error) {
func (dt *dummyTransport) Listen(_ ma.Multiaddr) (transport.Listener, error) {
panic("unimplemented")
}

View File

@@ -33,28 +33,28 @@ func (t *testGater) BlockSecured(block bool) {
t.blockSecured = block
}
func (t *testGater) InterceptPeerDial(p peer.ID) (allow bool) {
func (t *testGater) InterceptPeerDial(_ peer.ID) (allow bool) {
panic("not implemented")
}
func (t *testGater) InterceptAddrDial(id peer.ID, multiaddr ma.Multiaddr) (allow bool) {
func (t *testGater) InterceptAddrDial(_ peer.ID, _ ma.Multiaddr) (allow bool) {
panic("not implemented")
}
func (t *testGater) InterceptAccept(multiaddrs network.ConnMultiaddrs) (allow bool) {
func (t *testGater) InterceptAccept(_ network.ConnMultiaddrs) (allow bool) {
t.Lock()
defer t.Unlock()
return !t.blockAccept
}
func (t *testGater) InterceptSecured(direction network.Direction, id peer.ID, multiaddrs network.ConnMultiaddrs) (allow bool) {
func (t *testGater) InterceptSecured(_ network.Direction, _ peer.ID, _ network.ConnMultiaddrs) (allow bool) {
t.Lock()
defer t.Unlock()
return !t.blockSecured
}
func (t *testGater) InterceptUpgraded(conn network.Conn) (allow bool, reason control.DisconnectReason) {
func (t *testGater) InterceptUpgraded(_ network.Conn) (allow bool, reason control.DisconnectReason) {
panic("not implemented")
}

View File

@@ -99,7 +99,7 @@ type errorMuxer struct{}
var _ network.Multiplexer = &errorMuxer{}
func (m *errorMuxer) NewConn(c net.Conn, isServer bool, scope network.PeerScope) (network.MuxedConn, error) {
func (m *errorMuxer) NewConn(_ net.Conn, _ bool, _ network.PeerScope) (network.MuxedConn, error) {
return nil, errors.New("mux error")
}

View File

@@ -197,7 +197,7 @@ func TestServerMaxConcurrentRequestsPerPeer(t *testing.T) {
doneChan := make(chan struct{})
an := newAutoNAT(t, dialer, allowPrivateAddrs, withDataRequestPolicy(
// stall all allowed requests
func(_, dialAddr ma.Multiaddr) bool {
func(_, _ ma.Multiaddr) bool {
<-doneChan
return true
}),
@@ -626,7 +626,7 @@ func FuzzServerDialRequest(f *testing.F) {
}
func FuzzReadDialData(f *testing.F) {
f.Fuzz(func(t *testing.T, numBytes int, data []byte) {
f.Fuzz(func(_ *testing.T, numBytes int, data []byte) {
readDialData(numBytes, bytes.NewReader(data))
})
}

View File

@@ -56,7 +56,7 @@ var _ io.Closer = (*Client)(nil)
// SkipResolve returns true since we always defer to the inner transport for
// the actual connection. By skipping resolution here, we let the inner
// transport decide how to resolve the multiaddr
func (c *Client) SkipResolve(ctx context.Context, maddr ma.Multiaddr) bool {
func (c *Client) SkipResolve(_ context.Context, _ ma.Multiaddr) bool {
return true
}

View File

@@ -638,7 +638,7 @@ func makeReservationMsg(
return rsvp
}
func (r *Relay) makeLimitMsg(p peer.ID) *pbv2.Limit {
func (r *Relay) makeLimitMsg(_ peer.ID) *pbv2.Limit {
if r.rc.Limit == nil {
return nil
}

View File

@@ -30,7 +30,7 @@ import (
ma "github.com/multiformats/go-multiaddr"
)
func getNetHosts(t *testing.T, ctx context.Context, n int) (hosts []host.Host, upgraders []transport.Upgrader) {
func getNetHosts(t *testing.T, _ context.Context, n int) (hosts []host.Host, upgraders []transport.Upgrader) {
for i := 0; i < n; i++ {
privk, pubk, err := crypto.GenerateKeyPair(crypto.Ed25519, 0)
if err != nil {

View File

@@ -345,13 +345,13 @@ func TestFailuresOnInitiator(t *testing.T) {
},
"responder does NOT reply within hole punch deadline": {
holePunchTimeout: 200 * time.Millisecond,
rhandler: func(s network.Stream) { time.Sleep(5 * time.Second) },
rhandler: func(_ network.Stream) { time.Sleep(5 * time.Second) },
errMsg: "i/o deadline reached",
},
"no addrs after filtering": {
errMsg: "aborting hole punch initiation as we have no public address",
rhandler: func(s network.Stream) { time.Sleep(5 * time.Second) },
filter: func(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr {
rhandler: func(_ network.Stream) { time.Sleep(5 * time.Second) },
filter: func(_ peer.ID, _ []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{}
},
},
@@ -491,7 +491,7 @@ func TestFailuresOnResponder(t *testing.T) {
})
time.Sleep(10 * time.Second)
},
filter: func(remoteID peer.ID, maddrs []ma.Multiaddr) []ma.Multiaddr {
filter: func(_ peer.ID, _ []ma.Multiaddr) []ma.Multiaddr {
return []ma.Multiaddr{}
},
},
@@ -630,7 +630,7 @@ type MockSourceIPSelector struct {
ip atomic.Pointer[net.IP]
}
func (m *MockSourceIPSelector) PreferredSourceIPForDestination(dst *net.UDPAddr) (net.IP, error) {
func (m *MockSourceIPSelector) PreferredSourceIPForDestination(_ *net.UDPAddr) (net.IP, error) {
return *m.ip.Load(), nil
}
@@ -641,7 +641,7 @@ func quicSimConn(isPubliclyReachably bool, router *simconn.SimpleFirewallRouter)
quicreuse.OverrideSourceIPSelector(func() (quicreuse.SourceIPSelector, error) {
return m, nil
}),
quicreuse.OverrideListenUDP(func(network string, address *net.UDPAddr) (net.PacketConn, error) {
quicreuse.OverrideListenUDP(func(_ string, address *net.UDPAddr) (net.PacketConn, error) {
m.ip.Store(&address.IP)
c := simconn.NewSimConn(address, router)
if isPubliclyReachably {

View File

@@ -302,6 +302,6 @@ func (nn *netNotifiee) Connected(_ network.Network, conn network.Conn) {
}
}
func (nn *netNotifiee) Disconnected(_ network.Network, v network.Conn) {}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) Disconnected(_ network.Network, _ network.Conn) {}
func (nn *netNotifiee) Listen(_ network.Network, _ ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(_ network.Network, _ ma.Multiaddr) {}

View File

@@ -1104,8 +1104,8 @@ func (nn *netNotifiee) Disconnected(_ network.Network, c network.Conn) {
ids.Host.Peerstore().UpdateAddrs(c.RemotePeer(), peerstore.TempAddrTTL, 0)
}
func (nn *netNotifiee) Listen(n network.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(n network.Network, a ma.Multiaddr) {}
func (nn *netNotifiee) Listen(_ network.Network, _ ma.Multiaddr) {}
func (nn *netNotifiee) ListenClose(_ network.Network, _ ma.Multiaddr) {}
// filterAddrs filters the address slice based on the remote multiaddr:
// - if it's a localhost address, no filtering is applied

View File

@@ -837,7 +837,7 @@ func TestIdentifyResponseReadTimeout(t *testing.T) {
ids2.Start()
// remote stream handler will just hang and not send back an identify response
h2.SetStreamHandler(identify.ID, func(s network.Stream) {
h2.SetStreamHandler(identify.ID, func(_ network.Stream) {
time.Sleep(100 * time.Second)
})
@@ -910,7 +910,7 @@ func TestOutOfOrderConnectedNotifs(t *testing.T) {
// This callback may be called before identify's Connected callback completes. If it does, the IdentifyWait should still finish successfully.

h1.Network().Notify(&network.NotifyBundle{
ConnectedF: func(n network.Network, c network.Conn) {
ConnectedF: func(_ network.Network, c network.Conn) {
idChan := h1.(interface{ IDService() identify.IDService }).IDService().IdentifyWait(c)
go func() {
<-idChan
@@ -958,7 +958,7 @@ func waitForDisconnectNotification(swarm *swarm.Swarm) <-chan struct{} {
var once sync.Once
var nb *network.NotifyBundle
nb = &network.NotifyBundle{
DisconnectedF: func(n network.Network, c network.Conn) {
DisconnectedF: func(_ network.Network, _ network.Conn) {
once.Do(func() {
go func() {
swarm.StopNotify(nb)

View File

@@ -466,7 +466,7 @@ func TestObservedAddrManager(t *testing.T) {
return checkAllEntriesRemoved(o)
}, 1*time.Second, 100*time.Millisecond)
})
t.Run("Nil Input", func(t *testing.T) {
t.Run("Nil Input", func(_ *testing.T) {
o := newObservedAddrMgr()
defer o.Close()
o.maybeRecordObservation(nil, nil)
@@ -655,7 +655,7 @@ func FuzzObservedAddrManager(f *testing.F) {
return o
}
f.Fuzz(func(t *testing.T, port uint16) {
f.Fuzz(func(_ *testing.T, port uint16) {
addrs := []ma.Multiaddr{genIPMultiaddr(true), genIPMultiaddr(false)}
n := len(addrs)
for i := 0; i < n; i++ {

View File

@@ -606,7 +606,7 @@ func TestEarlyDataRejected(t *testing.T) {
func TestEarlyfffDataAcceptedWithNoHandler(t *testing.T) {
clientEDH := &earlyDataHandler{
send: func(ctx context.Context, conn net.Conn, id peer.ID) *pb.NoiseExtensions {
send: func(_ context.Context, _ net.Conn, _ peer.ID) *pb.NoiseExtensions {
return &pb.NoiseExtensions{WebtransportCerthashes: [][]byte{[]byte("foobar")}}
},
}

View File

@@ -291,7 +291,7 @@ func waitForChannel(ready chan struct{}, timeout time.Duration) func() error {
}
}
func TestReadmeExample(t *testing.T) {
func TestReadmeExample(_ *testing.T) {
// Start with the default scaling limits.
scalingLimits := rcmgr.DefaultLimits

View File

@@ -62,7 +62,7 @@ func TestResourceManagerIsUsed(t *testing.T) {
}
peerScope := mocknetwork.NewMockPeerScope(ctrl)
peerScope.EXPECT().ReserveMemory(gomock.Any(), gomock.Any()).AnyTimes().Do(func(amount int, pri uint8) {
peerScope.EXPECT().ReserveMemory(gomock.Any(), gomock.Any()).AnyTimes().Do(func(amount int, _ uint8) {
reservedMemory.Add(int32(amount))
})
peerScope.EXPECT().ReleaseMemory(gomock.Any()).AnyTimes().Do(func(amount int) {
@@ -97,7 +97,7 @@ func TestResourceManagerIsUsed(t *testing.T) {
var allStreamsDone sync.WaitGroup
rcmgr.EXPECT().OpenConnection(expectedDir, expectFd, expectedAddr).Return(connScope, nil)
rcmgr.EXPECT().OpenStream(expectedPeer, gomock.Any()).AnyTimes().DoAndReturn(func(id peer.ID, dir network.Direction) (network.StreamManagementScope, error) {
rcmgr.EXPECT().OpenStream(expectedPeer, gomock.Any()).AnyTimes().DoAndReturn(func(_ peer.ID, _ network.Direction) (network.StreamManagementScope, error) {
allStreamsDone.Add(1)
streamScope := mocknetwork.NewMockStreamManagementScope(ctrl)
// No need to track these memory reservations since we assert that Done is called

View File

@@ -607,7 +607,7 @@ func TestMoreStreamsThanOurLimits(t *testing.T) {
var err error
// maxRetries is an arbitrary retry amount if there's any error.
maxRetries := streamCount * 4
shouldRetry := func(err error) bool {
shouldRetry := func(_ error) bool {
didErr = true
sawFirstErr.Store(true)
maxRetries--
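The shouldRetry closure above ignores which error occurred and only spends a fixed retry budget, which is why its parameter becomes `_`. A minimal sketch of that bounded-retry shape, with a stand-in operation that is not part of the diff:

package main

import (
	"errors"
	"fmt"
)

func main() {
	maxRetries := 4
	// The predicate ignores the concrete error and only decrements the budget.
	shouldRetry := func(_ error) bool {
		maxRetries--
		return maxRetries > 0
	}

	attempts := 0
	for {
		attempts++
		err := errors.New("transient failure") // stand-in for the real operation
		if err == nil || !shouldRetry(err) {
			break
		}
	}
	fmt.Println("gave up after", attempts, "attempts")
}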

View File

@@ -111,7 +111,7 @@ func NewConnManager(statelessResetKey quic.StatelessResetKey, tokenKey quic.Toke
}
func (c *ConnManager) getTracer() func(context.Context, quiclogging.Perspective, quic.ConnectionID) *quiclogging.ConnectionTracer {
return func(ctx context.Context, p quiclogging.Perspective, ci quic.ConnectionID) *quiclogging.ConnectionTracer {
return func(_ context.Context, p quiclogging.Perspective, ci quic.ConnectionID) *quiclogging.ConnectionTracer {
var promTracer *quiclogging.ConnectionTracer
if c.enableMetrics {
switch p {

View File

@@ -188,7 +188,7 @@ func getTLSConfForProto(t *testing.T, alpn string) (peer.ID, *tls.Config) {
require.NoError(t, err)
var tlsConf tls.Config
tlsConf.NextProtos = []string{alpn}
tlsConf.GetConfigForClient = func(info *tls.ClientHelloInfo) (*tls.Config, error) {
tlsConf.GetConfigForClient = func(_ *tls.ClientHelloInfo) (*tls.Config, error) {
c, _ := identity.ConfigForPeer("")
c.NextProtos = tlsConf.NextProtos
return c, nil
@@ -375,12 +375,12 @@ func TestAssociate(t *testing.T) {
require.Contains(t, []string{ln2.Addr().String(), ln3.Addr().String()}, tr3.LocalAddr().String())
}
t.Run("MultipleUnspecifiedListeners", func(t *testing.T) {
t.Run("MultipleUnspecifiedListeners", func(_ *testing.T) {
testAssociate(ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1"),
ma.StringCast("/ip4/0.0.0.0/udp/0/quic-v1"),
&net.UDPAddr{IP: net.IPv4(1, 1, 1, 1), Port: 1})
})
t.Run("MultipleSpecificListeners", func(t *testing.T) {
t.Run("MultipleSpecificListeners", func(_ *testing.T) {
testAssociate(ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
ma.StringCast("/ip4/127.0.0.1/udp/0/quic-v1"),
&net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1},

View File

@@ -59,7 +59,7 @@ func (n *nonQUICPacketConn) SetReadDeadline(t time.Time) error {
}
// SetWriteDeadline implements net.PacketConn.
func (n *nonQUICPacketConn) SetWriteDeadline(t time.Time) error {
func (n *nonQUICPacketConn) SetWriteDeadline(_ time.Time) error {
// Unused. quic-go doesn't support deadlines for writes.
return nil
}

View File

@@ -211,7 +211,7 @@ type errDialer struct {
err error
}
func (d errDialer) DialContext(ctx context.Context, network, address string) (net.Conn, error) {
func (d errDialer) DialContext(_ context.Context, _, _ string) (net.Conn, error) {
return nil, d.err
}
@@ -230,7 +230,7 @@ func TestCustomOverrideTCPDialer(t *testing.T) {
ub, err := tptu.New(ib, muxers, nil, nil, nil)
require.NoError(t, err)
called := false
customDialer := func(raddr ma.Multiaddr) (ContextDialer, error) {
customDialer := func(_ ma.Multiaddr) (ContextDialer, error) {
called = true
return &net.Dialer{}, nil
}
@@ -260,7 +260,7 @@ func TestCustomOverrideTCPDialer(t *testing.T) {
ub, err := tptu.New(ib, muxers, nil, nil, nil)
require.NoError(t, err)
customErr := errors.New("custom dialer error")
customDialer := func(raddr ma.Multiaddr) (ContextDialer, error) {
customDialer := func(_ ma.Multiaddr) (ContextDialer, error) {
if test == "error in factory" {
return nil, customErr
} else {

View File

@@ -16,7 +16,7 @@ import (
var testData = []byte("this is some test data")
func SubtestProtocols(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, peerA peer.ID) {
func SubtestProtocols(t *testing.T, ta, tb transport.Transport, maddr ma.Multiaddr, _ peer.ID) {
rawIPAddr, _ := ma.NewMultiaddr("/ip4/1.2.3.4")
if ta.CanDial(rawIPAddr) || tb.CanDial(rawIPAddr) {
t.Error("nothing should be able to dial raw IP")

View File

@@ -333,7 +333,7 @@ func (l *listener) Multiaddr() ma.Multiaddr {
func addOnConnectionStateChangeCallback(pc *webrtc.PeerConnection) <-chan error {
errC := make(chan error, 1)
var once sync.Once
pc.OnConnectionStateChange(func(state webrtc.PeerConnectionState) {
pc.OnConnectionStateChange(func(_ webrtc.PeerConnectionState) {
switch pc.ConnectionState() {
case webrtc.PeerConnectionStateConnected:
once.Do(func() { close(errC) })

View File

@@ -63,7 +63,7 @@ type loggerFactory struct{}
// NewLogger returns pLog for all new logger instances. Internally pion creates lots of
// separate logging objects unnecessarily. To avoid the allocations we use a single log
// object for all of pion logging.
func (loggerFactory) NewLogger(scope string) pionLogging.LeveledLogger {
func (loggerFactory) NewLogger(_ string) pionLogging.LeveledLogger {
return pLog
}
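The comment above explains why pion gets a single shared logger: every subsystem asks the factory for a scoped logger, and handing back one instance avoids per-scope allocations. A small sketch of such a factory against the public github.com/pion/logging interfaces; the type and variable names are illustrative, not taken from the diff:

package main

import (
	"fmt"

	pionLogging "github.com/pion/logging"
)

// sharedLoggerFactory returns the same LeveledLogger for every scope, so the
// many pion subsystems that request a logger do not each allocate their own.
type sharedLoggerFactory struct {
	logger pionLogging.LeveledLogger
}

// NewLogger ignores the requested scope (hence `_`) and returns the shared logger.
func (f sharedLoggerFactory) NewLogger(_ string) pionLogging.LeveledLogger {
	return f.logger
}

var _ pionLogging.LoggerFactory = sharedLoggerFactory{}

func main() {
	shared := pionLogging.NewDefaultLoggerFactory().NewLogger("shared")
	f := sharedLoggerFactory{logger: shared}
	// Both scopes receive the very same logger instance.
	fmt.Println(f.NewLogger("ice") == f.NewLogger("sctp"))
}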

View File

@@ -624,7 +624,7 @@ func newWebRTCConnection(settings webrtc.SettingEngine, config webrtc.Configurat
})
connectionClosedCh := make(chan struct{}, 1)
pc.SCTP().OnClose(func(err error) {
pc.SCTP().OnClose(func(_ error) {
// We only need one message. Closing a connection is a problem as pion might invoke the callback more than once.
select {
case connectionClosedCh <- struct{}{}:
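The comment in this hunk explains why the close signal goes through a buffered channel: pion may invoke the callback more than once, and only one notification is needed. A minimal standalone sketch of that close-once pattern, independent of the diff:

package main

import "fmt"

func main() {
	connectionClosed := make(chan struct{}, 1) // capacity 1: one signal is enough

	// onClose may be invoked several times by the underlying library; the
	// non-blocking send makes every invocation after the first a no-op.
	onClose := func() {
		select {
		case connectionClosed <- struct{}{}:
		default: // signal already queued, drop the duplicate
		}
	}

	onClose()
	onClose() // duplicate callback, harmless

	<-connectionClosed
	fmt.Println("close observed exactly once")
}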

View File

@@ -103,17 +103,17 @@ func (c *muxedConnection) close() {
func (c *muxedConnection) LocalAddr() net.Addr { return c.mux.socket.LocalAddr() }
func (*muxedConnection) SetDeadline(t time.Time) error {
func (*muxedConnection) SetDeadline(_ time.Time) error {
// no deadline is desired here
return nil
}
func (*muxedConnection) SetReadDeadline(t time.Time) error {
func (*muxedConnection) SetReadDeadline(_ time.Time) error {
// no read deadline is desired here
return nil
}
func (*muxedConnection) SetWriteDeadline(t time.Time) error {
func (*muxedConnection) SetWriteDeadline(_ time.Time) error {
// no write deadline is desired here
return nil
}

View File

@@ -121,7 +121,7 @@ func newListener(a ma.Multiaddr, tlsConf *tls.Config, sharedTcp *tcpreuse.ConnMg
wsurl: wsurl,
wsUpgrader: ws.Upgrader{
// Allow requests from *all* origins.
CheckOrigin: func(r *http.Request) bool {
CheckOrigin: func(_ *http.Request) bool {
return true
},
HandshakeTimeout: handshakeTimeout,
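The listener above deliberately accepts every Origin, which is why CheckOrigin ignores its request argument. A short sketch of a permissive upgrader behind a plain HTTP handler, assuming the gorilla/websocket Upgrader; the handler name and listen address are hypothetical, not part of the diff:

package main

import (
	"log"
	"net/http"
	"time"

	"github.com/gorilla/websocket"
)

// A permissive upgrader in the spirit of the listener above: CheckOrigin
// ignores the request (hence `_`) and accepts every origin.
var upgrader = websocket.Upgrader{
	CheckOrigin:      func(_ *http.Request) bool { return true },
	HandshakeTimeout: 15 * time.Second,
}

func echoOnce(w http.ResponseWriter, r *http.Request) {
	conn, err := upgrader.Upgrade(w, r, nil)
	if err != nil {
		return
	}
	defer conn.Close()
	if mt, msg, err := conn.ReadMessage(); err == nil {
		_ = conn.WriteMessage(mt, msg) // echo a single message back
	}
}

func main() {
	http.HandleFunc("/ws", echoOnce)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}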

View File

@@ -669,7 +669,7 @@ func TestHandshakeTimeout(t *testing.T) {
fastWSDialer := gws.Dialer{
HandshakeTimeout: 10 * handshakeTimeout,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
NetDial: func(network, addr string) (net.Conn, error) {
NetDial: func(_, addr string) (net.Conn, error) {
tcpConn, err := net.Dial("tcp", addr)
if !assert.NoError(t, err) {
return nil, err
@@ -680,7 +680,7 @@ func TestHandshakeTimeout(t *testing.T) {
slowWSDialer := gws.Dialer{
HandshakeTimeout: 10 * handshakeTimeout,
NetDial: func(network, addr string) (net.Conn, error) {
NetDial: func(_, addr string) (net.Conn, error) {
tcpConn, err := net.Dial("tcp", addr)
if !assert.NoError(t, err) {
return nil, err

View File

@@ -110,7 +110,7 @@ func newListener(reuseListener quicreuse.Listener, t *transport, isStaticTLSConf
return context.WithValue(ctx, connKey{}, c)
},
},
CheckOrigin: func(r *http.Request) bool { return true },
CheckOrigin: func(_ *http.Request) bool { return true },
},
}
ln.ctx, ln.ctxCancel = context.WithCancel(context.Background())

View File

@@ -215,7 +215,7 @@ func (t *transport) dial(ctx context.Context, addr ma.Multiaddr, url, sni string
return nil, nil, err
}
dialer := webtransport.Dialer{
DialAddr: func(ctx context.Context, addr string, tlsCfg *tls.Config, cfg *quic.Config) (quic.EarlyConnection, error) {
DialAddr: func(_ context.Context, _ string, _ *tls.Config, _ *quic.Config) (quic.EarlyConnection, error) {
return conn.(quic.EarlyConnection), nil
},
QUICConfig: t.connManager.ClientConfig().Clone(),

View File

@@ -526,7 +526,7 @@ type reportingRcmgr struct {
report chan<- int
}
func (m *reportingRcmgr) OpenConnection(dir network.Direction, usefd bool, endpoint ma.Multiaddr) (network.ConnManagementScope, error) {
func (m *reportingRcmgr) OpenConnection(_ network.Direction, _ bool, _ ma.Multiaddr) (network.ConnManagementScope, error) {
return &reportingScope{report: m.report}, nil
}
@@ -681,7 +681,7 @@ func serverSendsBackValidCert(t *testing.T, timeSinceUnixEpoch time.Duration, ke
conn, err := quic.DialAddr(context.Background(), l.Addr().String(), &tls.Config{
NextProtos: []string{http3.NextProtoH3},
InsecureSkipVerify: true,
VerifyPeerCertificate: func(rawCerts [][]byte, verifiedChains [][]*x509.Certificate) error {
VerifyPeerCertificate: func(rawCerts [][]byte, _ [][]*x509.Certificate) error {
for _, c := range rawCerts {
cert, err := x509.ParseCertificate(c)
if err != nil {