Mirror of https://git.zx2c4.com/wireguard-go
device: distribute crypto work as slice of elements
After reducing UDP stack traversal overhead via GSO and GRO, runtime.chanrecv() began to account for a high percentage (20% in one environment) of perf samples during a throughput benchmark. The individual packet channel ops with the crypto goroutines were the primary contributor to this overhead. Updating these channels to pass vectors, which the device package already handles at its ends, reduced this overhead substantially and improved throughput.

The iperf3 results below demonstrate the effect of this commit between two Linux computers with i5-12400 CPUs. There is roughly 13us of round trip latency between them.

The first result is with UDP GSO and GRO, and with single-element channels.

Starting Test: protocol: TCP, 1 streams, 131072 byte blocks
[ ID] Interval           Transfer     Bitrate         Retr  Cwnd
[  5]   0.00-10.00  sec  12.3 GBytes  10.6 Gbits/sec  232   3.15 MBytes
- - - - - - - - - - - - - - - - - - - - - - - - -
Test Complete. Summary Results:
[ ID] Interval           Transfer     Bitrate         Retr
[  5]   0.00-10.00  sec  12.3 GBytes  10.6 Gbits/sec  232   sender
[  5]   0.00-10.04  sec  12.3 GBytes  10.6 Gbits/sec        receiver

The second result is with channels updated to pass a slice of elements.

Starting Test: protocol: TCP, 1 streams, 131072 byte blocks
[ ID] Interval           Transfer     Bitrate         Retr  Cwnd
[  5]   0.00-10.00  sec  13.2 GBytes  11.3 Gbits/sec  182   3.15 MBytes
- - - - - - - - - - - - - - - - - - - - - - - - -
Test Complete. Summary Results:
[ ID] Interval           Transfer     Bitrate         Retr
[  5]   0.00-10.00  sec  13.2 GBytes  11.3 Gbits/sec  182   sender
[  5]   0.00-10.04  sec  13.2 GBytes  11.3 Gbits/sec        receiver

Reviewed-by: Adrian Dewhurst <adrian@tailscale.com>
Signed-off-by: Jordan Whited <jordan@tailscale.com>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
committed by Jason A. Donenfeld
parent 6a84778f2c
commit 4201e08f1d
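
The change below batches the crypto queues: the receive path already groups packets per peer into a slice (*elems), so rather than feeding the decryption workers one element at a time, the whole slice pointer is sent in a single channel operation and the worker drains it locally. A minimal sketch of the pattern, assuming hypothetical element and queue types rather than the actual wireguard-go ones:

package main

import "fmt"

// element stands in for wireguard-go's inbound queue element; only the
// field needed for the sketch is included.
type element struct {
    packet []byte
}

func main() {
    // Batched queue: one channel operation moves an entire slice of elements.
    c := make(chan *[]*element, 16)

    done := make(chan struct{})
    go func() {
        defer close(done)
        // Worker: a single chanrecv yields the whole batch, which is then
        // drained without any further synchronization.
        for elems := range c {
            for _, elem := range *elems {
                fmt.Printf("processed %q\n", elem.packet)
            }
        }
    }()

    // Producer: with single-element channels this would have been one
    // chansend per packet; now it is one chansend per batch.
    batch := []*element{{packet: []byte("a")}, {packet: []byte("b")}}
    c <- &batch
    close(c)
    <-done
}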
@@ -220,9 +220,7 @@ func (device *Device) RoutineReceiveIncoming(maxBatchSize int, recv conn.Receive
     for peer, elems := range elemsByPeer {
         if peer.isRunning.Load() {
             peer.queue.inbound.c <- elems
-            for _, elem := range *elems {
-                device.queue.decryption.c <- elem
-            }
+            device.queue.decryption.c <- elems
         } else {
             for _, elem := range *elems {
                 device.PutMessageBuffer(elem.buffer)
@@ -241,26 +239,28 @@ func (device *Device) RoutineDecryption(id int) {
     defer device.log.Verbosef("Routine: decryption worker %d - stopped", id)
     device.log.Verbosef("Routine: decryption worker %d - started", id)

-    for elem := range device.queue.decryption.c {
-        // split message into fields
-        counter := elem.packet[MessageTransportOffsetCounter:MessageTransportOffsetContent]
-        content := elem.packet[MessageTransportOffsetContent:]
+    for elems := range device.queue.decryption.c {
+        for _, elem := range *elems {
+            // split message into fields
+            counter := elem.packet[MessageTransportOffsetCounter:MessageTransportOffsetContent]
+            content := elem.packet[MessageTransportOffsetContent:]

-        // decrypt and release to consumer
-        var err error
-        elem.counter = binary.LittleEndian.Uint64(counter)
-        // copy counter to nonce
-        binary.LittleEndian.PutUint64(nonce[0x4:0xc], elem.counter)
-        elem.packet, err = elem.keypair.receive.Open(
-            content[:0],
-            nonce[:],
-            content,
-            nil,
-        )
-        if err != nil {
-            elem.packet = nil
+            // decrypt and release to consumer
+            var err error
+            elem.counter = binary.LittleEndian.Uint64(counter)
+            // copy counter to nonce
+            binary.LittleEndian.PutUint64(nonce[0x4:0xc], elem.counter)
+            elem.packet, err = elem.keypair.receive.Open(
+                content[:0],
+                nonce[:],
+                content,
+                nil,
+            )
+            if err != nil {
+                elem.packet = nil
+            }
+            elem.Unlock()
         }
-        elem.Unlock()
     }
 }
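
For intuition on the savings, here is a rough micro-benchmark sketch (hypothetical, not part of this repository) contrasting the two channel disciplines. Both benchmarks deliver batchSize elements per iteration, so ns/op compares one chansend/chanrecv pair per element against one pair per batch:

package batching

import "testing"

type element struct{ counter uint64 }

const batchSize = 128

// BenchmarkPerElement performs one channel operation per element.
func BenchmarkPerElement(b *testing.B) {
    elems := make([]*element, batchSize)
    for i := range elems {
        elems[i] = &element{}
    }
    c := make(chan *element, batchSize)
    done := make(chan struct{})
    go func() {
        defer close(done)
        for range c { // one chanrecv per element
        }
    }()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        for _, e := range elems {
            c <- e // one chansend per element
        }
    }
    close(c)
    <-done
}

// BenchmarkPerBatch performs one channel operation per batch.
func BenchmarkPerBatch(b *testing.B) {
    elems := make([]*element, batchSize)
    for i := range elems {
        elems[i] = &element{}
    }
    c := make(chan *[]*element, 1)
    done := make(chan struct{})
    go func() {
        defer close(done)
        for batch := range c { // one chanrecv per batch
            for range *batch {
            }
        }
    }()
    b.ResetTimer()
    for i := 0; i < b.N; i++ {
        c <- &elems // one chansend per batchSize elements
    }
    close(c)
    <-done
}

The per-batch variant amortizes channel synchronization across the batch, which is the same effect the iperf3 numbers above reflect end to end.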