feat: update gvisor with tag go and go with 1.22 (#202)

This commit is contained in:
naison
2024-03-31 22:42:31 +08:00
committed by GitHub
parent fadfd00927
commit aacdc8a6d0
634 changed files with 5614 additions and 10332 deletions

View File

@@ -77,9 +77,6 @@ func (i *Int32) Store(v int32) {
//
// It may be helpful to document why a racy operation is permitted.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
//go:nosplit
func (i *Int32) RacyStore(v int32) {
i.value = v
@@ -124,6 +121,9 @@ func (i *Int32) ptr() *int32 {
// Uint32 is an atomic uint32.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// See aligned_unsafe.go in this directory for justification.
//
// +stateify savable
@@ -210,4 +210,80 @@ func (u *Uint32) ptr() *uint32 {
return &u.value
}
// Bool is an atomic Boolean.
//
// It is implemented by a Uint32, with value 0 indicating false, and 1
// indicating true.
//
// +stateify savable
type Bool struct {
Uint32
}
// b32 returns a uint32 0 or 1 representing b.
func b32(b bool) uint32 {
if b {
return 1
}
return 0
}
// FromBool returns a Bool initialized to value val.
//
//go:nosplit
func FromBool(val bool) Bool {
return Bool{
Uint32: FromUint32(b32(val)),
}
}
// Load is analogous to atomic.LoadBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Load() bool {
return b.Uint32.Load() != 0
}
// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyLoad() bool {
return b.Uint32.RacyLoad() != 0
}
// Store is analogous to atomic.StoreBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Store(val bool) {
b.Uint32.Store(b32(val))
}
// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyStore(val bool) {
b.Uint32.RacyStore(b32(val))
}
// Swap is analogous to atomic.SwapBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Swap(val bool) bool {
return b.Uint32.Swap(b32(val)) != 0
}
// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
// existed.
//
//go:nosplit
func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
}
// LINT.ThenChange(32b_64bit.go)

View File

@@ -77,9 +77,6 @@ func (i *Int32) Store(v int32) {
//
// It may be helpful to document why a racy operation is permitted.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
//go:nosplit
func (i *Int32) RacyStore(v int32) {
i.value = v
@@ -124,6 +121,9 @@ func (i *Int32) ptr() *int32 {
// Uint32 is an atomic uint32.
//
// Don't add fields to this struct. It is important that it remain the same
// size as its builtin analogue.
//
// See aligned_unsafe.go in this directory for justification.
//
// +stateify savable
@@ -210,4 +210,80 @@ func (u *Uint32) ptr() *uint32 {
return &u.value
}
// Bool is an atomic Boolean.
//
// It is implemented by a Uint32, with value 0 indicating false, and 1
// indicating true.
//
// +stateify savable
type Bool struct {
Uint32
}
// b32 returns a uint32 0 or 1 representing b.
func b32(b bool) uint32 {
if b {
return 1
}
return 0
}
// FromBool returns a Bool initialized to value val.
//
//go:nosplit
func FromBool(val bool) Bool {
return Bool{
Uint32: FromUint32(b32(val)),
}
}
// Load is analogous to atomic.LoadBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Load() bool {
return b.Uint32.Load() != 0
}
// RacyLoad is analogous to reading an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyLoad() bool {
return b.Uint32.RacyLoad() != 0
}
// Store is analogous to atomic.StoreBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Store(val bool) {
b.Uint32.Store(b32(val))
}
// RacyStore is analogous to setting an atomic value without using
// synchronization.
//
// It may be helpful to document why a racy operation is permitted.
//
//go:nosplit
func (b *Bool) RacyStore(val bool) {
b.Uint32.RacyStore(b32(val))
}
// Swap is analogous to atomic.SwapBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Swap(val bool) bool {
return b.Uint32.Swap(b32(val)) != 0
}
// CompareAndSwap is analogous to atomic.CompareAndSwapBool, if such a thing
// existed.
//
//go:nosplit
func (b *Bool) CompareAndSwap(oldVal, newVal bool) bool {
return b.Uint32.CompareAndSwap(b32(oldVal), b32(newVal))
}
// LINT.ThenChange(32b_32bit.go)

View File

@@ -6,6 +6,8 @@
package atomicbitops
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -27,10 +29,10 @@ func (i *Int32) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &i.value)
}
func (i *Int32) afterLoad() {}
func (i *Int32) afterLoad(context.Context) {}
// +checklocksignore
func (i *Int32) StateLoad(stateSourceObject state.Source) {
func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.value)
}
@@ -52,14 +54,40 @@ func (u *Uint32) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &u.value)
}
func (u *Uint32) afterLoad() {}
func (u *Uint32) afterLoad(context.Context) {}
// +checklocksignore
func (u *Uint32) StateLoad(stateSourceObject state.Source) {
func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &u.value)
}
func (b *Bool) StateTypeName() string {
return "pkg/atomicbitops.Bool"
}
func (b *Bool) StateFields() []string {
return []string{
"Uint32",
}
}
func (b *Bool) beforeSave() {}
// +checklocksignore
func (b *Bool) StateSave(stateSinkObject state.Sink) {
b.beforeSave()
stateSinkObject.Save(0, &b.Uint32)
}
func (b *Bool) afterLoad(context.Context) {}
// +checklocksignore
func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &b.Uint32)
}
func init() {
state.Register((*Int32)(nil))
state.Register((*Uint32)(nil))
state.Register((*Bool)(nil))
}

View File

@@ -6,6 +6,8 @@
package atomicbitops
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -29,10 +31,10 @@ func (i *Int64) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &i.value32)
}
func (i *Int64) afterLoad() {}
func (i *Int64) afterLoad(context.Context) {}
// +checklocksignore
func (i *Int64) StateLoad(stateSourceObject state.Source) {
func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.value)
stateSourceObject.Load(1, &i.value32)
}
@@ -57,10 +59,10 @@ func (u *Uint64) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &u.value32)
}
func (u *Uint64) afterLoad() {}
func (u *Uint64) afterLoad(context.Context) {}
// +checklocksignore
func (u *Uint64) StateLoad(stateSourceObject state.Source) {
func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &u.value)
stateSourceObject.Load(1, &u.value32)
}

View File

@@ -6,6 +6,8 @@
package atomicbitops
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -27,10 +29,10 @@ func (i *Int32) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &i.value)
}
func (i *Int32) afterLoad() {}
func (i *Int32) afterLoad(context.Context) {}
// +checklocksignore
func (i *Int32) StateLoad(stateSourceObject state.Source) {
func (i *Int32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.value)
}
@@ -52,13 +54,38 @@ func (u *Uint32) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &u.value)
}
func (u *Uint32) afterLoad() {}
func (u *Uint32) afterLoad(context.Context) {}
// +checklocksignore
func (u *Uint32) StateLoad(stateSourceObject state.Source) {
func (u *Uint32) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &u.value)
}
func (b *Bool) StateTypeName() string {
return "pkg/atomicbitops.Bool"
}
func (b *Bool) StateFields() []string {
return []string{
"Uint32",
}
}
func (b *Bool) beforeSave() {}
// +checklocksignore
func (b *Bool) StateSave(stateSinkObject state.Sink) {
b.beforeSave()
stateSinkObject.Save(0, &b.Uint32)
}
func (b *Bool) afterLoad(context.Context) {}
// +checklocksignore
func (b *Bool) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &b.Uint32)
}
func (i *Int64) StateTypeName() string {
return "pkg/atomicbitops.Int64"
}
@@ -77,10 +104,10 @@ func (i *Int64) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &i.value)
}
func (i *Int64) afterLoad() {}
func (i *Int64) afterLoad(context.Context) {}
// +checklocksignore
func (i *Int64) StateLoad(stateSourceObject state.Source) {
func (i *Int64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.value)
}
@@ -102,16 +129,17 @@ func (u *Uint64) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &u.value)
}
func (u *Uint64) afterLoad() {}
func (u *Uint64) afterLoad(context.Context) {}
// +checklocksignore
func (u *Uint64) StateLoad(stateSourceObject state.Source) {
func (u *Uint64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &u.value)
}
func init() {
state.Register((*Int32)(nil))
state.Register((*Uint32)(nil))
state.Register((*Bool)(nil))
state.Register((*Int64)(nil))
state.Register((*Uint64)(nil))
}

View File

@@ -8,6 +8,8 @@
package atomicbitops
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -29,39 +31,13 @@ func (f *Float64) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &f.bits)
}
func (f *Float64) afterLoad() {}
func (f *Float64) afterLoad(context.Context) {}
// +checklocksignore
func (f *Float64) StateLoad(stateSourceObject state.Source) {
func (f *Float64) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &f.bits)
}
func (b *Bool) StateTypeName() string {
return "pkg/atomicbitops.Bool"
}
func (b *Bool) StateFields() []string {
return []string{
"Uint32",
}
}
func (b *Bool) beforeSave() {}
// +checklocksignore
func (b *Bool) StateSave(stateSinkObject state.Sink) {
b.beforeSave()
stateSinkObject.Save(0, &b.Uint32)
}
func (b *Bool) afterLoad() {}
// +checklocksignore
func (b *Bool) StateLoad(stateSourceObject state.Source) {
stateSourceObject.Load(0, &b.Uint32)
}
func init() {
state.Register((*Float64)(nil))
state.Register((*Bool)(nil))
}

View File

@@ -1,71 +0,0 @@
// Copyright 2022 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package atomicbitops
import "sync/atomic"
// Bool is an atomic Boolean.
//
// It is implemented by a Uint32, with value 0 indicating false, and 1
// indicating true.
//
// +stateify savable
type Bool struct {
Uint32
}
// FromBool returns an Bool initialized to value val.
//
//go:nosplit
func FromBool(val bool) Bool {
var u uint32
if val {
u = 1
}
return Bool{
Uint32{
value: u,
},
}
}
// Load is analogous to atomic.LoadBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Load() bool {
return atomic.LoadUint32(&b.value) == 1
}
// Store is analogous to atomic.StoreBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Store(val bool) {
var u uint32
if val {
u = 1
}
atomic.StoreUint32(&b.value, u)
}
// Swap is analogous to atomic.SwapBool, if such a thing existed.
//
//go:nosplit
func (b *Bool) Swap(val bool) bool {
var u uint32
if val {
u = 1
}
return atomic.SwapUint32(&b.value, u) == 1
}

View File

@@ -28,7 +28,7 @@ import (
//
// +stateify savable
type Buffer struct {
data viewList `state:".([]byte)"`
data ViewList `state:".([]byte)"`
size int64
}
@@ -189,12 +189,9 @@ func (b *Buffer) GrowTo(length int64, zero bool) {
sz = int(length - b.size)
}
// Zero the written section; note that this pattern is
// specifically recognized and optimized by the compiler.
// Zero the written section.
if zero {
for i := v.write; i < v.write+sz; i++ {
v.chunk.data[i] = 0
}
clear(v.chunk.data[v.write : v.write+sz])
}
// Advance the index.
@@ -401,6 +398,12 @@ func (b *Buffer) Size() int64 {
return b.size
}
// AsViewList returns the ViewList backing b. Users may not save or modify the
// ViewList returned.
func (b *Buffer) AsViewList() ViewList {
return b.data
}
// Clone creates a copy-on-write clone of b. The underlying chunks are shared
// until they are written to.
func (b *Buffer) Clone() Buffer {
@@ -479,7 +482,7 @@ func (b *Buffer) Checksum(offset int) uint16 {
// operation completes.
func (b *Buffer) Merge(other *Buffer) {
b.data.PushBackList(&other.data)
other.data = viewList{}
other.data = ViewList{}
// Adjust sizes.
b.size += other.size
@@ -489,6 +492,18 @@ func (b *Buffer) Merge(other *Buffer) {
// WriteFromReader writes to the buffer from an io.Reader. A maximum read size
// of MaxChunkSize is enforced to prevent allocating views from the heap.
func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
return b.WriteFromReaderAndLimitedReader(r, count, nil)
}
// WriteFromReaderAndLimitedReader is the same as WriteFromReader, but
// optimized to avoid allocations if a LimitedReader is passed in.
//
// This function clobbers the values of lr.
func (b *Buffer) WriteFromReaderAndLimitedReader(r io.Reader, count int64, lr *io.LimitedReader) (int64, error) {
if lr == nil {
lr = &io.LimitedReader{}
}
var done int64
for done < count {
vsize := count - done
@@ -496,8 +511,9 @@ func (b *Buffer) WriteFromReader(r io.Reader, count int64) (int64, error) {
vsize = MaxChunkSize
}
v := NewView(int(vsize))
lr := io.LimitedReader{R: r, N: vsize}
n, err := io.Copy(v, &lr)
lr.R = r
lr.N = vsize
n, err := io.Copy(v, lr)
b.Append(v)
done += n
if err == io.EOF {
@@ -572,7 +588,7 @@ func (b *Buffer) readByte() (byte, error) {
return bt, nil
}
// AsBufferReader returns the Buffer as a BufferReader capabable of io methods.
// AsBufferReader returns the Buffer as a BufferReader capable of io methods.
// The new BufferReader takes ownership of b.
func (b *Buffer) AsBufferReader() BufferReader {
return BufferReader{b}

View File

@@ -14,12 +14,16 @@
package buffer
import (
"context"
)
// saveData is invoked by stateify.
func (b *Buffer) saveData() []byte {
return b.Flatten()
}
// loadData is invoked by stateify.
func (b *Buffer) loadData(data []byte) {
func (b *Buffer) loadData(_ context.Context, data []byte) {
*b = MakeWithData(data)
}

View File

@@ -3,6 +3,8 @@
package buffer
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -28,12 +30,12 @@ func (b *Buffer) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &b.size)
}
func (b *Buffer) afterLoad() {}
func (b *Buffer) afterLoad(context.Context) {}
// +checklocksignore
func (b *Buffer) StateLoad(stateSourceObject state.Source) {
func (b *Buffer) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(1, &b.size)
stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(y.([]byte)) })
stateSourceObject.LoadValue(0, new([]byte), func(y any) { b.loadData(ctx, y.([]byte)) })
}
func (c *chunk) StateTypeName() string {
@@ -56,10 +58,10 @@ func (c *chunk) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &c.data)
}
func (c *chunk) afterLoad() {}
func (c *chunk) afterLoad(context.Context) {}
// +checklocksignore
func (c *chunk) StateLoad(stateSourceObject state.Source) {
func (c *chunk) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &c.chunkRefs)
stateSourceObject.Load(1, &c.data)
}
@@ -83,9 +85,9 @@ func (r *chunkRefs) StateSave(stateSinkObject state.Sink) {
}
// +checklocksignore
func (r *chunkRefs) StateLoad(stateSourceObject state.Source) {
func (r *chunkRefs) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &r.refCount)
stateSourceObject.AfterLoad(r.afterLoad)
stateSourceObject.AfterLoad(func() { r.afterLoad(ctx) })
}
func (v *View) StateTypeName() string {
@@ -110,67 +112,67 @@ func (v *View) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(2, &v.chunk)
}
func (v *View) afterLoad() {}
func (v *View) afterLoad(context.Context) {}
// +checklocksignore
func (v *View) StateLoad(stateSourceObject state.Source) {
func (v *View) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &v.read)
stateSourceObject.Load(1, &v.write)
stateSourceObject.Load(2, &v.chunk)
}
func (l *viewList) StateTypeName() string {
return "pkg/buffer.viewList"
func (l *ViewList) StateTypeName() string {
return "pkg/buffer.ViewList"
}
func (l *viewList) StateFields() []string {
func (l *ViewList) StateFields() []string {
return []string{
"head",
"tail",
}
}
func (l *viewList) beforeSave() {}
func (l *ViewList) beforeSave() {}
// +checklocksignore
func (l *viewList) StateSave(stateSinkObject state.Sink) {
func (l *ViewList) StateSave(stateSinkObject state.Sink) {
l.beforeSave()
stateSinkObject.Save(0, &l.head)
stateSinkObject.Save(1, &l.tail)
}
func (l *viewList) afterLoad() {}
func (l *ViewList) afterLoad(context.Context) {}
// +checklocksignore
func (l *viewList) StateLoad(stateSourceObject state.Source) {
func (l *ViewList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &l.head)
stateSourceObject.Load(1, &l.tail)
}
func (e *viewEntry) StateTypeName() string {
return "pkg/buffer.viewEntry"
func (e *ViewEntry) StateTypeName() string {
return "pkg/buffer.ViewEntry"
}
func (e *viewEntry) StateFields() []string {
func (e *ViewEntry) StateFields() []string {
return []string{
"next",
"prev",
}
}
func (e *viewEntry) beforeSave() {}
func (e *ViewEntry) beforeSave() {}
// +checklocksignore
func (e *viewEntry) StateSave(stateSinkObject state.Sink) {
func (e *ViewEntry) StateSave(stateSinkObject state.Sink) {
e.beforeSave()
stateSinkObject.Save(0, &e.next)
stateSinkObject.Save(1, &e.prev)
}
func (e *viewEntry) afterLoad() {}
func (e *ViewEntry) afterLoad(context.Context) {}
// +checklocksignore
func (e *viewEntry) StateLoad(stateSourceObject state.Source) {
func (e *ViewEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &e.next)
stateSourceObject.Load(1, &e.prev)
}
@@ -180,6 +182,6 @@ func init() {
state.Register((*chunk)(nil))
state.Register((*chunkRefs)(nil))
state.Register((*View)(nil))
state.Register((*viewList)(nil))
state.Register((*viewEntry)(nil))
state.Register((*ViewList)(nil))
state.Register((*ViewEntry)(nil))
}

View File

@@ -27,7 +27,7 @@ const (
// number and passing the result to MostSignificantOne64.
baseChunkSizeLog2 = 6
// This is the size of the buffers in the first pool. Each subsquent pool
// This is the size of the buffers in the first pool. Each subsequent pool
// creates payloads 2^(pool index) times larger than the first pool's
// payloads.
baseChunkSize = 1 << baseChunkSizeLog2 // 64
@@ -87,9 +87,7 @@ func newChunk(size int) *chunk {
} else {
pool := getChunkPool(size)
c = pool.Get().(*chunk)
for i := range c.data {
c.data[i] = 0
}
clear(c.data)
}
c.InitRefs()
return c

View File

@@ -1,6 +1,7 @@
package buffer
import (
"context"
"fmt"
"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -134,7 +135,7 @@ func (r *chunkRefs) DecRef(destroy func()) {
}
}
func (r *chunkRefs) afterLoad() {
func (r *chunkRefs) afterLoad(context.Context) {
if r.ReadRefs() > 0 {
refs.Register(r)
}

View File

@@ -48,7 +48,7 @@ var viewPool = sync.Pool{
//
// +stateify savable
type View struct {
viewEntry `state:"nosave"`
ViewEntry `state:"nosave"`
read int
write int
chunk *chunk

View File

@@ -6,14 +6,14 @@ package buffer
// objects, if they are not the same. An ElementMapper is not typically
// required if: Linker is left as is, Element is left as is, or Linker and
// Element are the same type.
type viewElementMapper struct{}
type ViewElementMapper struct{}
// linkerFor maps an Element to a Linker.
//
// This default implementation should be inlined.
//
//go:nosplit
func (viewElementMapper) linkerFor(elem *View) *View { return elem }
func (ViewElementMapper) linkerFor(elem *View) *View { return elem }
// List is an intrusive list. Entries can be added to or removed from the list
// in O(1) time and with no additional memory allocations.
@@ -27,13 +27,13 @@ func (viewElementMapper) linkerFor(elem *View) *View { return elem }
// }
//
// +stateify savable
type viewList struct {
type ViewList struct {
head *View
tail *View
}
// Reset resets list l to the empty state.
func (l *viewList) Reset() {
func (l *ViewList) Reset() {
l.head = nil
l.tail = nil
}
@@ -41,21 +41,21 @@ func (l *viewList) Reset() {
// Empty returns true iff the list is empty.
//
//go:nosplit
func (l *viewList) Empty() bool {
func (l *ViewList) Empty() bool {
return l.head == nil
}
// Front returns the first element of list l or nil.
//
//go:nosplit
func (l *viewList) Front() *View {
func (l *ViewList) Front() *View {
return l.head
}
// Back returns the last element of list l or nil.
//
//go:nosplit
func (l *viewList) Back() *View {
func (l *ViewList) Back() *View {
return l.tail
}
@@ -64,8 +64,8 @@ func (l *viewList) Back() *View {
// NOTE: This is an O(n) operation.
//
//go:nosplit
func (l *viewList) Len() (count int) {
for e := l.Front(); e != nil; e = (viewElementMapper{}.linkerFor(e)).Next() {
func (l *ViewList) Len() (count int) {
for e := l.Front(); e != nil; e = (ViewElementMapper{}.linkerFor(e)).Next() {
count++
}
return count
@@ -74,12 +74,12 @@ func (l *viewList) Len() (count int) {
// PushFront inserts the element e at the front of list l.
//
//go:nosplit
func (l *viewList) PushFront(e *View) {
linker := viewElementMapper{}.linkerFor(e)
func (l *ViewList) PushFront(e *View) {
linker := ViewElementMapper{}.linkerFor(e)
linker.SetNext(l.head)
linker.SetPrev(nil)
if l.head != nil {
viewElementMapper{}.linkerFor(l.head).SetPrev(e)
ViewElementMapper{}.linkerFor(l.head).SetPrev(e)
} else {
l.tail = e
}
@@ -90,13 +90,13 @@ func (l *viewList) PushFront(e *View) {
// PushFrontList inserts list m at the start of list l, emptying m.
//
//go:nosplit
func (l *viewList) PushFrontList(m *viewList) {
func (l *ViewList) PushFrontList(m *ViewList) {
if l.head == nil {
l.head = m.head
l.tail = m.tail
} else if m.head != nil {
viewElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
viewElementMapper{}.linkerFor(m.tail).SetNext(l.head)
ViewElementMapper{}.linkerFor(l.head).SetPrev(m.tail)
ViewElementMapper{}.linkerFor(m.tail).SetNext(l.head)
l.head = m.head
}
@@ -107,12 +107,12 @@ func (l *viewList) PushFrontList(m *viewList) {
// PushBack inserts the element e at the back of list l.
//
//go:nosplit
func (l *viewList) PushBack(e *View) {
linker := viewElementMapper{}.linkerFor(e)
func (l *ViewList) PushBack(e *View) {
linker := ViewElementMapper{}.linkerFor(e)
linker.SetNext(nil)
linker.SetPrev(l.tail)
if l.tail != nil {
viewElementMapper{}.linkerFor(l.tail).SetNext(e)
ViewElementMapper{}.linkerFor(l.tail).SetNext(e)
} else {
l.head = e
}
@@ -123,13 +123,13 @@ func (l *viewList) PushBack(e *View) {
// PushBackList inserts list m at the end of list l, emptying m.
//
//go:nosplit
func (l *viewList) PushBackList(m *viewList) {
func (l *ViewList) PushBackList(m *ViewList) {
if l.head == nil {
l.head = m.head
l.tail = m.tail
} else if m.head != nil {
viewElementMapper{}.linkerFor(l.tail).SetNext(m.head)
viewElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
ViewElementMapper{}.linkerFor(l.tail).SetNext(m.head)
ViewElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
l.tail = m.tail
}
@@ -140,9 +140,9 @@ func (l *viewList) PushBackList(m *viewList) {
// InsertAfter inserts e after b.
//
//go:nosplit
func (l *viewList) InsertAfter(b, e *View) {
bLinker := viewElementMapper{}.linkerFor(b)
eLinker := viewElementMapper{}.linkerFor(e)
func (l *ViewList) InsertAfter(b, e *View) {
bLinker := ViewElementMapper{}.linkerFor(b)
eLinker := ViewElementMapper{}.linkerFor(e)
a := bLinker.Next()
@@ -151,7 +151,7 @@ func (l *viewList) InsertAfter(b, e *View) {
bLinker.SetNext(e)
if a != nil {
viewElementMapper{}.linkerFor(a).SetPrev(e)
ViewElementMapper{}.linkerFor(a).SetPrev(e)
} else {
l.tail = e
}
@@ -160,9 +160,9 @@ func (l *viewList) InsertAfter(b, e *View) {
// InsertBefore inserts e before a.
//
//go:nosplit
func (l *viewList) InsertBefore(a, e *View) {
aLinker := viewElementMapper{}.linkerFor(a)
eLinker := viewElementMapper{}.linkerFor(e)
func (l *ViewList) InsertBefore(a, e *View) {
aLinker := ViewElementMapper{}.linkerFor(a)
eLinker := ViewElementMapper{}.linkerFor(e)
b := aLinker.Prev()
eLinker.SetNext(a)
@@ -170,7 +170,7 @@ func (l *viewList) InsertBefore(a, e *View) {
aLinker.SetPrev(e)
if b != nil {
viewElementMapper{}.linkerFor(b).SetNext(e)
ViewElementMapper{}.linkerFor(b).SetNext(e)
} else {
l.head = e
}
@@ -179,19 +179,19 @@ func (l *viewList) InsertBefore(a, e *View) {
// Remove removes e from l.
//
//go:nosplit
func (l *viewList) Remove(e *View) {
linker := viewElementMapper{}.linkerFor(e)
func (l *ViewList) Remove(e *View) {
linker := ViewElementMapper{}.linkerFor(e)
prev := linker.Prev()
next := linker.Next()
if prev != nil {
viewElementMapper{}.linkerFor(prev).SetNext(next)
ViewElementMapper{}.linkerFor(prev).SetNext(next)
} else if l.head == e {
l.head = next
}
if next != nil {
viewElementMapper{}.linkerFor(next).SetPrev(prev)
ViewElementMapper{}.linkerFor(next).SetPrev(prev)
} else if l.tail == e {
l.tail = prev
}
@@ -205,7 +205,7 @@ func (l *viewList) Remove(e *View) {
// methods needed by List.
//
// +stateify savable
type viewEntry struct {
type ViewEntry struct {
next *View
prev *View
}
@@ -213,27 +213,27 @@ type viewEntry struct {
// Next returns the entry that follows e in the list.
//
//go:nosplit
func (e *viewEntry) Next() *View {
func (e *ViewEntry) Next() *View {
return e.next
}
// Prev returns the entry that precedes e in the list.
//
//go:nosplit
func (e *viewEntry) Prev() *View {
func (e *ViewEntry) Prev() *View {
return e.prev
}
// SetNext assigns 'entry' as the entry that follows e in the list.
//
//go:nosplit
func (e *viewEntry) SetNext(elem *View) {
func (e *ViewEntry) SetNext(elem *View) {
e.next = elem
}
// SetPrev assigns 'entry' as the entry that precedes e in the list.
//
//go:nosplit
func (e *viewEntry) SetPrev(elem *View) {
func (e *ViewEntry) SetPrev(elem *View) {
e.prev = elem
}

View File

@@ -38,7 +38,7 @@ import (
"gvisor.dev/gvisor/pkg/sync"
)
// contextID is the package for context.Context.Value keys.
// contextID is the package for anyContext.Context.Value keys.
type contextID int
const (
@@ -51,13 +51,13 @@ const (
_AT_HWCAP2 = 26
)
// context represents context.Context.
type context interface {
// anyContext represents context.Context.
type anyContext interface {
Value(key any) any
}
// FromContext returns the FeatureSet from the context, if available.
func FromContext(ctx context) FeatureSet {
func FromContext(ctx anyContext) FeatureSet {
v := ctx.Value(CtxFeatureSet)
if v == nil {
return FeatureSet{} // Panics if used.

View File

@@ -18,6 +18,7 @@
package cpuid
import (
"context"
"fmt"
"io"
)
@@ -56,7 +57,7 @@ func (fs *FeatureSet) saveFunction() Static {
}
// loadFunction saves the function as a static query.
func (fs *FeatureSet) loadFunction(s Static) {
func (fs *FeatureSet) loadFunction(_ context.Context, s Static) {
fs.Function = s
}
@@ -309,7 +310,7 @@ func (fs FeatureSet) HasFeature(feature Feature) bool {
// a minimal /proc/cpuinfo, it is missing some fields like "microcode" that are
// not always printed in Linux. The bogomips field is simply made up.
func (fs FeatureSet) WriteCPUInfoTo(cpu uint, w io.Writer) {
// Avoid many redunant calls here, since this can occasionally appear
// Avoid many redundant calls here, since this can occasionally appear
// in the hot path. Read all basic information up front, see above.
ax, _, _, _ := fs.query(featureInfo)
ef, em, _, f, m, _ := signatureSplit(ax)
@@ -361,8 +362,22 @@ func (fs FeatureSet) Intel() bool {
// If xSaveInfo isn't supported, cpuid will not fault but will
// return bogus values.
var (
xsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ebx
maxXsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ecx
xsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ebx
maxXsaveSize = native(In{Eax: uint32(xSaveInfo)}).Ecx
amxTileCfgSize = native(In{Eax: uint32(xSaveInfo), Ecx: 17}).Eax
amxTileDataSize = native(In{Eax: uint32(xSaveInfo), Ecx: 18}).Eax
)
const (
// XCR0AMXMask are the bits that enable xsave to operate on AMX TILECFG
// and TILEDATA.
//
// Note: TILECFG and TILEDATA are always either both enabled or both
// disabled.
//
// See Intel® 64 and IA-32 Architectures Software Developers Manual Vol.1
// section 13.3 for details.
XCR0AMXMask = uint64((1 << 17) | (1 << 18))
)
// ExtendedStateSize returns the number of bytes needed to save the "extended
@@ -384,15 +399,30 @@ func (fs FeatureSet) ExtendedStateSize() (size, align uint) {
return 512, 16
}
// AMXExtendedStateSize returns the number of bytes within the "extended state"
// area that is used for AMX.
func (fs FeatureSet) AMXExtendedStateSize() uint {
if fs.UseXsave() {
xcr0 := xgetbv(0)
if (xcr0 & XCR0AMXMask) != 0 {
return uint(amxTileCfgSize + amxTileDataSize)
}
}
return 0
}
// ValidXCR0Mask returns the valid bits in control register XCR0.
//
// Always exclude AMX bits, because we do not support it.
// TODO(gvisor.dev/issues/9896): Implement AMX Support.
//
//go:nosplit
func (fs FeatureSet) ValidXCR0Mask() uint64 {
if !fs.HasFeature(X86FeatureXSAVE) {
return 0
}
ax, _, _, dx := fs.query(xSaveInfo)
return uint64(dx)<<32 | uint64(ax)
return (uint64(dx)<<32 | uint64(ax)) &^ XCR0AMXMask
}
// UseXsave returns the choice of fp state saving instruction.

View File

@@ -6,6 +6,8 @@
package cpuid
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -31,12 +33,12 @@ func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &fs.hwCap)
}
func (fs *FeatureSet) afterLoad() {}
func (fs *FeatureSet) afterLoad(context.Context) {}
// +checklocksignore
func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) {
func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(1, &fs.hwCap)
stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(y.(Static)) })
stateSourceObject.LoadValue(0, new(Static), func(y any) { fs.loadFunction(ctx, y.(Static)) })
}
func (i *In) StateTypeName() string {
@@ -59,10 +61,10 @@ func (i *In) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &i.Ecx)
}
func (i *In) afterLoad() {}
func (i *In) afterLoad(context.Context) {}
// +checklocksignore
func (i *In) StateLoad(stateSourceObject state.Source) {
func (i *In) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.Eax)
stateSourceObject.Load(1, &i.Ecx)
}
@@ -91,10 +93,10 @@ func (o *Out) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(3, &o.Edx)
}
func (o *Out) afterLoad() {}
func (o *Out) afterLoad(context.Context) {}
// +checklocksignore
func (o *Out) StateLoad(stateSourceObject state.Source) {
func (o *Out) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &o.Eax)
stateSourceObject.Load(1, &o.Ebx)
stateSourceObject.Load(2, &o.Ecx)

View File

@@ -6,6 +6,8 @@
package cpuid
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -39,10 +41,10 @@ func (fs *FeatureSet) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(6, &fs.cpuRevDec)
}
func (fs *FeatureSet) afterLoad() {}
func (fs *FeatureSet) afterLoad(context.Context) {}
// +checklocksignore
func (fs *FeatureSet) StateLoad(stateSourceObject state.Source) {
func (fs *FeatureSet) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &fs.hwCap)
stateSourceObject.Load(1, &fs.cpuFreqMHz)
stateSourceObject.Load(2, &fs.cpuImplHex)

View File

@@ -3,6 +3,8 @@
package cpuid
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -26,10 +28,10 @@ func (h *hwCap) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &h.hwCap2)
}
func (h *hwCap) afterLoad() {}
func (h *hwCap) afterLoad(context.Context) {}
// +checklocksignore
func (h *hwCap) StateLoad(stateSourceObject state.Source) {
func (h *hwCap) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &h.hwCap1)
stateSourceObject.Load(1, &h.hwCap2)
}

View File

@@ -127,6 +127,14 @@ func (f Feature) set(s ChangeableSet, on bool) {
}
}
s.Set(In{Eax: uint32(extendedFeatures)}, out)
case 7:
out := s.Query(In{Eax: uint32(extendedFeatureInfo)})
if on {
out.Edx |= f.bit()
} else {
out.Edx &^= f.bit()
}
s.Set(In{Eax: uint32(extendedFeatureInfo)}, out)
}
}
@@ -170,6 +178,9 @@ func (f Feature) check(fs FeatureSet) bool {
return ((dx &^ block6DuplicateMask) & f.bit()) != 0
}
return false
case 7:
_, _, _, dx := fs.query(extendedFeatureInfo)
return (dx & f.bit()) != 0
default:
return false
}
@@ -389,6 +400,43 @@ const (
X86Feature3DNOW Feature = 6*32 + 31
)
// Block 7 constants are the extended features bits in
// CPUID.(EAX=07H,ECX=0):EDX.
const (
_ Feature = 7*32 + iota // edx bit 0 is reserved.
_ // edx bit 1 is reserved.
X86FeatureAVX512_4VNNIW
X86FeatureAVX512_4FMAPS
X86FeatureFSRM
_ // edx bit 5 is not used in Linux.
_ // edx bit 6 is reserved.
_ // edx bit 7 is reserved.
X86FeatureAVX512_VP2INTERSECT
X86FeatureSRBDS_CTRL
X86FeatureMD_CLEAR
X86FeatureRTM_ALWAYS_ABORT
_ // edx bit 12 is reserved.
X86FeatureTSX_FORCE_ABORT
X86FeatureSERIALIZE
X86FeatureHYBRID_CPU
X86FeatureTSXLDTRK
_ // edx bit 17 is reserved.
X86FeaturePCONFIG
X86FeatureARCH_LBR
X86FeatureIBT
_ // edx bit 21 is reserved.
X86FeatureAMX_BF16
X86FeatureAVX512_FP16
X86FeatureAMX_TILE
X86FeatureAMX_INT8
X86FeatureSPEC_CTRL
X86FeatureINTEL_STIBP
X86FeatureFLUSH_L1D
X86FeatureARCH_CAPABILITIES
X86FeatureCORE_CAPABILITIES
X86FeatureSPEC_CTRL_SSBD
)
// These are the extended floating point state features. They are used to
// enumerate floating point features in XCR0, XSTATE_BV, etc.
const (
@@ -569,6 +617,32 @@ var allFeatures = map[Feature]allFeatureInfo{
X86FeatureLM: {"lm", true},
X86Feature3DNOWEXT: {"3dnowext", true},
X86Feature3DNOW: {"3dnow", true},
// Block 7.
X86FeatureAVX512_4VNNIW: {"avx512_4vnniw", true},
X86FeatureAVX512_4FMAPS: {"avx512_4fmaps", true},
X86FeatureFSRM: {"fsrm", true},
X86FeatureAVX512_VP2INTERSECT: {"avx512_vp2intersect", true},
X86FeatureSRBDS_CTRL: {"srbds_ctrl", false},
X86FeatureMD_CLEAR: {"md_clear", true},
X86FeatureRTM_ALWAYS_ABORT: {"rtm_always_abort", false},
X86FeatureTSX_FORCE_ABORT: {"tsx_force_abort", false},
X86FeatureSERIALIZE: {"serialize", true},
X86FeatureHYBRID_CPU: {"hybrid_cpu", false},
X86FeatureTSXLDTRK: {"tsxldtrk", true},
X86FeaturePCONFIG: {"pconfig", true},
X86FeatureARCH_LBR: {"arch_lbr", true},
X86FeatureIBT: {"ibt", true},
X86FeatureAMX_BF16: {"amx_bf16", true},
X86FeatureAVX512_FP16: {"avx512_fp16", true},
X86FeatureAMX_TILE: {"amx_tile", true},
X86FeatureAMX_INT8: {"amx_int8", true},
X86FeatureSPEC_CTRL: {"spec_ctrl", false},
X86FeatureINTEL_STIBP: {"intel_stibp", false},
X86FeatureFLUSH_L1D: {"flush_l1d", true},
X86FeatureARCH_CAPABILITIES: {"arch_capabilities", true},
X86FeatureCORE_CAPABILITIES: {"core_capabilities", false},
X86FeatureSPEC_CTRL_SSBD: {"spec_ctrl_ssbd", false},
}
// linuxBlockOrder defines the order in which linux organizes the feature
@@ -576,7 +650,7 @@ var allFeatures = map[Feature]allFeatureInfo{
// which doesn't match well here, so for the /proc/cpuinfo generation we simply
// re-map the blocks to Linux's ordering and then go through the bits in each
// block.
var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3}
var linuxBlockOrder = []block{1, 6, 0, 5, 2, 4, 3, 7}
func archFlagOrder(fn func(Feature)) {
for _, b := range linuxBlockOrder {

View File

@@ -215,6 +215,9 @@ func readMaxCPUFreq() {
}
// xgetbv reads an extended control register.
func xgetbv(reg uintptr) uint64
// archInitialize initializes hostFeatureSet.
func archInitialize() {
hostFeatureSet = FeatureSet{

View File

@@ -23,3 +23,16 @@ TEXT ·native(SB),NOSPLIT|NOFRAME,$0-24
MOVL CX, ret_Ecx+16(FP)
MOVL DX, ret_Edx+20(FP)
RET
// xgetbv reads an extended control register.
//
// The code corresponds to:
//
// xgetbv
//
TEXT ·xgetbv(SB),NOSPLIT|NOFRAME,$0-16
MOVQ reg+0(FP), CX
BYTE $0x0f; BYTE $0x01; BYTE $0xd0;
MOVL AX, ret+8(FP)
MOVL DX, ret+12(FP)
RET

View File

@@ -17,6 +17,8 @@
package cpuid
import "context"
// Static is a static CPUID function.
//
// +stateify savable
@@ -90,7 +92,7 @@ func (s Static) ToFeatureSet() FeatureSet {
}
// afterLoad calls normalize.
func (s Static) afterLoad() {
func (s Static) afterLoad(context.Context) {
s.normalize()
}

View File

@@ -17,6 +17,8 @@ package log
import (
"encoding/json"
"fmt"
"runtime"
"strings"
"time"
)
@@ -62,9 +64,16 @@ type JSONEmitter struct {
}
// Emit implements Emitter.Emit.
func (e JSONEmitter) Emit(_ int, level Level, timestamp time.Time, format string, v ...any) {
func (e JSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) {
logLine := fmt.Sprintf(format, v...)
if _, file, line, ok := runtime.Caller(depth + 1); ok {
if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 {
file = file[slash+1:] // Trim any directory path from the file.
}
logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine)
}
j := jsonLog{
Msg: fmt.Sprintf(format, v...),
Msg: logLine,
Level: level,
Time: timestamp,
}

View File

@@ -17,6 +17,8 @@ package log
import (
"encoding/json"
"fmt"
"runtime"
"strings"
"time"
)
@@ -33,9 +35,16 @@ type K8sJSONEmitter struct {
}
// Emit implements Emitter.Emit.
func (e K8sJSONEmitter) Emit(_ int, level Level, timestamp time.Time, format string, v ...any) {
func (e K8sJSONEmitter) Emit(depth int, level Level, timestamp time.Time, format string, v ...any) {
logLine := fmt.Sprintf(format, v...)
if _, file, line, ok := runtime.Caller(depth + 1); ok {
if slash := strings.LastIndexByte(file, byte('/')); slash >= 0 {
file = file[slash+1:] // Trim any directory path from the file.
}
logLine = fmt.Sprintf("%s:%d] %s", file, line, logLine)
}
j := k8sJSONLog{
Log: fmt.Sprintf(format, v...),
Log: logLine,
Level: level,
Time: timestamp,
}

View File

@@ -250,11 +250,11 @@ func (l *BasicLogger) SetLevel(level Level) {
var logMu sync.Mutex
// log is the default logger.
var log atomic.Value
var log atomic.Pointer[BasicLogger]
// Log retrieves the global logger.
func Log() *BasicLogger {
return log.Load().(*BasicLogger)
return log.Load()
}
// SetTarget sets the log target.

View File

@@ -15,8 +15,6 @@
//go:build !linux
// +build !linux
// Package rand implements a cryptographically secure pseudorandom number
// generator.
package rand
import "crypto/rand"

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rand implements a cryptographically secure pseudorandom number
// generator.
package rand
import (
@@ -54,10 +52,17 @@ type bufferedReader struct {
// Read implements io.Reader.Read.
func (b *bufferedReader) Read(p []byte) (int, error) {
// In Linux, reads of up to page size bytes will always complete fully.
// See drivers/char/random.c:get_random_bytes_user().
// NOTE(gvisor.dev/issue/9445): Some applications rely on this behavior.
const pageSize = 4096
min := len(p)
if min > pageSize {
min = pageSize
}
b.mu.Lock()
n, err := b.r.Read(p)
b.mu.Unlock()
return n, err
defer b.mu.Unlock()
return io.ReadAtLeast(b.r, p, min)
}
// Reader is the default reader.

131
vendor/gvisor.dev/gvisor/pkg/rand/rng.go vendored Normal file
View File

@@ -0,0 +1,131 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package rand implements a cryptographically secure pseudorandom number
// generator.
package rand
import (
"encoding/binary"
"fmt"
"io"
)
// RNG exposes convenience functions based on a cryptographically secure
// io.Reader.
type RNG struct {
Reader io.Reader
}
// RNGFrom returns a new RNG. r must be a cryptographically secure io.Reader.
func RNGFrom(r io.Reader) RNG {
return RNG{Reader: r}
}
// Uint16 is analogous to the standard library's math/rand.Uint16.
func (rg *RNG) Uint16() uint16 {
var data [2]byte
if _, err := rg.Reader.Read(data[:]); err != nil {
panic(fmt.Sprintf("Read() failed: %v", err))
}
return binary.NativeEndian.Uint16(data[:])
}
// Uint32 is analogous to the standard library's math/rand.Uint32.
func (rg *RNG) Uint32() uint32 {
var data [4]byte
if _, err := rg.Reader.Read(data[:]); err != nil {
panic(fmt.Sprintf("Read() failed: %v", err))
}
return binary.NativeEndian.Uint32(data[:])
}
// Int63n is analogous to the standard library's math/rand.Int63n.
func (rg *RNG) Int63n(n int64) int64 {
// Based on Go's rand package implementation, but using
// cryptographically secure random numbers.
if n <= 0 {
panic(fmt.Sprintf("n must be positive, but got %d", n))
}
// This can be done quickly when n is a power of 2.
if n&(n-1) == 0 {
return int64(rg.Uint64()) & (n - 1)
}
// The naive approach would be to return rg.Int63()%n, but we need the
// random number to be fair. It shouldn't be biased towards certain
// results, but simple modular math can be very biased. For example, if
// n is 40% of the maximum int64, then the output values of rg.Int63
// map to return values as follows:
//
// - The first 40% of values map to themselves.
// - The second 40% map to themselves - maximum int64.
// - The remaining 20% map to the themselves - 2 * (maximum int64),
// i.e. the first half of possible output values.
//
// And thus 60% of results map the the first half of possible output
// values, and 40% map the second half. Oops!
//
// We use the same trick as Go to deal with this: shave off the last
// segment (the 20% in our example) to make the RNG more fair.
//
// In the worst case, n is just over half of maximum int64, meaning
// that the upper half of rg.Int63 return values are bad. So each call
// to rg.Int63 has, at worst, a 50% chance of needing a retry.
maximum := int64((1 << 63) - 1 - (1<<63)%uint64(n))
ret := rg.Int63()
for ret > maximum {
ret = rg.Int63()
}
return ret % n
}
// Int63 is analogous to the standard library's math/rand.Int63.
func (rg *RNG) Int63() int64 {
return ((1 << 63) - 1) & int64(rg.Uint64())
}
// Uint64 is analogous to the standard library's math/rand.Uint64.
func (rg *RNG) Uint64() uint64 {
var data [8]byte
if _, err := rg.Reader.Read(data[:]); err != nil {
panic(fmt.Sprintf("Read() failed: %v", err))
}
return binary.NativeEndian.Uint64(data[:])
}
// Uint32 is analogous to the standard library's math/rand.Uint32.
func Uint32() uint32 {
rng := RNG{Reader: Reader}
return rng.Uint32()
}
// Int63n is analogous to the standard library's math/rand.Int63n.
func Int63n(n int64) int64 {
rng := RNG{Reader: Reader}
return rng.Int63n(n)
}
// Int63 is analogous to the standard library's math/rand.Int63.
func Int63() int64 {
rng := RNG{Reader: Reader}
return rng.Int63()
}
// Uint64 is analogous to the standard library's math/rand.Uint64.
func Uint64() uint64 {
rng := RNG{Reader: Reader}
return rng.Uint64()
}

View File

@@ -68,6 +68,7 @@
package sleep
import (
"context"
"sync/atomic"
"unsafe"
@@ -129,7 +130,7 @@ func (s *Sleeper) saveSharedList() *Waker {
}
// loadSharedList is invoked by stateify.
func (s *Sleeper) loadSharedList(w *Waker) {
func (s *Sleeper) loadSharedList(_ context.Context, w *Waker) {
atomic.StorePointer(&s.sharedList, unsafe.Pointer(w))
}
@@ -206,7 +207,7 @@ func (s *Sleeper) nextWaker(block, wakepOrSleep bool) *Waker {
// See:runtime2.go in the go runtime package for
// the values to pass as the waitReason here.
const waitReasonSelect = 9
sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceEvGoBlockSelect, 0)
sync.Gopark(commitSleep, unsafe.Pointer(&s.waitingG), sync.WaitReasonSelect, sync.TraceBlockSelect, 0)
}
// Pull the shared list out and reverse it in the local
@@ -408,7 +409,7 @@ func (w *Waker) saveS() wakerState {
}
// loadS is invoked by stateify.
func (w *Waker) loadS(ws wakerState) {
func (w *Waker) loadS(_ context.Context, ws wakerState) {
if ws.asserted {
atomic.StorePointer(&w.s, unsafe.Pointer(&assertedSleeper))
} else {

View File

@@ -3,6 +3,8 @@
package sleep
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -30,13 +32,13 @@ func (s *Sleeper) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(2, &s.allWakers)
}
func (s *Sleeper) afterLoad() {}
func (s *Sleeper) afterLoad(context.Context) {}
// +checklocksignore
func (s *Sleeper) StateLoad(stateSourceObject state.Source) {
func (s *Sleeper) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(1, &s.localList)
stateSourceObject.Load(2, &s.allWakers)
stateSourceObject.LoadValue(0, new(*Waker), func(y any) { s.loadSharedList(y.(*Waker)) })
stateSourceObject.LoadValue(0, new(*Waker), func(y any) { s.loadSharedList(ctx, y.(*Waker)) })
}
func (w *Waker) StateTypeName() string {
@@ -63,13 +65,13 @@ func (w *Waker) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(2, &w.allWakersNext)
}
func (w *Waker) afterLoad() {}
func (w *Waker) afterLoad(context.Context) {}
// +checklocksignore
func (w *Waker) StateLoad(stateSourceObject state.Source) {
func (w *Waker) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(1, &w.next)
stateSourceObject.Load(2, &w.allWakersNext)
stateSourceObject.LoadValue(0, new(wakerState), func(y any) { w.loadS(y.(wakerState)) })
stateSourceObject.LoadValue(0, new(wakerState), func(y any) { w.loadS(ctx, y.(wakerState)) })
}
func init() {

View File

@@ -2,6 +2,7 @@ package state
import (
"bytes"
"context"
"fmt"
)
@@ -56,7 +57,7 @@ const (
//
// +stateify savable
type addrSet struct {
root addrnode `state:".(*addrSegmentDataSlices)"`
root addrnode `state:".([]addrFlatSegment)"`
}
// IsEmpty returns true if the set contains no segments.
@@ -228,42 +229,68 @@ func (s *addrSet) UpperBoundGap(max uintptr) addrGapIterator {
return seg.PrevGap()
}
// Add inserts the given segment into the set and returns true. If the new
// segment can be merged with adjacent segments, Add will do so. If the new
// segment would overlap an existing segment, Add returns false. If Add
// succeeds, all existing iterators are invalidated.
func (s *addrSet) Add(r addrRange, val *objectEncodeState) bool {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
// FirstLargeEnoughGap returns the first gap in the set with at least the given
// length. If no such gap exists, FirstLargeEnoughGap returns a terminal
// iterator.
//
// Precondition: trackGaps must be 1.
func (s *addrSet) FirstLargeEnoughGap(minSize uintptr) addrGapIterator {
if addrtrackGaps != 1 {
panic("set is not tracking gaps")
}
gap := s.FindGap(r.Start)
if !gap.Ok() {
return false
gap := s.FirstGap()
if gap.Range().Length() >= minSize {
return gap
}
if r.End > gap.End() {
return false
}
s.Insert(gap, r, val)
return true
return gap.NextLargeEnoughGap(minSize)
}
// AddWithoutMerging inserts the given segment into the set and returns true.
// If it would overlap an existing segment, AddWithoutMerging does nothing and
// returns false. If AddWithoutMerging succeeds, all existing iterators are
// invalidated.
func (s *addrSet) AddWithoutMerging(r addrRange, val *objectEncodeState) bool {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
// LastLargeEnoughGap returns the last gap in the set with at least the given
// length. If no such gap exists, LastLargeEnoughGap returns a terminal
// iterator.
//
// Precondition: trackGaps must be 1.
func (s *addrSet) LastLargeEnoughGap(minSize uintptr) addrGapIterator {
if addrtrackGaps != 1 {
panic("set is not tracking gaps")
}
gap := s.FindGap(r.Start)
if !gap.Ok() {
return false
gap := s.LastGap()
if gap.Range().Length() >= minSize {
return gap
}
if r.End > gap.End() {
return false
return gap.PrevLargeEnoughGap(minSize)
}
// LowerBoundLargeEnoughGap returns the first gap in the set with at least the
// given length and whose range contains a key greater than or equal to min. If
// no such gap exists, LowerBoundLargeEnoughGap returns a terminal iterator.
//
// Precondition: trackGaps must be 1.
func (s *addrSet) LowerBoundLargeEnoughGap(min, minSize uintptr) addrGapIterator {
if addrtrackGaps != 1 {
panic("set is not tracking gaps")
}
s.InsertWithoutMergingUnchecked(gap, r, val)
return true
gap := s.LowerBoundGap(min)
if gap.Range().Length() >= minSize {
return gap
}
return gap.NextLargeEnoughGap(minSize)
}
// UpperBoundLargeEnoughGap returns the last gap in the set with at least the
// given length and whose range contains a key less than or equal to max. If no
// such gap exists, UpperBoundLargeEnoughGap returns a terminal iterator.
//
// Precondition: trackGaps must be 1.
func (s *addrSet) UpperBoundLargeEnoughGap(max, minSize uintptr) addrGapIterator {
if addrtrackGaps != 1 {
panic("set is not tracking gaps")
}
gap := s.UpperBoundGap(max)
if gap.Range().Length() >= minSize {
return gap
}
return gap.PrevLargeEnoughGap(minSize)
}
// Insert inserts the given segment into the given gap. If the new segment can
@@ -360,6 +387,107 @@ func (s *addrSet) InsertWithoutMergingUnchecked(gap addrGapIterator, r addrRange
return addrIterator{gap.node, gap.index}
}
// InsertRange inserts the given segment into the set. If the new segment can
// be merged with adjacent segments, InsertRange will do so. InsertRange
// returns an iterator to the segment containing the inserted value (which may
// have been merged with other values). All existing iterators (excluding the
// returned iterator) are invalidated.
//
// If the new segment would overlap an existing segment, or if r is invalid,
// InsertRange panics.
//
// InsertRange searches the set to find the gap to insert into. If the caller
// already has the appropriate GapIterator, or if the caller needs to do
// additional work between finding the gap and insertion, use Insert instead.
func (s *addrSet) InsertRange(r addrRange, val *objectEncodeState) addrIterator {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
seg, gap := s.Find(r.Start)
if seg.Ok() {
panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range()))
}
if gap.End() < r.End {
panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range()))
}
return s.Insert(gap, r, val)
}
// InsertWithoutMergingRange inserts the given segment into the set and returns
// an iterator to the inserted segment. All existing iterators (excluding the
// returned iterator) are invalidated.
//
// If the new segment would overlap an existing segment, or if r is invalid,
// InsertWithoutMergingRange panics.
//
// InsertWithoutMergingRange searches the set to find the gap to insert into.
// If the caller already has the appropriate GapIterator, or if the caller
// needs to do additional work between finding the gap and insertion, use
// InsertWithoutMerging instead.
func (s *addrSet) InsertWithoutMergingRange(r addrRange, val *objectEncodeState) addrIterator {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
seg, gap := s.Find(r.Start)
if seg.Ok() {
panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, seg.Range()))
}
if gap.End() < r.End {
panic(fmt.Sprintf("new segment %v overlaps existing segment %v", r, gap.NextSegment().Range()))
}
return s.InsertWithoutMerging(gap, r, val)
}
// TryInsertRange attempts to insert the given segment into the set. If the new
// segment can be merged with adjacent segments, TryInsertRange will do so.
// TryInsertRange returns an iterator to the segment containing the inserted
// value (which may have been merged with other values). All existing iterators
// (excluding the returned iterator) are invalidated.
//
// If the new segment would overlap an existing segment, TryInsertRange does
// nothing and returns a terminal iterator.
//
// TryInsertRange searches the set to find the gap to insert into. If the
// caller already has the appropriate GapIterator, or if the caller needs to do
// additional work between finding the gap and insertion, use Insert instead.
func (s *addrSet) TryInsertRange(r addrRange, val *objectEncodeState) addrIterator {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
seg, gap := s.Find(r.Start)
if seg.Ok() {
return addrIterator{}
}
if gap.End() < r.End {
return addrIterator{}
}
return s.Insert(gap, r, val)
}
// TryInsertWithoutMergingRange attempts to insert the given segment into the
// set. If successful, it returns an iterator to the inserted segment; all
// existing iterators (excluding the returned iterator) are invalidated. If the
// new segment would overlap an existing segment, TryInsertWithoutMergingRange
// does nothing and returns a terminal iterator.
//
// TryInsertWithoutMergingRange searches the set to find the gap to insert
// into. If the caller already has the appropriate GapIterator, or if the
// caller needs to do additional work between finding the gap and insertion,
// use InsertWithoutMerging instead.
func (s *addrSet) TryInsertWithoutMergingRange(r addrRange, val *objectEncodeState) addrIterator {
if r.Length() <= 0 {
panic(fmt.Sprintf("invalid segment range %v", r))
}
seg, gap := s.Find(r.Start)
if seg.Ok() {
return addrIterator{}
}
if gap.End() < r.End {
return addrIterator{}
}
return s.InsertWithoutMerging(gap, r, val)
}
// Remove removes the given segment and returns an iterator to the vacated gap.
// All existing iterators (including seg, but not including the returned
// iterator) are invalidated.
@@ -396,6 +524,11 @@ func (s *addrSet) RemoveAll() {
// RemoveRange removes all segments in the given range. An iterator to the
// newly formed gap is returned, and all existing iterators are invalidated.
//
// RemoveRange searches the set to find segments to remove. If the caller
// already has an iterator to either end of the range of segments to remove, or
// if the caller needs to do additional work before removing each segment,
// iterate segments and call Remove in a loop instead.
func (s *addrSet) RemoveRange(r addrRange) addrGapIterator {
seg, gap := s.Find(r.Start)
if seg.Ok() {
@@ -403,12 +536,34 @@ func (s *addrSet) RemoveRange(r addrRange) addrGapIterator {
gap = s.Remove(seg)
}
for seg = gap.NextSegment(); seg.Ok() && seg.Start() < r.End; seg = gap.NextSegment() {
seg = s.Isolate(seg, r)
seg = s.SplitAfter(seg, r.End)
gap = s.Remove(seg)
}
return gap
}
// RemoveFullRange is equivalent to RemoveRange, except that if any key in the
// given range does not correspond to a segment, RemoveFullRange panics.
func (s *addrSet) RemoveFullRange(r addrRange) addrGapIterator {
seg := s.FindSegment(r.Start)
if !seg.Ok() {
panic(fmt.Sprintf("missing segment at %v", r.Start))
}
seg = s.SplitBefore(seg, r.Start)
for {
seg = s.SplitAfter(seg, r.End)
end := seg.End()
gap := s.Remove(seg)
if r.End <= end {
return gap
}
seg = gap.NextSegment()
if !seg.Ok() || seg.Start() != end {
panic(fmt.Sprintf("missing segment at %v", end))
}
}
}
// Merge attempts to merge two neighboring segments. If successful, Merge
// returns an iterator to the merged segment, and all existing iterators are
// invalidated. Otherwise, Merge returns a terminal iterator.
@@ -441,7 +596,68 @@ func (s *addrSet) MergeUnchecked(first, second addrIterator) addrIterator {
return addrIterator{}
}
// MergeAll attempts to merge all adjacent segments in the set. All existing
// MergePrev attempts to merge the given segment with its predecessor if
// possible, and returns an updated iterator to the extended segment. All
// existing iterators (including seg, but not including the returned iterator)
// are invalidated.
//
// MergePrev is usually used when mutating segments while iterating them in
// order of increasing keys, to attempt merging of each mutated segment with
// its previously-mutated predecessor. In such cases, merging a mutated segment
// with its unmutated successor would incorrectly cause the latter to be
// skipped.
func (s *addrSet) MergePrev(seg addrIterator) addrIterator {
if prev := seg.PrevSegment(); prev.Ok() {
if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() {
seg = mseg
}
}
return seg
}
// MergeNext attempts to merge the given segment with its successor if
// possible, and returns an updated iterator to the extended segment. All
// existing iterators (including seg, but not including the returned iterator)
// are invalidated.
//
// MergeNext is usually used when mutating segments while iterating them in
// order of decreasing keys, to attempt merging of each mutated segment with
// its previously-mutated successor. In such cases, merging a mutated segment
// with its unmutated predecessor would incorrectly cause the latter to be
// skipped.
func (s *addrSet) MergeNext(seg addrIterator) addrIterator {
if next := seg.NextSegment(); next.Ok() {
if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
seg = mseg
}
}
return seg
}
// Unisolate attempts to merge the given segment with its predecessor and
// successor if possible, and returns an updated iterator to the extended
// segment. All existing iterators (including seg, but not including the
// returned iterator) are invalidated.
//
// Unisolate is usually used in conjunction with Isolate when mutating part of
// a single segment in a way that may affect its mergeability. For the reasons
// described by MergePrev and MergeNext, it is usually incorrect to use the
// return value of Unisolate in a loop variable.
func (s *addrSet) Unisolate(seg addrIterator) addrIterator {
if prev := seg.PrevSegment(); prev.Ok() {
if mseg := s.MergeUnchecked(prev, seg); mseg.Ok() {
seg = mseg
}
}
if next := seg.NextSegment(); next.Ok() {
if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
seg = mseg
}
}
return seg
}
// MergeAll merges all mergeable adjacent segments in the set. All existing
// iterators are invalidated.
func (s *addrSet) MergeAll() {
seg := s.FirstSegment()
@@ -458,15 +674,20 @@ func (s *addrSet) MergeAll() {
}
}
// MergeRange attempts to merge all adjacent segments that contain a key in the
// specific range. All existing iterators are invalidated.
func (s *addrSet) MergeRange(r addrRange) {
// MergeInsideRange attempts to merge all adjacent segments that contain a key
// in the specific range. All existing iterators are invalidated.
//
// MergeInsideRange only makes sense after mutating the set in a way that may
// change the mergeability of modified segments; callers should prefer to use
// MergePrev or MergeNext during the mutating loop instead (depending on the
// direction of iteration), in order to avoid a redundant search.
func (s *addrSet) MergeInsideRange(r addrRange) {
seg := s.LowerBoundSegment(r.Start)
if !seg.Ok() {
return
}
next := seg.NextSegment()
for next.Ok() && next.Range().Start < r.End {
for next.Ok() && next.Start() < r.End {
if mseg := s.MergeUnchecked(seg, next); mseg.Ok() {
seg, next = mseg, mseg.NextSegment()
} else {
@@ -475,9 +696,14 @@ func (s *addrSet) MergeRange(r addrRange) {
}
}
// MergeAdjacent attempts to merge the segment containing r.Start with its
// MergeOutsideRange attempts to merge the segment containing r.Start with its
// predecessor, and the segment containing r.End-1 with its successor.
func (s *addrSet) MergeAdjacent(r addrRange) {
//
// MergeOutsideRange only makes sense after mutating the set in a way that may
// change the mergeability of modified segments; callers should prefer to use
// MergePrev or MergeNext during the mutating loop instead (depending on the
// direction of iteration), in order to avoid two redundant searches.
func (s *addrSet) MergeOutsideRange(r addrRange) {
first := s.FindSegment(r.Start)
if first.Ok() {
if prev := first.PrevSegment(); prev.Ok() {
@@ -522,21 +748,58 @@ func (s *addrSet) SplitUnchecked(seg addrIterator, split uintptr) (addrIterator,
return seg2.PrevSegment(), seg2
}
// SplitAt splits the segment straddling split, if one exists. SplitAt returns
// true if a segment was split and false otherwise. If SplitAt splits a
// segment, all existing iterators are invalidated.
func (s *addrSet) SplitAt(split uintptr) bool {
if seg := s.FindSegment(split); seg.Ok() && seg.Range().CanSplitAt(split) {
s.SplitUnchecked(seg, split)
return true
// SplitBefore ensures that the given segment's start is at least start by
// splitting at start if necessary, and returns an updated iterator to the
// bounded segment. All existing iterators (including seg, but not including
// the returned iterator) are invalidated.
//
// SplitBefore is usually when mutating segments in a range. In such cases,
// when iterating segments in order of increasing keys, the first segment may
// extend beyond the start of the range to be mutated, and needs to be
// SplitBefore to ensure that only the part of the segment within the range is
// mutated. When iterating segments in order of decreasing keys, SplitBefore
// and SplitAfter; i.e. SplitBefore needs to be invoked on each segment, while
// SplitAfter only needs to be invoked on the first.
//
// Preconditions: start < seg.End().
func (s *addrSet) SplitBefore(seg addrIterator, start uintptr) addrIterator {
if seg.Range().CanSplitAt(start) {
_, seg = s.SplitUnchecked(seg, start)
}
return false
return seg
}
// Isolate ensures that the given segment's range does not escape r by
// splitting at r.Start and r.End if necessary, and returns an updated iterator
// to the bounded segment. All existing iterators (including seg, but not
// including the returned iterators) are invalidated.
// SplitAfter ensures that the given segment's end is at most end by splitting
// at end if necessary, and returns an updated iterator to the bounded segment.
// All existing iterators (including seg, but not including the returned
// iterator) are invalidated.
//
// SplitAfter is usually used when mutating segments in a range. In such cases,
// when iterating segments in order of increasing keys, each iterated segment
// may extend beyond the end of the range to be mutated, and needs to be
// SplitAfter to ensure that only the part of the segment within the range is
// mutated. When iterating segments in order of decreasing keys, SplitBefore
// and SplitAfter exchange roles; i.e. SplitBefore needs to be invoked on each
// segment, while SplitAfter only needs to be invoked on the first.
//
// Preconditions: seg.Start() < end.
func (s *addrSet) SplitAfter(seg addrIterator, end uintptr) addrIterator {
if seg.Range().CanSplitAt(end) {
seg, _ = s.SplitUnchecked(seg, end)
}
return seg
}
// Isolate ensures that the given segment's range is a subset of r by splitting
// at r.Start and r.End if necessary, and returns an updated iterator to the
// bounded segment. All existing iterators (including seg, but not including
// the returned iterators) are invalidated.
//
// Isolate is usually used when mutating part of a single segment, or when
// mutating segments in a range where the first segment is not necessarily
// split, making use of SplitBefore/SplitAfter complex.
//
// Preconditions: seg.Range().Overlaps(r).
func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator {
if seg.Range().CanSplitAt(r.Start) {
_, seg = s.SplitUnchecked(seg, r.Start)
@@ -547,32 +810,118 @@ func (s *addrSet) Isolate(seg addrIterator, r addrRange) addrIterator {
return seg
}
// ApplyContiguous applies a function to a contiguous range of segments,
// splitting if necessary. The function is applied until the first gap is
// encountered, at which point the gap is returned. If the function is applied
// across the entire range, a terminal gap is returned. All existing iterators
// are invalidated.
// LowerBoundSegmentSplitBefore combines LowerBoundSegment and SplitBefore.
//
// N.B. The Iterator must not be invalidated by the function.
func (s *addrSet) ApplyContiguous(r addrRange, fn func(seg addrIterator)) addrGapIterator {
seg, gap := s.Find(r.Start)
if !seg.Ok() {
return gap
// LowerBoundSegmentSplitBefore is usually used when mutating segments in a
// range while iterating them in order of increasing keys. In such cases,
// LowerBoundSegmentSplitBefore provides an iterator to the first segment to be
// mutated, suitable as the initial value for a loop variable.
func (s *addrSet) LowerBoundSegmentSplitBefore(min uintptr) addrIterator {
seg := s.LowerBoundSegment(min)
if seg.Ok() {
seg = s.SplitBefore(seg, min)
}
for {
seg = s.Isolate(seg, r)
fn(seg)
if seg.End() >= r.End {
return addrGapIterator{}
}
gap = seg.NextGap()
if !gap.IsEmpty() {
return gap
}
seg = gap.NextSegment()
if !seg.Ok() {
return seg
}
return addrGapIterator{}
// UpperBoundSegmentSplitAfter combines UpperBoundSegment and SplitAfter.
//
// UpperBoundSegmentSplitAfter is usually used when mutating segments in a
// range while iterating them in order of decreasing keys. In such cases,
// UpperBoundSegmentSplitAfter provides an iterator to the first segment to be
// mutated, suitable as the initial value for a loop variable.
func (s *addrSet) UpperBoundSegmentSplitAfter(max uintptr) addrIterator {
	// Locate the last segment beginning at or before max; a terminal
	// iterator is returned unchanged.
	seg := s.UpperBoundSegment(max)
	if !seg.Ok() {
		return seg
	}
	// Trim the found segment so that it does not extend past max.
	return s.SplitAfter(seg, max)
}
// VisitRange applies the function f to all segments intersecting the range r,
// in order of ascending keys. Segments will not be split, so f may be called
// on segments lying partially outside r. Non-empty gaps between segments are
// skipped. If a call to f returns false, VisitRange stops iteration
// immediately.
//
// N.B. f must not invalidate iterators into s.
func (s *addrSet) VisitRange(r addrRange, f func(seg addrIterator) bool) {
	// Walk segments in ascending key order, starting from the first segment
	// that may intersect r, and stop as soon as f declines or we pass r.End.
	seg := s.LowerBoundSegment(r.Start)
	for seg.Ok() && seg.Start() < r.End {
		if !f(seg) {
			break
		}
		seg = seg.NextSegment()
	}
}
// VisitFullRange is equivalent to VisitRange, except that if any key in r that
// is visited before f returns false does not correspond to a segment,
// VisitFullRange panics.
func (s *addrSet) VisitFullRange(r addrRange, f func(seg addrIterator) bool) {
	// pos tracks the first key not yet known to be covered by a segment;
	// any hole at pos means the range is not fully populated.
	pos := r.Start
	for seg := s.FindSegment(r.Start); ; seg, _ = seg.NextNonEmpty() {
		if !seg.Ok() {
			panic(fmt.Sprintf("missing segment at %v", pos))
		}
		if !f(seg) {
			return
		}
		pos = seg.End()
		if r.End <= pos {
			return
		}
	}
}
// MutateRange applies the function f to all segments intersecting the range r,
// in order of ascending keys. Segments that lie partially outside r are split
// before f is called, such that f only observes segments entirely within r.
// Iterated segments are merged again after f is called. Non-empty gaps between
// segments are skipped. If a call to f returns false, MutateRange stops
// iteration immediately.
//
// MutateRange invalidates all existing iterators.
//
// N.B. f must not invalidate iterators into s.
func (s *addrSet) MutateRange(r addrRange, f func(seg addrIterator) bool) {
	// Split the first overlapping segment at r.Start so that f never
	// observes bytes before the range.
	seg := s.LowerBoundSegmentSplitBefore(r.Start)
	for seg.Ok() && seg.Start() < r.End {
		// Split at r.End so that f never observes bytes past the range.
		seg = s.SplitAfter(seg, r.End)
		cont := f(seg)
		// NOTE(review): MergePrev presumably re-merges seg with its
		// predecessor (f may have made them mergeable) and returns an
		// updated iterator — confirm against the set implementation.
		seg = s.MergePrev(seg)
		if !cont {
			// Stopping early: this segment will not be revisited, so also
			// attempt the merge with its successor before returning.
			s.MergeNext(seg)
			return
		}
		seg = seg.NextSegment()
	}
	// The loop exited by stepping past r.End; re-merge the boundary segment
	// with its predecessor if possible.
	if seg.Ok() {
		s.MergePrev(seg)
	}
}
// MutateFullRange is equivalent to MutateRange, except that if any key in r
// that is visited before f returns false does not correspond to a segment,
// MutateFullRange panics.
func (s *addrSet) MutateFullRange(r addrRange, f func(seg addrIterator) bool) {
	// The range must begin inside an existing segment.
	seg := s.FindSegment(r.Start)
	if !seg.Ok() {
		panic(fmt.Sprintf("missing segment at %v", r.Start))
	}
	// Trim the first segment so that f never observes bytes before r.Start.
	seg = s.SplitBefore(seg, r.Start)
	for {
		// Trim the current segment so that f never observes bytes past r.End.
		seg = s.SplitAfter(seg, r.End)
		cont := f(seg)
		// Capture the segment's end before MergePrev, which invalidates the
		// old iterator and may change the segment's bounds.
		end := seg.End()
		seg = s.MergePrev(seg)
		if !cont || r.End <= end {
			// Done (f stopped us, or the range is fully covered); attempt
			// the merge with the successor before returning.
			s.MergeNext(seg)
			return
		}
		seg = seg.NextSegment()
		// Full coverage is required: the next segment must start exactly
		// where the previous one ended, with no intervening gap.
		if !seg.Ok() || seg.Start() != end {
			panic(fmt.Sprintf("missing segment at %v", end))
		}
	}
}
@@ -1243,11 +1592,10 @@ func (seg addrIterator) NextGap() addrGapIterator {
// Otherwise, exactly one of the iterators returned by PrevNonEmpty will be
// non-terminal.
func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) {
gap := seg.PrevGap()
if gap.Range().Length() != 0 {
return addrIterator{}, gap
if prev := seg.PrevSegment(); prev.Ok() && prev.End() == seg.Start() {
return prev, addrGapIterator{}
}
return gap.PrevSegment(), addrGapIterator{}
return addrIterator{}, seg.PrevGap()
}
// NextNonEmpty returns the iterated segment's successor if it is adjacent, or
@@ -1256,11 +1604,10 @@ func (seg addrIterator) PrevNonEmpty() (addrIterator, addrGapIterator) {
// Otherwise, exactly one of the iterators returned by NextNonEmpty will be
// non-terminal.
func (seg addrIterator) NextNonEmpty() (addrIterator, addrGapIterator) {
gap := seg.NextGap()
if gap.Range().Length() != 0 {
return addrIterator{}, gap
if next := seg.NextSegment(); next.Ok() && next.Start() == seg.End() {
return next, addrGapIterator{}
}
return gap.NextSegment(), addrGapIterator{}
return addrIterator{}, seg.NextGap()
}
// A GapIterator is conceptually one of:
@@ -1379,35 +1726,36 @@ func (gap addrGapIterator) NextLargeEnoughGap(minSize uintptr) addrGapIterator {
//
// Preconditions: gap is NOT the trailing gap of a non-leaf node.
func (gap addrGapIterator) nextLargeEnoughGapHelper(minSize uintptr) addrGapIterator {
for {
for gap.node != nil &&
(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
if gap.node == nil {
return addrGapIterator{}
}
gap.index++
for gap.index <= gap.node.nrSegments {
if gap.node.hasChildren {
if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
return largeEnoughGap
}
} else {
if gap.Range().Length() >= minSize {
return gap
}
for gap.node != nil &&
(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == gap.node.nrSegments)) {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
gap.index++
}
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
if gap.node != nil && gap.index == gap.node.nrSegments {
if gap.node == nil {
return addrGapIterator{}
}
gap.index++
for gap.index <= gap.node.nrSegments {
if gap.node.hasChildren {
if largeEnoughGap := gap.node.children[gap.index].searchFirstLargeEnoughGap(minSize); largeEnoughGap.Ok() {
return largeEnoughGap
}
} else {
if gap.Range().Length() >= minSize {
return gap
}
}
gap.index++
}
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
if gap.node != nil && gap.index == gap.node.nrSegments {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
}
return gap.nextLargeEnoughGapHelper(minSize)
}
// PrevLargeEnoughGap returns the iterated gap's first prev gap with larger or
@@ -1433,35 +1781,36 @@ func (gap addrGapIterator) PrevLargeEnoughGap(minSize uintptr) addrGapIterator {
//
// Preconditions: gap is NOT the first gap of a non-leaf node.
func (gap addrGapIterator) prevLargeEnoughGapHelper(minSize uintptr) addrGapIterator {
for {
for gap.node != nil &&
(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
if gap.node == nil {
return addrGapIterator{}
}
gap.index--
for gap.index >= 0 {
if gap.node.hasChildren {
if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
return largeEnoughGap
}
} else {
if gap.Range().Length() >= minSize {
return gap
}
for gap.node != nil &&
(gap.node.maxGap.Get() < minSize || (!gap.node.hasChildren && gap.index == 0)) {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
gap.index--
}
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
if gap.node != nil && gap.index == 0 {
if gap.node == nil {
return addrGapIterator{}
}
gap.index--
for gap.index >= 0 {
if gap.node.hasChildren {
if largeEnoughGap := gap.node.children[gap.index].searchLastLargeEnoughGap(minSize); largeEnoughGap.Ok() {
return largeEnoughGap
}
} else {
if gap.Range().Length() >= minSize {
return gap
}
}
gap.index--
}
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
if gap.node != nil && gap.index == 0 {
gap.node, gap.index = gap.node.parent, gap.node.parentIndex
}
}
return gap.prevLargeEnoughGapHelper(minSize)
}
// segmentBeforePosition returns the predecessor segment of the position given
@@ -1545,50 +1894,49 @@ func (n *addrnode) writeDebugString(buf *bytes.Buffer, prefix string) {
}
}
// SegmentDataSlices represents segments from a set as slices of start, end, and
// values. SegmentDataSlices is primarily used as an intermediate representation
// for save/restore and the layout here is optimized for that.
// FlatSegment represents a segment as a single object. FlatSegment is used as
// an intermediate representation for save/restore and tests.
//
// +stateify savable
type addrSegmentDataSlices struct {
Start []uintptr
End []uintptr
Values []*objectEncodeState
type addrFlatSegment struct {
Start uintptr
End uintptr
Value *objectEncodeState
}
// ExportSortedSlices returns a copy of all segments in the given set, in
// ascending key order.
func (s *addrSet) ExportSortedSlices() *addrSegmentDataSlices {
var sds addrSegmentDataSlices
// ExportSlice returns a copy of all segments in the given set, in ascending
// key order.
func (s *addrSet) ExportSlice() []addrFlatSegment {
var fs []addrFlatSegment
for seg := s.FirstSegment(); seg.Ok(); seg = seg.NextSegment() {
sds.Start = append(sds.Start, seg.Start())
sds.End = append(sds.End, seg.End())
sds.Values = append(sds.Values, seg.Value())
fs = append(fs, addrFlatSegment{
Start: seg.Start(),
End: seg.End(),
Value: seg.Value(),
})
}
sds.Start = sds.Start[:len(sds.Start):len(sds.Start)]
sds.End = sds.End[:len(sds.End):len(sds.End)]
sds.Values = sds.Values[:len(sds.Values):len(sds.Values)]
return &sds
return fs
}
// ImportSortedSlices initializes the given set from the given slice.
// ImportSlice initializes the given set from the given slice.
//
// Preconditions:
// - s must be empty.
// - sds must represent a valid set (the segments in sds must have valid
// - fs must represent a valid set (the segments in fs must have valid
// lengths that do not overlap).
// - The segments in sds must be sorted in ascending key order.
func (s *addrSet) ImportSortedSlices(sds *addrSegmentDataSlices) error {
// - The segments in fs must be sorted in ascending key order.
func (s *addrSet) ImportSlice(fs []addrFlatSegment) error {
if !s.IsEmpty() {
return fmt.Errorf("cannot import into non-empty set %v", s)
}
gap := s.FirstGap()
for i := range sds.Start {
r := addrRange{sds.Start[i], sds.End[i]}
for i := range fs {
f := &fs[i]
r := addrRange{f.Start, f.End}
if !gap.Range().IsSupersetOf(r) {
return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: [%d, %d) => %v", sds.Start[i], sds.End[i], sds.Values[i])
return fmt.Errorf("segment overlaps a preceding segment or is incorrectly sorted: %v => %v", r, f.Value)
}
gap = s.InsertWithoutMerging(gap, r, sds.Values[i]).NextGap()
gap = s.InsertWithoutMerging(gap, r, f.Value).NextGap()
}
return nil
}
@@ -1632,12 +1980,15 @@ func (s *addrSet) countSegments() (segments int) {
}
return segments
}
func (s *addrSet) saveRoot() *addrSegmentDataSlices {
return s.ExportSortedSlices()
func (s *addrSet) saveRoot() []addrFlatSegment {
fs := s.ExportSlice()
fs = fs[:len(fs):len(fs)]
return fs
}
func (s *addrSet) loadRoot(sds *addrSegmentDataSlices) {
if err := s.ImportSortedSlices(sds); err != nil {
func (s *addrSet) loadRoot(_ context.Context, fs []addrFlatSegment) {
if err := s.ImportSlice(fs); err != nil {
panic(err)
}
}

View File

@@ -244,7 +244,7 @@ func (ds *decodeState) waitObject(ods *objectDecodeState, encoded wire.Object, c
// See decodeObject; we need to wait for the array (if non-nil).
ds.wait(ods, objectID(sv.Ref.Root), callback)
} else if iv, ok := encoded.(*wire.Interface); ok {
// It's an interface (wait recurisvely).
// It's an interface (wait recursively).
ds.waitObject(ods, iv.Value, callback)
} else if callback != nil {
// Nothing to wait for: execute the callback immediately.
@@ -385,7 +385,7 @@ func (ds *decodeState) decodeStruct(ods *objectDecodeState, obj reflect.Value, e
if sl, ok := obj.Addr().Interface().(SaverLoader); ok {
// Note: may be a registered empty struct which does not
// implement the saver/loader interfaces.
sl.StateLoad(Source{internal: od})
sl.StateLoad(ds.ctx, Source{internal: od})
}
}
@@ -567,7 +567,7 @@ func (ds *decodeState) decodeObject(ods *objectDecodeState, obj reflect.Value, e
case *wire.Interface:
ds.decodeInterface(ods, obj, x)
default:
// Shoud not happen, not propagated as an error.
// Should not happen, not propagated as an error.
Failf("unknown object %#v for %q", encoded, obj.Type().Name())
}
}
@@ -691,7 +691,7 @@ func (ds *decodeState) Load(obj reflect.Value) {
}
}
}); err != nil {
Failf("error executing callbacks for %#v: %w", ods.obj.Interface(), err)
Failf("error executing callbacks: %w\nfor object %#v", err, ods.obj.Interface())
}
// Check if we have any remaining dependency cycles. If there are any

View File

@@ -31,7 +31,7 @@ type objectEncodeState struct {
// obj is the object value. Note that this may be replaced if we
// encounter an object that contains this object. When this happens (in
// resolve), we will update existing references approprately, below,
// resolve), we will update existing references appropriately, below,
// and defer a re-encoding of the object.
obj reflect.Value
@@ -417,7 +417,7 @@ func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) [
Failf("no field in root type %v contains target type %v", rootType, targetType)
case reflect.Array:
// Since arrays have homogenous types, all elements have the
// Since arrays have homogeneous types, all elements have the
// same size and we can compute where the target lives. This
// does not matter for the purpose of typing, but matters for
// the purpose of computing the address of the given index.
@@ -432,7 +432,7 @@ func traverse(rootType, targetType reflect.Type, rootAddr, targetAddr uintptr) [
default:
// For any other type, there's no possibility of aliasing so if
// the types didn't match earlier then we have an addresss
// the types didn't match earlier then we have an address
// collision which shouldn't be possible at this point.
Failf("traverse failed for root type %v and target type %v", rootType, targetType)
}

View File

@@ -211,7 +211,7 @@ type SaverLoader interface {
StateSave(Sink)
// StateLoad loads the state of the object.
StateLoad(Source)
StateLoad(context.Context, Source)
}
// Source is used for Type.StateLoad.

View File

@@ -198,7 +198,7 @@ var singleFieldOrder = []int{0}
// Lookup looks up or registers the given object.
//
// First, the typeID is searched to see if this has already been appropriately
// reconciled. If no, then a reconcilation will take place that may result in a
// reconciled. If no, then a reconciliation will take place that may result in a
// field ordering. If a nil reconciledTypeEntry is returned from this method,
// then the object does not support the Type interface.
//

View File

@@ -1,4 +1,4 @@
// Copyright 2018 The gVisor Authors.
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
package stack
package sync
// StackFromEnv is the global stack created in restore run.
// FIXME(b/36201077)
var StackFromEnv *Stack
// MemoryFenceReads ensures that all preceding memory loads happen before
// following memory loads.
func MemoryFenceReads()

View File

@@ -0,0 +1,26 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build amd64
// +build amd64
#include "textflag.h"
// func MemoryFenceReads()
TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0
// No memory fence is required on x86. However, a compiler fence is
// required to prevent the compiler from reordering memory accesses. The Go
// compiler will not reorder memory accesses around a call to an assembly
// function; compare runtime.publicationBarrier.
RET

View File

@@ -0,0 +1,23 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build arm64
// +build arm64
#include "textflag.h"
// func MemoryFenceReads()
TEXT ·MemoryFenceReads(SB),NOSPLIT|NOFRAME,$0-0
DMB $0x9 // ISHLD
RET

View File

@@ -140,8 +140,8 @@ func (g *Gate) Close() {
// The last call to Leave arrived while we were setting up closingG.
return
}
// WaitReasonSemacquire/TraceEvGoBlockSync are consistent with WaitGroup.
gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceEvGoBlockSync, 0)
// WaitReasonSemacquire/TraceBlockSync are consistent with WaitGroup.
gopark(gateCommit, gohacks.Noescape(unsafe.Pointer(&g.closingG)), WaitReasonSemacquire, TraceBlockSync, 0)
}
//go:norace

View File

@@ -20,10 +20,3 @@ const (
WaitReasonChanReceive uint8 = 14 // +checkconst runtime waitReasonChanReceive
WaitReasonSemacquire uint8 = 18 // +checkconst runtime waitReasonSemacquire
)
// Values for the traceEv argument to gopark, from Go's src/runtime/trace.go.
const (
TraceEvGoBlockRecv byte = 23 // +checkconst runtime traceEvGoBlockRecv
TraceEvGoBlockSelect byte = 24 // +checkconst runtime traceEvGoBlockSelect
TraceEvGoBlockSync byte = 25 // +checkconst runtime traceEvGoBlockSync
)

View File

@@ -0,0 +1,29 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !goexperiment.exectracer2
package sync
// TraceBlockReason constants, from Go's src/runtime/trace.go.
const (
TraceBlockSelect TraceBlockReason = traceEvGoBlockSelect // +checkconst runtime traceBlockSelect
TraceBlockSync = traceEvGoBlockSync // +checkconst runtime traceBlockSync
)
// Tracer event types, from Go's src/runtime/trace.go.
const (
traceEvGoBlockSelect = 24 // +checkconst runtime traceEvGoBlockSelect
traceEvGoBlockSync = 25 // +checkconst runtime traceEvGoBlockSync
)

View File

@@ -0,0 +1,23 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build goexperiment.exectracer2
package sync
// TraceBlockReason constants, from Go's src/runtime/trace2runtime.go.
const (
TraceBlockSelect TraceBlockReason = 3 // +checkconst runtime traceBlockSelect
TraceBlockSync TraceBlockReason = 5 // +checkconst runtime traceBlockSync
)

View File

@@ -0,0 +1,16 @@
// Copyright 2023 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21
package sync
import (
"unsafe"
)
// Use checkoffset to assert that maptype.hasher (the only field we use) has
// the correct offset.
const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset internal/abi MapType.Hasher

View File

@@ -0,0 +1,18 @@
// Copyright 2023 The gVisor Authors.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// runtime.maptype is moved to internal/abi.MapType in Go 1.21.
//
//go:build !go1.21
package sync
import (
"unsafe"
)
// Use checkoffset to assert that maptype.hasher (the only field we use) has
// the correct offset.
const maptypeHasherOffset = unsafe.Offsetof(maptype{}.Hasher) // +checkoffset runtime maptype.hasher

View File

@@ -15,4 +15,4 @@
//go:build !amd64
// This file is intentionally left blank. Other arches don't use
// addrOfSpinning, but we still need an input to the nogo temlate rule.
// addrOfSpinning, but we still need an input to the nogo template rule.

View File

@@ -3,16 +3,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.18 && !go1.22
// +build go1.18,!go1.22
// //go:linkname directives type-checked by checklinkname. Any other
// non-linkname assumptions outside the Go 1 compatibility guarantee should
// have an accompanied vet check or version guard build tag.
// Check type definitions and constants when updating Go version.
//
// TODO(b/165820485): add these checks to checklinkname.
// //go:linkname directives type-checked by checklinkname.
// Runtime type copies checked by checkoffset.
package sync
@@ -37,12 +29,15 @@ func Goyield() {
// splitting and race context are not available where it is called.
//
//go:nosplit
func Gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceEv byte, traceskip int) {
gopark(unlockf, lock, reason, traceEv, traceskip)
func Gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int) {
gopark(unlockf, lock, reason, traceReason, traceskip)
}
//go:linkname gopark runtime.gopark
func gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceEv byte, traceskip int)
func gopark(unlockf func(uintptr, unsafe.Pointer) bool, lock unsafe.Pointer, reason uint8, traceReason TraceBlockReason, traceskip int)
// TraceBlockReason is equivalent to runtime.traceBlockReason.
type TraceBlockReason uint8
//go:linkname wakep runtime.wakep
func wakep()
@@ -107,10 +102,10 @@ func MapKeyHasher(m any) func(unsafe.Pointer, uintptr) uintptr {
panic(fmt.Sprintf("sync.MapKeyHasher: m is %v, not map", rtyp))
}
mtyp := *(**maptype)(unsafe.Pointer(&m))
return mtyp.hasher
return mtyp.Hasher
}
// maptype is equivalent to the beginning of runtime.maptype.
// maptype is equivalent to the beginning of internal/abi.MapType.
type maptype struct {
size uintptr
ptrdata uintptr
@@ -126,7 +121,7 @@ type maptype struct {
key unsafe.Pointer
elem unsafe.Pointer
bucket unsafe.Pointer
hasher func(unsafe.Pointer, uintptr) uintptr
Hasher func(unsafe.Pointer, uintptr) uintptr
// more fields
}

View File

@@ -39,23 +39,6 @@ type SeqCount struct {
// SeqCountEpoch tracks writer critical sections in a SeqCount.
type SeqCountEpoch uint32
// We assume that:
//
// - All functions in sync/atomic that perform a memory read are at least a
// read fence: memory reads before calls to such functions cannot be reordered
// after the call, and memory reads after calls to such functions cannot be
// reordered before the call, even if those reads do not use sync/atomic.
//
// - All functions in sync/atomic that perform a memory write are at least a
// write fence: memory writes before calls to such functions cannot be
// reordered after the call, and memory writes after calls to such functions
// cannot be reordered before the call, even if those writes do not use
// sync/atomic.
//
// As of this writing, the Go memory model completely fails to describe
// sync/atomic, but these properties are implied by
// https://groups.google.com/forum/#!topic/golang-nuts/7EnEhM3U7B8.
// BeginRead indicates the beginning of a reader critical section. Reader
// critical sections DO NOT BLOCK writer critical sections, so operations in a
// reader critical section MAY RACE with writer critical sections. Races are
@@ -104,6 +87,7 @@ func (s *SeqCount) beginReadSlow() SeqCountEpoch {
// Reader critical sections do not need to be explicitly terminated; the last
// call to ReadOk is implicitly the end of the reader critical section.
func (s *SeqCount) ReadOk(epoch SeqCountEpoch) bool {
MemoryFenceReads()
return atomic.LoadUint32(&s.epoch) == uint32(epoch)
}

View File

@@ -179,6 +179,7 @@ func (d *deadlineTimer) setDeadline(cancelCh *chan struct{}, timer **time.Timer,
// "A zero value for t means I/O operations will not time out."
// - net.Conn.SetDeadline
if t.IsZero() {
*timer = nil
return
}
@@ -546,17 +547,15 @@ func DialContextTCP(ctx context.Context, s *stack.Stack, addr tcpip.FullAddress,
// UDPConn wraps a UDP endpoint and its waiter queue; the embedded
// deadlineTimer supplies deadline support.
type UDPConn struct {
	deadlineTimer

	// Diff interleaving had duplicated ep/wq fields plus a removed stack
	// field (a redeclaration error); keep the post-patch field set, which
	// matches the NewUDPConn(&wq, ep) call visible in DialUDP.
	ep tcpip.Endpoint
	wq *waiter.Queue
}
// NewUDPConn creates a new UDPConn.
func NewUDPConn(s *stack.Stack, wq *waiter.Queue, ep tcpip.Endpoint) *UDPConn {
func NewUDPConn(wq *waiter.Queue, ep tcpip.Endpoint) *UDPConn {
c := &UDPConn{
stack: s,
ep: ep,
wq: wq,
ep: ep,
wq: wq,
}
c.deadlineTimer.init()
return c
@@ -586,7 +585,7 @@ func DialUDP(s *stack.Stack, laddr, raddr *tcpip.FullAddress, network tcpip.Netw
}
}
c := NewUDPConn(s, &wq, ep)
c := NewUDPConn(&wq, ep)
if raddr != nil {
if err := c.ep.Connect(*raddr); err != nil {

View File

@@ -30,161 +30,13 @@ func Put(b []byte, xsum uint16) {
binary.BigEndian.PutUint16(b, xsum)
}
// calculateChecksum computes the RFC 1071 internet checksum over buf, folding
// the 32-bit accumulator into 16 bits via Combine.
//
// initial is a partial checksum carried in from earlier data; odd indicates
// that the earlier data covered an odd number of bytes. The returned bool
// reports whether the total byte count so far is odd, for chaining calls.
func calculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) {
	v := initial
	if odd {
		// Complete the 16-bit word begun by the previous chunk's trailing
		// byte: that byte was added in the high position, this one is low.
		v += uint32(buf[0])
		buf = buf[1:]
	}
	l := len(buf)
	odd = l&1 != 0
	if odd {
		// A trailing odd byte occupies the high half of a zero-padded word.
		l--
		v += uint32(buf[l]) << 8
	}
	// Sum big-endian 16-bit words.
	for i := 0; i < l; i += 2 {
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
	}
	return Combine(uint16(v), uint16(v>>16)), odd
}
// unrolledCalculateChecksum computes the same RFC 1071 checksum as
// calculateChecksum, with the word summation manually unrolled into 64-, 32-,
// 16-, 8- and 4-byte chunks for speed.
//
// initial is a partial checksum carried in from earlier data; odd indicates
// that the earlier data covered an odd number of bytes. The returned bool
// reports whether the total byte count so far is odd, for chaining calls.
func unrolledCalculateChecksum(buf []byte, odd bool, initial uint32) (uint16, bool) {
	v := initial
	if odd {
		// Complete the 16-bit word begun by the previous chunk's trailing
		// byte: that byte was added in the high position, this one is low.
		v += uint32(buf[0])
		buf = buf[1:]
	}
	l := len(buf)
	odd = l&1 != 0
	if odd {
		// A trailing odd byte occupies the high half of a zero-padded word.
		l--
		v += uint32(buf[l]) << 8
	}
	// 64 bytes per iteration.
	for (l - 64) >= 0 {
		i := 0
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9])
		v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11])
		v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13])
		v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15])
		i += 16
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9])
		v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11])
		v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13])
		v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15])
		i += 16
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9])
		v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11])
		v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13])
		v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15])
		i += 16
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9])
		v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11])
		v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13])
		v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15])
		buf = buf[64:]
		l = l - 64
	}
	// At most one 32-byte chunk can remain.
	if (l - 32) >= 0 {
		i := 0
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9])
		v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11])
		v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13])
		v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15])
		i += 16
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9])
		v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11])
		v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13])
		v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15])
		buf = buf[32:]
		l = l - 32
	}
	// At most one 16-byte chunk can remain.
	if (l - 16) >= 0 {
		i := 0
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		v += (uint32(buf[i+8]) << 8) + uint32(buf[i+9])
		v += (uint32(buf[i+10]) << 8) + uint32(buf[i+11])
		v += (uint32(buf[i+12]) << 8) + uint32(buf[i+13])
		v += (uint32(buf[i+14]) << 8) + uint32(buf[i+15])
		buf = buf[16:]
		l = l - 16
	}
	// At most one 8-byte chunk can remain.
	if (l - 8) >= 0 {
		i := 0
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		v += (uint32(buf[i+4]) << 8) + uint32(buf[i+5])
		v += (uint32(buf[i+6]) << 8) + uint32(buf[i+7])
		buf = buf[8:]
		l = l - 8
	}
	// At most one 4-byte chunk can remain.
	if (l - 4) >= 0 {
		i := 0
		v += (uint32(buf[i]) << 8) + uint32(buf[i+1])
		v += (uint32(buf[i+2]) << 8) + uint32(buf[i+3])
		buf = buf[4:]
		l = l - 4
	}
	// At this point since l was even before we started unrolling
	// there can be only two bytes left to add.
	if l != 0 {
		v += (uint32(buf[0]) << 8) + uint32(buf[1])
	}
	return Combine(uint16(v), uint16(v>>16)), odd
}
// Old calculates the checksum (as defined in RFC 1071) of the bytes in
// the given byte array. This function uses a non-optimized implementation. Its
// only retained for reference and to use as a benchmark/test. Most code should
// use the header.Checksum function.
//
// The initial checksum must have been computed on an even number of bytes.
func Old(buf []byte, initial uint16) uint16 {
	// Delegate to the simple (non-unrolled) implementation; the odd-byte
	// continuation state is irrelevant for a one-shot computation.
	sum, _ := calculateChecksum(buf, false, uint32(initial))
	return sum
}
// Checksum calculates the checksum (as defined in RFC 1071) of the bytes in the
// given byte array. This function uses an optimized unrolled version of the
// checksum algorithm.
// given byte array. This function uses an optimized version of the checksum
// algorithm.
//
// The initial checksum must have been computed on an even number of bytes.
func Checksum(buf []byte, initial uint16) uint16 {
s, _ := unrolledCalculateChecksum(buf, false, uint32(initial))
s, _ := calculateChecksum(buf, false, initial)
return s
}
@@ -197,7 +49,7 @@ type Checksumer struct {
// Add adds b to checksum.
func (c *Checksumer) Add(b []byte) {
if len(b) > 0 {
c.sum, c.odd = unrolledCalculateChecksum(b, c.odd, uint32(c.sum))
c.sum, c.odd = calculateChecksum(b, c.odd, c.sum)
}
}

View File

@@ -0,0 +1,182 @@
// Copyright 2023 The gVisor Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package checksum
import (
"encoding/binary"
"math/bits"
"unsafe"
)
// calculateChecksum computes the partial checksum (as defined in RFC 1071)
// of buf folded into the partial checksum initial, returning the new partial
// checksum and whether the total number of bytes summed so far is odd.
//
// Note: odd indicates whether initial is a partial checksum over an odd number
// of bytes.
func calculateChecksum(buf []byte, odd bool, initial uint16) (uint16, bool) {
	// Use a larger-than-uint16 accumulator to benefit from parallel summation
	// as described in RFC 1071 1.2.C.
	acc := uint64(initial)

	// Handle an odd number of previously-summed bytes, and get the return
	// value for odd.
	if odd {
		// The previous partial sum ended on a dangling high-order byte, so
		// this byte is its low-order pair and is added without shifting.
		acc += uint64(buf[0])
		buf = buf[1:]
	}
	odd = len(buf)&1 != 0

	// Aligning &buf[0] below is much simpler if len(buf) >= 8; special-case
	// smaller bufs.
	if len(buf) < 8 {
		if len(buf) >= 4 {
			acc += (uint64(buf[0]) << 8) + uint64(buf[1])
			acc += (uint64(buf[2]) << 8) + uint64(buf[3])
			buf = buf[4:]
		}
		if len(buf) >= 2 {
			acc += (uint64(buf[0]) << 8) + uint64(buf[1])
			buf = buf[2:]
		}
		if len(buf) >= 1 {
			// A trailing odd byte occupies the high-order position.
			acc += uint64(buf[0]) << 8
			// buf = buf[1:] is skipped because it's unused and nogo will
			// complain.
		}
		return reduce(acc), odd
	}

	// On little-endian architectures, multi-byte loads from buf will load
	// bytes in the wrong order. Rather than byte-swap after each load (slow),
	// we byte-swap the accumulator before summing any bytes and byte-swap it
	// back before returning, which still produces the correct result as
	// described in RFC 1071 1.2.B "Byte Order Independence".
	//
	// acc is at most a uint16 + a uint8, so its upper 32 bits must be 0s. We
	// preserve this property by byte-swapping only the lower 32 bits of acc,
	// so that additions to acc performed during alignment can't overflow.
	acc = uint64(bswapIfLittleEndian32(uint32(acc)))

	// Align &buf[0] to an 8-byte boundary.
	bswapped := false
	if sliceAddr(buf)&1 != 0 {
		// Compute the rest of the partial checksum with bytes swapped, and
		// swap back before returning; see the last paragraph of
		// RFC 1071 1.2.B.
		acc = uint64(bits.ReverseBytes32(uint32(acc)))
		bswapped = true
		// No `<< 8` here due to the byte swap we just did.
		acc += uint64(bswapIfLittleEndian16(uint16(buf[0])))
		buf = buf[1:]
	}
	if sliceAddr(buf)&2 != 0 {
		// Aligned 2-byte load; byte order is compensated for by the
		// accumulator byte swap above.
		acc += uint64(*(*uint16)(unsafe.Pointer(&buf[0])))
		buf = buf[2:]
	}
	if sliceAddr(buf)&4 != 0 {
		// Aligned 4-byte load.
		acc += uint64(*(*uint32)(unsafe.Pointer(&buf[0])))
		buf = buf[4:]
	}

	// Sum 64 bytes at a time. Beyond this point, additions to acc may
	// overflow, so we have to handle carrying.
	for len(buf) >= 64 {
		var carry uint64
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[16])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[24])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[32])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[40])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[48])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[56])), carry)
		// Fold the final carry back in; equivalent to end-around carry in
		// ones'-complement addition.
		acc, _ = bits.Add64(acc, 0, carry)
		buf = buf[64:]
	}

	// Sum the remaining 0-63 bytes.
	if len(buf) >= 32 {
		var carry uint64
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[16])), carry)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[24])), carry)
		acc, _ = bits.Add64(acc, 0, carry)
		buf = buf[32:]
	}
	if len(buf) >= 16 {
		var carry uint64
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0)
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[8])), carry)
		acc, _ = bits.Add64(acc, 0, carry)
		buf = buf[16:]
	}
	if len(buf) >= 8 {
		var carry uint64
		acc, carry = bits.Add64(acc, *(*uint64)(unsafe.Pointer(&buf[0])), 0)
		acc, _ = bits.Add64(acc, 0, carry)
		buf = buf[8:]
	}
	if len(buf) >= 4 {
		var carry uint64
		acc, carry = bits.Add64(acc, uint64(*(*uint32)(unsafe.Pointer(&buf[0]))), 0)
		acc, _ = bits.Add64(acc, 0, carry)
		buf = buf[4:]
	}
	if len(buf) >= 2 {
		var carry uint64
		acc, carry = bits.Add64(acc, uint64(*(*uint16)(unsafe.Pointer(&buf[0]))), 0)
		acc, _ = bits.Add64(acc, 0, carry)
		buf = buf[2:]
	}
	if len(buf) >= 1 {
		// bswapIfBigEndian16(buf[0]) == bswapIfLittleEndian16(buf[0]<<8).
		var carry uint64
		acc, carry = bits.Add64(acc, uint64(bswapIfBigEndian16(uint16(buf[0]))), 0)
		acc, _ = bits.Add64(acc, 0, carry)
		// buf = buf[1:] is skipped because it's unused and nogo will complain.
	}

	// Reduce the checksum to 16 bits and undo byte swaps before returning.
	acc16 := bswapIfLittleEndian16(reduce(acc))
	if bswapped {
		acc16 = bits.ReverseBytes16(acc16)
	}
	return acc16, odd
}
// reduce folds the 64-bit ones'-complement accumulator acc down to a 16-bit
// checksum value, propagating every carry back into the low bits.
//
// Ideally we would do:
//	return uint16(acc>>48) +' uint16(acc>>32) +' uint16(acc>>16) +' uint16(acc)
// for more instruction-level parallelism; however, there is no
// bits.Add16().
func reduce(acc uint64) uint16 {
	// Fold the upper 32 bits into the lower 32, then absorb the carry.
	folded := acc>>32 + acc&0xffff_ffff       // at most 0x1_ffff_fffe
	low := uint32(folded>>32) + uint32(folded) // at most 0xffff_ffff
	// Fold the upper 16 bits into the lower 16, then absorb the carry.
	low = low>>16 + low&0xffff          // at most 0x1_fffe
	return uint16(low>>16) + uint16(low) // at most 0xffff
}
// bswapIfLittleEndian32 returns val with its bytes reversed on little-endian
// hosts, and val unchanged on big-endian hosts.
func bswapIfLittleEndian32(val uint32) uint32 {
	// Store val in native byte order, then reinterpret those bytes as
	// big-endian: a no-op on big-endian hosts, a byte swap otherwise.
	var raw [4]byte
	*(*uint32)(unsafe.Pointer(&raw[0])) = val
	return binary.BigEndian.Uint32(raw[:])
}
// bswapIfLittleEndian16 returns val with its two bytes swapped on
// little-endian hosts, and val unchanged on big-endian hosts.
func bswapIfLittleEndian16(val uint16) uint16 {
	// Store val in native byte order and reread it as big-endian.
	var raw [2]byte
	*(*uint16)(unsafe.Pointer(&raw[0])) = val
	return binary.BigEndian.Uint16(raw[:])
}
// bswapIfBigEndian16 returns val with its two bytes swapped on big-endian
// hosts, and val unchanged on little-endian hosts.
func bswapIfBigEndian16(val uint16) uint16 {
	// Store val in native byte order and reread it as little-endian.
	var raw [2]byte
	*(*uint16)(unsafe.Pointer(&raw[0])) = val
	return binary.LittleEndian.Uint16(raw[:])
}
// sliceAddr returns the address of buf's underlying array, used by
// calculateChecksum to test and establish 8-byte alignment. unsafe.SliceData
// is used rather than &buf[0] so that this does not panic when len(buf) == 0.
func sliceAddr(buf []byte) uintptr {
	return uintptr(unsafe.Pointer(unsafe.SliceData(buf)))
}

View File

@@ -0,0 +1,3 @@
// automatically generated by stateify.
package checksum

View File

@@ -589,7 +589,7 @@ func (*ErrMissingRequiredFields) isError() {}
func (*ErrMissingRequiredFields) IgnoreStats() bool {
return true
}
func (*ErrMissingRequiredFields) String() string { return "mising required fields" }
func (*ErrMissingRequiredFields) String() string { return "missing required fields" }
// ErrMulticastInputCannotBeOutput indicates that an input interface matches an
// output interface in the same multicast route.

View File

@@ -32,9 +32,9 @@ func PseudoHeaderChecksum(protocol tcpip.TransportProtocolNumber, srcAddr tcpip.
xsum = checksum.Checksum(dstAddr.AsSlice(), xsum)
// Add the length portion of the checksum to the pseudo-checksum.
tmp := make([]byte, 2)
binary.BigEndian.PutUint16(tmp, totalLen)
xsum = checksum.Checksum(tmp, xsum)
var tmp [2]byte
binary.BigEndian.PutUint16(tmp[:], totalLen)
xsum = checksum.Checksum(tmp[:], xsum)
return checksum.Checksum([]byte{0, uint8(protocol)}, xsum)
}
@@ -57,6 +57,9 @@ func checksumUpdate2ByteAlignedUint16(xsum, old, new uint16) uint16 {
// checksum C, the new checksum C' is:
//
// C' = C + (-m) + m' = C + (m' - m)
if old == new {
return xsum
}
return checksum.Combine(xsum, checksum.Combine(new, ^old))
}

View File

@@ -46,6 +46,9 @@ const (
// EthernetMinimumSize is the minimum size of a valid ethernet frame.
EthernetMinimumSize = 14
// EthernetMaximumSize is the maximum size of a valid ethernet frame.
EthernetMaximumSize = 18
// EthernetAddressSize is the size, in bytes, of an ethernet address.
EthernetAddressSize = 6
@@ -82,7 +85,7 @@ const (
// capture all traffic.
EthernetProtocolAll tcpip.NetworkProtocolNumber = 0x0003
// EthernetProtocolPUP is the PARC Universial Packet protocol ethertype.
// EthernetProtocolPUP is the PARC Universal Packet protocol ethertype.
EthernetProtocolPUP tcpip.NetworkProtocolNumber = 0x0200
)

View File

@@ -3,6 +3,8 @@
package header
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -36,10 +38,10 @@ func (t *TCPSynOptions) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(6, &t.Flags)
}
func (t *TCPSynOptions) afterLoad() {}
func (t *TCPSynOptions) afterLoad(context.Context) {}
// +checklocksignore
func (t *TCPSynOptions) StateLoad(stateSourceObject state.Source) {
func (t *TCPSynOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &t.MSS)
stateSourceObject.Load(1, &t.WS)
stateSourceObject.Load(2, &t.TS)
@@ -69,10 +71,10 @@ func (r *SACKBlock) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &r.End)
}
func (r *SACKBlock) afterLoad() {}
func (r *SACKBlock) afterLoad(context.Context) {}
// +checklocksignore
func (r *SACKBlock) StateLoad(stateSourceObject state.Source) {
func (r *SACKBlock) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &r.Start)
stateSourceObject.Load(1, &r.End)
}
@@ -101,10 +103,10 @@ func (t *TCPOptions) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(3, &t.SACKBlocks)
}
func (t *TCPOptions) afterLoad() {}
func (t *TCPOptions) afterLoad(context.Context) {}
// +checklocksignore
func (t *TCPOptions) StateLoad(stateSourceObject state.Source) {
func (t *TCPOptions) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &t.TS)
stateSourceObject.Load(1, &t.TSVal)
stateSourceObject.Load(2, &t.TSEcr)

View File

@@ -53,7 +53,7 @@ const (
ICMPv6EchoMinimumSize = 8
// ICMPv6ErrorHeaderSize is the size of an ICMP error packet header,
// as per RFC 4443, Apendix A, item 4 and the errata.
// as per RFC 4443, Appendix A, item 4 and the errata.
// ... all ICMP error messages shall have exactly
// 32 bits of type-specific data, so that receivers can reliably find
// the embedded invoking packet even when they don't recognize the

View File

@@ -378,7 +378,7 @@ func (r IGMPv3ReportGroupAddressRecord) RecordType() IGMPv3ReportRecordType {
return IGMPv3ReportRecordType(r[igmpv3ReportGroupAddressRecordTypeOffset])
}
// AuxDataLen returns the length of the auxillary data in this record.
// AuxDataLen returns the length of the auxiliary data in this record.
func (r IGMPv3ReportGroupAddressRecord) AuxDataLen() int {
return int(r[igmpv3ReportGroupAddressRecordAuxDataLenOffset]) * igmpv3ReportGroupAddressRecordAuxDataLenUnits
}

View File

@@ -1137,9 +1137,7 @@ func (s IPv4OptionsSerializer) Serialize(b []byte) uint8 {
// header ends on a 32 bit boundary. The padding is zero.
padded := padIPv4OptionsLength(total)
b = b[:padded-total]
for i := range b {
b[i] = 0
}
clear(b)
return padded
}

View File

@@ -110,7 +110,7 @@ const (
// IPv6FragmentExtHdrFragmentOffsetBytesPerUnit is the unit size of a Fragment
// extension header's Fragment Offset field. That is, given a Fragment Offset
// of 2, the extension header is indiciating that the fragment's payload
// of 2, the extension header is indicating that the fragment's payload
// starts at the 16th byte in the reassembled packet.
IPv6FragmentExtHdrFragmentOffsetBytesPerUnit = 8
)
@@ -130,9 +130,7 @@ func padIPv6Option(b []byte) {
b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6Pad1ExtHdrOptionIdentifier)
default: // Pad with PadN.
s := b[ipv6ExtHdrOptionPayloadOffset:]
for i := range s {
s[i] = 0
}
clear(s)
b[ipv6ExtHdrOptionTypeOffset] = uint8(ipv6PadNExtHdrOptionIdentifier)
b[ipv6ExtHdrOptionLengthOffset] = uint8(len(s))
}
@@ -317,7 +315,7 @@ func (*IPv6UnknownExtHdrOption) isIPv6ExtHdrOption() {}
//
// The return is of the format (option, done, error). done will be true when
// Next is unable to return anything because the iterator has reached the end of
// the options data, or an error occured.
// the options data, or an error occurred.
func (i *IPv6OptionsExtHdrOptionsIterator) Next() (IPv6ExtHdrOption, bool, error) {
for {
i.optionOffset = i.nextOptionOffset
@@ -462,7 +460,7 @@ func (b IPv6FragmentExtHdr) More() bool {
// ID returns the Identification field.
//
// This value is used to uniquely identify the packet, between a
// souce and destination.
// source and destination.
func (b IPv6FragmentExtHdr) ID() uint32 {
return binary.BigEndian.Uint32(b[ipv6FragmentExtHdrIdentificationOffset:])
}
@@ -568,7 +566,7 @@ func (i *IPv6PayloadIterator) AsRawHeader(consume bool) IPv6RawPayloadHeader {
//
// The return is of the format (header, done, error). done will be true when
// Next is unable to return anything because the iterator has reached the end of
// the payload, or an error occured.
// the payload, or an error occurred.
func (i *IPv6PayloadIterator) Next() (IPv6PayloadHeader, bool, error) {
i.headerOffset = i.nextOffset
i.parseOffset = 0

View File

@@ -422,7 +422,7 @@ func (r MLDv2ReportMulticastAddressRecord) RecordType() MLDv2ReportRecordType {
return MLDv2ReportRecordType(r[mldv2ReportMulticastAddressRecordTypeOffset])
}
// AuxDataLen returns the length of the auxillary data in this record.
// AuxDataLen returns the length of the auxiliary data in this record.
func (r MLDv2ReportMulticastAddressRecord) AuxDataLen() int {
return int(r[mldv2ReportMulticastAddressRecordAuxDataLenOffset]) * mldv2ReportMulticastAddressRecordAuxDataLenUnits
}

View File

@@ -63,7 +63,7 @@ const (
// ndpPrefixInformationLength is the expected length, in bytes, of the
// body of an NDP Prefix Information option, as per RFC 4861 section
// 4.6.2 which specifies that the Length field is 4. Given this, the
// expected length, in bytes, is 30 becuase 4 * lengthByteUnits (8) - 2
// expected length, in bytes, is 30 because 4 * lengthByteUnits (8) - 2
// (Type & Length) = 30.
ndpPrefixInformationLength = 30
@@ -173,7 +173,7 @@ var (
)
// Next returns the next element in the backing NDPOptions, or true if we are
// done, or false if an error occured.
// done, or false if an error occurred.
//
// The return can be read as option, done, error. Note, option should only be
// used if done is false and error is nil.
@@ -339,8 +339,8 @@ func (b NDPOptions) Serialize(s NDPOptionsSerializer) int {
used := o.serializeInto(b[2:])
// Zero out remaining (padding) bytes, if any exists.
for i := used + 2; i < l; i++ {
b[i] = 0
if used+2 < l {
clear(b[used+2 : l])
}
b = b[l:]
@@ -566,9 +566,7 @@ func (o NDPPrefixInformation) serializeInto(b []byte) int {
// Zero out the Reserved2 field.
reserved2 := b[ndpPrefixInformationReserved2Offset:][:ndpPrefixInformationReserved2Length]
for i := range reserved2 {
reserved2[i] = 0
}
clear(reserved2)
return used
}
@@ -687,9 +685,7 @@ func (o NDPRecursiveDNSServer) serializeInto(b []byte) int {
used := copy(b, o)
// Zero out the reserved bytes that are before the Lifetime field.
for i := 0; i < ndpRecursiveDNSServerLifetimeOffset; i++ {
b[i] = 0
}
clear(b[0:ndpRecursiveDNSServerLifetimeOffset])
return used
}
@@ -782,9 +778,7 @@ func (o NDPDNSSearchList) serializeInto(b []byte) int {
used := copy(b, o)
// Zero out the reserved bytes that are before the Lifetime field.
for i := 0; i < ndpDNSSearchListLifetimeOffset; i++ {
b[i] = 0
}
clear(b[0:ndpDNSSearchListLifetimeOffset])
return used
}

View File

@@ -27,7 +27,7 @@ import (
// pkt.Data.
//
// Returns true if the header was successfully parsed.
func ARP(pkt stack.PacketBufferPtr) bool {
func ARP(pkt *stack.PacketBuffer) bool {
_, ok := pkt.NetworkHeader().Consume(header.ARPSize)
if ok {
pkt.NetworkProtocolNumber = header.ARPProtocolNumber
@@ -39,7 +39,7 @@ func ARP(pkt stack.PacketBufferPtr) bool {
// header with the IPv4 header.
//
// Returns true if the header was successfully parsed.
func IPv4(pkt stack.PacketBufferPtr) bool {
func IPv4(pkt *stack.PacketBuffer) bool {
hdr, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
if !ok {
return false
@@ -71,7 +71,7 @@ func IPv4(pkt stack.PacketBufferPtr) bool {
// IPv6 parses an IPv6 packet found in pkt.Data and populates pkt's network
// header with the IPv6 header.
func IPv6(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, fragID uint32, fragOffset uint16, fragMore bool, ok bool) {
func IPv6(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, fragID uint32, fragOffset uint16, fragMore bool, ok bool) {
hdr, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
if !ok {
return 0, 0, 0, false, false
@@ -157,7 +157,7 @@ traverseExtensions:
// header with the UDP header.
//
// Returns true if the header was successfully parsed.
func UDP(pkt stack.PacketBufferPtr) bool {
func UDP(pkt *stack.PacketBuffer) bool {
_, ok := pkt.TransportHeader().Consume(header.UDPMinimumSize)
pkt.TransportProtocolNumber = header.UDPProtocolNumber
return ok
@@ -167,7 +167,7 @@ func UDP(pkt stack.PacketBufferPtr) bool {
// header with the TCP header.
//
// Returns true if the header was successfully parsed.
func TCP(pkt stack.PacketBufferPtr) bool {
func TCP(pkt *stack.PacketBuffer) bool {
// TCP header is variable length, peek at it first.
hdrLen := header.TCPMinimumSize
hdr, ok := pkt.Data().PullUp(hdrLen)
@@ -191,7 +191,7 @@ func TCP(pkt stack.PacketBufferPtr) bool {
// if present.
//
// Returns true if an ICMPv4 header was successfully parsed.
func ICMPv4(pkt stack.PacketBufferPtr) bool {
func ICMPv4(pkt *stack.PacketBuffer) bool {
if _, ok := pkt.TransportHeader().Consume(header.ICMPv4MinimumSize); ok {
pkt.TransportProtocolNumber = header.ICMPv4ProtocolNumber
return true
@@ -203,7 +203,7 @@ func ICMPv4(pkt stack.PacketBufferPtr) bool {
// if present.
//
// Returns true if an ICMPv6 header was successfully parsed.
func ICMPv6(pkt stack.PacketBufferPtr) bool {
func ICMPv6(pkt *stack.PacketBuffer) bool {
hdr, ok := pkt.Data().PullUp(header.ICMPv6MinimumSize)
if !ok {
return false

View File

@@ -216,6 +216,15 @@ const (
// TCPHeaderMaximumSize is the maximum header size of a TCP packet.
TCPHeaderMaximumSize = TCPMinimumSize + TCPOptionsMaximumSize
// TCPTotalHeaderMaximumSize is the maximum size of headers from all layers in
// a TCP packet. It analogous to MAX_TCP_HEADER in Linux.
//
// TODO(b/319936470): Investigate why this needs to be at least 140 bytes. In
// Linux this value is at least 160, but in theory we should be able to use
// 138. In practice anything less than 140 starts to break GSO on gVNIC
// hardware.
TCPTotalHeaderMaximumSize = 160
// TCPProtocolNumber is TCP's transport protocol number.
TCPProtocolNumber tcpip.TransportProtocolNumber = 6
@@ -689,7 +698,7 @@ func Acceptable(segSeq seqnum.Value, segLen seqnum.Size, rcvNxt, rcvAcc seqnum.V
return segSeq.InRange(rcvNxt, rcvAcc.Add(1))
}
// Page 70 of RFC 793 allows packets that can be made "acceptable" by trimming
// the payload, so we'll accept any payload that overlaps the receieve window.
// the payload, so we'll accept any payload that overlaps the receive window.
// segSeq < rcvAcc is more correct according to RFC, however, Linux does it
// differently, it uses segSeq <= rcvAcc, we'd want to keep the same behavior
// as Linux.

View File

@@ -3,6 +3,8 @@
package tcp
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -24,10 +26,10 @@ func (offset *TSOffset) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &offset.milliseconds)
}
func (offset *TSOffset) afterLoad() {}
func (offset *TSOffset) afterLoad(context.Context) {}
// +checklocksignore
func (offset *TSOffset) StateLoad(stateSourceObject state.Source) {
func (offset *TSOffset) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &offset.milliseconds)
}

View File

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// Package channel provides the implemention of channel-based data-link layer
// Package channel provides the implementation of channel-based data-link layer
// endpoints. Such endpoints allow injection of inbound packets and store
// outbound packets in a channel.
package channel
@@ -43,7 +43,7 @@ type NotificationHandle struct {
type queue struct {
// c is the outbound packet channel.
c chan stack.PacketBufferPtr
c chan *stack.PacketBuffer
mu sync.RWMutex
// +checklocks:mu
notify []*NotificationHandle
@@ -58,7 +58,7 @@ func (q *queue) Close() {
q.closed = true
}
func (q *queue) Read() stack.PacketBufferPtr {
func (q *queue) Read() *stack.PacketBuffer {
select {
case p := <-q.c:
return p
@@ -67,7 +67,7 @@ func (q *queue) Read() stack.PacketBufferPtr {
}
}
func (q *queue) ReadContext(ctx context.Context) stack.PacketBufferPtr {
func (q *queue) ReadContext(ctx context.Context) *stack.PacketBuffer {
select {
case pkt := <-q.c:
return pkt
@@ -76,7 +76,7 @@ func (q *queue) ReadContext(ctx context.Context) stack.PacketBufferPtr {
}
}
func (q *queue) Write(pkt stack.PacketBufferPtr) tcpip.Error {
func (q *queue) Write(pkt *stack.PacketBuffer) tcpip.Error {
// q holds the PacketBuffer.
q.mu.RLock()
if q.closed {
@@ -152,7 +152,7 @@ type Endpoint struct {
func New(size int, mtu uint32, linkAddr tcpip.LinkAddress) *Endpoint {
return &Endpoint{
q: &queue{
c: make(chan stack.PacketBufferPtr, size),
c: make(chan *stack.PacketBuffer, size),
},
mtu: mtu,
linkAddr: linkAddr,
@@ -167,20 +167,20 @@ func (e *Endpoint) Close() {
}
// Read does non-blocking read one packet from the outbound packet queue.
func (e *Endpoint) Read() stack.PacketBufferPtr {
func (e *Endpoint) Read() *stack.PacketBuffer {
return e.q.Read()
}
// ReadContext does blocking read for one packet from the outbound packet queue.
// It can be cancelled by ctx, and in this case, it returns nil.
func (e *Endpoint) ReadContext(ctx context.Context) stack.PacketBufferPtr {
func (e *Endpoint) ReadContext(ctx context.Context) *stack.PacketBuffer {
return e.q.ReadContext(ctx)
}
// Drain removes all outbound packets from the channel and counts them.
func (e *Endpoint) Drain() int {
c := 0
for pkt := e.Read(); !pkt.IsNil(); pkt = e.Read() {
for pkt := e.Read(); pkt != nil; pkt = e.Read() {
pkt.DecRef()
c++
}
@@ -194,7 +194,7 @@ func (e *Endpoint) NumQueued() int {
// InjectInbound injects an inbound packet. If the endpoint is not attached, the
// packet is not delivered.
func (e *Endpoint) InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
func (e *Endpoint) InjectInbound(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
e.mu.RLock()
d := e.dispatcher
e.mu.RUnlock()
@@ -287,4 +287,7 @@ func (*Endpoint) ARPHardwareType() header.ARPHardwareType {
}
// AddHeader implements stack.LinkEndpoint.AddHeader.
func (*Endpoint) AddHeader(stack.PacketBufferPtr) {}
func (*Endpoint) AddHeader(*stack.PacketBuffer) {}
// ParseHeader implements stack.LinkEndpoint.ParseHeader.
func (*Endpoint) ParseHeader(*stack.PacketBuffer) bool { return true }

View File

@@ -3,6 +3,8 @@
package channel
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -24,10 +26,10 @@ func (n *NotificationHandle) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &n.n)
}
func (n *NotificationHandle) afterLoad() {}
func (n *NotificationHandle) afterLoad(context.Context) {}
// +checklocksignore
func (n *NotificationHandle) StateLoad(stateSourceObject state.Source) {
func (n *NotificationHandle) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &n.n)
}

View File

@@ -51,7 +51,7 @@ func (e *Endpoint) Init(child stack.LinkEndpoint, embedder stack.NetworkDispatch
}
// DeliverNetworkPacket implements stack.NetworkDispatcher.
func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
e.mu.RLock()
d := e.dispatcher
e.mu.RUnlock()
@@ -61,7 +61,7 @@ func (e *Endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pk
}
// DeliverLinkPacket implements stack.NetworkDispatcher.
func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
func (e *Endpoint) DeliverLinkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
e.mu.RLock()
d := e.dispatcher
e.mu.RUnlock()
@@ -144,6 +144,11 @@ func (e *Endpoint) ARPHardwareType() header.ARPHardwareType {
}
// AddHeader implements stack.LinkEndpoint.AddHeader.
func (e *Endpoint) AddHeader(pkt stack.PacketBufferPtr) {
func (e *Endpoint) AddHeader(pkt *stack.PacketBuffer) {
e.child.AddHeader(pkt)
}
// ParseHeader implements stack.LinkEndpoint.ParseHeader.
func (e *Endpoint) ParseHeader(pkt *stack.PacketBuffer) bool {
return e.child.ParseHeader(pkt)
}

View File

@@ -40,7 +40,7 @@ func New(lower stack.LinkEndpoint) stack.LinkEndpoint {
}
// DeliverNetworkPacket implements stack.NetworkDispatcher.
func (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt stack.PacketBufferPtr) {
func (e *endpoint) DeliverNetworkPacket(protocol tcpip.NetworkProtocolNumber, pkt *stack.PacketBuffer) {
e.Endpoint.DeliverLinkPacket(protocol, pkt)
e.Endpoint.DeliverNetworkPacket(protocol, pkt)

View File

@@ -97,7 +97,7 @@ type TimeoutHandler interface {
// OnReassemblyTimeout will be called with the first fragment (or nil, if the
// first fragment has not been received) of a packet whose reassembly has
// timed out.
OnReassemblyTimeout(pkt stack.PacketBufferPtr)
OnReassemblyTimeout(pkt *stack.PacketBuffer)
}
// NewFragmentation creates a new Fragmentation.
@@ -155,8 +155,8 @@ func NewFragmentation(blockSize uint16, highMemoryLimit, lowMemoryLimit int, rea
// to be given here outside of the FragmentID struct because IPv6 should not use
// the protocol to identify a fragment.
func (f *Fragmentation) Process(
id FragmentID, first, last uint16, more bool, proto uint8, pkt stack.PacketBufferPtr) (
stack.PacketBufferPtr, uint8, bool, error) {
id FragmentID, first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (
*stack.PacketBuffer, uint8, bool, error) {
if first > last {
return nil, 0, false, fmt.Errorf("first=%d is greater than last=%d: %w", first, last, ErrInvalidArgs)
}
@@ -251,12 +251,12 @@ func (f *Fragmentation) release(r *reassembler, timedOut bool) {
if h := f.timeoutHandler; timedOut && h != nil {
h.OnReassemblyTimeout(r.pkt)
}
if !r.pkt.IsNil() {
if r.pkt != nil {
r.pkt.DecRef()
r.pkt = nil
}
for _, h := range r.holes {
if !h.pkt.IsNil() {
if h.pkt != nil {
h.pkt.DecRef()
h.pkt = nil
}
@@ -308,7 +308,7 @@ type PacketFragmenter struct {
//
// reserve is the number of bytes that should be reserved for the headers in
// each generated fragment.
func MakePacketFragmenter(pkt stack.PacketBufferPtr, fragmentPayloadLen uint32, reserve int) PacketFragmenter {
func MakePacketFragmenter(pkt *stack.PacketBuffer, fragmentPayloadLen uint32, reserve int) PacketFragmenter {
// As per RFC 8200 Section 4.5, some IPv6 extension headers should not be
// repeated in each fragment. However we do not currently support any header
// of that kind yet, so the following computation is valid for both IPv4 and
@@ -339,7 +339,7 @@ func MakePacketFragmenter(pkt stack.PacketBufferPtr, fragmentPayloadLen uint32,
// Note that the returned packet will not have its network and link headers
// populated, but space for them will be reserved. The transport header will be
// stored in the packet's data.
func (pf *PacketFragmenter) BuildNextFragment() (stack.PacketBufferPtr, int, int, bool) {
func (pf *PacketFragmenter) BuildNextFragment() (*stack.PacketBuffer, int, int, bool) {
if pf.currentFragment >= pf.fragmentCount {
panic("BuildNextFragment should not be called again after the last fragment was returned")
}

View File

@@ -3,6 +3,8 @@
package fragmentation
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -26,10 +28,10 @@ func (l *reassemblerList) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &l.tail)
}
func (l *reassemblerList) afterLoad() {}
func (l *reassemblerList) afterLoad(context.Context) {}
// +checklocksignore
func (l *reassemblerList) StateLoad(stateSourceObject state.Source) {
func (l *reassemblerList) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &l.head)
stateSourceObject.Load(1, &l.tail)
}
@@ -54,10 +56,10 @@ func (e *reassemblerEntry) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(1, &e.prev)
}
func (e *reassemblerEntry) afterLoad() {}
func (e *reassemblerEntry) afterLoad(context.Context) {}
// +checklocksignore
func (e *reassemblerEntry) StateLoad(stateSourceObject state.Source) {
func (e *reassemblerEntry) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &e.next)
stateSourceObject.Load(1, &e.prev)
}

View File

@@ -30,7 +30,7 @@ type hole struct {
final bool
// pkt is the fragment packet if hole is filled. We keep the whole pkt rather
// than the fragmented payload to prevent binding to specific buffer types.
pkt stack.PacketBufferPtr
pkt *stack.PacketBuffer
}
type reassembler struct {
@@ -43,7 +43,7 @@ type reassembler struct {
filled int
done bool
createdAt tcpip.MonotonicTime
pkt stack.PacketBufferPtr
pkt *stack.PacketBuffer
}
func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {
@@ -60,7 +60,7 @@ func newReassembler(id FragmentID, clock tcpip.Clock) *reassembler {
return r
}
func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt stack.PacketBufferPtr) (stack.PacketBufferPtr, uint8, bool, int, error) {
func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt *stack.PacketBuffer) (*stack.PacketBuffer, uint8, bool, int, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.done {
@@ -145,7 +145,7 @@ func (r *reassembler) process(first, last uint16, more bool, proto uint8, pkt st
// options received in the first fragment should be used - and they should
// override options from following fragments.
if first == 0 {
if !r.pkt.IsNil() {
if r.pkt != nil {
r.pkt.DecRef()
}
r.pkt = pkt.IncRef()

View File

@@ -20,7 +20,7 @@ import (
"gvisor.dev/gvisor/pkg/tcpip"
)
// ForwardingError represents an error that occured while trying to forward
// ForwardingError represents an error that occurred while trying to forward
// a packet.
type ForwardingError interface {
isForwardingError()
@@ -75,7 +75,7 @@ func (*ErrLinkLocalDestinationAddress) isForwardingError() {}
func (*ErrLinkLocalDestinationAddress) String() string { return "link local destination address" }
// ErrHostUnreachable indicates that the destinatino host could not be reached.
// ErrHostUnreachable indicates that the destination host could not be reached.
type ErrHostUnreachable struct{}
func (*ErrHostUnreachable) isForwardingError() {}

View File

@@ -390,7 +390,9 @@ func (g *GenericMulticastProtocolState) MakeAllNonMemberLocked() {
switch g.mode {
case protocolModeV2:
v2ReportBuilder = g.opts.Protocol.NewReportV2Builder()
handler = func(groupAddress tcpip.Address, _ *multicastGroupState) {
handler = func(groupAddress tcpip.Address, info *multicastGroupState) {
info.cancelDelayedReportJob()
// Send a report immediately to announce us leaving the group.
v2ReportBuilder.AddRecord(
MulticastGroupProtocolV2ReportRecordChangeToIncludeMode,

View File

@@ -3,6 +3,8 @@
package ip
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -21,10 +23,10 @@ func (e *ErrMessageTooLong) StateSave(stateSinkObject state.Sink) {
e.beforeSave()
}
func (e *ErrMessageTooLong) afterLoad() {}
func (e *ErrMessageTooLong) afterLoad(context.Context) {}
// +checklocksignore
func (e *ErrMessageTooLong) StateLoad(stateSourceObject state.Source) {
func (e *ErrMessageTooLong) StateLoad(ctx context.Context, stateSourceObject state.Source) {
}
func (e *ErrNoMulticastPendingQueueBufferSpace) StateTypeName() string {
@@ -42,10 +44,10 @@ func (e *ErrNoMulticastPendingQueueBufferSpace) StateSave(stateSinkObject state.
e.beforeSave()
}
func (e *ErrNoMulticastPendingQueueBufferSpace) afterLoad() {}
func (e *ErrNoMulticastPendingQueueBufferSpace) afterLoad(context.Context) {}
// +checklocksignore
func (e *ErrNoMulticastPendingQueueBufferSpace) StateLoad(stateSourceObject state.Source) {
func (e *ErrNoMulticastPendingQueueBufferSpace) StateLoad(ctx context.Context, stateSourceObject state.Source) {
}
func init() {

View File

@@ -66,7 +66,7 @@ type MultiCounterIPForwardingStats struct {
UnknownOutputEndpoint tcpip.MultiCounterStat
// NoMulticastPendingQueueBufferSpace is the number of multicast packets that
// were dropped due to insufficent buffer space in the pending packet queue.
// were dropped due to insufficient buffer space in the pending packet queue.
NoMulticastPendingQueueBufferSpace tcpip.MultiCounterStat
// OutgoingDeviceNoBufferSpace is the number of packets that were dropped due

View File

@@ -116,7 +116,7 @@ func (r *InstalledRoute) SetLastUsedTimestamp(monotonicTime tcpip.MonotonicTime)
// for the entry. For such routes, packets are added to an expiring queue until
// a route is installed.
type PendingRoute struct {
packets []stack.PacketBufferPtr
packets []*stack.PacketBuffer
// expiration is the timestamp at which the pending route should be expired.
//
@@ -265,7 +265,7 @@ func (r *RouteTable) cleanupPendingRoutes() {
func (r *RouteTable) newPendingRoute() PendingRoute {
return PendingRoute{
packets: make([]stack.PacketBufferPtr, 0, r.config.MaxPendingQueueSize),
packets: make([]*stack.PacketBuffer, 0, r.config.MaxPendingQueueSize),
expiration: r.config.Clock.NowMonotonic().Add(DefaultPendingRouteExpiration),
}
}
@@ -326,7 +326,7 @@ func (e GetRouteResultState) String() string {
//
// If the relevant pending route queue is at max capacity, then returns false.
// Otherwise, returns true.
func (r *RouteTable) GetRouteOrInsertPending(key stack.UnicastSourceAndMulticastDestination, pkt stack.PacketBufferPtr) (GetRouteResult, bool) {
func (r *RouteTable) GetRouteOrInsertPending(key stack.UnicastSourceAndMulticastDestination, pkt *stack.PacketBuffer) (GetRouteResult, bool) {
r.installedMu.RLock()
defer r.installedMu.RUnlock()
@@ -374,7 +374,7 @@ func (r *RouteTable) getOrCreatePendingRouteRLocked(key stack.UnicastSourceAndMu
// returned. The caller assumes ownership of these packets and is responsible
// for forwarding and releasing them. If an installed route already exists for
// the provided key, then it is overwritten.
func (r *RouteTable) AddInstalledRoute(key stack.UnicastSourceAndMulticastDestination, route *InstalledRoute) []stack.PacketBufferPtr {
func (r *RouteTable) AddInstalledRoute(key stack.UnicastSourceAndMulticastDestination, route *InstalledRoute) []*stack.PacketBuffer {
r.installedMu.Lock()
defer r.installedMu.Unlock()
r.installedRoutes[key] = route

View File

@@ -243,7 +243,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool {
// of the original packet that caused the ICMP one to be sent. This information
// is used to find out which transport endpoint must be notified about the ICMP
// packet. We only expect the payload, not the enclosing ICMP packet.
func (e *endpoint) handleControl(errInfo stack.TransportError, pkt stack.PacketBufferPtr) {
func (e *endpoint) handleControl(errInfo stack.TransportError, pkt *stack.PacketBuffer) {
h, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
if !ok {
return
@@ -280,7 +280,7 @@ func (e *endpoint) handleControl(errInfo stack.TransportError, pkt stack.PacketB
e.dispatcher.DeliverTransportError(srcAddr, dstAddr, ProtocolNumber, p, errInfo, pkt)
}
func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr) {
func (e *endpoint) handleICMP(pkt *stack.PacketBuffer) {
received := e.stats.icmp.packetsReceived
h := header.ICMPv4(pkt.TransportHeader().Slice())
if len(h) < header.ICMPv4MinimumSize {
@@ -607,7 +607,7 @@ func (*icmpReasonHostUnreachable) isICMPReason() {}
// the problematic packet. It incorporates as much of that packet as
// possible as well as any error metadata as is available. returnError
// expects pkt to hold a valid IPv4 packet as per the wire format.
func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, deliveredLocally bool) tcpip.Error {
func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliveredLocally bool) tcpip.Error {
origIPHdr := header.IPv4(pkt.NetworkHeader().Slice())
origIPHdrSrc := origIPHdr.SourceAddress()
origIPHdrDst := origIPHdr.DestinationAddress()
@@ -807,7 +807,7 @@ func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, del
}
// OnReassemblyTimeout implements fragmentation.TimeoutHandler.
func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) {
func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
// OnReassemblyTimeout sends a Time Exceeded Message, as per RFC 792:
//
// If a host reassembling a fragmented datagram cannot complete the
@@ -816,7 +816,7 @@ func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) {
//
// If fragment zero is not available then no time exceeded need be sent at
// all.
if !pkt.IsNil() {
if pkt != nil {
p.returnError(&icmpReasonReassemblyTimeout{}, pkt, true /* deliveredLocally */)
}
}

View File

@@ -283,7 +283,7 @@ func (*igmpState) V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration {
func (igmp *igmpState) init(ep *endpoint) {
igmp.ep = ep
igmp.genericMulticastProtocol.Init(&ep.mu, ip.GenericMulticastProtocolOptions{
Rand: ep.protocol.stack.Rand(),
Rand: ep.protocol.stack.InsecureRNG(),
Clock: ep.protocol.stack.Clock(),
Protocol: igmp,
MaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax,
@@ -328,7 +328,7 @@ func (igmp *igmpState) isSourceIPValidLocked(src tcpip.Address, messageType head
}
// +checklocks:igmp.ep.mu
func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageType header.IGMPType, hasRouterAlertOption bool) bool {
func (igmp *igmpState) isPacketValidLocked(pkt *stack.PacketBuffer, messageType header.IGMPType, hasRouterAlertOption bool) bool {
// We can safely assume that the IP header is valid if we got this far.
iph := header.IPv4(pkt.NetworkHeader().Slice())
@@ -346,7 +346,7 @@ func (igmp *igmpState) isPacketValidLocked(pkt stack.PacketBufferPtr, messageTyp
// handleIGMP handles an IGMP packet.
//
// +checklocks:igmp.ep.mu
func (igmp *igmpState) handleIGMP(pkt stack.PacketBufferPtr, hasRouterAlertOption bool) {
func (igmp *igmpState) handleIGMP(pkt *stack.PacketBuffer, hasRouterAlertOption bool) {
received := igmp.ep.stats.igmp.packetsReceived
hdr, ok := pkt.Data().PullUp(pkt.Data().Size())
if !ok {
@@ -521,7 +521,7 @@ func (igmp *igmpState) writePacketInner(buf *buffer.View, reportStat tcpip.Multi
})
defer pkt.DecRef()
addressEndpoint := igmp.ep.acquireOutgoingPrimaryAddressRLocked(destAddress, false /* allowExpired */)
addressEndpoint := igmp.ep.acquireOutgoingPrimaryAddressRLocked(destAddress, tcpip.Address{} /* srcHint */, false /* allowExpired */)
if addressEndpoint == nil {
return false, nil
}
@@ -586,7 +586,7 @@ func (igmp *igmpState) softLeaveAll() {
igmp.genericMulticastProtocol.MakeAllNonMemberLocked()
}
// initializeAll attemps to initialize the IGMP state for each group that has
// initializeAll attempts to initialize the IGMP state for each group that has
// been joined locally.
//
// +checklocks:igmp.ep.mu

View File

@@ -137,7 +137,7 @@ func (e *endpoint) getIGMPVersionLocked() IGMPVersion {
}
// HandleLinkResolutionFailure implements stack.LinkResolvableNetworkEndpoint.
func (e *endpoint) HandleLinkResolutionFailure(pkt stack.PacketBufferPtr) {
func (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) {
// If we are operating as a router, return an ICMP error to the original
// packet's sender.
if pkt.NetworkPacketInfo.IsForwardedPacket {
@@ -437,7 +437,18 @@ func (e *endpoint) NetworkProtocolNumber() tcpip.NetworkProtocolNumber {
return e.protocol.Number()
}
func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketBufferPtr, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) tcpip.Error {
// getID returns a random uint16 number (other than zero) to be used as ID in
// the IPv4 header.
func (e *endpoint) getID() uint16 {
rng := e.protocol.stack.SecureRNG()
id := rng.Uint16()
for id == 0 {
id = rng.Uint16()
}
return id
}
func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, options header.IPv4OptionsSerializer) tcpip.Error {
hdrLen := header.IPv4MinimumSize
var optLen int
if options != nil {
@@ -455,10 +466,9 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketB
// RFC 6864 section 4.3 mandates uniqueness of ID values for non-atomic
// datagrams. Since the DF bit is never being set here, all datagrams
// are non-atomic and need an ID.
id := e.protocol.ids[hashRoute(srcAddr, dstAddr, params.Protocol, e.protocol.hashIV)%buckets].Add(1)
ipH.Encode(&header.IPv4Fields{
TotalLength: uint16(length),
ID: uint16(id),
ID: e.getID(),
TTL: params.TTL,
TOS: params.TOS,
Protocol: uint8(params.Protocol),
@@ -475,7 +485,7 @@ func (e *endpoint) addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketB
// fragment. It returns the number of fragments handled and the number of
// fragments left to be processed. The IP header must already be present in the
// original packet.
func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt stack.PacketBufferPtr, handler func(stack.PacketBufferPtr) tcpip.Error) (int, int, tcpip.Error) {
func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt *stack.PacketBuffer, handler func(*stack.PacketBuffer) tcpip.Error) (int, int, tcpip.Error) {
// Round the MTU down to align to 8 bytes.
fragmentPayloadSize := networkMTU &^ 7
networkHeader := header.IPv4(pkt.NetworkHeader().Slice())
@@ -498,7 +508,7 @@ func (e *endpoint) handleFragments(_ *stack.Route, networkMTU uint32, pkt stack.
}
// WritePacket writes a packet to the given destination address and protocol.
func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt stack.PacketBufferPtr) tcpip.Error {
func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) tcpip.Error {
if err := e.addIPHeader(r.LocalAddress(), r.RemoteAddress(), pkt, params, nil /* options */); err != nil {
return err
}
@@ -506,7 +516,7 @@ func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams,
return e.writePacket(r, pkt)
}
func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error {
func (e *endpoint) writePacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {
netHeader := header.IPv4(pkt.NetworkHeader().Slice())
dstAddr := netHeader.DestinationAddress()
@@ -538,7 +548,7 @@ func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.
return e.writePacketPostRouting(r, pkt, false /* headerIncluded */)
}
func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBufferPtr, headerIncluded bool) tcpip.Error {
func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt *stack.PacketBuffer, headerIncluded bool) tcpip.Error {
if r.Loop()&stack.PacketLoop != 0 {
// If the packet was generated by the stack (not a raw/packet endpoint
// where a packet may be written with the header included), then we can
@@ -573,7 +583,7 @@ func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBuffer
// is set but the packet must be fragmented for the non-forwarding case.
return &tcpip.ErrMessageTooLong{}
}
sent, remain, err := e.handleFragments(r, networkMTU, pkt, func(fragPkt stack.PacketBufferPtr) tcpip.Error {
sent, remain, err := e.handleFragments(r, networkMTU, pkt, func(fragPkt *stack.PacketBuffer) tcpip.Error {
// TODO(gvisor.dev/issue/3884): Evaluate whether we want to send each
// fragment one by one using WritePacket() (current strategy) or if we
// want to create a PacketBufferList from the fragments and feed it to
@@ -594,7 +604,7 @@ func (e *endpoint) writePacketPostRouting(r *stack.Route, pkt stack.PacketBuffer
}
// WriteHeaderIncludedPacket implements stack.NetworkEndpoint.
func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error {
func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {
// The packet already has an IP header, but there are a few required
// checks.
h, ok := pkt.Data().PullUp(header.IPv4MinimumSize)
@@ -628,7 +638,7 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBuf
// non-atomic datagrams, so assign an ID to all such datagrams
// according to the definition given in RFC 6864 section 4.
if ipH.Flags()&header.IPv4FlagDontFragment == 0 || ipH.Flags()&header.IPv4FlagMoreFragments != 0 || ipH.FragmentOffset() > 0 {
ipH.SetID(uint16(e.protocol.ids[hashRoute(r.LocalAddress(), r.RemoteAddress(), 0 /* protocol */, e.protocol.hashIV)%buckets].Add(1)))
ipH.SetID(e.getID())
}
}
@@ -656,7 +666,7 @@ func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBuf
// updating the options.
//
// This method should be invoked by the endpoint that received the pkt.
func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBufferPtr, updateOptions bool) ip.ForwardingError {
func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt *stack.PacketBuffer, updateOptions bool) ip.ForwardingError {
h := header.IPv4(pkt.NetworkHeader().Slice())
stk := e.protocol.stack
@@ -726,7 +736,7 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu
}
// forwardUnicastPacket attempts to forward a packet to its final destination.
func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.ForwardingError {
func (e *endpoint) forwardUnicastPacket(pkt *stack.PacketBuffer) ip.ForwardingError {
hView := pkt.NetworkHeader().View()
defer hView.Release()
h := header.IPv4(hView.AsSlice())
@@ -804,7 +814,7 @@ func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.Forwarding
// HandlePacket is called by the link layer when new ipv4 packets arrive for
// this endpoint.
func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {
stats := e.stats.ip
stats.PacketsReceived.Increment()
@@ -863,7 +873,7 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
// handleLocalPacket is like HandlePacket except it does not perform the
// prerouting iptables hook or check for loopback traffic that originated from
// outside of the netstack (i.e. martian loopback packets).
func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksum bool) {
func (e *endpoint) handleLocalPacket(pkt *stack.PacketBuffer, canSkipRXChecksum bool) {
stats := e.stats.ip
stats.PacketsReceived.Increment()
@@ -935,7 +945,7 @@ func validateAddressesForForwarding(h header.IPv4) ip.ForwardingError {
//
// This method should be invoked for incoming multicast packets using the
// endpoint that received the packet.
func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt stack.PacketBufferPtr) ip.ForwardingError {
func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt *stack.PacketBuffer) ip.ForwardingError {
if err := validateAddressesForForwarding(h); err != nil {
return err
}
@@ -988,7 +998,7 @@ func (e *endpoint) forwardMulticastPacket(h header.IPv4, pkt stack.PacketBufferP
return &ip.ErrHostUnreachable{}
}
func (e *endpoint) updateOptionsForForwarding(pkt stack.PacketBufferPtr) ip.ForwardingError {
func (e *endpoint) updateOptionsForForwarding(pkt *stack.PacketBuffer) ip.ForwardingError {
h := header.IPv4(pkt.NetworkHeader().Slice())
if opts := h.Options(); len(opts) != 0 {
newOpts, _, optProblem := e.processIPOptions(pkt, opts, &optionUsageForward{})
@@ -1023,7 +1033,7 @@ func (e *endpoint) updateOptionsForForwarding(pkt stack.PacketBufferPtr) ip.Forw
// provided installedRoute.
//
// This method should be invoked by the endpoint that received the pkt.
func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) ip.ForwardingError {
func (e *endpoint) forwardValidatedMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) ip.ForwardingError {
// Per RFC 1812 section 5.2.1.3,
//
// Based on the IP source and destination addresses found in the datagram
@@ -1056,7 +1066,7 @@ func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, in
// of the provided outgoingInterface.
//
// This method should be invoked by the endpoint that received the pkt.
func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBufferPtr, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError {
func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt *stack.PacketBuffer, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError {
h := header.IPv4(pkt.NetworkHeader().Slice())
// Per RFC 1812 section 5.2.1.3,
@@ -1083,7 +1093,7 @@ func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBu
return e.forwardPacketWithRoute(route, pkt, true /* updateOptions */)
}
func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt stack.PacketBufferPtr, inNICName string) {
func (e *endpoint) handleValidatedPacket(h header.IPv4, pkt *stack.PacketBuffer, inNICName string) {
pkt.NICID = e.nic.ID()
// Raw socket packets are delivered based solely on the transport protocol
@@ -1194,7 +1204,7 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) {
stats.Forwarding.Errors.Increment()
}
func (e *endpoint) deliverPacketLocally(h header.IPv4, pkt stack.PacketBufferPtr, inNICName string) {
func (e *endpoint) deliverPacketLocally(h header.IPv4, pkt *stack.PacketBuffer, inNICName string) {
stats := e.stats
// iptables filtering. All packets that reach here are intended for
// this machine and will not be forwarded.
@@ -1352,8 +1362,8 @@ func (e *endpoint) Close() {
// AddAndAcquirePermanentAddress implements stack.AddressableEndpoint.
func (e *endpoint) AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, properties stack.AddressProperties) (stack.AddressEndpoint, tcpip.Error) {
e.mu.RLock()
defer e.mu.RUnlock()
e.mu.Lock()
defer e.mu.Unlock()
ep, err := e.addressableEndpointState.AddAndAcquireAddress(addr, properties, stack.Permanent)
if err == nil {
@@ -1364,7 +1374,7 @@ func (e *endpoint) AddAndAcquirePermanentAddress(addr tcpip.AddressWithPrefix, p
// sendQueuedReports sends queued igmp reports.
//
// +checklocksread:e.mu
// +checklocks:e.mu
// +checklocksalias:e.igmp.ep.mu=e.mu
func (e *endpoint) sendQueuedReports() {
e.igmp.sendQueuedReports()
@@ -1413,18 +1423,18 @@ func (e *endpoint) AcquireAssignedAddress(localAddr tcpip.Address, allowTemp boo
}
// AcquireOutgoingPrimaryAddress implements stack.AddressableEndpoint.
func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {
func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint {
e.mu.RLock()
defer e.mu.RUnlock()
return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, allowExpired)
return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint, allowExpired)
}
// acquireOutgoingPrimaryAddressRLocked is like AcquireOutgoingPrimaryAddress
// but with locking requirements
//
// +checklocksread:e.mu
func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {
return e.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired)
func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint {
return e.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, srcHint, allowExpired)
}
// PrimaryAddresses implements stack.AddressableEndpoint.
@@ -1514,6 +1524,8 @@ type protocol struct {
ids []atomicbitops.Uint32
hashIV uint32
// idTS is the unix timestamp in milliseconds 'ids' was last accessed.
idTS atomicbitops.Int64
fragmentation *fragmentation.Fragmentation
@@ -1704,7 +1716,7 @@ func (p *protocol) MulticastRouteLastUsedTime(addresses stack.UnicastSourceAndMu
return timestamp, nil
}
func (p *protocol) forwardPendingMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) {
func (p *protocol) forwardPendingMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) {
defer pkt.DecRef()
// Attempt to forward the packet using the endpoint that it originally
@@ -1761,7 +1773,7 @@ func (p *protocol) isSubnetLocalBroadcastAddress(addr tcpip.Address) bool {
// returns the parsed IP header.
//
// Returns true if the IP header was successfully parsed.
func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bool) {
func (p *protocol) parseAndValidate(pkt *stack.PacketBuffer) (*buffer.View, bool) {
transProtoNum, hasTransportHdr, ok := p.Parse(pkt)
if !ok {
return nil, false
@@ -1785,7 +1797,7 @@ func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bo
return pkt.NetworkHeader().View(), true
}
func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip.TransportProtocolNumber) {
func (p *protocol) parseTransport(pkt *stack.PacketBuffer, transProtoNum tcpip.TransportProtocolNumber) {
if transProtoNum == header.ICMPv4ProtocolNumber {
// The transport layer will handle transport layer parsing errors.
_ = parse.ICMPv4(pkt)
@@ -1803,7 +1815,7 @@ func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip
}
// Parse implements stack.NetworkProtocol.
func (*protocol) Parse(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
func (*protocol) Parse(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
if ok := parse.IPv4(pkt); !ok {
return 0, false, false
}
@@ -1830,7 +1842,7 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv4Type, code header.ICMPv4
}
// SendRejectionError implements stack.RejectIPv4WithHandler.
func (p *protocol) SendRejectionError(pkt stack.PacketBufferPtr, rejectWith stack.RejectIPv4WithICMPType, inputHook bool) tcpip.Error {
func (p *protocol) SendRejectionError(pkt *stack.PacketBuffer, rejectWith stack.RejectIPv4WithICMPType, inputHook bool) tcpip.Error {
switch rejectWith {
case stack.RejectIPv4WithICMPNetUnreachable:
return p.returnError(&icmpReasonNetworkUnreachable{}, pkt, inputHook)
@@ -1872,7 +1884,7 @@ func calculateNetworkMTU(linkMTU, networkHeaderSize uint32) (uint32, tcpip.Error
return networkMTU - networkHeaderSize, nil
}
func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool {
func packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32) bool {
payload := len(pkt.TransportHeader().Slice()) + pkt.Data().Size()
return pkt.GSOOptions.Type == stack.GSONone && uint32(payload) > networkMTU
}
@@ -1949,7 +1961,7 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol {
return NewProtocolWithOptions(Options{})(s)
}
func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeader header.IPv4) (stack.PacketBufferPtr, bool) {
func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeader header.IPv4) (*stack.PacketBuffer, bool) {
fragPkt, offset, copied, more := pf.BuildNextFragment()
fragPkt.NetworkProtocolNumber = ProtocolNumber
@@ -2290,7 +2302,7 @@ type optionTracker struct {
//
// If there were no errors during parsing, the new set of options is returned as
// a new buffer.
func (e *endpoint) processIPOptions(pkt stack.PacketBufferPtr, opts header.IPv4Options, usage optionsUsage) (header.IPv4Options, optionTracker, *header.IPv4OptParameterProblem) {
func (e *endpoint) processIPOptions(pkt *stack.PacketBuffer, opts header.IPv4Options, usage optionsUsage) (header.IPv4Options, optionTracker, *header.IPv4OptParameterProblem) {
stats := e.stats.ip
optIter := opts.MakeIterator()

View File

@@ -3,6 +3,8 @@
package ipv4
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -21,10 +23,10 @@ func (i *icmpv4DestinationUnreachableSockError) StateSave(stateSinkObject state.
i.beforeSave()
}
func (i *icmpv4DestinationUnreachableSockError) afterLoad() {}
func (i *icmpv4DestinationUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4DestinationUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4DestinationUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
}
func (i *icmpv4DestinationHostUnreachableSockError) StateTypeName() string {
@@ -45,10 +47,10 @@ func (i *icmpv4DestinationHostUnreachableSockError) StateSave(stateSinkObject st
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
}
func (i *icmpv4DestinationHostUnreachableSockError) afterLoad() {}
func (i *icmpv4DestinationHostUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4DestinationHostUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
}
@@ -70,10 +72,10 @@ func (i *icmpv4DestinationNetUnreachableSockError) StateSave(stateSinkObject sta
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
}
func (i *icmpv4DestinationNetUnreachableSockError) afterLoad() {}
func (i *icmpv4DestinationNetUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4DestinationNetUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4DestinationNetUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
}
@@ -95,10 +97,10 @@ func (i *icmpv4DestinationPortUnreachableSockError) StateSave(stateSinkObject st
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
}
func (i *icmpv4DestinationPortUnreachableSockError) afterLoad() {}
func (i *icmpv4DestinationPortUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4DestinationPortUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
}
@@ -120,10 +122,10 @@ func (i *icmpv4DestinationProtoUnreachableSockError) StateSave(stateSinkObject s
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
}
func (i *icmpv4DestinationProtoUnreachableSockError) afterLoad() {}
func (i *icmpv4DestinationProtoUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4DestinationProtoUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4DestinationProtoUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
}
@@ -145,10 +147,10 @@ func (i *icmpv4SourceRouteFailedSockError) StateSave(stateSinkObject state.Sink)
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
}
func (i *icmpv4SourceRouteFailedSockError) afterLoad() {}
func (i *icmpv4SourceRouteFailedSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4SourceRouteFailedSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4SourceRouteFailedSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
}
@@ -170,10 +172,10 @@ func (i *icmpv4SourceHostIsolatedSockError) StateSave(stateSinkObject state.Sink
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
}
func (i *icmpv4SourceHostIsolatedSockError) afterLoad() {}
func (i *icmpv4SourceHostIsolatedSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4SourceHostIsolatedSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4SourceHostIsolatedSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
}
@@ -195,10 +197,10 @@ func (i *icmpv4DestinationHostUnknownSockError) StateSave(stateSinkObject state.
stateSinkObject.Save(0, &i.icmpv4DestinationUnreachableSockError)
}
func (i *icmpv4DestinationHostUnknownSockError) afterLoad() {}
func (i *icmpv4DestinationHostUnknownSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv4DestinationHostUnknownSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv4DestinationHostUnknownSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv4DestinationUnreachableSockError)
}
@@ -222,10 +224,10 @@ func (e *icmpv4FragmentationNeededSockError) StateSave(stateSinkObject state.Sin
stateSinkObject.Save(1, &e.mtu)
}
func (e *icmpv4FragmentationNeededSockError) afterLoad() {}
func (e *icmpv4FragmentationNeededSockError) afterLoad(context.Context) {}
// +checklocksignore
func (e *icmpv4FragmentationNeededSockError) StateLoad(stateSourceObject state.Source) {
func (e *icmpv4FragmentationNeededSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &e.icmpv4DestinationUnreachableSockError)
stateSourceObject.Load(1, &e.mtu)
}

View File

@@ -164,7 +164,7 @@ func (e *endpoint) checkLocalAddress(addr tcpip.Address) bool {
// the original packet that caused the ICMP one to be sent. This information is
// used to find out which transport endpoint must be notified about the ICMP
// packet.
func (e *endpoint) handleControl(transErr stack.TransportError, pkt stack.PacketBufferPtr) {
func (e *endpoint) handleControl(transErr stack.TransportError, pkt *stack.PacketBuffer) {
h, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
if !ok {
return
@@ -267,7 +267,7 @@ func getTargetLinkAddr(it header.NDPOptionIterator) (tcpip.LinkAddress, bool) {
})
}
func isMLDValid(pkt stack.PacketBufferPtr, iph header.IPv6, routerAlert *header.IPv6RouterAlertOption) bool {
func isMLDValid(pkt *stack.PacketBuffer, iph header.IPv6, routerAlert *header.IPv6RouterAlertOption) bool {
// As per RFC 2710 section 3:
// All MLD messages described in this document are sent with a link-local
// IPv6 Source Address, an IPv6 Hop Limit of 1, and an IPv6 Router Alert
@@ -287,7 +287,7 @@ func isMLDValid(pkt stack.PacketBufferPtr, iph header.IPv6, routerAlert *header.
return true
}
func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr, hasFragmentHeader bool, routerAlert *header.IPv6RouterAlertOption) {
func (e *endpoint) handleICMP(pkt *stack.PacketBuffer, hasFragmentHeader bool, routerAlert *header.IPv6RouterAlertOption) {
sent := e.stats.icmp.packetsSent
received := e.stats.icmp.packetsReceived
h := header.ICMPv6(pkt.TransportHeader().Slice())
@@ -540,6 +540,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr, hasFragmentHeader bool,
//
na.SetSolicitedFlag(!unspecifiedSource)
na.SetOverrideFlag(true)
na.SetRouterFlag(e.Forwarding())
na.SetTargetAddress(targetAddr)
na.Options().Serialize(optsSerializer)
packet.SetChecksum(header.ICMPv6Checksum(header.ICMPv6ChecksumParams{
@@ -595,7 +596,7 @@ func (e *endpoint) handleICMP(pkt stack.PacketBufferPtr, hasFragmentHeader bool,
// We just got an NA from a node that owns an address we are performing
// DAD on, implying the address is not unique. In this case we let the
// stack know so it can handle such a scenario and do nothing furthur with
// stack know so it can handle such a scenario and do nothing further with
// the NDP NA.
//
// We would get an error if the address no longer exists or the address
@@ -913,7 +914,7 @@ func (e *endpoint) LinkAddressRequest(targetAddr, localAddr tcpip.Address, remot
if localAddr.BitLen() == 0 {
// Find an address that we can use as our source address.
addressEndpoint := e.AcquireOutgoingPrimaryAddress(remoteAddr, false /* allowExpired */)
addressEndpoint := e.AcquireOutgoingPrimaryAddress(remoteAddr, tcpip.Address{} /* srcHint */, false /* allowExpired */)
if addressEndpoint == nil {
return &tcpip.ErrNetworkUnreachable{}
}
@@ -960,7 +961,7 @@ type icmpReason interface {
type icmpReasonParameterProblem struct {
code header.ICMPv6Code
// pointer is defined in the RFC 4443 setion 3.4 which reads:
// pointer is defined in the RFC 4443 section 3.4 which reads:
//
// Pointer Identifies the octet offset within the invoking packet
// where the error was detected.
@@ -1052,7 +1053,7 @@ func (*icmpReasonReassemblyTimeout) respondsToMulticast() bool {
// returnError takes an error descriptor and generates the appropriate ICMP
// error packet for IPv6 and sends it.
func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, deliveredLocally bool) tcpip.Error {
func (p *protocol) returnError(reason icmpReason, pkt *stack.PacketBuffer, deliveredLocally bool) tcpip.Error {
origIPHdr := header.IPv6(pkt.NetworkHeader().Slice())
origIPHdrSrc := origIPHdr.SourceAddress()
origIPHdrDst := origIPHdr.DestinationAddress()
@@ -1217,14 +1218,14 @@ func (p *protocol) returnError(reason icmpReason, pkt stack.PacketBufferPtr, del
}
// OnReassemblyTimeout implements fragmentation.TimeoutHandler.
func (p *protocol) OnReassemblyTimeout(pkt stack.PacketBufferPtr) {
func (p *protocol) OnReassemblyTimeout(pkt *stack.PacketBuffer) {
// OnReassemblyTimeout sends a Time Exceeded Message as per RFC 2460 Section
// 4.5:
//
// If the first fragment (i.e., the one with a Fragment Offset of zero) has
// been received, an ICMP Time Exceeded -- Fragment Reassembly Time Exceeded
// message should be sent to the source of that fragment.
if !pkt.IsNil() {
if pkt != nil {
p.returnError(&icmpReasonReassemblyTimeout{}, pkt, true /* deliveredLocally */)
}
}

View File

@@ -16,9 +16,7 @@
package ipv6
import (
"encoding/binary"
"fmt"
"hash/fnv"
"math"
"reflect"
"sort"
@@ -30,7 +28,6 @@ import (
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
"gvisor.dev/gvisor/pkg/tcpip/header/parse"
"gvisor.dev/gvisor/pkg/tcpip/network/hash"
"gvisor.dev/gvisor/pkg/tcpip/network/internal/fragmentation"
"gvisor.dev/gvisor/pkg/tcpip/network/internal/ip"
"gvisor.dev/gvisor/pkg/tcpip/network/internal/multicast"
@@ -286,7 +283,7 @@ func (*endpoint) DuplicateAddressProtocol() tcpip.NetworkProtocolNumber {
}
// HandleLinkResolutionFailure implements stack.LinkResolvableNetworkEndpoint.
func (e *endpoint) HandleLinkResolutionFailure(pkt stack.PacketBufferPtr) {
func (e *endpoint) HandleLinkResolutionFailure(pkt *stack.PacketBuffer) {
// If we are operating as a router, we should return an ICMP error to the
// original packet's sender.
if pkt.NetworkPacketInfo.IsForwardedPacket {
@@ -724,7 +721,7 @@ func (e *endpoint) MaxHeaderLength() uint16 {
return e.nic.MaxHeaderLength() + header.IPv6MinimumSize
}
func addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketBufferPtr, params stack.NetworkHeaderParams, extensionHeaders header.IPv6ExtHdrSerializer) tcpip.Error {
func addIPHeader(srcAddr, dstAddr tcpip.Address, pkt *stack.PacketBuffer, params stack.NetworkHeaderParams, extensionHeaders header.IPv6ExtHdrSerializer) tcpip.Error {
extHdrsLen := extensionHeaders.Length()
length := pkt.Size() + extensionHeaders.Length()
if length > math.MaxUint16 {
@@ -743,7 +740,7 @@ func addIPHeader(srcAddr, dstAddr tcpip.Address, pkt stack.PacketBufferPtr, para
return nil
}
func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool {
func packetMustBeFragmented(pkt *stack.PacketBuffer, networkMTU uint32) bool {
payload := len(pkt.TransportHeader().Slice()) + pkt.Data().Size()
return pkt.GSOOptions.Type == stack.GSONone && uint32(payload) > networkMTU
}
@@ -753,7 +750,7 @@ func packetMustBeFragmented(pkt stack.PacketBufferPtr, networkMTU uint32) bool {
// fragments left to be processed. The IP header must already be present in the
// original packet. The transport header protocol number is required to avoid
// parsing the IPv6 extension headers.
func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt stack.PacketBufferPtr, transProto tcpip.TransportProtocolNumber, handler func(stack.PacketBufferPtr) tcpip.Error) (int, int, tcpip.Error) {
func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt *stack.PacketBuffer, transProto tcpip.TransportProtocolNumber, handler func(*stack.PacketBuffer) tcpip.Error) (int, int, tcpip.Error) {
networkHeader := header.IPv6(pkt.NetworkHeader().Slice())
// TODO(gvisor.dev/issue/3912): Once the Authentication or ESP Headers are
@@ -777,7 +774,7 @@ func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt stack.
pf := fragmentation.MakePacketFragmenter(pkt, fragmentPayloadLen, calculateFragmentReserve(pkt))
defer pf.Release()
id := e.protocol.ids[hashRoute(r, e.protocol.hashIV)%buckets].Add(1)
id := e.getFragmentID()
var n int
for {
@@ -795,7 +792,7 @@ func (e *endpoint) handleFragments(r *stack.Route, networkMTU uint32, pkt stack.
}
// WritePacket writes a packet to the given destination address and protocol.
func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt stack.PacketBufferPtr) tcpip.Error {
func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams, pkt *stack.PacketBuffer) tcpip.Error {
dstAddr := r.RemoteAddress()
if err := addIPHeader(r.LocalAddress(), dstAddr, pkt, params, nil /* extensionHeaders */); err != nil {
return err
@@ -829,7 +826,7 @@ func (e *endpoint) WritePacket(r *stack.Route, params stack.NetworkHeaderParams,
return e.writePacket(r, pkt, params.Protocol, false /* headerIncluded */)
}
func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr, protocol tcpip.TransportProtocolNumber, headerIncluded bool) tcpip.Error {
func (e *endpoint) writePacket(r *stack.Route, pkt *stack.PacketBuffer, protocol tcpip.TransportProtocolNumber, headerIncluded bool) tcpip.Error {
if r.Loop()&stack.PacketLoop != 0 {
// If the packet was generated by the stack (not a raw/packet endpoint
// where a packet may be written with the header included), then we can
@@ -863,7 +860,7 @@ func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr, protoc
// not by routers along a packet's delivery path.
return &tcpip.ErrMessageTooLong{}
}
sent, remain, err := e.handleFragments(r, networkMTU, pkt, protocol, func(fragPkt stack.PacketBufferPtr) tcpip.Error {
sent, remain, err := e.handleFragments(r, networkMTU, pkt, protocol, func(fragPkt *stack.PacketBuffer) tcpip.Error {
// TODO(gvisor.dev/issue/3884): Evaluate whether we want to send each
// fragment one by one using WritePacket() (current strategy) or if we
// want to create a PacketBufferList from the fragments and feed it to
@@ -885,7 +882,7 @@ func (e *endpoint) writePacket(r *stack.Route, pkt stack.PacketBufferPtr, protoc
}
// WriteHeaderIncludedPacket implements stack.NetworkEndpoint.
func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt stack.PacketBufferPtr) tcpip.Error {
func (e *endpoint) WriteHeaderIncludedPacket(r *stack.Route, pkt *stack.PacketBuffer) tcpip.Error {
// The packet already has an IP header, but there are a few required checks.
h, ok := pkt.Data().PullUp(header.IPv6MinimumSize)
if !ok {
@@ -951,7 +948,7 @@ func validateAddressesForForwarding(h header.IPv6) ip.ForwardingError {
// forwardUnicastPacket attempts to forward a unicast packet to its final
// destination.
func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.ForwardingError {
func (e *endpoint) forwardUnicastPacket(pkt *stack.PacketBuffer) ip.ForwardingError {
h := header.IPv6(pkt.NetworkHeader().Slice())
if err := validateAddressesForForwarding(h); err != nil {
@@ -1020,7 +1017,7 @@ func (e *endpoint) forwardUnicastPacket(pkt stack.PacketBufferPtr) ip.Forwarding
// forwardPacketWithRoute emits the pkt using the provided route.
//
// This method should be invoked by the endpoint that received the pkt.
func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBufferPtr) ip.ForwardingError {
func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt *stack.PacketBuffer) ip.ForwardingError {
h := header.IPv6(pkt.NetworkHeader().Slice())
stk := e.protocol.stack
@@ -1072,7 +1069,7 @@ func (e *endpoint) forwardPacketWithRoute(route *stack.Route, pkt stack.PacketBu
// HandlePacket is called by the link layer when new ipv6 packets arrive for
// this endpoint.
func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
func (e *endpoint) HandlePacket(pkt *stack.PacketBuffer) {
stats := e.stats.ip
stats.PacketsReceived.Increment()
@@ -1135,7 +1132,7 @@ func (e *endpoint) HandlePacket(pkt stack.PacketBufferPtr) {
// handleLocalPacket is like HandlePacket except it does not perform the
// prerouting iptables hook or check for loopback traffic that originated from
// outside of the netstack (i.e. martian loopback packets).
func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksum bool) {
func (e *endpoint) handleLocalPacket(pkt *stack.PacketBuffer, canSkipRXChecksum bool) {
stats := e.stats.ip
stats.PacketsReceived.Increment()
@@ -1162,7 +1159,7 @@ func (e *endpoint) handleLocalPacket(pkt stack.PacketBufferPtr, canSkipRXChecksu
//
// This method should be invoked for incoming multicast packets using the
// endpoint that received the packet.
func (e *endpoint) forwardMulticastPacket(h header.IPv6, pkt stack.PacketBufferPtr) ip.ForwardingError {
func (e *endpoint) forwardMulticastPacket(h header.IPv6, pkt *stack.PacketBuffer) ip.ForwardingError {
if err := validateAddressesForForwarding(h); err != nil {
return err
}
@@ -1208,7 +1205,7 @@ func (e *endpoint) forwardMulticastPacket(h header.IPv6, pkt stack.PacketBufferP
// provided installedRoute.
//
// This method should be invoked by the endpoint that received the pkt.
func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) ip.ForwardingError {
func (e *endpoint) forwardValidatedMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) ip.ForwardingError {
// Per RFC 1812 section 5.2.1.3,
//
// Based on the IP source and destination addresses found in the datagram
@@ -1241,7 +1238,7 @@ func (e *endpoint) forwardValidatedMulticastPacket(pkt stack.PacketBufferPtr, in
// of the provided outgoing interface.
//
// This method should be invoked by the endpoint that received the pkt.
func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt stack.PacketBufferPtr, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError {
func (e *endpoint) forwardMulticastPacketForOutgoingInterface(pkt *stack.PacketBuffer, outgoingInterface stack.MulticastRouteOutgoingInterface) ip.ForwardingError {
h := header.IPv6(pkt.NetworkHeader().Slice())
// Per RFC 1812 section 5.2.1.3,
@@ -1302,7 +1299,7 @@ func (e *endpoint) handleForwardingError(err ip.ForwardingError) {
stats.Forwarding.Errors.Increment()
}
func (e *endpoint) handleValidatedPacket(h header.IPv6, pkt stack.PacketBufferPtr, inNICName string) {
func (e *endpoint) handleValidatedPacket(h header.IPv6, pkt *stack.PacketBuffer, inNICName string) {
pkt.NICID = e.nic.ID()
// Raw socket packets are delivered based solely on the transport protocol
@@ -1361,7 +1358,7 @@ func (e *endpoint) handleValidatedPacket(h header.IPv6, pkt stack.PacketBufferPt
}
}
func (e *endpoint) deliverPacketLocally(h header.IPv6, pkt stack.PacketBufferPtr, inNICName string) {
func (e *endpoint) deliverPacketLocally(h header.IPv6, pkt *stack.PacketBuffer, inNICName string) {
stats := e.stats.ip
// iptables filtering. All packets that reach here are intended for
@@ -1377,7 +1374,7 @@ func (e *endpoint) deliverPacketLocally(h header.IPv6, pkt stack.PacketBufferPtr
_ = e.processExtensionHeaders(h, pkt, false /* forwarding */)
}
func (e *endpoint) processExtensionHeader(it *header.IPv6PayloadIterator, pkt *stack.PacketBufferPtr, h header.IPv6, routerAlert **header.IPv6RouterAlertOption, hasFragmentHeader *bool, forwarding bool) (bool, error) {
func (e *endpoint) processExtensionHeader(it *header.IPv6PayloadIterator, pkt **stack.PacketBuffer, h header.IPv6, routerAlert **header.IPv6RouterAlertOption, hasFragmentHeader *bool, forwarding bool) (bool, error) {
stats := e.stats.ip
dstAddr := h.DestinationAddress()
// Keep track of the start of the previous header so we can report the
@@ -1455,7 +1452,7 @@ func (e *endpoint) processExtensionHeader(it *header.IPv6PayloadIterator, pkt *s
// processExtensionHeaders processes the extension headers in the given packet.
// Returns an error if the processing of a header failed or if the packet should
// be discarded.
func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt stack.PacketBufferPtr, forwarding bool) error {
func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt *stack.PacketBuffer, forwarding bool) error {
// Create a VV to parse the packet. We don't plan to modify anything here.
// vv consists of:
// - Any IPv6 header bytes after the first 40 (i.e. extensions).
@@ -1491,7 +1488,7 @@ func (e *endpoint) processExtensionHeaders(h header.IPv6, pkt stack.PacketBuffer
}
}
func (e *endpoint) processIPv6RawPayloadHeader(extHdr *header.IPv6RawPayloadHeader, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr, routerAlert *header.IPv6RouterAlertOption, previousHeaderStart uint32, hasFragmentHeader bool) error {
func (e *endpoint) processIPv6RawPayloadHeader(extHdr *header.IPv6RawPayloadHeader, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer, routerAlert *header.IPv6RouterAlertOption, previousHeaderStart uint32, hasFragmentHeader bool) error {
stats := e.stats.ip
// If the last header in the payload isn't a known IPv6 extension header,
// handle it as if it is transport layer data.å
@@ -1573,7 +1570,7 @@ func (e *endpoint) processIPv6RawPayloadHeader(extHdr *header.IPv6RawPayloadHead
}
}
func (e *endpoint) processIPv6RoutingExtHeader(extHdr *header.IPv6RoutingExtHdr, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr) error {
func (e *endpoint) processIPv6RoutingExtHeader(extHdr *header.IPv6RoutingExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer) error {
// As per RFC 8200 section 4.4, if a node encounters a routing header with
// an unrecognized routing type value, with a non-zero Segments Left
// value, the node must discard the packet and send an ICMP Parameter
@@ -1596,7 +1593,7 @@ func (e *endpoint) processIPv6RoutingExtHeader(extHdr *header.IPv6RoutingExtHdr,
return fmt.Errorf("found unrecognized routing type with non-zero segments left in header = %#v", extHdr)
}
func (e *endpoint) processIPv6DestinationOptionsExtHdr(extHdr *header.IPv6DestinationOptionsExtHdr, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr, dstAddr tcpip.Address) error {
func (e *endpoint) processIPv6DestinationOptionsExtHdr(extHdr *header.IPv6DestinationOptionsExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer, dstAddr tcpip.Address) error {
stats := e.stats.ip
optsIt := extHdr.Iter()
var uopt *header.IPv6UnknownExtHdrOption
@@ -1659,7 +1656,7 @@ func (e *endpoint) processIPv6DestinationOptionsExtHdr(extHdr *header.IPv6Destin
return nil
}
func (e *endpoint) processIPv6HopByHopOptionsExtHdr(extHdr *header.IPv6HopByHopOptionsExtHdr, it *header.IPv6PayloadIterator, pkt stack.PacketBufferPtr, dstAddr tcpip.Address, routerAlert **header.IPv6RouterAlertOption, previousHeaderStart uint32, forwarding bool) error {
func (e *endpoint) processIPv6HopByHopOptionsExtHdr(extHdr *header.IPv6HopByHopOptionsExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBuffer, dstAddr tcpip.Address, routerAlert **header.IPv6RouterAlertOption, previousHeaderStart uint32, forwarding bool) error {
stats := e.stats.ip
// As per RFC 8200 section 4.1, the Hop By Hop extension header is
// restricted to appear immediately after an IPv6 fixed header.
@@ -1741,7 +1738,7 @@ func (e *endpoint) processIPv6HopByHopOptionsExtHdr(extHdr *header.IPv6HopByHopO
return nil
}
func (e *endpoint) processFragmentExtHdr(extHdr *header.IPv6FragmentExtHdr, it *header.IPv6PayloadIterator, pkt *stack.PacketBufferPtr, h header.IPv6) error {
func (e *endpoint) processFragmentExtHdr(extHdr *header.IPv6FragmentExtHdr, it *header.IPv6PayloadIterator, pkt **stack.PacketBuffer, h header.IPv6) error {
stats := e.stats.ip
fragmentFieldOffset := it.ParseOffset()
@@ -2054,10 +2051,10 @@ func (e *endpoint) acquireAddressOrCreateTempLocked(localAddr tcpip.Address, all
}
// AcquireOutgoingPrimaryAddress implements stack.AddressableEndpoint.
func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {
func (e *endpoint) AcquireOutgoingPrimaryAddress(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint {
e.mu.RLock()
defer e.mu.RUnlock()
return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, allowExpired)
return e.acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint, allowExpired)
}
// getLinkLocalAddressRLocked returns a link-local address from the primary list
@@ -2084,7 +2081,9 @@ func (e *endpoint) getLinkLocalAddressRLocked() tcpip.Address {
// but with locking requirements.
//
// Precondition: e.mu must be read locked.
func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address, allowExpired bool) stack.AddressEndpoint {
func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, allowExpired bool) stack.AddressEndpoint {
// TODO(b/309216156): Support IPv6 hints.
// addrCandidate is a candidate for Source Address Selection, as per
// RFC 6724 section 5.
type addrCandidate struct {
@@ -2097,7 +2096,7 @@ func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address
}
if remoteAddr.BitLen() == 0 {
return e.mu.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, allowExpired)
return e.mu.addressableEndpointState.AcquireOutgoingPrimaryAddress(remoteAddr, srcHint, allowExpired)
}
// Create a candidate set of available addresses we can potentially use as a
@@ -2196,7 +2195,7 @@ func (e *endpoint) acquireOutgoingPrimaryAddressRLocked(remoteAddr tcpip.Address
// Return the most preferred address that can have its reference count
// incremented.
for _, c := range cs {
if c.addressEndpoint.IncRef() {
if c.addressEndpoint.TryIncRef() {
return c.addressEndpoint
}
}
@@ -2288,9 +2287,6 @@ type protocol struct {
multicastForwardingDisp stack.MulticastForwardingEventDispatcher
}
ids []atomicbitops.Uint32
hashIV uint32
// defaultTTL is the current default TTL for the protocol. Only the
// uint8 portion of it is meaningful.
defaultTTL atomicbitops.Uint32
@@ -2341,7 +2337,7 @@ func (p *protocol) NewEndpoint(nic stack.NetworkInterface, dispatcher stack.Tran
const maxMulticastSolicit = 3
dadOptions := ip.DADOptions{
Clock: p.stack.Clock(),
SecureRNG: p.stack.SecureRNG(),
SecureRNG: p.stack.SecureRNG().Reader,
NonceSize: nonceSize,
ExtendDADTransmits: maxMulticastSolicit,
Protocol: &e.mu.ndp,
@@ -2562,7 +2558,7 @@ func (p *protocol) DisableMulticastForwarding() {
p.multicastRouteTable.RemoveAllInstalledRoutes()
}
func (p *protocol) forwardPendingMulticastPacket(pkt stack.PacketBufferPtr, installedRoute *multicast.InstalledRoute) {
func (p *protocol) forwardPendingMulticastPacket(pkt *stack.PacketBuffer, installedRoute *multicast.InstalledRoute) {
defer pkt.DecRef()
// Attempt to forward the packet using the endpoint that it originally
@@ -2592,7 +2588,7 @@ func (*protocol) Wait() {}
// for releasing the returned View.
//
// Returns true if the IP header was successfully parsed.
func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bool) {
func (p *protocol) parseAndValidate(pkt *stack.PacketBuffer) (*buffer.View, bool) {
transProtoNum, hasTransportHdr, ok := p.Parse(pkt)
if !ok {
return nil, false
@@ -2612,7 +2608,7 @@ func (p *protocol) parseAndValidate(pkt stack.PacketBufferPtr) (*buffer.View, bo
return pkt.NetworkHeader().View(), true
}
func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip.TransportProtocolNumber) {
func (p *protocol) parseTransport(pkt *stack.PacketBuffer, transProtoNum tcpip.TransportProtocolNumber) {
if transProtoNum == header.ICMPv6ProtocolNumber {
// The transport layer will handle transport layer parsing errors.
_ = parse.ICMPv6(pkt)
@@ -2630,7 +2626,7 @@ func (p *protocol) parseTransport(pkt stack.PacketBufferPtr, transProtoNum tcpip
}
// Parse implements stack.NetworkProtocol.
func (*protocol) Parse(pkt stack.PacketBufferPtr) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
func (*protocol) Parse(pkt *stack.PacketBuffer) (proto tcpip.TransportProtocolNumber, hasTransportHdr bool, ok bool) {
proto, _, fragOffset, fragMore, ok := parse.IPv6(pkt)
if !ok {
return 0, false, false
@@ -2652,7 +2648,7 @@ func (p *protocol) allowICMPReply(icmpType header.ICMPv6Type) bool {
}
// SendRejectionError implements stack.RejectIPv6WithHandler.
func (p *protocol) SendRejectionError(pkt stack.PacketBufferPtr, rejectWith stack.RejectIPv6WithICMPType, inputHook bool) tcpip.Error {
func (p *protocol) SendRejectionError(pkt *stack.PacketBuffer, rejectWith stack.RejectIPv6WithICMPType, inputHook bool) tcpip.Error {
switch rejectWith {
case stack.RejectIPv6WithICMPNoRoute:
return p.returnError(&icmpReasonNetUnreachable{}, pkt, inputHook)
@@ -2749,21 +2745,10 @@ type Options struct {
func NewProtocolWithOptions(opts Options) stack.NetworkProtocolFactory {
opts.NDPConfigs.validate()
ids := hash.RandN32(buckets)
hashIV := hash.RandN32(1)[0]
atomicIds := make([]atomicbitops.Uint32, len(ids))
for i := range ids {
atomicIds[i] = atomicbitops.FromUint32(ids[i])
}
return func(s *stack.Stack) stack.NetworkProtocol {
p := &protocol{
stack: s,
options: opts,
ids: atomicIds,
hashIV: hashIV,
}
p.fragmentation = fragmentation.NewFragmentation(header.IPv6FragmentExtHdrFragmentOffsetBytesPerUnit, fragmentation.HighFragThreshold, fragmentation.LowFragThreshold, ReassembleTimeout, s.Clock(), p)
p.mu.eps = make(map[tcpip.NICID]*endpoint)
@@ -2796,35 +2781,22 @@ func NewProtocol(s *stack.Stack) stack.NetworkProtocol {
return NewProtocolWithOptions(Options{})(s)
}
func calculateFragmentReserve(pkt stack.PacketBufferPtr) int {
func calculateFragmentReserve(pkt *stack.PacketBuffer) int {
return pkt.AvailableHeaderBytes() + len(pkt.NetworkHeader().Slice()) + header.IPv6FragmentHeaderSize
}
// hashRoute calculates a hash value for the given route. It uses the source &
// destination address and 32-bit number to generate the hash.
func hashRoute(r *stack.Route, hashIV uint32) uint32 {
// The FNV-1a was chosen because it is a fast hashing algorithm, and
// cryptographic properties are not needed here.
h := fnv.New32a()
localAddr := r.LocalAddress()
if _, err := h.Write(localAddr.AsSlice()); err != nil {
panic(fmt.Sprintf("Hash.Write: %s, but Hash' implementation of Write is not expected to ever return an error", err))
// getFragmentID returns a random uint32 number (other than zero) to be used as
// fragment ID in the IPv6 header.
func (e *endpoint) getFragmentID() uint32 {
rng := e.protocol.stack.SecureRNG()
id := rng.Uint32()
for id == 0 {
id = rng.Uint32()
}
remoteAddr := r.RemoteAddress()
if _, err := h.Write(remoteAddr.AsSlice()); err != nil {
panic(fmt.Sprintf("Hash.Write: %s, but Hash' implementation of Write is not expected to ever return an error", err))
}
s := make([]byte, 4)
binary.LittleEndian.PutUint32(s, hashIV)
if _, err := h.Write(s); err != nil {
panic(fmt.Sprintf("Hash.Write: %s, but Hash' implementation of Write is not expected ever to return an error", err))
}
return h.Sum32()
return id
}
func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeaders header.IPv6, transportProto tcpip.TransportProtocolNumber, id uint32) (stack.PacketBufferPtr, bool) {
func buildNextFragment(pf *fragmentation.PacketFragmenter, originalIPHeaders header.IPv6, transportProto tcpip.TransportProtocolNumber, id uint32) (*stack.PacketBuffer, bool) {
fragPkt, offset, copied, more := pf.BuildNextFragment()
fragPkt.NetworkProtocolNumber = ProtocolNumber

View File

@@ -3,6 +3,8 @@
package ipv6
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -21,10 +23,10 @@ func (i *icmpv6DestinationUnreachableSockError) StateSave(stateSinkObject state.
i.beforeSave()
}
func (i *icmpv6DestinationUnreachableSockError) afterLoad() {}
func (i *icmpv6DestinationUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv6DestinationUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv6DestinationUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
}
func (i *icmpv6DestinationNetworkUnreachableSockError) StateTypeName() string {
@@ -45,10 +47,10 @@ func (i *icmpv6DestinationNetworkUnreachableSockError) StateSave(stateSinkObject
stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError)
}
func (i *icmpv6DestinationNetworkUnreachableSockError) afterLoad() {}
func (i *icmpv6DestinationNetworkUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv6DestinationNetworkUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv6DestinationNetworkUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError)
}
@@ -70,10 +72,10 @@ func (i *icmpv6DestinationPortUnreachableSockError) StateSave(stateSinkObject st
stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError)
}
func (i *icmpv6DestinationPortUnreachableSockError) afterLoad() {}
func (i *icmpv6DestinationPortUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv6DestinationPortUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv6DestinationPortUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError)
}
@@ -95,10 +97,10 @@ func (i *icmpv6DestinationAddressUnreachableSockError) StateSave(stateSinkObject
stateSinkObject.Save(0, &i.icmpv6DestinationUnreachableSockError)
}
func (i *icmpv6DestinationAddressUnreachableSockError) afterLoad() {}
func (i *icmpv6DestinationAddressUnreachableSockError) afterLoad(context.Context) {}
// +checklocksignore
func (i *icmpv6DestinationAddressUnreachableSockError) StateLoad(stateSourceObject state.Source) {
func (i *icmpv6DestinationAddressUnreachableSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &i.icmpv6DestinationUnreachableSockError)
}
@@ -120,10 +122,10 @@ func (e *icmpv6PacketTooBigSockError) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(0, &e.mtu)
}
func (e *icmpv6PacketTooBigSockError) afterLoad() {}
func (e *icmpv6PacketTooBigSockError) afterLoad(context.Context) {}
// +checklocksignore
func (e *icmpv6PacketTooBigSockError) StateLoad(stateSourceObject state.Source) {
func (e *icmpv6PacketTooBigSockError) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &e.mtu)
}

View File

@@ -230,7 +230,7 @@ func (*mldState) V2QueryMaxRespCodeToV1Delay(code uint16) time.Duration {
func (mld *mldState) init(ep *endpoint) {
mld.ep = ep
mld.genericMulticastProtocol.Init(&ep.mu.RWMutex, ip.GenericMulticastProtocolOptions{
Rand: ep.protocol.stack.Rand(),
Rand: ep.protocol.stack.InsecureRNG(),
Clock: ep.protocol.stack.Clock(),
Protocol: mld,
MaxUnsolicitedReportDelay: UnsolicitedReportIntervalMax,
@@ -308,7 +308,7 @@ func (mld *mldState) softLeaveAll() {
mld.genericMulticastProtocol.MakeAllNonMemberLocked()
}
// initializeAll attemps to initialize the MLD state for each group that has
// initializeAll attempts to initialize the MLD state for each group that has
// been joined locally.
//
// Precondition: mld.ep.mu must be locked.

View File

@@ -1140,12 +1140,6 @@ func (ndp *ndpState) doSLAAC(prefix tcpip.Subnet, pl, vl time.Duration) {
//
// The IPv6 endpoint that ndp belongs to MUST be locked.
func (ndp *ndpState) addAndAcquireSLAACAddr(addr tcpip.AddressWithPrefix, temporary bool, lifetimes stack.AddressLifetimes) stack.AddressEndpoint {
// Inform the integrator that we have a new SLAAC address.
ndpDisp := ndp.ep.protocol.options.NDPDisp
if ndpDisp == nil {
return nil
}
addressEndpoint, err := ndp.ep.addAndAcquirePermanentAddressLocked(addr, stack.AddressProperties{
PEB: stack.FirstPrimaryEndpoint,
ConfigType: stack.AddressConfigSlaac,
@@ -1156,8 +1150,11 @@ func (ndp *ndpState) addAndAcquireSLAACAddr(addr tcpip.AddressWithPrefix, tempor
panic(fmt.Sprintf("ndp: error when adding SLAAC address %+v: %s", addr, err))
}
if disp := ndpDisp.OnAutoGenAddress(ndp.ep.nic.ID(), addr); disp != nil {
addressEndpoint.RegisterDispatcher(disp)
// Inform the integrator that we have a new SLAAC address.
if ndpDisp := ndp.ep.protocol.options.NDPDisp; ndpDisp != nil {
if disp := ndpDisp.OnAutoGenAddress(ndp.ep.nic.ID(), addr); disp != nil {
addressEndpoint.RegisterDispatcher(disp)
}
}
return addressEndpoint
@@ -1172,7 +1169,7 @@ func (ndp *ndpState) addAndAcquireSLAACAddr(addr tcpip.AddressWithPrefix, tempor
// The IPv6 endpoint that ndp belongs to MUST be locked.
func (ndp *ndpState) generateSLAACAddr(prefix tcpip.Subnet, state *slaacPrefixState) bool {
if addressEndpoint := state.stableAddr.addressEndpoint; addressEndpoint != nil {
panic(fmt.Sprintf("ndp: SLAAC prefix %s already has a permenant address %s", prefix, addressEndpoint.AddressWithPrefix()))
panic(fmt.Sprintf("ndp: SLAAC prefix %s already has a permanent address %s", prefix, addressEndpoint.AddressWithPrefix()))
}
// If we have already reached the maximum address generation attempts for the
@@ -1623,7 +1620,7 @@ func (ndp *ndpState) refreshSLAACPrefixLifetimes(prefix tcpip.Subnet, prefixStat
// have been regenerated, or we need to immediately regenerate an address
// due to an update in preferred lifetime.
//
// If each temporay address has already been regenerated, no new temporary
// If each temporary address has already been regenerated, no new temporary
// address is generated. To ensure continuation of temporary SLAAC addresses,
// we manually try to regenerate an address here.
if regenForAddr.BitLen() != 0 || allAddressesRegenerated {
@@ -1823,7 +1820,7 @@ func (ndp *ndpState) startSolicitingRouters() {
// 4861 section 6.3.7.
var delay time.Duration
if ndp.configs.MaxRtrSolicitationDelay > 0 {
delay = time.Duration(ndp.ep.protocol.stack.Rand().Int63n(int64(ndp.configs.MaxRtrSolicitationDelay)))
delay = time.Duration(ndp.ep.protocol.stack.InsecureRNG().Int63n(int64(ndp.configs.MaxRtrSolicitationDelay)))
}
// Protected by ndp.ep.mu.
@@ -1840,7 +1837,7 @@ func (ndp *ndpState) startSolicitingRouters() {
// the unspecified address if no address is assigned
// to the sending interface.
localAddr := header.IPv6Any
if addressEndpoint := ndp.ep.AcquireOutgoingPrimaryAddress(header.IPv6AllRoutersLinkLocalMulticastAddress, false); addressEndpoint != nil {
if addressEndpoint := ndp.ep.AcquireOutgoingPrimaryAddress(header.IPv6AllRoutersLinkLocalMulticastAddress, tcpip.Address{} /* srcHint */, false); addressEndpoint != nil {
localAddr = addressEndpoint.AddressWithPrefix().Address
addressEndpoint.DecRef()
}
@@ -1968,7 +1965,7 @@ func (ndp *ndpState) init(ep *endpoint, dadOptions ip.DADOptions) {
ndp.slaacPrefixes = make(map[tcpip.Subnet]slaacPrefixState)
header.InitialTempIID(ndp.temporaryIIDHistory[:], ndp.ep.protocol.options.TempIIDSeed, ndp.ep.nic.ID())
ndp.temporaryAddressDesyncFactor = time.Duration(ep.protocol.stack.Rand().Int63n(int64(MaxDesyncFactor)))
ndp.temporaryAddressDesyncFactor = time.Duration(ep.protocol.stack.InsecureRNG().Int63n(int64(MaxDesyncFactor)))
}
func (ndp *ndpState) SendDADMessage(addr tcpip.Address, nonce []byte) tcpip.Error {

View File

@@ -23,7 +23,7 @@ type Flags struct {
// LoadBalanced indicates SO_REUSEPORT.
//
// LoadBalanced takes precidence over MostRecent.
// LoadBalanced takes precedence over MostRecent.
LoadBalanced bool
// TupleOnly represents TCP SO_REUSEADDR.

View File

@@ -18,9 +18,8 @@ package ports
import (
"math"
"math/rand"
"gvisor.dev/gvisor/pkg/atomicbitops"
"gvisor.dev/gvisor/pkg/rand"
"gvisor.dev/gvisor/pkg/sync"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
@@ -228,13 +227,6 @@ type PortManager struct {
ephemeralMu sync.RWMutex
firstEphemeral uint16
numEphemeral uint16
// hint is used to pick ports ephemeral ports in a stable order for
// a given port offset.
//
// hint must be accessed using the portHint/incPortHint helpers.
// TODO(gvisor.dev/issue/940): S/R this field.
hint atomicbitops.Uint32
}
// NewPortManager creates new PortManager.
@@ -255,41 +247,13 @@ type PortTester func(port uint16) (good bool, err tcpip.Error)
// possible ephemeral ports, allowing the caller to decide whether a given port
// is suitable for its needs, and stopping when a port is found or an error
// occurs.
func (pm *PortManager) PickEphemeralPort(rng *rand.Rand, testPort PortTester) (port uint16, err tcpip.Error) {
func (pm *PortManager) PickEphemeralPort(rng rand.RNG, testPort PortTester) (port uint16, err tcpip.Error) {
pm.ephemeralMu.RLock()
firstEphemeral := pm.firstEphemeral
numEphemeral := pm.numEphemeral
pm.ephemeralMu.RUnlock()
offset := uint32(rng.Int31n(int32(numEphemeral)))
return pickEphemeralPort(offset, firstEphemeral, numEphemeral, testPort)
}
// portHint atomically reads and returns the pm.hint value.
func (pm *PortManager) portHint() uint32 {
return pm.hint.Load()
}
// incPortHint atomically increments pm.hint by 1.
func (pm *PortManager) incPortHint() {
pm.hint.Add(1)
}
// PickEphemeralPortStable starts at the specified offset + pm.portHint and
// iterates over all ephemeral ports, allowing the caller to decide whether a
// given port is suitable for its needs and stopping when a port is found or an
// error occurs.
func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTester) (port uint16, err tcpip.Error) {
pm.ephemeralMu.RLock()
firstEphemeral := pm.firstEphemeral
numEphemeral := pm.numEphemeral
pm.ephemeralMu.RUnlock()
p, err := pickEphemeralPort(pm.portHint()+offset, firstEphemeral, numEphemeral, testPort)
if err == nil {
pm.incPortHint()
}
return p, err
return pickEphemeralPort(rng.Uint32(), firstEphemeral, numEphemeral, testPort)
}
// pickEphemeralPort starts at the offset specified from the FirstEphemeral port
@@ -297,6 +261,7 @@ func (pm *PortManager) PickEphemeralPortStable(offset uint32, testPort PortTeste
// caller to decide whether a given port is suitable for its needs, and stopping
// when a port is found or an error occurs.
func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester) (port uint16, err tcpip.Error) {
// This implements Algorithm 1 as per RFC 6056 Section 3.3.1.
for i := uint32(0); i < uint32(count); i++ {
port := uint16(uint32(first) + (offset+i)%uint32(count))
ok, err := testPort(port)
@@ -320,7 +285,7 @@ func pickEphemeralPort(offset uint32, first, count uint16, testPort PortTester)
// An optional PortTester can be passed in which if provided will be used to
// test if the picked port can be used. The function should return true if the
// port is safe to use, false otherwise.
func (pm *PortManager) ReservePort(rng *rand.Rand, res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) {
func (pm *PortManager) ReservePort(rng rand.RNG, res Reservation, testPort PortTester) (reservedPort uint16, err tcpip.Error) {
pm.mu.Lock()
defer pm.mu.Unlock()

View File

@@ -3,6 +3,8 @@
package ports
import (
"context"
"gvisor.dev/gvisor/pkg/state"
)
@@ -28,10 +30,10 @@ func (f *Flags) StateSave(stateSinkObject state.Sink) {
stateSinkObject.Save(2, &f.TupleOnly)
}
func (f *Flags) afterLoad() {}
func (f *Flags) afterLoad(context.Context) {}
// +checklocksignore
func (f *Flags) StateLoad(stateSourceObject state.Source) {
func (f *Flags) StateLoad(ctx context.Context, stateSourceObject state.Source) {
stateSourceObject.Load(0, &f.MostRecent)
stateSourceObject.Load(1, &f.LoadBalanced)
stateSourceObject.Load(2, &f.TupleOnly)

View File

@@ -63,6 +63,10 @@ type SocketOptionsHandler interface {
// changed. The handler notifies the writers if the send buffer size is
// increased with setsockopt(2) for TCP endpoints.
WakeupWriters()
// GetAcceptConn returns true if the socket is a TCP socket and is in
// listening state.
GetAcceptConn() bool
}
// DefaultSocketOptionsHandler is an embeddable type that implements no-op
@@ -112,6 +116,11 @@ func (*DefaultSocketOptionsHandler) OnSetReceiveBufferSize(v, oldSz int64) (newS
return v, nil
}
// GetAcceptConn implements SocketOptionsHandler.GetAcceptConn.
func (*DefaultSocketOptionsHandler) GetAcceptConn() bool {
return false
}
// StackHandler holds methods to access the stack options. These must be
// implemented by the stack.
type StackHandler interface {
@@ -742,3 +751,8 @@ func (so *SocketOptions) SetRcvlowat(rcvlowat int32) Error {
so.rcvlowat.Store(rcvlowat)
return nil
}
// GetAcceptConn gets value for SO_ACCEPTCONN option.
func (so *SocketOptions) GetAcceptConn() bool {
return so.handler.GetAcceptConn()
}

View File

@@ -17,7 +17,7 @@ type addressStateRWMutex struct {
var addressStatelockNames []string
// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
// refering to an index within lockNames.
// referring to an index within lockNames.
// Values are specified using the "consts" field of go_template_instance.
type addressStatelockNameIndex int

View File

@@ -1,6 +1,7 @@
package stack
import (
"context"
"fmt"
"gvisor.dev/gvisor/pkg/atomicbitops"
@@ -134,7 +135,7 @@ func (r *addressStateRefs) DecRef(destroy func()) {
}
}
func (r *addressStateRefs) afterLoad() {
func (r *addressStateRefs) afterLoad(context.Context) {
if r.ReadRefs() > 0 {
refs.Register(r)
}

View File

@@ -18,6 +18,7 @@ import (
"fmt"
"gvisor.dev/gvisor/pkg/tcpip"
"gvisor.dev/gvisor/pkg/tcpip/header"
)
func (lifetimes *AddressLifetimes) sanitize() {
@@ -433,7 +434,7 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix {
a.mu.RLock()
defer a.mu.RUnlock()
ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool {
ep := a.acquirePrimaryAddressRLocked(tcpip.Address{}, tcpip.Address{} /* srcHint */, func(ep *addressState) bool {
switch kind := ep.GetKind(); kind {
case Permanent:
return a.networkEndpoint.Enabled() || !a.options.HiddenWhileDisabled
@@ -461,7 +462,34 @@ func (a *AddressableEndpointState) MainAddress() tcpip.AddressWithPrefix {
// valid according to isValid.
//
// +checklocksread:a.mu
func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*addressState) bool) *addressState {
func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(remoteAddr, srcHint tcpip.Address, isValid func(*addressState) bool) *addressState {
// TODO: Move this out into IPv4-specific code.
// IPv6 handles source IP selection elsewhere. We have to do source
// selection only for IPv4, in which case ep is never deprecated. Thus
// we don't have to worry about refcounts.
if remoteAddr.Len() == header.IPv4AddressSize && remoteAddr != (tcpip.Address{}) {
var best *addressState
var bestLen uint8
for _, state := range a.primary {
if !isValid(state) {
continue
}
// Source hint takes precedence over prefix matching.
if state.addr.Address == srcHint && srcHint != (tcpip.Address{}) {
best = state
break
}
stateLen := state.addr.Address.MatchingPrefix(remoteAddr)
if best == nil || bestLen < stateLen {
best = state
bestLen = stateLen
}
}
if best != nil && best.TryIncRef() {
return best
}
}
var deprecatedEndpoint *addressState
for _, ep := range a.primary {
if !isValid(ep) {
@@ -469,7 +497,7 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*ad
}
if !ep.Deprecated() {
if ep.IncRef() {
if ep.TryIncRef() {
// ep is not deprecated, so return it immediately.
//
// If we kept track of a deprecated endpoint, decrement its reference
@@ -486,7 +514,7 @@ func (a *AddressableEndpointState) acquirePrimaryAddressRLocked(isValid func(*ad
return ep
}
} else if deprecatedEndpoint == nil && ep.IncRef() {
} else if deprecatedEndpoint == nil && ep.TryIncRef() {
// We prefer an endpoint that is not deprecated, but we keep track of
// ep in case a doesn't have any non-deprecated endpoints.
//
@@ -518,7 +546,7 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc
return nil
}
if !addrState.IncRef() {
if !addrState.TryIncRef() {
panic(fmt.Sprintf("failed to increase the reference count for address = %s", addrState.addr))
}
@@ -527,7 +555,7 @@ func (a *AddressableEndpointState) AcquireAssignedAddressOrMatching(localAddr tc
if f != nil {
for _, addrState := range a.endpoints {
if addrState.IsAssigned(allowTemp) && f(addrState) && addrState.IncRef() {
if addrState.IsAssigned(allowTemp) && f(addrState) && addrState.TryIncRef() {
return addrState
}
}
@@ -595,11 +623,11 @@ func (a *AddressableEndpointState) AcquireAssignedAddress(localAddr tcpip.Addres
}
// AcquireOutgoingPrimaryAddress implements AddressableEndpoint.
func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, allowExpired bool) AddressEndpoint {
func (a *AddressableEndpointState) AcquireOutgoingPrimaryAddress(remoteAddr tcpip.Address, srcHint tcpip.Address, allowExpired bool) AddressEndpoint {
a.mu.Lock()
defer a.mu.Unlock()
ep := a.acquirePrimaryAddressRLocked(func(ep *addressState) bool {
ep := a.acquirePrimaryAddressRLocked(remoteAddr, srcHint, func(ep *addressState) bool {
return ep.IsAssigned(allowExpired)
})
@@ -782,7 +810,7 @@ func (a *addressState) IsAssigned(allowExpired bool) bool {
}
// IncRef implements AddressEndpoint.
func (a *addressState) IncRef() bool {
func (a *addressState) TryIncRef() bool {
return a.refs.TryIncRef()
}

View File

@@ -17,7 +17,7 @@ type addressableEndpointStateRWMutex struct {
var addressableEndpointStatelockNames []string
// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
// refering to an index within lockNames.
// referring to an index within lockNames.
// Values are specified using the "consts" field of go_template_instance.
type addressableEndpointStatelockNameIndex int

View File

@@ -17,7 +17,7 @@ type bucketRWMutex struct {
var bucketlockNames []string
// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
// refering to an index within lockNames.
// referring to an index within lockNames.
// Values are specified using the "consts" field of go_template_instance.
type bucketlockNameIndex int

View File

@@ -19,7 +19,7 @@ var cleanupEndpointsprefixIndex *locking.MutexClass
var cleanupEndpointslockNames []string
// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
// refering to an index within lockNames.
// referring to an index within lockNames.
// Values are specified using the "consts" field of go_template_instance.
type cleanupEndpointslockNameIndex int

View File

@@ -17,7 +17,7 @@ type connRWMutex struct {
var connlockNames []string
// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
// refering to an index within lockNames.
// referring to an index within lockNames.
// Values are specified using the "consts" field of go_template_instance.
type connlockNameIndex int

View File

@@ -17,7 +17,7 @@ type connTrackRWMutex struct {
var connTracklockNames []string
// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
// refering to an index within lockNames.
// referring to an index within lockNames.
// Values are specified using the "consts" field of go_template_instance.
type connTracklockNameIndex int

View File

@@ -177,7 +177,7 @@ func (cn *conn) timedOut(now tcpip.MonotonicTime) bool {
}
// update the connection tracking state.
func (cn *conn) update(pkt PacketBufferPtr, reply bool) {
func (cn *conn) update(pkt *PacketBuffer, reply bool) {
cn.stateMu.Lock()
defer cn.stateMu.Unlock()
@@ -269,7 +269,7 @@ func v6NetAndTransHdr(icmpPayload []byte, minTransHdrLen int) (header.Network, [
return netHdr, transHdr[:minTransHdrLen]
}
func getEmbeddedNetAndTransHeaders(pkt PacketBufferPtr, netHdrLength int, getNetAndTransHdr netAndTransHeadersFunc, transProto tcpip.TransportProtocolNumber) (header.Network, header.ChecksummableTransport, bool) {
func getEmbeddedNetAndTransHeaders(pkt *PacketBuffer, netHdrLength int, getNetAndTransHdr netAndTransHeadersFunc, transProto tcpip.TransportProtocolNumber) (header.Network, header.ChecksummableTransport, bool) {
switch transProto {
case header.TCPProtocolNumber:
if netAndTransHeader, ok := pkt.Data().PullUp(netHdrLength + header.TCPMinimumSize); ok {
@@ -285,7 +285,7 @@ func getEmbeddedNetAndTransHeaders(pkt PacketBufferPtr, netHdrLength int, getNet
return nil, nil, false
}
func getHeaders(pkt PacketBufferPtr) (netHdr header.Network, transHdr header.Transport, isICMPError bool, ok bool) {
func getHeaders(pkt *PacketBuffer) (netHdr header.Network, transHdr header.Transport, isICMPError bool, ok bool) {
switch pkt.TransportProtocolNumber {
case header.TCPProtocolNumber:
if tcpHeader := header.TCP(pkt.TransportHeader().Slice()); len(tcpHeader) >= header.TCPMinimumSize {
@@ -373,7 +373,7 @@ func getTupleIDForRegularPacket(netHdr header.Network, netProto tcpip.NetworkPro
}
}
func getTupleIDForPacketInICMPError(pkt PacketBufferPtr, getNetAndTransHdr netAndTransHeadersFunc, netProto tcpip.NetworkProtocolNumber, netLen int, transProto tcpip.TransportProtocolNumber) (tupleID, bool) {
func getTupleIDForPacketInICMPError(pkt *PacketBuffer, getNetAndTransHdr netAndTransHeadersFunc, netProto tcpip.NetworkProtocolNumber, netLen int, transProto tcpip.TransportProtocolNumber) (tupleID, bool) {
if netHdr, transHdr, ok := getEmbeddedNetAndTransHeaders(pkt, netLen, getNetAndTransHdr, transProto); ok {
return tupleID{
srcAddr: netHdr.DestinationAddress(),
@@ -396,7 +396,7 @@ const (
getTupleIDOKAndDontAllowNewConn
)
func getTupleIDForEchoPacket(pkt PacketBufferPtr, ident uint16, request bool) tupleID {
func getTupleIDForEchoPacket(pkt *PacketBuffer, ident uint16, request bool) tupleID {
netHdr := pkt.Network()
tid := tupleID{
srcAddr: netHdr.SourceAddress(),
@@ -414,7 +414,7 @@ func getTupleIDForEchoPacket(pkt PacketBufferPtr, ident uint16, request bool) tu
return tid
}
func getTupleID(pkt PacketBufferPtr) (tupleID, getTupleIDDisposition) {
func getTupleID(pkt *PacketBuffer) (tupleID, getTupleIDDisposition) {
switch pkt.TransportProtocolNumber {
case header.TCPProtocolNumber:
if transHeader := header.TCP(pkt.TransportHeader().Slice()); len(transHeader) >= header.TCPMinimumSize {
@@ -504,7 +504,7 @@ func (ct *ConnTrack) init() {
//
// If the packet's protocol is trackable, the connection's state is updated to
// match the contents of the packet.
func (ct *ConnTrack) getConnAndUpdate(pkt PacketBufferPtr, skipChecksumValidation bool) *tuple {
func (ct *ConnTrack) getConnAndUpdate(pkt *PacketBuffer, skipChecksumValidation bool) *tuple {
// Get or (maybe) create a connection.
t := func() *tuple {
var allowNewConn bool
@@ -695,20 +695,41 @@ func (cn *conn) finalize() bool {
}
}
func (cn *conn) maybePerformNoopNAT(dnat bool) {
// If NAT has not been configured for this connection, either mark the
// connection as configured for "no-op NAT", in the case of DNAT, or, in the
// case of SNAT, perform source port remapping so that source ports used by
// locally-generated traffic do not conflict with ports occupied by existing NAT
// bindings.
//
// Note that in the typical case this is also a no-op, because `snatAction`
// will do nothing if the original tuple is already unique.
func (cn *conn) maybePerformNoopNAT(pkt *PacketBuffer, hook Hook, r *Route, dnat bool) {
cn.mu.Lock()
defer cn.mu.Unlock()
var manip *manipType
if dnat {
manip = &cn.destinationManip
} else {
manip = &cn.sourceManip
}
if *manip == manipNotPerformed {
*manip = manipPerformedNoop
if *manip != manipNotPerformed {
cn.mu.Unlock()
_ = cn.handlePacket(pkt, hook, r)
return
}
if dnat {
*manip = manipPerformedNoop
cn.mu.Unlock()
_ = cn.handlePacket(pkt, hook, r)
return
}
cn.mu.Unlock()
// At this point, we know that NAT has not yet been performed on this
// connection, and the DNAT case has been handled with a no-op. For SNAT, we
// simply perform source port remapping to ensure that source ports for
// locally generated traffic do not clash with ports used by existing NAT
// bindings.
_, _ = snatAction(pkt, hook, r, 0, tcpip.Address{}, true /* changePort */, false /* changeAddress */)
}
type portOrIdentRange struct {
@@ -725,7 +746,7 @@ type portOrIdentRange struct {
//
// Generally, only the first packet of a connection reaches this method; other
// packets will be manipulated without needing to modify the connection.
func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIdents portOrIdentRange, natAddress tcpip.Address, dnat bool) {
func (cn *conn) performNAT(pkt *PacketBuffer, hook Hook, r *Route, portsOrIdents portOrIdentRange, natAddress tcpip.Address, dnat, changePort, changeAddress bool) {
lastPortOrIdent := func() uint16 {
lastPortOrIdent := uint32(portsOrIdents.start) + portsOrIdents.size - 1
if lastPortOrIdent > math.MaxUint16 {
@@ -762,12 +783,24 @@ func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIden
return
}
*manip = manipPerformed
*address = natAddress
if changeAddress {
*address = natAddress
}
// Everything below here is port-fiddling.
if !changePort {
return
}
// Does the current port/ident fit in the range?
if portsOrIdents.start <= *portOrIdent && *portOrIdent <= lastPortOrIdent {
// Yes, is the current reply tuple unique?
if other := cn.ct.connForTID(cn.reply.tupleID); other == nil {
//
// Or, does the reply tuple refer to the same connection as the current one that
// we are NATing? This would apply, for example, to a self-connected socket,
// where the original and reply tuples are identical.
other := cn.ct.connForTID(cn.reply.tupleID)
if other == nil || other.conn == cn {
// Yes! No need to change the port.
return
}
@@ -826,7 +859,7 @@ func (cn *conn) performNAT(pkt PacketBufferPtr, hook Hook, r *Route, portsOrIden
// has had NAT performed on it.
//
// Returns true if the packet can skip the NAT table.
func (cn *conn) handlePacket(pkt PacketBufferPtr, hook Hook, rt *Route) bool {
func (cn *conn) handlePacket(pkt *PacketBuffer, hook Hook, rt *Route) bool {
netHdr, transHdr, isICMPError, ok := getHeaders(pkt)
if !ok {
return false

View File

@@ -17,7 +17,7 @@ type endpointsByNICRWMutex struct {
var endpointsByNIClockNames []string
// lockNameIndex is used as an index passed to NestedLock and NestedUnlock,
// refering to an index within lockNames.
// referring to an index within lockNames.
// Values are specified using the "consts" field of go_template_instance.
type endpointsByNIClockNameIndex int

Some files were not shown because too many files have changed in this diff Show More