IP packet fragmentation mechanism: still working through the reassembly logic, and it is giving me a stomachache...

impact-eintr
2022-11-27 22:44:41 +08:00
parent 5e317b273a
commit 553a16f655
9 changed files with 681 additions and 5 deletions


@@ -0,0 +1,77 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fragmentation
import (
"container/heap"
"fmt"
"netstack/tcpip/buffer"
)
type fragment struct {
offset uint16
vv buffer.VectorisedView
}
type fragHeap []fragment
func (h *fragHeap) Len() int {
return len(*h)
}
func (h *fragHeap) Less(i, j int) bool {
return (*h)[i].offset < (*h)[j].offset
}
func (h *fragHeap) Swap(i, j int) {
(*h)[i], (*h)[j] = (*h)[j], (*h)[i]
}
func (h *fragHeap) Push(x interface{}) {
*h = append(*h, x.(fragment))
}
func (h *fragHeap) Pop() interface{} {
old := *h
n := len(old)
x := old[n-1]
*h = old[:n-1]
return x
}
// reassemble empties the heap and returns a VectorisedView
// containing a reassembled version of the fragments inside the heap.
func (h *fragHeap) reassemble() (buffer.VectorisedView, error) {
curr := heap.Pop(h).(fragment)
views := curr.vv.Views()
size := curr.vv.Size()
if curr.offset != 0 {
return buffer.VectorisedView{}, fmt.Errorf("offset of the first packet is != 0 (%d)", curr.offset)
}
for h.Len() > 0 {
curr := heap.Pop(h).(fragment)
if int(curr.offset) < size {
curr.vv.TrimFront(size - int(curr.offset))
} else if int(curr.offset) > size {
return buffer.VectorisedView{}, fmt.Errorf("packet has a hole, expected offset %d, got %d", size, curr.offset)
}
size += curr.vv.Size()
views = append(views, curr.vv.Views()...)
}
return buffer.NewVectorisedView(size, views), nil
}
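
The heap only orders fragments by offset; reassemble is what trims overlapping bytes and detects gaps. Below is a minimal hypothetical in-package test (not part of this commit) sketching the intended usage; it assumes it lives in a _test.go file inside package fragmentation.

package fragmentation

import (
	"container/heap"
	"testing"

	"netstack/tcpip/buffer"
)

// TestFragHeapSketch pushes two fragments out of order and checks that
// reassemble glues them back together. Hypothetical example code.
func TestFragHeapSketch(t *testing.T) {
	h := make(fragHeap, 0, 8)
	heap.Push(&h, fragment{offset: 4, vv: buffer.View([]byte{4, 5, 6, 7}).ToVectorisedView()})
	heap.Push(&h, fragment{offset: 0, vv: buffer.View([]byte{0, 1, 2, 3}).ToVectorisedView()})

	vv, err := h.reassemble()
	if err != nil {
		t.Fatalf("reassemble failed: %v", err)
	}
	if vv.Size() != 8 {
		t.Fatalf("got reassembled size %d, want 8", vv.Size())
	}
}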


@@ -0,0 +1,68 @@
package fragmentation
import (
"log"
"netstack/tcpip/buffer"
"sync"
"time"
)
// DefaultReassembleTimeout is based on the Linux stack: net.ipv4.ipfrag_time.
const DefaultReassembleTimeout = 30 * time.Second
// HighFragThreshold is the threshold at which we start trimming old
// fragmented packets. Linux uses a default value of 4 MB. See
// net.ipv4.ipfrag_high_thresh for more information.
const HighFragThreshold = 4 << 20 // 4MB
// LowFragThreshold is the level we drop down to when we start evicting
// older fragmented packets. It's important to keep enough room for newer
// packets to be reassembled, so this needs to be sufficiently lower than
// HighFragThreshold. Linux uses a default value of 3 MB. See
// net.ipv4.ipfrag_low_thresh for more information.
const LowFragThreshold = 3 << 20 // 3MB
// Fragmentation is the main structure that other modules of the stack
// should use to implement IP reassembly.
type Fragmentation struct {
mu sync.Mutex
highLimit int
lowLimit int
reassemblers map[uint32]*reassembler
rList reassemblerList
size int
timeout time.Duration
}
// NewFragmentation creates a new Fragmentation, clamping lowMemoryLimit
// to the range [0, highMemoryLimit].
func NewFragmentation(highMemoryLimit, lowMemoryLimit int, reassemblingTimeout time.Duration) *Fragmentation {
if lowMemoryLimit >= highMemoryLimit {
lowMemoryLimit = highMemoryLimit
}
if lowMemoryLimit < 0 {
lowMemoryLimit = 0
}
return &Fragmentation{
reassemblers: make(map[uint32]*reassembler),
highLimit: highMemoryLimit,
lowLimit: lowMemoryLimit,
timeout: reassemblingTimeout,
}
}
// Process processes an incoming fragment belonging to an ID and returns a
// complete packet once all fragments belonging to that ID have been received.
func (f *Fragmentation) Process(id uint32, first, last uint16, more bool, vv buffer.VectorisedView) (buffer.VectorisedView, bool) {
log.Println("fragmentation: processing fragment", id, first, last, vv.First())
f.mu.Lock()
r, ok := f.reassemblers[id]
if ok && r.tooOld(f.timeout) {
// This is very likely to be an id-collision or someone performing a slow-rate attack.
//f.release(r)
ok = false
}
if !ok {
r = newReassembler(id)
f.reassemblers[id] = r
f.rList.PushFront(r)
}
f.mu.Unlock()
return buffer.VectorisedView{}, false
}
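
Process is left unfinished in this commit: it looks up (or creates) the reassembler for the datagram ID and then always returns an empty view. For orientation only, here is a hedged sketch of how the remaining half might look, modeled on the upstream netstack design; the release helper mirrors the commented-out f.release(r) call above, and everything in this sketch is an assumption, not the author's final code.

// processSketch is a hypothetical continuation of Process: it feeds the
// fragment to the reassembler, accounts for memory, and evicts the oldest
// reassemblers when the high memory limit is exceeded.
func (f *Fragmentation) processSketch(r *reassembler, first, last uint16, more bool,
	vv buffer.VectorisedView) (buffer.VectorisedView, bool) {
	// Let the reassembler merge the fragment into its hole list and heap.
	res, done, consumed := r.process(first, last, more, vv)

	f.mu.Lock()
	defer f.mu.Unlock()
	f.size += consumed
	if done {
		f.release(r)
	}
	// Over the high-water mark: evict from the back of rList (the oldest
	// reassemblers) until we are back under the low-water mark.
	if f.size > f.highLimit {
		for f.size > f.lowLimit {
			tail := f.rList.Back()
			if tail == nil {
				break
			}
			f.release(tail)
		}
	}
	return res, done
}

// release is a hypothetical counterpart of the commented-out f.release(r):
// it drops a reassembler and reclaims the memory it accounted for.
// The caller must hold f.mu.
func (f *Fragmentation) release(r *reassembler) {
	if r.checkDoneOrMark() {
		// Another goroutine already released this reassembler.
		return
	}
	delete(f.reassemblers, r.id)
	f.rList.Remove(r)
	f.size -= r.size
	if f.size < 0 {
		f.size = 0
	}
}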


@@ -0,0 +1,139 @@
package fragmentation_test
import (
"netstack/tcpip"
"netstack/tcpip/buffer"
"netstack/tcpip/header"
"netstack/tcpip/link/channel"
"netstack/tcpip/network/arp"
"netstack/tcpip/network/ipv4"
"netstack/tcpip/stack"
"testing"
"time"
)
const (
stackLinkAddr = tcpip.LinkAddress("\x0a\x0a\x0b\x0b\x0c\x0c") // 0a:0a:0b:0b:0c:0c
stackAddr1 = tcpip.Address("\x0a\x00\x00\x01") // 10.0.0.1
stackAddr2 = tcpip.Address("\x0a\x00\x00\x02") // 10.0.0.2
stackAddrBad = tcpip.Address("\x0a\x00\x00\x03") // 10.0.0.3
)
type testContext struct {
t *testing.T
linkEP *channel.Endpoint
s *stack.Stack
}
func newTestContext(t *testing.T) *testContext {
s := stack.New([]string{ipv4.ProtocolName, arp.ProtocolName}, nil, stack.Options{})
const defaultMTU = 65536
id, linkEP := channel.New(256, defaultMTU, stackLinkAddr)
if err := s.CreateNIC(1, id); err != nil {
t.Fatalf("CreateNIC failed: %v", err)
}
if err := s.AddAddress(1, ipv4.ProtocolNumber, stackAddr1); err != nil {
t.Fatalf("AddAddress for ipv4 failed: %v", err)
}
if err := s.AddAddress(1, ipv4.ProtocolNumber, stackAddr2); err != nil {
t.Fatalf("AddAddress for ipv4 failed: %v", err)
}
if err := s.AddAddress(1, arp.ProtocolNumber, arp.ProtocolAddress); err != nil {
t.Fatalf("AddAddress for arp failed: %v", err)
}
s.SetRouteTable([]tcpip.Route{{
Destination: "\x00\x00\x00\x00",
Mask: "\x00\x00\x00\x00",
Gateway: "",
NIC: 1,
}})
return &testContext{
t: t,
s: s,
linkEP: linkEP,
}
}
func (c *testContext) cleanup() {
close(c.linkEP.C)
}
func TestFragmentationBase(t *testing.T) {
c := newTestContext(t)
defer c.cleanup()
const senderMAC = "\x01\x02\x03\x04\x05\x06"
const senderIPv4 = "\x0a\x00\x00\x02"
v := make(buffer.View, header.ARPSize)
h := header.ARP(v)
h.SetIPv4OverEthernet()
h.SetOp(header.ARPRequest) // an ARP request
copy(h.HardwareAddressSender(), senderMAC) // Local MAC
copy(h.ProtocolAddressSender(), senderIPv4) // Local IP
inject := func(addr tcpip.Address) {
copy(h.ProtocolAddressTarget(), addr)
c.linkEP.Inject(arp.ProtocolNumber, v.ToVectorisedView()) // inject an ARP packet into the link layer; it will be dispatched automatically
}
inject(stackAddr1) // target IP 10.0.0.1
select {
case pkt := <-c.linkEP.C:
if pkt.Proto != arp.ProtocolNumber {
t.Fatalf("stackAddr1: expected ARP response, got network protocol number %v", pkt.Proto)
}
rep := header.ARP(pkt.Header)
if !rep.IsValid() {
t.Fatalf("stackAddr1: invalid ARP response len(pkt.Header)=%d", len(pkt.Header))
}
if tcpip.Address(rep.ProtocolAddressSender()) != stackAddr1 {
t.Errorf("stackAddr1: expected sender to be set")
}
if got := tcpip.LinkAddress(rep.HardwareAddressSender()); got != stackLinkAddr {
t.Errorf("stackAddr1: expected sender to be stackLinkAddr, got %q", got)
}
case <-time.After(100 * time.Millisecond):
t.Fatalf("Case #1 Time Out\n")
}
// A raw IPv4 packet, encoded below as a non-initial fragment.
v = make(buffer.View, header.IPv4MinimumSize+256)
hdr := buffer.NewPrependable(header.IPv4MinimumSize)
ip := header.IPv4(hdr.Prepend(header.IPv4MinimumSize))
buf := make(buffer.View, 256)
for i := range buf {
buf[i] = 1
}
payload := buffer.NewVectorisedView(256, buf.ToVectorisedView().Views())
length := uint16(hdr.UsedLength() + payload.Size())
// Encode the IPv4 header.
ip.Encode(&header.IPv4Fields{
IHL: header.IPv4MinimumSize,
TotalLength: length,
ID: uint16(2),
Flags: 0x1, // more fragments follow
FragmentOffset: 1024, // non-zero offset: this is not the first fragment
TTL: 255,
Protocol: uint8(0x6), // TCP, used here only as a dummy upper-layer protocol
SrcAddr: senderIPv4,
DstAddr: stackAddr1,
})
//ip.SetFlagsFragmentOffset()
// Compute the header checksum and store it.
ip.SetChecksum(^ip.CalculateChecksum())
copy(v, ip)
copy(v[header.IPv4MinimumSize:], payload.First())
// The destination address is already encoded in the IPv4 header, so inject
// the packet directly; the link layer dispatches it to the ipv4 endpoint,
// which is where the fragmentation code gets exercised.
c.linkEP.Inject(ipv4.ProtocolNumber, v.ToVectorisedView())
}
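
The fragment injected above sits at a non-zero offset with the more-fragments flag set, so by itself it can never complete a datagram. As a hedged follow-up, and using only header/buffer APIs already exercised in this test, the complementary first fragment of the same datagram could be built and injected as sketched below (illustrative only, not part of the commit).

// injectFirstFragment is a hypothetical helper that could be called from
// TestFragmentationBase: it builds and injects the first fragment of
// datagram ID 2 (offset 0, more-fragments set, 1024 zero bytes of payload).
func injectFirstFragment(c *testContext) {
	const senderIPv4 = "\x0a\x00\x00\x02"
	first := make(buffer.View, header.IPv4MinimumSize+1024)
	fip := header.IPv4(first)
	fip.Encode(&header.IPv4Fields{
		IHL:            header.IPv4MinimumSize,
		TotalLength:    uint16(len(first)),
		ID:             uint16(2), // same ID as the fragment injected above
		Flags:          0x1,       // more fragments follow
		FragmentOffset: 0,
		TTL:            255,
		Protocol:       uint8(0x6),
		SrcAddr:        senderIPv4,
		DstAddr:        stackAddr1,
	})
	fip.SetChecksum(^fip.CalculateChecksum())
	c.linkEP.Inject(ipv4.ProtocolNumber, first.ToVectorisedView())
}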


@@ -0,0 +1,118 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package fragmentation
import (
"container/heap"
"fmt"
"math"
"sync"
"time"
"netstack/tcpip/buffer"
)
type hole struct {
first uint16
last uint16
deleted bool
}
type reassembler struct {
reassemblerEntry
id uint32
size int
mu sync.Mutex
holes []hole
deleted int
heap fragHeap
done bool
creationTime time.Time
}
func newReassembler(id uint32) *reassembler {
r := &reassembler{
id: id,
holes: make([]hole, 0, 16),
deleted: 0,
heap: make(fragHeap, 0, 8),
creationTime: time.Now(),
}
r.holes = append(r.holes, hole{
first: 0,
last: math.MaxUint16,
deleted: false})
return r
}
// updateHoles updates the list of holes for an incoming fragment and
// returns true iff the fragment filled at least part of an existing hole.
func (r *reassembler) updateHoles(first, last uint16, more bool) bool {
used := false
for i := range r.holes {
if r.holes[i].deleted || first > r.holes[i].last || last < r.holes[i].first {
continue
}
used = true
r.deleted++
r.holes[i].deleted = true
if first > r.holes[i].first {
r.holes = append(r.holes, hole{r.holes[i].first, first - 1, false})
}
if last < r.holes[i].last && more {
r.holes = append(r.holes, hole{last + 1, r.holes[i].last, false})
}
}
return used
}
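
The hole list starts out as a single hole covering [0, 65535] (see newReassembler above). To make the bookkeeping concrete, here is a hypothetical in-package walk-through (not part of this commit); the numbers are byte offsets.

// exampleHoles is a hypothetical illustration of the hole bookkeeping.
func exampleHoles() {
	r := newReassembler(1) // one hole: [0, 65535]

	// Fragment covering [0, 999] with more-fragments set: the initial hole
	// is marked deleted and a new hole [1000, 65535] is appended.
	_ = r.updateHoles(0, 999, true) // true; r.deleted == 1, len(r.holes) == 2

	// Final fragment covering [1000, 1499] (more == false): the remaining
	// hole is deleted and no trailing hole is added, so r.deleted equals
	// len(r.holes) and process() would go on to reassemble.
	_ = r.updateHoles(1000, 1499, false) // true; r.deleted == 2
}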
func (r *reassembler) process(first, last uint16, more bool, vv buffer.VectorisedView) (buffer.VectorisedView, bool, int) {
r.mu.Lock()
defer r.mu.Unlock()
consumed := 0
if r.done {
// A concurrent goroutine might have already reassembled
// the packet and emptied the heap while this goroutine
// was waiting on the mutex. We don't have to do anything in this case.
return buffer.VectorisedView{}, false, consumed
}
if r.updateHoles(first, last, more) {
// We store the incoming packet only if it filled some holes.
heap.Push(&r.heap, fragment{offset: first, vv: vv.Clone(nil)})
consumed = vv.Size()
r.size += consumed
}
// Check if all the holes have been deleted and we are ready to reassemble.
if r.deleted < len(r.holes) {
return buffer.VectorisedView{}, false, consumed
}
res, err := r.heap.reassemble()
if err != nil {
panic(fmt.Sprintf("reassemble failed with: %v. There is probably a bug in the code handling the holes.", err))
}
return res, true, consumed
}
func (r *reassembler) tooOld(timeout time.Duration) bool {
return time.Since(r.creationTime) > timeout
}
func (r *reassembler) checkDoneOrMark() bool {
r.mu.Lock()
prev := r.done
r.done = true
r.mu.Unlock()
return prev
}


@@ -0,0 +1,173 @@
package fragmentation
// ElementMapper provides an identity mapping by default.
//
// This can be replaced to provide a struct that maps elements to linker
// objects, if they are not the same. An ElementMapper is not typically
// required if: Linker is left as is, Element is left as is, or Linker and
// Element are the same type.
type reassemblerElementMapper struct{}
// linkerFor maps an Element to a Linker.
//
// This default implementation should be inlined.
//
//go:nosplit
func (reassemblerElementMapper) linkerFor(elem *reassembler) *reassembler { return elem }
// List is an intrusive list. Entries can be added to or removed from the list
// in O(1) time and with no additional memory allocations.
//
// The zero value for List is an empty list ready to use.
//
// To iterate over a list (where l is a List):
// for e := l.Front(); e != nil; e = e.Next() {
// // do something with e.
// }
//
// +stateify savable
type reassemblerList struct {
head *reassembler
tail *reassembler
}
// Reset resets list l to the empty state.
func (l *reassemblerList) Reset() {
l.head = nil
l.tail = nil
}
// Empty returns true iff the list is empty.
func (l *reassemblerList) Empty() bool {
return l.head == nil
}
// Front returns the first element of list l or nil.
func (l *reassemblerList) Front() *reassembler {
return l.head
}
// Back returns the last element of list l or nil.
func (l *reassemblerList) Back() *reassembler {
return l.tail
}
// PushFront inserts the element e at the front of list l.
func (l *reassemblerList) PushFront(e *reassembler) {
reassemblerElementMapper{}.linkerFor(e).SetNext(l.head)
reassemblerElementMapper{}.linkerFor(e).SetPrev(nil)
if l.head != nil {
reassemblerElementMapper{}.linkerFor(l.head).SetPrev(e)
} else {
l.tail = e
}
l.head = e
}
// PushBack inserts the element e at the back of list l.
func (l *reassemblerList) PushBack(e *reassembler) {
reassemblerElementMapper{}.linkerFor(e).SetNext(nil)
reassemblerElementMapper{}.linkerFor(e).SetPrev(l.tail)
if l.tail != nil {
reassemblerElementMapper{}.linkerFor(l.tail).SetNext(e)
} else {
l.head = e
}
l.tail = e
}
// PushBackList inserts list m at the end of list l, emptying m.
func (l *reassemblerList) PushBackList(m *reassemblerList) {
if l.head == nil {
l.head = m.head
l.tail = m.tail
} else if m.head != nil {
reassemblerElementMapper{}.linkerFor(l.tail).SetNext(m.head)
reassemblerElementMapper{}.linkerFor(m.head).SetPrev(l.tail)
l.tail = m.tail
}
m.head = nil
m.tail = nil
}
// InsertAfter inserts e after b.
func (l *reassemblerList) InsertAfter(b, e *reassembler) {
a := reassemblerElementMapper{}.linkerFor(b).Next()
reassemblerElementMapper{}.linkerFor(e).SetNext(a)
reassemblerElementMapper{}.linkerFor(e).SetPrev(b)
reassemblerElementMapper{}.linkerFor(b).SetNext(e)
if a != nil {
reassemblerElementMapper{}.linkerFor(a).SetPrev(e)
} else {
l.tail = e
}
}
// InsertBefore inserts e before a.
func (l *reassemblerList) InsertBefore(a, e *reassembler) {
b := reassemblerElementMapper{}.linkerFor(a).Prev()
reassemblerElementMapper{}.linkerFor(e).SetNext(a)
reassemblerElementMapper{}.linkerFor(e).SetPrev(b)
reassemblerElementMapper{}.linkerFor(a).SetPrev(e)
if b != nil {
reassemblerElementMapper{}.linkerFor(b).SetNext(e)
} else {
l.head = e
}
}
// Remove removes e from l.
func (l *reassemblerList) Remove(e *reassembler) {
prev := reassemblerElementMapper{}.linkerFor(e).Prev()
next := reassemblerElementMapper{}.linkerFor(e).Next()
if prev != nil {
reassemblerElementMapper{}.linkerFor(prev).SetNext(next)
} else {
l.head = next
}
if next != nil {
reassemblerElementMapper{}.linkerFor(next).SetPrev(prev)
} else {
l.tail = prev
}
}
// Entry is a default implementation of Linker. Users can add anonymous fields
// of this type to their structs to make them automatically implement the
// methods needed by List.
//
// +stateify savable
type reassemblerEntry struct {
next *reassembler
prev *reassembler
}
// Next returns the entry that follows e in the list.
func (e *reassemblerEntry) Next() *reassembler {
return e.next
}
// Prev returns the entry that precedes e in the list.
func (e *reassemblerEntry) Prev() *reassembler {
return e.prev
}
// SetNext assigns elem as the entry that follows e in the list.
func (e *reassemblerEntry) SetNext(elem *reassembler) {
e.next = elem
}
// SetPrev assigns elem as the entry that precedes e in the list.
func (e *reassemblerEntry) SetPrev(elem *reassembler) {
e.prev = elem
}