Diffstat (limited to 'vendor/github.com/google/gopacket/reassembly')
-rw-r--r--  vendor/github.com/google/gopacket/reassembly/cap2test.go            105
-rw-r--r--  vendor/github.com/google/gopacket/reassembly/memory.go              254
-rw-r--r--  vendor/github.com/google/gopacket/reassembly/tcpassembly.go        1311
-rw-r--r--  vendor/github.com/google/gopacket/reassembly/tcpassembly_test.go   1660
-rw-r--r--  vendor/github.com/google/gopacket/reassembly/tcpcheck.go            246
-rw-r--r--  vendor/github.com/google/gopacket/reassembly/tcpcheck_test.go       249
6 files changed, 3825 insertions, 0 deletions
diff --git a/vendor/github.com/google/gopacket/reassembly/cap2test.go b/vendor/github.com/google/gopacket/reassembly/cap2test.go
new file mode 100644
index 0000000..1d43f1d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/reassembly/cap2test.go
@@ -0,0 +1,105 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "log"
+ "os"
+ "strings"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+ "github.com/google/gopacket/pcap"
+)
+
+var input = flag.String("i", "", "Input filename")
+
+func main() {
+ var handler *pcap.Handle
+ var err error
+ flag.Parse()
+ if *input == "" {
+ log.Fatalf("Please specify input filename")
+ }
+ if handler, err = pcap.OpenOffline(*input); err != nil {
+ log.Fatalf("Failed to open: %s: %s", *input, err)
+ }
+ args := flag.Args()
+ if len(args) > 0 {
+ filter := strings.Join(args, " ")
+ if err := handler.SetBPFFilter(filter); err != nil {
+ log.Fatalf("Failed to set BPF filter \"%s\": %s", filter, err)
+ }
+ handler.Stats()
+ }
+ var decoder gopacket.Decoder
+ var ok bool
+ linkType := fmt.Sprintf("%s", handler.LinkType())
+ if decoder, ok = gopacket.DecodersByLayerName[linkType]; !ok {
+ log.Fatalf("Failed to find decoder to pcap's linktype %s", linkType)
+ }
+ source := gopacket.NewPacketSource(handler, decoder)
+ count := uint64(0)
+ pktNonTcp := uint64(0)
+ pktTcp := uint64(0)
+ fmt.Println("test([]testSequence{")
+ for packet := range source.Packets() {
+ count++
+ tcp := packet.Layer(layers.LayerTypeTCP)
+ if tcp == nil {
+ pktNonTcp++
+ continue
+ } else {
+ pktTcp++
+ tcp := tcp.(*layers.TCP)
+ //fmt.Printf("packet: %s\n", tcp)
+ var b bytes.Buffer
+ b.WriteString("{\n")
+ // TCP
+ b.WriteString("tcp: layers.TCP{\n")
+ if tcp.SYN {
+ b.WriteString(" SYN: true,\n")
+ }
+ if tcp.ACK {
+ b.WriteString(" ACK: true,\n")
+ }
+ if tcp.RST {
+ b.WriteString(" RST: true,\n")
+ }
+ if tcp.FIN {
+ b.WriteString(" FIN: true,\n")
+ }
+ b.WriteString(fmt.Sprintf(" SrcPort: %d,\n", tcp.SrcPort))
+ b.WriteString(fmt.Sprintf(" DstPort: %d,\n", tcp.DstPort))
+ b.WriteString(fmt.Sprintf(" Seq: %d,\n", tcp.Seq))
+ b.WriteString(fmt.Sprintf(" Ack: %d,\n", tcp.Ack))
+ b.WriteString(" BaseLayer: layers.BaseLayer{Payload: []byte{")
+ for _, p := range tcp.Payload {
+ b.WriteString(fmt.Sprintf("%d,", p))
+ }
+ b.WriteString("}},\n")
+ b.WriteString("},\n")
+ // CaptureInfo
+ b.WriteString("ci: gopacket.CaptureInfo{\n")
+ ts := packet.Metadata().CaptureInfo.Timestamp
+ b.WriteString(fmt.Sprintf(" Timestamp: time.Unix(%d,%d),\n", ts.Unix(), ts.Nanosecond()))
+ b.WriteString("},\n")
+ // Struct
+ b.WriteString("},\n")
+ fmt.Print(b.String())
+ }
+
+ }
+ fmt.Println("})")
+
+ fmt.Fprintf(os.Stderr, "Total: %d, TCP: %d, non-TCP: %d\n", count, pktTcp, pktNonTcp)
+}
diff --git a/vendor/github.com/google/gopacket/reassembly/memory.go b/vendor/github.com/google/gopacket/reassembly/memory.go
new file mode 100644
index 0000000..c1b2ae7
--- /dev/null
+++ b/vendor/github.com/google/gopacket/reassembly/memory.go
@@ -0,0 +1,254 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package reassembly
+
+import (
+ "flag"
+ "log"
+ "sync"
+ "time"
+
+ "github.com/google/gopacket/layers"
+)
+
+var memLog = flag.Bool("assembly_memuse_log", defaultDebug, "If true, the github.com/google/gopacket/reassembly library will log information regarding its memory use every once in a while.")
+
+/*
+ * pageCache
+ */
+// pageCache is a concurrency-unsafe store of page objects we use to avoid
+// memory allocation as much as we can.
+type pageCache struct {
+ free []*page
+ pcSize int
+ size, used int
+ pageRequests int64
+ ops int
+ nextShrink int
+}
+
+const initialAllocSize = 1024
+
+func newPageCache() *pageCache {
+ pc := &pageCache{
+ free: make([]*page, 0, initialAllocSize),
+ pcSize: initialAllocSize,
+ }
+ pc.grow()
+ return pc
+}
+
+// grow exponentially increases the size of our page cache as much as necessary.
+func (c *pageCache) grow() {
+ pages := make([]page, c.pcSize)
+ c.size += c.pcSize
+ for i := range pages {
+ c.free = append(c.free, &pages[i])
+ }
+ if *memLog {
+ log.Println("PageCache: created", c.pcSize, "new pages, size:", c.size, "cap:", cap(c.free), "len:", len(c.free))
+ }
+ // control next shrink attempt
+ c.nextShrink = c.pcSize
+ c.ops = 0
+ // prepare for next alloc
+ c.pcSize *= 2
+}
+
+// Remove references to unused pages to let GC collect them
+// Note: memory used by c.free itself is not collected.
+func (c *pageCache) tryShrink() {
+ var min = c.pcSize / 2
+ if min < initialAllocSize {
+ min = initialAllocSize
+ }
+ if len(c.free) <= min {
+ return
+ }
+ for i := range c.free[min:] {
+ c.free[min+i] = nil
+ }
+ c.size -= len(c.free) - min
+ c.free = c.free[:min]
+ c.pcSize = min
+}
+
+// next returns a clean, ready-to-use page object.
+func (c *pageCache) next(ts time.Time) (p *page) {
+ if *memLog {
+ c.pageRequests++
+ if c.pageRequests&0xFFFF == 0 {
+ log.Println("PageCache:", c.pageRequests, "requested,", c.used, "used,", len(c.free), "free")
+ }
+ }
+ if len(c.free) == 0 {
+ c.grow()
+ }
+ i := len(c.free) - 1
+ p, c.free = c.free[i], c.free[:i]
+ p.seen = ts
+ p.bytes = p.buf[:0]
+ c.used++
+ if *memLog {
+ log.Printf("allocator returns %s\n", p)
+ }
+ c.ops++
+ if c.ops > c.nextShrink {
+ c.ops = 0
+ c.tryShrink()
+ }
+
+ return p
+}
+
+// replace returns a page to the pageCache for later reuse.
+func (c *pageCache) replace(p *page) {
+ c.used--
+ if *memLog {
+ log.Printf("replacing %s\n", p)
+ }
+ p.prev = nil
+ p.next = nil
+ c.free = append(c.free, p)
+}
+
+/*
+ * StreamPool
+ */
+
+// StreamPool stores all streams created by Assemblers, allowing multiple
+// assemblers to work together on stream processing while enforcing the fact
+// that a single stream receives its data serially. It is safe
+// for concurrency, usable by multiple Assemblers at once.
+//
+// StreamPool handles the creation and storage of Stream objects used by one or
+// more Assembler objects. When a new TCP stream is found by an Assembler, it
+// creates an associated Stream by calling its StreamFactory's New method.
+// Thereafter (until the stream is closed), that Stream object will receive
+// assembled TCP data via Assembler's calls to the stream's Reassembled
+// function.
+//
+// Like the Assembler, StreamPool attempts to minimize allocation. Unlike the
+// Assembler, though, it does have to do some locking to make sure that the
+// connection objects it stores are accessible to multiple Assemblers.
+type StreamPool struct {
+ conns map[key]*connection
+ users int
+ mu sync.RWMutex
+ factory StreamFactory
+ free []*connection
+ all [][]connection
+ nextAlloc int
+ newConnectionCount int64
+}
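
    The doc comment above stresses that a single StreamPool may be shared by several
    Assemblers. The following is a minimal sketch of that pattern, not part of this diff;
    it assumes the caller supplies the pool and the packet channels, and that gopacket is
    imported alongside the packages this file already uses.

    // runAssemblers is an illustrative sketch: several Assemblers share one
    // StreamPool, so packets for the same connection may arrive on different
    // inputs and still reach the same Stream.
    func runAssemblers(pool *StreamPool, inputs []<-chan gopacket.Packet) {
        var wg sync.WaitGroup
        for _, in := range inputs {
            wg.Add(1)
            go func(in <-chan gopacket.Packet) {
                defer wg.Done()
                a := NewAssembler(pool) // one Assembler per goroutine; the pool is shared
                for packet := range in {
                    tcp, ok := packet.TransportLayer().(*layers.TCP)
                    if !ok || packet.NetworkLayer() == nil {
                        continue
                    }
                    a.Assemble(packet.NetworkLayer().NetworkFlow(), tcp)
                }
            }(in)
        }
        wg.Wait()
        // A final FlushAll() on any Assembler would then close the remaining connections.
    }
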
+
+func (p *StreamPool) grow() {
+ conns := make([]connection, p.nextAlloc)
+ p.all = append(p.all, conns)
+ for i := range conns {
+ p.free = append(p.free, &conns[i])
+ }
+ if *memLog {
+ log.Println("StreamPool: created", p.nextAlloc, "new connections")
+ }
+ p.nextAlloc *= 2
+}
+
+// Dump logs all connections
+func (p *StreamPool) Dump() {
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ log.Printf("Remaining %d connections: ", len(p.conns))
+ for _, conn := range p.conns {
+ log.Printf("%v %s", conn.key, conn)
+ }
+}
+
+func (p *StreamPool) remove(conn *connection) {
+ p.mu.Lock()
+ if _, ok := p.conns[conn.key]; ok {
+ delete(p.conns, conn.key)
+ p.free = append(p.free, conn)
+ }
+ p.mu.Unlock()
+}
+
+// NewStreamPool creates a new connection pool. Streams will
+// be created as necessary using the passed-in StreamFactory.
+func NewStreamPool(factory StreamFactory) *StreamPool {
+ return &StreamPool{
+ conns: make(map[key]*connection, initialAllocSize),
+ free: make([]*connection, 0, initialAllocSize),
+ factory: factory,
+ nextAlloc: initialAllocSize,
+ }
+}
+
+func (p *StreamPool) connections() []*connection {
+ p.mu.RLock()
+ conns := make([]*connection, 0, len(p.conns))
+ for _, conn := range p.conns {
+ conns = append(conns, conn)
+ }
+ p.mu.RUnlock()
+ return conns
+}
+
+func (p *StreamPool) newConnection(k key, s Stream, ts time.Time) (c *connection, h *halfconnection, r *halfconnection) {
+ if *memLog {
+ p.newConnectionCount++
+ if p.newConnectionCount&0x7FFF == 0 {
+ log.Println("StreamPool:", p.newConnectionCount, "requests,", len(p.conns), "used,", len(p.free), "free")
+ }
+ }
+ if len(p.free) == 0 {
+ p.grow()
+ }
+ index := len(p.free) - 1
+ c, p.free = p.free[index], p.free[:index]
+ c.reset(k, s, ts)
+ return c, &c.c2s, &c.s2c
+}
+
+func (p *StreamPool) getHalf(k key) (*connection, *halfconnection, *halfconnection) {
+ conn := p.conns[k]
+ if conn != nil {
+ return conn, &conn.c2s, &conn.s2c
+ }
+ rk := k.Reverse()
+ conn = p.conns[rk]
+ if conn != nil {
+ return conn, &conn.s2c, &conn.c2s
+ }
+ return nil, nil, nil
+}
+
+// getConnection returns a connection. If end is true and a connection
+// does not already exist, returns nil. This allows us to check for a
+// connection without actually creating one if it doesn't already exist.
+func (p *StreamPool) getConnection(k key, end bool, ts time.Time, tcp *layers.TCP, ac AssemblerContext) (*connection, *halfconnection, *halfconnection) {
+ p.mu.RLock()
+ conn, half, rev := p.getHalf(k)
+ p.mu.RUnlock()
+ if end || conn != nil {
+ return conn, half, rev
+ }
+ s := p.factory.New(k[0], k[1], tcp, ac)
+ p.mu.Lock()
+ defer p.mu.Unlock()
+ conn, half, rev = p.newConnection(k, s, ts)
+ conn2, half2, rev2 := p.getHalf(k)
+ if conn2 != nil {
+ if conn2.key != k {
+ panic("FIXME: other dir added in the meantime...")
+ }
+ // FIXME: delete s ?
+ return conn2, half2, rev2
+ }
+ p.conns[k] = conn
+ return conn, half, rev
+}
diff --git a/vendor/github.com/google/gopacket/reassembly/tcpassembly.go b/vendor/github.com/google/gopacket/reassembly/tcpassembly.go
new file mode 100644
index 0000000..bdf0deb
--- /dev/null
+++ b/vendor/github.com/google/gopacket/reassembly/tcpassembly.go
@@ -0,0 +1,1311 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+// Package reassembly provides TCP stream re-assembly.
+//
+// The reassembly package implements uni-directional TCP reassembly, for use in
+// packet-sniffing applications. The caller reads packets off the wire, then
+// presents them to an Assembler in the form of gopacket layers.TCP packets
+// (github.com/google/gopacket, github.com/google/gopacket/layers).
+//
+// The Assembler uses a user-supplied
+// StreamFactory to create a user-defined Stream interface, then passes packet
+// data in stream order to that object. A concurrency-safe StreamPool keeps
+// track of all current Streams being reassembled, so multiple Assemblers may
+// run at once to assemble packets while taking advantage of multiple cores.
+//
+// TODO: Add simplest example
+package reassembly
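
    The doc comment above still carries a TODO for a simplest example. As a stand-in,
    here is a standalone sketch, not part of the vendored file, of how the pieces fit
    together when reading from a pcap file; the printStream type, the file name and the
    minimal error handling are illustrative only.

    package main

    import (
        "log"

        "github.com/google/gopacket"
        "github.com/google/gopacket/layers"
        "github.com/google/gopacket/pcap"
        "github.com/google/gopacket/reassembly"
    )

    // printStream implements reassembly.StreamFactory and reassembly.Stream.
    type printStream struct{}

    func (s *printStream) New(netFlow, tcpFlow gopacket.Flow, tcp *layers.TCP, ac reassembly.AssemblerContext) reassembly.Stream {
        return s
    }

    func (s *printStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir reassembly.TCPFlowDirection, ackSeq reassembly.Sequence, start *bool, ac reassembly.AssemblerContext) bool {
        return true
    }

    func (s *printStream) ReassembledSG(sg reassembly.ScatterGather, ac reassembly.AssemblerContext) {
        length, _ := sg.Lengths()
        log.Printf("reassembled %d bytes", length)
    }

    func (s *printStream) ReassemblyComplete(ac reassembly.AssemblerContext) bool {
        return true
    }

    func main() {
        handle, err := pcap.OpenOffline("capture.pcap") // illustrative file name
        if err != nil {
            log.Fatal(err)
        }
        pool := reassembly.NewStreamPool(&printStream{})
        assembler := reassembly.NewAssembler(pool)
        for packet := range gopacket.NewPacketSource(handle, handle.LinkType()).Packets() {
            tcp, ok := packet.TransportLayer().(*layers.TCP)
            if !ok || packet.NetworkLayer() == nil {
                continue
            }
            // For pcap replay, AssembleWithContext with the packet's CaptureInfo would
            // preserve capture timestamps; Assemble simply uses time.Now().
            assembler.Assemble(packet.NetworkLayer().NetworkFlow(), tcp)
        }
        assembler.FlushAll()
    }
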
+
+import (
+ "encoding/hex"
+ "flag"
+ "fmt"
+ "log"
+ "sync"
+ "time"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
+// TODO:
+// - push to Stream on Ack
+// - implement chunked (cheap) reads and Reader() interface
+// - better organize file: split files: 'mem', 'misc' (seq + flow)
+
+var defaultDebug = false
+
+var debugLog = flag.Bool("assembly_debug_log", defaultDebug, "If true, the github.com/google/gopacket/reassembly library will log verbose debugging information (at least one line per packet)")
+
+const invalidSequence = -1
+const uint32Max = 0xFFFFFFFF
+
+// Sequence is a TCP sequence number. It provides a few convenience functions
+// for handling TCP wrap-around. The sequence should always be in the range
+// [0,0xFFFFFFFF]... its other bits are simply used in wrap-around calculations
+// and should never be set.
+type Sequence int64
+
+// Difference defines an ordering for comparing TCP sequences that's safe for
+// roll-overs. It returns:
+// > 0 : if t comes after s
+// < 0 : if t comes before s
+// 0 : if t == s
+// The number returned is the sequence difference, so 4.Difference(8) will
+// return 4.
+//
+// It handles rollovers by considering any sequence in the first quarter of the
+// uint32 space to be after any sequence in the last quarter of that space, thus
+// wrapping the uint32 space.
+func (s Sequence) Difference(t Sequence) int {
+ if s > uint32Max-uint32Max/4 && t < uint32Max/4 {
+ t += uint32Max
+ } else if t > uint32Max-uint32Max/4 && s < uint32Max/4 {
+ s += uint32Max
+ }
+ return int(t - s)
+}
+
+// Add adds an integer to a sequence and returns the resulting sequence.
+func (s Sequence) Add(t int) Sequence {
+ return (s + Sequence(t)) & uint32Max
+}
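
    A tiny illustration, not part of this diff, of the two helpers above; it reuses the
    4.Difference(8) example given in the doc comment and adds nothing beyond what the
    comments state.

    package main

    import (
        "fmt"

        "github.com/google/gopacket/reassembly"
    )

    func main() {
        s := reassembly.Sequence(4)
        fmt.Println(s.Difference(8)) // 4: sequence 8 comes 4 bytes after sequence 4
        fmt.Println(s.Add(10))       // 14, masked back into the 32-bit sequence space
    }
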
+
+// TCPAssemblyStats provides some figures for a ScatterGather
+type TCPAssemblyStats struct {
+ // For this ScatterGather
+ Chunks int
+ Packets int
+ // For the half connection, since last call to ReassembledSG()
+ QueuedBytes int
+ QueuedPackets int
+ OverlapBytes int
+ OverlapPackets int
+}
+
+// ScatterGather is used to pass reassembled data and metadata of reassembled
+// packets to a Stream via ReassembledSG
+type ScatterGather interface {
+ // Returns the length of available bytes and saved bytes
+ Lengths() (int, int)
+ // Returns the bytes up to length (shall be <= available bytes)
+ Fetch(length int) []byte
+ // KeepFrom tells the assembler to keep the bytes from the given offset and deliver them again in the next ReassembledSG() call
+ KeepFrom(offset int)
+ // Return CaptureInfo of packet corresponding to given offset
+ CaptureInfo(offset int) gopacket.CaptureInfo
+ // Return some info about the reassembled chunks
+ Info() (direction TCPFlowDirection, start bool, end bool, skip int)
+ // Return some stats regarding the state of the stream
+ Stats() TCPAssemblyStats
+}
+
+// byteContainer is either a page or a livePacket
+type byteContainer interface {
+ getBytes() []byte
+ length() int
+ convertToPages(*pageCache, int, AssemblerContext) (*page, *page, int)
+ captureInfo() gopacket.CaptureInfo
+ assemblerContext() AssemblerContext
+ release(*pageCache) int
+ isStart() bool
+ isEnd() bool
+ getSeq() Sequence
+ isPacket() bool
+}
+
+// Implements a ScatterGather
+type reassemblyObject struct {
+ all []byteContainer
+ Skip int
+ Direction TCPFlowDirection
+ saved int
+ toKeep int
+ // stats
+ queuedBytes int
+ queuedPackets int
+ overlapBytes int
+ overlapPackets int
+}
+
+func (rl *reassemblyObject) Lengths() (int, int) {
+ l := 0
+ for _, r := range rl.all {
+ l += r.length()
+ }
+ return l, rl.saved
+}
+
+func (rl *reassemblyObject) Fetch(l int) []byte {
+ if l <= rl.all[0].length() {
+ return rl.all[0].getBytes()[:l]
+ }
+ bytes := make([]byte, 0, l)
+ for _, bc := range rl.all {
+ bytes = append(bytes, bc.getBytes()...)
+ }
+ return bytes[:l]
+}
+
+func (rl *reassemblyObject) KeepFrom(offset int) {
+ rl.toKeep = offset
+}
+
+func (rl *reassemblyObject) CaptureInfo(offset int) gopacket.CaptureInfo {
+ current := 0
+ for _, r := range rl.all {
+ if current >= offset {
+ return r.captureInfo()
+ }
+ current += r.length()
+ }
+ // Invalid offset
+ return gopacket.CaptureInfo{}
+}
+
+func (rl *reassemblyObject) Info() (TCPFlowDirection, bool, bool, int) {
+ return rl.Direction, rl.all[0].isStart(), rl.all[len(rl.all)-1].isEnd(), rl.Skip
+}
+
+func (rl *reassemblyObject) Stats() TCPAssemblyStats {
+ packets := int(0)
+ for _, r := range rl.all {
+ if r.isPacket() {
+ packets++
+ }
+ }
+ return TCPAssemblyStats{
+ Chunks: len(rl.all),
+ Packets: packets,
+ QueuedBytes: rl.queuedBytes,
+ QueuedPackets: rl.queuedPackets,
+ OverlapBytes: rl.overlapBytes,
+ OverlapPackets: rl.overlapPackets,
+ }
+}
+
+const pageBytes = 1900
+
+// TCPFlowDirection distinguishes the two half-connection directions.
+//
+// TCPDirClientToServer is assigned to the half-connection of the first
+// received packet, so it may be wrong if packets are not received in order.
+// It's up to the caller (e.g. in Accept()) to decide if the direction should
+// be interpreted differently.
+type TCPFlowDirection bool
+
+// Values are not really useful
+const (
+ TCPDirClientToServer TCPFlowDirection = false
+ TCPDirServerToClient TCPFlowDirection = true
+)
+
+func (dir TCPFlowDirection) String() string {
+ switch dir {
+ case TCPDirClientToServer:
+ return "client->server"
+ case TCPDirServerToClient:
+ return "server->client"
+ }
+ return ""
+}
+
+// Reverse returns the reversed direction
+func (dir TCPFlowDirection) Reverse() TCPFlowDirection {
+ return !dir
+}
+
+/* page: implements a byteContainer */
+
+// page is used to store TCP data we're not ready for yet (out-of-order
+// packets). Unused pages are stored in and returned from a pageCache, which
+// avoids memory allocation. Used pages are stored in a doubly-linked list in
+// a connection.
+type page struct {
+ bytes []byte
+ seq Sequence
+ prev, next *page
+ buf [pageBytes]byte
+ ac AssemblerContext // only set for the first page of a packet
+ seen time.Time
+ start, end bool
+}
+
+func (p *page) getBytes() []byte {
+ return p.bytes
+}
+func (p *page) captureInfo() gopacket.CaptureInfo {
+ return p.ac.GetCaptureInfo()
+}
+func (p *page) assemblerContext() AssemblerContext {
+ return p.ac
+}
+func (p *page) convertToPages(pc *pageCache, skip int, ac AssemblerContext) (*page, *page, int) {
+ if skip != 0 {
+ p.bytes = p.bytes[skip:]
+ p.seq = p.seq.Add(skip)
+ }
+ p.prev, p.next = nil, nil
+ return p, p, 1
+}
+func (p *page) length() int {
+ return len(p.bytes)
+}
+func (p *page) release(pc *pageCache) int {
+ pc.replace(p)
+ return 1
+}
+func (p *page) isStart() bool {
+ return p.start
+}
+func (p *page) isEnd() bool {
+ return p.end
+}
+func (p *page) getSeq() Sequence {
+ return p.seq
+}
+func (p *page) isPacket() bool {
+ return p.ac != nil
+}
+func (p *page) String() string {
+ return fmt.Sprintf("page@%p{seq: %v, bytes:%d, -> nextSeq:%v} (prev:%p, next:%p)", p, p.seq, len(p.bytes), p.seq+Sequence(len(p.bytes)), p.prev, p.next)
+}
+
+/* livePacket: implements a byteContainer */
+type livePacket struct {
+ bytes []byte
+ start bool
+ end bool
+ ci gopacket.CaptureInfo
+ ac AssemblerContext
+ seq Sequence
+}
+
+func (lp *livePacket) getBytes() []byte {
+ return lp.bytes
+}
+func (lp *livePacket) captureInfo() gopacket.CaptureInfo {
+ return lp.ci
+}
+func (lp *livePacket) assemblerContext() AssemblerContext {
+ return lp.ac
+}
+func (lp *livePacket) length() int {
+ return len(lp.bytes)
+}
+func (lp *livePacket) isStart() bool {
+ return lp.start
+}
+func (lp *livePacket) isEnd() bool {
+ return lp.end
+}
+func (lp *livePacket) getSeq() Sequence {
+ return lp.seq
+}
+func (lp *livePacket) isPacket() bool {
+ return true
+}
+
+// Creates a page (or set of pages) from a TCP packet: returns the first and last
+// page in its doubly-linked list of new pages.
+func (lp *livePacket) convertToPages(pc *pageCache, skip int, ac AssemblerContext) (*page, *page, int) {
+ ts := lp.ci.Timestamp
+ first := pc.next(ts)
+ current := first
+ current.prev = nil
+ first.ac = ac
+ numPages := 1
+ seq, bytes := lp.seq.Add(skip), lp.bytes[skip:]
+ for {
+ length := min(len(bytes), pageBytes)
+ current.bytes = current.buf[:length]
+ copy(current.bytes, bytes)
+ current.seq = seq
+ bytes = bytes[length:]
+ if len(bytes) == 0 {
+ current.end = lp.isEnd()
+ current.next = nil
+ break
+ }
+ seq = seq.Add(length)
+ current.next = pc.next(ts)
+ current.next.prev = current
+ current = current.next
+ current.ac = nil
+ numPages++
+ }
+ return first, current, numPages
+}
+func (lp *livePacket) estimateNumberOfPages() int {
+ return (len(lp.bytes) + pageBytes - 1) / pageBytes
+}
+
+func (lp *livePacket) release(*pageCache) int {
+ return 0
+}
+
+// Stream is implemented by the caller to handle incoming reassembled
+// TCP data. Callers create a StreamFactory, then StreamPool uses
+// it to create a new Stream for every TCP stream.
+//
+// assembly will, in order:
+// 1) Create the stream via StreamFactory.New
+// 2) Call ReassembledSG 0 or more times, passing in reassembled TCP data in order
+// 3) Call ReassemblyComplete one time, after which the stream is dereferenced by assembly.
+type Stream interface {
+ // Accept tells whether the TCP packet should be accepted; start can be modified to force a start even if no SYN has been seen
+ Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, ackSeq Sequence, start *bool, ac AssemblerContext) bool
+
+ // ReassembledSG is called zero or more times.
+ // ScatterGather is reused after each ReassembledSG call,
+ // so it's important to copy anything you need out of it,
+ // especially bytes (or use KeepFrom())
+ ReassembledSG(sg ScatterGather, ac AssemblerContext)
+
+ // ReassemblyComplete is called when assembly decides there is
+ // no more data for this Stream, either because a FIN or RST packet
+ // was seen, or because the stream has timed out without any new
+ // packet data (due to a call to FlushCloseOlderThan).
+ // It should return true if the connection should be removed from the pool.
+ // It can return false if it wants to see subsequent packets with Accept(), e.g. to
+ // see FIN-ACK, for deeper state-machine analysis.
+ ReassemblyComplete(ac AssemblerContext) bool
+}
+
+// StreamFactory is used by assembly to create a new stream for each
+// new TCP session.
+type StreamFactory interface {
+ // New should return a new stream for the given TCP key.
+ New(netFlow, tcpFlow gopacket.Flow, tcp *layers.TCP, ac AssemblerContext) Stream
+}
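
    For reference, a minimal in-package sketch of the two interfaces above, modeled on the
    testFactory type in tcpassembly_test.go later in this diff; the type name and the byte
    handling are illustrative, not part of the library.

    // minimalStream implements both StreamFactory and Stream.
    type minimalStream struct {
        data []byte
    }

    func (m *minimalStream) New(netFlow, tcpFlow gopacket.Flow, tcp *layers.TCP, ac AssemblerContext) Stream {
        return m
    }

    func (m *minimalStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, ackSeq Sequence, start *bool, ac AssemblerContext) bool {
        return true // a real implementation could filter packets here
    }

    func (m *minimalStream) ReassembledSG(sg ScatterGather, ac AssemblerContext) {
        length, _ := sg.Lengths()
        // The ScatterGather is reused after this call returns, so copy the bytes out.
        m.data = append(m.data, sg.Fetch(length)...)
    }

    func (m *minimalStream) ReassemblyComplete(ac AssemblerContext) bool {
        return true // allow the connection to be removed from the pool
    }
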
+
+type key [2]gopacket.Flow
+
+func (k *key) String() string {
+ return fmt.Sprintf("%s:%s", k[0], k[1])
+}
+
+func (k *key) Reverse() key {
+ return key{
+ k[0].Reverse(),
+ k[1].Reverse(),
+ }
+}
+
+const assemblerReturnValueInitialSize = 16
+
+/* one-way connection, i.e. halfconnection */
+type halfconnection struct {
+ dir TCPFlowDirection
+ pages int // Number of pages used (both in first/last and saved)
+ saved *page // Doubly-linked list of in-order pages (seq < nextSeq) already given to Stream who told us to keep
+ first, last *page // Doubly-linked list of out-of-order pages (seq > nextSeq)
+ nextSeq Sequence // sequence number of in-order received bytes
+ ackSeq Sequence
+ created, lastSeen time.Time
+ stream Stream
+ closed bool
+ // for stats
+ queuedBytes int
+ queuedPackets int
+ overlapBytes int
+ overlapPackets int
+}
+
+func (half *halfconnection) String() string {
+ closed := ""
+ if half.closed {
+ closed = "closed "
+ }
+ return fmt.Sprintf("%screated:%v, last:%v", closed, half.created, half.lastSeen)
+}
+
+// Dump returns a string (cryptically) describing the halfconnection
+func (half *halfconnection) Dump() string {
+ s := fmt.Sprintf("pages: %d\n"+
+ "nextSeq: %d\n"+
+ "ackSeq: %d\n"+
+ "Seen : %s\n"+
+ "dir: %s\n", half.pages, half.nextSeq, half.ackSeq, half.lastSeen, half.dir)
+ nb := 0
+ for p := half.first; p != nil; p = p.next {
+ s += fmt.Sprintf(" Page[%d] %s len: %d\n", nb, p, len(p.bytes))
+ nb++
+ }
+ return s
+}
+
+/* Bi-directional connection */
+
+type connection struct {
+ key key // client->server
+ c2s, s2c halfconnection
+ mu sync.Mutex
+}
+
+func (c *connection) reset(k key, s Stream, ts time.Time) {
+ c.key = k
+ base := halfconnection{
+ nextSeq: invalidSequence,
+ ackSeq: invalidSequence,
+ created: ts,
+ lastSeen: ts,
+ stream: s,
+ }
+ c.c2s, c.s2c = base, base
+ c.c2s.dir, c.s2c.dir = TCPDirClientToServer, TCPDirServerToClient
+}
+
+func (c *connection) String() string {
+ return fmt.Sprintf("c2s: %s, s2c: %s", &c.c2s, &c.s2c)
+}
+
+/*
+ * Assembler
+ */
+
+// DefaultAssemblerOptions provides default options for an assembler.
+// These options are used by default when calling NewAssembler, so if
+// modified before a NewAssembler call they'll affect the resulting Assembler.
+//
+// Note that the default options can result in ever-increasing memory usage
+// unless one of the Flush* methods is called on a regular basis.
+var DefaultAssemblerOptions = AssemblerOptions{
+ MaxBufferedPagesPerConnection: 0, // unlimited
+ MaxBufferedPagesTotal: 0, // unlimited
+}
+
+// AssemblerOptions controls the behavior of each assembler. Modify the
+// options of each assembler you create to change their behavior.
+type AssemblerOptions struct {
+ // MaxBufferedPagesTotal is an upper limit on the total number of pages to
+ // buffer while waiting for out-of-order packets. Once this limit is
+ // reached, the assembler will degrade to flushing every connection it
+ // gets a packet for. If <= 0, this is ignored.
+ MaxBufferedPagesTotal int
+ // MaxBufferedPagesPerConnection is an upper limit on the number of pages
+ // buffered for a single connection. Should this limit be reached for a
+ // particular connection, the smallest sequence number will be flushed, along
+ // with any contiguous data. If <= 0, this is ignored.
+ MaxBufferedPagesPerConnection int
+}
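
    A short sketch, not part of this diff, of how these limits are typically set: the
    Assembler embeds AssemblerOptions, so the fields can be assigned directly, as the
    tests later in this diff do with MaxBufferedPagesPerConnection. The helper name and
    the numbers are illustrative.

    // newBoundedAssembler builds an Assembler whose buffering is capped.
    func newBoundedAssembler(factory StreamFactory) *Assembler {
        pool := NewStreamPool(factory)
        a := NewAssembler(pool)
        a.MaxBufferedPagesPerConnection = 4 // flush a connection once 4 pages are buffered for it
        a.MaxBufferedPagesTotal = 4096      // global cap across all connections; <= 0 means unlimited
        return a
    }
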
+
+// Assembler handles reassembling TCP streams. It is not safe for
+// concurrency... after passing a packet in via the Assemble call, the caller
+// must wait for that call to return before calling Assemble again. Callers can
+// get around this by creating multiple assemblers that share a StreamPool. In
+// that case, each individual stream will still be handled serially (each stream
+// has an individual mutex associated with it), however multiple assemblers can
+// assemble different connections concurrently.
+//
+// The Assembler provides (hopefully) fast TCP stream re-assembly for sniffing
+// applications written in Go. The Assembler uses the following methods to be
+// as fast as possible, to keep packet processing speedy:
+//
+// Avoids Lock Contention
+//
+// Assemblers lock connections, but each connection has an individual lock, and
+// rarely will two Assemblers be looking at the same connection. Assemblers
+// lock the StreamPool when looking up connections, but they use Reader
+// locks initially, and only force a write lock if they need to create a new
+// connection or close one down. These happen much less frequently than
+// individual packet handling.
+//
+// Each assembler runs in its own goroutine, and the only state shared between
+// goroutines is through the StreamPool. Thus all internal Assembler state
+// can be handled without any locking.
+//
+// NOTE: If you can guarantee that packets going to a set of Assemblers will
+// contain information on different connections per Assembler (for example,
+// they're already hashed by PF_RING hashing or some other hashing mechanism),
+// then we recommend you use a separate StreamPool per Assembler, thus
+// avoiding all lock contention. Only when different Assemblers could receive
+// packets for the same Stream should a StreamPool be shared between them.
+//
+// Avoids Memory Copying
+//
+// In the common case, handling of a single TCP packet should result in zero
+// memory allocations. The Assembler will look up the connection, figure out
+// that the packet has arrived in order, and immediately pass that packet on to
+// the appropriate connection's handling code. Only if a packet arrives out of
+// order are its contents copied and stored in memory for later.
+//
+// Avoids Memory Allocation
+//
+// Assemblers try very hard to not use memory allocation unless absolutely
+// necessary. Packet data for sequential packets is passed directly to streams
+// with no copying or allocation. Packet data for out-of-order packets is
+// copied into reusable pages, and new pages are only allocated rarely when the
+// page cache runs out. Page caches are Assembler-specific, thus not used
+// concurrently and requiring no locking.
+//
+// Internal representations for connection objects are also reused over time.
+// Because of this, the most common memory allocation done by the Assembler is
+// generally what's done by the caller in StreamFactory.New. If no allocation
+// is done there, then very little allocation is done ever, mostly to handle
+// large increases in bandwidth or numbers of connections.
+//
+// TODO: The page caches used by an Assembler will grow to the size necessary
+// to handle a workload, and currently will never shrink. This means that
+// traffic spikes can result in large memory usage which isn't garbage
+// collected when typical traffic levels return.
+type Assembler struct {
+ AssemblerOptions
+ ret []byteContainer
+ pc *pageCache
+ connPool *StreamPool
+ cacheLP livePacket
+ cacheSG reassemblyObject
+ start bool
+}
+
+// NewAssembler creates a new assembler. Pass in the StreamPool
+// to use, which may be shared across assemblers.
+//
+// This sets some sane defaults for the assembler options,
+// see DefaultAssemblerOptions for details.
+func NewAssembler(pool *StreamPool) *Assembler {
+ pool.mu.Lock()
+ pool.users++
+ pool.mu.Unlock()
+ return &Assembler{
+ ret: make([]byteContainer, assemblerReturnValueInitialSize),
+ pc: newPageCache(),
+ connPool: pool,
+ AssemblerOptions: DefaultAssemblerOptions,
+ }
+}
+
+// Dump returns a short string describing the page usage of the Assembler
+func (a *Assembler) Dump() string {
+ s := ""
+ s += fmt.Sprintf("pageCache: used: %d, size: %d, free: %d", a.pc.used, a.pc.size, len(a.pc.free))
+ return s
+}
+
+// AssemblerContext provides method to get metadata
+type AssemblerContext interface {
+ GetCaptureInfo() gopacket.CaptureInfo
+}
+
+// Implements AssemblerContext for Assemble()
+type assemblerSimpleContext gopacket.CaptureInfo
+
+func (asc *assemblerSimpleContext) GetCaptureInfo() gopacket.CaptureInfo {
+ return gopacket.CaptureInfo(*asc)
+}
+
+// Assemble calls AssembleWithContext with the current timestamp, useful for
+// packets being read directly off the wire.
+func (a *Assembler) Assemble(netFlow gopacket.Flow, t *layers.TCP) {
+ ctx := assemblerSimpleContext(gopacket.CaptureInfo{Timestamp: time.Now()})
+ a.AssembleWithContext(netFlow, t, &ctx)
+}
+
+type assemblerAction struct {
+ nextSeq Sequence
+ queue bool
+}
+
+// AssembleWithContext reassembles the given TCP packet into its appropriate
+// stream.
+//
+// The timestamp passed in must be the time at which the packet was seen.
+// For packets read off the wire, time.Now() should be fine. For packets read
+// from PCAP files, CaptureInfo.Timestamp should be passed in. This timestamp
+// will affect which streams are flushed by a call to FlushCloseOlderThan.
+//
+// Each AssembleWithContext call results in, in order:
+//
+// zero or one call to StreamFactory.New, creating a stream
+// zero or one call to ReassembledSG on a single stream
+// zero or one call to ReassemblyComplete on the same stream
+func (a *Assembler) AssembleWithContext(netFlow gopacket.Flow, t *layers.TCP, ac AssemblerContext) {
+ var conn *connection
+ var half *halfconnection
+ var rev *halfconnection
+
+ a.ret = a.ret[:0]
+ key := key{netFlow, t.TransportFlow()}
+ ci := ac.GetCaptureInfo()
+ timestamp := ci.Timestamp
+
+ conn, half, rev = a.connPool.getConnection(key, false, timestamp, t, ac)
+ if conn == nil {
+ if *debugLog {
+ log.Printf("%v got empty packet on otherwise empty connection", key)
+ }
+ return
+ }
+ conn.mu.Lock()
+ defer conn.mu.Unlock()
+ if half.lastSeen.Before(timestamp) {
+ half.lastSeen = timestamp
+ }
+ a.start = half.nextSeq == invalidSequence && t.SYN
+ if !half.stream.Accept(t, ci, half.dir, rev.ackSeq, &a.start, ac) {
+ if *debugLog {
+ log.Printf("Ignoring packet")
+ }
+ return
+ }
+ if half.closed {
+ // this way is closed
+ return
+ }
+
+ seq, ack, bytes := Sequence(t.Seq), Sequence(t.Ack), t.Payload
+ if t.ACK {
+ half.ackSeq = ack
+ }
+ // TODO: push when Ack is seen ??
+ action := assemblerAction{
+ nextSeq: Sequence(invalidSequence),
+ queue: true,
+ }
+ a.dump("AssembleWithContext()", half)
+ if half.nextSeq == invalidSequence {
+ if t.SYN {
+ if *debugLog {
+ log.Printf("%v saw first SYN packet, returning immediately, seq=%v", key, seq)
+ }
+ seq = seq.Add(1)
+ half.nextSeq = seq
+ action.queue = false
+ } else if a.start {
+ if *debugLog {
+ log.Printf("%v start forced", key)
+ }
+ half.nextSeq = seq
+ action.queue = false
+ } else {
+ if *debugLog {
+ log.Printf("%v waiting for start, storing into connection", key)
+ }
+ }
+ } else {
+ diff := half.nextSeq.Difference(seq)
+ if diff > 0 {
+ if *debugLog {
+ log.Printf("%v gap in sequence numbers (%v, %v) diff %v, storing into connection", key, half.nextSeq, seq, diff)
+ }
+ } else {
+ if *debugLog {
+ log.Printf("%v found contiguous data (%v, %v), returning immediately: len:%d", key, seq, half.nextSeq, len(bytes))
+ }
+ action.queue = false
+ }
+ }
+
+ action = a.handleBytes(bytes, seq, half, ci, t.SYN, t.RST || t.FIN, action, ac)
+ if len(a.ret) > 0 {
+ action.nextSeq = a.sendToConnection(conn, half, ac)
+ }
+ if action.nextSeq != invalidSequence {
+ half.nextSeq = action.nextSeq
+ if t.FIN {
+ half.nextSeq = half.nextSeq.Add(1)
+ }
+ }
+ if *debugLog {
+ log.Printf("%v nextSeq:%d", key, half.nextSeq)
+ }
+}
+
+// Overlap strategies:
+// - new packet overlaps with sent packets:
+// 1) discard new overlapping part
+// 2) overwrite old overlapped (TODO)
+// - new packet overlaps existing queued packets:
+// a) consider "age" by timestamp (TODO)
+// b) consider "age" by being present
+// Then
+// 1) discard new overlapping part
+// 2) overwrite queued part
+
+func (a *Assembler) checkOverlap(half *halfconnection, queue bool, ac AssemblerContext) {
+ var next *page
+ cur := half.last
+ bytes := a.cacheLP.bytes
+ start := a.cacheLP.seq
+ end := start.Add(len(bytes))
+
+ a.dump("before checkOverlap", half)
+
+ // [s6 : e6]
+ // [s1:e1][s2:e2] -- [s3:e3] -- [s4:e4][s5:e5]
+ // [s <--ds-- : --de--> e]
+ for cur != nil {
+
+ if *debugLog {
+ log.Printf("cur = %p (%s)\n", cur, cur)
+ }
+
+ // end < cur.start: continue (5)
+ if end.Difference(cur.seq) > 0 {
+ if *debugLog {
+ log.Printf("case 5\n")
+ }
+ next = cur
+ cur = cur.prev
+ continue
+ }
+
+ curEnd := cur.seq.Add(len(cur.bytes))
+ // start > cur.end: stop (1)
+ if start.Difference(curEnd) <= 0 {
+ if *debugLog {
+ log.Printf("case 1\n")
+ }
+ break
+ }
+
+ diffStart := start.Difference(cur.seq)
+ diffEnd := end.Difference(curEnd)
+
+ // end > cur.end && start < cur.start: drop (3)
+ if diffEnd <= 0 && diffStart >= 0 {
+ if *debugLog {
+ log.Printf("case 3\n")
+ }
+ if cur.isPacket() {
+ half.overlapPackets++
+ }
+ half.overlapBytes += len(cur.bytes)
+ // update links
+ if cur.prev != nil {
+ cur.prev.next = cur.next
+ } else {
+ half.first = cur.next
+ }
+ if cur.next != nil {
+ cur.next.prev = cur.prev
+ } else {
+ half.last = cur.prev
+ }
+ tmp := cur.prev
+ half.pages -= cur.release(a.pc)
+ cur = tmp
+ continue
+ }
+
+ // end > cur.end && start < cur.end: drop cur's end (2)
+ if diffEnd < 0 && start.Difference(curEnd) > 0 {
+ if *debugLog {
+ log.Printf("case 2\n")
+ }
+ cur.bytes = cur.bytes[:-start.Difference(cur.seq)]
+ break
+ } else
+
+ // start < cur.start && end > cur.start: drop cur's start (4)
+ if diffStart > 0 && end.Difference(cur.seq) < 0 {
+ if *debugLog {
+ log.Printf("case 4\n")
+ }
+ cur.bytes = cur.bytes[-end.Difference(cur.seq):]
+ cur.seq = cur.seq.Add(-end.Difference(cur.seq))
+ next = cur
+ } else
+
+ // end < cur.end && start > cur.start: replace bytes inside cur (6)
+ if diffEnd > 0 && diffStart < 0 {
+ if *debugLog {
+ log.Printf("case 6\n")
+ }
+ copy(cur.bytes[-diffStart:-diffStart+len(bytes)], bytes)
+ bytes = bytes[:0]
+ } else {
+ if *debugLog {
+ log.Printf("no overlap\n")
+ }
+ next = cur
+ }
+ cur = cur.prev
+ }
+
+ // Split bytes into pages, and insert in queue
+ a.cacheLP.bytes = bytes
+ a.cacheLP.seq = start
+ if len(bytes) > 0 && queue {
+ p, p2, numPages := a.cacheLP.convertToPages(a.pc, 0, ac)
+ half.queuedPackets++
+ half.queuedBytes += len(bytes)
+ half.pages += numPages
+ if cur != nil {
+ if *debugLog {
+ log.Printf("adding %s after %s", p, cur)
+ }
+ cur.next = p
+ p.prev = cur
+ } else {
+ if *debugLog {
+ log.Printf("adding %s as first", p)
+ }
+ half.first = p
+ }
+ if next != nil {
+ if *debugLog {
+ log.Printf("setting %s as next of new %s", next, p2)
+ }
+ p2.next = next
+ next.prev = p2
+ } else {
+ if *debugLog {
+ log.Printf("setting %s as last", p2)
+ }
+ half.last = p2
+ }
+ }
+ a.dump("After checkOverlap", half)
+}
+
+// Warning: this is a low-level dumper, i.e. a.ret or a.cacheSG might
+// be in an intermediate state; that is not necessarily a problem.
+func (a *Assembler) dump(text string, half *halfconnection) {
+ if !*debugLog {
+ return
+ }
+ log.Printf("%s: dump\n", text)
+ if half != nil {
+ p := half.first
+ if p == nil {
+ log.Printf(" * half.first = %p, no chunks queued\n", p)
+ } else {
+ s := 0
+ nb := 0
+ log.Printf(" * half.first = %p, queued chunks:", p)
+ for p != nil {
+ log.Printf("\t%s bytes:%s\n", p, hex.EncodeToString(p.bytes))
+ s += len(p.bytes)
+ nb++
+ p = p.next
+ }
+ log.Printf("\t%d chunks for %d bytes", nb, s)
+ }
+ log.Printf(" * half.last = %p\n", half.last)
+ log.Printf(" * half.saved = %p\n", half.saved)
+ p = half.saved
+ for p != nil {
+ log.Printf("\tseq:%d %s bytes:%s\n", p.getSeq(), p, hex.EncodeToString(p.bytes))
+ p = p.next
+ }
+ }
+ log.Printf(" * a.ret\n")
+ for i, r := range a.ret {
+ log.Printf("\t%d: %s b:%s\n", i, r.captureInfo(), hex.EncodeToString(r.getBytes()))
+ }
+ log.Printf(" * a.cacheSG.all\n")
+ for i, r := range a.cacheSG.all {
+ log.Printf("\t%d: %s b:%s\n", i, r.captureInfo(), hex.EncodeToString(r.getBytes()))
+ }
+}
+
+func (a *Assembler) overlapExisting(half *halfconnection, start, end Sequence, bytes []byte) ([]byte, Sequence) {
+ if half.nextSeq == invalidSequence {
+ // no start yet
+ return bytes, start
+ }
+ diff := start.Difference(half.nextSeq)
+ if diff == 0 {
+ return bytes, start
+ }
+ s := 0
+ e := len(bytes)
+ // TODO: depending on strategy, we might want to shrink half.saved if possible
+ if e != 0 {
+ if *debugLog {
+ log.Printf("Overlap detected: ignoring current packet's first %d bytes", diff)
+ }
+ half.overlapPackets++
+ half.overlapBytes += diff
+ }
+ start = start.Add(diff)
+ s += diff
+ if s >= e {
+ // Completely included in sent
+ s = e
+ }
+ bytes = bytes[s:]
+ e -= diff
+ return bytes, start
+}
+
+// handleBytes prepares the packet's bytes to be sent to the stream right away or queued for later.
+func (a *Assembler) handleBytes(bytes []byte, seq Sequence, half *halfconnection, ci gopacket.CaptureInfo, start bool, end bool, action assemblerAction, ac AssemblerContext) assemblerAction {
+ a.cacheLP.bytes = bytes
+ a.cacheLP.start = start
+ a.cacheLP.end = end
+ a.cacheLP.seq = seq
+ a.cacheLP.ci = ci
+ a.cacheLP.ac = ac
+
+ if action.queue {
+ a.checkOverlap(half, true, ac)
+ if (a.MaxBufferedPagesPerConnection > 0 && half.pages >= a.MaxBufferedPagesPerConnection) ||
+ (a.MaxBufferedPagesTotal > 0 && a.pc.used >= a.MaxBufferedPagesTotal) {
+ if *debugLog {
+ log.Printf("hit max buffer size: %+v, %v, %v", a.AssemblerOptions, half.pages, a.pc.used)
+ }
+ action.queue = false
+ a.addNextFromConn(half)
+ }
+ a.dump("handleBytes after queue", half)
+ } else {
+ a.cacheLP.bytes, a.cacheLP.seq = a.overlapExisting(half, seq, seq.Add(len(bytes)), a.cacheLP.bytes)
+ a.checkOverlap(half, false, ac)
+ if len(a.cacheLP.bytes) != 0 || end || start {
+ a.ret = append(a.ret, &a.cacheLP)
+ }
+ a.dump("handleBytes after no queue", half)
+ }
+ return action
+}
+
+func (a *Assembler) setStatsToSG(half *halfconnection) {
+ a.cacheSG.queuedBytes = half.queuedBytes
+ half.queuedBytes = 0
+ a.cacheSG.queuedPackets = half.queuedPackets
+ half.queuedPackets = 0
+ a.cacheSG.overlapBytes = half.overlapBytes
+ half.overlapBytes = 0
+ a.cacheSG.overlapPackets = half.overlapPackets
+ half.overlapPackets = 0
+}
+
+// buildSG builds the ScatterGather object, i.e. prepends saved bytes and
+// appends contiguous bytes.
+func (a *Assembler) buildSG(half *halfconnection) (bool, Sequence) {
+ // find if there are skipped bytes
+ skip := -1
+ if half.nextSeq != invalidSequence {
+ skip = half.nextSeq.Difference(a.ret[0].getSeq())
+ }
+ last := a.ret[0].getSeq().Add(a.ret[0].length())
+ // Prepend saved bytes
+ saved := a.addPending(half, a.ret[0].getSeq())
+ // Append continuous bytes
+ nextSeq := a.addContiguous(half, last)
+ a.cacheSG.all = a.ret
+ a.cacheSG.Direction = half.dir
+ a.cacheSG.Skip = skip
+ a.cacheSG.saved = saved
+ a.cacheSG.toKeep = -1
+ a.setStatsToSG(half)
+ a.dump("after buildSG", half)
+ return a.ret[len(a.ret)-1].isEnd(), nextSeq
+}
+
+func (a *Assembler) cleanSG(half *halfconnection, ac AssemblerContext) {
+ cur := 0
+ ndx := 0
+ skip := 0
+
+ a.dump("cleanSG(start)", half)
+
+ var r byteContainer
+ // Find first page to keep
+ if a.cacheSG.toKeep < 0 {
+ ndx = len(a.cacheSG.all)
+ } else {
+ skip = a.cacheSG.toKeep
+ found := false
+ for ndx, r = range a.cacheSG.all {
+ if a.cacheSG.toKeep < cur+r.length() {
+ found = true
+ break
+ }
+ cur += r.length()
+ if skip >= r.length() {
+ skip -= r.length()
+ }
+ }
+ if !found {
+ ndx++
+ }
+ }
+ // Release consumed pages
+ for _, r := range a.cacheSG.all[:ndx] {
+ if r == half.saved {
+ if half.saved.next != nil {
+ half.saved.next.prev = nil
+ }
+ half.saved = half.saved.next
+ } else if r == half.first {
+ if half.first.next != nil {
+ half.first.next.prev = nil
+ }
+ if half.first == half.last {
+ half.first, half.last = nil, nil
+ } else {
+ half.first = half.first.next
+ }
+ }
+ half.pages -= r.release(a.pc)
+ }
+ a.dump("after consumed release", half)
+ // Keep un-consumed pages
+ nbKept := 0
+ half.saved = nil
+ var saved *page
+ for _, r := range a.cacheSG.all[ndx:] {
+ first, last, nb := r.convertToPages(a.pc, skip, ac)
+ if half.saved == nil {
+ half.saved = first
+ } else {
+ saved.next = first
+ first.prev = saved
+ }
+ saved = last
+ nbKept += nb
+ }
+ if *debugLog {
+ log.Printf("Remaining %d chunks in SG\n", nbKept)
+ log.Printf("%s\n", a.Dump())
+ a.dump("after cleanSG()", half)
+ }
+}
+
+// sendToConnection sends the current values in a.ret to the connection, closing
+// the connection if the last thing sent had End set.
+func (a *Assembler) sendToConnection(conn *connection, half *halfconnection, ac AssemblerContext) Sequence {
+ if *debugLog {
+ log.Printf("sendToConnection\n")
+ }
+ end, nextSeq := a.buildSG(half)
+ half.stream.ReassembledSG(&a.cacheSG, ac)
+ a.cleanSG(half, ac)
+ if end {
+ a.closeHalfConnection(conn, half)
+ }
+ if *debugLog {
+ log.Printf("after sendToConnection: nextSeq: %d\n", nextSeq)
+ }
+ return nextSeq
+}
+
+// addPending prepends the pages saved by a previous KeepFrom() to a.ret if
+// they are contiguous with firstSeq, releasing them otherwise. It returns the
+// number of saved bytes prepended.
+func (a *Assembler) addPending(half *halfconnection, firstSeq Sequence) int {
+ if half.saved == nil {
+ return 0
+ }
+ s := 0
+ ret := []byteContainer{}
+ for p := half.saved; p != nil; p = p.next {
+ if *debugLog {
+ log.Printf("adding pending @%p %s (%s)\n", p, p, hex.EncodeToString(p.bytes))
+ }
+ ret = append(ret, p)
+ s += len(p.bytes)
+ }
+ if half.saved.seq.Add(s) != firstSeq {
+ // non-continuous saved: drop them
+ var next *page
+ for p := half.saved; p != nil; p = next {
+ next = p.next
+ p.release(a.pc)
+ }
+ half.saved = nil
+ ret = []byteContainer{}
+ s = 0
+ }
+
+ a.ret = append(ret, a.ret...)
+ return s
+}
+
+// addContiguous moves the pages queued on the half-connection that are contiguous
+// with lastSeq into a.ret and returns the new next-expected sequence.
+func (a *Assembler) addContiguous(half *halfconnection, lastSeq Sequence) Sequence {
+ page := half.first
+ if page == nil {
+ if *debugLog {
+ log.Printf("addContiguous(%d): no pages\n", lastSeq)
+ }
+ return lastSeq
+ }
+ if lastSeq == invalidSequence {
+ lastSeq = page.seq
+ }
+ for page != nil && lastSeq.Difference(page.seq) == 0 {
+ if *debugLog {
+ log.Printf("addContiguous: lastSeq: %d, first.seq=%d, page.seq=%d\n", half.nextSeq, half.first.seq, page.seq)
+ }
+ lastSeq = lastSeq.Add(len(page.bytes))
+ a.ret = append(a.ret, page)
+ half.first = page.next
+ if half.first == nil {
+ half.last = nil
+ }
+ if page.next != nil {
+ page.next.prev = nil
+ }
+ page = page.next
+ }
+ return lastSeq
+}
+
+// skipFlush skips the bytes we're still waiting for and flushes the first
+// set of bytes we have queued. If nothing is queued, it closes the
+// half-connection.
+func (a *Assembler) skipFlush(conn *connection, half *halfconnection) {
+ if *debugLog {
+ log.Printf("skipFlush %v\n", half.nextSeq)
+ }
+ // Well, it's embarrassing if there is still something in half.saved
+ // FIXME: change API to give back saved + new/no packets
+ if half.first == nil {
+ a.closeHalfConnection(conn, half)
+ return
+ }
+ a.ret = a.ret[:0]
+ a.addNextFromConn(half)
+ nextSeq := a.sendToConnection(conn, half, a.ret[0].assemblerContext())
+ if nextSeq != invalidSequence {
+ half.nextSeq = nextSeq
+ }
+}
+
+func (a *Assembler) closeHalfConnection(conn *connection, half *halfconnection) {
+ if *debugLog {
+ log.Printf("%v closing", conn)
+ }
+ half.closed = true
+ for p := half.first; p != nil; p = p.next {
+ // FIXME: it should be already empty
+ a.pc.replace(p)
+ half.pages--
+ }
+ if conn.s2c.closed && conn.c2s.closed {
+ if half.stream.ReassemblyComplete(nil) { //FIXME: which context to pass ?
+ a.connPool.remove(conn)
+ }
+ }
+}
+
+// addNextFromConn pops the first page off a half-connection and adds it to the
+// return array.
+func (a *Assembler) addNextFromConn(conn *halfconnection) {
+ if conn.first == nil {
+ return
+ }
+ if *debugLog {
+ log.Printf(" adding from conn (%v, %v) %v (%d)\n", conn.first.seq, conn.nextSeq, conn.nextSeq-conn.first.seq, len(conn.first.bytes))
+ }
+ a.ret = append(a.ret, conn.first)
+ conn.first = conn.first.next
+ if conn.first != nil {
+ conn.first.prev = nil
+ } else {
+ conn.last = nil
+ }
+}
+
+// FlushOptions provide options for flushing connections.
+type FlushOptions struct {
+ T time.Time // If nonzero, only connections with data older than T are flushed
+ TC time.Time // If nonzero, only connections with data older than TC are closed (if no FIN/RST received)
+}
+
+// FlushWithOptions finds any streams waiting for packets older than
+// the given time T, and pushes through the data they have (IE: tells
+// them to stop waiting and skip the data they're waiting for).
+//
+// It also closes streams older than TC (which can be set to zero to keep
+// long-lived streams alive while still flushing their data).
+//
+// Each Stream maintains a list of zero or more sets of bytes it has received
+// out-of-order. For example, if it has processed up through sequence number
+// 10, it might have bytes [15-20), [20-25), [30-50) in its list. Each set of
+// bytes also carries the timestamp at which it was originally seen. A flush call will
+// look at the smallest subsequent set of bytes, in this case [15-20), and if
+// its timestamp is older than the passed-in time, it will push it and all
+// contiguous byte-sets out to the Stream's ReassembledSG function. In this case,
+// it will push [15-20), but also [20-25), since that's contiguous. It will
+// only push [30-50) if its timestamp is also older than the passed-in time,
+// otherwise it will wait until the next FlushCloseOlderThan to see if bytes
+// [25-30) come in.
+//
+// Returns the number of connections flushed, and of those, the number closed
+// because of the flush.
+func (a *Assembler) FlushWithOptions(opt FlushOptions) (flushed, closed int) {
+ conns := a.connPool.connections()
+ closes := 0
+ flushes := 0
+ for _, conn := range conns {
+ remove := false
+ conn.mu.Lock()
+ for _, half := range []*halfconnection{&conn.s2c, &conn.c2s} {
+ flushed, closed := a.flushClose(conn, half, opt.T, opt.TC)
+ if flushed {
+ flushes++
+ }
+ if closed {
+ closes++
+ }
+ }
+ if conn.s2c.closed && conn.c2s.closed && conn.s2c.lastSeen.Before(opt.TC) && conn.c2s.lastSeen.Before(opt.TC) {
+ remove = true
+ }
+ conn.mu.Unlock()
+ if remove {
+ a.connPool.remove(conn)
+ }
+ }
+ return flushes, closes
+}
+
+// FlushCloseOlderThan flushes and closes streams older than given time
+func (a *Assembler) FlushCloseOlderThan(t time.Time) (flushed, closed int) {
+ return a.FlushWithOptions(FlushOptions{T: t, TC: t})
+}
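
    A sketch, not part of this diff, of the flushing pattern the comment above describes,
    written as if inside this package; the function name, channel plumbing and durations
    are illustrative.

    // runLoop assembles packets from a channel and periodically flushes/closes
    // connections that have been waiting too long.
    func runLoop(assembler *Assembler, packets <-chan gopacket.Packet) {
        ticker := time.Tick(time.Minute)
        for {
            select {
            case packet, ok := <-packets:
                if !ok {
                    assembler.FlushAll() // input finished: flush and close everything
                    return
                }
                if tcp, isTCP := packet.TransportLayer().(*layers.TCP); isTCP && packet.NetworkLayer() != nil {
                    assembler.Assemble(packet.NetworkLayer().NetworkFlow(), tcp)
                }
            case <-ticker:
                now := time.Now()
                assembler.FlushWithOptions(FlushOptions{
                    T:  now.Add(-2 * time.Minute),  // push through data stuck for 2 minutes
                    TC: now.Add(-10 * time.Minute), // close connections idle for 10 minutes
                })
            }
        }
    }
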
+
+func (a *Assembler) flushClose(conn *connection, half *halfconnection, t time.Time, tc time.Time) (bool, bool) {
+ flushed, closed := false, false
+ if half.closed {
+ return flushed, closed
+ }
+ for half.first != nil && half.first.seen.Before(t) {
+ flushed = true
+ a.skipFlush(conn, half)
+ if half.closed {
+ closed = true
+ }
+ }
+ if !half.closed && half.first == nil && half.lastSeen.Before(tc) {
+ a.closeHalfConnection(conn, half)
+ closed = true
+ }
+ return flushed, closed
+}
+
+// FlushAll flushes all remaining data into all remaining connections and closes
+// those connections. It returns the total number of connections flushed/closed
+// by the call.
+func (a *Assembler) FlushAll() (closed int) {
+ conns := a.connPool.connections()
+ closed = len(conns)
+ for _, conn := range conns {
+ conn.mu.Lock()
+ for _, half := range []*halfconnection{&conn.s2c, &conn.c2s} {
+ for !half.closed {
+ a.skipFlush(conn, half)
+ }
+ if !half.closed {
+ a.closeHalfConnection(conn, half)
+ }
+ }
+ conn.mu.Unlock()
+ }
+ return
+}
+
+func min(a, b int) int {
+ if a < b {
+ return a
+ }
+ return b
+}
diff --git a/vendor/github.com/google/gopacket/reassembly/tcpassembly_test.go b/vendor/github.com/google/gopacket/reassembly/tcpassembly_test.go
new file mode 100644
index 0000000..b29cf2f
--- /dev/null
+++ b/vendor/github.com/google/gopacket/reassembly/tcpassembly_test.go
@@ -0,0 +1,1660 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package reassembly
+
+import (
+ "encoding/hex"
+ "fmt"
+ "net"
+ "reflect"
+ "runtime"
+ "testing"
+ "time"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
+var netFlow gopacket.Flow
+
+var testDebug = false
+
+func init() {
+ netFlow, _ = gopacket.FlowFromEndpoints(
+ layers.NewIPEndpoint(net.IP{1, 2, 3, 4}),
+ layers.NewIPEndpoint(net.IP{5, 6, 7, 8}))
+}
+
+type Reassembly struct {
+ Bytes []byte
+ Start bool
+ End bool
+ Skip int
+}
+
+type testSequence struct {
+ in layers.TCP
+ want []Reassembly
+}
+
+/* For benchmark: do nothing */
+type testFactoryBench struct {
+}
+
+func (t *testFactoryBench) New(a, b gopacket.Flow, tcp *layers.TCP, ac AssemblerContext) Stream {
+ return t
+}
+func (t *testFactoryBench) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, seq Sequence, start *bool, ac AssemblerContext) bool {
+ return true
+}
+func (t *testFactoryBench) ReassembledSG(sg ScatterGather, ac AssemblerContext) {
+}
+func (t *testFactoryBench) ReassemblyComplete(ac AssemblerContext) bool {
+ return true
+}
+
+/* For tests: append bytes */
+type testFactory struct {
+ reassembly []Reassembly
+}
+
+func (t *testFactory) New(a, b gopacket.Flow, tcp *layers.TCP, ac AssemblerContext) Stream {
+ return t
+}
+func (t *testFactory) Reassembled(r []Reassembly) {
+ t.reassembly = r
+ for i := 0; i < len(r); i++ {
+ //t.reassembly[i].Seen = time.Time{}
+ }
+}
+func (t *testFactory) ReassembledSG(sg ScatterGather, ac AssemblerContext) {
+ _, start, end, skip := sg.Info()
+ l, _ := sg.Lengths()
+ t.reassembly = append(t.reassembly, Reassembly{
+ Bytes: sg.Fetch(l),
+ Skip: skip,
+ Start: start,
+ End: end,
+ })
+}
+
+func (t *testFactory) ReassemblyComplete(ac AssemblerContext) bool {
+ return true
+}
+
+func (t *testFactory) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, seq Sequence, start *bool, ac AssemblerContext) bool {
+ return true
+}
+
+/* For memory checks: counts bytes */
+type testMemoryFactory struct {
+ bytes int
+}
+
+func (tf *testMemoryFactory) New(a, b gopacket.Flow, tcp *layers.TCP, ac AssemblerContext) Stream {
+ return tf
+}
+func (tf *testMemoryFactory) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, seq Sequence, start *bool, ac AssemblerContext) bool {
+ return true
+}
+func (tf *testMemoryFactory) ReassembledSG(sg ScatterGather, ac AssemblerContext) {
+ bytes, _ := sg.Lengths()
+ tf.bytes += bytes
+}
+func (tf *testMemoryFactory) ReassemblyComplete(ac AssemblerContext) bool {
+ return true
+}
+
+/*
+ * Tests
+ */
+
+func test(t *testing.T, s []testSequence) {
+ fact := &testFactory{}
+ p := NewStreamPool(fact)
+ a := NewAssembler(p)
+ a.MaxBufferedPagesPerConnection = 4
+ for i, test := range s {
+ fact.reassembly = []Reassembly{}
+ if testDebug {
+ fmt.Printf("#### test: #%d: sending:%s\n", i, hex.EncodeToString(test.in.BaseLayer.Payload))
+ }
+ a.Assemble(netFlow, &test.in)
+ final := []Reassembly{}
+ if len(test.want) > 0 {
+ final = append(final, Reassembly{})
+ for _, w := range test.want {
+ final[0].Bytes = append(final[0].Bytes, w.Bytes...)
+ if w.End {
+ final[0].End = true
+ }
+ if w.Start {
+ final[0].Start = true
+ }
+ if w.Skip != 0 {
+ final[0].Skip = w.Skip
+ }
+ }
+ }
+ if !reflect.DeepEqual(fact.reassembly, final) {
+ t.Fatalf("test %v:\nwant: %v\n got: %v\n", i, final, fact.reassembly)
+ }
+ if testDebug {
+ fmt.Printf("test %v passing...(%s)\n", i, final)
+ }
+ }
+}
+
+func TestReorder(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1001,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1004,
+ BaseLayer: layers.BaseLayer{Payload: []byte{4, 5, 6}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{10, 11, 12}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Skip: -1,
+ Bytes: []byte{1, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{4, 5, 6},
+ },
+ Reassembly{
+ Bytes: []byte{7, 8, 9},
+ },
+ Reassembly{
+ Bytes: []byte{10, 11, 12},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1016,
+ BaseLayer: layers.BaseLayer{Payload: []byte{2, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1019,
+ BaseLayer: layers.BaseLayer{Payload: []byte{3, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1013,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Bytes: []byte{1, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{2, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{3, 2, 3},
+ },
+ },
+ },
+ })
+}
+
+func TestMaxPerSkip(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1000,
+ SYN: true,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{3, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{4, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1013,
+ BaseLayer: layers.BaseLayer{Payload: []byte{5, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1016,
+ BaseLayer: layers.BaseLayer{Payload: []byte{6, 2, 3}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Skip: 3,
+ Bytes: []byte{3, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{4, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{5, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{6, 2, 3},
+ },
+ },
+ },
+ })
+}
+
+func TestReorderFast(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{3, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1004,
+ BaseLayer: layers.BaseLayer{Payload: []byte{2, 2, 3}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Bytes: []byte{2, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{3, 2, 3},
+ },
+ },
+ },
+ })
+}
+
+func TestOverlap(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9, 0, 1, 2, 3, 4, 5}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Bytes: []byte{1, 2, 3, 4, 5},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 1, 2, 3, 4, 5, 6, 7}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Bytes: []byte{6, 7},
+ },
+ },
+ },
+ })
+}
+
+func TestBufferedOverlap1(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9, 0, 1, 2, 3, 4, 5}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 1, 2, 3, 4, 5, 6, 7}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+ },
+ Reassembly{
+ Bytes: []byte{1, 2, 3, 4, 5},
+ },
+ Reassembly{
+ Bytes: []byte{6, 7},
+ },
+ },
+ },
+ })
+}
+
+func TestBufferedOverlapCase6(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9, 0, 1, 2, 3, 4, 5}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{10, 11, 12, 13, 14}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+ },
+ Reassembly{
+ Bytes: []byte{11, 12, 13, 14, 5},
+ },
+ },
+ },
+ })
+}
+
+func TestBufferedOverlapExisting(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1000,
+ SYN: true,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3, 4, 5, 6, 7},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1005,
+ BaseLayer: layers.BaseLayer{Payload: []byte{5, 6, 7, 8, 9, 10}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Bytes: []byte{8, 9, 10},
+ },
+ },
+ },
+ })
+}
+
+func TestBufferedOverlapReemit(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1000,
+ SYN: true,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3, 4, 5, 6, 7},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1003,
+ BaseLayer: layers.BaseLayer{Payload: []byte{3, 4, 5}},
+ },
+ want: []Reassembly{},
+ },
+ })
+}
+
+func TestReorderRetransmission2(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1001,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{2, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{2, 2, 3}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{10, 11}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1004,
+ BaseLayer: layers.BaseLayer{Payload: []byte{6, 6, 6, 2, 2}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Skip: -1,
+ Bytes: []byte{1, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{6, 6, 6},
+ },
+ Reassembly{
+ Bytes: []byte{2, 2, 3},
+ },
+ Reassembly{
+ Bytes: []byte{10, 11},
+ },
+ },
+ },
+ })
+}
+
+func TestOverrun1(t *testing.T) {
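+	// Exercises 32-bit sequence number wrap-around: the SYN occupies Seq
+	// 0xFFFFFFFF, so its 10-byte payload covers sequence numbers 0-9 and the
+	// next in-order segment starts at Seq 10.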
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 0xFFFFFFFF,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+ },
+ },
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 10,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Bytes: []byte{1, 2, 3, 4},
+ },
+ },
+ },
+ })
+}
+
+func TestOverrun2(t *testing.T) {
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 10,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4}},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 0xFFFFFFFF,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+ },
+ Reassembly{
+ Bytes: []byte{1, 2, 3, 4},
+ },
+ },
+ },
+ })
+}
+
+func TestCacheLargePacket(t *testing.T) {
+ data := make([]byte, pageBytes*3)
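+	// A buffered segment spanning several pages is delivered back in
+	// page-sized chunks once the preceding gap is filled, as the expected
+	// Reassembly slices below show.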
+ test(t, []testSequence{
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1001,
+ BaseLayer: layers.BaseLayer{Payload: data},
+ },
+ want: []Reassembly{},
+ },
+ {
+ in: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1000,
+ SYN: true,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ want: []Reassembly{
+ Reassembly{
+ Start: true,
+ Bytes: []byte{},
+ },
+ Reassembly{
+ Bytes: data[:pageBytes],
+ },
+ Reassembly{
+ Bytes: data[pageBytes : pageBytes*2],
+ },
+ Reassembly{
+ Bytes: data[pageBytes*2 : pageBytes*3],
+ },
+ },
+ },
+ })
+}
+
+/*
+ * Keep
+ */
+type testKeepFactory struct {
+ keep int
+ bytes []byte
+ skipped int
+ t *testing.T
+}
+
+func (tkf *testKeepFactory) New(a, b gopacket.Flow, tcp *layers.TCP, ac AssemblerContext) Stream {
+ return tkf
+}
+func (tkf *testKeepFactory) ReassembledSG(sg ScatterGather, ac AssemblerContext) {
+ l, _ := sg.Lengths()
+ _, _, _, tkf.skipped = sg.Info()
+ tkf.bytes = sg.Fetch(l)
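+	// KeepFrom asks the assembler to keep everything from this offset
+	// buffered, so those bytes are delivered again on the next call.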
+ sg.KeepFrom(tkf.keep)
+}
+func (tkf *testKeepFactory) ReassemblyComplete(ac AssemblerContext) bool {
+ return true
+}
+
+func (tkf *testKeepFactory) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, seq Sequence, start *bool, ac AssemblerContext) bool {
+ return true
+}
+
+type testKeepSequence struct {
+ tcp layers.TCP
+ keep int
+ want []byte
+ skipped int
+}
+
+func testKeep(t *testing.T, s []testKeepSequence) {
+ fact := &testKeepFactory{t: t}
+ p := NewStreamPool(fact)
+ a := NewAssembler(p)
+ a.MaxBufferedPagesPerConnection = 4
+ port := layers.TCPPort(0)
+ for i, test := range s {
+ // Derive flow and direction from the ports: the first source port seen is treated as the client side.
+ flow := netFlow
+ dir := TCPDirClientToServer
+ if port == 0 {
+ port = test.tcp.SrcPort
+ }
+ if port != test.tcp.SrcPort {
+ dir = dir.Reverse()
+ flow = flow.Reverse()
+ }
+ test.tcp.SetInternalPortsForTesting()
+ fact.keep = test.keep
+ fact.bytes = []byte{}
+ if testDebug {
+ fmt.Printf("#### testKeep: #%d: sending:%s\n", i, hex.EncodeToString(test.tcp.BaseLayer.Payload))
+ }
+ a.Assemble(flow, &test.tcp)
+ if !reflect.DeepEqual(fact.bytes, test.want) {
+ t.Fatalf("#%d: invalid bytes: got %v, expected %v", i, fact.bytes, test.want)
+ }
+ if fact.skipped != test.skipped {
+ t.Fatalf("#%d: expecting %d skipped bytes, got %d", i, test.skipped, fact.skipped)
+ }
+ if testDebug {
+ fmt.Printf("#### testKeep: #%d: bytes: %s\n", i, hex.EncodeToString(fact.bytes))
+ }
+ }
+}
+
+func TestKeepSimpleOnBoundary(t *testing.T) {
+ testKeep(t, []testKeepSequence{
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ },
+ keep: 0,
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9, 0, 1, 2, 3, 4, 5}},
+ },
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5},
+ },
+ })
+}
+
+func TestKeepSimpleNotBoundaryLive(t *testing.T) {
+ testKeep(t, []testKeepSequence{
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ },
+ keep: 1,
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9, 0, 1, 2, 3, 4, 5}},
+ },
+ want: []byte{2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5},
+ },
+ })
+}
+
+func TestKeepSimpleNotBoundaryAlreadyKept(t *testing.T) {
+ testKeep(t, []testKeepSequence{
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0x10}},
+ },
+ keep: 0, // 1→10
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0x10},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15}},
+ },
+ keep: 11, // 12→15
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1016,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0x16, 0x17, 0x18}},
+ },
+ want: []byte{0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18},
+ },
+ })
+}
+
+func TestKeepLonger(t *testing.T) {
+ testKeep(t, []testKeepSequence{
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}},
+ },
+ keep: 0,
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1007,
+ BaseLayer: layers.BaseLayer{Payload: []byte{7, 8, 9, 10, 11, 12, 13, 14, 15}},
+ },
+ keep: 0,
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{10, 11, 12, 13, 14, 15, 16, 17}},
+ },
+ want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17},
+ },
+ })
+}
+
+func TestKeepWithFlush(t *testing.T) {
+ testKeep(t, []testKeepSequence{
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1}},
+ },
+ keep: 1,
+ want: []byte{1},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1003,
+ BaseLayer: layers.BaseLayer{Payload: []byte{3}},
+ },
+ keep: 0,
+ want: []byte{},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1004,
+ BaseLayer: layers.BaseLayer{Payload: []byte{4}},
+ },
+ keep: 0,
+ want: []byte{},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1006,
+ BaseLayer: layers.BaseLayer{Payload: []byte{6}},
+ },
+ keep: 0,
+ want: []byte{},
+ },
+ // Exceeding 4 buffered pages: the oldest contiguous pages are flushed
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1008,
+ BaseLayer: layers.BaseLayer{Payload: []byte{8}},
+ },
+ keep: 0,
+ skipped: 1,
+ want: []byte{3, 4},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1010,
+ BaseLayer: layers.BaseLayer{Payload: []byte{10}},
+ },
+ keep: 0,
+ skipped: 1,
+ want: []byte{6},
+ },
+ {
+ tcp: layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 1012,
+ BaseLayer: layers.BaseLayer{Payload: []byte{12}},
+ },
+ keep: 0,
+ skipped: 1,
+ want: []byte{8},
+ },
+ })
+}
+
+/*
+ * FSM tests
+ */
+/* For FSM: bump nb on accepted packet */
+type testFSMFactory struct {
+ nb int
+ fsm TCPSimpleFSM
+}
+
+func (t *testFSMFactory) New(a, b gopacket.Flow, tcp *layers.TCP, ac AssemblerContext) Stream {
+ return t
+}
+func (t *testFSMFactory) ReassembledSG(sg ScatterGather, ac AssemblerContext) {
+}
+func (t *testFSMFactory) ReassemblyComplete(ac AssemblerContext) bool {
+ return false
+}
+
+func (t *testFSMFactory) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, seq Sequence, start *bool, ac AssemblerContext) bool {
+ ok := t.fsm.CheckState(tcp, dir)
+ if ok {
+ t.nb++
+ }
+ return ok
+}
+
+type testFSMSequence struct {
+ tcp layers.TCP
+ ci gopacket.CaptureInfo
+ nb int
+}
+
+func (seq *testFSMSequence) GetCaptureInfo() gopacket.CaptureInfo {
+ return seq.ci
+}
+
+func testFSM(t *testing.T, s []testFSMSequence) {
+ fact := &testFSMFactory{}
+ p := NewStreamPool(fact)
+ a := NewAssembler(p)
+ //a.MaxBufferedPagesPerConnection = 4
+ fact.nb = 0
+ port := layers.TCPPort(0)
+ for i, test := range s {
+ // Derive flow and direction from the ports: the first source port seen is treated as the client side.
+ flow := netFlow
+ dir := TCPDirClientToServer
+ if port == 0 {
+ port = test.tcp.SrcPort
+ }
+ if port != test.tcp.SrcPort {
+ dir = dir.Reverse()
+ flow = flow.Reverse()
+ }
+ test.tcp.SetInternalPortsForTesting()
+ a.AssembleWithContext(flow, &test.tcp, &test)
+ if fact.nb != test.nb {
+ t.Fatalf("#%d: packet rejected: got %d, expected %d", i, fact.nb, test.nb)
+ }
+ }
+}
+
+func TestFSMnormalFlow(t *testing.T) {
+ testFSM(t, []testFSMSequence{
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511116,
+ Ack: 0,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 566690000),
+ },
+ nb: 1,
+ },
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787765,
+ Ack: 374511117,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590332000),
+ },
+ nb: 2,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590346000),
+ },
+ nb: 3,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 31, 104, 196, 0, 32, 0, 1, 0, 0, 0, 0, 0, 1, 2, 85, 83, 0, 0, 6, 0, 1, 0, 0, 41, 16, 0, 0, 0, 128, 0, 0, 0}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ },
+ nb: 4,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787766,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 613687000),
+ },
+ nb: 5,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787766,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{8, 133, 104, 196, 132, 0, 0, 1, 0, 2, 0, 7, 0, 19, 2, 85, 83, 0, 0, 6, 0, 1, 2, 117, 115, 0, 0, 6, 0, 1, 0, 0, 3, 132, 0, 54, 1, 97, 5, 99, 99, 116, 108, 100, 192, 20, 10, 104, 111, 115, 116, 109, 97, 115, 116, 101, 114, 7, 110, 101, 117, 115, 116, 97, 114, 3, 98, 105, 122, 0, 120, 18, 40, 205, 0, 0, 3, 132, 0, 0, 3, 132, 0, 9, 58, 128, 0, 1, 81, 128, 192, 20, 0, 46, 0, 1, 0, 0, 3, 132, 0, 150, 0, 6, 5, 1, 0, 0, 3, 132, 85, 138, 90, 146, 85, 98, 191, 130, 27, 78, 2, 117, 115, 0, 69, 13, 35, 189, 141, 225, 107, 238, 108, 182, 207, 44, 105, 31, 212, 103, 32, 93, 217, 108, 20, 231, 188, 28, 241, 237, 104, 182, 117, 121, 195, 112, 64, 96, 237, 248, 6, 181, 186, 96, 60, 6, 18, 29, 188, 96, 201, 140, 251, 61, 71, 177, 108, 156, 9, 83, 125, 172, 188, 75, 81, 67, 218, 55, 93, 131, 243, 15, 190, 75, 4, 165, 226, 124, 49, 67, 142, 131, 239, 240, 76, 225, 10, 242, 68, 88, 240, 200, 27, 97, 102, 73, 92, 73, 133, 170, 175, 198, 99, 109, 90, 16, 162, 101, 95, 96, 102, 250, 91, 74, 80, 3, 87, 167, 50, 230, 9, 213, 7, 222, 197, 87, 183, 190, 148, 247, 207, 204, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 10, 1, 102, 5, 99, 99, 116, 108, 100, 192, 12, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 97, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 98, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 99, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 101, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 107, 193, 8, 192, 118, 0, 46, 0, 1, 0, 7, 233, 0, 0, 150, 0, 2, 5, 1, 0, 7, 233, 0, 85, 127, 33, 92, 85, 87, 134, 98, 27, 78, 2, 117, 115, 0, 19, 227, 175, 75, 88, 245, 164, 158, 150, 198, 57, 253, 150, 179, 161, 52, 24, 56, 229, 176, 175, 40, 45, 232, 188, 171, 131, 197, 107, 125, 218, 192, 78, 221, 146, 33, 114, 55, 43, 12, 131, 213, 51, 98, 37, 2, 102, 161, 232, 115, 177, 210, 51, 169, 215, 133, 56, 190, 91, 75, 8, 222, 231, 202, 139, 28, 187, 249, 72, 21, 23, 56, 63, 72, 126, 142, 242, 195, 242, 64, 208, 134, 100, 157, 197, 159, 43, 148, 20, 70, 117, 152, 159, 35, 200, 220, 49, 234, 173, 210, 91, 34, 210, 192, 7, 197, 112, 117, 208, 234, 42, 49, 133, 237, 197, 14, 244, 149, 191, 142, 36, 252, 42, 48, 182, 189, 9, 68, 1, 65, 5, 67, 67, 84, 76, 68, 193, 126, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 124, 70, 1, 66, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 125, 70, 194, 26, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 3, 209, 174, 255, 255, 255, 255, 255, 255, 255, 255, 255, 126, 1, 67, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 127, 70, 1, 69, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 126, 70, 1, 70, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 209, 173, 58, 70, 194, 108, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 0, 54, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 1, 75, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 128, 70, 194, 154, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 3, 226, 57, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 194, 2, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 230, 49, 85, 73, 83, 2, 27, 78, 2, 117, 115, 0, 82, 36, 11, 141, 74, 85, 70, 98, 179, 63, 173, 83, 8, 70, 155, 41, 102, 166, 140, 62, 71, 178, 130, 38, 171, 200, 180, 68, 2, 215, 45, 6, 43, 59, 171, 146, 223, 215, 9, 77, 5, 104, 167, 42, 237, 170, 30, 114, 205, 129, 59, 225, 152, 224, 79, 1, 65, 68, 208, 153, 121, 237, 199, 87, 2, 251, 100, 105, 59, 24, 73, 226, 169, 121, 250, 91, 41, 124, 14, 23, 135, 52, 2, 86, 72, 224, 100, 135, 70, 216, 16, 107, 84, 59, 13, 168, 58, 187, 54, 98, 230, 167, 246, 42, 46, 156, 206, 238, 120, 199, 25, 144, 98, 
249, 70, 162, 34, 43, 145, 114, 186, 233, 47, 42, 75, 95, 152, 235, 194, 26, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 160, 95, 100, 37, 167, 82, 93, 165, 126, 247, 147, 173, 238, 154, 206, 174, 96, 175, 209, 7, 8, 169, 171, 223, 29, 201, 161, 177, 98, 54, 94, 62, 70, 127, 142, 109, 206, 42, 179, 109, 156, 160, 156, 20, 59, 24, 147, 164, 13, 121, 192, 84, 157, 26, 56, 177, 151, 210, 7, 197, 229, 110, 60, 58, 224, 42, 77, 5, 59, 80, 216, 221, 248, 19, 66, 102, 74, 199, 238, 120, 231, 201, 187, 29, 11, 46, 195, 164, 8, 221, 128, 25, 205, 42, 247, 152, 112, 176, 14, 117, 150, 223, 245, 32, 212, 107, 4, 245, 27, 126, 224, 216, 0, 89, 106, 238, 185, 206, 44, 56, 204, 175, 7, 139, 233, 228, 127, 175, 194, 26, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 28, 5, 3, 0, 0, 28, 32, 85, 108, 217, 174, 85, 69, 70, 242, 27, 78, 2, 117, 115, 0, 172, 117, 89, 89, 73, 249, 245, 211, 100, 127, 48, 135, 224, 97, 172, 146, 128, 30, 190, 72, 199, 170, 97, 179, 136, 109, 86, 110, 235, 214, 47, 50, 115, 11, 226, 168, 56, 198, 24, 212, 205, 207, 2, 116, 104, 112, 99, 234, 236, 44, 70, 19, 19, 215, 127, 200, 162, 215, 142, 45, 135, 91, 219, 217, 86, 231, 154, 87, 222, 161, 32, 66, 196, 55, 117, 20, 186, 9, 134, 252, 249, 219, 9, 196, 128, 8, 222, 201, 131, 210, 182, 232, 142, 72, 160, 171, 95, 231, 232, 156, 28, 34, 54, 94, 73, 183, 38, 160, 123, 175, 157, 21, 163, 8, 214, 155, 172, 237, 169, 28, 15, 138, 105, 107, 251, 109, 131, 240, 194, 72, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 77, 207, 197, 130, 236, 138, 192, 241, 225, 114, 8, 22, 76, 54, 43, 121, 42, 44, 9, 92, 56, 253, 224, 179, 191, 131, 40, 176, 94, 61, 33, 12, 43, 82, 156, 236, 211, 29, 187, 100, 220, 243, 24, 134, 42, 204, 46, 161, 214, 91, 68, 119, 40, 252, 53, 54, 146, 136, 196, 168, 204, 195, 131, 110, 6, 73, 16, 161, 86, 35, 150, 153, 162, 185, 227, 65, 228, 160, 203, 42, 250, 121, 14, 42, 115, 221, 232, 96, 99, 164, 230, 29, 195, 149, 85, 206, 41, 1, 252, 77, 188, 88, 8, 182, 37, 249, 6, 158, 6, 244, 158, 254, 141, 203, 6, 158, 198, 103, 130, 98, 123, 34, 245, 44, 126, 77, 24, 187, 194, 90, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 108, 194, 203, 85, 69, 51, 125, 27, 78, 2, 117, 115, 0, 86, 26, 187, 56, 252, 194, 199, 140, 229, 133, 186, 187, 20, 174, 26, 48, 212, 129, 10, 20, 167, 179, 53, 72, 176, 92, 153, 48, 146, 15, 163, 182, 80, 138, 181, 135, 98, 129, 17, 66, 55, 184, 76, 225, 72, 104, 7, 221, 40, 71, 41, 202, 246, 154, 166, 199, 74, 175, 146, 54, 25, 56, 115, 243}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 621198000),
+ },
+ nb: 6,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511150,
+ Ack: 3465789226,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 621220000),
+ },
+ nb: 7,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465789226,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{153, 141, 101, 187, 110, 15, 63, 42, 81, 100, 95, 68, 241, 85, 160, 227, 3, 1, 12, 80, 166, 1, 98, 2, 44, 98, 63, 203, 70, 164, 99, 195, 23, 152, 223, 253, 208, 10, 12, 19, 66, 121, 9, 158, 205, 96, 218, 0, 80, 70, 58, 95, 41, 124, 216, 13, 122, 135, 102, 200, 181, 233, 129, 174, 194, 108, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 108, 223, 157, 85, 69, 74, 55, 27, 78, 2, 117, 115, 0, 149, 71, 215, 149, 16, 165, 115, 229, 141, 136, 187, 158, 88, 225, 131, 231, 182, 218, 235, 27, 48, 65, 244, 77, 186, 135, 72, 18, 87, 52, 180, 128, 130, 67, 75, 173, 160, 243, 104, 178, 103, 117, 96, 209, 36, 51, 108, 47, 232, 214, 254, 15, 208, 182, 218, 174, 248, 237, 88, 150, 35, 190, 239, 249, 171, 151, 9, 236, 2, 252, 255, 13, 79, 190, 147, 36, 161, 210, 202, 80, 209, 136, 167, 180, 186, 68, 246, 249, 48, 123, 46, 11, 132, 103, 132, 207, 186, 68, 110, 133, 142, 109, 194, 19, 122, 57, 203, 217, 120, 93, 67, 168, 91, 252, 87, 38, 33, 228, 229, 162, 190, 170, 23, 188, 89, 15, 241, 71, 194, 108, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 28, 5, 3, 0, 0, 28, 32, 85, 108, 217, 174, 85, 69, 70, 242, 27, 78, 2, 117, 115, 0, 206, 97, 120, 37, 255, 252, 7, 156, 162, 192, 43, 84, 105, 94, 125, 55, 13, 247, 234, 9, 25, 100, 246, 25, 77, 168, 199, 208, 187, 209, 164, 123, 234, 138, 238, 15, 86, 45, 163, 108, 162, 117, 247, 128, 3, 187, 100, 185, 193, 191, 134, 86, 161, 254, 236, 99, 66, 66, 35, 173, 91, 243, 175, 3, 175, 94, 79, 68, 246, 109, 200, 154, 209, 185, 11, 210, 50, 147, 136, 213, 158, 81, 111, 17, 149, 239, 110, 114, 25, 234, 247, 158, 233, 33, 36, 181, 66, 84, 189, 37, 207, 58, 9, 171, 143, 66, 69, 137, 192, 6, 187, 59, 16, 51, 80, 56, 89, 170, 12, 195, 69, 133, 188, 110, 171, 17, 17, 213, 194, 154, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 123, 36, 154, 4, 158, 41, 96, 252, 116, 114, 16, 137, 28, 177, 206, 33, 192, 88, 89, 1, 69, 252, 206, 88, 89, 152, 210, 179, 248, 44, 202, 239, 95, 131, 126, 147, 249, 93, 57, 166, 215, 184, 211, 164, 196, 71, 170, 3, 25, 18, 177, 214, 94, 147, 181, 148, 197, 11, 171, 219, 107, 48, 105, 81, 239, 110, 249, 140, 68, 127, 193, 146, 176, 161, 246, 108, 75, 141, 205, 211, 73, 247, 125, 205, 120, 156, 82, 55, 130, 250, 26, 15, 44, 214, 91, 115, 11, 103, 22, 83, 184, 96, 107, 138, 2, 127, 168, 191, 92, 102, 137, 161, 63, 225, 134, 17, 178, 242, 11, 43, 8, 30, 164, 28, 140, 195, 83, 121, 194, 154, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 28, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 189, 98, 234, 251, 237, 24, 143, 210, 30, 242, 97, 66, 50, 211, 47, 109, 110, 121, 244, 239, 89, 0, 39, 92, 218, 155, 71, 5, 23, 136, 231, 107, 95, 52, 231, 118, 253, 206, 250, 178, 209, 136, 13, 36, 36, 54, 157, 237, 35, 110, 134, 253, 80, 237, 162, 163, 38, 21, 54, 241, 240, 253, 73, 33, 191, 128, 32, 6, 198, 165, 35, 203, 244, 15, 166, 250, 159, 67, 149, 56, 19, 243, 230, 87, 6, 44, 150, 90, 79, 107, 18, 121, 112, 23, 176, 104, 50, 110, 176, 138, 250, 6, 209, 22, 41, 73, 234, 4, 124, 233, 208, 218, 236, 117, 232, 217, 10, 172, 18, 215, 143, 119, 193, 113, 10, 59, 255, 221, 0, 0, 41, 16, 0, 0, 0, 128, 0, 0, 0}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 622508000),
+ },
+ nb: 8,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511150,
+ Ack: 3465789949,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 622531000),
+ },
+ nb: 9,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ FIN: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511150,
+ Ack: 3465789949,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 622907000),
+ },
+ nb: 10,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ FIN: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465789949,
+ Ack: 374511151,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 652784000),
+ },
+ nb: 11,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511151,
+ Ack: 3465789950,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 652809000),
+ },
+ nb: 12,
+ },
+ })
+}
+
+func TestFSMearlyRST(t *testing.T) {
+ testFSM(t, []testFSMSequence{
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511116,
+ Ack: 0,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 566690000),
+ },
+ nb: 1,
+ },
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787765,
+ Ack: 374511117,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590332000),
+ },
+ nb: 2,
+ },
+ {
+ tcp: layers.TCP{
+ RST: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590346000),
+ },
+ nb: 3,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 31, 104, 196, 0, 32, 0, 1, 0, 0, 0, 0, 0, 1, 2, 85, 83, 0, 0, 6, 0, 1, 0, 0, 41, 16, 0, 0, 0, 128, 0, 0, 0}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ },
+ nb: 3,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787766,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 613687000),
+ },
+ nb: 3,
+ },
+ })
+}
+
+func TestFSMestablishedThenRST(t *testing.T) {
+ testFSM(t, []testFSMSequence{
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511116,
+ Ack: 0,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 566690000),
+ },
+ nb: 1,
+ },
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787765,
+ Ack: 374511117,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590332000),
+ },
+ nb: 2,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590346000),
+ },
+ nb: 3,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 31, 104, 196, 0, 32, 0, 1, 0, 0, 0, 0, 0, 1, 2, 85, 83, 0, 0, 6, 0, 1, 0, 0, 41, 16, 0, 0, 0, 128, 0, 0, 0}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ },
+ nb: 4,
+ },
+ {
+ tcp: layers.TCP{
+ RST: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787766,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 613687000),
+ },
+ nb: 5,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787766,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{8, 133, 104, 196, 132, 0, 0, 1, 0, 2, 0, 7, 0, 19, 2, 85, 83, 0, 0, 6, 0, 1, 2, 117, 115, 0, 0, 6, 0, 1, 0, 0, 3, 132, 0, 54, 1, 97, 5, 99, 99, 116, 108, 100, 192, 20, 10, 104, 111, 115, 116, 109, 97, 115, 116, 101, 114, 7, 110, 101, 117, 115, 116, 97, 114, 3, 98, 105, 122, 0, 120, 18, 40, 205, 0, 0, 3, 132, 0, 0, 3, 132, 0, 9, 58, 128, 0, 1, 81, 128, 192, 20, 0, 46, 0, 1, 0, 0, 3, 132, 0, 150, 0, 6, 5, 1, 0, 0, 3, 132, 85, 138, 90, 146, 85, 98, 191, 130, 27, 78, 2, 117, 115, 0, 69, 13, 35, 189, 141, 225, 107, 238, 108, 182, 207, 44, 105, 31, 212, 103, 32, 93, 217, 108, 20, 231, 188, 28, 241, 237, 104, 182, 117, 121, 195, 112, 64, 96, 237, 248, 6, 181, 186, 96, 60, 6, 18, 29, 188, 96, 201, 140, 251, 61, 71, 177, 108, 156, 9, 83, 125, 172, 188, 75, 81, 67, 218, 55, 93, 131, 243, 15, 190, 75, 4, 165, 226, 124, 49, 67, 142, 131, 239, 240, 76, 225, 10, 242, 68, 88, 240, 200, 27, 97, 102, 73, 92, 73, 133, 170, 175, 198, 99, 109, 90, 16, 162, 101, 95, 96, 102, 250, 91, 74, 80, 3, 87, 167, 50, 230, 9, 213, 7, 222, 197, 87, 183, 190, 148, 247, 207, 204, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 10, 1, 102, 5, 99, 99, 116, 108, 100, 192, 12, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 97, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 98, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 99, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 101, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 107, 193, 8, 192, 118, 0, 46, 0, 1, 0, 7, 233, 0, 0, 150, 0, 2, 5, 1, 0, 7, 233, 0, 85, 127, 33, 92, 85, 87, 134, 98, 27, 78, 2, 117, 115, 0, 19, 227, 175, 75, 88, 245, 164, 158, 150, 198, 57, 253, 150, 179, 161, 52, 24, 56, 229, 176, 175, 40, 45, 232, 188, 171, 131, 197, 107, 125, 218, 192, 78, 221, 146, 33, 114, 55, 43, 12, 131, 213, 51, 98, 37, 2, 102, 161, 232, 115, 177, 210, 51, 169, 215, 133, 56, 190, 91, 75, 8, 222, 231, 202, 139, 28, 187, 249, 72, 21, 23, 56, 63, 72, 126, 142, 242, 195, 242, 64, 208, 134, 100, 157, 197, 159, 43, 148, 20, 70, 117, 152, 159, 35, 200, 220, 49, 234, 173, 210, 91, 34, 210, 192, 7, 197, 112, 117, 208, 234, 42, 49, 133, 237, 197, 14, 244, 149, 191, 142, 36, 252, 42, 48, 182, 189, 9, 68, 1, 65, 5, 67, 67, 84, 76, 68, 193, 126, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 124, 70, 1, 66, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 125, 70, 194, 26, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 3, 209, 174, 255, 255, 255, 255, 255, 255, 255, 255, 255, 126, 1, 67, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 127, 70, 1, 69, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 126, 70, 1, 70, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 209, 173, 58, 70, 194, 108, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 0, 54, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 1, 75, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 128, 70, 194, 154, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 3, 226, 57, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 194, 2, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 230, 49, 85, 73, 83, 2, 27, 78, 2, 117, 115, 0, 82, 36, 11, 141, 74, 85, 70, 98, 179, 63, 173, 83, 8, 70, 155, 41, 102, 166, 140, 62, 71, 178, 130, 38, 171, 200, 180, 68, 2, 215, 45, 6, 43, 59, 171, 146, 223, 215, 9, 77, 5, 104, 167, 42, 237, 170, 30, 114, 205, 129, 59, 225, 152, 224, 79, 1, 65, 68, 208, 153, 121, 237, 199, 87, 2, 251, 100, 105, 59, 24, 73, 226, 169, 121, 250, 91, 41, 124, 14, 23, 135, 52, 2, 86, 72, 224, 100, 135, 70, 216, 16, 107, 84, 59, 13, 168, 58, 187, 54, 98, 230, 167, 246, 42, 46, 156, 206, 238, 120, 199, 25, 144, 98, 
249, 70, 162, 34, 43, 145, 114, 186, 233, 47, 42, 75, 95, 152, 235, 194, 26, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 160, 95, 100, 37, 167, 82, 93, 165, 126, 247, 147, 173, 238, 154, 206, 174, 96, 175, 209, 7, 8, 169, 171, 223, 29, 201, 161, 177, 98, 54, 94, 62, 70, 127, 142, 109, 206, 42, 179, 109, 156, 160, 156, 20, 59, 24, 147, 164, 13, 121, 192, 84, 157, 26, 56, 177, 151, 210, 7, 197, 229, 110, 60, 58, 224, 42, 77, 5, 59, 80, 216, 221, 248, 19, 66, 102, 74, 199, 238, 120, 231, 201, 187, 29, 11, 46, 195, 164, 8, 221, 128, 25, 205, 42, 247, 152, 112, 176, 14, 117, 150, 223, 245, 32, 212, 107, 4, 245, 27, 126, 224, 216, 0, 89, 106, 238, 185, 206, 44, 56, 204, 175, 7, 139, 233, 228, 127, 175, 194, 26, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 28, 5, 3, 0, 0, 28, 32, 85, 108, 217, 174, 85, 69, 70, 242, 27, 78, 2, 117, 115, 0, 172, 117, 89, 89, 73, 249, 245, 211, 100, 127, 48, 135, 224, 97, 172, 146, 128, 30, 190, 72, 199, 170, 97, 179, 136, 109, 86, 110, 235, 214, 47, 50, 115, 11, 226, 168, 56, 198, 24, 212, 205, 207, 2, 116, 104, 112, 99, 234, 236, 44, 70, 19, 19, 215, 127, 200, 162, 215, 142, 45, 135, 91, 219, 217, 86, 231, 154, 87, 222, 161, 32, 66, 196, 55, 117, 20, 186, 9, 134, 252, 249, 219, 9, 196, 128, 8, 222, 201, 131, 210, 182, 232, 142, 72, 160, 171, 95, 231, 232, 156, 28, 34, 54, 94, 73, 183, 38, 160, 123, 175, 157, 21, 163, 8, 214, 155, 172, 237, 169, 28, 15, 138, 105, 107, 251, 109, 131, 240, 194, 72, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 77, 207, 197, 130, 236, 138, 192, 241, 225, 114, 8, 22, 76, 54, 43, 121, 42, 44, 9, 92, 56, 253, 224, 179, 191, 131, 40, 176, 94, 61, 33, 12, 43, 82, 156, 236, 211, 29, 187, 100, 220, 243, 24, 134, 42, 204, 46, 161, 214, 91, 68, 119, 40, 252, 53, 54, 146, 136, 196, 168, 204, 195, 131, 110, 6, 73, 16, 161, 86, 35, 150, 153, 162, 185, 227, 65, 228, 160, 203, 42, 250, 121, 14, 42, 115, 221, 232, 96, 99, 164, 230, 29, 195, 149, 85, 206, 41, 1, 252, 77, 188, 88, 8, 182, 37, 249, 6, 158, 6, 244, 158, 254, 141, 203, 6, 158, 198, 103, 130, 98, 123, 34, 245, 44, 126, 77, 24, 187, 194, 90, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 108, 194, 203, 85, 69, 51, 125, 27, 78, 2, 117, 115, 0, 86, 26, 187, 56, 252, 194, 199, 140, 229, 133, 186, 187, 20, 174, 26, 48, 212, 129, 10, 20, 167, 179, 53, 72, 176, 92, 153, 48, 146, 15, 163, 182, 80, 138, 181, 135, 98, 129, 17, 66, 55, 184, 76, 225, 72, 104, 7, 221, 40, 71, 41, 202, 246, 154, 166, 199, 74, 175, 146, 54, 25, 56, 115, 243}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 621198000),
+ },
+ nb: 5,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511150,
+ Ack: 3465789226,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 621220000),
+ },
+ nb: 5,
+ },
+ })
+}
+
+func TestFSMmissingSYNACK(t *testing.T) {
+ testFSM(t, []testFSMSequence{
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511116,
+ Ack: 0,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 566690000),
+ },
+ nb: 1,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590346000),
+ },
+ nb: 1,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 31, 104, 196, 0, 32, 0, 1, 0, 0, 0, 0, 0, 1, 2, 85, 83, 0, 0, 6, 0, 1, 0, 0, 41, 16, 0, 0, 0, 128, 0, 0, 0}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ },
+ nb: 1,
+ },
+ })
+}
+
+/*
+ * Memory test
+ */
+func TestMemoryShrink(t *testing.T) {
+ tcp := layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 999,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ a := NewAssembler(NewStreamPool(&testFactoryBench{}))
+ var before runtime.MemStats
+ runtime.GC()
+ runtime.ReadMemStats(&before)
+ run := 1050
+ // Allocate more than the initial allocation
+ for i := 0; i < run; i++ {
+ a.Assemble(netFlow, &tcp)
+ if tcp.SYN {
+ tcp.SYN = false
+ tcp.Seq += 1 + 1
+ }
+ tcp.Seq += 10
+ }
+ var after runtime.MemStats
+ a.FlushAll()
+ runtime.GC()
+ runtime.ReadMemStats(&after)
+ if after.HeapAlloc < before.HeapAlloc {
+ t.Fatalf("Nothing allocated for %d run: before: %d, after: %d", run, before.HeapAlloc, after.HeapAlloc)
+ }
+ before = after
+ // Do about as many allocations again, freeing pages periodically via FlushAll()
+ run *= 2
+ for i := 0; i < run; i++ {
+ a.Assemble(netFlow, &tcp)
+ if i%50 == 0 {
+ a.FlushAll()
+ }
+ tcp.Seq += 10
+ }
+ runtime.GC()
+ runtime.ReadMemStats(&after)
+ if after.HeapAlloc >= before.HeapAlloc {
+ t.Fatalf("Nothing freed for %d run: before: %d, after: %d", run, before.HeapAlloc, after.HeapAlloc)
+ }
+}
+
+/*
+ * Benchmark tests
+ */
+func BenchmarkSingleStreamNo(b *testing.B) {
+ t := layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ a := NewAssembler(NewStreamPool(&testFactoryBench{}))
+ for i := 0; i < b.N; i++ {
+ a.Assemble(netFlow, &t)
+ if t.SYN {
+ t.SYN = false
+ t.Seq++
+ }
+ t.Seq += 10
+ }
+}
+
+func BenchmarkSingleStreamSkips(b *testing.B) {
+ t := layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ a := NewAssembler(NewStreamPool(&testFactoryBench{}))
+ skipped := false
+ for i := 0; i < b.N; i++ {
+ if i%10 == 9 {
+ t.Seq += 10
+ skipped = true
+ } else if skipped {
+ t.Seq -= 20
+ }
+ a.Assemble(netFlow, &t)
+ if t.SYN {
+ t.SYN = false
+ t.Seq++
+ }
+ t.Seq += 10
+ if skipped {
+ t.Seq += 10
+ skipped = false
+ }
+ }
+}
+
+func BenchmarkSingleStreamLoss(b *testing.B) {
+ t := layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ SYN: true,
+ Seq: 1000,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ a := NewAssembler(NewStreamPool(&testFactoryBench{}))
+ for i := 0; i < b.N; i++ {
+ a.Assemble(netFlow, &t)
+ t.SYN = false
+ t.Seq += 11
+ }
+}
+
+func BenchmarkMultiStreamGrow(b *testing.B) {
+ t := layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 0,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ a := NewAssembler(NewStreamPool(&testFactoryBench{}))
+ for i := 0; i < b.N; i++ {
+ t.SrcPort = layers.TCPPort(i)
+ a.Assemble(netFlow, &t)
+ t.Seq += 10
+ }
+}
+
+func BenchmarkMultiStreamConn(b *testing.B) {
+ t := layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 0,
+ SYN: true,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ a := NewAssembler(NewStreamPool(&testFactoryBench{}))
+ for i := 0; i < b.N; i++ {
+ t.SrcPort = layers.TCPPort(i)
+ a.Assemble(netFlow, &t)
+ if i%65536 == 65535 {
+ if t.SYN {
+ t.SYN = false
+ t.Seq++
+ }
+ t.Seq += 10
+ }
+ }
+}
+
+type testMemoryContext struct{}
+
+func (t *testMemoryContext) GetCaptureInfo() gopacket.CaptureInfo {
+ return gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ }
+}
+
+func TestFullyOrderedAndCompleteStreamDoesNotAlloc(t *testing.T) {
+ c2s := layers.TCP{
+ SrcPort: 1,
+ DstPort: 2,
+ Seq: 0,
+ SYN: true,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ s2c := layers.TCP{
+ SrcPort: c2s.DstPort,
+ DstPort: c2s.SrcPort,
+ Seq: 0,
+ SYN: true,
+ ACK: true,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 0}},
+ }
+ tf := testMemoryFactory{}
+ a := NewAssembler(NewStreamPool(&tf))
+
+ ctx := &testMemoryContext{}
+ // First packets (client and server SYN)
+ a.AssembleWithContext(netFlow, &c2s, ctx)
+ a.AssembleWithContext(netFlow.Reverse(), &s2c, ctx)
+ c2s.SYN, s2c.SYN = false, false
+ c2s.ACK = true
+ c2s.Seq++
+ s2c.Seq++
+ N := 1000
+ if n := testing.AllocsPerRun(N, func() {
+ c2s.Seq += 10
+ s2c.Seq += 10
+ c2s.Ack += 10
+ s2c.Ack += 10
+ a.AssembleWithContext(netFlow, &c2s, ctx)
+ a.AssembleWithContext(netFlow.Reverse(), &s2c, ctx)
+ }); n > 0 {
+ t.Error(n, "mallocs for normal TCP stream")
+ }
+ // Ensure all bytes have been through the stream
+ // +1 for the first packet pair and +1 because AllocsPerRun runs the function N+1 times.
+ if tf.bytes != 10*2*(N+1+1) {
+ t.Error(tf.bytes, "bytes handled, expected", 10*2*(N+1+1))
+ }
+}
diff --git a/vendor/github.com/google/gopacket/reassembly/tcpcheck.go b/vendor/github.com/google/gopacket/reassembly/tcpcheck.go
new file mode 100644
index 0000000..4b52aba
--- /dev/null
+++ b/vendor/github.com/google/gopacket/reassembly/tcpcheck.go
@@ -0,0 +1,246 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package reassembly
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
+/*
+ * Check TCP packet against options (window, MSS)
+ */
+
+type tcpStreamOptions struct {
+ mss int
+ scale int
+ receiveWindow uint
+}
+
+// TCPOptionCheck contains options for the two directions
+type TCPOptionCheck struct {
+ options [2]tcpStreamOptions
+}
+
+func (t *TCPOptionCheck) getOptions(dir TCPFlowDirection) *tcpStreamOptions {
+ if dir == TCPDirClientToServer {
+ return &t.options[0]
+ }
+ return &t.options[1]
+}
+
+// NewTCPOptionCheck creates default options: MSS and receive window unknown,
+// and scale -1 to mark that no window-scale option has been seen yet.
+func NewTCPOptionCheck() TCPOptionCheck {
+ return TCPOptionCheck{
+ options: [2]tcpStreamOptions{
+ tcpStreamOptions{
+ mss: 0,
+ scale: -1,
+ receiveWindow: 0,
+ }, tcpStreamOptions{
+ mss: 0,
+ scale: -1,
+ receiveWindow: 0,
+ },
+ },
+ }
+}
+
+// Accept checks whether the packet should be accepted, based on the negotiated
+// TCP options (MSS, window scaling) and the advertised receive window.
+func (t *TCPOptionCheck) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo, dir TCPFlowDirection, acked Sequence, start *bool) error {
+ options := t.getOptions(dir)
+ if tcp.SYN {
+ mss := -1
+ scale := -1
+ for _, o := range tcp.Options {
+ // MSS
+ if o.OptionType == 2 {
+ if len(o.OptionData) != 2 {
+ return fmt.Errorf("MSS option data length expected 2, got %d", len(o.OptionData))
+ }
+ mss = int(binary.BigEndian.Uint16(o.OptionData[:2]))
+ }
+ // Window scaling
+ if o.OptionType == 3 {
+ if len(o.OptionData) != 1 {
+ return fmt.Errorf("Window scaling length expected: 1, got %d", len(o.OptionData))
+ }
+ scale = int(o.OptionData[0])
+ }
+ }
+ options.mss = mss
+ options.scale = scale
+ } else {
+ if acked != invalidSequence {
+ revOptions := t.getOptions(dir.Reverse())
+ length := len(tcp.Payload)
+
+ // Check packet is in the correct window
+ diff := acked.Difference(Sequence(tcp.Seq))
+ if diff == -1 && (length == 1 || length == 0) {
+ // This is probably a Keep-alive
+ // TODO: check byte is ok
+ } else if diff < 0 {
+ return fmt.Errorf("Re-emitted packet (diff:%d,seq:%d,rev-ack:%d)", diff,
+ tcp.Seq, acked)
+ } else if revOptions.mss > 0 && length > revOptions.mss {
+ return fmt.Errorf("%d > mss (%d)", length, revOptions.mss)
+ } else if revOptions.receiveWindow != 0 && revOptions.scale < 0 && diff > int(revOptions.receiveWindow) {
+ return fmt.Errorf("%d > receiveWindow(%d)", diff, revOptions.receiveWindow)
+ }
+ }
+ }
+ // Compute receiveWindow
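+	// The advertised window is scaled by the shift count negotiated via the
+	// window-scale option (RFC 7323): e.g. Window=1024 with scale=3 gives an
+	// effective receive window of 1024<<3 = 8192 bytes.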
+ options.receiveWindow = uint(tcp.Window)
+ if options.scale > 0 {
+ options.receiveWindow = options.receiveWindow << (uint(options.scale))
+ }
+ return nil
+}
+
+// TCPSimpleFSM implements a very simple TCP state machine
+//
+// Usage:
+// When implementing a Stream interface, Accept() can call
+// TCPSimpleFSM.CheckState() to ignore packets that the client's or server's
+// TCP stack would have rejected (see the usage sketch at the end of this
+// file).
+//
+// Limitations:
+// - packets should be received in order
+// - no check on sequence numbers is performed
+// - no RST
+type TCPSimpleFSM struct {
+ dir TCPFlowDirection
+ state int
+ options TCPSimpleFSMOptions
+}
+
+// TCPSimpleFSMOptions holds options for TCPSimpleFSM
+type TCPSimpleFSMOptions struct {
+ SupportMissingEstablishment bool // Allow missing SYN, SYN+ACK, ACK
+}
+
+// Internal values of state machine
+const (
+ TCPStateClosed = 0
+ TCPStateSynSent = 1
+ TCPStateEstablished = 2
+ TCPStateCloseWait = 3
+ TCPStateLastAck = 4
+ TCPStateReset = 5
+)
+
+// NewTCPSimpleFSM creates a new TCPSimpleFSM
+func NewTCPSimpleFSM(options TCPSimpleFSMOptions) *TCPSimpleFSM {
+ return &TCPSimpleFSM{
+ state: TCPStateClosed,
+ options: options,
+ }
+}
+
+func (t *TCPSimpleFSM) String() string {
+ switch t.state {
+ case TCPStateClosed:
+ return "Closed"
+ case TCPStateSynSent:
+ return "SynSent"
+ case TCPStateEstablished:
+ return "Established"
+ case TCPStateCloseWait:
+ return "CloseWait"
+ case TCPStateLastAck:
+ return "LastAck"
+ case TCPStateReset:
+ return "Reset"
+ }
+ return "?"
+}
+
+// CheckState updates the state machine according to tcp and returns true,
+// or returns false if tcp is invalid with regard to the current state.
+func (t *TCPSimpleFSM) CheckState(tcp *layers.TCP, dir TCPFlowDirection) bool {
+ if t.state == TCPStateClosed && t.options.SupportMissingEstablishment && !(tcp.SYN && !tcp.ACK) {
+ /* try to figure out state */
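+		// (the handshake was not captured, so the state is guessed from the
+		// flags of the first packet seen on this flow)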
+ switch {
+ case tcp.SYN && tcp.ACK:
+ t.state = TCPStateSynSent
+ t.dir = dir.Reverse()
+ case tcp.FIN && !tcp.ACK:
+ t.state = TCPStateEstablished
+ case tcp.FIN && tcp.ACK:
+ t.state = TCPStateCloseWait
+ t.dir = dir.Reverse()
+ default:
+ t.state = TCPStateEstablished
+ }
+ }
+
+ switch t.state {
+ /* opening connection */
+ case TCPStateClosed:
+ if tcp.SYN && !tcp.ACK {
+ t.dir = dir
+ t.state = TCPStateSynSent
+ return true
+ }
+ case TCPStateSynSent:
+ if tcp.RST {
+ t.state = TCPStateReset
+ return true
+ }
+
+ if tcp.SYN && tcp.ACK && dir == t.dir.Reverse() {
+ t.state = TCPStateEstablished
+ return true
+ }
+ if tcp.SYN && !tcp.ACK && dir == t.dir {
+ // re-transmission
+ return true
+ }
+ /* established */
+ case TCPStateEstablished:
+ if tcp.RST {
+ t.state = TCPStateReset
+ return true
+ }
+
+ if tcp.FIN {
+ t.state = TCPStateCloseWait
+ t.dir = dir
+ return true
+ }
+ // accept any packet
+ return true
+ /* closing connection */
+ case TCPStateCloseWait:
+ if tcp.RST {
+ t.state = TCPStateReset
+ return true
+ }
+
+ if tcp.FIN && tcp.ACK && dir == t.dir.Reverse() {
+ t.state = TCPStateLastAck
+ return true
+ }
+ if tcp.ACK {
+ return true
+ }
+ case TCPStateLastAck:
+ if tcp.RST {
+ t.state = TCPStateReset
+ return true
+ }
+
+ if tcp.ACK && t.dir == dir {
+ t.state = TCPStateClosed
+ return true
+ }
+ }
+ return false
+}
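+
+// The following is a minimal, illustrative sketch (not part of the package
+// API) of how a Stream implementation might combine TCPSimpleFSM and
+// TCPOptionCheck from its Accept callback; the checkedStream type and its
+// fields are hypothetical.
+//
+//	type checkedStream struct {
+//		fsm      *TCPSimpleFSM
+//		optCheck TCPOptionCheck
+//	}
+//
+//	func (s *checkedStream) Accept(tcp *layers.TCP, ci gopacket.CaptureInfo,
+//		dir TCPFlowDirection, nextSeq Sequence, start *bool, ac AssemblerContext) bool {
+//		if !s.fsm.CheckState(tcp, dir) {
+//			return false // impossible for the current TCP state
+//		}
+//		if err := s.optCheck.Accept(tcp, ci, dir, nextSeq, start); err != nil {
+//			return false // e.g. payload larger than the peer's MSS
+//		}
+//		return true
+//	}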
diff --git a/vendor/github.com/google/gopacket/reassembly/tcpcheck_test.go b/vendor/github.com/google/gopacket/reassembly/tcpcheck_test.go
new file mode 100644
index 0000000..4c2391d
--- /dev/null
+++ b/vendor/github.com/google/gopacket/reassembly/tcpcheck_test.go
@@ -0,0 +1,249 @@
+// Copyright 2012 Google, Inc. All rights reserved.
+//
+// Use of this source code is governed by a BSD-style license
+// that can be found in the LICENSE file in the root of the source
+// tree.
+
+package reassembly
+
+import (
+ "testing"
+ "time"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
+// netFlow is declared in tcpassembly_test.go
+
+/*
+ * FSM tests
+ */
+
+type testCheckFSMSequence struct {
+ tcp layers.TCP
+ ci gopacket.CaptureInfo
+ expected bool
+}
+
+func testCheckFSM(t *testing.T, options TCPSimpleFSMOptions, s []testCheckFSMSequence) {
+ fsm := NewTCPSimpleFSM(options)
+ port := layers.TCPPort(0)
+ for i, test := range s {
+ // Derive flow and direction from the ports: the first source port seen is treated as the client side.
+ flow := netFlow
+ dir := TCPDirClientToServer
+ if port == 0 {
+ port = test.tcp.SrcPort
+ }
+ if port != test.tcp.SrcPort {
+ dir = dir.Reverse()
+ flow = flow.Reverse()
+ }
+ res := fsm.CheckState(&test.tcp, dir)
+ if res != test.expected {
+ t.Fatalf("#%d: packet rejected (%s): got %s, expected %s. State:%s", i, gopacket.LayerDump(&test.tcp), res, test.expected, fsm.String())
+ }
+ }
+}
+
+func TestCheckFSM(t *testing.T) {
+ testCheckFSM(t, TCPSimpleFSMOptions{}, []testCheckFSMSequence{
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511116,
+ Ack: 0,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 566690000),
+ },
+ expected: true,
+ },
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787765,
+ Ack: 374511117,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590332000),
+ },
+ expected: true,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590346000),
+ },
+ expected: true,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 31, 104, 196, 0, 32, 0, 1, 0, 0, 0, 0, 0, 1, 2, 85, 83, 0, 0, 6, 0, 1, 0, 0, 41, 16, 0, 0, 0, 128, 0, 0, 0}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ },
+ expected: true,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787766,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 613687000),
+ },
+ expected: true,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 3465787766,
+ Ack: 374511150,
+ BaseLayer: layers.BaseLayer{Payload: []byte{8, 133, 104, 196, 132, 0, 0, 1, 0, 2, 0, 7, 0, 19, 2, 85, 83, 0, 0, 6, 0, 1, 2, 117, 115, 0, 0, 6, 0, 1, 0, 0, 3, 132, 0, 54, 1, 97, 5, 99, 99, 116, 108, 100, 192, 20, 10, 104, 111, 115, 116, 109, 97, 115, 116, 101, 114, 7, 110, 101, 117, 115, 116, 97, 114, 3, 98, 105, 122, 0, 120, 18, 40, 205, 0, 0, 3, 132, 0, 0, 3, 132, 0, 9, 58, 128, 0, 1, 81, 128, 192, 20, 0, 46, 0, 1, 0, 0, 3, 132, 0, 150, 0, 6, 5, 1, 0, 0, 3, 132, 85, 138, 90, 146, 85, 98, 191, 130, 27, 78, 2, 117, 115, 0, 69, 13, 35, 189, 141, 225, 107, 238, 108, 182, 207, 44, 105, 31, 212, 103, 32, 93, 217, 108, 20, 231, 188, 28, 241, 237, 104, 182, 117, 121, 195, 112, 64, 96, 237, 248, 6, 181, 186, 96, 60, 6, 18, 29, 188, 96, 201, 140, 251, 61, 71, 177, 108, 156, 9, 83, 125, 172, 188, 75, 81, 67, 218, 55, 93, 131, 243, 15, 190, 75, 4, 165, 226, 124, 49, 67, 142, 131, 239, 240, 76, 225, 10, 242, 68, 88, 240, 200, 27, 97, 102, 73, 92, 73, 133, 170, 175, 198, 99, 109, 90, 16, 162, 101, 95, 96, 102, 250, 91, 74, 80, 3, 87, 167, 50, 230, 9, 213, 7, 222, 197, 87, 183, 190, 148, 247, 207, 204, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 10, 1, 102, 5, 99, 99, 116, 108, 100, 192, 12, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 97, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 98, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 99, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 101, 193, 8, 192, 118, 0, 2, 0, 1, 0, 7, 233, 0, 0, 4, 1, 107, 193, 8, 192, 118, 0, 46, 0, 1, 0, 7, 233, 0, 0, 150, 0, 2, 5, 1, 0, 7, 233, 0, 85, 127, 33, 92, 85, 87, 134, 98, 27, 78, 2, 117, 115, 0, 19, 227, 175, 75, 88, 245, 164, 158, 150, 198, 57, 253, 150, 179, 161, 52, 24, 56, 229, 176, 175, 40, 45, 232, 188, 171, 131, 197, 107, 125, 218, 192, 78, 221, 146, 33, 114, 55, 43, 12, 131, 213, 51, 98, 37, 2, 102, 161, 232, 115, 177, 210, 51, 169, 215, 133, 56, 190, 91, 75, 8, 222, 231, 202, 139, 28, 187, 249, 72, 21, 23, 56, 63, 72, 126, 142, 242, 195, 242, 64, 208, 134, 100, 157, 197, 159, 43, 148, 20, 70, 117, 152, 159, 35, 200, 220, 49, 234, 173, 210, 91, 34, 210, 192, 7, 197, 112, 117, 208, 234, 42, 49, 133, 237, 197, 14, 244, 149, 191, 142, 36, 252, 42, 48, 182, 189, 9, 68, 1, 65, 5, 67, 67, 84, 76, 68, 193, 126, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 124, 70, 1, 66, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 125, 70, 194, 26, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 3, 209, 174, 255, 255, 255, 255, 255, 255, 255, 255, 255, 126, 1, 67, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 127, 70, 1, 69, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 126, 70, 1, 70, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 209, 173, 58, 70, 194, 108, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 0, 54, 130, 0, 0, 0, 0, 0, 0, 0, 0, 0, 17, 1, 75, 194, 4, 0, 1, 0, 1, 0, 0, 28, 32, 0, 4, 156, 154, 128, 70, 194, 154, 0, 28, 0, 1, 0, 0, 28, 32, 0, 16, 32, 1, 5, 3, 226, 57, 0, 0, 0, 0, 0, 0, 0, 3, 0, 1, 194, 2, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 230, 49, 85, 73, 83, 2, 27, 78, 2, 117, 115, 0, 82, 36, 11, 141, 74, 85, 70, 98, 179, 63, 173, 83, 8, 70, 155, 41, 102, 166, 140, 62, 71, 178, 130, 38, 171, 200, 180, 68, 2, 215, 45, 6, 43, 59, 171, 146, 223, 215, 9, 77, 5, 104, 167, 42, 237, 170, 30, 114, 205, 129, 59, 225, 152, 224, 79, 1, 65, 68, 208, 153, 121, 237, 199, 87, 2, 251, 100, 105, 59, 24, 73, 226, 169, 121, 250, 91, 41, 124, 14, 23, 135, 52, 2, 86, 72, 224, 100, 135, 70, 216, 16, 107, 84, 59, 13, 168, 58, 187, 54, 98, 230, 167, 246, 42, 46, 156, 206, 238, 120, 199, 25, 144, 98, 
249, 70, 162, 34, 43, 145, 114, 186, 233, 47, 42, 75, 95, 152, 235, 194, 26, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 160, 95, 100, 37, 167, 82, 93, 165, 126, 247, 147, 173, 238, 154, 206, 174, 96, 175, 209, 7, 8, 169, 171, 223, 29, 201, 161, 177, 98, 54, 94, 62, 70, 127, 142, 109, 206, 42, 179, 109, 156, 160, 156, 20, 59, 24, 147, 164, 13, 121, 192, 84, 157, 26, 56, 177, 151, 210, 7, 197, 229, 110, 60, 58, 224, 42, 77, 5, 59, 80, 216, 221, 248, 19, 66, 102, 74, 199, 238, 120, 231, 201, 187, 29, 11, 46, 195, 164, 8, 221, 128, 25, 205, 42, 247, 152, 112, 176, 14, 117, 150, 223, 245, 32, 212, 107, 4, 245, 27, 126, 224, 216, 0, 89, 106, 238, 185, 206, 44, 56, 204, 175, 7, 139, 233, 228, 127, 175, 194, 26, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 28, 5, 3, 0, 0, 28, 32, 85, 108, 217, 174, 85, 69, 70, 242, 27, 78, 2, 117, 115, 0, 172, 117, 89, 89, 73, 249, 245, 211, 100, 127, 48, 135, 224, 97, 172, 146, 128, 30, 190, 72, 199, 170, 97, 179, 136, 109, 86, 110, 235, 214, 47, 50, 115, 11, 226, 168, 56, 198, 24, 212, 205, 207, 2, 116, 104, 112, 99, 234, 236, 44, 70, 19, 19, 215, 127, 200, 162, 215, 142, 45, 135, 91, 219, 217, 86, 231, 154, 87, 222, 161, 32, 66, 196, 55, 117, 20, 186, 9, 134, 252, 249, 219, 9, 196, 128, 8, 222, 201, 131, 210, 182, 232, 142, 72, 160, 171, 95, 231, 232, 156, 28, 34, 54, 94, 73, 183, 38, 160, 123, 175, 157, 21, 163, 8, 214, 155, 172, 237, 169, 28, 15, 138, 105, 107, 251, 109, 131, 240, 194, 72, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 112, 190, 140, 85, 73, 36, 78, 27, 78, 2, 117, 115, 0, 77, 207, 197, 130, 236, 138, 192, 241, 225, 114, 8, 22, 76, 54, 43, 121, 42, 44, 9, 92, 56, 253, 224, 179, 191, 131, 40, 176, 94, 61, 33, 12, 43, 82, 156, 236, 211, 29, 187, 100, 220, 243, 24, 134, 42, 204, 46, 161, 214, 91, 68, 119, 40, 252, 53, 54, 146, 136, 196, 168, 204, 195, 131, 110, 6, 73, 16, 161, 86, 35, 150, 153, 162, 185, 227, 65, 228, 160, 203, 42, 250, 121, 14, 42, 115, 221, 232, 96, 99, 164, 230, 29, 195, 149, 85, 206, 41, 1, 252, 77, 188, 88, 8, 182, 37, 249, 6, 158, 6, 244, 158, 254, 141, 203, 6, 158, 198, 103, 130, 98, 123, 34, 245, 44, 126, 77, 24, 187, 194, 90, 0, 46, 0, 1, 0, 0, 28, 32, 0, 150, 0, 1, 5, 3, 0, 0, 28, 32, 85, 108, 194, 203, 85, 69, 51, 125, 27, 78, 2, 117, 115, 0, 86, 26, 187, 56, 252, 194, 199, 140, 229, 133, 186, 187, 20, 174, 26, 48, 212, 129, 10, 20, 167, 179, 53, 72, 176, 92, 153, 48, 146, 15, 163, 182, 80, 138, 181, 135, 98, 129, 17, 66, 55, 184, 76, 225, 72, 104, 7, 221, 40, 71, 41, 202, 246, 154, 166, 199, 74, 175, 146, 54, 25, 56, 115, 243}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 621198000),
+ },
+ expected: true,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511150,
+ Ack: 3465789226,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 621220000),
+ },
+ expected: true,
+ },
+ })
+}
+
+func TestCheckFSMmissingSYNACK(t *testing.T) {
+ testCheckFSM(t, TCPSimpleFSMOptions{}, []testCheckFSMSequence{
+ {
+ tcp: layers.TCP{
+ SYN: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511116,
+ Ack: 0,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 566690000),
+ },
+ expected: true,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590346000),
+ },
+ expected: false,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 374511117,
+ Ack: 3465787766,
+ BaseLayer: layers.BaseLayer{Payload: []byte{0, 31, 104, 196, 0, 32, 0, 1, 0, 0, 0, 0, 0, 1, 2, 85, 83, 0, 0, 6, 0, 1, 0, 0, 41, 16, 0, 0, 0, 128, 0, 0, 0}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ },
+ expected: false,
+ },
+ })
+}
+
+// Packets without the SYN/SYN+ACK handshake are accepted only when SupportMissingEstablishment is set.
+func TestCheckFSMmissingSYN(t *testing.T) {
+ for _, val := range []bool{false, true} {
+ testCheckFSM(t, TCPSimpleFSMOptions{SupportMissingEstablishment: val}, []testCheckFSMSequence{
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 54842,
+ DstPort: 53,
+ Seq: 12,
+ Ack: 1012,
+ BaseLayer: layers.BaseLayer{Payload: []byte{1}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 566690000),
+ },
+ expected: val,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 1012,
+ Ack: 13,
+ BaseLayer: layers.BaseLayer{Payload: []byte{2}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590346000),
+ },
+ expected: val,
+ },
+ {
+ tcp: layers.TCP{
+ ACK: true,
+ SrcPort: 53,
+ DstPort: 54842,
+ Seq: 1013,
+ Ack: 13,
+ BaseLayer: layers.BaseLayer{Payload: []byte{3}},
+ },
+ ci: gopacket.CaptureInfo{
+ Timestamp: time.Unix(1432538521, 590387000),
+ },
+ expected: val,
+ },
+ })
+ }
+}