/* * Copyright (c) 2016-2019 Cisco and/or its affiliates. * Copyright (c) 2019 Arm Limited * Copyright (c) 2010-2017 Intel Corporation and/or its affiliates. * Copyright (c) 2007-2009 Kip Macy kmacy@freebsd.org * Inspired from DPDK rte_ring.h (SPSC only) (derived from freebsd bufring.h). * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #ifndef __included_ssvm_fifo_h__ #define __included_ssvm_fifo_h__ #include #include #include #include #include /** Out-of-order segment */ typedef struct { u32 next; /**< Next linked-list element pool index */ u32 prev; /**< Previous linked-list element pool index */ u32 start; /**< Start of segment, normalized*/ u32 length; /**< Length of segment */ } ooo_segment_t; #define SVM_FIFO_TRACE (0) #define OOO_SEGMENT_INVALID_INDEX ((u32)~0) #define SVM_FIFO_INVALID_SESSION_INDEX ((u32)~0) #define SVM_FIFO_INVALID_INDEX ((u32)~0) #define SVM_FIFO_MAX_EVT_SUBSCRIBERS 7 typedef enum svm_fifo_tx_ntf_ { SVM_FIFO_NO_TX_NOTIF = 0, SVM_FIFO_WANT_TX_NOTIF = 1, SVM_FIFO_WANT_TX_NOTIF_IF_FULL = 2, } svm_fifo_tx_ntf_t; typedef struct { u32 offset; u32 len; u32 action; } svm_fifo_trace_elem_t; typedef struct svm_fifo_chunk_ { u32 start_byte; /**< chunk start byte */ u32 length; /**< length of chunk in bytes */ struct svm_fifo_chunk_ *next; /**< pointer to next chunk in linked-lists */ u8 data[0]; /**< start of chunk data */ } svm_fifo_chunk_t; typedef enum svm_fifo_flag_ { SVM_FIFO_F_MULTI_CHUNK = 1 << 0, SVM_FIFO_F_GROW = 1 << 1, SVM_FIFO_F_SHRINK = 1 << 2, 
SVM_FIFO_F_COLLECT_CHUNKS = 1 << 3, SVM_FIFO_F_LL_TRACKED = 1 << 4, } svm_fifo_flag_t; typedef struct _svm_fifo { CLIB_CACHE_LINE_ALIGN_MARK (shared_first); u32 size; /**< size of the fifo in bytes */ u32 nitems; /**< usable size (size-1) */ u8 flags; /**< fifo flags */ svm_fifo_chunk_t *start_chunk;/**< first chunk in fifo chunk list */ svm_fifo_chunk_t *end_chunk; /**< end chunk in fifo chunk list */ svm_fifo_chunk_t *new_chunks; /**< chunks yet to be added to list */ rb_tree_t chunk_lookup; CLIB_CACHE_LINE_ALIGN_MARK (shared_second); volatile u32 has_event; /**< non-zero if deq event exists */ u32 master_session_index; /**< session layer session index */ u32 client_session_index; /**< app session index */ u8 master_thread_index; /**< session layer thread index */ u8 client_thread_index; /**< app worker index */ i8 refcnt; /**< reference count */ u32 segment_manager; /**< session layer segment manager index */ u32 segment_index; /**< segment index in segment manager */ struct _svm_fifo *next; /**< next in freelist/active chain */ struct _svm_fifo *prev; /**< prev in active chain */ u32 size_decrement; /**< bytes to remove from fifo */ CLIB_CACHE_LINE_ALIGN_MARK (consumer); u32 head; /**< fifo head position/byte */ svm_fifo_chunk_t *head_chunk; /**< tracks chunk where head lands */ svm_fifo_chunk_t *ooo_deq; /**< last chunk used for ooo dequeue */ volatile u32 want_tx_ntf; /**< producer wants nudge */ volatile u32 has_tx_ntf; CLIB_CACHE_LINE_ALIGN_MARK (producer); u32 tail; /**< fifo tail position/byte */ u32 ooos_list_head; /**< Head of out-of-order linked-list */ svm_fifo_chunk_t *tail_chunk; /**< tracks chunk where tail lands */ svm_fifo_chunk_t *ooo_enq; /**< last chunk used for ooo enqueue */ ooo_segment_t *ooo_segments; /**< Pool of ooo segments */ u32 ooos_newest; /**< Last segment to have been updated */ volatile u8 n_subscribers; /**< Number of subscribers for io events */ u8 subscribers[SVM_FIFO_MAX_EVT_SUBSCRIBERS]; #if SVM_FIFO_TRACE 
svm_fifo_trace_elem_t *trace; #endif } svm_fifo_t; typedef enum { SVM_FIFO_EFULL = -2, SVM_FIFO_EEMPTY = -3, } svm_fifo_err_t; typedef struct svm_fifo_seg_ { u8 *data; u32 len; } svm_fifo_seg_t; #if SVM_FIFO_TRACE #define svm_fifo_trace_add(_f, _s, _l, _t) \ { \ svm_fifo_trace_elem_t *trace_elt; \ vec_add2(_f->trace, trace_elt, 1); \ trace_elt->offset = _s; \ trace_elt->len = _l; \ trace_elt->action = _t; \ } #else #define svm_fifo_trace_add(_f, _s, _l, _t) #endif u8 *svm_fifo_dump_trace (u8 * s, svm_fifo_t * f); u8 *svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose); /** * Load head and tail optimized for consumer * * Internal function. */ static inline void f_load_head_tail_cons (svm_fifo_t * f, u32 * head, u32 * tail) { /* load-relaxed: consumer owned index */ *head = f->head; /* load-acq: consumer foreign index (paired with store-rel in producer) */ *tail = clib_atomic_load_acq_n (&f->tail); } /** Load head and tail optimized for producer * * Internal function */ static inline void f_load_head_tail_prod (svm_fifo_t * f, u32 * head, u32 * tail) { /* load relaxed: producer owned index */ *tail = f->tail; /* load-acq: producer foreign index (paired with store-rel in consumer) */ *head = clib_atomic_load_acq_n (&f->head); } /** * Load head and tail independent of producer/consumer role * * Internal function. */ static inline void f_load_head_tail_all_acq (svm_fifo_t * f, u32 * head, u32 * tail) { /* load-acq : consumer foreign index (paired with store-rel) */ *tail = clib_atomic_load_acq_n (&f->tail); /* load-acq : producer foriegn index (paired with store-rel) */ *head = clib_atomic_load_acq_n (&f->head); } /** * Distance to a from b, i.e., a - b in the fifo * * Internal function. */ static inline u32 f_distance_to (svm_fifo_t * f, u32 a, u32 b) { return ((f->size + a - b) % f->size); } /** * Distance from a to b, i.e., b - a in the fifo * * Internal function. 
*/ static inline u32 f_distance_from (svm_fifo_t * f, u32 a, u32 b) { return ((f->size + b - a) % f->size); } /** * Fifo current size, i.e., number of bytes enqueued * * Internal function. */ static inline u32 f_cursize (svm_fifo_t * f, u32 head, u32 tail) { return (head <= tail ? tail - head : f->size + tail - head); } /** * Fifo free bytes, i.e., number of free bytes * * Internal function */ static inline u32 f_free_count (svm_fifo_t * f, u32 head, u32 tail) { return (f->nitems - f_cursize (f, head, tail)); } /** * Try to shrink fifo size. * * Internal function. */ void svm_fifo_try_shrink (svm_fifo_t * f, u32 head, u32 tail); /** * Create fifo of requested size * * Allocates fifo on current heap. * * @param size data size in bytes for fifo to be allocated. Will be * rounded to the next highest power-of-two value. * @return pointer to new fifo */ svm_fifo_t *svm_fifo_create (u32 size); /** * Initialize fifo * * @param size size for fifo */ void svm_fifo_init (svm_fifo_t * f, u32 size); /** * Allocate a fifo chunk on heap * * If the chunk is allocated on a fifo segment, this should be called * with the segment's heap pushed. * * @param size chunk size in bytes. Will be rounded to the next highest * power-of-two * @return new chunk or 0 if alloc failed */ svm_fifo_chunk_t *svm_fifo_chunk_alloc (u32 size); /** * Grow fifo size by adding chunk to chunk list * * If fifos are allocated on a segment, this should be called with * the segment's heap pushed. * * @param f fifo to be extended * @param c chunk or linked list of chunks to be added */ void svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c); /** * Request to reduce fifo size by amount of bytes * * Because the producer might be enqueuing data when this is called, the * actual size update is only applied when producer tries to enqueue new * data, unless @param try_shrink is set. * * @param f fifo * @param len number of bytes to remove from fifo. 
The actual number * of bytes to be removed will be less or equal to this * value. * @param try_shrink flg to indicate if it's safe to try to shrink fifo * size. It should be set only if this is called by the * producer of if the producer is not using the fifo * @return actual length fifo size will be reduced by */ int svm_fifo_reduce_size (svm_fifo_t * f, u32 len, u8 try_shrink); /** * Removes chunks that are after fifo end byte * * Needs to be called with segment heap pushed. * * @param f fifo */ svm_fifo_chunk_t *svm_fifo_collect_chunks (svm_fifo_t * f); /** * Free fifo and associated state * * @param f fifo */ void svm_fifo_free (svm_fifo_t * f); /** * Cleanup fifo chunk lookup rb tree * * The rb tree is allocated in segment heap so this should be called * with it pushed. * * @param f fifo to cleanup */ void svm_fifo_free_chunk_lookup (svm_fifo_t * f); /** * Cleanup fifo ooo data * * The ooo data is allocated in producer process memory. The fifo * segment heap should not be pushed. * * @param f fifo to cleanup */ void svm_fifo_free_ooo_data (svm_fifo_t * f); /** * Init fifo head and tail * * @param f fifo * @param head head value that will be matched to a chunk * @param tail tail value that will be matched to a chunk */ void svm_fifo_init_pointers (svm_fifo_t * f, u32 head, u32 tail); /** * Clone fifo * * Clones single/default chunk fifo. It does not work for fifos with * multiple chunks. */ void svm_fifo_clone (svm_fifo_t * df, svm_fifo_t * sf); /** * Enqueue data to fifo * * Data is enqueued and tail pointer is updated atomically. If the new data * enqueued partly overlaps or "touc
#!/usr/bin/env python3

import abc

from scapy.layers.l2 import Ether
from scapy.packet import Raw
from scapy.layers.inet import IP, UDP


class BridgeDomain(metaclass=abc.ABCMeta):
    """Bridge domain abstraction"""

    @property
    def frame_request(self):
        """Ethernet frame modeling a generic request.

        UDP packet carrying 100 bytes of 0xa5 filler. The payload must be
        a bytes literal: passing a str here would make scapy UTF-8-encode
        it, turning each 0xa5 into the two bytes 0xc2 0xa5 and corrupting
        both the payload pattern and its length.
        """
        return (
            Ether(src="00:00:00:00:00:01", dst="00:00:00:00:00:02")
            / IP(src="1.2.3.4", dst="4.3.2.1")
            / UDP(sport=10000, dport=20000)
            / Raw(b"\xa5" * 100)
        )

    @property
    def frame_reply(self):
        """Ethernet frame modeling a generic reply"""
        return (
            Ether(src="00:00:00:00:00:02"