summaryrefslogtreecommitdiffstats
path: root/src/vnet
AgeCommit message (Expand)AuthorFilesLines
2020-06-16gso: fix the udp checksum in testMohsin Kazmi2-13/+8
2020-06-15vxlan: remove judgement with always trueZhiyong Yang1-27/+15
2020-06-12virtio: interface type based improvementsMohsin Kazmi3-75/+142
2020-06-12ip: allocate ip4 mtrie pages in htlb memoryDave Barach3-2/+48
2020-06-10ip: reassembly: LRU algorithm should eliminate the longest unused nodeszhengdelun2-2/+2
2020-06-09tap: enable gso/csum offload for tunMohsin Kazmi1-7/+0
2020-06-09vxlan: fix the inner packet checksum calculationMohsin Kazmi2-4/+101
2020-06-09misc: add code styling for vxlan encap and vxlan_packet filesMohsin Kazmi2-256/+282
2020-06-08tcp: improve rtt estimation with rate samplingFlorin Coras1-18/+29
2020-06-08vxlan: Fixed checksum caclculation offsetVladimir Isaev6-72/+44
2020-06-07fib: fix multiple dpo pool expand casesDave Barach6-7/+76
2020-06-05fib: fix adj pool expand casesDave Barach1-1/+24
2020-06-04fib: add barrier sync, pool/vector expand casesDave Barach1-1/+25
2020-06-01ip: fix IPv6 mask to prefix length conversionAndreas Schultz1-10/+5
2020-05-29bonding: Revert adjust link stateMatthew Smith1-20/+3
2020-05-29fib: Safe adj walkNeale Ranns1-8/+15
2020-05-29gso: fix flag for inner headerMohsin Kazmi1-2/+3
2020-05-29docs: Minor updates to feature.yaml filesJohn DeNisco2-6/+8
2020-05-29misc: ipfix-export unformat u16 collector_port fixElias Rudberg3-1/+20
2020-05-28tap: fix rx queue indexMohsin Kazmi1-1/+2
2020-05-27ipsec: Add/Del IPSec SA is not MP safeNeale Ranns1-7/+0
2020-05-27ethernet: fix DMAC check and skip unnecessary ones (VPP-1868)John Lo8-31/+104
2020-05-27ip: reassembly: use correct IP header offsetKlement Sekera1-2/+7
2020-05-27bonding: adjust link state based on active slavesMatthew Smith1-3/+20
2020-05-26fib: Fix interpose source reactivateNeale Ranns1-0/+14
2020-05-26fib: Use basic hash for adjacency neighbour tableNeale Ranns1-82/+42
2020-05-25tcp: fix sack block validation on wrapFlorin Coras1-1/+3
2020-05-25api: add new stream message conventionOle Troan1-0/+1
2020-05-24tcp: track buffer alloc failuresFlorin Coras2-3/+21
2020-05-24ipsec: fixed chaining ops after add footer and icvPiotrX Kleski1-8/+11
2020-05-21vcl: support connected udp listensFlorin Coras2-0/+2
2020-05-21ip: Dual loop error in midchain chksumNeale Ranns1-1/+1
2020-05-19vppinfra: refactor mpcap.hDave Barach1-0/+113
2020-05-19interface: fix interface rx mode config APIJakub Grajciar1-2/+4
2020-05-17ip6-nd: correct set-ip6-nd-proxy CLI short_helpIgnas Bacius1-1/+1
2020-05-16l2: L2/L3 mode swicth cleanup and l2-patch fixJohn Lo4-31/+16
2020-05-16misc: fix typo in set-ipfix-exporter CLI short_helpIgnas Bacius1-1/+1
2020-05-15misc: removed executable bits from source filesRay Kinsella15-0/+0
2020-05-15session: track detached listener segment managersFlorin Coras4-7/+77
2020-05-14tcp: fix bogus time update due to missing castFlorin Coras1-1/+2
2020-05-14lisp: API cleanupOnong Tayeng3-41/+6
2020-05-14nat: ED: store both thread&session idx in hashKlement Sekera1-1/+1
2020-05-14ip: fix interface ip address del sw_if_index checkyedg4-6/+27
2020-05-13sr: fix srv6/srv6-ad/srv6-as promisc mode switchJohn Lo1-16/+10
2020-05-13gso: remove ordering dependency on esp-encrypt-tunNeale Ranns1-2/+2
2020-05-13interface: fix the checksum offload in quad loopMohsin Kazmi1-20/+25
2020-05-13feature: Config end nodes are user specificNeale Ranns5-24/+41
2020-05-13bonding: fix the GSO flagsMohsin Kazmi1-2/+8
2020-05-13ipsec: Support 4o6 and 6o4 for SPD tunnel mode SAsNeale Ranns2-38/+65
2020-05-13nat: handoff next node feature fixFilip Varga1-1/+1
an class="p">; uword alloc_arena_size;	/* NOTE(review): leading "an class=..." is HTML
					   scrape residue; the opening of this struct
					   lies outside the visible chunk */

  /**
   * A custom format function to print the Key and Value of bihash_key
   * instead of default hexdump
   */
  format_function_t *fmt_fn;
} BVT (clib_bihash);

/**
 * Promote cache slot @a slot to most-recently-used.
 *
 * b->cache_lru packs one 3-bit slot number per LRU position: bits 2..0
 * hold the most-recently-used slot, the next 3-bit field the next most
 * recent, and so on; bit 15 is used (by the lock/unlock helpers below)
 * as the bucket-cache lock bit.  This routine locates @a slot in that
 * packed order, shifts every younger field up one position, and inserts
 * @a slot at position 0.  Compiled out unless BIHASH_KVP_CACHE_SIZE > 1.
 *
 * @param b    bucket whose LRU order is updated
 * @param slot cache slot just touched; must be < BIHASH_KVP_CACHE_SIZE
 */
static inline void
BV (clib_bihash_update_lru) (BVT (clib_bihash_bucket) * b, u8 slot)
{
#if BIHASH_KVP_CACHE_SIZE > 1
  u16 value, tmp, mask;
  u8 found_lru_pos;
  u16 save_hi;

  ASSERT (slot < BIHASH_KVP_CACHE_SIZE);

  /* First, find the slot in cache_lru: replicate @a slot into every
     3-bit field, so (value ^ mask) is zero exactly in the field that
     currently holds @a slot */
  mask = slot;
  if (BIHASH_KVP_CACHE_SIZE > 1)
    mask |= slot << 3;
  if (BIHASH_KVP_CACHE_SIZE > 2)
    mask |= slot << 6;
  if (BIHASH_KVP_CACHE_SIZE > 3)
    mask |= slot << 9;
  if (BIHASH_KVP_CACHE_SIZE > 4)
    mask |= slot << 12;

  value = b->cache_lru;
  tmp = value ^ mask;

  /* Already the most-recently used? */
  if ((tmp & 7) == 0)
    return;

  /* Locate the 3-bit LRU field currently holding @a slot */
  found_lru_pos = ((tmp & (7 << 3)) == 0) ? 1 : 0;
  if (BIHASH_KVP_CACHE_SIZE > 2)
    found_lru_pos = ((tmp & (7 << 6)) == 0) ? 2 : found_lru_pos;
  if (BIHASH_KVP_CACHE_SIZE > 3)
    found_lru_pos = ((tmp & (7 << 9)) == 0) ? 3 : found_lru_pos;
  if (BIHASH_KVP_CACHE_SIZE > 4)
    found_lru_pos = ((tmp & (7 << 12)) == 0) ? 4 : found_lru_pos;

  /* Position 0 was handled by the early return above */
  ASSERT (found_lru_pos);

  /* create a mask to kill bits in or above slot:
     net effect is 0xFFFF << (3 * found_lru_pos), then inverted, i.e.
     keep only the fields strictly below the found position */
  mask = 0xFFFF << found_lru_pos;
  mask <<= found_lru_pos;
  mask <<= found_lru_pos;
  mask ^= 0xFFFF;
  tmp = value & mask;

  /* Save bits above slot (fields strictly above the found position) */
  mask ^= 0xFFFF;
  mask <<= 3;
  save_hi = value & mask;

  /* older fields kept | younger fields shifted up | @a slot at MRU */
  value = save_hi | (tmp << 3) | slot;
  b->cache_lru = value;
#endif
}

/** Out-of-line flavor of the LRU update, for use off the fast path */
void BV (clib_bihash_update_lru_not_inline) (BVT (clib_bihash_bucket) * b,
					     u8 slot);

/**
 * Return the least-recently-used cache slot of bucket @a b, i.e. the
 * slot number stored in the topmost 3-bit LRU field.
 * Always 0 when the KVP cache is compiled out.
 */
static inline u8
BV (clib_bihash_get_lru) (BVT (clib_bihash_bucket) * b)
{
#if BIHASH_KVP_CACHE_SIZE > 0
  return (b->cache_lru >> (3 * (BIHASH_KVP_CACHE_SIZE - 1))) & 7;
#else
  return 0;
#endif
}

/**
 * Invalidate the bucket's KVP cache (0xff-fill all cached entries)
 * and reset the LRU order.
 *
 * NOTE(review): BIHASH_KVP_CACHE_SIZE > 5 would leave
 * initial_lru_value uninitialized — presumably the cache size is
 * capped at 5 by design; confirm at the template's configuration site.
 */
static inline void
BV (clib_bihash_reset_cache) (BVT (clib_bihash_bucket) * b)
{
#if BIHASH_KVP_CACHE_SIZE > 0
  u16 initial_lru_value;

  memset (b->cache, 0xff, sizeof (b->cache));

  /*
   * We'll want the cache to be loaded from slot 0 -> slot N, so
   * the initial LRU order is reverse index order.
   */
  if (BIHASH_KVP_CACHE_SIZE == 1)
    initial_lru_value = 0;
  else if (BIHASH_KVP_CACHE_SIZE == 2)
    initial_lru_value = (0 << 3) | (1 << 0);
  else if (BIHASH_KVP_CACHE_SIZE == 3)
    initial_lru_value = (0 << 6) | (1 << 3) | (2 << 0);
  else if (BIHASH_KVP_CACHE_SIZE == 4)
    initial_lru_value = (0 << 9) | (1 << 6) | (2 << 3) | (3 << 0);
  else if (BIHASH_KVP_CACHE_SIZE == 5)
    initial_lru_value = (0 << 12) | (1 << 9) | (2 << 6) | (3 << 3) | (4 << 0);

  b->cache_lru = initial_lru_value;
#endif
}

/**
 * Try to take the bucket-cache lock (bit 15 of cache_lru) with an
 * atomic fetch-and-OR.
 *
 * @return 1 if the lock was acquired, 0 if it was already held.
 *         Trivially succeeds when the cache is compiled out.
 */
static inline int
BV (clib_bihash_lock_bucket) (BVT (clib_bihash_bucket) * b)
{
#if BIHASH_KVP_CACHE_SIZE > 0
  u16 cache_lru_bit;
  u16 rv;

  cache_lru_bit = 1 << 15;

  rv = __sync_fetch_and_or (&b->cache_lru, cache_lru_bit);
  /* Was already locked? */
  if (rv & (1 << 15))
    return 0;
#endif
  return 1;
}

/**
 * Release the bucket-cache lock by clearing bit 15.
 *
 * NOTE(review): plain, non-atomic read-modify-write — presumably safe
 * because only the lock holder writes cache_lru here; confirm the
 * memory-ordering assumptions on weakly-ordered CPUs.
 */
static inline void
BV (clib_bihash_unlock_bucket) (BVT (clib_bihash_bucket) * b)
{
#if BIHASH_KVP_CACHE_SIZE > 0
  u16 cache_lru;

  cache_lru = b->cache_lru & ~(1 << 15);
  b->cache_lru = cache_lru;
#endif
}

/** Translate an arena-relative byte offset into a pointer */
static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,
						uword offset)
{
  u8 *hp = (u8 *) h->alloc_arena;
  u8 *vp = hp + offset;

  return (void *) vp;
}

/** Translate a pointer into an arena-relative byte offset */
static inline uword BV (clib_bihash_get_offset) (BVT (clib_bihash) * h,
						 void *v)
{
  u8 *hp, *vp;

  hp = (u8 *) h->alloc_arena;
  vp = (u8 *) v;

  return vp - hp;
}

/* Public (non-inline) bihash API — defined in the template .c file */
void BV (clib_bihash_init) (BVT (clib_bihash) * h, char *name, u32 nbuckets,
			    uword memory_size);

void BV (clib_bihash_set_kvp_format_fn) (BVT (clib_bihash) * h,
					 format_function_t * fmt_fn);

void BV (clib_bihash_free) (BVT (clib_bihash) * h);

int BV (clib_bihash_add_del) (BVT (clib_bihash) * h,
			      BVT (clib_bihash_kv) * add_v, int is_add);
int BV (clib_bihash_search) (BVT (clib_bihash) * h,
			     BVT (clib_bihash_kv) * search_v,
			     BVT (clib_bihash_kv) * return_v);

void BV (clib_bihash_foreach_key_value_pair) (BVT (clib_bihash) * h,
					      void *callback, void *arg);

format_function_t BV (format_bihash);
format_function_t BV (format_bihash_kvp);
format_function_t BV (format_bihash_lru);

/* (signature continues on the next source line:
   clib_bihash_search_inline_with_hash) */
static inline int BV
(clib_bihash_search_inline_with_hash)
  (BVT (clib_bihash) * h, u64 hash, BVT (clib_bihash_kv) * key_result)
{
  u32 bucket_index;
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) * b;
#if BIHASH_KVP_CACHE_SIZE > 0
  BVT (clib_bihash_kv) * kvp;
#endif
  int i, limit;

  /* Search with a precomputed hash.  key_result is in/out: it carries
     the search key on entry and is overwritten with the matching
     (key,value) pair on success.  Returns 0 on hit, -1 on miss. */
  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  /* Empty bucket: no backing pages allocated */
  if (b->offset == 0)
    return -1;

#if BIHASH_KVP_CACHE_SIZE > 0
  /* Check the cache, if not currently locked (bit 15 = lock bit) */
  if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
    {
      limit = BIHASH_KVP_CACHE_SIZE;
      kvp = b->cache;
      for (i = 0; i < limit; i++)
	{
	  if (BV (clib_bihash_key_compare) (kvp[i].key, key_result->key))
	    {
	      *key_result = kvp[i];
	      h->cache_hits++;	/* stats counter; unsynchronized */
	      return 0;
	    }
	}
    }
#endif

  /* Low log2_nbuckets bits picked the bucket; remaining bits pick the
     page within the bucket */
  hash >>= h->log2_nbuckets;

  v = BV (clib_bihash_get_value) (h, b->offset);

  /* If the bucket has unresolvable collisions, use linear search */
  limit = BIHASH_KVP_PER_PAGE;
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (PREDICT_FALSE (b->linear_search))
    limit <<= b->log2_pages;

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_key_compare) (v->kvp[i].key, key_result->key))
	{
	  *key_result = v->kvp[i];

#if BIHASH_KVP_CACHE_SIZE > 0
	  u8 cache_slot;

	  /* Try to lock the bucket; on contention simply skip the
	     cache fill — the lookup result is already correct */
	  if (BV (clib_bihash_lock_bucket) (b))
	    {
	      /* Evict the LRU slot in favor of this hit */
	      cache_slot = BV (clib_bihash_get_lru) (b);
	      b->cache[cache_slot] = v->kvp[i];
	      BV (clib_bihash_update_lru) (b, cache_slot);

	      /* Unlock the bucket */
	      BV (clib_bihash_unlock_bucket) (b);
	      h->cache_misses++;	/* stats counter; unsynchronized */
	    }
#endif
	  return 0;
	}
    }
  return -1;
}

/**
 * Search for the key held in *key_result; on success the matching
 * key/value pair is written back into *key_result.
 *
 * @return 0 on hit, -1 on miss
 */
static inline int
BV (clib_bihash_search_inline) (BVT (clib_bihash) * h,
				BVT (clib_bihash_kv) * key_result)
{
  u64 hash;

  hash = BV (clib_bihash_hash) (key_result);

  return BV (clib_bihash_search_inline_with_hash) (h, hash, key_result);
}

/** Prefetch the bucket header selected by @a hash ahead of a lookup */
static inline void
BV (clib_bihash_prefetch_bucket) (BVT (clib_bihash) * h, u64 hash)
{
  u32 bucket_index;
  BVT (clib_bihash_bucket) * b;

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, READ);
}

/** Prefetch the data page selected by @a hash; no-op for empty buckets */
static inline void
BV (clib_bihash_prefetch_data) (BVT (clib_bihash) * h, u64 hash)
{
  u32 bucket_index;
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) * b;

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  if (PREDICT_FALSE (b->offset == 0))
    return;

  hash >>= h->log2_nbuckets;
  v = BV (clib_bihash_get_value) (h, b->offset);

  /* Linear-search buckets always start from page 0 */
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;

  CLIB_PREFETCH (v, CLIB_CACHE_LINE_BYTES, READ);
}

/**
 * As clib_bihash_search_inline_with_hash, but computes the hash itself
 * and keeps the search key (*search_key) separate from the result
 * (*valuep).
 *
 * @return 0 on hit (with *valuep filled in), -1 on miss
 */
static inline int
BV (clib_bihash_search_inline_2) (BVT (clib_bihash) * h,
				  BVT (clib_bihash_kv) * search_key,
				  BVT (clib_bihash_kv) * valuep)
{
  u64 hash;
  u32 bucket_index;
  BVT (clib_bihash_value) * v;
  BVT (clib_bihash_bucket) * b;
#if BIHASH_KVP_CACHE_SIZE > 0
  BVT (clib_bihash_kv) * kvp;
#endif
  int i, limit;

  ASSERT (valuep);

  hash = BV (clib_bihash_hash) (search_key);

  bucket_index = hash & (h->nbuckets - 1);
  b = &h->buckets[bucket_index];

  /* Empty bucket: no backing pages allocated */
  if (b->offset == 0)
    return -1;

  /* Check the cache, if currently unlocked */
#if BIHASH_KVP_CACHE_SIZE > 0
  if (PREDICT_TRUE ((b->cache_lru & (1 << 15)) == 0))
    {
      limit = BIHASH_KVP_CACHE_SIZE;
      kvp = b->cache;
      for (i = 0; i < limit; i++)
	{
	  if (BV (clib_bihash_key_compare) (kvp[i].key, search_key->key))
	    {
	      *valuep = kvp[i];
	      h->cache_hits++;	/* stats counter; unsynchronized */
	      return 0;
	    }
	}
    }
#endif

  /* Low log2_nbuckets bits picked the bucket; rest pick the page */
  hash >>= h->log2_nbuckets;
  v = BV (clib_bihash_get_value) (h, b->offset);

  /* If the bucket has unresolvable collisions, use linear search */
  limit = BIHASH_KVP_PER_PAGE;
  v += (b->linear_search == 0) ? hash & ((1 << b->log2_pages) - 1) : 0;
  if (PREDICT_FALSE (b->linear_search))
    limit <<= b->log2_pages;

  for (i = 0; i < limit; i++)
    {
      if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
	{
	  *valuep = v->kvp[i];

#if BIHASH_KVP_CACHE_SIZE > 0
	  u8 cache_slot;

	  /* Try to lock the bucket; on contention skip the cache fill */
	  if (BV (clib_bihash_lock_bucket) (b))
	    {
	      cache_slot = BV (clib_bihash_get_lru) (b);
	      b->cache[cache_slot] = v->kvp[i];
	      BV (clib_bihash_update_lru) (b, cache_slot);

	      /* Reenable the cache */
	      BV (clib_bihash_unlock_bucket) (b);
	      h->cache_misses++;	/* stats counter; unsynchronized */
	    }
#endif
	  return 0;
	}
    }
  return -1;
}

#endif /* __included_bihash_template_h__ */

/** @endcond */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */