path: root/src/vnet/vxlan-gbp
Age | Commit message | Author | Files | Lines
2021-06-21 | vxlan: api cleanup | Filip Tehlar | 1 | -52/+9
2021-03-14 | ip: extend punt CLI for exception packets | Mohammed Hawari | 1 | -8/+9
2021-02-15 | vlib: refactor checksum offload support | Mohsin Kazmi | 1 | -10/+16
2020-12-14 | misc: move to new pool_foreach macros | Damjan Marion | 2 | -6/+6
2020-10-21 | misc: minimize dependencies on udp.h | Florin Coras | 1 | -1/+2
2020-09-22 | vxlan-gbp: Mark APIs as in-progress | Neale Ranns | 1 | -0/+5
2020-06-09 | vxlan: fix the inner packet checksum calculation | Mohsin Kazmi | 1 | -2/+50
2020-04-28 | tests: move defaults from defaultmapping to .api files | Paul Vinciguerra | 1 | -1/+1
2020-02-25 | vlib: Punt reason allocation listener enable/disable callback | Neale Ranns | 1 | -0/+2
2020-02-18 | misc: fix coverity warnings | Dave Barach | 1 | -1/+1
2020-02-17 | misc: fix coverity warnings | Dave Barach | 1 | -1/+1
2020-01-03 | vxlan-gbp: api cleanup | Jakub Grajciar | 1 | -11/+11
2020-01-03 | nat: use SVR | Klement Sekera | 1 | -0/+1
2019-12-20 | vxlan: reuse inner packet flow hash for tunnel outer header load balance | Shawn Ji | 1 | -0/+7
2019-12-17 | vxlan geneve gtpu: fix short help | Paul Vinciguerra | 1 | -1/+1
2019-12-10 | api: multiple connections per process | Dave Barach | 1 | -1/+1
2019-11-26 | fib: reduce save_rewrite_length to u8 | Klement Sekera | 1 | -1/+1
2019-08-05 | vxlan: fix VXLANoIP6 checksum offload setup | John Lo | 1 | -3/+3
2019-07-19 | fib: FIB Entry tracking | Neale Ranns | 1 | -8/+7
2019-07-05 | vxlan-gbp: Decap ignores reserved bits | Neale Ranns | 2 | -18/+22
2019-06-26 | api: initialize sw_if_index earlier in vxlan_gbp_tunnel_add_del | Paul Vinciguerra | 1 | -1/+1
2019-06-20 | api: fix return code in vxlan_gbp_tunnel_add_del | Paul Vinciguerra | 1 | -1/+1
2019-06-18 | fib: fib api updates | Neale Ranns | 1 | -4/+4
2019-05-29 | vxlan-gbp: prefetch encap header memory area | Zhiyong Yang | 1 | -66/+70
2019-05-16 | init / exit function ordering | Dave Barach | 1 | -6/+7
2019-04-03 | GBP: iVXLAN reflection check | Neale Ranns | 3 | -4/+26
2019-03-28 | Punt Infra | Neale Ranns | 3 | -7/+43
2019-03-28 | VXLAN-GBP: format flags in encap trace | Neale Ranns | 1 | -2/+3
2019-03-28 | Typos. A bunch of typos I've been collecting. | Paul Vinciguerra | 1 | -1/+1
2019-03-27 | GBP: fixes for l3-out routing | Neale Ranns | 2 | -1/+32
2019-03-26 | Simplify adjacency rewrite code | Benoît Ganne | 1 | -18/+5
2019-03-06 | vxlan*: migrate old MULTIARCH macros to VLIB_NODE_FN | Filip Tehlar | 2 | -37/+24
2019-03-06 | GBP: learn from ARP and L2 packets | Neale Ranns | 1 | -2/+0
2019-03-05 | VXLAN-GBP: decap checks src,dst&VNI for unicast, then checks only dst&VNI for... | Neale Ranns | 2 | -71/+62
2019-01-22 | GBP: Sclass to src-epg conversions | Neale Ranns | 2 | -9/+9
2019-01-10 | ARP/ND: copy opaque2 persistent fields to new packet | Neale Ranns | 1 | -2/+8
2018-11-15 | VXLAN-GBP: c-n-p error in the dual-loop tracing | Neale Ranns | 1 | -1/+1
2018-11-14 | Remove c-11 memcpy checks from perf-critical code | Dave Barach | 1 | -6/+9
2018-11-07 | GBP Endpoint Learning | Neale Ranns | 7 | -104/+252
2018-10-26 | vxlan-gbp: On demand udp ports registration | Mohsin Kazmi | 2 | -5/+44
2018-10-23 | c11 safe string handling support | Dave Barach | 3 | -5/+5
2018-09-24 | Trivial: Clean up some typos. | Paul Vinciguerra | 4 | -10/+10
2018-09-12 | VXLAN-GBP: use common types on the API | Neale Ranns | 4 | -81/+73
2018-09-10 | vxlan-gbp: Add support for vxlan gbp | Mohsin Kazmi | 9 | -0/+3407
);
  vec_free (feature_vector);
}

static u32
add_next (vlib_main_t * vm,
          vnet_config_main_t * cm,
          u32 last_node_index, u32 this_node_index)
{
  u32 i, ni = ~0;

  if (last_node_index != ~0)
    return vlib_node_add_next (vm, last_node_index, this_node_index);

  for (i = 0; i < vec_len (cm->start_node_indices); i++)
    {
      u32 tmp;
      tmp =
        vlib_node_add_next (vm, cm->start_node_indices[i], this_node_index);
      if (ni == ~0)
        ni = tmp;
      /* Start nodes to first must agree on next indices. */
      ASSERT (ni == tmp);
    }

  return ni;
}

static vnet_config_t *
find_config_with_features (vlib_main_t * vm,
                           vnet_config_main_t * cm,
                           vnet_config_feature_t * feature_vector,
                           u32 end_node_index)
{
  u32 last_node_index = ~0;
  vnet_config_feature_t *f;
  u32 *config_string;
  uword *p;
  vnet_config_t *c;

  config_string = cm->config_string_temp;
  cm->config_string_temp = 0;
  if (config_string)
    _vec_len (config_string) = 0;

  vec_foreach (f, feature_vector)
  {
    /* Connect node graph. */
    f->next_index = add_next (vm, cm, last_node_index, f->node_index);
    last_node_index = f->node_index;

    /* Store next index in config string. */
    vec_add1 (config_string, f->next_index);

    /* Store feature config. */
    vec_add (config_string, f->feature_config, vec_len (f->feature_config));
  }

  /* Terminate config string with next for end node. */
  if (last_node_index == ~0 || last_node_index != end_node_index)
    {
      u32 next_index = add_next (vm, cm, last_node_index, end_node_index);
      vec_add1 (config_string, next_index);
    }

  /* See if config string is unique. */
  p = hash_get_mem (cm->config_string_hash, config_string);
  if (p)
    {
      /* Not unique.  Share existing config. */
      cm->config_string_temp = config_string;   /* we'll use it again later. */
      free_feature_vector (feature_vector);
      c = pool_elt_at_index (cm->config_pool, p[0]);
    }
  else
    {
      u32 *d;

      pool_get (cm->config_pool, c);
      c->index = c - cm->config_pool;
      c->features = feature_vector;
      c->config_string_vector = config_string;

      /* Allocate copy of config string in heap.
         VLIB buffers will maintain pointers to heap as they read out
         configuration data. */
      c->config_string_heap_index =
        heap_alloc (cm->config_string_heap, vec_len (config_string) + 1,
                    c->config_string_heap_handle);

      /* First element in heap points back to pool index. */
      d = vec_elt_at_index (cm->config_string_heap,
                            c->config_string_heap_index);
      d[0] = c->index;

      clib_memcpy (d + 1, config_string, vec_bytes (config_string));
      hash_set_mem (cm->config_string_hash, config_string, c->index);
      c->reference_count = 0;   /* will be incremented by caller. */

      vec_validate_init_empty (cm->end_node_indices_by_user_index,
                               c->config_string_heap_index + 1,
                               cm->default_end_node_index);
      cm->end_node_indices_by_user_index[c->config_string_heap_index + 1] =
        end_node_index;
    }

  return c;
}

void
vnet_config_init (vlib_main_t * vm,
                  vnet_config_main_t * cm,
                  char *start_node_names[],
                  int n_start_node_names,
                  char *feature_node_names[], int n_feature_node_names)
{
  vlib_node_t *n;
  u32 i;

  clib_memset (cm, 0, sizeof (cm[0]));

  cm->config_string_hash =
    hash_create_vec (0,
                     STRUCT_SIZE_OF (vnet_config_t, config_string_vector[0]),
                     sizeof (uword));

  ASSERT (n_feature_node_names >= 1);

  vec_resize (cm->start_node_indices, n_start_node_names);
  for (i = 0; i < n_start_node_names; i++)
    {
      n = vlib_get_node_by_name (vm, (u8 *) start_node_names[i]);
      /* Given node name must exist. */
      ASSERT (n != 0);
      cm->start_node_indices[i] = n->index;
    }

  vec_resize (cm->node_index_by_feature_index, n_feature_node_names);
  for (i = 0; i < n_feature_node_names; i++)
    {
      if (!feature_node_names[i])
        cm->node_index_by_feature_index[i] = ~0;
      else
        {
          n = vlib_get_node_by_name (vm, (u8 *) feature_node_names[i]);
          /* Given node may exist in a plug-in library which is not present. */
          if (n)
            {
              if (i + 1 == n_feature_node_names)
                cm->default_end_node_index = n->index;
              cm->node_index_by_feature_index[i] = n->index;
            }
          else
            cm->node_index_by_feature_index[i] = ~0;
        }
    }
}

static void
remove_reference (vnet_config_main_t * cm, vnet_config_t * c)
{
  ASSERT (c->reference_count > 0);
  c->reference_count -= 1;
  if (c->reference_count == 0)
    {
      hash_unset (cm->config_string_hash, c->config_string_vector);
      vnet_config_free (cm, c);
      pool_put (cm->config_pool, c);
    }
}

static int
feature_cmp (void *a1, void *a2)
{
  vnet_config_feature_t *f1 = a1;
  vnet_config_feature_t *f2 = a2;

  return (int) f1->feature_index - f2->feature_index;
}

always_inline u32 *
vnet_get_config_heap (vnet_config_main_t * cm, u32 ci)
{
  return heap_elt_at_index (cm->config_string_heap, ci);
}

void
vnet_config_del (vnet_config_main_t * cm, u32 config_id)
{
  u32 *p = vnet_get_config_heap (cm, config_id);
  vnet_config_t *old = pool_elt_at_index (cm->config_pool, p[-1]);
  remove_reference (cm, old);
}

u32
vnet_config_modify_end_node (vlib_main_t * vm,
                             vnet_config_main_t * cm,
                             u32 config_string_heap_index,
                             u32 end_node_index)
{
  vnet_config_feature_t *new_features;
  vnet_config_t *old, *new;

  if (end_node_index == ~0)     // feature node does not exist
    return ~0;

  if (config_string_heap_index == ~0)
    {
      old = 0;
      new_features = 0;
    }
  else
    {
      u32 *p = vnet_get_config_heap (cm, config_string_heap_index);
      old = pool_elt_at_index (cm->config_pool, p[-1]);
      new_features = old->features;
      if (new_features)
        new_features = duplicate_feature_vector (new_features);
    }

  if (vec_len (new_features))
    {
      /* is the last feature the current end node */
      u32 last = vec_len (new_features) - 1;
      if (new_features[last].node_index == cm->default_end_node_index)
        {
          vec_free (new_features->feature_config);
          _vec_len (new_features) = last;
        }
    }

  if (old)
    remove_reference (cm, old);

  new = find_config_with_features (vm, cm, new_features, end_node_index);
  new->reference_count += 1;

  /*
   * User gets pointer to config string first element
   * (which defines the pool index
   * this config string comes from).
   */
  vec_validate (cm->config_pool_index_by_user_index,
                new->config_string_heap_index + 1);
  cm->config_pool_index_by_user_index[new->config_string_heap_index + 1] =
    new - cm->config_pool;
  return new->config_string_heap_index + 1;
}

u32
vnet_config_add_feature (vlib_main_t * vm,
                         vnet_config_main_t * cm,
                         u32 config_string_heap_index,
                         u32 feature_index,
                         void *feature_config, u32 n_feature_config_bytes)
{
  vnet_config_t *old, *new;
  vnet_config_feature_t *new_features, *f;
  u32 n_feature_config_u32s, end_node_index;
  u32 node_index = vec_elt (cm->node_index_by_feature_index, feature_index);

  if (node_index == ~0)         // feature node does not exist
    return ~0;

  if (config_string_heap_index == ~0)
    {
      old = 0;
      new_features = 0;
      end_node_index = cm->default_end_node_index;
    }
  else
    {
      u32 *p = vnet_get_config_heap (cm, config_string_heap_index);
      old = pool_elt_at_index (cm->config_pool, p[-1]);
      new_features = old->features;
      end_node_index =
        cm->end_node_indices_by_user_index[config_string_heap_index];
      if (new_features)
        new_features = duplicate_feature_vector (new_features);
    }

  vec_add2 (new_features, f, 1);
  f->feature_index = feature_index;
  f->node_index = node_index;

  if (n_feature_config_bytes)
    {
      n_feature_config_u32s =
        round_pow2 (n_feature_config_bytes, sizeof (f->feature_config[0])) /
        sizeof (f->feature_config[0]);
      vec_validate (f->feature_config, n_feature_config_u32s - 1);
      clib_memcpy_fast (f->feature_config, feature_config,
                        n_feature_config_bytes);
    }

  /* Sort (prioritize) features. */
  if (vec_len (new_features) > 1)
    vec_sort_with_function (new_features, feature_cmp);

  if (old)
    remove_reference (cm, old);

  new = find_config_with_features (vm, cm, new_features, end_node_index);
  new->reference_count += 1;

  /*
   * User gets pointer to config string first element
   * (which defines the pool index
   * this config string comes from).
   */
  vec_validate (cm->config_pool_index_by_user_index,
                new->config_string_heap_index + 1);
  cm->config_pool_index_by_user_index[new->config_string_heap_index + 1] =
    new - cm->config_pool;
  return new->config_string_heap_index + 1;
}

u32
vnet_config_del_feature (vlib_main_t * vm,
                         vnet_config_main_t * cm,
                         u32 config_string_heap_index,
                         u32 feature_index,
                         void *feature_config, u32 n_feature_config_bytes)
{
  vnet_config_t *old, *new;
  vnet_config_feature_t *new_features, *f;
  u32 n_feature_config_u32s;

  {
    u32 *p = vnet_get_config_heap (cm, config_string_heap_index);
    old = pool_elt_at_index (cm->config_pool, p[-1]);
  }

  n_feature_config_u32s =
    round_pow2 (n_feature_config_bytes, sizeof (f->feature_config[0])) /
    sizeof (f->feature_config[0]);

  /* Find feature with same index and opaque data. */
  vec_foreach (f, old->features)
  {
    if (f->feature_index == feature_index
        && vec_len (f->feature_config) == n_feature_config_u32s
        && (n_feature_config_u32s == 0
            || !memcmp (f->feature_config, feature_config,
                        n_feature_config_bytes)))
      break;
  }

  /* Feature not found. */
  if (f >= vec_end (old->features))
    return ~0;

  new_features = duplicate_feature_vector (old->features);
  f = new_features + (f - old->features);
  vnet_config_feature_free (f);
  vec_delete (new_features, 1, f - new_features);

  /* Must remove old from config_pool now, as the pool may be expanded and
     change memory location if the following find_config_with_features()
     call adds a new config because none of the existing configs has
     matching features and so none can be reused. */
  remove_reference (cm, old);

  new = find_config_with_features (vm, cm, new_features,
                                   cm->end_node_indices_by_user_index
                                   [config_string_heap_index]);
  new->reference_count += 1;

  vec_validate (cm->config_pool_index_by_user_index,
                new->config_string_heap_index + 1);
  cm->config_pool_index_by_user_index[new->config_string_heap_index + 1] =
    new - cm->config_pool;
  return new->config_string_heap_index + 1;
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */
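
For orientation, the short sketch below shows how a caller might drive the config-string API above: vnet_config_init wires an arc from its start node(s) through the named feature nodes, and vnet_config_add_feature / vnet_config_del_feature return the heap index of the (possibly shared) config string that results, with ~0 meaning "no existing config string". The node names, the feature index, and the my_feature_config_t opaque data are hypothetical placeholders, and the include path is assumed; only the function signatures come from the code above.

/*
 * Illustrative usage sketch -- not part of config.c.  All names marked
 * "hypothetical" below are invented for the example.
 */
#include <vlib/vlib.h>
#include <vnet/config.h>        /* assumed header for vnet_config_main_t */

typedef struct
{
  u32 table_index;              /* hypothetical per-feature opaque data */
} my_feature_config_t;

static vnet_config_main_t demo_config_main;

static void
demo_setup (vlib_main_t * vm)
{
  /* Hypothetical graph nodes: one start node, two feature nodes; the last
     feature node name becomes the default end node. */
  char *start_nodes[] = { "demo-input" };
  char *feature_nodes[] = { "demo-feature-a", "demo-feature-b" };
  my_feature_config_t fc = { .table_index = 0 };
  u32 config_index;

  vnet_config_init (vm, &demo_config_main,
                    start_nodes, 1 /* n_start_node_names */,
                    feature_nodes, 2 /* n_feature_node_names */);

  /* Start a fresh config string (~0 = none yet) and enable feature 0;
     the opaque data is copied into the shared config string. */
  config_index = vnet_config_add_feature (vm, &demo_config_main,
                                          ~0 /* no existing config */,
                                          0 /* feature_index */,
                                          &fc, sizeof (fc));

  /* Removing the feature again requires the same index and opaque data. */
  config_index = vnet_config_del_feature (vm, &demo_config_main,
                                          config_index,
                                          0 /* feature_index */,
                                          &fc, sizeof (fc));
  (void) config_index;
}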