aboutsummaryrefslogtreecommitdiffstats
path: root/src/vnet/vxlan
AgeCommit message (Expand)AuthorFilesLines
2021-02-05vxlan: add udp-port configuration supportArtem Glazychev5-60/+320
2021-02-03vxlan: fix interface namingRay Kinsella1-6/+8
2021-01-20vxlan: fix SEGV reported in VPP-1962Ed Warnicke1-5/+29
2021-01-14vxlan: Protect against tunnel config where source is not localNeale Ranns1-2/+11
2020-12-14misc: move to new pool_foreach macrosDamjan Marion2-6/+6
2020-10-21misc: minimize dependencies on udp.hFlorin Coras3-1/+3
2020-10-07misc: Purge unused pg includesNeale Ranns1-1/+0
2020-09-04ip: enhance vtep4_check of tunnel by vector wayZhiyong Yang1-12/+32
2020-08-31flow: code refactorChenmin Sun1-3/+7
2020-06-15vxlan: remove judgement with always trueZhiyong Yang1-27/+15
2020-06-09vxlan: fix the inner packet checksum calculationMohsin Kazmi1-2/+51
2020-06-09misc: add code styling for vxlan encap and vxlan_packet filesMohsin Kazmi2-256/+282
2020-03-30vxlan: leverage vlib_get_buffers in vxlan_encap_inlineZhiyong Yang1-12/+14
2020-03-16vxlan: vxlan/vxlan.api API cleanupJakub Grajciar2-38/+47
2020-03-03geneve gtpu vxlan vxlan-gpe: VRF-aware bypass nodeNick Zavaritsky3-93/+30
2020-02-17misc: fix coverity warningsDave Barach1-2/+2
2020-01-10docs: Edit FEATURE.yaml files so they can be publishedJohn DeNisco1-2/+2
2020-01-04vxlan: Add FEATURE.yamlJohn Lo2-1/+15
2019-12-20vxlan: reuse inner packet flow hash for tunnel outer header load balanceShawn Ji1-0/+7
2019-12-17vxlan geneve gtpu: fix short helpPaul Vinciguerra1-1/+1
2019-12-10api: multiple connections per processDave Barach1-1/+1
2019-12-05vxlan: fix vxlan hw offload issueChenmin Sun1-1/+3
2019-11-26fib: reduce save_rewrite_length to u8Klement Sekera1-1/+1
2019-08-05vxlan: fix VXLANoIP6 checksum offload setupJohn Lo1-4/+3
2019-07-19fib: FIB Entry trackingNeale Ranns1-7/+7
2019-06-18fib: fib api updatesNeale Ranns1-7/+5
2019-06-04features will register udp ports once configuredJakub Grajciar1-5/+11
2019-03-28Typos. A bunch of typos I've been collecting.Paul Vinciguerra1-1/+1
2019-03-26Simplify adjacency rewrite codeBenoƮt Ganne1-12/+2
2019-03-06vxlan*: migrate old MULTIARCH macros to VLIB_NODE_FNFilip Tehlar1-10/+4
2019-01-09VXLAN: Prevent duplicate bypass graph nodes.Jon Loeliger2-4/+37
2018-11-14Remove c-11 memcpy checks from perf-critical codeDave Barach1-3/+3
2018-11-05vxlan: prefetch extra one cacheline holding external headerZhiyong Yang1-2/+2
2018-10-23c11 safe string handling supportDave Barach3-5/+5
2018-10-17vxlan:decap caching error (VPP-1462)Eyal Bari1-1/+1
2018-10-14vxlan:fix ip6 tunnel deletionEyal Bari1-1/+2
2018-10-05vxlan:ip4 decap:remove access to tunnel objectEyal Bari3-183/+190
2018-09-24Trivial: Clean up some typos.Paul Vinciguerra4-12/+12
2018-09-06vxlan: improve encap performanceZhiyong Yang1-2/+10
2018-08-28vxlan: decap use vlib_buffer_enqueue_to_nextEyal Bari2-255/+221
2018-08-01vxlan:optimize cached entry compareEyal Bari1-9/+5
2018-07-31vxlan:decap.c conform coding styleEyal Bari1-620/+690
2018-07-31fix 'sh vxlan tunnels'Neale Ranns1-15/+4
2018-07-23fix vector index range checksEyal Bari1-1/+1
2018-07-19Remove unused argument to vlib_feature_nextDamjan Marion1-3/+3
2018-07-09vxlan:use bihash_16_8 for ipv4 lookupEyal Bari3-66/+76
2018-07-05vxlan:use bihash_24_8 for ipv6 lookupEyal Bari3-116/+167
2018-06-14vxlan:use VLIB_NODE_FN for multiarch selectionEyal Bari1-17/+1
2018-06-13vxlan:offload RX floweyal bari5-1/+579
2018-05-07vxlan:vxlan.c conform coding styleEyal Bari1-354/+374
/*
 * LISP control-plane message serialization/deserialization helpers:
 * builders for Map-Request / Map-Reply / Map-Register messages and
 * parsers for the corresponding records, operating on vlib buffers.
 *
 * NOTE(review): this chunk opens mid-function.  The else-branch and
 * return below are the tail of a gid-serialization helper whose
 * definition starts before the visible region -- left untouched.
 */
  else
    {
      p = vlib_buffer_put_uninit (b, gid_address_size_to_put (gid));
      gid_address_put (p, gid);
    }
  return p;
}

/**
 * Serialize every ITR-RLOC address into buffer @a b.
 *
 * @param lcm       LISP control-plane main context (not read here)
 * @param b         buffer the addresses are appended to
 * @param rlocs     vector of RLOC gid addresses to write
 * @param locs_put  out: number of locators written minus one -- the value
 *                  the caller stores in the ITR-RLOC Count header field
 *                  (that field encodes count - 1 on the wire)
 * @return pointer to the first serialized ITR-RLOC inside the buffer.
 */
static void *
lisp_msg_put_itr_rlocs (lisp_cp_main_t * lcm, vlib_buffer_t * b,
			gid_address_t * rlocs, u8 * locs_put)
{
  u8 *bp, count = 0;
  u32 i;

  bp = vlib_buffer_get_current (b);
  for (i = 0; i < vec_len (rlocs); i++)
    {
      lisp_msg_put_gid (b, &rlocs[i]);
      count++;
    }

  /* header field carries (number of ITR-RLOCs - 1) */
  *locs_put = count - 1;
  return bp;
}

/**
 * Append an EID record header plus the EID itself to buffer @a b.
 * The record's mask length is taken from the gid's own address length.
 *
 * @return pointer to the record header inside the buffer.
 */
void *
lisp_msg_put_eid_rec (vlib_buffer_t * b, gid_address_t * eid)
{
  eid_record_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (*h));

  clib_memset (h, 0, sizeof (*h));
  EID_REC_MLEN (h) = gid_address_len (eid);
  lisp_msg_put_gid (b, eid);
  return h;
}

/**
 * Build a 64-bit nonce from the realtime clock.
 *
 * @param seed  caller-provided value OR-ed into the low-order 32 bits
 * @return the 64-bit nonce.
 */
u64
nonce_build (u32 seed)
{
  u64 nonce;
  u32 nonce_lower;
  u32 nonce_upper;
  struct timespec ts;

  /* Put nanosecond clock in lower 32-bits and put an XOR of the nanosecond
   * clock with the second clock in the upper 32-bits. */
  syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
  nonce_lower = ts.tv_nsec;
  nonce_upper = ts.tv_sec ^ clib_host_to_net_u32 (nonce_lower);

  /* OR in a caller provided seed to the low-order 32-bits. */
  nonce_lower |= seed;

  /* Return 64-bit nonce. */
  nonce = nonce_upper;
  nonce = (nonce << 32) | nonce_lower;
  return nonce;
}

/**
 * Write a Map-Reply header followed by the mapping record(s) into @a b.
 *
 * @param b          buffer to serialize into
 * @param records    mapping record data appended after the header
 *                   (REC_COUNT is hard-coded to 1 here)
 * @param nonce      nonce echoed back to the requester
 * @param probe_bit  value stored in the RLOC-probe flag
 * @return pointer to the Map-Reply header inside the buffer.
 */
void *
lisp_msg_put_map_reply (vlib_buffer_t * b, mapping_t * records, u64 nonce,
			u8 probe_bit)
{
  map_reply_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (h[0]));

  clib_memset (h, 0, sizeof (h[0]));
  MREP_TYPE (h) = LISP_MAP_REPLY;
  MREP_NONCE (h) = nonce;
  MREP_REC_COUNT (h) = 1;
  MREP_RLOC_PROBE (h) = probe_bit;

  lisp_msg_put_mreg_records (b, records);
  return h;
}

/**
 * Write a Map-Register header, a zero-filled authentication-data
 * placeholder and the mapping records into @a b.
 *
 * @param b                buffer to serialize into
 * @param records          vector of mapping records
 * @param want_map_notify  sets the want-map-notify flag when non-zero
 * @param auth_data_len    size of the zeroed auth-data area; presumably
 *                         the caller fills in the HMAC afterwards -- the
 *                         data is only reserved and cleared here
 * @param nonce            out: nonce generated for this message
 * @param msg_len          out: total serialized message length in bytes
 * @return pointer to the Map-Register header inside the buffer.
 */
void *
lisp_msg_put_map_register (vlib_buffer_t * b, mapping_t * records,
			   u8 want_map_notify, u16 auth_data_len,
			   u64 * nonce, u32 * msg_len)
{
  u8 *auth_data = 0;

  /* Basic header init */
  map_register_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (h[0]));

  clib_memset (h, 0, sizeof (h[0]));
  MREG_TYPE (h) = LISP_MAP_REGISTER;
  MREG_NONCE (h) = nonce_build (0);
  MREG_WANT_MAP_NOTIFY (h) = want_map_notify ? 1 : 0;
  MREG_REC_COUNT (h) = vec_len (records);

  auth_data = vlib_buffer_put_uninit (b, auth_data_len);
  clib_memset (auth_data, 0, auth_data_len);

  /* Put map register records */
  lisp_msg_put_mreg_records (b, records);

  nonce[0] = MREG_NONCE (h);
  msg_len[0] = vlib_buffer_get_tail (b) - (u8 *) h;
  return h;
}

/**
 * Build a complete Map-Request: header, source EID, ITR-RLOC list and one
 * EID record.
 *
 * @param lcm             LISP control-plane main context
 * @param b               buffer to serialize into
 * @param seid            source EID
 * @param deid            destination EID (the mapping being requested)
 * @param rlocs           vector of ITR-RLOC addresses
 * @param is_smr_invoked  sets the SMR-invoked flag when non-zero
 * @param rloc_probe_set  sets the RLOC-probe flag when non-zero
 * @param nonce           out: nonce generated for this request
 * @return pointer to the Map-Request header inside the buffer.
 */
void *
lisp_msg_put_mreq (lisp_cp_main_t * lcm, vlib_buffer_t * b,
		   gid_address_t * seid, gid_address_t * deid,
		   gid_address_t * rlocs, u8 is_smr_invoked,
		   u8 rloc_probe_set, u64 * nonce)
{
  u8 loc_count = 0;

  /* Basic header init */
  map_request_hdr_t *h = vlib_buffer_put_uninit (b, sizeof (h[0]));

  clib_memset (h, 0, sizeof (h[0]));
  MREQ_TYPE (h) = LISP_MAP_REQUEST;
  MREQ_NONCE (h) = nonce_build (0);
  MREQ_SMR_INVOKED (h) = is_smr_invoked ? 1 : 0;
  MREQ_RLOC_PROBE (h) = rloc_probe_set ? 1 : 0;

  /* We're adding one eid record */
  increment_record_count (h);

  /* Fill source eid */
  lisp_msg_put_gid (b, seid);

  /* Put itr rlocs */
  lisp_msg_put_itr_rlocs (lcm, b, rlocs, &loc_count);
  MREQ_ITR_RLOC_COUNT (h) = loc_count;

  /* Put eid record */
  lisp_msg_put_eid_rec (b, deid);

  nonce[0] = MREQ_NONCE (h);
  return h;
}

/**
 * Push an Encapsulated Control Message onto @a b: inner IP + UDP headers
 * followed by the LISP ECM header.
 *
 * @param vm  vlib main
 * @param b   buffer to prepend the headers to
 * @param lp  local (source) UDP port
 * @param rp  remote (destination) UDP port
 * @param la  local gid; when it is not an ip-prefix gid, zeroed addresses
 *            are used for both source and destination
 * @param ra  remote gid, same convention as @a la
 * @return pointer to the pushed ECM header.
 */
void *
lisp_msg_push_ecm (vlib_main_t * vm, vlib_buffer_t * b, int lp, int rp,
		   gid_address_t * la, gid_address_t * ra)
{
  ecm_hdr_t *h;
  ip_address_t _src_ip, *src_ip = &_src_ip, _dst_ip, *dst_ip = &_dst_ip;
  if (gid_address_type (la) != GID_ADDR_IP_PREFIX)
    {
      /* empty ip4 */
      clib_memset (src_ip, 0, sizeof (src_ip[0]));
      clib_memset (dst_ip, 0, sizeof (dst_ip[0]));
    }
  else
    {
      src_ip = &gid_address_ip (la);
      dst_ip = &gid_address_ip (ra);
    }

  /* Push inner ip and udp */
  pkt_push_udp_and_ip (vm, b, lp, rp, src_ip, dst_ip, 0);

  /* Push lisp ecm hdr */
  h = pkt_push_ecm_hdr (b);

  return h;
}

/* Map a LISP message type to its fixed header length; 0 for types not
 * handled here. */
static u32
msg_type_to_hdr_len (lisp_msg_type_e type)
{
  switch (type)
    {
    case LISP_MAP_REQUEST:
      return (sizeof (map_request_hdr_t));
    case LISP_MAP_REPLY:
      return (sizeof (map_reply_hdr_t));
    default:
      return (0);
    }
}

/* Advance the buffer past the fixed header of message @a type, returning
 * a pointer to that header. */
void *
lisp_msg_pull_hdr (vlib_buffer_t * b, lisp_msg_type_e type)
{
  return vlib_buffer_pull (b, msg_type_to_hdr_len (type));
}

/**
 * Parse a gid address at the buffer's current position into @a eid and
 * advance the buffer past it.
 *
 * @return bytes consumed, or ~0 on parse failure (buffer not advanced).
 */
u32
lisp_msg_parse_addr (vlib_buffer_t * b, gid_address_t * eid)
{
  u32 len;
  clib_memset (eid, 0, sizeof (*eid));
  len = gid_address_parse (vlib_buffer_get_current (b), eid);
  if (len != ~0)
    vlib_buffer_pull (b, len);
  return len;
}

/**
 * Parse one EID record (header + address) into @a eid, copying the prefix
 * length from the record header, then advance the buffer past the record.
 *
 * @return total bytes consumed (address + record header), or ~0 on parse
 *         failure.
 */
u32
lisp_msg_parse_eid_rec (vlib_buffer_t * b, gid_address_t * eid)
{
  eid_record_hdr_t *h = vlib_buffer_get_current (b);
  u32 len;
  clib_memset (eid, 0, sizeof (*eid));
  len = gid_address_parse (EID_REC_ADDR (h), eid);
  if (len == ~0)
    return len;
  gid_address_ippref_len (eid) = EID_REC_MLEN (h);
  vlib_buffer_pull (b, len + sizeof (eid_record_hdr_t));

  return len + sizeof (eid_record_hdr_t);
}

/**
 * Parse @a rloc_count ITR-RLOC addresses from the buffer, appending each
 * to the @a rlocs vector.
 *
 * @return total bytes consumed, or ~0 if any address fails to parse
 *         (addresses already parsed remain in the vector).
 */
u32
lisp_msg_parse_itr_rlocs (vlib_buffer_t * b, gid_address_t ** rlocs,
			  u8 rloc_count)
{
  gid_address_t tloc;
  u32 i, len = 0, tlen = 0;

  //MREQ_ITR_RLOC_COUNT(mreq_hdr) + 1
  for (i = 0; i < rloc_count; i++)
    {
      len = lisp_msg_parse_addr (b, &tloc);
      if (len == ~0)
	return len;
      vec_add1 (*rlocs, tloc);
      tlen += len;
    }
  return tlen;
}

/**
 * Parse one locator record at the current buffer position into @a loc and
 * advance past it.
 *
 * @return bytes consumed, or ~0 on failure.
 */
u32
lisp_msg_parse_loc (vlib_buffer_t * b, locator_t * loc)
{
  int len;

  len = locator_parse (vlib_buffer_get_current (b), loc);
  if (len == ~0)
    return ~0;

  /* NOTE(review): this checks for sizeof (len) (i.e. 4) bytes of
   * remaining space rather than 'len' bytes before pulling 'len' --
   * verify this is intended before changing. */
  if (!vlib_buffer_has_space (b, sizeof (len)))
    return ~0;
  vlib_buffer_pull (b, len);

  return len;
}

/**
 * Parse a full mapping record: record header, EID and all locators.
 *
 * @param b        buffer positioned at a mapping record header
 * @param eid      out: the record's EID (prefix length filled in for
 *                 ip-prefix gids)
 * @param locs     out: vector the parsed locators are appended to
 * @param probed_  out (optional): receives a copy of the first locator
 *                 whose probed bit is set, if any
 * @return bytes consumed past the record header plus a fixed header size
 *         (see note below), or ~0 on failure.
 */
u32
lisp_msg_parse_mapping_record (vlib_buffer_t * b, gid_address_t * eid,
			       locator_t ** locs, locator_t * probed_)
{
  void *h = 0, *loc_hdr = 0;
  locator_t loc, *probed = 0;
  int i = 0, len = 0, llen = 0;

  h = vlib_buffer_get_current (b);
  if (!vlib_buffer_has_space (b, sizeof (mapping_record_hdr_t)))
    return ~0;
  vlib_buffer_pull (b, sizeof (mapping_record_hdr_t));

  clib_memset (eid, 0, sizeof (*eid));
  len = gid_address_parse (vlib_buffer_get_current (b), eid);
  if (len == ~0)
    return len;

  /* NOTE(review): like lisp_msg_parse_loc, this checks sizeof (len)
   * bytes of space rather than 'len' -- verify intent. */
  if (!vlib_buffer_has_space (b, sizeof (len)))
    return ~0;

  vlib_buffer_pull (b, len);
  if (GID_ADDR_IP_PREFIX == gid_address_type (eid))
    gid_address_ippref_len (eid) = MAP_REC_EID_PLEN (h);

  for (i = 0; i < MAP_REC_LOC_COUNT (h); i++)
    {
      loc_hdr = vlib_buffer_get_current (b);
      llen = lisp_msg_parse_loc (b, &loc);
      if (llen == ~0)
	return llen;
      vec_add1 (*locs, loc);
      len += llen;

      if (LOC_PROBED (loc_hdr))
	{
	  if (probed != 0)
	    clib_warning ("Multiple locators probed! Probing only the first!");
	  else
	    probed = &loc;
	}
    }

  /* XXX */
  if (probed_ != 0 && probed)
    *probed_ = *probed;

  /* NOTE(review): the return adds sizeof (map_reply_hdr_t) although
   * sizeof (mapping_record_hdr_t) was what this function consumed above
   * -- looks suspicious; confirm against callers before changing. */
  return len + sizeof (map_reply_hdr_t);
}

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */