summaryrefslogtreecommitdiffstats
path: root/test
AgeCommit message (Expand)AuthorFilesLines
2017-04-05GRE over IPv6Ciara Loftus2-7/+177
2017-04-03make test: relax BFD time intervalsKlement Sekera2-8/+10
2017-04-03make test: tweak helper scriptsKlement Sekera2-3/+8
2017-04-02make test: add scripts for easy test loopingKlement Sekera2-0/+122
2017-03-30VPP-669: ping: fix coverity check error 165075 + add ping testcaseAndrew Yourtchenko1-0/+118
2017-03-29VXLAN:validate mcast encapsulation ip/macEyal Bari2-4/+11
2017-03-29Sub-net broadcast addresses for IPv4Neale Ranns2-2/+125
2017-03-28NAT: Test refactoring to avoid redundant code for verification and creating p...Martin Gálik1-48/+59
2017-03-27make test: properly propagate exit statusKlement Sekera1-1/+2
2017-03-27Mcast rewrite no memcpyNeale Ranns1-2/+0
2017-03-24make test: fix broken plugin pathsKlement Sekera1-0/+2
2017-03-22make test: support out-of-tree pluginsKlement Sekera2-2/+14
2017-03-22make test: support out-of-tree testsKlement Sekera2-4/+58
2017-03-22SNAT: added actual delete to snat_det_mapMartin1-0/+4
2017-03-21ACL plugin 1.2Andrew Yourtchenko1-0/+722
2017-03-17Fix IP feature ordering.Neale Ranns6-20/+304
2017-03-17Attached hostsNeale Ranns2-12/+166
2017-03-17Adjacency refinement; check the cover's interface against the adjacency'sNeale Ranns1-20/+53
2017-03-15Python API: Change from cPython to CFFI.Ole Troan2-2/+2
2017-03-15No vector allocation during buffer copyNeale Ranns1-9/+38
2017-03-13Add MAC address check in ethernet-input node if interface in L3 modeJohn Lo2-2/+11
2017-03-13ACL plugin rejects ICMP messages (VPP-624)Pavel Kotucek2-1/+1023
2017-03-10Retire vpp_liteDamjan Marion1-1/+3
2017-03-10Fix MAP-E UT. Add functionality in MAP code to delete the pre-resolved next-h...Neale Ranns2-10/+20
2017-03-09make test: temporary disable MAP-E testDamjan Marion1-0/+1
2017-03-09make test: automatic sanity checkKlement Sekera2-2/+18
2017-03-09make test: add make test-shell[-debug] targetsKlement Sekera1-0/+13
2017-03-09IMplementation for option to not create a FIB table entry when adding a neigh...Neale Ranns5-40/+98
2017-03-09MAP pre-resolve - use FIB to track pre-resolved next-hopNeale Ranns2-0/+199
2017-03-09Tests to target holes in adjacency and DPO test coverageNeale Ranns6-15/+281
2017-03-08SNAT: deterministic map dumpMartin2-1/+28
2017-03-08make test: split into basic and extended testsKlement Sekera3-7/+64
2017-03-08SNAT: user_session_dump is_ip4 and vat unformating addedMartin1-2/+4
2017-03-08Proxy ND (RFC4389 - or a sub-set thereof). This allows the 'emulation' of bri...Neale Ranns2-17/+282
2017-03-07DHCP Multiple Servers (VPP-602, VPP-605)Neale Ranns1-44/+312
2017-03-07CGN: Deterministic NAT (VPP-623)Matus Fabian2-0/+100
2017-03-07Add setting of tenant VRF id for SNAT addresses (VPP-641)Juraj Sloboda2-3/+74
2017-03-06make test: reset object registry if vpp diesKlement Sekera2-2/+9
2017-03-06make test: tell vpp to set coredump sizeKlement Sekera2-6/+14
2017-03-06make test: don't run if other vpp process runsKlement Sekera1-2/+22
2017-03-03IPv6 RA improvementsNeale Ranns4-6/+251
2017-03-03Changing the IP table for an interface is an error if the interface already h...Neale Ranns6-4/+25
2017-03-03python API: work towards python/vpp api separationKlement Sekera1-6/+6
2017-03-02SNAT: user's dump and session dump of a certain snat user.magalik2-0/+82
2017-03-02Remove the unused VRF ID parameter from the IP neighbour Add/Del APINeale Ranns4-10/+7
2017-03-02BFD: command line interfaceKlement Sekera4-23/+578
2017-02-27[Proxy] ARP testsNeale Ranns5-0/+587
2017-02-26BFD: echo functionKlement Sekera4-49/+455
2017-02-25Enable tests with VRF resetJan Gelety1-37/+73
2017-02-23Remove prints from LISP testFilip Tehlar1-5/+0
_analyse_trace_record;

/** @brief Fixed-size table of per-path trace analysis records for one flow. */
typedef struct
{
  ioam_analyse_trace_record path_data[IOAM_MAX_PATHS_PER_FLOW];
} ioam_analyse_trace_data;

/** @brief Analysed iOAM pot data.
    @note cache aligned.
*/
typedef struct
{
  /** Number of packets validated (passes through the service chain)
      within the timestamps. */
  u32 sfc_validated_count;

  /** Number of packets invalidated (failed through the service chain)
      within the timestamps. */
  u32 sfc_invalidated_count;
} ioam_analyse_pot_data;

/** @brief Analysed iOAM data.
    @note cache aligned.
*/
typedef struct ioam_analyser_data_t_
{
  u8 is_free;			/* 1 while unused; set by ioam_analyse_init_data() */
  u8 pad[3];			/* explicit padding up to the next u32 member */

  /** Num of pkts sent for this flow. */
  u32 pkt_sent;

  /** Num of pkts matching this flow. */
  u32 pkt_counter;

  /** Num of bytes matching this flow. */
  u32 bytes_counter;

  /** Analysed iOAM trace data. */
  ioam_analyse_trace_data trace_data;

  /** Analysed iOAM pot data. */
  ioam_analyse_pot_data pot_data;

  /** Analysed iOAM seqno data. */
  seqno_rx_info seqno_data;

  /** Cache of previously analysed data, useful for export.
      NOTE(review): "chached" looks like a typo of "cached"; the identifier is
      kept as-is because external callers reference it by this name. */
  struct ioam_analyser_data_t_ *chached_data_list;

  /** Spin-lock word: taken (via __sync_lock_test_and_set below) while this
      record is written, since another thread exports/reads the data. */
  volatile u32 *writer_lock;
} ioam_analyser_data_t;

/**
 * @brief Compute the delay recorded in a hop-by-hop trace option.
 *
 * Takes the first and last per-hop elements present in @a trace and returns
 * the difference of their network-order 32-bit timestamp words.
 *
 * @param trace      trace header carrying the per-hop data elements.
 * @param trace_len  total length (bytes) of the per-hop trace data.
 * @param oneway     when set and TTL/node-id data is present, first walk the
 *                   element list to locate the "u-turn" hop so only one
 *                   direction is measured.
 * @return delay as f64; 0 when the trace type or contents are unusable.
 */
always_inline f64
ip6_ioam_analyse_calc_delay (ioam_trace_hdr_t * trace, u16 trace_len,
			     u8 oneway)
{
  u16 size_of_all_traceopts;
  u8 size_of_traceopt_per_node;
  u8 num_nodes;
  u32 *start_elt, *end_elt, *uturn_elt;;
  u32 start_time, end_time;
  u8 done = 0;

  size_of_traceopt_per_node = fetch_trace_data_size (trace->ioam_trace_type);
  // Unknown trace type
  if (size_of_traceopt_per_node == 0)
    return 0;

  size_of_all_traceopts = trace_len;	/*ioam_trace_type,data_list_elts_left */

  /* Elements actually filled = capacity minus the still-empty slots. */
  num_nodes = (u8) (size_of_all_traceopts / size_of_traceopt_per_node);
  if ((num_nodes == 0) || (num_nodes <= trace->data_list_elts_left))
    return 0;
  num_nodes -= trace->data_list_elts_left;

  start_elt = trace->elts;
  end_elt =
    trace->elts +
    (u32) ((size_of_traceopt_per_node / sizeof (u32)) * (num_nodes - 1));

  if (oneway && (trace->ioam_trace_type & BIT_TTL_NODEID))
    {
      /* Scan for the u-turn hop: stop where the TTL (top byte of the first
         word) stops decreasing.
         NOTE(review): uturn_elt steps *before* start_elt on the first
         iteration — confirm the intended scan direction. */
      done = 0;
      do
	{
	  uturn_elt = start_elt - size_of_traceopt_per_node / sizeof (u32);

	  if ((clib_net_to_host_u32 (*start_elt) >> 24) <=
	      (clib_net_to_host_u32 (*uturn_elt) >> 24))
	    done = 1;
	}
      while (!done && (start_elt = uturn_elt) != end_elt);
    }

  /* Skip optional leading words so *start_elt/*end_elt land on timestamps. */
  if (trace->ioam_trace_type & BIT_TTL_NODEID)
    {
      start_elt++;
      end_elt++;
    }
  if (trace->ioam_trace_type & BIT_ING_INTERFACE)
    {
      start_elt++;
      end_elt++;
    }
  start_time = clib_net_to_host_u32 (*start_elt);
  end_time = clib_net_to_host_u32 (*end_elt);
  return (f64) (end_time - start_time);
}

/**
 * @brief Mark every hop of every active path in @a data as down.
 *
 * Spins on the record's writer lock for the duration of the update.
 */
always_inline void
ip6_ioam_analyse_set_paths_down (ioam_analyser_data_t * data)
{
  ioam_analyse_trace_data *trace_data;
  ioam_analyse_trace_record *trace_record;
  ioam_path_map_t *path;
  u8 k, i;

  /* Acquire writer spin-lock. */
  while (__sync_lock_test_and_set (data->writer_lock, 1))
    ;

  trace_data = &data->trace_data;

  for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++)
    {
      trace_record = trace_data->path_data + i;

      if (trace_record->is_free)
	continue;

      path = trace_record->path;
      for (k = 0; k < trace_record->num_nodes; k++)
	path[k].state_up = 0;
    }
  *(data->writer_lock) = 0;	/* release */
}

/* (return type of the function continued on the next original line) */
always_inline void
ip6_ioam_analyse_hbh_trace_loopback (ioam_analyser_data_t * data, ioam_trace_hdr_t * trace, u16 trace_len) { ioam_analyse_trace_data *trace_data; ioam_analyse_trace_record *trace_record; ioam_path_map_t *path; u8 i, j, k, num_nodes, max_nodes; u8 *ptr; u32 nodeid; u16 ingress_if, egress_if; u16 size_of_traceopt_per_node; u16 size_of_all_traceopts; while (__sync_lock_test_and_set (data->writer_lock, 1)) ; trace_data = &data->trace_data; size_of_traceopt_per_node = fetch_trace_data_size (trace->ioam_trace_type); if (0 == size_of_traceopt_per_node) goto end; size_of_all_traceopts = trace_len; ptr = (u8 *) trace->elts; max_nodes = (u8) (size_of_all_traceopts / size_of_traceopt_per_node); num_nodes = max_nodes - trace->data_list_elts_left; for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++) { trace_record = trace_data->path_data + i; path = trace_record->path; if (trace_record->is_free) continue; for (j = max_nodes, k = 0; k < num_nodes; j--, k++) { ptr = (u8 *) ((u8 *) trace->elts + (size_of_traceopt_per_node * (j - 1))); nodeid = clib_net_to_host_u32 (*((u32 *) ptr)) & 0x00ffffff; ptr += 4; if (nodeid != path[k].node_id) goto end; if ((trace->ioam_trace_type == TRACE_TYPE_IF_TS_APP) || (trace->ioam_trace_type == TRACE_TYPE_IF)) { ingress_if = clib_net_to_host_u16 (*((u16 *) ptr)); ptr += 2; egress_if = clib_net_to_host_u16 (*((u16 *) ptr)); if ((ingress_if != path[k].ingress_if) || (egress_if != path[k].egress_if)) { goto end; } } /* Found Match - set path hop state to up */ path[k].state_up = 1; } } end: *(data->writer_lock) = 0; } always_inline int ip6_ioam_analyse_hbh_trace (ioam_analyser_data_t * data, ioam_trace_hdr_t * trace, u16 pak_len, u16 trace_len) { ioam_analyse_trace_data *trace_data; u16 size_of_traceopt_per_node; u16 size_of_all_traceopts; u8 i, j, k, num_nodes, max_nodes; u8 *ptr; u32 nodeid; u16 ingress_if, egress_if; ioam_path_map_t *path = NULL; ioam_analyse_trace_record *trace_record; while (__sync_lock_test_and_set (data->writer_lock, 1)) ; trace_data = 
&data->trace_data; size_of_traceopt_per_node = fetch_trace_data_size (trace->ioam_trace_type); // Unknown trace type if (size_of_traceopt_per_node == 0) goto DONE; size_of_all_traceopts = trace_len; ptr = (u8 *) trace->elts; max_nodes = (u8) (size_of_all_traceopts / size_of_traceopt_per_node); num_nodes = max_nodes - trace->data_list_elts_left; for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++) { trace_record = trace_data->path_data + i; if (trace_record->is_free || (num_nodes != trace_record->num_nodes) || (trace->ioam_trace_type != trace_record->trace_type)) continue; path = trace_record->path; for (j = max_nodes, k = 0; k < num_nodes; j--, k++) { ptr = (u8 *) ((u8 *) trace->elts + (size_of_traceopt_per_node * (j - 1))); nodeid = clib_net_to_host_u32 (*((u32 *) ptr)) & 0x00ffffff; ptr += 4; if (nodeid != path[k].node_id) break; if ((trace->ioam_trace_type == TRACE_TYPE_IF_TS_APP) || (trace->ioam_trace_type == TRACE_TYPE_IF)) { ingress_if = clib_net_to_host_u16 (*((u16 *) ptr)); ptr += 2; egress_if = clib_net_to_host_u16 (*((u16 *) ptr)); if ((ingress_if != path[k].ingress_if) || (egress_if != path[k].egress_if)) { break; } } } if (k == num_nodes) { goto found_match; } } for (i = 0; i < IOAM_MAX_PATHS_PER_FLOW; i++) { trace_record = trace_data->path_data + i; if (trace_record->is_free) { trace_record->is_free = 0; trace_record->num_nodes = num_nodes; trace_record->trace_type = trace->ioam_trace_type; path = trace_data->path_data[i].path; trace_record->pkt_counter = 0; trace_record->bytes_counter = 0; trace_record->min_delay = 0xFFFFFFFF; trace_record->max_delay = 0; trace_record->mean_delay = 0; break; } } for (j = max_nodes, k = 0; k < num_nodes; j--, k++) { ptr = (u8 *) ((u8 *) trace->elts + (size_of_traceopt_per_node * (j - 1))); path[k].node_id = clib_net_to_host_u32 (*((u32 *) ptr)) & 0x00ffffff; ptr += 4; if ((trace->ioam_trace_type == TRACE_TYPE_IF_TS_APP) || (trace->ioam_trace_type == TRACE_TYPE_IF)) { path[k].ingress_if = clib_net_to_host_u16 (*((u16 *) ptr)); 
ptr += 2; path[k].egress_if = clib_net_to_host_u16 (*((u16 *) ptr)); } } found_match: /* Set path state to UP */ for (k = 0; k < num_nodes; k++) path[k].state_up = 1; trace_record->pkt_counter++; trace_record->bytes_counter += pak_len; if (trace->ioam_trace_type & BIT_TIMESTAMP) { /* Calculate time delay */ u32 delay = (u32) ip6_ioam_analyse_calc_delay (trace, trace_len, 0); if (delay < trace_record->min_delay) trace_record->min_delay = delay; else if (delay > trace_record->max_delay) trace_record->max_delay = delay; u64 sum = (trace_record->mean_delay * data->seqno_data.rx_packets); trace_record->mean_delay = (u32) ((sum + delay) / (data->seqno_data.rx_packets + 1)); } DONE: *(data->writer_lock) = 0; return 0; } always_inline int ip6_ioam_analyse_hbh_e2e (ioam_analyser_data_t * data, ioam_e2e_packet_t * e2e, u16 len) { while (__sync_lock_test_and_set (data->writer_lock, 1)) ; ioam_analyze_seqno (&data->seqno_data, (u64) clib_net_to_host_u32 (e2e->e2e_data)); *(data->writer_lock) = 0; return 0; } always_inline u8 * format_path_map (u8 * s, va_list * args) { ioam_path_map_t *pm = va_arg (*args, ioam_path_map_t *); u32 num_of_elts = va_arg (*args, u32); u32 i; for (i = 0; i < num_of_elts; i++) { s = format (s, "node_id: 0x%x, ingress_if: 0x%x, egress_if:0x%x, state:%s\n", pm->node_id, pm->ingress_if, pm->egress_if, pm->state_up ? 
"UP" : "DOWN"); pm++; } return (s); } always_inline u8 * print_analyse_flow (u8 * s, ioam_analyser_data_t * record) { int j; ioam_analyse_trace_record *trace_record; s = format (s, "pkt_sent : %u\n", record->pkt_sent); s = format (s, "pkt_counter : %u\n", record->pkt_counter); s = format (s, "bytes_counter : %u\n", record->bytes_counter); s = format (s, "Trace data: \n"); for (j = 0; j < IOAM_MAX_PATHS_PER_FLOW; j++) { trace_record = record->trace_data.path_data + j; if (trace_record->is_free) continue; s = format (s, "path_map:\n%U", format_path_map, trace_record->path, trace_record->num_nodes); s = format (s, "pkt_counter: %u\n", trace_record->pkt_counter); s = format (s, "bytes_counter: %u\n", trace_record->bytes_counter); s = format (s, "min_delay: %u\n", trace_record->min_delay); s = format (s, "max_delay: %u\n", trace_record->max_delay); s = format (s, "mean_delay: %u\n", trace_record->mean_delay); } s = format (s, "\nPOT data: \n"); s = format (s, "sfc_validated_count : %u\n", record->pot_data.sfc_validated_count); s = format (s, "sfc_invalidated_count : %u\n", record->pot_data.sfc_invalidated_count); s = format (s, "\nSeqno Data:\n"); s = format (s, "RX Packets : %lu\n" "Lost Packets : %lu\n" "Duplicate Packets : %lu\n" "Reordered Packets : %lu\n", record->seqno_data.rx_packets, record->seqno_data.lost_packets, record->seqno_data.dup_packets, record->seqno_data.reordered_packets); s = format (s, "\n"); return s; } always_inline void ioam_analyse_init_data (ioam_analyser_data_t * data) { u16 j; ioam_analyse_trace_data *trace_data; data->is_free = 1; /* We maintain data corresponding to last IP-Fix export, this may * get extended in future to maintain history of data */ vec_validate_aligned (data->chached_data_list, 0, CLIB_CACHE_LINE_BYTES); data->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES); *(data->writer_lock) = 0; trace_data = &(data->trace_data); for (j = 0; j < IOAM_MAX_PATHS_PER_FLOW; j++) 
trace_data->path_data[j].is_free = 1; } #endif /* PLUGINS_IOAM_PLUGIN_IOAM_ANALYSE_IOAM_ANALYSE_H_ */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */