Age | Commit message | Author | Files | Lines
2019-08-02 | vmxnet3: don't bypass ethernet_input | Steven Luong | 2 | -15/+6
2019-08-02 | lb: fix usage of lb_main in test | Florin Coras | 1 | -2/+18
2019-08-02 | ipsec: Fix feature ordering | Neale Ranns | 2 | -4/+2
2019-08-02 | virtio: add the display information in virtio cli | Chenmin Sun | 2 | -2/+2
2019-08-02 | quic: fix connection move with multiple workers | Aloys Augustin | 1 | -13/+35
2019-08-01 | interface: fix pcap tx/rx trace cli handling | John Lo | 4 | -16/+7
2019-08-01 | session: session pool migration notification | Florin Coras | 6 | -0/+43
2019-08-01 | vppinfra: fix spinlock and rwlock tests | Florin Coras | 3 | -15/+15
2019-08-01 | ipsec: Redo the anti-replay check post decrypt | Neale Ranns | 3 | -0/+51
2019-08-01 | docs: Small fixes and ordering | Nathan Skrzypczak | 7 | -13/+33
2019-08-01 | vppinfra: refactor clib_rwlock_t to use single condition variable | jaszha03 | 2 | -25/+19
2019-08-01 | vppinfra: remove unused historical code | Dave Barach | 3 | -375/+0
2019-08-01 | vppinfra: make first bihash add thread-safe | Dave Barach | 2 | -7/+25
2019-08-01 | quic: Improve quicly_ctx handling & crypto ctx | Nathan Skrzypczak | 2 | -135/+148
2019-08-01 | ethernet: Fix node ordering on ARP feature ARC | Neale Ranns | 1 | -2/+13
2019-08-01 | nat: handoff rewrite & fixes for multi-worker | Filip Varga | 2 | -60/+163
2019-08-01 | vppinfra: refactor clib_spinlock_t to use compare and swap | jaszha03 | 2 | -2/+13
2019-08-01 | tcp: honor snd_wnd in fast rxt | Florin Coras | 1 | -1/+4
2019-08-01 | qos: Uninitialised variable (coverity) | Neale Ranns | 1 | -0/+3
2019-08-01 | vppapigen: revert "implement reversible repr's" | Vratko Polak | 1 | -34/+15
2019-07-31 | lb: vip and as dump/detail api's | Hongjun Ni | 9 | -75/+601
2019-07-31 | api: add prefix matcher typedef | Paul Vinciguerra | 1 | -0/+11
2019-07-31 | nat: elog rewrite for multi-worker support | Filip Varga | 17 | -142/+340
2019-07-31 | gso: fix the test case | Mohsin Kazmi | 1 | -1/+0
2019-07-31 | devices interface tests: vhost GSO support | Steven Luong | 11 | -26/+313
2019-07-31 | qos: Store function | Neale Ranns | 16 | -9/+1400
2019-07-31 | vppapigen: implement reversible repr's | Paul Vinciguerra | 1 | -15/+34
2019-07-31 | build: Append build number for cpack packages | YohanPipereau | 1 | -31/+32
2019-07-31 | pg: clarify the text of error message | Andrew Yourtchenko | 1 | -1/+1
2019-07-31 | vlib: fix format_error_trace | Dave Barach | 1 | -1/+2
2019-07-31 | vppinfra: added performance test for clib_rwlock_t (test_rwlock.c) | jaszha03 | 2 | -0/+265
2019-07-31 | vppinfra: refactor test_and_set spinlocks to use clib_spinlock_t | jaszha03 | 17 | -105/+83
2019-07-31 | vppinfra: added lock performance test for clib_spinlock_t (test_spinlock.c) | jaszha03 | 2 | -0/+207
2019-07-31 | vcl: fix epoll chain validation | Florin Coras | 1 | -22/+22
2019-07-31 | api papi: add alias for timestamp(datetime)/timedelta | Paul Vinciguerra | 5 | -47/+75
2019-07-31 | vppapigen: add endian_string for f64 | Paul Vinciguerra | 1 | -0/+1
2019-07-31 | tests: disable pg capture before enabling it | Andrew Yourtchenko | 4 | -2/+31
2019-07-31 | tests: Split IPSec ESP into parameterized tests per engine | Neale Ranns | 2 | -67/+60
2019-07-31 | ip: Ensure reassembly runs before IPSec decrypt. | Neale Ranns | 4 | -5/+84
2019-07-31 | fib: Add some path-list flags to its key | Neale Ranns | 2 | -1/+11
2019-07-31 | fib: fix calls to unformat_fib_path | Neale Ranns | 5 | -7/+10
2019-07-31 | ipsec: Typo in flag name | Neale Ranns | 1 | -1/+1
2019-07-30 | quic: fix cert loading | Nathan Skrzypczak | 3 | -12/+17
2019-07-30 | quic: cleanup and refactorisation | Aloys Augustin | 7 | -525/+634
2019-07-30 | vppinfra: refactor use of CLIB_MEMORY_BARRIER () | jaszha03 | 7 | -27/+14
2019-07-30 | vlib: Fix packet tracing | Neale Ranns | 1 | -1/+1
2019-07-30 | tap: fix segv when host-if-name is not given | Mohsin Kazmi | 1 | -8/+10
2019-07-30 | vppinfra: conformed spinlocks to use CLIB_PAUSE | jaszha03 | 6 | -8/+14
2019-07-29 | misc: add vnet/pipeline.h example | Dave Barach | 1 | -1/+89
2019-07-29 | session: fix vpp to app msg generation | Florin Coras | 3 | -68/+32
class="mi">4) { u32 arc_next0, arc_next1, arc_next2, arc_next3; u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3; u32 rx_fib_index0, rx_fib_index1, rx_fib_index2, rx_fib_index3; u32 iph_offset0 = 0, iph_offset1 = 0, iph_offset2 = 0, iph_offset3 = 0; ip4_header_t *ip0, *ip1, *ip2, *ip3; if (PREDICT_TRUE (n_left_from >= 8)) { vlib_prefetch_buffer_header (b[4], STORE); vlib_prefetch_buffer_header (b[5], STORE); vlib_prefetch_buffer_header (b[6], STORE); vlib_prefetch_buffer_header (b[7], STORE); CLIB_PREFETCH (&b[4]->data, CLIB_CACHE_LINE_BYTES, STORE); CLIB_PREFETCH (&b[5]->data, CLIB_CACHE_LINE_BYTES, STORE); CLIB_PREFETCH (&b[6]->data, CLIB_CACHE_LINE_BYTES, STORE); CLIB_PREFETCH (&b[7]->data, CLIB_CACHE_LINE_BYTES, STORE); } if (is_output) { iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length; iph_offset1 = vnet_buffer (b[1])->ip.save_rewrite_length; iph_offset2 = vnet_buffer (b[2])->ip.save_rewrite_length; iph_offset3 = vnet_buffer (b[3])->ip.save_rewrite_length; } ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) + iph_offset0); ip1 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[1]) + iph_offset1); ip2 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[2]) + iph_offset2); ip3 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[3]) + iph_offset3); vnet_feature_next (&arc_next0, b[0]); vnet_feature_next (&arc_next1, b[1]); vnet_feature_next (&arc_next2, b[2]); vnet_feature_next (&arc_next3, b[3]); vnet_buffer2 (b[0])->nat.arc_next = arc_next0; vnet_buffer2 (b[1])->nat.arc_next = arc_next1; vnet_buffer2 (b[2])->nat.arc_next = arc_next2; vnet_buffer2 (b[3])->nat.arc_next = arc_next3; sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX]; sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX]; sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX]; sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX]; rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0); rx_fib_index1 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index1); rx_fib_index2 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index2); rx_fib_index3 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index3); if (is_in2out) { ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, is_output); ti[1] = sm->worker_in2out_cb (ip1, rx_fib_index1, is_output); ti[2] = sm->worker_in2out_cb (ip2, rx_fib_index2, is_output); ti[3] = sm->worker_in2out_cb (ip3, rx_fib_index3, is_output); } else { ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output); ti[1] = sm->worker_out2in_cb (b[1], ip1, rx_fib_index1, is_output); ti[2] = sm->worker_out2in_cb (b[2], ip2, rx_fib_index2, is_output); ti[3] = sm->worker_out2in_cb (b[3], ip3, rx_fib_index3, is_output); } if (ti[0] == thread_index) same_worker++; else do_handoff++; if (ti[1] == thread_index) same_worker++; else do_handoff++; if (ti[2] == thread_index) same_worker++; else do_handoff++; if (ti[3] == thread_index) same_worker++; else do_handoff++; b += 4; ti += 4; n_left_from -= 4; } while (n_left_from > 0) { u32 arc_next0; u32 sw_if_index0; u32 rx_fib_index0; u32 iph_offset0 = 0; ip4_header_t *ip0; if (is_output) iph_offset0 = vnet_buffer (b[0])->ip.save_rewrite_length; ip0 = (ip4_header_t *) ((u8 *) vlib_buffer_get_current (b[0]) + iph_offset0); vnet_feature_next (&arc_next0, b[0]); vnet_buffer2 (b[0])->nat.arc_next = arc_next0; sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX]; rx_fib_index0 = ip4_fib_table_get_index_for_sw_if_index (sw_if_index0); if (is_in2out) { ti[0] = sm->worker_in2out_cb (ip0, rx_fib_index0, 
is_output); } else { ti[0] = sm->worker_out2in_cb (b[0], ip0, rx_fib_index0, is_output); } if (ti[0] == thread_index) same_worker++; else do_handoff++; b += 1; ti += 1; n_left_from -= 1; } if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE))) { u32 i; b = bufs; ti = thread_indices; for (i = 0; i < frame->n_vectors; i++) { if (b[0]->flags & VLIB_BUFFER_IS_TRACED) { nat44_handoff_trace_t *t = vlib_add_trace (vm, node, b[0], sizeof (*t)); t->next_worker_index = ti[0]; t->trace_index = vlib_buffer_get_trace_index (b[0]); t->in2out = is_in2out; t->output = is_output; b += 1; ti += 1; } else break; } } n_enq = vlib_buffer_enqueue_to_thread (vm, fq_index, from, thread_indices, frame->n_vectors, 1); if (n_enq < frame->n_vectors) { vlib_node_increment_counter (vm, node->node_index, NAT44_HANDOFF_ERROR_CONGESTION_DROP, frame->n_vectors - n_enq); } vlib_node_increment_counter (vm, node->node_index, NAT44_HANDOFF_ERROR_SAME_WORKER, same_worker); vlib_node_increment_counter (vm, node->node_index, NAT44_HANDOFF_ERROR_DO_HANDOFF, do_handoff); return frame->n_vectors; } VLIB_NODE_FN (snat_in2out_worker_handoff_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 1); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (snat_in2out_worker_handoff_node) = { .name = "nat44-in2out-worker-handoff", .vector_size = sizeof (u32), .sibling_of = "nat-default", .format_trace = format_nat44_handoff_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(nat44_handoff_error_strings), .error_strings = nat44_handoff_error_strings, }; /* *INDENT-ON* */ VLIB_NODE_FN (snat_in2out_output_worker_handoff_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nat44_worker_handoff_fn_inline (vm, node, frame, 1, 1); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (snat_in2out_output_worker_handoff_node) = { .name = "nat44-in2out-output-worker-handoff", .vector_size = sizeof (u32), .sibling_of = "nat-default", .format_trace = format_nat44_handoff_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(nat44_handoff_error_strings), .error_strings = nat44_handoff_error_strings, }; /* *INDENT-ON* */ VLIB_NODE_FN (snat_out2in_worker_handoff_node) (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame) { return nat44_worker_handoff_fn_inline (vm, node, frame, 0, 0); } /* *INDENT-OFF* */ VLIB_REGISTER_NODE (snat_out2in_worker_handoff_node) = { .name = "nat44-out2in-worker-handoff", .vector_size = sizeof (u32), .sibling_of = "nat-default", .format_trace = format_nat44_handoff_trace, .type = VLIB_NODE_TYPE_INTERNAL, .n_errors = ARRAY_LEN(nat44_handoff_error_strings), .error_strings = nat44_handoff_error_strings, }; /* *INDENT-ON* */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */