author     Zhiyong Yang <zhiyong.yang@intel.com>    2020-05-15 22:32:34 +0800
committer  Damjan Marion <dmarion@me.com>           2020-06-27 10:26:13 +0000
commit     975a3cbb130fe7f3d5415dee7c05ea5e7bc59daf (patch)
tree       7e4cc9763ac70b365a4ca2665d254b024dba04e1 /src
parent     d352bf8276f33f33e4af5e8a7b85dad8187f531d (diff)
l2: performance enhancement in l2input
Short loads/stores combined with prefetching at the beginning of the loop
place too much pressure on the AGUs (address generation units) and on memory
accesses. This patch interleaves load/store operations with computational
operations to relieve that bottleneck.
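As a rough sketch of the restructuring idea (simplified, self-contained stand-ins: buf_t and process_pkt() here are hypothetical placeholders for vlib_buffer_t and classify_and_dispatch(), not the actual l2input code):

    #include <string.h>
    #include <stdint.h>

    typedef struct { uint32_t flags; } buf_t;   /* stand-in for vlib_buffer_t */

    static void
    process_pkt (buf_t *b, uint32_t *next)      /* stand-in for classify_and_dispatch */
    {
      *next = b->flags & 1;
    }

    /* Before: four short stores issued back to back at the top of the
       iteration, right next to the prefetches, clustering AGU/memory work. */
    static void
    enqueue_then_compute (uint32_t *to_next, uint32_t *from, buf_t **b, int n)
    {
      uint32_t next[4];
      for (; n >= 4; n -= 4, from += 4, to_next += 4, b += 4)
        {
          to_next[0] = from[0];
          to_next[1] = from[1];
          to_next[2] = from[2];
          to_next[3] = from[3];
          process_pkt (b[0], &next[0]);
          process_pkt (b[1], &next[1]);
          process_pkt (b[2], &next[2]);
          process_pkt (b[3], &next[3]);
        }
    }

    /* After: one wide copy issued between dispatch calls, so the memory
       traffic overlaps with computation instead of bunching up. */
    static void
    interleaved (uint32_t *to_next, uint32_t *from, buf_t **b, int n)
    {
      uint32_t next[4];
      for (; n >= 4; n -= 4, from += 4, to_next += 4, b += 4)
        {
          process_pkt (b[0], &next[0]);
          process_pkt (b[1], &next[1]);
          memcpy (to_next, from, 4 * sizeof (from[0])); /* one copy, not four stores */
          process_pkt (b[2], &next[2]);
          process_pkt (b[3], &next[3]);
        }
    }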
vlib_get_buffers() is also leveraged to translate the frame's buffer indices
into buffer pointers in one pass.
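For reference, vlib_get_buffers() converts a whole vector of buffer indices into buffer pointers in a single call, replacing the per-packet vlib_get_buffer() lookups; the shape used in the patch below is roughly:

    /* Excerpted shape, not a complete function. */
    vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;

    from = vlib_frame_vector_args (frame);
    n_left_from = frame->n_vectors;
    vlib_get_buffers (vm, from, bufs, n_left_from);   /* one pass over the frame */

    /* Inside the loops, b[0]..b[3] then replace the former per-packet
       b0 = vlib_get_buffer (vm, from[0]); lookups. */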
In struct l2input_trace_t, redefine u8 dst_and_src[12] in place of the
separate dst[6] and src[6] fields, so that the two trace copies merge into one.
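This works because dst_address immediately precedes src_address in ethernet_header_t, so a single 12-byte copy captures both fields; schematically (mirroring the patch below):

    typedef struct
    {
      /* per-pkt trace data */
      u8 dst_and_src[12];       /* was: u8 src[6]; u8 dst[6]; */
      u32 next_index;
      u32 sw_if_index;
    } l2input_trace_t;

    /* One copy instead of two: */
    clib_memcpy_fast (t->dst_and_src, h0->dst_address,
                      sizeof (h0->dst_address) + sizeof (h0->src_address));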
Type: improvement
Signed-off-by: Zhiyong Yang <zhiyong.yang@intel.com>
Change-Id: I7d3df7732c476069235e3019c68f0f53bca9637e
Diffstat (limited to 'src')
-rw-r--r--  src/vnet/l2/l2_input.c | 149
1 file changed, 69 insertions(+), 80 deletions(-)
diff --git a/src/vnet/l2/l2_input.c b/src/vnet/l2/l2_input.c
index 8a7ab802175..542bd42c4c2 100644
--- a/src/vnet/l2/l2_input.c
+++ b/src/vnet/l2/l2_input.c
@@ -101,8 +101,7 @@ format_l2_input_features (u8 * s, va_list * args)
 typedef struct
 {
   /* per-pkt trace data */
-  u8 src[6];
-  u8 dst[6];
+  u8 dst_and_src[12];
   u32 next_index;
   u32 sw_if_index;
 } l2input_trace_t;
@@ -117,8 +116,8 @@ format_l2input_trace (u8 * s, va_list * args)
 
   s = format (s, "l2-input: sw_if_index %d dst %U src %U",
               t->sw_if_index,
-              format_ethernet_address, t->dst,
-              format_ethernet_address, t->src);
+              format_ethernet_address, t->dst_and_src,
+              format_ethernet_address, t->dst_and_src + 6);
   return s;
 }
@@ -298,10 +297,12 @@ l2input_node_inline (vlib_main_t * vm,
   u32 n_left_from, *from, *to_next;
   l2input_next_t next_index;
   l2input_main_t *msm = &l2input_main;
+  vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
 
   from = vlib_frame_vector_args (frame);
   n_left_from = frame->n_vectors;  /* number of packets to process */
   next_index = node->cached_next_index;
+  vlib_get_buffers (vm, from, bufs, n_left_from);
 
   while (n_left_from > 0)
     {
@@ -312,30 +313,22 @@ l2input_node_inline (vlib_main_t * vm,
       while (n_left_from >= 8 && n_left_to_next >= 4)
         {
-          u32 bi0, bi1, bi2, bi3;
-          vlib_buffer_t *b0, *b1, *b2, *b3;
           u32 next0, next1, next2, next3;
           u32 sw_if_index0, sw_if_index1, sw_if_index2, sw_if_index3;
 
           /* Prefetch next iteration. */
           {
-            vlib_buffer_t *p4, *p5, *p6, *p7;
-
-            p4 = vlib_get_buffer (vm, from[4]);
-            p5 = vlib_get_buffer (vm, from[5]);
-            p6 = vlib_get_buffer (vm, from[6]);
-            p7 = vlib_get_buffer (vm, from[7]);
 
             /* Prefetch the buffer header and packet for the N+2 loop iteration */
-            vlib_prefetch_buffer_header (p4, LOAD);
-            vlib_prefetch_buffer_header (p5, LOAD);
-            vlib_prefetch_buffer_header (p6, LOAD);
-            vlib_prefetch_buffer_header (p7, LOAD);
+            vlib_prefetch_buffer_header (b[4], LOAD);
+            vlib_prefetch_buffer_header (b[5], LOAD);
+            vlib_prefetch_buffer_header (b[6], LOAD);
+            vlib_prefetch_buffer_header (b[7], LOAD);
 
-            CLIB_PREFETCH (p4->data, CLIB_CACHE_LINE_BYTES, STORE);
-            CLIB_PREFETCH (p5->data, CLIB_CACHE_LINE_BYTES, STORE);
-            CLIB_PREFETCH (p6->data, CLIB_CACHE_LINE_BYTES, STORE);
-            CLIB_PREFETCH (p7->data, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (b[4]->data, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (b[5]->data, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (b[6]->data, CLIB_CACHE_LINE_BYTES, STORE);
+            CLIB_PREFETCH (b[7]->data, CLIB_CACHE_LINE_BYTES, STORE);
 
             /*
              * Don't bother prefetching the bridge-domain config (which
@@ -347,112 +340,108 @@ l2input_node_inline (vlib_main_t * vm,
 
           /* speculatively enqueue b0 and b1 to the current next frame */
           /* bi is "buffer index", b is pointer to the buffer */
-          to_next[0] = bi0 = from[0];
-          to_next[1] = bi1 = from[1];
-          to_next[2] = bi2 = from[2];
-          to_next[3] = bi3 = from[3];
-          from += 4;
-          to_next += 4;
-          n_left_from -= 4;
-          n_left_to_next -= 4;
-
-          b0 = vlib_get_buffer (vm, bi0);
-          b1 = vlib_get_buffer (vm, bi1);
-          b2 = vlib_get_buffer (vm, bi2);
-          b3 = vlib_get_buffer (vm, bi3);
 
           if (do_trace)
             {
              /* RX interface handles */
-              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
-              sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
-              sw_if_index2 = vnet_buffer (b2)->sw_if_index[VLIB_RX];
-              sw_if_index3 = vnet_buffer (b3)->sw_if_index[VLIB_RX];
+              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
+              sw_if_index1 = vnet_buffer (b[1])->sw_if_index[VLIB_RX];
+              sw_if_index2 = vnet_buffer (b[2])->sw_if_index[VLIB_RX];
+              sw_if_index3 = vnet_buffer (b[3])->sw_if_index[VLIB_RX];
 
-              if (b0->flags & VLIB_BUFFER_IS_TRACED)
+              if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
                 {
-                  ethernet_header_t *h0 = vlib_buffer_get_current (b0);
+                  ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
                   l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b0, sizeof (*t));
+                    vlib_add_trace (vm, node, b[0], sizeof (*t));
                   t->sw_if_index = sw_if_index0;
-                  clib_memcpy_fast (t->src, h0->src_address, 6);
-                  clib_memcpy_fast (t->dst, h0->dst_address, 6);
+                  clib_memcpy_fast (t->dst_and_src, h0->dst_address,
+                                    sizeof (h0->dst_address) +
+                                    sizeof (h0->src_address));
                 }
-              if (b1->flags & VLIB_BUFFER_IS_TRACED)
+              if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
                 {
-                  ethernet_header_t *h1 = vlib_buffer_get_current (b1);
+                  ethernet_header_t *h1 = vlib_buffer_get_current (b[1]);
                   l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b1, sizeof (*t));
+                    vlib_add_trace (vm, node, b[1], sizeof (*t));
                   t->sw_if_index = sw_if_index1;
-                  clib_memcpy_fast (t->src, h1->src_address, 6);
-                  clib_memcpy_fast (t->dst, h1->dst_address, 6);
+                  clib_memcpy_fast (t->dst_and_src, h1->dst_address,
+                                    sizeof (h1->dst_address) +
+                                    sizeof (h1->src_address));
                 }
-              if (b2->flags & VLIB_BUFFER_IS_TRACED)
+              if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
                 {
-                  ethernet_header_t *h2 = vlib_buffer_get_current (b2);
+                  ethernet_header_t *h2 = vlib_buffer_get_current (b[2]);
                   l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b2, sizeof (*t));
+                    vlib_add_trace (vm, node, b[2], sizeof (*t));
                   t->sw_if_index = sw_if_index2;
-                  clib_memcpy_fast (t->src, h2->src_address, 6);
-                  clib_memcpy_fast (t->dst, h2->dst_address, 6);
+                  clib_memcpy_fast (t->dst_and_src, h2->dst_address,
+                                    sizeof (h2->dst_address) +
+                                    sizeof (h2->src_address));
                 }
-              if (b3->flags & VLIB_BUFFER_IS_TRACED)
+              if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
                 {
-                  ethernet_header_t *h3 = vlib_buffer_get_current (b3);
+                  ethernet_header_t *h3 = vlib_buffer_get_current (b[3]);
                   l2input_trace_t *t =
-                    vlib_add_trace (vm, node, b3, sizeof (*t));
+                    vlib_add_trace (vm, node, b[3], sizeof (*t));
                   t->sw_if_index = sw_if_index3;
-                  clib_memcpy_fast (t->src, h3->src_address, 6);
-                  clib_memcpy_fast (t->dst, h3->dst_address, 6);
+                  clib_memcpy_fast (t->dst_and_src, h3->dst_address,
+                                    sizeof (h3->dst_address) +
+                                    sizeof (h3->src_address));
                 }
             }
 
-          classify_and_dispatch (msm, b0, &next0);
-          classify_and_dispatch (msm, b1, &next1);
-          classify_and_dispatch (msm, b2, &next2);
-          classify_and_dispatch (msm, b3, &next3);
+          classify_and_dispatch (msm, b[0], &next0);
+          classify_and_dispatch (msm, b[1], &next1);
+          // clib_memcpy_fast shows better performance when placed here.
+          clib_memcpy_fast (to_next, from, sizeof (from[0]) * 4);
+          to_next += 4;
+          classify_and_dispatch (msm, b[2], &next2);
+          classify_and_dispatch (msm, b[3], &next3);
+          b += 4;
+          n_left_from -= 4;
+          n_left_to_next -= 4;
 
           /* verify speculative enqueues, maybe switch current next frame */
           /* if next0==next1==next_index then nothing special needs to be done */
           vlib_validate_buffer_enqueue_x4 (vm, node, next_index,
                                            to_next, n_left_to_next,
-                                           bi0, bi1, bi2, bi3,
+                                           from[0], from[1], from[2], from[3],
                                            next0, next1, next2, next3);
+          from += 4;
         }
 
       while (n_left_from > 0 && n_left_to_next > 0)
         {
-          u32 bi0;
-          vlib_buffer_t *b0;
           u32 next0;
           u32 sw_if_index0;
 
           /* speculatively enqueue b0 to the current next frame */
-          bi0 = from[0];
-          to_next[0] = bi0;
-          from += 1;
-          to_next += 1;
-          n_left_from -= 1;
-          n_left_to_next -= 1;
-
-          b0 = vlib_get_buffer (vm, bi0);
-          if (do_trace && PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+          if (do_trace && PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
            {
-              ethernet_header_t *h0 = vlib_buffer_get_current (b0);
-              l2input_trace_t *t = vlib_add_trace (vm, node, b0, sizeof (*t));
-              sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
+              ethernet_header_t *h0 = vlib_buffer_get_current (b[0]);
+              l2input_trace_t *t =
+                vlib_add_trace (vm, node, b[0], sizeof (*t));
+              sw_if_index0 = vnet_buffer (b[0])->sw_if_index[VLIB_RX];
               t->sw_if_index = sw_if_index0;
-              clib_memcpy_fast (t->src, h0->src_address, 6);
-              clib_memcpy_fast (t->dst, h0->dst_address, 6);
+              clib_memcpy_fast (t->dst_and_src, h0->dst_address,
                                sizeof (h0->dst_address) +
                                sizeof (h0->src_address));
            }
 
-          classify_and_dispatch (msm, b0, &next0);
+          classify_and_dispatch (msm, b[0], &next0);
+          b += 1;
+          to_next[0] = from[0];
+          to_next += 1;
+          n_left_from -= 1;
+          n_left_to_next -= 1;
 
           /* verify speculative enqueue, maybe switch current next frame */
           vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
                                            to_next, n_left_to_next,
-                                           bi0, next0);
+                                           from[0], next0);
+          from += 1;
        }
 
       vlib_put_next_frame (vm, node, next_index, n_left_to_next);