author     Lijian.Zhang <Lijian.Zhang@arm.com>  2020-12-10 16:01:00 +0800
committer  Damjan Marion <damarion@cisco.com>   2020-12-18 11:26:53 +0100
commit     e3e16172070e16e48d88b3e61dd2f3c539655513
tree       cbf52c8e791a8d6ac45ffd2602a64d0ebb0f2643 /src/plugins
parent     942542f7c1c3aae62a88612b8702a45f0d3b0e35
avf: optimized with NEON SIMD instructions
Optimize the avf-input node processing function with NEON SIMD instructions.

Type: improvement
Change-Id: I3dd76ac659686209dda9b176fc426aeae639e99b
Signed-off-by: Lijian Zhang <Lijian.Zhang@arm.com>
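As a hedged illustration (not part of the patch itself): the new fast path gathers the low 32 bits of qword[1] from four consecutive rx descriptors and proceeds 4-wide only if every lane has both the DD (descriptor done) and EOP (end of packet) status bits set. A minimal standalone NEON sketch of that readiness test follows, assuming a simplified descriptor layout and that DD and EOP occupy bits 0 and 1; the real code uses VPP's vppinfra wrappers (u32x4_gather, u32x4_is_equal) instead of raw intrinsics.

    /* Sketch of the 4-wide DD|EOP readiness test (AArch64 NEON).
     * Descriptor layout and status bit positions are assumptions. */
    #include <arm_neon.h>
    #include <stdbool.h>
    #include <stdint.h>

    #define RXD_STATUS_DD  (1u << 0) /* descriptor done (assumed bit 0) */
    #define RXD_STATUS_EOP (1u << 1) /* end of packet (assumed bit 1) */

    typedef struct { uint64_t qword[2]; } rx_desc_t; /* simplified descriptor */

    /* Return true iff all four descriptors are complete, single-buffer packets. */
    static bool
    four_descs_ready (const rx_desc_t *d)
    {
      /* Gather the low 32 bits of qword[1] from 4 consecutive descriptors. */
      uint32x4_t lo = { (uint32_t) d[0].qword[1], (uint32_t) d[1].qword[1],
                        (uint32_t) d[2].qword[1], (uint32_t) d[3].qword[1] };
      uint32x4_t mask = vdupq_n_u32 (RXD_STATUS_DD | RXD_STATUS_EOP);
      /* Each lane becomes all-ones only when both DD and EOP are set. */
      uint32x4_t eq = vceqq_u32 (vandq_u32 (lo, mask), mask);
      /* Across-lane minimum is all-ones only if every lane matched. */
      return vminvq_u32 (eq) == UINT32_MAX;
    }

If any lane fails the test, the code falls back to the existing one-by-one path, exactly as the VEC256 variant already does.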
Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/avf/input.c  28
1 file changed, 27 insertions(+), 1 deletion(-)
diff --git a/src/plugins/avf/input.c b/src/plugins/avf/input.c
index 4e4f116304d..85f97ca3e49 100644
--- a/src/plugins/avf/input.c
+++ b/src/plugins/avf/input.c
@@ -253,6 +253,9 @@ avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
#ifdef CLIB_HAVE_VEC256
u64x4 q1x4, or_q1x4 = { 0 };
u64x4 dd_eop_mask4 = u64x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
+#elif defined(CLIB_HAVE_VEC128)
+ u32x4 q1x4_lo, q1x4_hi, or_q1x4 = { 0 };
+ u32x4 dd_eop_mask4 = u32x4_splat (AVF_RXD_STATUS_DD | AVF_RXD_STATUS_EOP);
#endif
/* is there anything on the ring */
@@ -300,6 +303,29 @@ avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
or_q1x4 |= q1x4;
u64x4_store_unaligned (q1x4, ptd->qw1s + n_rx_packets);
+#elif defined(CLIB_HAVE_VEC128)
+ if (n_rx_packets >= AVF_RX_VECTOR_SZ - 4 || next >= size - 4)
+ goto one_by_one;
+
+ q1x4_lo =
+ u32x4_gather ((void *) &d[0].qword[1], (void *) &d[1].qword[1],
+ (void *) &d[2].qword[1], (void *) &d[3].qword[1]);
+
+ /* not all packets are ready or at least one of them is chained */
+ if (!u32x4_is_equal (q1x4_lo & dd_eop_mask4, dd_eop_mask4))
+ goto one_by_one;
+
+ q1x4_hi = u32x4_gather (
+ (void *) &d[0].qword[1] + 4, (void *) &d[1].qword[1] + 4,
+ (void *) &d[2].qword[1] + 4, (void *) &d[3].qword[1] + 4);
+
+ or_q1x4 |= q1x4_lo;
+ ptd->qw1s[n_rx_packets + 0] = (u64) q1x4_hi[0] << 32 | (u64) q1x4_lo[0];
+ ptd->qw1s[n_rx_packets + 1] = (u64) q1x4_hi[1] << 32 | (u64) q1x4_lo[1];
+ ptd->qw1s[n_rx_packets + 2] = (u64) q1x4_hi[2] << 32 | (u64) q1x4_lo[2];
+ ptd->qw1s[n_rx_packets + 3] = (u64) q1x4_hi[3] << 32 | (u64) q1x4_lo[3];
+#endif
+#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
vlib_buffer_copy_indices (bi, rxq->bufs + next, 4);
/* next */
@@ -359,7 +385,7 @@ no_more_desc:
rxq->next = next;
rxq->n_enqueued -= n_rx_packets + n_tail_desc;
-#ifdef CLIB_HAVE_VEC256
+#if defined(CLIB_HAVE_VEC256) || defined(CLIB_HAVE_VEC128)
or_qw1 |= or_q1x4[0] | or_q1x4[1] | or_q1x4[2] | or_q1x4[3];
#endif
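Design note, with a hedged sketch: 128-bit SIMD offers no four-lane 64-bit vector, so the patch gathers each descriptor's 64-bit qword[1] as two 32-bit halves (q1x4_lo, q1x4_hi) and recombines them with scalar shifts (hi << 32 | lo) when storing into ptd->qw1s. On little-endian AArch64 the same recombination can also stay in-register by interleaving the halves; the vzipq_u32 variant below is an illustrative alternative, not what the patch does.

    /* Sketch: recombine two 32-bit gathers into four 64-bit qword[1] values.
     * Assumes little-endian lane order (true on typical AArch64 targets). */
    #include <arm_neon.h>
    #include <stdint.h>

    static void
    store_qw1s (uint64_t qw1s[4], uint32x4_t lo, uint32x4_t hi)
    {
      /* Interleave to {lo0,hi0,lo1,hi1} and {lo2,hi2,lo3,hi3}; each adjacent
       * (lo, hi) pair reads back as the 64-bit value hi << 32 | lo. */
      uint32x4x2_t z = vzipq_u32 (lo, hi);
      vst1q_u64 (qw1s + 0, vreinterpretq_u64_u32 (z.val[0]));
      vst1q_u64 (qw1s + 2, vreinterpretq_u64_u32 (z.val[1]));
    }

The running OR accumulators (or_q1x4 feeding or_qw1) collect the status bits of every descriptor seen in the burst, so per-packet error handling can be skipped with a single check after the loop; the final hunk extends that reduction to the new 128-bit path.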