author     Damjan Marion <damarion@cisco.com>        2017-11-03 12:24:37 +0100
committer  Damjan Marion <dmarion.lists@gmail.com>   2017-11-08 19:52:38 +0000
commit     6d56fa4b0aa2e789f1bdc8bf8280d65d87f6a541 (patch)
tree       1a4701e35cfeb538b814a3b6f2a21a262cf9fbd0 /src/plugins/memif/node.c
parent     8daa80a4adfd82a19017c2c12554a8a43dddccd7 (diff)
memif: do not mask head and tail pointers
Change-Id: Ie849ab713ff086187c18a91ab32e58207fe94033
Signed-off-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Jakub Grajciar <Jakub.Grajciar@pantheon.tech>
Diffstat (limited to 'src/plugins/memif/node.c')
-rw-r--r--  src/plugins/memif/node.c  47
1 file changed, 15 insertions(+), 32 deletions(-)
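
In outline, the patch stops wrapping mq->last_head (and the corresponding tail) in place: the counters are left free-running, the ring mask is applied only where a descriptor or buffer is actually indexed, and the number of pending slots becomes a single unsigned subtraction instead of a branch on head versus tail. A minimal sketch of that indexing scheme follows; it assumes a power-of-two ring (which the masking in the patch implies) and 16-bit indices, and the ring_demo_* names are illustrative only, not memif data structures:

#include <stdint.h>

/* Illustrative ring, not the memif_ring_t layout; only the indexing
   scheme matters here. */
typedef struct
{
  uint16_t head;		/* free-running producer index */
  uint16_t tail;		/* free-running consumer index */
  uint16_t size;		/* must be a power of two */
  uint32_t desc[];		/* stand-in for the descriptor array */
} ring_demo_t;

/* Pending slots: unsigned subtraction handles 16-bit wrap-around,
   so the old "head > tail" branch disappears. */
static inline uint16_t
ring_demo_n_slots (const ring_demo_t * r)
{
  return (uint16_t) (r->head - r->tail);
}

/* The mask is applied only at the point of access; the counters
   themselves are never stored back wrapped. */
static inline uint32_t *
ring_demo_desc (ring_demo_t * r, uint16_t slot)
{
  uint16_t mask = r->size - 1;
  return &r->desc[slot & mask];
}

The same idea is what removes the wrap-around branch from the prefetch code in the last hunk below: with a free-running counter, (last_head + k) & mask is always a valid slot.
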
diff --git a/src/plugins/memif/node.c b/src/plugins/memif/node.c
index 8190441f4b5..74b238931ac 100644
--- a/src/plugins/memif/node.c
+++ b/src/plugins/memif/node.c
@@ -132,7 +132,7 @@ memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
while (*num_slots)
{
- data_len = ring->desc[mq->last_head].length;
+ data_len = ring->desc[mq->last_head & mask].length;
while (data_len && (*n_free_bufs))
{
/* get empty buffer */
@@ -161,7 +161,7 @@ memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
bytes_to_copy =
data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
b->current_data = 0;
- mb = memif_get_buffer (mif, ring, mq->last_head);
+ mb = memif_get_buffer (mif, ring, mq->last_head & mask);
clib_memcpy (vlib_buffer_get_current (b), mb + offset,
CLIB_CACHE_LINE_BYTES);
if (bytes_to_copy > CLIB_CACHE_LINE_BYTES)
@@ -191,10 +191,10 @@ memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
}
last_head = mq->last_head;
/* Advance to next descriptor */
- mq->last_head = (mq->last_head + 1) & mask;
+ mq->last_head++;
offset = 0;
(*num_slots)--;
- if ((ring->desc[last_head].flags & MEMIF_DESC_FLAG_NEXT) == 0)
+ if ((ring->desc[last_head & mask].flags & MEMIF_DESC_FLAG_NEXT) == 0)
break;
}
@@ -269,10 +269,7 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
if (head == mq->last_head)
return 0;
- if (head > mq->last_head)
- num_slots = head - mq->last_head;
- else
- num_slots = ring_size - mq->last_head + head;
+ num_slots = head - mq->last_head;
while (num_slots)
{
@@ -283,30 +280,16 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
while (num_slots > 11 && n_left_to_next > 2)
{
- if (PREDICT_TRUE (mq->last_head + 5 < ring_size))
- {
- CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 2),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 3),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[mq->last_head + 4],
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[mq->last_head + 5],
- CLIB_CACHE_LINE_BYTES, LOAD);
- }
- else
- {
- CLIB_PREFETCH (memif_get_buffer
- (mif, ring, (mq->last_head + 2) % mask),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (memif_get_buffer
- (mif, ring, (mq->last_head + 3) % mask),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) % mask],
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) % mask],
- CLIB_CACHE_LINE_BYTES, LOAD);
- }
+ CLIB_PREFETCH (memif_get_buffer
+ (mif, ring, (mq->last_head + 2) & mask),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (memif_get_buffer
+ (mif, ring, (mq->last_head + 3) & mask),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) & mask],
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) & mask],
+ CLIB_CACHE_LINE_BYTES, LOAD);
vlib_buffer_t *first_b0 = 0;
u32 bi0 = 0, first_bi0 = 0;
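
The prefetch hunk shows the practical payoff: look-ahead prefetches no longer need to check whether last_head + k crosses ring_size, and the removed fallback path, which used "% mask" (i.e. modulo ring_size - 1) rather than "% ring_size", goes away with it. A hedged, stand-alone sketch of the same look-ahead pattern is below; it uses __builtin_prefetch in place of the CLIB_PREFETCH macro, and prefetch_lookahead is an illustrative helper, not VPP code:

#include <stdint.h>

/* Stand-in for CLIB_PREFETCH: read prefetch, high temporal locality. */
static inline void
prefetch_line (const void *p)
{
  __builtin_prefetch (p, 0, 3);
}

/* With free-running indices, each look-ahead slot is masked directly;
   no branch on last_head + k versus ring_size is needed. */
static inline void
prefetch_lookahead (const uint32_t * desc, uint16_t last_head, uint16_t mask)
{
  prefetch_line (&desc[(last_head + 4) & mask]);
  prefetch_line (&desc[(last_head + 5) & mask]);
}
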