author     Damjan Marion <damarion@cisco.com>        2017-11-03 12:24:37 +0100
committer  Damjan Marion <dmarion.lists@gmail.com>   2017-11-08 19:52:38 +0000
commit     6d56fa4b0aa2e789f1bdc8bf8280d65d87f6a541 (patch)
tree       1a4701e35cfeb538b814a3b6f2a21a262cf9fbd0 /src/plugins
parent     8daa80a4adfd82a19017c2c12554a8a43dddccd7 (diff)
memif: do not mask head and tail pointers
Change-Id: Ie849ab713ff086187c18a91ab32e58207fe94033
Signed-off-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Jakub Grajciar <Jakub.Grajciar@pantheon.tech>
Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/memif/cli.c      3
-rw-r--r--  src/plugins/memif/device.c  58
-rw-r--r--  src/plugins/memif/node.c    47
3 files changed, 40 insertions, 68 deletions
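Before the diff itself, a short sketch of the indexing convention this patch moves to may help. Head and tail become free-running 16-bit counters that are only ever incremented; the power-of-two mask (ring_size - 1) is applied solely at the moment a descriptor slot is addressed. The types and names below are illustrative stand-ins, not the actual memif_ring_t / memif_desc_t layout:

/* Hedged sketch of the "mask only at access time" convention
 * adopted by this patch (illustrative types, not memif headers). */
#include <stdint.h>

typedef struct
{
  uint32_t length;
  uint32_t buffer_length;
  uint16_t flags;
} demo_desc_t;               /* hypothetical descriptor */

typedef struct
{
  volatile uint16_t head;    /* free-running producer index, never masked */
  volatile uint16_t tail;    /* free-running consumer index, never masked */
  demo_desc_t desc[];        /* ring_size = 1 << log2_ring_size entries */
} demo_ring_t;               /* hypothetical ring */

static inline demo_desc_t *
demo_get_desc (demo_ring_t * r, uint16_t index, uint16_t mask)
{
  /* mask = ring_size - 1; the wrap happens only here, at access time */
  return &r->desc[index & mask];
}

static inline uint16_t
demo_used_slots (demo_ring_t * r)
{
  /* unsigned 16-bit wrap-around keeps this correct even after
   * head has wrapped past 65535 while tail has not */
  return (uint16_t) (r->head - r->tail);
}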
diff --git a/src/plugins/memif/cli.c b/src/plugins/memif/cli.c
index deca27af2ef..3d38550c1ba 100644
--- a/src/plugins/memif/cli.c
+++ b/src/plugins/memif/cli.c
@@ -76,6 +76,9 @@ memif_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
if (!is_pow2 (ring_size))
return clib_error_return (0, "ring size must be power of 2");
+ if (ring_size > 32768)
+ return clib_error_return (0, "maximum ring size is 32768");
+
args.log2_ring_size = min_log2 (ring_size);
if (rx_queues > 255 || rx_queues < 1)
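The new 32768 cap is presumably a consequence of the free-running scheme: with 16-bit indices, "used = head - tail" is computed modulo 65536, so a power-of-two ring of 65536 entries would make a completely full ring indistinguishable from an empty one. 2^15 is therefore the largest usable power of two. A tiny check of that arithmetic (hypothetical helper name):

/* Sketch: why 32768 is the largest safe power-of-two ring size
 * when head/tail are free-running 16-bit counters (assumption
 * inferred from the cli.c check, not stated in the patch). */
#include <assert.h>
#include <stdint.h>

static uint16_t
used_slots (uint16_t head, uint16_t tail)
{
  return (uint16_t) (head - tail);   /* modulo 2^16 */
}

int
main (void)
{
  /* ring_size = 32768: a full ring is still distinguishable from empty */
  assert (used_slots (32768, 0) == 32768);
  /* ring_size = 65536 would not be: after writing 65536 slots the
   * 16-bit head wraps back onto tail, so "full" reads as "empty" */
  assert (used_slots (0, 0) == 0);
  return 0;
}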
diff --git a/src/plugins/memif/device.c b/src/plugins/memif/device.c
index f7eb862eef5..012c75327f1 100644
--- a/src/plugins/memif/device.c
+++ b/src/plugins/memif/device.c
@@ -109,26 +109,28 @@ memif_copy_buffer_to_tx_ring (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_buffer_t *b0;
void *mb0;
u32 total = 0, len;
+ u16 slot = (*head) & mask;
- mb0 = memif_get_buffer (mif, ring, *head);
- ring->desc[*head].flags = 0;
+ mb0 = memif_get_buffer (mif, ring, slot);
+ ring->desc[slot].flags = 0;
do
{
b0 = vlib_get_buffer (vm, bi);
len = b0->current_length;
- if (PREDICT_FALSE (ring->desc[*head].buffer_length < (total + len)))
+ if (PREDICT_FALSE (ring->desc[slot].buffer_length < (total + len)))
{
if (PREDICT_TRUE (total))
{
- ring->desc[*head].length = total;
+ ring->desc[slot].length = total;
total = 0;
- ring->desc[*head].flags |= MEMIF_DESC_FLAG_NEXT;
- *head = (*head + 1) & mask;
- mb0 = memif_get_buffer (mif, ring, *head);
- ring->desc[*head].flags = 0;
+ ring->desc[slot].flags |= MEMIF_DESC_FLAG_NEXT;
+ (*head)++;
+ slot = (*head) & mask;
+ mb0 = memif_get_buffer (mif, ring, slot);
+ ring->desc[slot].flags = 0;
}
}
- if (PREDICT_TRUE (ring->desc[*head].buffer_length >= (total + len)))
+ if (PREDICT_TRUE (ring->desc[slot].buffer_length >= (total + len)))
{
clib_memcpy (mb0 + total, vlib_buffer_get_current (b0),
CLIB_CACHE_LINE_BYTES);
@@ -149,8 +151,8 @@ memif_copy_buffer_to_tx_ring (vlib_main_t * vm, vlib_node_runtime_t * node,
if (PREDICT_TRUE (total))
{
- ring->desc[*head].length = total;
- *head = (*head + 1) & mask;
+ ring->desc[slot].length = total;
+ (*head)++;
}
}
@@ -196,34 +198,18 @@ memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
head = ring->head;
tail = ring->tail;
- if (tail > head)
- free_slots = tail - head;
- else
- free_slots = ring_size - head + tail;
+ free_slots = ring_size - head + tail;
while (n_left > 5 && free_slots > 1)
{
- if (PREDICT_TRUE (head + 5 < ring_size))
- {
- CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 2),
- CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (memif_get_buffer (mif, ring, head + 3),
- CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (&ring->desc[head + 4], CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (&ring->desc[head + 5], CLIB_CACHE_LINE_BYTES, STORE);
- }
- else
- {
- CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) % mask),
- CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) % mask),
- CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (&ring->desc[(head + 4) % mask],
- CLIB_CACHE_LINE_BYTES, STORE);
- CLIB_PREFETCH (&ring->desc[(head + 5) % mask],
- CLIB_CACHE_LINE_BYTES, STORE);
- }
-
+ CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 2) & mask),
+ CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (memif_get_buffer (mif, ring, (head + 3) & mask),
+ CLIB_CACHE_LINE_BYTES, STORE);
+ CLIB_PREFETCH (&ring->desc[(head + 4) & mask], CLIB_CACHE_LINE_BYTES,
+ STORE);
+ CLIB_PREFETCH (&ring->desc[(head + 5) & mask], CLIB_CACHE_LINE_BYTES,
+ STORE);
memif_prefetch_buffer_and_data (vm, buffers[2]);
memif_prefetch_buffer_and_data (vm, buffers[3]);
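Two things change on the TX side above. Free-slot accounting no longer needs a branch: with free-running u16 head and tail, "head - tail" counts the in-flight slots even across wrap-around, so "free_slots = ring_size - head + tail" is exact once truncated back to 16 bits. And the prefetch fallback that used "% mask" disappears: modulo by mask (ring_size - 1) wraps one slot early, whereas "& mask" is the intended power-of-two wrap; since it only steered a prefetch hint, the old form was at worst a misplaced prefetch rather than a correctness bug. A small standalone check under those assumptions:

/* Sketch contrasting the old "% mask" wrap with the "& mask" wrap,
 * and the branch-free free-slot count (ring_size = 2048 assumed). */
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint16_t ring_size = 2048, mask = ring_size - 1;

  /* old fallback: "% mask" wraps modulo 2047 and never yields slot 2047 */
  assert ((2047 % mask) == 0);
  /* new: "& mask" wraps modulo 2048 as intended */
  assert ((2047 & mask) == 2047);
  assert ((2048 & mask) == 0);

  /* free-slot accounting with free-running indices: head wrapped, tail not */
  uint16_t head = 10, tail = 65530;
  uint16_t used = (uint16_t) (head - tail);        /* 16 slots in flight */
  uint16_t free_slots = (uint16_t) (ring_size - used);
  assert (used == 16 && free_slots == 2032);
  return 0;
}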
diff --git a/src/plugins/memif/node.c b/src/plugins/memif/node.c
index 8190441f4b5..74b238931ac 100644
--- a/src/plugins/memif/node.c
+++ b/src/plugins/memif/node.c
@@ -132,7 +132,7 @@ memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
while (*num_slots)
{
- data_len = ring->desc[mq->last_head].length;
+ data_len = ring->desc[mq->last_head & mask].length;
while (data_len && (*n_free_bufs))
{
/* get empty buffer */
@@ -161,7 +161,7 @@ memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
bytes_to_copy =
data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
b->current_data = 0;
- mb = memif_get_buffer (mif, ring, mq->last_head);
+ mb = memif_get_buffer (mif, ring, mq->last_head & mask);
clib_memcpy (vlib_buffer_get_current (b), mb + offset,
CLIB_CACHE_LINE_BYTES);
if (bytes_to_copy > CLIB_CACHE_LINE_BYTES)
@@ -191,10 +191,10 @@ memif_copy_buffer_from_rx_ring (vlib_main_t * vm, memif_if_t * mif,
}
last_head = mq->last_head;
/* Advance to next descriptor */
- mq->last_head = (mq->last_head + 1) & mask;
+ mq->last_head++;
offset = 0;
(*num_slots)--;
- if ((ring->desc[last_head].flags & MEMIF_DESC_FLAG_NEXT) == 0)
+ if ((ring->desc[last_head & mask].flags & MEMIF_DESC_FLAG_NEXT) == 0)
break;
}
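In the RX copy path, mq->last_head likewise becomes free-running: each descriptor is addressed as ring->desc[mq->last_head & mask], and the loop keeps consuming slots for as long as MEMIF_DESC_FLAG_NEXT marks the frame as continuing into the next descriptor. A hedged sketch of that walk, with illustrative types and a stand-in flag constant:

/* Sketch of walking one chained frame with a free-running index
 * (illustrative types; DEMO_DESC_FLAG_NEXT stands in for
 * MEMIF_DESC_FLAG_NEXT). */
#include <stdint.h>

#define DEMO_DESC_FLAG_NEXT (1 << 0)

typedef struct
{
  uint32_t length;
  uint16_t flags;
} demo_desc_t;

/* Consume one frame starting at *last_head; returns slots consumed. */
static uint16_t
demo_consume_frame (demo_desc_t * desc, uint16_t * last_head,
                    uint16_t mask, uint16_t num_slots)
{
  uint16_t consumed = 0;
  while (num_slots--)
    {
      uint16_t slot = *last_head & mask;   /* mask only at access time */
      (*last_head)++;                      /* the index itself is never masked */
      consumed++;
      if ((desc[slot].flags & DEMO_DESC_FLAG_NEXT) == 0)
        break;                             /* last descriptor of the frame */
    }
  return consumed;
}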
@@ -269,10 +269,7 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
if (head == mq->last_head)
return 0;
- if (head > mq->last_head)
- num_slots = head - mq->last_head;
- else
- num_slots = ring_size - mq->last_head + head;
+ num_slots = head - mq->last_head;
while (num_slots)
{
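The RX slot count gets the same simplification as the TX free-slot count: because head and mq->last_head are both free-running u16 values, the branchy wrap-around handling collapses to a single unsigned subtraction that stays correct across the 65535 -> 0 wrap. A one-line check under that assumption:

/* Sketch: counting outstanding RX slots with 16-bit wrap-around. */
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  uint16_t head = 5, last_head = 65531;            /* head has wrapped past 0 */
  uint16_t num_slots = (uint16_t) (head - last_head);
  assert (num_slots == 10);                        /* 65531..65535 plus 0..4 */
  return 0;
}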
@@ -283,30 +280,16 @@ memif_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
while (num_slots > 11 && n_left_to_next > 2)
{
- if (PREDICT_TRUE (mq->last_head + 5 < ring_size))
- {
- CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 2),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (memif_get_buffer (mif, ring, mq->last_head + 3),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[mq->last_head + 4],
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[mq->last_head + 5],
- CLIB_CACHE_LINE_BYTES, LOAD);
- }
- else
- {
- CLIB_PREFETCH (memif_get_buffer
- (mif, ring, (mq->last_head + 2) % mask),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (memif_get_buffer
- (mif, ring, (mq->last_head + 3) % mask),
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) % mask],
- CLIB_CACHE_LINE_BYTES, LOAD);
- CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) % mask],
- CLIB_CACHE_LINE_BYTES, LOAD);
- }
+ CLIB_PREFETCH (memif_get_buffer
+ (mif, ring, (mq->last_head + 2) & mask),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (memif_get_buffer
+ (mif, ring, (mq->last_head + 3) & mask),
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&ring->desc[(mq->last_head + 4) & mask],
+ CLIB_CACHE_LINE_BYTES, LOAD);
+ CLIB_PREFETCH (&ring->desc[(mq->last_head + 5) & mask],
+ CLIB_CACHE_LINE_BYTES, LOAD);
vlib_buffer_t *first_b0 = 0;
u32 bi0 = 0, first_bi0 = 0;