path: root/src/vnet/replication.c
author     Damjan Marion <damarion@cisco.com>   2017-04-05 19:18:20 +0200
committer  Dave Barach <openvpp@barachs.net>    2017-04-06 11:31:39 +0000
commit     586afd762bfa149f5ca167bd5fd5a0cd59ce94fe (patch)
tree       808b57c61e0fe1a181871bb1ad94398c5ba42671 /src/vnet/replication.c
parent     bc799c92d761a2d45105aa6a1685b3663687d2a4 (diff)
Use thread local storage for thread index
This patch deprecates stack-based thread identification and also removes the requirement that thread stacks are adjacent.

Finally, possibly annoying for some folks, it renames all occurrences of cpu_index and cpu_number to thread_index. Using the word "cpu" is misleading here, as a thread can be migrated to a different CPU, and it is also not related to the Linux CPU index.

Change-Id: I68cdaf661e701d2336fc953dcb9978d10a70f7c1
Signed-off-by: Damjan Marion <damarion@cisco.com>
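To illustrate what keeping the thread index in thread-local storage means, here is a minimal stand-alone C sketch; it is not VPP code and all sketch_* names are hypothetical. Each worker stores its own index in a __thread variable once at start-up, so reading the index is a plain TLS access and no longer depends on where the thread's stack happens to live, which is the dependency behind the adjacent-stack requirement mentioned above.

/*
 * Sketch only: a per-thread index kept in thread-local storage.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

/* Each thread gets its own copy of this variable. */
static __thread unsigned int sketch_thread_index;

static unsigned int
sketch_get_thread_index (void)
{
  return sketch_thread_index;
}

static void *
sketch_worker (void *arg)
{
  /* Assign the index once when the thread starts. */
  sketch_thread_index = *(unsigned int *) arg;
  printf ("worker running as thread %u\n", sketch_get_thread_index ());
  return 0;
}

int
main (void)
{
  pthread_t threads[2];
  unsigned int ids[2] = { 1, 2 };

  for (int i = 0; i < 2; i++)
    pthread_create (&threads[i], 0, sketch_worker, &ids[i]);
  for (int i = 0; i < 2; i++)
    pthread_join (threads[i], 0);
  return 0;
}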
Diffstat (limited to 'src/vnet/replication.c')
-rw-r--r--  src/vnet/replication.c  |  20
1 file changed, 10 insertions(+), 10 deletions(-)
diff --git a/src/vnet/replication.c b/src/vnet/replication.c
index 86d922b5e7a..233a8c2f100 100644
--- a/src/vnet/replication.c
+++ b/src/vnet/replication.c
@@ -31,16 +31,16 @@ replication_prep (vlib_main_t * vm,
 {
   replication_main_t *rm = &replication_main;
   replication_context_t *ctx;
-  uword cpu_number = vm->cpu_index;
+  uword thread_index = vm->thread_index;
   ip4_header_t *ip;
   u32 ctx_id;
 
   /* Allocate a context, reserve context 0 */
-  if (PREDICT_FALSE (rm->contexts[cpu_number] == 0))
-    pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
+  if (PREDICT_FALSE (rm->contexts[thread_index] == 0))
+    pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES);
 
-  pool_get_aligned (rm->contexts[cpu_number], ctx, CLIB_CACHE_LINE_BYTES);
-  ctx_id = ctx - rm->contexts[cpu_number];
+  pool_get_aligned (rm->contexts[thread_index], ctx, CLIB_CACHE_LINE_BYTES);
+  ctx_id = ctx - rm->contexts[thread_index];
 
   /* Save state from vlib buffer */
   ctx->saved_free_list_index = b0->free_list_index;
@@ -94,11 +94,11 @@ replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last)
 {
   replication_main_t *rm = &replication_main;
   replication_context_t *ctx;
-  uword cpu_number = vm->cpu_index;
+  uword thread_index = vm->thread_index;
   ip4_header_t *ip;
 
   /* Get access to the replication context */
-  ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+  ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count);
 
   /* Restore vnet buffer state */
   clib_memcpy (vnet_buffer (b0), ctx->vnet_buffer,
@@ -133,7 +133,7 @@ replication_recycle (vlib_main_t * vm, vlib_buffer_t * b0, u32 is_last)
       b0->flags &= ~VLIB_BUFFER_RECYCLE;
 
       /* Free context back to its pool */
-      pool_put (rm->contexts[cpu_number], ctx);
+      pool_put (rm->contexts[thread_index], ctx);
     }
 
   return ctx;
@@ -160,7 +160,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
   replication_main_t *rm = &replication_main;
   replication_context_t *ctx;
   u32 feature_node_index = 0;
-  uword cpu_number = vm->cpu_index;
+  uword thread_index = vm->thread_index;
 
   /*
    * All buffers in the list are destined to the same recycle node.
@@ -172,7 +172,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl)
     {
       bi0 = fl->buffers[0];
       b0 = vlib_get_buffer (vm, bi0);
-      ctx = pool_elt_at_index (rm->contexts[cpu_number], b0->recycle_count);
+      ctx = pool_elt_at_index (rm->contexts[thread_index], b0->recycle_count);
       feature_node_index = ctx->recycle_node_index;
     }
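All of the hunks above convert the same access pattern: rm->contexts holds one context pool per thread, and each worker only ever dereferences the slot selected by its own thread index, so in this scheme contexts can be allocated, looked up and freed without cross-thread locking. Below is a minimal stand-alone sketch of that layout, using plain C arrays instead of vppinfra pools; all sketch_* names are hypothetical and error handling is omitted.

#include <stdio.h>
#include <stdlib.h>

#define SKETCH_MAX_THREADS 4

typedef struct
{
  unsigned int recycle_node_index;  /* stand-in for the saved buffer state */
} sketch_context_t;

/* One growable context store per thread, indexed by thread index. */
static sketch_context_t *sketch_contexts[SKETCH_MAX_THREADS];
static unsigned int sketch_n_contexts[SKETCH_MAX_THREADS];

/* Allocate a context from the calling thread's own store; the returned
   id plays the same role as ctx_id = ctx - rm->contexts[thread_index]. */
static unsigned int
sketch_context_get (unsigned int thread_index)
{
  unsigned int n = sketch_n_contexts[thread_index]++;
  sketch_contexts[thread_index] =
    realloc (sketch_contexts[thread_index],
             (n + 1) * sizeof (sketch_context_t));
  sketch_contexts[thread_index][n].recycle_node_index = 0;
  return n;
}

int
main (void)
{
  /* Only the main thread runs here; each worker would use its own slot. */
  printf ("thread 0 allocated context %u\n", sketch_context_get (0));
  return 0;
}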