author    Damjan Marion <damarion@cisco.com>  2017-04-05 19:18:20 +0200
committer Dave Barach <openvpp@barachs.net>  2017-04-06 11:31:39 +0000
commit    586afd762bfa149f5ca167bd5fd5a0cd59ce94fe (patch)
tree      808b57c61e0fe1a181871bb1ad94398c5ba42671 /src/plugins/lb
parent    bc799c92d761a2d45105aa6a1685b3663687d2a4 (diff)
Use thread local storage for thread index
This patch deprecates stack-based thread identification and removes the requirement that thread stacks be adjacent. Finally, possibly annoying for some folks, it renames all occurrences of cpu_index and cpu_number to thread_index. Using the word "cpu" is misleading here, as a thread can be migrated to a different CPU, and the value is not related to the Linux CPU index.

Change-Id: I68cdaf661e701d2336fc953dcb9978d10a70f7c1
Signed-off-by: Damjan Marion <damarion@cisco.com>
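The mechanism behind the rename is to keep the thread index in thread-local storage and read it directly, instead of deriving it from the stack pointer (which is what forced thread stacks to be adjacent). The sketch below illustrates that pattern in plain C; the names (my_thread_index, set_thread_index, get_thread_index) are illustrative stand-ins rather than the actual VPP symbols, with vlib_get_thread_index() serving the equivalent role in this commit.

#include <stdio.h>
#include <pthread.h>

/* One copy of this variable exists per thread (GCC/Clang __thread,
   equivalent to C11 _Thread_local). */
static __thread unsigned int my_thread_index;

/* Called once by each worker at startup to record its index. */
static void set_thread_index (unsigned int index)
{
  my_thread_index = index;
}

/* Cheap lookup from anywhere on this thread: no stack-pointer
   arithmetic, so thread stacks need not be adjacent and the value
   survives migration to a different CPU. */
static unsigned int get_thread_index (void)
{
  return my_thread_index;
}

static void *worker (void *arg)
{
  set_thread_index ((unsigned int) (unsigned long) arg);
  printf ("worker sees thread index %u\n", get_thread_index ());
  return 0;
}

int main (void)
{
  pthread_t threads[4];
  for (unsigned long i = 0; i < 4; i++)
    pthread_create (&threads[i], 0, worker, (void *) i);
  for (int i = 0; i < 4; i++)
    pthread_join (threads[i], 0);
  return 0;
}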
Diffstat (limited to 'src/plugins/lb')
-rw-r--r--  src/plugins/lb/lb.c        |  8
-rw-r--r--  src/plugins/lb/node.c      | 22
-rw-r--r--  src/plugins/lb/refcount.c  |  8
-rw-r--r--  src/plugins/lb/refcount.h  |  4
4 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/src/plugins/lb/lb.c b/src/plugins/lb/lb.c
index add81236..addc2a42 100644
--- a/src/plugins/lb/lb.c
+++ b/src/plugins/lb/lb.c
@@ -63,11 +63,11 @@ u8 *format_lb_main (u8 * s, va_list * args)
s = format(s, " #vips: %u\n", pool_elts(lbm->vips));
s = format(s, " #ass: %u\n", pool_elts(lbm->ass) - 1);
- u32 cpu_index;
- for(cpu_index = 0; cpu_index < tm->n_vlib_mains; cpu_index++ ) {
- lb_hash_t *h = lbm->per_cpu[cpu_index].sticky_ht;
+ u32 thread_index;
+ for(thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++ ) {
+ lb_hash_t *h = lbm->per_cpu[thread_index].sticky_ht;
if (h) {
- s = format(s, "core %d\n", cpu_index);
+ s = format(s, "core %d\n", thread_index);
s = format(s, " timeout: %ds\n", h->timeout);
s = format(s, " usage: %d / %d\n", lb_hash_elts(h, lb_hash_time_now(vlib_get_main())), lb_hash_size(h));
}
diff --git a/src/plugins/lb/node.c b/src/plugins/lb/node.c
index 8b763c53..3171148b 100644
--- a/src/plugins/lb/node.c
+++ b/src/plugins/lb/node.c
@@ -60,10 +60,10 @@ format_lb_trace (u8 * s, va_list * args)
return s;
}
-lb_hash_t *lb_get_sticky_table(u32 cpu_index)
+lb_hash_t *lb_get_sticky_table(u32 thread_index)
{
lb_main_t *lbm = &lb_main;
- lb_hash_t *sticky_ht = lbm->per_cpu[cpu_index].sticky_ht;
+ lb_hash_t *sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
//Check if size changed
if (PREDICT_FALSE(sticky_ht && (lbm->per_cpu_sticky_buckets != lb_hash_nbuckets(sticky_ht))))
{
@@ -71,8 +71,8 @@ lb_hash_t *lb_get_sticky_table(u32 cpu_index)
lb_hash_bucket_t *b;
u32 i;
lb_hash_foreach_entry(sticky_ht, b, i) {
- vlib_refcount_add(&lbm->as_refcount, cpu_index, b->value[i], -1);
- vlib_refcount_add(&lbm->as_refcount, cpu_index, 0, 1);
+ vlib_refcount_add(&lbm->as_refcount, thread_index, b->value[i], -1);
+ vlib_refcount_add(&lbm->as_refcount, thread_index, 0, 1);
}
lb_hash_free(sticky_ht);
@@ -81,8 +81,8 @@ lb_hash_t *lb_get_sticky_table(u32 cpu_index)
//Create if necessary
if (PREDICT_FALSE(sticky_ht == NULL)) {
- lbm->per_cpu[cpu_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
- sticky_ht = lbm->per_cpu[cpu_index].sticky_ht;
+ lbm->per_cpu[thread_index].sticky_ht = lb_hash_alloc(lbm->per_cpu_sticky_buckets, lbm->flow_timeout);
+ sticky_ht = lbm->per_cpu[thread_index].sticky_ht;
clib_warning("Regenerated sticky table %p", sticky_ht);
}
@@ -153,10 +153,10 @@ lb_node_fn (vlib_main_t * vm,
{
lb_main_t *lbm = &lb_main;
u32 n_left_from, *from, next_index, *to_next, n_left_to_next;
- u32 cpu_index = os_get_cpu_number();
+ u32 thread_index = vlib_get_thread_index();
u32 lb_time = lb_hash_time_now(vm);
- lb_hash_t *sticky_ht = lb_get_sticky_table(cpu_index);
+ lb_hash_t *sticky_ht = lb_get_sticky_table(thread_index);
from = vlib_frame_vector_args (frame);
n_left_from = frame->n_vectors;
next_index = node->cached_next_index;
@@ -240,9 +240,9 @@ lb_node_fn (vlib_main_t * vm,
//Configuration may be changed, vectors resized, etc...
//Dereference previously used
- vlib_refcount_add(&lbm->as_refcount, cpu_index,
+ vlib_refcount_add(&lbm->as_refcount, thread_index,
lb_hash_available_value(sticky_ht, hash0, available_index0), -1);
- vlib_refcount_add(&lbm->as_refcount, cpu_index,
+ vlib_refcount_add(&lbm->as_refcount, thread_index,
asindex0, 1);
//Add sticky entry
@@ -260,7 +260,7 @@ lb_node_fn (vlib_main_t * vm,
}
vlib_increment_simple_counter(&lbm->vip_counters[counter],
- cpu_index,
+ thread_index,
vnet_buffer (p0)->ip.adj_index[VLIB_TX],
1);
diff --git a/src/plugins/lb/refcount.c b/src/plugins/lb/refcount.c
index 22415c88..6f01ab5a 100644
--- a/src/plugins/lb/refcount.c
+++ b/src/plugins/lb/refcount.c
@@ -31,10 +31,10 @@ u64 vlib_refcount_get(vlib_refcount_t *r, u32 index)
{
u64 count = 0;
vlib_thread_main_t *tm = vlib_get_thread_main ();
- u32 cpu_index;
- for (cpu_index = 0; cpu_index < tm->n_vlib_mains; cpu_index++) {
- if (r->per_cpu[cpu_index].length > index)
- count += r->per_cpu[cpu_index].counters[index];
+ u32 thread_index;
+ for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++) {
+ if (r->per_cpu[thread_index].length > index)
+ count += r->per_cpu[thread_index].counters[index];
}
return count;
}
diff --git a/src/plugins/lb/refcount.h b/src/plugins/lb/refcount.h
index 8c26e7be..dcfcb3fe 100644
--- a/src/plugins/lb/refcount.h
+++ b/src/plugins/lb/refcount.h
@@ -45,9 +45,9 @@ typedef struct {
void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
static_always_inline
-void vlib_refcount_add(vlib_refcount_t *r, u32 cpu_index, u32 counter_index, i32 v)
+void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
{
- vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[cpu_index];
+ vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
if (PREDICT_FALSE(counter_index >= per_cpu->length))
__vlib_refcount_resize(per_cpu, clib_max(counter_index + 16, per_cpu->length * 2));
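
For context on the refcount.h change: the counting scheme is per-thread by design. Each thread only ever adds to its own counter array (indexed by thread_index), so the hot path in vlib_refcount_add needs no atomics, and a reader recovers the total by summing across all threads, as vlib_refcount_get does in refcount.c above. A minimal self-contained sketch of that pattern, with hypothetical names (refcount_add, refcount_sum) rather than the real VPP API:

#include <stdio.h>

#define N_THREADS  4
#define N_COUNTERS 8

/* Per-thread counter arrays: thread t writes only counters[t][...]. */
static long counters[N_THREADS][N_COUNTERS];

/* Hot path: no locks or atomics, since each thread owns its row. */
static void refcount_add (unsigned int thread_index,
                          unsigned int counter_index, long v)
{
  counters[thread_index][counter_index] += v;
}

/* Slow path: sum one counter across all threads, mirroring what
   vlib_refcount_get does in refcount.c above. */
static long refcount_sum (unsigned int counter_index)
{
  long total = 0;
  for (unsigned int t = 0; t < N_THREADS; t++)
    total += counters[t][counter_index];
  return total;
}

int main (void)
{
  refcount_add (0, 3, +1);   /* thread 0 takes a reference */
  refcount_add (1, 3, +1);   /* thread 1 takes a reference */
  refcount_add (0, 3, -1);   /* thread 0 drops its reference */
  printf ("refcount[3] = %ld\n", refcount_sum (3)); /* prints 1 */
  return 0;
}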