summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
authorHongjun Ni <hongjun.ni@intel.com>2017-11-13 20:34:06 +0800
committerNeale Ranns <nranns@cisco.com>2017-12-06 13:22:51 +0000
commitbf4be5730557c4280f2f9f8b7ef1a194716c82bd (patch)
tree5e2297c4484902801cb3c04e316da4123a9bd3b3
parent22229864cb6e30c9b75a9c36e4ffa8500c0cdc5f (diff)
Restructure some files in LB to src/vnet to reuse
Change-Id: Ic8b193e93ce18ca82b294816aa7ee0ef31d64bc2
Signed-off-by: Pierre Pfister <ppfister@cisco.com>
Signed-off-by: Hongjun Ni <hongjun.ni@intel.com>
-rw-r--r--src/plugins/lb.am2
-rw-r--r--src/plugins/lb/lb.h2
-rw-r--r--src/vnet.am6
-rw-r--r--src/vnet/util/refcount.c (renamed from src/plugins/lb/refcount.c)15
-rw-r--r--src/vnet/util/refcount.h (renamed from src/plugins/lb/refcount.h)36
5 files changed, 47 insertions, 14 deletions
diff --git a/src/plugins/lb.am b/src/plugins/lb.am
index 352358fa88f..f0ff6267691 100644
--- a/src/plugins/lb.am
+++ b/src/plugins/lb.am
@@ -19,7 +19,6 @@ lb_plugin_la_SOURCES = \
lb/node.c \
lb/cli.c \
lb/util.c \
- lb/refcount.c \
lb/api.c
BUILT_SOURCES += \
@@ -31,7 +30,6 @@ API_FILES += lb/lb.api
noinst_HEADERS += \
lb/lb.h \
lb/util.h \
- lb/refcount.h \
lb/lbhash.h \
lb/lb.api.h
diff --git a/src/plugins/lb/lb.h b/src/plugins/lb/lb.h
index 882b9b30f7e..fa0b5d48b07 100644
--- a/src/plugins/lb/lb.h
+++ b/src/plugins/lb/lb.h
@@ -31,7 +31,7 @@
#define LB_PLUGIN_LB_LB_H_
#include <lb/util.h>
-#include <lb/refcount.h>
+#include <vnet/util/refcount.h>
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
diff --git a/src/vnet.am b/src/vnet.am
index a4817f2fc11..bd7efb24c4b 100644
--- a/src/vnet.am
+++ b/src/vnet.am
@@ -63,7 +63,8 @@ nobase_include_HEADERS += \
vnet/vnet.h \
vnet/vnet_all_api_h.h \
vnet/vnet_msg_enum.h \
- vnet/util/radix.h
+ vnet/util/radix.h \
+ vnet/util/refcount.h
API_FILES += vnet/interface.api
@@ -1147,7 +1148,8 @@ nobase_include_HEADERS += \
########################################
libvnet_la_SOURCES += \
- vnet/util/radix.c \
+ vnet/util/radix.c \
+ vnet/util/refcount.c \
vnet/util/trajectory.c
########################################
diff --git a/src/plugins/lb/refcount.c b/src/vnet/util/refcount.c
index 6f01ab5aaf7..a7b525d67be 100644
--- a/src/plugins/lb/refcount.c
+++ b/src/vnet/util/refcount.c
@@ -13,17 +13,18 @@
* limitations under the License.
*/
-#include <lb/refcount.h>
+#include <vnet/util/refcount.h>
/**
 * Grow one thread's counter vector to hold at least 'size' entries.
 *
 * Only the owning thread reallocates its own vector (see design note in
 * refcount.h); the counter_lock is held across the copy/swap so that
 * readers in vlib_refcount_get() never observe a half-updated pair of
 * (counters, length).
 *
 * @param per_cpu  per-thread refcount state to grow
 * @param size     highest counter index that must become valid
 */
void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size)
{
  u32 *new_counter = 0, *old_counter;
  /* vec_validate zero-fills, so new slots start at refcount 0. */
  vec_validate(new_counter, size);
  vlib_refcount_lock(per_cpu->counter_lock);
  /* Copy existing counts; was a magic '*4' — use sizeof so the code
   * survives a future change of the counter element type. */
  memcpy(new_counter, per_cpu->counters,
         vec_len(per_cpu->counters) * sizeof(u32));
  old_counter = per_cpu->counters;
  per_cpu->counters = new_counter;
  vlib_refcount_unlock(per_cpu->counter_lock);
  /* Ensure the pointer swap is globally visible before freeing the
   * old vector out from under any concurrent reader. */
  CLIB_MEMORY_BARRIER();
  vec_free(old_counter);
}
@@ -33,8 +34,12 @@ u64 vlib_refcount_get(vlib_refcount_t *r, u32 index)
vlib_thread_main_t *tm = vlib_get_thread_main ();
u32 thread_index;
for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++) {
- if (r->per_cpu[thread_index].length > index)
- count += r->per_cpu[thread_index].counters[index];
+ vlib_refcount_lock(r->per_cpu[thread_index].counter_lock);
+ if (index < vec_len(r->per_cpu[thread_index].counters))
+ {
+ count += r->per_cpu[thread_index].counters[index];
+ }
+ vlib_refcount_unlock(r->per_cpu[thread_index].counter_lock);
}
return count;
}
diff --git a/src/plugins/lb/refcount.h b/src/vnet/util/refcount.h
index dcfcb3fee58..ea92148dafa 100644
--- a/src/plugins/lb/refcount.h
+++ b/src/vnet/util/refcount.h
@@ -31,10 +31,17 @@
#include <vnet/vnet.h>
+/*
+ * Reference counting
+ * A specific reference counter is used. The design is quite
+ * similar to vlib counters but:
+ * - It is possible to decrease the value
+ * - Summing will not zero the per-thread counters
+ * - Only the thread can reallocate its own counters vector (to avoid concurrency issues)
+*/
typedef struct {
  /* Per-thread counter vector (vppinfra vec); length is implicit via
   * vec_len(), grown on demand by __vlib_refcount_resize(). */
  u32 *counters;
  /* Spinlock taken by __vlib_refcount_resize() (writer side) and
   * vlib_refcount_get() (reader side) so a reader never sees a stale
   * counters pointer mid-reallocation. Allocated separately,
   * cache-line aligned, in vlib_refcount_init(). */
  volatile u32 *counter_lock;
  CLIB_CACHE_LINE_ALIGN_MARK(o);
} vlib_refcount_per_cpu_t;
@@ -42,14 +49,27 @@ typedef struct {
vlib_refcount_per_cpu_t *per_cpu;
} vlib_refcount_t;
+static_always_inline
+void vlib_refcount_lock (volatile u32 *counter_lock)
+{
+ while (__sync_lock_test_and_set (counter_lock, 1))
+ ;
+}
+
+static_always_inline
+void vlib_refcount_unlock (volatile u32 *counter_lock)
+{
+ *counter_lock = 0;
+}
+
void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
static_always_inline
void vlib_refcount_add(vlib_refcount_t *r, u32 thread_index, u32 counter_index, i32 v)
{
  /* Adjust this thread's counter for 'counter_index' by v (may be
   * negative — this counter supports decrement, unlike vlib counters).
   * Lock-free on the fast path: only the owning thread writes its own
   * vector, so the unlocked '+=' below is safe by design. */
  vlib_refcount_per_cpu_t *per_cpu = &r->per_cpu[thread_index];
  /* Grow on demand; +16 slack and doubling amortize reallocation cost.
   * The resize itself takes counter_lock to keep readers consistent. */
  if (PREDICT_FALSE(counter_index >= vec_len(per_cpu->counters)))
    __vlib_refcount_resize(per_cpu, clib_max(counter_index + 16,(vec_len(per_cpu->counters)) * 2));
  per_cpu->counters[counter_index] += v;
}
@@ -60,8 +80,16 @@ static_always_inline
void vlib_refcount_init(vlib_refcount_t *r)
{
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  u32 thread_index;
  /* One per-thread slot for every vlib main; counters vectors start
   * empty and grow lazily in vlib_refcount_add(). */
  r->per_cpu = 0;
  vec_validate (r->per_cpu, tm->n_vlib_mains - 1);

  for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
    {
      /* A whole cache line per lock word: avoids false sharing between
       * threads spinning on different locks. */
      r->per_cpu[thread_index].counter_lock =
	  clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES,CLIB_CACHE_LINE_BYTES);
      /* Lock starts released. */
      r->per_cpu[thread_index].counter_lock[0] = 0;
    }
}