| author | jaszha03 <jason.zhang2@arm.com> | 2019-07-11 20:47:24 +0000 |
|---|---|---|
| committer | Dave Barach <openvpp@barachs.net> | 2019-07-31 13:53:55 +0000 |
| commit | 5cdde5c25a0e71d923a6d56e5c94e058887f95d8 (patch) | |
| tree | c54eeb5a91ce67417806fabd1fca8d287993e71d /src/vnet/util | |
| parent | 9a4e631890a70978d414b4937cb94b50cfd778e6 (diff) | |
vppinfra: refactor test_and_set spinlocks to use clib_spinlock_t
Spinlock performance improves when the lock is implemented with
compare_and_exchange instead of test_and_set. All instances of
test_and_set locks were refactored to use clib_spinlock_t where
possible. Some locks, e.g. ssvm, synchronize between processes rather
than threads, so they cannot use clib_spinlock_t directly.
Type: refactor
Change-Id: Ia16b5d4cd49209b2b57b8df6c94615c28b11bb60
Signed-off-by: Jason Zhang <jason.zhang2@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Lijian Zhang <Lijian.Zhang@arm.com>
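
To make the distinction in the commit message concrete, here is a minimal sketch of the two locking styles. This is illustrative code only, not VPP's clib_spinlock implementation: the spin_lock_tas / spin_lock_cas / spin_unlock helpers are hypothetical and use raw GCC __atomic builtins.

```c
/* test_and_set style: every spin iteration is an atomic read-modify-write,
 * so waiting cores keep pulling the lock's cache line into exclusive state
 * even while the lock is held by someone else. */
static inline void
spin_lock_tas (volatile unsigned *lock)
{
  while (__atomic_exchange_n (lock, 1, __ATOMIC_ACQUIRE))
    ;
}

/* compare_and_exchange style (test-and-test-and-set): wait with plain
 * loads and attempt the atomic compare-exchange only once the lock looks
 * free, so the cache line stays shared while waiting. */
static inline void
spin_lock_cas (volatile unsigned *lock)
{
  unsigned expected;
  do
    {
      while (__atomic_load_n (lock, __ATOMIC_RELAXED))
        ;                  /* read-only wait, no cache-line invalidations */
      expected = 0;        /* compare_exchange overwrites this on failure */
    }
  while (!__atomic_compare_exchange_n (lock, &expected, 1,
                                       0 /* strong */,
                                       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED));
}

static inline void
spin_unlock (volatile unsigned *lock)
{
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE);
}
```

The CAS variant only issues the atomic read-modify-write when the lock appears free, so contending cores do not continually invalidate each other's copy of the lock's cache line; this is the effect the commit message attributes the performance improvement to.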
Diffstat (limited to 'src/vnet/util')
-rw-r--r-- | src/vnet/util/refcount.h | 16 |
1 file changed, 7 insertions, 9 deletions
diff --git a/src/vnet/util/refcount.h b/src/vnet/util/refcount.h
index 873ab6def69..4c7d7bdbdd5 100644
--- a/src/vnet/util/refcount.h
+++ b/src/vnet/util/refcount.h
@@ -30,6 +30,7 @@
  */
 
 #include <vnet/vnet.h>
+#include <vppinfra/lock.h>
 
 /*
  * Reference counting
@@ -41,7 +42,7 @@
  */
 typedef struct {
   u32 *counters;
-  volatile u32 *counter_lock;
+  clib_spinlock_t counter_lock;
   CLIB_CACHE_LINE_ALIGN_MARK(o);
 } vlib_refcount_per_cpu_t;
 
@@ -50,16 +51,15 @@ typedef struct {
 } vlib_refcount_t;
 
 static_always_inline
-void vlib_refcount_lock (volatile u32 *counter_lock)
+void vlib_refcount_lock (clib_spinlock_t counter_lock)
 {
-  while (clib_atomic_test_and_set (counter_lock))
-    ;
+  clib_spinlock_lock (&counter_lock);
 }
 
 static_always_inline
-void vlib_refcount_unlock (volatile u32 *counter_lock)
+void vlib_refcount_unlock (clib_spinlock_t counter_lock)
 {
-  clib_atomic_release(counter_lock);
+  clib_spinlock_unlock (&counter_lock);
 }
 
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
@@ -86,9 +86,7 @@ void vlib_refcount_init(vlib_refcount_t *r)
 
   for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
     {
-      r->per_cpu[thread_index].counter_lock =
-        clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES,CLIB_CACHE_LINE_BYTES);
-      r->per_cpu[thread_index].counter_lock[0] = 0;
+      clib_spinlock_init (&r->per_cpu[thread_index].counter_lock);
     }
 }
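
The clib_spinlock_t pattern the patch switches to is shown below as a minimal sketch. Only clib_spinlock_init, clib_spinlock_lock and clib_spinlock_unlock (declared in <vppinfra/lock.h>) come from the diff above; the my_counter_t type and its helpers are hypothetical.

```c
#include <vppinfra/lock.h>

/* Hypothetical example type; the real change applies this pattern to
 * vlib_refcount_per_cpu_t. */
typedef struct
{
  u32 value;
  clib_spinlock_t lock;   /* replaces the volatile u32 * lock word */
} my_counter_t;

static void
my_counter_init (my_counter_t *c)
{
  c->value = 0;
  /* replaces clib_mem_alloc_aligned() of a lock word plus lock[0] = 0 */
  clib_spinlock_init (&c->lock);
}

static void
my_counter_add (my_counter_t *c, u32 v)
{
  clib_spinlock_lock (&c->lock);     /* replaces the test_and_set busy loop */
  c->value += v;
  clib_spinlock_unlock (&c->lock);   /* replaces clib_atomic_release() */
}
```

In the patch itself the same three calls replace the hand-rolled lock word in vlib_refcount_per_cpu_t, so the reference-count code no longer has to allocate lock storage or manage memory ordering by hand.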