summary | refs | log | tree | commit | diff | stats
path: root/src/vnet
diff options
context:
space:
mode:
author:    jaszha03 <jason.zhang2@arm.com>  2019-07-11 20:47:24 +0000
committer: Dave Barach <openvpp@barachs.net>  2019-07-31 13:53:55 +0000
commit:    5cdde5c25a0e71d923a6d56e5c94e058887f95d8 (patch)
tree:      c54eeb5a91ce67417806fabd1fca8d287993e71d /src/vnet
parent:    9a4e631890a70978d414b4937cb94b50cfd778e6 (diff)
vppinfra: refactor test_and_set spinlocks to use clib_spinlock_t
Spinlock performance improved when implemented with compare_and_exchange
instead of test_and_set. All instances of test_and_set locks were refactored
to use clib_spinlock_t when possible. Some locks (e.g. ssvm) synchronize
between processes rather than threads, so they cannot directly use
clib_spinlock_t.

Type: refactor

Change-Id: Ia16b5d4cd49209b2b57b8df6c94615c28b11bb60
Signed-off-by: Jason Zhang <jason.zhang2@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Lijian Zhang <Lijian.Zhang@arm.com>
Diffstat (limited to 'src/vnet')
-rwxr-xr-x  src/vnet/classify/vnet_classify.c          | 14
-rw-r--r--  src/vnet/classify/vnet_classify.h          |  2
-rw-r--r--  src/vnet/dns/dns.c                         |  3
-rw-r--r--  src/vnet/dns/dns.h                         |  7
-rw-r--r--  src/vnet/interface.c                       |  7
-rw-r--r--  src/vnet/interface.h                       |  8
-rw-r--r--  src/vnet/ipfix-export/flow_report_classify.c |  5
-rw-r--r--  src/vnet/util/refcount.h                   | 16
8 files changed, 26 insertions, 36 deletions
diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c
index f5e49491b25..b807a2679a7 100755
--- a/src/vnet/classify/vnet_classify.c
+++ b/src/vnet/classify/vnet_classify.c
@@ -152,10 +152,7 @@ vnet_classify_new_table (vnet_classify_main_t * cm,
vec_validate_aligned (t->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
oldheap = clib_mem_set_heap (t->mheap);
- t->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- t->writer_lock[0] = 0;
-
+ clib_spinlock_init (&t->writer_lock);
clib_mem_set_heap (oldheap);
return (t);
}
@@ -193,7 +190,7 @@ vnet_classify_entry_alloc (vnet_classify_table_t * t, u32 log2_pages)
u32 required_length;
void *oldheap;
- ASSERT (t->writer_lock[0]);
+ CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
required_length =
(sizeof (vnet_classify_entry_t) + (t->match_n_vectors * sizeof (u32x4)))
* t->entries_per_page * (1 << log2_pages);
@@ -222,7 +219,7 @@ static void
vnet_classify_entry_free (vnet_classify_table_t * t,
vnet_classify_entry_t * v, u32 log2_pages)
{
- ASSERT (t->writer_lock[0]);
+ CLIB_SPINLOCK_ASSERT_LOCKED (&t->writer_lock);
ASSERT (vec_len (t->freelists) > log2_pages);
@@ -447,8 +444,7 @@ vnet_classify_add_del (vnet_classify_table_t * t,
hash >>= t->log2_nbuckets;
- while (clib_atomic_test_and_set (t->writer_lock))
- CLIB_PAUSE ();
+ clib_spinlock_lock (&t->writer_lock);
/* First elt in the bucket? */
if (b->offset == 0)
@@ -640,7 +636,7 @@ expand_ok:
vnet_classify_entry_free (t, v, old_log2_pages);
unlock:
- clib_atomic_release (&t->writer_lock[0]);
+ clib_spinlock_unlock (&t->writer_lock);
return rv;
}
diff --git a/src/vnet/classify/vnet_classify.h b/src/vnet/classify/vnet_classify.h
index 2bc1224c72e..986e0a68674 100644
--- a/src/vnet/classify/vnet_classify.h
+++ b/src/vnet/classify/vnet_classify.h
@@ -187,7 +187,7 @@ typedef struct
void *mheap;
/* Writer (only) lock for this table */
- volatile u32 *writer_lock;
+ clib_spinlock_t writer_lock;
} vnet_classify_table_t;
diff --git a/src/vnet/dns/dns.c b/src/vnet/dns/dns.c
index bae6cb4911e..471728b8bb7 100644
--- a/src/vnet/dns/dns.c
+++ b/src/vnet/dns/dns.c
@@ -103,8 +103,7 @@ dns_enable_disable (dns_main_t * dm, int is_enable)
if (dm->cache_entry_by_name == 0)
{
if (n_vlib_mains > 1)
- dm->cache_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
+ clib_spinlock_init (&dm->cache_lock);
dm->cache_entry_by_name = hash_create_string (0, sizeof (uword));
}
diff --git a/src/vnet/dns/dns.h b/src/vnet/dns/dns.h
index d5029e521d6..494ac672096 100644
--- a/src/vnet/dns/dns.h
+++ b/src/vnet/dns/dns.h
@@ -97,7 +97,7 @@ typedef struct
/** Find cached record by name */
uword *cache_entry_by_name;
- uword *cache_lock;
+ clib_spinlock_t cache_lock;
/** enable / disable flag */
int is_enabled;
@@ -196,8 +196,7 @@ dns_cache_lock (dns_main_t * dm)
{
if (dm->cache_lock)
{
- while (clib_atomic_test_and_set (dm->cache_lock))
- CLIB_PAUSE ();
+ clib_spinlock_lock (&dm->cache_lock);
}
}
@@ -206,7 +205,7 @@ dns_cache_unlock (dns_main_t * dm)
{
if (dm->cache_lock)
{
- clib_atomic_release (dm->cache_lock);
+ clib_spinlock_unlock (&dm->cache_lock);
}
}
diff --git a/src/vnet/interface.c b/src/vnet/interface.c
index 1702cdc00d1..889ba50cc7a 100644
--- a/src/vnet/interface.c
+++ b/src/vnet/interface.c
@@ -1269,9 +1269,8 @@ vnet_interface_init (vlib_main_t * vm)
sizeof (b->opaque), sizeof (vnet_buffer_opaque_t));
}
- im->sw_if_counter_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
- im->sw_if_counter_lock[0] = 1; /* should be no need */
+ clib_spinlock_init (&im->sw_if_counter_lock);
+ clib_spinlock_lock (&im->sw_if_counter_lock); /* should be no need */
vec_validate (im->sw_if_counters, VNET_N_SIMPLE_INTERFACE_COUNTER - 1);
#define _(E,n,p) \
@@ -1286,7 +1285,7 @@ vnet_interface_init (vlib_main_t * vm)
im->combined_sw_if_counters[VNET_INTERFACE_COUNTER_##E].stat_segment_name = "/" #p "/" #n;
foreach_combined_interface_counter_name
#undef _
- im->sw_if_counter_lock[0] = 0;
+ clib_spinlock_unlock (&im->sw_if_counter_lock);
im->device_class_by_name = hash_create_string ( /* size */ 0,
sizeof (uword));
diff --git a/src/vnet/interface.h b/src/vnet/interface.h
index c6400ce6978..d3065dc2a9f 100644
--- a/src/vnet/interface.h
+++ b/src/vnet/interface.h
@@ -43,6 +43,7 @@
#include <vlib/vlib.h>
#include <vppinfra/pcap.h>
#include <vnet/l3_types.h>
+#include <vppinfra/lock.h>
struct vnet_main_t;
struct vnet_hw_interface_t;
@@ -836,7 +837,7 @@ typedef struct
/* Software interface counters both simple and combined
packet and byte counters. */
- volatile u32 *sw_if_counter_lock;
+ clib_spinlock_t sw_if_counter_lock;
vlib_simple_counter_main_t *sw_if_counters;
vlib_combined_counter_main_t *combined_sw_if_counters;
@@ -868,15 +869,14 @@ static inline void
vnet_interface_counter_lock (vnet_interface_main_t * im)
{
if (im->sw_if_counter_lock)
- while (clib_atomic_test_and_set (im->sw_if_counter_lock))
- /* zzzz */ ;
+ clib_spinlock_lock (&im->sw_if_counter_lock);
}
static inline void
vnet_interface_counter_unlock (vnet_interface_main_t * im)
{
if (im->sw_if_counter_lock)
- clib_atomic_release (im->sw_if_counter_lock);
+ clib_spinlock_unlock (&im->sw_if_counter_lock);
}
void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add);
diff --git a/src/vnet/ipfix-export/flow_report_classify.c b/src/vnet/ipfix-export/flow_report_classify.c
index 6bddb1407fe..15118f9cfaa 100644
--- a/src/vnet/ipfix-export/flow_report_classify.c
+++ b/src/vnet/ipfix-export/flow_report_classify.c
@@ -197,8 +197,7 @@ ipfix_classify_send_flows (flow_report_main_t * frm,
t = pool_elt_at_index (vcm->tables, table->classify_table_index);
- while (clib_atomic_test_and_set (t->writer_lock))
- ;
+ clib_spinlock_lock (&t->writer_lock);
for (i = 0; i < t->nbuckets; i++)
{
@@ -385,7 +384,7 @@ flush:
bi0 = ~0;
}
- clib_atomic_release (t->writer_lock);
+ clib_spinlock_unlock (&t->writer_lock);
return f;
}
diff --git a/src/vnet/util/refcount.h b/src/vnet/util/refcount.h
index 873ab6def69..4c7d7bdbdd5 100644
--- a/src/vnet/util/refcount.h
+++ b/src/vnet/util/refcount.h
@@ -30,6 +30,7 @@
*/
#include <vnet/vnet.h>
+#include <vppinfra/lock.h>
/*
* Reference counting
@@ -41,7 +42,7 @@
*/
typedef struct {
u32 *counters;
- volatile u32 *counter_lock;
+ clib_spinlock_t counter_lock;
CLIB_CACHE_LINE_ALIGN_MARK(o);
} vlib_refcount_per_cpu_t;
@@ -50,16 +51,15 @@ typedef struct {
} vlib_refcount_t;
static_always_inline
-void vlib_refcount_lock (volatile u32 *counter_lock)
+void vlib_refcount_lock (clib_spinlock_t counter_lock)
{
- while (clib_atomic_test_and_set (counter_lock))
- ;
+ clib_spinlock_lock (&counter_lock);
}
static_always_inline
-void vlib_refcount_unlock (volatile u32 *counter_lock)
+void vlib_refcount_unlock (clib_spinlock_t counter_lock)
{
- clib_atomic_release(counter_lock);
+ clib_spinlock_unlock (&counter_lock);
}
void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
@@ -86,9 +86,7 @@ void vlib_refcount_init(vlib_refcount_t *r)
for (thread_index = 0; thread_index < tm->n_vlib_mains; thread_index++)
{
- r->per_cpu[thread_index].counter_lock =
- clib_mem_alloc_aligned(CLIB_CACHE_LINE_BYTES,CLIB_CACHE_LINE_BYTES);
- r->per_cpu[thread_index].counter_lock[0] = 0;
+ clib_spinlock_init (&r->per_cpu[thread_index].counter_lock);
}
}