author     Damjan Marion <damarion@cisco.com>      2017-03-27 17:08:20 +0200
committer  Dave Barach <openvpp@barachs.net>       2017-03-30 14:14:26 +0000
commit     1927da29ccbe1d4cc8e59ccfa197eb41c257814f (patch)
tree       226487b33921b9a45f78016d078548a6815c3431
parent     b18e0de1f9630fab8b3d6ffe85c7a6ee35a6fdac (diff)
vppinfra: add spinlock inline functions
Change-Id: I86089e9bb604adfc260a111685001be1c897ce53
Signed-off-by: Damjan Marion <damarion@cisco.com>
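
The patch replaces the open-coded test-and-set locks in the memif, af_packet and netmap drivers with the clib_spinlock_* helpers it introduces in vppinfra/lock.h. A minimal sketch of the resulting driver pattern follows; it assumes a VPP build where the clib heap is initialized, and my_dev_t with its three functions are hypothetical stand-ins, while the clib_spinlock_* calls are exactly the ones this patch adds:

#include <vppinfra/lock.h>
#include <vppinfra/cache.h>

/* Hypothetical device type standing in for memif_if_t / apif / nif. */
typedef struct
{
  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
  clib_spinlock_t lockp;	/* stays 0 when only one thread transmits */
  u32 tx_head;
} my_dev_t;

static void
my_dev_create (my_dev_t * dev, int n_vlib_mains)
{
  /* Only allocate the lock when more than one thread can contend. */
  if (n_vlib_mains > 1)
    clib_spinlock_init (&dev->lockp);
}

static void
my_dev_tx (my_dev_t * dev, u32 n_pkts)
{
  /* Both calls are no-ops when lockp was never initialized. */
  clib_spinlock_lock_if_init (&dev->lockp);
  dev->tx_head += n_pkts;	/* critical section */
  clib_spinlock_unlock_if_init (&dev->lockp);
}

static void
my_dev_delete (my_dev_t * dev)
{
  clib_spinlock_free (&dev->lockp);	/* safe when lockp is 0 */
}

Because the lock is only allocated when more than one vlib worker exists, the _if_init variants reduce to a predicted-false pointer test in single-worker deployments.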
-rw-r--r--  src/plugins/memif/device.c              | 21
-rw-r--r--  src/plugins/memif/memif.c               | 12
-rw-r--r--  src/plugins/memif/memif.h               |  4
-rw-r--r--  src/vnet/devices/af_packet/af_packet.c  |  6
-rw-r--r--  src/vnet/devices/af_packet/af_packet.h  |  4
-rw-r--r--  src/vnet/devices/af_packet/device.c     |  9
-rw-r--r--  src/vnet/devices/netmap/device.c        |  9
-rw-r--r--  src/vnet/devices/netmap/netmap.c        |  6
-rw-r--r--  src/vnet/devices/netmap/netmap.h        |  4
-rw-r--r--  src/vppinfra.am                         |  1
-rw-r--r--  src/vppinfra/lock.h                     | 97
11 files changed, 117 insertions, 56 deletions
diff --git a/src/plugins/memif/device.c b/src/plugins/memif/device.c
index 446537a3792..4faeb0554f3 100644
--- a/src/plugins/memif/device.c
+++ b/src/plugins/memif/device.c
@@ -79,23 +79,6 @@ format_memif_tx_trace (u8 * s, va_list * args)
 }
 
 static_always_inline void
-memif_interface_lock (memif_if_t * mif)
-{
-  if (PREDICT_FALSE (mif->lockp != 0))
-    {
-      while (__sync_lock_test_and_set (mif->lockp, 1))
-        ;
-    }
-}
-
-static_always_inline void
-memif_interface_unlock (memif_if_t * mif)
-{
-  if (PREDICT_FALSE (mif->lockp != 0))
-    *mif->lockp = 0;
-}
-
-static_always_inline void
 memif_prefetch_buffer_and_data (vlib_main_t * vm, u32 bi)
 {
   vlib_buffer_t *b = vlib_get_buffer (vm, bi);
@@ -117,7 +100,7 @@ memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   u16 head, tail;
   u16 free_slots;
 
-  memif_interface_lock (mif);
+  clib_spinlock_lock_if_init (&mif->lockp);
 
   /* free consumed buffers */
 
@@ -210,7 +193,7 @@ memif_interface_tx_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
   CLIB_MEMORY_STORE_BARRIER ();
   ring->head = head;
 
-  memif_interface_unlock (mif);
+  clib_spinlock_unlock (&mif->lockp);
 
   if (n_left)
     {
diff --git a/src/plugins/memif/memif.c b/src/plugins/memif/memif.c
index 7ba67c5b898..cf8ca577d54 100644
--- a/src/plugins/memif/memif.c
+++ b/src/plugins/memif/memif.c
@@ -716,11 +716,7 @@ memif_close_if (memif_main_t * mm, memif_if_t * mif)
         }
     }
 
-  if (mif->lockp != 0)
-    {
-      clib_mem_free ((void *) mif->lockp);
-      mif->lockp = 0;
-    }
+  clib_spinlock_free (&mif->lockp);
 
   mhash_unset (&mm->if_index_by_key, &mif->key, &mif->if_index);
   vec_free (mif->socket_filename);
@@ -783,11 +779,7 @@ memif_create_if (vlib_main_t * vm, memif_create_if_args_t * args)
   mif->connection.fd = mif->interrupt_line.fd = -1;
 
   if (tm->n_vlib_mains > 1)
-    {
-      mif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                           CLIB_CACHE_LINE_BYTES);
-      memset ((void *) mif->lockp, 0, CLIB_CACHE_LINE_BYTES);
-    }
+    clib_spinlock_init (&mif->lockp);
 
   if (!args->hw_addr_set)
     {
diff --git a/src/plugins/memif/memif.h b/src/plugins/memif/memif.h
index a7a88e07b0e..f57170f8e6f 100644
--- a/src/plugins/memif/memif.h
+++ b/src/plugins/memif/memif.h
@@ -15,6 +15,8 @@
  *------------------------------------------------------------------
  */
 
+#include <vppinfra/lock.h>
+
 typedef struct
 {
   u16 version;
@@ -98,7 +100,7 @@ typedef struct
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  volatile u32 *lockp;
+  clib_spinlock_t lockp;
   u32 flags;
 #define MEMIF_IF_FLAG_ADMIN_UP (1 << 0)
 #define MEMIF_IF_FLAG_IS_SLAVE (1 << 1)
diff --git a/src/vnet/devices/af_packet/af_packet.c b/src/vnet/devices/af_packet/af_packet.c
index 5fdc59f2a20..2028510716e 100644
--- a/src/vnet/devices/af_packet/af_packet.c
+++ b/src/vnet/devices/af_packet/af_packet.c
@@ -229,11 +229,7 @@ af_packet_create_if (vlib_main_t * vm, u8 * host_if_name, u8 * hw_addr_set,
   apif->next_rx_frame = 0;
 
   if (tm->n_vlib_mains > 1)
-    {
-      apif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                            CLIB_CACHE_LINE_BYTES);
-      memset ((void *) apif->lockp, 0, CLIB_CACHE_LINE_BYTES);
-    }
+    clib_spinlock_init (&apif->lockp);
 
   {
     unix_file_t template = { 0 };
diff --git a/src/vnet/devices/af_packet/af_packet.h b/src/vnet/devices/af_packet/af_packet.h
index 50ec23785ae..77a2c7a3753 100644
--- a/src/vnet/devices/af_packet/af_packet.h
+++ b/src/vnet/devices/af_packet/af_packet.h
@@ -17,10 +17,12 @@
  *------------------------------------------------------------------
  */
 
+#include <vppinfra/lock.h>
+
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  volatile u32 *lockp;
+  clib_spinlock_t lockp;
   u8 *host_if_name;
   int fd;
   struct tpacket_req *rx_req;
diff --git a/src/vnet/devices/af_packet/device.c b/src/vnet/devices/af_packet/device.c
index 9a94fc5e4a9..2ba3f579c00 100644
--- a/src/vnet/devices/af_packet/device.c
+++ b/src/vnet/devices/af_packet/device.c
@@ -92,11 +92,7 @@ af_packet_interface_tx (vlib_main_t * vm,
   struct tpacket2_hdr *tph;
   u32 frame_not_ready = 0;
 
-  if (PREDICT_FALSE (apif->lockp != 0))
-    {
-      while (__sync_lock_test_and_set (apif->lockp, 1))
-        ;
-    }
+  clib_spinlock_lock_if_init (&apif->lockp);
 
   while (n_left > 0)
     {
@@ -159,8 +155,7 @@ af_packet_interface_tx (vlib_main_t * vm,
         }
     }
 
-  if (PREDICT_FALSE (apif->lockp != 0))
-    *apif->lockp = 0;
+  clib_spinlock_unlock_if_init (&apif->lockp);
 
   if (PREDICT_FALSE (frame_not_ready))
     vlib_error_count (vm, node->node_index,
diff --git a/src/vnet/devices/netmap/device.c b/src/vnet/devices/netmap/device.c
index 2152824f733..aea9ddf4eb1 100644
--- a/src/vnet/devices/netmap/device.c
+++ b/src/vnet/devices/netmap/device.c
@@ -105,11 +105,7 @@ netmap_interface_tx (vlib_main_t * vm,
   netmap_if_t *nif = pool_elt_at_index (nm->interfaces, rd->dev_instance);
   int cur_ring;
 
-  if (PREDICT_FALSE (nif->lockp != 0))
-    {
-      while (__sync_lock_test_and_set (nif->lockp, 1))
-        ;
-    }
+  clib_spinlock_lock_if_init (&nif->lockp);
 
   cur_ring = nif->first_tx_ring;
 
@@ -165,8 +161,7 @@ netmap_interface_tx (vlib_main_t * vm,
   if (n_left < frame->n_vectors)
     ioctl (nif->fd, NIOCTXSYNC, NULL);
 
-  if (PREDICT_FALSE (nif->lockp != 0))
-    *nif->lockp = 0;
+  clib_spinlock_unlock_if_init (&nif->lockp);
 
   if (n_left)
     vlib_error_count (vm, node->node_index,
diff --git a/src/vnet/devices/netmap/netmap.c b/src/vnet/devices/netmap/netmap.c
index 3bdb442dda2..09afc7640ad 100644
--- a/src/vnet/devices/netmap/netmap.c
+++ b/src/vnet/devices/netmap/netmap.c
@@ -185,11 +185,7 @@ netmap_create_if (vlib_main_t * vm, u8 * if_name, u8 * hw_addr_set,
   nif->per_interface_next_index = ~0;
 
   if (tm->n_vlib_mains > 1)
-    {
-      nif->lockp = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
-                                           CLIB_CACHE_LINE_BYTES);
-      memset ((void *) nif->lockp, 0, CLIB_CACHE_LINE_BYTES);
-    }
+    clib_spinlock_init (&nif->lockp);
 
   {
     unix_file_t template = { 0 };
diff --git a/src/vnet/devices/netmap/netmap.h b/src/vnet/devices/netmap/netmap.h
index 39a94043c3c..e04f045d6e2 100644
--- a/src/vnet/devices/netmap/netmap.h
+++ b/src/vnet/devices/netmap/netmap.h
@@ -40,10 +40,12 @@
  * SUCH DAMAGE.
  */
 
+#include <vppinfra/lock.h>
+
 typedef struct
 {
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-  volatile u32 *lockp;
+  clib_spinlock_t lockp;
   u8 *host_if_name;
   uword if_index;
   u32 hw_if_index;
diff --git a/src/vppinfra.am b/src/vppinfra.am
index 4b9f0c29447..fed1981e8ec 100644
--- a/src/vppinfra.am
+++ b/src/vppinfra.am
@@ -180,6 +180,7 @@ nobase_include_HEADERS = \
   vppinfra/graph.h \
   vppinfra/hash.h \
   vppinfra/heap.h \
+  vppinfra/lock.h \
   vppinfra/longjmp.h \
   vppinfra/macros.h \
   vppinfra/math.h \
diff --git a/src/vppinfra/lock.h b/src/vppinfra/lock.h
new file mode 100644
index 00000000000..c60ff414612
--- /dev/null
+++ b/src/vppinfra/lock.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2017 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef included_clib_lock_h
+#define included_clib_lock_h
+
+#include <vppinfra/clib.h>
+
+typedef struct
+{
+  CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+  u32 lock;
+#if CLIB_DEBUG > 0
+  pid_t pid;
+  uword cpu_index;
+  void *frame_address;
+#endif
+} *clib_spinlock_t;
+
+static inline void
+clib_spinlock_init (clib_spinlock_t * p)
+{
+  *p = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES, CLIB_CACHE_LINE_BYTES);
+  memset ((void *) *p, 0, CLIB_CACHE_LINE_BYTES);
+}
+
+static inline void
+clib_spinlock_free (clib_spinlock_t * p)
+{
+  if (*p)
+    {
+      clib_mem_free ((void *) *p);
+      *p = 0;
+    }
+}
+
+static_always_inline void
+clib_spinlock_lock (clib_spinlock_t * p)
+{
+  while (__sync_lock_test_and_set (&(*p)->lock, 1))
+#if __x86_64__
+    __builtin_ia32_pause ()
+#endif
+      ;
+#if CLIB_DEBUG > 0
+  (*p)->frame_address = __builtin_frame_address (0);
+  (*p)->pid = getpid ();
+  (*p)->cpu_index = os_get_cpu_number ();
+#endif
+}
+
+static_always_inline void
+clib_spinlock_lock_if_init (clib_spinlock_t * p)
+{
+  if (PREDICT_FALSE (*p != 0))
+    clib_spinlock_lock (p);
+}
+
+static_always_inline void
+clib_spinlock_unlock (clib_spinlock_t * p)
+{
+  (*p)->lock = 0;
+#if CLIB_DEBUG > 0
+  (*p)->frame_address = 0;
+  (*p)->pid = 0;
+  (*p)->cpu_index = 0;
+#endif
+}
+
+static_always_inline void
+clib_spinlock_unlock_if_init (clib_spinlock_t * p)
+{
+  if (PREDICT_FALSE (*p != 0))
+    clib_spinlock_unlock (p);
+}
+
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
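
Outside of a VPP tree, the locking technique in lock.h reduces to GCC's __sync_lock_test_and_set as the atomic acquire, an x86 PAUSE hint inside the spin loop, and a store of zero to release. A freestanding sketch of just that core, with illustrative names that are not part of vppinfra:

#include <stdint.h>

typedef struct
{
  volatile uint32_t lock;
} demo_spinlock_t;	/* illustrative type, not a vppinfra one */

static inline void
demo_spinlock_lock (demo_spinlock_t * s)
{
  /* __sync_lock_test_and_set returns the previous value and acts as
     an acquire barrier; spin until the lock was observed free.  */
  while (__sync_lock_test_and_set (&s->lock, 1))
    {
#if __x86_64__
      __builtin_ia32_pause ();	/* back off politely while spinning */
#endif
    }
}

static inline void
demo_spinlock_unlock (demo_spinlock_t * s)
{
  /* lock.h releases with a plain store of 0; __sync_lock_release is
     the matching GCC builtin and stores 0 with release semantics.  */
  __sync_lock_release (&s->lock);
}

The real clib_spinlock_t additionally keeps the lock word on its own cache line and, in CLIB_DEBUG builds, records the holder's pid, cpu index and frame address.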