author     Sirshak Das <sirshak.das@arm.com>         2018-10-12 09:38:27 -0500
committer  Damjan Marion <dmarion@me.com>            2018-11-05 12:31:43 +0000
commit     5b718d5cf8bd8df82e37825e0cdb6f986d9a784a (patch)
tree       28399875bc4fe0ead3d3c78d649b4bbdd798d78a
parent     86327be9751ad54cb24d16c161cacb001dc20772 (diff)
Enable atomic swap and store macros with acquire and release ordering
Add atomic swap and store macros with acquire and release ordering
respectively. The variable in question is interrupt_pending, which input
nodes use as a guard variable before processing the device queue.

The atomic swap uses acquire ordering, since reads and writes that follow
it in program order must not be reordered before the swap. The atomic
store uses release ordering, since the node is added to the pending list
after the store.

Change-Id: I1be49e91a15c58d0bf21ff5ba1bd37d5d7d12f7a
Original-patch-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Sirshak Das <sirshak.das@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
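For illustration only, below is a minimal, self-contained sketch of the
guard-variable pattern this change relies on, written against the raw GCC
__atomic builtins that the new clib macros wrap. The names set_pending()
and consume_pending() are hypothetical and do not appear in the patch.

/*
 * Illustrative sketch only, not part of the patch.  Uses the raw GCC
 * builtins that clib_atomic_store_rel_n / clib_atomic_swap_acq_n wrap.
 */
#include <stdint.h>

static uint32_t interrupt_pending;

/* Producer side: publish work first, then set the guard with release
 * ordering, so the work is visible to whoever observes the flag. */
static inline void
set_pending (void)
{
  __atomic_store_n (&interrupt_pending, 1, __ATOMIC_RELEASE);
}

/* Consumer side: take-and-clear the guard with acquire ordering, so the
 * queue reads that follow cannot be hoisted above the swap. */
static inline int
consume_pending (void)
{
  return (int) __atomic_exchange_n (&interrupt_pending, 0, __ATOMIC_ACQUIRE);
}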
-rw-r--r--  src/vnet/devices/devices.h                  | 10
-rw-r--r--  src/vnet/devices/virtio/vhost_user_input.c  |  4
-rw-r--r--  src/vppinfra/atomics.h                      |  5
-rw-r--r--  src/vppinfra/smp.h                          |  2
4 files changed, 12 insertions(+), 9 deletions(-)
diff --git a/src/vnet/devices/devices.h b/src/vnet/devices/devices.h
index ac5be229b76..cc960c98f23 100644
--- a/src/vnet/devices/devices.h
+++ b/src/vnet/devices/devices.h
@@ -148,16 +148,20 @@ vnet_device_input_set_interrupt_pending (vnet_main_t * vnm, u32 hw_if_index,
rt = vlib_node_get_runtime_data (vm, hw->input_node_index);
idx = hw->dq_runtime_index_by_queue[queue_id];
dq = vec_elt_at_index (rt->devices_and_queues, idx);
- dq->interrupt_pending = 1;
+
+ clib_atomic_store_rel_n (&(dq->interrupt_pending), 1);
vlib_node_set_interrupt_pending (vm, hw->input_node_index);
}
+/*
+ * Acquire RMW Access
+ * Paired with Release Store in vnet_device_input_set_interrupt_pending
+ */
#define foreach_device_and_queue(var,vec) \
for (var = (vec); var < vec_end (vec); var++) \
if ((var->mode == VNET_HW_INTERFACE_RX_MODE_POLLING) \
- || clib_smp_swap (&((var)->interrupt_pending), 0))
-
+ || clib_atomic_swap_acq_n (&((var)->interrupt_pending), 0))
#endif /* included_vnet_vnet_device_h */
diff --git a/src/vnet/devices/virtio/vhost_user_input.c b/src/vnet/devices/virtio/vhost_user_input.c
index 291d687ab6a..8e88695957b 100644
--- a/src/vnet/devices/virtio/vhost_user_input.c
+++ b/src/vnet/devices/virtio/vhost_user_input.c
@@ -641,8 +641,8 @@ VLIB_NODE_FN (vhost_user_input_node) (vlib_main_t * vm,
vec_foreach (dq, rt->devices_and_queues)
{
- if (clib_smp_swap (&dq->interrupt_pending, 0) ||
- (node->state == VLIB_NODE_STATE_POLLING))
+ if ((node->state == VLIB_NODE_STATE_POLLING) ||
+ clib_atomic_swap_acq_n (&dq->interrupt_pending, 0))
{
vui =
pool_elt_at_index (vum->vhost_user_interfaces, dq->dev_instance);
diff --git a/src/vppinfra/atomics.h b/src/vppinfra/atomics.h
index 8ddf13801df..420ae837fc0 100644
--- a/src/vppinfra/atomics.h
+++ b/src/vppinfra/atomics.h
@@ -37,9 +37,10 @@
#define clib_atomic_cmp_and_swap(addr,old,new) __sync_val_compare_and_swap(addr, old, new)
#define clib_atomic_bool_cmp_and_swap(addr,old,new) __sync_bool_compare_and_swap(addr, old, new)
-/*Accquire Barrier*/
#define clib_atomic_test_and_set(a) __sync_lock_test_and_set(a, 1)
-/*Release Barrier*/
#define clib_atomic_release(a) __sync_lock_release(a)
+#define clib_atomic_store_rel_n(a, b) __atomic_store_n ((a), (b), __ATOMIC_RELEASE)
+#define clib_atomic_swap_acq_n(a, b) __atomic_exchange_n ((a), (b), __ATOMIC_ACQUIRE)
+
#endif /* included_clib_atomics_h */
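A hedged usage sketch of the two macros added above, paired the same way
the devices.h change pairs them. The helpers mark_pending() and
take_pending() are illustrative assumptions, not code from the patch, and
the sketch assumes the vppinfra headers for the u32 type and the
clib_atomic_* macros.

/* Illustrative helpers only; assume <vppinfra/clib.h> and
 * <vppinfra/atomics.h> are included. */
static inline void
mark_pending (u32 * flag)
{
  /* Release store: pairs with the acquire swap in take_pending (). */
  clib_atomic_store_rel_n (flag, 1);
}

static inline u32
take_pending (u32 * flag)
{
  /* Acquire swap: returns the previous value and clears the flag. */
  return clib_atomic_swap_acq_n (flag, 0);
}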
diff --git a/src/vppinfra/smp.h b/src/vppinfra/smp.h
index 7146e51ac77..2b3ed548dfa 100644
--- a/src/vppinfra/smp.h
+++ b/src/vppinfra/smp.h
@@ -41,8 +41,6 @@
#include <vppinfra/cache.h>
#include <vppinfra/os.h> /* for os_panic */
-#define clib_smp_swap(addr,new) __sync_lock_test_and_set(addr,new)
-
#if defined (i386) || defined (__x86_64__)
#define clib_smp_pause() do { asm volatile ("pause"); } while (0)
#elif defined (__aarch64__) || defined (__arm__)