From 2f6d7bb93c157b874efb79a2d1583a4c368bf89a Mon Sep 17 00:00:00 2001
From: Sirshak Das
Date: Wed, 3 Oct 2018 22:53:51 +0000
Subject: vppinfra: add atomic macros for __sync builtins

This is the first part of the addition of atomic macros, covering only
the macros for the __sync builtins.

- Based on an earlier patch by Damjan (https://gerrit.fd.io/r/#/c/10729/)

Additionally:
- clib_atomic_release macro added and used where locks were previously
  released with a plain store, i.e. in the absence of any memory barrier.
- clib_atomic_bool_cmp_and_swap added.

Change-Id: Ie4e48c1e184a652018d1d0d87c4be80ddd180a3b
Original-patch-by: Damjan Marion
Signed-off-by: Sirshak Das
Reviewed-by: Honnappa Nagarahalli
Reviewed-by: Ola Liljedahl
Reviewed-by: Steve Capper
---
 src/vnet/classify/vnet_classify.c            |  2 +-
 src/vnet/devices/virtio/vhost_user_output.c  |  4 ++--
 src/vnet/dns/dns.h                           |  2 +-
 src/vnet/gre/gre.c                           |  6 +++---
 src/vnet/interface.h                         |  4 ++--
 src/vnet/ip/ip4_mtrie.c                      | 18 +++++++++---------
 src/vnet/ipfix-export/flow_report_classify.c |  4 ++--
 src/vnet/mfib/mfib_forward.c                 |  4 ++--
 src/vnet/mfib/mfib_signal.c                  |  8 ++++----
 src/vnet/pg/output.c                         |  5 +++--
 src/vnet/session-apps/echo_client.c          |  8 ++++----
 src/vnet/util/refcount.h                     |  4 ++--
 12 files changed, 35 insertions(+), 34 deletions(-)

(limited to 'src/vnet')

diff --git a/src/vnet/classify/vnet_classify.c b/src/vnet/classify/vnet_classify.c
index 9d8694a4993..52cabbc7f42 100644
--- a/src/vnet/classify/vnet_classify.c
+++ b/src/vnet/classify/vnet_classify.c
@@ -444,7 +444,7 @@ vnet_classify_add_del (vnet_classify_table_t * t,
 
   hash >>= t->log2_nbuckets;
 
-  while (__sync_lock_test_and_set (t->writer_lock, 1))
+  while (clib_atomic_test_and_set (t->writer_lock))
     ;
 
   /* First elt in the bucket? */
diff --git a/src/vnet/devices/virtio/vhost_user_output.c b/src/vnet/devices/virtio/vhost_user_output.c
index c77cdb6a2ee..dab8fa5fb3b 100644
--- a/src/vnet/devices/virtio/vhost_user_output.c
+++ b/src/vnet/devices/virtio/vhost_user_output.c
@@ -122,7 +122,7 @@ vhost_user_name_renumber (vnet_hw_interface_t * hi, u32 new_dev_instance)
 static_always_inline int
 vhost_user_vring_try_lock (vhost_user_intf_t * vui, u32 qid)
 {
-  return __sync_lock_test_and_set (vui->vring_locks[qid], 1);
+  return clib_atomic_test_and_set (vui->vring_locks[qid]);
 }
 
 /**
@@ -141,7 +141,7 @@ vhost_user_vring_lock (vhost_user_intf_t * vui, u32 qid)
 static_always_inline void
 vhost_user_vring_unlock (vhost_user_intf_t * vui, u32 qid)
 {
-  *vui->vring_locks[qid] = 0;
+  clib_atomic_release (vui->vring_locks[qid]);
 }
 
 static_always_inline void
diff --git a/src/vnet/dns/dns.h b/src/vnet/dns/dns.h
index f0edd8cf4ba..59a61ed7687 100644
--- a/src/vnet/dns/dns.h
+++ b/src/vnet/dns/dns.h
@@ -187,7 +187,7 @@ dns_cache_lock (dns_main_t * dm)
 {
   if (dm->cache_lock)
     {
-      while (__sync_lock_test_and_set (dm->cache_lock, 1))
+      while (clib_atomic_test_and_set (dm->cache_lock))
        ;
     }
 }
diff --git a/src/vnet/gre/gre.c b/src/vnet/gre/gre.c
index e82befe7918..070c78e8984 100644
--- a/src/vnet/gre/gre.c
+++ b/src/vnet/gre/gre.c
@@ -406,7 +406,7 @@ gre_interface_tx (vlib_main_t * vm,
           /* Encap GRE seq# and ERSPAN type II header */
           vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
           erspan_t2_t *h0 = vlib_buffer_get_current (b0);
-          u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
+          u32 seq_num = clib_atomic_fetch_add (&gt0->gre_sn->seq_num, 1);
           u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
           h0->seq_num = clib_host_to_net_u32 (seq_num);
           h0->t2_u64 = hdr;
@@ -418,7 +418,7 @@ gre_interface_tx (vlib_main_t * vm,
           /* Encap GRE seq# and ERSPAN type II header */
           vlib_buffer_advance (b1, -sizeof (erspan_t2_t));
           erspan_t2_t *h1 = vlib_buffer_get_current (b1);
-          u32 seq_num = clib_smp_atomic_add (&gt1->gre_sn->seq_num, 1);
+          u32 seq_num = clib_atomic_fetch_add (&gt1->gre_sn->seq_num, 1);
           u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
           h1->seq_num = clib_host_to_net_u32 (seq_num);
           h1->t2_u64 = hdr;
@@ -473,7 +473,7 @@ gre_interface_tx (vlib_main_t * vm,
         /* Encap GRE seq# and ERSPAN type II header */
         vlib_buffer_advance (b0, -sizeof (erspan_t2_t));
         erspan_t2_t *h0 = vlib_buffer_get_current (b0);
-        u32 seq_num = clib_smp_atomic_add (&gt0->gre_sn->seq_num, 1);
+        u32 seq_num = clib_atomic_fetch_add (&gt0->gre_sn->seq_num, 1);
         u64 hdr = clib_host_to_net_u64 (ERSPAN_HDR2);
         h0->seq_num = clib_host_to_net_u32 (seq_num);
         h0->t2_u64 = hdr;
diff --git a/src/vnet/interface.h b/src/vnet/interface.h
index 6ca2b0d6cee..7ce6aafdf54 100644
--- a/src/vnet/interface.h
+++ b/src/vnet/interface.h
@@ -872,7 +872,7 @@ static inline void
 vnet_interface_counter_lock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
-    while (__sync_lock_test_and_set (im->sw_if_counter_lock, 1))
+    while (clib_atomic_test_and_set (im->sw_if_counter_lock))
      /* zzzz */ ;
 }
 
@@ -880,7 +880,7 @@ static inline void
 vnet_interface_counter_unlock (vnet_interface_main_t * im)
 {
   if (im->sw_if_counter_lock)
-    *im->sw_if_counter_lock = 0;
+    clib_atomic_release (im->sw_if_counter_lock);
 }
 
 void vnet_pcap_drop_trace_filter_add_del (u32 error_index, int is_add);
diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c
index fbb8a748074..e6425ca703a 100755
--- a/src/vnet/ip/ip4_mtrie.c
+++ b/src/vnet/ip/ip4_mtrie.c
@@ -254,7 +254,7 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
       else if (new_leaf_dst_address_bits >=
                ply->dst_address_bits_of_leaves[i])
         {
-          __sync_val_compare_and_swap (&ply->leaves[i], old_leaf, new_leaf);
+          clib_atomic_cmp_and_swap (&ply->leaves[i], old_leaf, new_leaf);
           ASSERT (ply->leaves[i] == new_leaf);
           ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
           ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
@@ -319,8 +319,8 @@ set_leaf (ip4_fib_mtrie_t * m,
 
               old_ply->dst_address_bits_of_leaves[i] =
                 a->dst_address_length;
-              __sync_val_compare_and_swap (&old_ply->leaves[i], old_leaf,
-                                           new_leaf);
+              clib_atomic_cmp_and_swap (&old_ply->leaves[i], old_leaf,
+                                        new_leaf);
               ASSERT (old_ply->leaves[i] == new_leaf);
 
               old_ply->n_non_empty_leafs +=
@@ -378,8 +378,8 @@ set_leaf (ip4_fib_mtrie_t * m,
           /* Refetch since ply_create may move pool.
            */
           old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
-          __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
-                                       new_leaf);
+          clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
+                                    new_leaf);
           ASSERT (old_ply->leaves[dst_byte] == new_leaf);
           old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
 
@@ -451,8 +451,8 @@ set_root_leaf (ip4_fib_mtrie_t * m,
                * the new one */
               old_ply->dst_address_bits_of_leaves[slot] =
                 a->dst_address_length;
-              __sync_val_compare_and_swap (&old_ply->leaves[slot],
-                                           old_leaf, new_leaf);
+              clib_atomic_cmp_and_swap (&old_ply->leaves[slot],
+                                        old_leaf, new_leaf);
               ASSERT (old_ply->leaves[slot] == new_leaf);
             }
           else
@@ -498,8 +498,8 @@ set_root_leaf (ip4_fib_mtrie_t * m,
                                                   ply_base_len);
           new_ply = get_next_ply_for_leaf (m, new_leaf);
 
-          __sync_val_compare_and_swap (&old_ply->leaves[dst_byte], old_leaf,
-                                       new_leaf);
+          clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
+                                    new_leaf);
           ASSERT (old_ply->leaves[dst_byte] == new_leaf);
           old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
         }
diff --git a/src/vnet/ipfix-export/flow_report_classify.c b/src/vnet/ipfix-export/flow_report_classify.c
index 8fb73fc0867..196cb725ffe 100644
--- a/src/vnet/ipfix-export/flow_report_classify.c
+++ b/src/vnet/ipfix-export/flow_report_classify.c
@@ -197,7 +197,7 @@ ipfix_classify_send_flows (flow_report_main_t * frm,
 
   t = pool_elt_at_index (vcm->tables, table->classify_table_index);
 
-  while (__sync_lock_test_and_set (t->writer_lock, 1))
+  while (clib_atomic_test_and_set (t->writer_lock))
     ;
 
   for (i = 0; i < t->nbuckets; i++)
@@ -385,7 +385,7 @@ flush:
       bi0 = ~0;
     }
 
-  *(t->writer_lock) = 0;
+  clib_atomic_release (t->writer_lock);
 
   return f;
 }
diff --git a/src/vnet/mfib/mfib_forward.c b/src/vnet/mfib/mfib_forward.c
index 3d0e0d47996..4b121324fb6 100644
--- a/src/vnet/mfib/mfib_forward.c
+++ b/src/vnet/mfib/mfib_forward.c
@@ -300,8 +300,8 @@ mfib_forward_itf_signal (vlib_main_t *vm,
     {
         mfib_itf_flags_t old_flags;
 
-        old_flags = __sync_fetch_and_or(&mfi->mfi_flags,
-                                        MFIB_ITF_FLAG_SIGNAL_PRESENT);
+        old_flags = clib_atomic_fetch_or(&mfi->mfi_flags,
+                                         MFIB_ITF_FLAG_SIGNAL_PRESENT);
 
         if (!(old_flags & MFIB_ITF_FLAG_SIGNAL_PRESENT))
         {
diff --git a/src/vnet/mfib/mfib_signal.c b/src/vnet/mfib/mfib_signal.c
index ce9a664c548..176e8ec9114 100644
--- a/src/vnet/mfib/mfib_signal.c
+++ b/src/vnet/mfib/mfib_signal.c
@@ -71,14 +71,14 @@ mfib_signal_module_init (void)
 static inline void
 mfib_signal_lock_aquire (void)
 {
-    while (__sync_lock_test_and_set (&mfib_signal_pending.mip_lock, 1))
+    while (clib_atomic_test_and_set (&mfib_signal_pending.mip_lock))
         ;
 }
 
 static inline void
 mfib_signal_lock_release (void)
 {
-    mfib_signal_pending.mip_lock = 0;
+    clib_atomic_release(&mfib_signal_pending.mip_lock);
 }
 
 #define MFIB_SIGNAL_CRITICAL_SECTION(_body) \
@@ -117,8 +117,8 @@ mfib_signal_send_one (struct vl_api_registration_ *reg,
         mfs = pool_elt_at_index(mfib_signal_pool, si);
         mfi = mfib_itf_get(mfs->mfs_itf);
         mfi->mfi_si = INDEX_INVALID;
-        __sync_fetch_and_and(&mfi->mfi_flags,
-                             ~MFIB_ITF_FLAG_SIGNAL_PRESENT);
+        clib_atomic_fetch_and(&mfi->mfi_flags,
+                              ~MFIB_ITF_FLAG_SIGNAL_PRESENT);
 
         vl_mfib_signal_send_one(reg, context, mfs);
 
diff --git a/src/vnet/pg/output.c b/src/vnet/pg/output.c
index ab57deefd24..016e5b370e5 100644
--- a/src/vnet/pg/output.c
+++ b/src/vnet/pg/output.c
@@ -54,7 +54,7 @@ pg_output (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
   pg_interface_t *pif = pool_elt_at_index (pg->interfaces, rd->dev_instance);
 
   if (PREDICT_FALSE (pif->lockp != 0))
-    while (__sync_lock_test_and_set (pif->lockp, 1))
+    while (clib_atomic_test_and_set (pif->lockp))
       ;
 
   while (n_left > 0)
@@ -82,7 +82,8 @@ pg_output (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * frame)
   vlib_buffer_free (vm, vlib_frame_args (frame), n_buffers);
 
   if (PREDICT_FALSE (pif->lockp != 0))
-    *pif->lockp = 0;
+    clib_atomic_release (pif->lockp);
+
   return n_buffers;
 }
 
diff --git a/src/vnet/session-apps/echo_client.c b/src/vnet/session-apps/echo_client.c
index 1ece0196dde..c3d838d49cc 100644
--- a/src/vnet/session-apps/echo_client.c
+++ b/src/vnet/session-apps/echo_client.c
@@ -263,8 +263,8 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
         {
           stream_session_t *s;
 
-          __sync_fetch_and_add (&ecm->tx_total, sp->bytes_sent);
-          __sync_fetch_and_add (&ecm->rx_total, sp->bytes_received);
+          clib_atomic_fetch_add (&ecm->tx_total, sp->bytes_sent);
+          clib_atomic_fetch_add (&ecm->rx_total, sp->bytes_received);
 
           s = session_get_from_handle_if_valid (sp->vpp_session_handle);
           if (s)
@@ -276,7 +276,7 @@ echo_client_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
 
           vec_delete (connections_this_batch, 1, i);
           i--;
-          __sync_fetch_and_add (&ecm->ready_connections, -1);
+          clib_atomic_fetch_add (&ecm->ready_connections, -1);
         }
       else
         {
@@ -408,7 +408,7 @@ echo_clients_session_connected_callback (u32 app_index, u32 api_context,
     }
 
   vec_add1 (ecm->connection_index_by_thread[thread_index], session_index);
-  __sync_fetch_and_add (&ecm->ready_connections, 1);
+  clib_atomic_fetch_add (&ecm->ready_connections, 1);
   if (ecm->ready_connections == ecm->expected_connections)
     {
       ecm->run_test = ECHO_CLIENTS_RUNNING;
diff --git a/src/vnet/util/refcount.h b/src/vnet/util/refcount.h
index ea92148dafa..873ab6def69 100644
--- a/src/vnet/util/refcount.h
+++ b/src/vnet/util/refcount.h
@@ -52,14 +52,14 @@ typedef struct {
 static_always_inline
 void vlib_refcount_lock (volatile u32 *counter_lock)
 {
-  while (__sync_lock_test_and_set (counter_lock, 1))
+  while (clib_atomic_test_and_set (counter_lock))
     ;
 }
 
 static_always_inline
 void vlib_refcount_unlock (volatile u32 *counter_lock)
 {
-  *counter_lock = 0;
+  clib_atomic_release(counter_lock);
 }
 
 void __vlib_refcount_resize(vlib_refcount_per_cpu_t *per_cpu, u32 size);
--
cgit 1.2.3-korg
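
Note on the wrappers used above: the vppinfra half of this change (the header
that actually defines the clib_atomic_* macros) is not part of this listing,
which is limited to 'src/vnet'. The sketch below is only an illustration of
the mapping the conversions assume, expressed in terms of the GCC __sync
builtins named in the subject line; the file name, exact spellings, and
parenthesization are assumptions, not the committed header.

/* sketch_clib_atomics.h - illustrative mapping only; the real macros live
 * in the vppinfra atomics header added by this commit and may differ. */
#ifndef included_sketch_clib_atomics_h
#define included_sketch_clib_atomics_h

/* Spinlock-style primitives: test_and_set stores 1 with acquire semantics
 * and returns the previous value; release clears the location with release
 * semantics instead of a plain, unordered store. */
#define clib_atomic_test_and_set(a) __sync_lock_test_and_set ((a), 1)
#define clib_atomic_release(a) __sync_lock_release ((a))

/* Fetch-and-op wrappers used above for counters and flag words. */
#define clib_atomic_fetch_add(a, b) __sync_fetch_and_add ((a), (b))
#define clib_atomic_fetch_or(a, b) __sync_fetch_and_or ((a), (b))
#define clib_atomic_fetch_and(a, b) __sync_fetch_and_and ((a), (b))

/* Compare-and-swap: the plain form returns the value observed at *a, while
 * the _bool_ form (added by this commit, not used in the src/vnet hunks)
 * returns whether the swap took place. */
#define clib_atomic_cmp_and_swap(a, old, new) \
  __sync_val_compare_and_swap ((a), (old), (new))
#define clib_atomic_bool_cmp_and_swap(a, old, new) \
  __sync_bool_compare_and_swap ((a), (old), (new))

#endif /* included_sketch_clib_atomics_h */

Under that reading, the lock pattern in the diff is: spin on
while (clib_atomic_test_and_set (lock)) ; to acquire, and call
clib_atomic_release (lock) to drop the lock, which is what the commit message
means by replacing releases that previously had no memory barrier at all.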