author     Sirshak Das <sirshak.das@arm.com>        2018-10-03 22:53:51 +0000
committer  Damjan Marion <dmarion@me.com>           2018-10-19 07:10:47 +0000
commit     2f6d7bb93c157b874efb79a2d1583a4c368bf89a (patch)
tree       05dc2867c598cbb8d711f074b4b0eb62dd464f41 /src/vlib
parent     bf3443b0f852f5a4c551d12f926defbd047f2161 (diff)
vppinfra: add atomic macros for __sync builtins
This is the first part of the addition of atomic macros; it covers only
the macros for the __sync builtins.
- Based on earlier patch by Damjan (https://gerrit.fd.io/r/#/c/10729/)
Additionally:
- clib_atomic_release macro added and used where no other memory
  barrier is present.
- clib_atomic_bool_cmp_and_swap macro added.
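
For orientation, the new names wrap the GCC __sync builtins one-for-one.
The diffstat below is limited to src/vlib, so the definitions themselves
(added elsewhere in the tree, presumably under src/vppinfra) are not
visible here; the following is only a sketch of the mapping, inferred
from the call-site substitutions in the diff:

    /* Illustrative mapping only, inferred from the converted call
     * sites; the actual definitions are not part of this diffstat. */
    #define clib_atomic_fetch_add(a, b) __sync_fetch_and_add (a, b)
    #define clib_atomic_add_fetch(a, b) __sync_add_and_fetch (a, b)
    #define clib_atomic_test_and_set(a) __sync_lock_test_and_set (a, 1)
    #define clib_atomic_release(a) __sync_lock_release (a)
    #define clib_atomic_bool_cmp_and_swap(addr, old, new) \
      __sync_bool_compare_and_swap (addr, old, new)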
Change-Id: Ie4e48c1e184a652018d1d0d87c4be80ddd180a3b
Original-patch-by: Damjan Marion <damarion@cisco.com>
Signed-off-by: Sirshak Das <sirshak.das@arm.com>
Reviewed-by: Honnappa Nagarahalli <honnappa.nagarahalli@arm.com>
Reviewed-by: Ola Liljedahl <ola.liljedahl@arm.com>
Reviewed-by: Steve Capper <steve.capper@arm.com>
Diffstat (limited to 'src/vlib')
-rw-r--r--  src/vlib/buffer_funcs.h |  6
-rw-r--r--  src/vlib/threads.c      | 26
-rw-r--r--  src/vlib/threads.h      | 10
-rw-r--r--  src/vlib/unix/cj.c      |  2

4 files changed, 14 insertions, 30 deletions
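
Two of the call sites in the diff that follows (vlib_frame_queue_enqueue
and cj_log) use the atomic add to reserve a ring slot: each producer's
add-and-fetch returns a unique tail index, which is masked into a
power-of-two ring. A minimal, self-contained sketch of that idiom (all
names here are hypothetical, and the raw builtin stands in for
clib_atomic_add_fetch):

    #include <stdint.h>

    typedef struct
    {
      volatile int64_t tail;   /* monotonically increasing claim counter */
      int64_t num_records;     /* ring size; must be a power of two */
      int32_t records[64];
    } example_ring_t;

    static int32_t *
    example_reserve_slot (example_ring_t * r)
    {
      /* clib_atomic_add_fetch () wraps __sync_add_and_fetch (): every
       * caller observes a distinct new_tail, so slot claims never
       * collide even under concurrent producers. */
      int64_t new_tail = __sync_add_and_fetch (&r->tail, 1);
      return &r->records[new_tail & (r->num_records - 1)];
    }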
diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h
index d8abdf31d79..438bf7e5ee4 100644
--- a/src/vlib/buffer_funcs.h
+++ b/src/vlib/buffer_funcs.h
@@ -923,7 +923,7 @@ vlib_buffer_attach_clone (vlib_main_t * vm, vlib_buffer_t * head,
     tail->total_length_not_including_first_buffer;
 
 next_segment:
-  __sync_add_and_fetch (&tail->n_add_refs, 1);
+  clib_atomic_add_fetch (&tail->n_add_refs, 1);
 
   if (tail->flags & VLIB_BUFFER_NEXT_PRESENT)
     {
@@ -1153,7 +1153,7 @@ vlib_validate_buffer_in_use (vlib_buffer_t * b, u32 expected)
 
   oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
 
-  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
+  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
     ;
 
   p = hash_get (vlib_buffer_state_validation_hash, b);
@@ -1196,7 +1196,7 @@ vlib_validate_buffer_set_in_use (vlib_buffer_t * b, u32 expected)
 
   oldheap = clib_mem_set_heap (vlib_buffer_state_heap);
 
-  while (__sync_lock_test_and_set (vlib_buffer_state_validation_lock, 1))
+  while (clib_atomic_test_and_set (vlib_buffer_state_validation_lock))
     ;
 
   hash_set (vlib_buffer_state_validation_hash, b, expected);
diff --git a/src/vlib/threads.c b/src/vlib/threads.c
index 981209bf2e7..7f407e92478 100644
--- a/src/vlib/threads.c
+++ b/src/vlib/threads.c
@@ -516,7 +516,7 @@ vlib_frame_queue_enqueue (vlib_main_t * vm, u32 node_runtime_index,
 
   ASSERT (fq);
 
-  new_tail = __sync_add_and_fetch (&fq->tail, 1);
+  new_tail = clib_atomic_add_fetch (&fq->tail, 1);
 
   /* Wait until a ring slot is available */
   while (new_tail >= fq->head + fq->nelts)
@@ -576,12 +576,12 @@ vlib_worker_thread_init (vlib_worker_thread_t * w)
     {
       /* Initial barrier sync, for both worker and i/o threads */
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
 
       while (*vlib_worker_threads->wait_at_barrier)
         ;
 
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
     }
 }
@@ -1310,22 +1310,6 @@ cpu_config (vlib_main_t * vm, unformat_input_t * input)
 
 VLIB_EARLY_CONFIG_FUNCTION (cpu_config, "cpu");
 
-#if !defined (__x86_64__) && !defined (__i386__) && !defined (__aarch64__) && !defined (__powerpc64__) && !defined(__arm__)
-void
-__sync_fetch_and_add_8 (void)
-{
-  fformat (stderr, "%s called\n", __FUNCTION__);
-  abort ();
-}
-
-void
-__sync_add_and_fetch_8 (void)
-{
-  fformat (stderr, "%s called\n", __FUNCTION__);
-  abort ();
-}
-#endif
-
 void vnet_main_fixup (vlib_fork_fixup_t which) __attribute__ ((weak));
 void
 vnet_main_fixup (vlib_fork_fixup_t which)
@@ -1493,8 +1477,8 @@ vlib_worker_thread_barrier_release (vlib_main_t * vm)
 
       /* Do per thread rebuilds in parallel */
       refork_needed = 1;
-      clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
-                           (vec_len (vlib_mains) - 1));
+      clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+                             (vec_len (vlib_mains) - 1));
       now = vlib_time_now (vm);
       t_update_main = now - vm->barrier_epoch;
     }
diff --git a/src/vlib/threads.h b/src/vlib/threads.h
index bb7c164c2e3..0e9cba52103 100644
--- a/src/vlib/threads.h
+++ b/src/vlib/threads.h
@@ -414,7 +414,7 @@ vlib_worker_thread_barrier_check (void)
           ed->thread_index = thread_index;
         }
 
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, 1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, 1);
       if (CLIB_DEBUG > 0)
         {
           vm = vlib_get_main ();
@@ -424,7 +424,7 @@
         ;
       if (CLIB_DEBUG > 0)
         vm->parked_at_barrier = 0;
-      clib_smp_atomic_add (vlib_worker_threads->workers_at_barrier, -1);
+      clib_atomic_fetch_add (vlib_worker_threads->workers_at_barrier, -1);
 
       if (PREDICT_FALSE (*vlib_worker_threads->node_reforks_required))
         {
@@ -450,8 +450,8 @@ vlib_worker_thread_barrier_check (void)
             }
 
           vlib_worker_thread_node_refork ();
-          clib_smp_atomic_add (vlib_worker_threads->node_reforks_required,
-                               -1);
+          clib_atomic_fetch_add (vlib_worker_threads->node_reforks_required,
+                                 -1);
           while (*vlib_worker_threads->node_reforks_required)
             ;
         }
@@ -519,7 +519,7 @@ vlib_get_frame_queue_elt (u32 frame_queue_index, u32 index)
   fq = fqm->vlib_frame_queues[index];
   ASSERT (fq);
 
-  new_tail = __sync_add_and_fetch (&fq->tail, 1);
+  new_tail = clib_atomic_add_fetch (&fq->tail, 1);
 
   /* Wait until a ring slot is available */
   while (new_tail >= fq->head_hint + fq->nelts)
diff --git a/src/vlib/unix/cj.c b/src/vlib/unix/cj.c
index 0232ea2d690..7757146c7a7 100644
--- a/src/vlib/unix/cj.c
+++ b/src/vlib/unix/cj.c
@@ -44,7 +44,7 @@ cj_log (u32 type, void *data0, void *data1)
   if (cjm->enable == 0)
     return;
 
-  new_tail = __sync_add_and_fetch (&cjm->tail, 1);
+  new_tail = clib_atomic_add_fetch (&cjm->tail, 1);
 
   r = (cj_record_t *) & (cjm->records[new_tail & (cjm->num_records - 1)]);
   r->time = vlib_time_now (cjm->vlib_main);
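
For reference, the vlib_validate_buffer_* changes above follow the
classic test-and-set spin lock: spin until test-and-set observes the
lock free, do the protected work, then release. A minimal sketch of that
pattern using the same GCC builtins (the lock variable and function are
hypothetical; clib_atomic_test_and_set / clib_atomic_release are the
wrappers this patch introduces around them):

    static volatile int example_lock;

    static void
    example_locked_work (void)
    {
      /* clib_atomic_test_and_set (a) wraps __sync_lock_test_and_set (a, 1):
       * it returns the previous value with acquire semantics, so a
       * nonzero result means another thread still holds the lock. */
      while (__sync_lock_test_and_set (&example_lock, 1))
        ;                       /* busy-wait until the lock is free */

      /* ... critical section ... */

      /* clib_atomic_release (a) wraps __sync_lock_release (a): it stores
       * 0 with release semantics, publishing the writes made above. */
      __sync_lock_release (&example_lock);
    }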