diff options
author | Steven <sluong@cisco.com> | 2018-01-29 20:09:09 -0800 |
---|---|---|
committer | Damjan Marion <dmarion.lists@gmail.com> | 2018-01-31 17:43:32 +0000 |
commit | 899a84b302da2c266c1de7fd962965120cf50d60 (patch) | |
tree | c74d3440f3c1347810c1f49108e2059ff4a6a420 | |
parent | 8269d3dbe70adaae60806d5d84e35f25d23193c5 (diff) |
vlib: allocating buffers on thread-x and freeing them on thread-y causes
a crash on debug image (VPP-1151)
In a debug image, there is extra code to validate the buffer when it is
freed. It uses a hash table to look up the buffer index, with a spinlock
held to prevent contention. However, there is one spinlock per worker
thread, so allocating a buffer on thread-x and freeing the same buffer
on thread-y causes the validation to fail on thread-y. The fix is to
have only one spinlock, stored in vlib_global_main.
Change-Id: Ic383846cefe84a3e262255afcf82276742f0f62e
Signed-off-by: Steven <sluong@cisco.com>
(cherry picked from commit a7effa1b072463f12305a474f082aeaffb7ada4b)
-rw-r--r-- | src/plugins/ixge/ixge.c | 18 | ||||
-rw-r--r-- | src/vlib/buffer.c | 11 | ||||
-rw-r--r-- | src/vlib/buffer_funcs.h | 10 | ||||
-rw-r--r-- | src/vnet/replication.c | 3 |
4 files changed, 17 insertions, 25 deletions
diff --git a/src/plugins/ixge/ixge.c b/src/plugins/ixge/ixge.c index 62f484e6cca..05862812eb8 100644 --- a/src/plugins/ixge/ixge.c +++ b/src/plugins/ixge/ixge.c @@ -1462,14 +1462,10 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm, to_add -= 2; #if 0 - ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == - vlib_buffer_is_known (vm, bi0)); - ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == - vlib_buffer_is_known (vm, bi1)); - ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == - vlib_buffer_is_known (vm, fi0)); - ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == - vlib_buffer_is_known (vm, fi1)); + ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0)); + ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi1)); + ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0)); + ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi1)); #endif b0 = vlib_get_buffer (vm, bi0); @@ -1680,10 +1676,8 @@ ixge_rx_queue_no_wrap (ixge_main_t * xm, to_add -= 1; #if 0 - ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == - vlib_buffer_is_known (vm, bi0)); - ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == - vlib_buffer_is_known (vm, fi0)); + ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (bi0)); + ASSERT (VLIB_BUFFER_KNOWN_ALLOCATED == vlib_buffer_is_known (fi0)); #endif b0 = vlib_get_buffer (vm, bi0); diff --git a/src/vlib/buffer.c b/src/vlib/buffer.c index 880cd95a067..400488d0352 100644 --- a/src/vlib/buffer.c +++ b/src/vlib/buffer.c @@ -185,7 +185,7 @@ vlib_validate_buffer_helper (vlib_main_t * vm, vlib_buffer_known_state_t k; u8 *msg, *result; - k = vlib_buffer_is_known (vm, b->next_buffer); + k = vlib_buffer_is_known (b->next_buffer); if (k != VLIB_BUFFER_KNOWN_ALLOCATED) return format (0, "next 0x%x: %U", b->next_buffer, format_vlib_buffer_known_state, k); @@ -243,7 +243,7 @@ vlib_validate_buffers (vlib_main_t * vm, goto done; } - k = vlib_buffer_is_known (vm, bi); + k = vlib_buffer_is_known (bi); if (k != known_state) { msg = format (0, "is %U; expected %U", @@ -317,7 +317,7 @@ 
vlib_buffer_validate_alloc_free (vlib_main_t * vm, bi = b[0]; b += 1; - known = vlib_buffer_is_known (vm, bi); + known = vlib_buffer_is_known (bi); if (known != expected_state) { ASSERT (0); @@ -328,8 +328,7 @@ vlib_buffer_validate_alloc_free (vlib_main_t * vm, } vlib_buffer_set_known_state - (vm, bi, - is_free ? VLIB_BUFFER_KNOWN_FREE : VLIB_BUFFER_KNOWN_ALLOCATED); + (bi, is_free ? VLIB_BUFFER_KNOWN_FREE : VLIB_BUFFER_KNOWN_ALLOCATED); } } @@ -580,7 +579,7 @@ vlib_buffer_fill_free_list_internal (vlib_main_t * vm, bi[i] = vlib_get_buffer_index (vm, b); if (CLIB_DEBUG > 0) - vlib_buffer_set_known_state (vm, bi[i], VLIB_BUFFER_KNOWN_FREE); + vlib_buffer_set_known_state (bi[i], VLIB_BUFFER_KNOWN_FREE); b = vlib_buffer_next_contiguous (b, fl->n_data_bytes); } diff --git a/src/vlib/buffer_funcs.h b/src/vlib/buffer_funcs.h index 06cc6dac61e..4831eb5ebac 100644 --- a/src/vlib/buffer_funcs.h +++ b/src/vlib/buffer_funcs.h @@ -222,9 +222,9 @@ void vlib_buffer_validate_alloc_free (vlib_main_t * vm, u32 * buffers, expected_state); always_inline vlib_buffer_known_state_t -vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index) +vlib_buffer_is_known (u32 buffer_index) { - vlib_buffer_main_t *bm = vm->buffer_main; + vlib_buffer_main_t *bm = vlib_global_main.buffer_main; clib_spinlock_lock (&bm->buffer_known_hash_lockp); uword *p = hash_get (bm->buffer_known_hash, buffer_index); @@ -233,11 +233,11 @@ vlib_buffer_is_known (vlib_main_t * vm, u32 buffer_index) } always_inline void -vlib_buffer_set_known_state (vlib_main_t * vm, - u32 buffer_index, +vlib_buffer_set_known_state (u32 buffer_index, vlib_buffer_known_state_t state) { - vlib_buffer_main_t *bm = vm->buffer_main; + vlib_buffer_main_t *bm = vlib_global_main.buffer_main; + clib_spinlock_lock (&bm->buffer_known_hash_lockp); hash_set (bm->buffer_known_hash, buffer_index, state); clib_spinlock_unlock (&bm->buffer_known_hash_lockp); diff --git a/src/vnet/replication.c b/src/vnet/replication.c index 5a8a0fe63b1..217aa76a4d2 
100644 --- a/src/vnet/replication.c +++ b/src/vnet/replication.c @@ -220,8 +220,7 @@ replication_recycle_callback (vlib_main_t * vm, vlib_buffer_free_list_t * fl) #if (CLIB_DEBUG > 0) if (vm->buffer_main->callbacks_registered == 0) - vlib_buffer_set_known_state (vm, bi0, - VLIB_BUFFER_KNOWN_ALLOCATED); + vlib_buffer_set_known_state (bi0, VLIB_BUFFER_KNOWN_ALLOCATED); #endif /* If buffer is traced, mark frame as traced */ |