author     jaszha03 <jason.zhang2@arm.com>          2019-09-27 12:52:18 -0500
committer  Neale Ranns <nranns@cisco.com>           2019-10-29 17:33:16 +0000
commit     ee7437669c542a9497f1c3a5dde7af19b7244e35 (patch)
tree       b03f1babcefa8eea8d8e18797b0cfe5b6f610219
parent     04c0130f588eb11114c17d925a7a928c8f900b9b (diff)
ip: refactor ip4_mtrie to use atomic store-release
ip4_mtrie used a full-memory-barrier compare-and-swap in set_leaf () and
set_root_leaf () even though only one thread updates the trie. Replace those
compare-and-swap calls with an atomic store-release.

Type: refactor
Change-Id: Ic6e3c84480697915541acd16dcc630d1c436137d
Signed-off-by: Jason Zhang <jason.zhang2@arm.com>
Reviewed-by: Lijian Zhang <Lijian.Zhang@arm.com>
Reviewed-by: Ruifeng Wang <ruifeng.wang@arm.com>
 src/vnet/ip/ip4_mtrie.c | 19 +++++--------------
 1 file changed, 5 insertions(+), 14 deletions(-)
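
The change relies on a single-writer / multi-reader pattern: only the main
thread updates the trie, so publishing a new leaf needs release semantics
only, while data-path readers pair it with an acquire load. The sketch below
illustrates that pattern in isolation using the GCC/Clang __atomic builtins.
It assumes clib_atomic_store_rel_n wraps __atomic_store_n (..., __ATOMIC_RELEASE);
the leaf_publish () and leaf_lookup () helpers are hypothetical and are not
part of VPP or ip4_mtrie.c.

/*
 * Minimal single-writer publication sketch (not VPP code). Assumes the
 * clib_atomic_store_rel_n macro used in this patch maps to
 * __atomic_store_n (..., __ATOMIC_RELEASE); leaf_publish () and
 * leaf_lookup () are illustrative helpers only.
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t leaf_t;

/* Writer side: exactly one thread ever stores into the slot, so a
 * store-release is enough to publish the new value; no compare-and-swap
 * (and no full memory barrier) is required. */
static void
leaf_publish (leaf_t * slot, leaf_t new_leaf)
{
  __atomic_store_n (slot, new_leaf, __ATOMIC_RELEASE);
}

/* Reader side: forwarding threads pair the release store with an
 * acquire load, so everything written before leaf_publish () is visible
 * once the new leaf value is observed. */
static leaf_t
leaf_lookup (const leaf_t * slot)
{
  return __atomic_load_n (slot, __ATOMIC_ACQUIRE);
}

int
main (void)
{
  leaf_t slot = 0;

  leaf_publish (&slot, 42);
  printf ("leaf = %u\n", leaf_lookup (&slot));
  return 0;
}

The replaced compare-and-swap performed a read-modify-write with a full
barrier; with a single writer the read side of that operation buys nothing,
and on weakly ordered CPUs a plain store-release is typically cheaper, which
is the motivation stated in the commit message above.
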
diff --git a/src/vnet/ip/ip4_mtrie.c b/src/vnet/ip/ip4_mtrie.c
index be79883dde0..380ca324b64 100755
--- a/src/vnet/ip/ip4_mtrie.c
+++ b/src/vnet/ip/ip4_mtrie.c
@@ -254,8 +254,7 @@ set_ply_with_more_specific_leaf (ip4_fib_mtrie_t * m,
else if (new_leaf_dst_address_bits >=
ply->dst_address_bits_of_leaves[i])
{
- clib_atomic_cmp_and_swap (&ply->leaves[i], old_leaf, new_leaf);
- ASSERT (ply->leaves[i] == new_leaf);
+ clib_atomic_store_rel_n (&ply->leaves[i], new_leaf);
ply->dst_address_bits_of_leaves[i] = new_leaf_dst_address_bits;
ply->n_non_empty_leafs += ip4_fib_mtrie_leaf_is_non_empty (ply, i);
}
@@ -319,9 +318,7 @@ set_leaf (ip4_fib_mtrie_t * m,
old_ply->dst_address_bits_of_leaves[i] =
a->dst_address_length;
- clib_atomic_cmp_and_swap (&old_ply->leaves[i], old_leaf,
- new_leaf);
- ASSERT (old_ply->leaves[i] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[i], new_leaf);
old_ply->n_non_empty_leafs +=
ip4_fib_mtrie_leaf_is_non_empty (old_ply, i);
@@ -378,9 +375,7 @@ set_leaf (ip4_fib_mtrie_t * m,
/* Refetch since ply_create may move pool. */
old_ply = pool_elt_at_index (ip4_ply_pool, old_ply_index);
- clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
- new_leaf);
- ASSERT (old_ply->leaves[dst_byte] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
old_ply->n_non_empty_leafs +=
@@ -451,9 +446,7 @@ set_root_leaf (ip4_fib_mtrie_t * m,
* the new one */
old_ply->dst_address_bits_of_leaves[slot] =
a->dst_address_length;
- clib_atomic_cmp_and_swap (&old_ply->leaves[slot],
- old_leaf, new_leaf);
- ASSERT (old_ply->leaves[slot] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[slot], new_leaf);
}
else
{
@@ -498,9 +491,7 @@ set_root_leaf (ip4_fib_mtrie_t * m,
ply_base_len);
new_ply = get_next_ply_for_leaf (m, new_leaf);
- clib_atomic_cmp_and_swap (&old_ply->leaves[dst_byte], old_leaf,
- new_leaf);
- ASSERT (old_ply->leaves[dst_byte] == new_leaf);
+ clib_atomic_store_rel_n (&old_ply->leaves[dst_byte], new_leaf);
old_ply->dst_address_bits_of_leaves[dst_byte] = ply_base_len;
}
else