author    | Florin Coras <fcoras@cisco.com> | 2019-12-19 16:10:58 -0800
committer | Dave Barach <openvpp@barachs.net> | 2020-02-25 19:18:49 +0000
commit    | f22f4e562e1b922cff036ef628b77fd2d479d015 (patch)
tree      | 4cbc3091a5ce89a73c94685dcee198dd583ca375 /src/vppinfra
parent    | b020806806c0e6c54886cdb4347a5fd1f19504b0 (diff)
svm: refactor fifo
Type: refactor
Switch from a wrapped byte space to a "continuous" one wherein fifo
chunks are appended to the fifo as more data is enqueued and chunks are
removed as data is dequeued.
The fifo is still subject to a maximum size, i.e., a maximum number of bytes
that can be enqueued, so the maximum number of chunks associated with the
fifo is also constrained.
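As a rough mental model, the layout described above can be pictured as a linked list of chunks backing a byte space that only grows. The sketch below is illustrative only; the type and field names are simplified stand-ins, not the actual svm_fifo definitions.

```c
#include <vppinfra/types.h>

/* Illustrative sketch only: simplified stand-ins for the real svm_fifo
 * structures. Head and tail are positions in a byte space that does not
 * wrap; chunks back successive slices of that space. */
typedef struct fifo_chunk_
{
  u32 start_byte;		/* first position covered by this chunk */
  u32 length;			/* bytes of chunk memory */
  struct fifo_chunk_ *next;	/* chunks form a singly linked list */
  u8 data[0];			/* memory handed out by the fifo segment */
} fifo_chunk_t;

typedef struct fifo_sketch_
{
  u32 head;			/* dequeue position */
  u32 tail;			/* enqueue position; tail - head <= size */
  u32 size;			/* max bytes that may be enqueued */
  fifo_chunk_t *head_chunk;	/* oldest chunk, covers head */
  fifo_chunk_t *tail_chunk;	/* newest chunk, covers tail */
} fifo_sketch_t;
```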
When enqueuing data, which must fit within the available free space, if not
enough "supporting" chunk memory is available, the fifo asks the fifo segment
for enough chunk memory to ensure that the write can succeed. To avoid
allocating large numbers of small chunks for small writes, the size of the
requested chunks is, when possible, lower-capped by min_alloc.
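A minimal sketch of that sizing decision, under the assumption that it boils down to the formula below (hypothetical helper name and shape, not the actual VPP code): the request covers at least the unbacked part of the write, is lower-capped by min_alloc, and never exceeds the fifo's maximum size.

```c
#include <vppinfra/clib.h>

/* Hypothetical helper: called only when the write does not fit in the
 * chunk memory already linked to the fifo, i.e., write_len > bytes_backed. */
static u32
chunk_alloc_size (u32 write_len, u32 bytes_backed, u32 max_size, u32 min_alloc)
{
  u32 needed = write_len - bytes_backed;	/* bytes still missing backing */
  u32 request = clib_max (needed, min_alloc);	/* lower cap by min_alloc */
  return clib_min (request, max_size - bytes_backed);	/* stay within max size */
}
```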
When dequeuing data, all chunks that have been completely drained, i.e., the
head has moved past their last byte, are unlinked from the fifo and returned
to the fifo segment. The one exception is the last chunk, which is never
unlinked.
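A sketch of that cleanup, reusing the illustrative types from the earlier sketch (again an assumption about the shape of the logic, not the actual code): fully drained chunks are unlinked and handed back, and the walk stops before the last chunk.

```c
/* Unlink chunks whose bytes all lie behind the head; the last chunk is
 * never unlinked because the next-pointer check stops the walk there. */
static fifo_chunk_t *
collect_drained_chunks (fifo_sketch_t * f)
{
  fifo_chunk_t *c = f->head_chunk, *freed = 0, *next;

  while (c->next && f->head >= c->start_byte + c->length)
    {
      next = c->next;
      c->next = freed;		/* chain up chunks to hand back */
      freed = c;
      c = next;
    }
  f->head_chunk = c;		/* first not-fully-drained (or last) chunk stays */
  return freed;			/* caller returns these to the fifo segment */
}
```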
Change-Id: I98c1dbd9135fb79650365c7e40c29238b96cd4ee
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'src/vppinfra')
-rw-r--r-- | src/vppinfra/rbtree.c | 30
-rw-r--r-- | src/vppinfra/rbtree.h | 1
2 files changed, 21 insertions, 10 deletions
diff --git a/src/vppinfra/rbtree.c b/src/vppinfra/rbtree.c
index ff86b0f1b5b..f7383cb1d1b 100644
--- a/src/vppinfra/rbtree.c
+++ b/src/vppinfra/rbtree.c
@@ -207,6 +207,7 @@ rb_tree_add_custom (rb_tree_t * rt, u32 key, uword opaque, rb_tree_lt_fn ltfn)
     {
       x = rb_node (rt, xi);
       y = x;
+      ASSERT (z->key != x->key);
       if (ltfn (z->key, x->key))
 	xi = x->left;
       else
@@ -314,8 +315,8 @@ rb_tree_transplant (rb_tree_t * rt, rb_node_t * u, rb_node_t * v)
   v->parent = u->parent;
 }
 
-void
-rb_tree_del_node (rb_tree_t * rt, rb_node_t * z)
+static void
+rb_tree_del_node_internal (rb_tree_t * rt, rb_node_t * z)
 {
   rb_node_color_t y_original_color;
   rb_node_t *x, *y, *yr, *yl, *xp, *w, *wl, *wr;
@@ -441,15 +442,19 @@ rb_tree_del_node (rb_tree_t * rt, rb_node_t * z)
 }
 
 void
+rb_tree_del_node (rb_tree_t * rt, rb_node_t * z)
+{
+  rb_tree_del_node_internal (rt, z);
+  pool_put (rt->nodes, z);
+}
+
+void
 rb_tree_del (rb_tree_t * rt, u32 key)
 {
   rb_node_t *n;
   n = rb_tree_search_subtree (rt, rb_node (rt, rt->root), key);
   if (rb_node_index (rt, n) != RBTREE_TNIL_INDEX)
-    {
-      rb_tree_del_node (rt, n);
-      pool_put (rt->nodes, n);
-    }
+    rb_tree_del_node (rt, n);
 }
 
 void
@@ -458,10 +463,7 @@ rb_tree_del_custom (rb_tree_t * rt, u32 key, rb_tree_lt_fn ltfn)
   rb_node_t *n;
   n = rb_tree_search_subtree_custom (rt, rb_node (rt, rt->root), key, ltfn);
   if (rb_node_index (rt, n) != RBTREE_TNIL_INDEX)
-    {
-      rb_tree_del_node (rt, n);
-      pool_put (rt->nodes, n);
-    }
+    rb_tree_del_node (rt, n);
 }
 
 u32
@@ -490,6 +492,14 @@ rb_tree_init (rb_tree_t * rt)
   tnil->color = RBTREE_BLACK;
 }
 
+int
+rb_tree_is_init (rb_tree_t * rt)
+{
+  if (pool_elts (rt->nodes) == 0)
+    return 0;
+  return 1;
+}
+
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/vppinfra/rbtree.h b/src/vppinfra/rbtree.h
index dde2fbfb836..3ab9a3347a5 100644
--- a/src/vppinfra/rbtree.h
+++ b/src/vppinfra/rbtree.h
@@ -64,6 +64,7 @@ rb_node_t *rb_tree_search_subtree_custom (rb_tree_t * rt, rb_node_t * x,
 					  u32 key, rb_tree_lt_fn ltfn);
 rb_node_t *rb_tree_successor (rb_tree_t * rt, rb_node_t * x);
 rb_node_t *rb_tree_predecessor (rb_tree_t * rt, rb_node_t * x);
+int rb_tree_is_init (rb_tree_t * rt);
 
 static inline rb_node_index_t
 rb_node_index (rb_tree_t * rt, rb_node_t * n)
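For rbtree users (such as the refactored fifo's chunk lookup tree), the behavioral change above is that rb_tree_del_node now both unlinks the node and returns it to the node pool, so callers must not pool_put the node themselves, and rb_tree_is_init reports whether the tree has any nodes allocated. A hedged usage sketch, using only functions visible in this diff:

```c
#include <vppinfra/rbtree.h>

/* Usage sketch based only on the API shown above. */
static void
tree_delete_key (rb_tree_t * rt, u32 key)
{
  rb_node_t *n;

  if (!rb_tree_is_init (rt))	/* no nodes allocated yet (not even tnil) */
    rb_tree_init (rt);

  n = rb_tree_search_subtree (rt, rb_node (rt, rt->root), key);
  if (rb_node_index (rt, n) != RBTREE_TNIL_INDEX)
    rb_tree_del_node (rt, n);	/* unlinks and pool_put's the node; do not
				 * free it again in the caller */
}
```

Centralizing the pool_put inside rb_tree_del_node removes the duplicated unlink-then-free pattern that rb_tree_del and rb_tree_del_custom previously carried.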