author | Florin Coras <fcoras@cisco.com> | 2019-12-19 16:10:58 -0800
---|---|---
committer | Dave Barach <openvpp@barachs.net> | 2020-02-25 19:18:49 +0000
commit | f22f4e562e1b922cff036ef628b77fd2d479d015 (patch) |
tree | 4cbc3091a5ce89a73c94685dcee198dd583ca375 /src/svm/svm_fifo.c |
parent | b020806806c0e6c54886cdb4347a5fd1f19504b0 (diff) |
svm: refactor fifo
Type: refactor
Switch from a wrapped byte space to a "continuous" one wherein fifo
chunks are appended to the fifo as more data is enqueued and unlinked
as data is dequeued.
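Head and tail thus become free-running u32 byte positions that wrap
naturally at 2^32. Ordering between positions is decided with the
wrap-safe f_pos_lt/f_pos_leq/f_pos_gt/f_pos_geq helpers used throughout
the diff; their definitions live on the header side of the patch and
are not shown here, but a minimal sketch of their usual
signed-difference shape is:

```c
/* Wrap-safe comparisons of free-running u32 positions. Correct as
 * long as the compared positions are less than 2^31 bytes apart,
 * which the fifo's bounded size guarantees. */
static inline u8
f_pos_lt (u32 a, u32 b)
{
  return ((i32) (a - b) < 0);
}

static inline u8
f_pos_leq (u32 a, u32 b)
{
  return ((i32) (a - b) <= 0);
}

static inline u8
f_pos_gt (u32 a, u32 b)
{
  return ((i32) (a - b) > 0);
}

static inline u8
f_pos_geq (u32 a, u32 b)
{
  return ((i32) (a - b) >= 0);
}
```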
The fifo is still subject to a maximum size, i.e., a maximum number of
bytes that can be enqueued, so the maximum number of chunks associated
with the fifo is also constrained.
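With continuous positions, occupancy and free space reduce to plain
subtraction against that maximum. A sketch of the invariant, using
f_free_count from the diff and the header-side f_cursize helper (not
shown in this diff):

```c
/* Bytes currently enqueued: tail always leads head. */
static inline u32
f_cursize (svm_fifo_t * f, u32 head, u32 tail)
{
  return (tail - head);
}

/* Bytes that may still be enqueued before reaching the maximum size.
 * Note there is no longer a "nitems = size - 1": a full fifo is simply
 * tail - head == size, so no byte needs to be sacrificed to tell an
 * empty fifo from a full one. */
static inline u32
f_free_count (svm_fifo_t * f, u32 head, u32 tail)
{
  return (f->size - f_cursize (f, head, tail));
}
```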
When enqueuing data, which must fit within the available free space, if
not enough "supporting" chunk memory is available, the fifo asks the
fifo segment for enough chunk memory to ensure that the write can
succeed. To avoid allocating large numbers of small chunks for small
writes, the size of the requested chunks is, when possible, lower
bounded by min_alloc.
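The sizing policy lives in the new f_try_grow, shown in the diff below;
stripped of asserts, it requests at least the shortfall for the current
write, rounded up toward min_alloc when the fifo's remaining growth
headroom allows it:

```c
/* Chunk memory already allocated beyond tail */
free_alloced = f_chunk_end (f->end_chunk) - tail;

/* Prefer min_alloc-sized requests, but never ask for more than the
 * fifo may still grow ... */
alloc_size = clib_min (f->min_alloc, f->size - (tail - head));
/* ... and never for less than this write still needs. */
alloc_size = clib_max (alloc_size, len - free_alloced);

c = fsh_alloc_chunk (f->fs_hdr, f->slice_index, alloc_size);
if (PREDICT_FALSE (!c))
  return -1;	/* enqueue then truncates to the space already allocated */
svm_fifo_add_chunk (f, c);
```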
When dequeuing data, all chunks that have been completely drained,
i.e., those whose end bytes the head has moved past, are unlinked from
the fifo and returned to the fifo segment. The one exception is the
last chunk, which is never unlinked.
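Concretely, the dequeue path in the diff advances head and, whenever it
crosses the end of the first chunk, hands the drained chunks back to
the fifo segment allocator:

```c
head = head + len;

/* Head moved past the first chunk's end: f_unlink_chunks () detaches
 * every fully drained chunk, always keeping the chunk that contains
 * head (at minimum the last chunk), and fsh_collect_chunks () returns
 * the detached list to the fifo segment. */
if (f_pos_geq (head, f_chunk_end (f->start_chunk)))
  fsh_collect_chunks (f->fs_hdr, f->slice_index,
		      f_unlink_chunks (f, head, 0 /* maybe_ooo */));
```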
Change-Id: I98c1dbd9135fb79650365c7e40c29238b96cd4ee
Signed-off-by: Florin Coras <fcoras@cisco.com>
Diffstat (limited to 'src/svm/svm_fifo.c')
-rw-r--r-- | src/svm/svm_fifo.c | 1044
1 file changed, 470 insertions(+), 574 deletions(-)
diff --git a/src/svm/svm_fifo.c b/src/svm/svm_fifo.c index 3df67135cfd..0168c08e49c 100644 --- a/src/svm/svm_fifo.c +++ b/src/svm/svm_fifo.c @@ -18,6 +18,7 @@ */ #include <svm/svm_fifo.h> +#include <svm/fifo_segment.h> #include <vppinfra/cpu.h> CLIB_MARCH_FN (svm_fifo_copy_to_chunk, void, svm_fifo_t * f, @@ -26,7 +27,8 @@ CLIB_MARCH_FN (svm_fifo_copy_to_chunk, void, svm_fifo_t * f, { u32 n_chunk; - ASSERT (tail_idx >= c->start_byte && tail_idx < c->start_byte + c->length); + ASSERT (f_pos_geq (tail_idx, c->start_byte) + && f_pos_lt (tail_idx, c->start_byte + c->length)); tail_idx -= c->start_byte; n_chunk = c->length - tail_idx; @@ -56,7 +58,8 @@ CLIB_MARCH_FN (svm_fifo_copy_from_chunk, void, svm_fifo_t * f, { u32 n_chunk; - ASSERT (head_idx >= c->start_byte && head_idx < c->start_byte + c->length); + ASSERT (f_pos_geq (head_idx, c->start_byte) + && f_pos_lt (head_idx, c->start_byte + c->length)); head_idx -= c->start_byte; n_chunk = c->length - head_idx; @@ -98,34 +101,10 @@ svm_fifo_copy_from_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c, u32 head_idx, last); } -static inline u8 -position_lt (svm_fifo_t * f, u32 a, u32 b, u32 tail) -{ - return (f_distance_to (f, a, tail) < f_distance_to (f, b, tail)); -} - -static inline u8 -position_leq (svm_fifo_t * f, u32 a, u32 b, u32 tail) -{ - return (f_distance_to (f, a, tail) <= f_distance_to (f, b, tail)); -} - -static inline u8 -position_gt (svm_fifo_t * f, u32 a, u32 b, u32 tail) -{ - return (f_distance_to (f, a, tail) > f_distance_to (f, b, tail)); -} - -static inline u32 -position_diff (svm_fifo_t * f, u32 a, u32 b, u32 tail) -{ - return f_distance_to (f, a, tail) - f_distance_to (f, b, tail); -} - static inline u32 -ooo_segment_end_pos (svm_fifo_t * f, ooo_segment_t * s) +ooo_segment_end_pos (ooo_segment_t * s) { - return (s->start + s->length) % f->size; + return (s->start + s->length); } void @@ -200,10 +179,10 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length) u32 new_index, s_end_pos, s_index; u32 offset_pos, offset_end_pos; - ASSERT (offset + length <= f_distance_to (f, head, tail) || head == tail); + ASSERT (offset + length <= f_free_count (f, head, tail)); - offset_pos = (tail + offset) % f->size; - offset_end_pos = (tail + offset + length) % f->size; + offset_pos = tail + offset; + offset_end_pos = tail + offset + length; f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; @@ -218,28 +197,27 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length) /* Find first segment that starts after new segment */ s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); while (s->next != OOO_SEGMENT_INVALID_INDEX - && position_lt (f, s->start, offset_pos, tail)) + && f_pos_lt (s->start, offset_pos)) s = pool_elt_at_index (f->ooo_segments, s->next); /* If we have a previous and we overlap it, use it as starting point */ prev = ooo_segment_prev (f, s); - if (prev - && position_leq (f, offset_pos, ooo_segment_end_pos (f, prev), tail)) + if (prev && f_pos_leq (offset_pos, ooo_segment_end_pos (prev))) { s = prev; - s_end_pos = ooo_segment_end_pos (f, s); + s_end_pos = ooo_segment_end_pos (s); /* Since we have previous, offset start position cannot be smaller * than prev->start. 
Check tail */ - ASSERT (position_lt (f, s->start, offset_pos, tail)); + ASSERT (f_pos_lt (s->start, offset_pos)); goto check_tail; } s_index = s - f->ooo_segments; - s_end_pos = ooo_segment_end_pos (f, s); + s_end_pos = ooo_segment_end_pos (s); /* No overlap, add before current segment */ - if (position_lt (f, offset_end_pos, s->start, tail)) + if (f_pos_lt (offset_end_pos, s->start)) { new_s = ooo_segment_alloc (f, offset_pos, length); new_index = new_s - f->ooo_segments; @@ -264,7 +242,7 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length) return; } /* No overlap, add after current segment */ - else if (position_gt (f, offset_pos, s_end_pos, tail)) + else if (f_pos_gt (offset_pos, s_end_pos)) { new_s = ooo_segment_alloc (f, offset_pos, length); new_index = new_s - f->ooo_segments; @@ -287,24 +265,23 @@ ooo_segment_add (svm_fifo_t * f, u32 offset, u32 head, u32 tail, u32 length) */ /* Merge at head */ - if (position_lt (f, offset_pos, s->start, tail)) + if (f_pos_lt (offset_pos, s->start)) { s->start = offset_pos; - s->length = position_diff (f, s_end_pos, s->start, tail); + s->length = s_end_pos - s->start; f->ooos_newest = s - f->ooo_segments; } check_tail: /* Overlapping tail */ - if (position_gt (f, offset_end_pos, s_end_pos, tail)) + if (f_pos_gt (offset_end_pos, s_end_pos)) { - s->length = position_diff (f, offset_end_pos, s->start, tail); + s->length = offset_end_pos - s->start; /* Remove the completely overlapped segments in the tail */ it = ooo_segment_next (f, s); - while (it && position_leq (f, ooo_segment_end_pos (f, it), - offset_end_pos, tail)) + while (it && f_pos_leq (ooo_segment_end_pos (it), offset_end_pos)) { next = ooo_segment_next (f, it); ooo_segment_free (f, it - f->ooo_segments); @@ -312,10 +289,9 @@ check_tail: } /* If partial overlap with last, merge */ - if (it && position_leq (f, it->start, offset_end_pos, tail)) + if (it && f_pos_leq (it->start, offset_end_pos)) { - s->length = position_diff (f, ooo_segment_end_pos (f, it), - s->start, tail); + s->length = ooo_segment_end_pos (it) - s->start; ooo_segment_free (f, it - f->ooo_segments); } f->ooos_newest = s - f->ooo_segments; @@ -334,7 +310,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail) i32 diff; s = pool_elt_at_index (f->ooo_segments, f->ooos_list_head); - diff = f_distance_from (f, s->start, *tail); + diff = *tail - s->start; ASSERT (diff != n_bytes_enqueued); @@ -350,7 +326,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail) if (s->length > diff) { bytes = s->length - diff; - *tail = (*tail + bytes) % f->size; + *tail = *tail + bytes; ooo_segment_free (f, s_index); break; } @@ -359,7 +335,7 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail) if (s->next != OOO_SEGMENT_INVALID_INDEX) { s = pool_elt_at_index (f->ooo_segments, s->next); - diff = f_distance_from (f, s->start, *tail); + diff = *tail - s->start; ooo_segment_free (f, s_index); } /* End of search */ @@ -370,11 +346,11 @@ ooo_segment_try_collect (svm_fifo_t * f, u32 n_bytes_enqueued, u32 * tail) } } - ASSERT (bytes <= f->nitems); + ASSERT (bytes <= f->size); return bytes; } -static ooo_segment_t * +__clib_unused static ooo_segment_t * ooo_segment_last (svm_fifo_t * f) { ooo_segment_t *s; @@ -391,36 +367,30 @@ ooo_segment_last (svm_fifo_t * f) void svm_fifo_init (svm_fifo_t * f, u32 size) { + svm_fifo_chunk_t *c, *prev; + u32 first_chunk, min_alloc; + f->size = size; - /* - * usable size of the fifo set to rounded_data_size - 1 - * to 
differentiate between free fifo and empty fifo. - */ - f->nitems = f->size - 1; f->ooos_list_head = OOO_SEGMENT_INVALID_INDEX; f->segment_index = SVM_FIFO_INVALID_INDEX; f->refcnt = 1; f->head = f->tail = f->flags = 0; - f->head_chunk = f->tail_chunk = f->ooo_enq = f->ooo_deq = f->start_chunk; -} - -void -svm_fifo_init_chunks (svm_fifo_t * f) -{ - svm_fifo_chunk_t *c, *prev; + f->head_chunk = f->tail_chunk = f->start_chunk; + f->ooo_deq = f->ooo_enq = 0; - if (f->start_chunk->next == f->start_chunk) - return; - - f->flags |= SVM_FIFO_F_MULTI_CHUNK; - rb_tree_init (&f->ooo_enq_lookup); - rb_tree_init (&f->ooo_deq_lookup); + first_chunk = f->start_chunk->length; + min_alloc = first_chunk > 16 << 10 ? first_chunk >> 2 : 4096; + min_alloc = clib_min (first_chunk, 64 << 10); + f->min_alloc = min_alloc; + /* + * Initialize chunks + */ f->start_chunk->start_byte = 0; prev = f->start_chunk; c = prev->next; - while (c != f->start_chunk) + while (c) { c->start_byte = prev->start_byte + prev->length; prev = c; @@ -428,11 +398,26 @@ svm_fifo_init_chunks (svm_fifo_t * f) } } +void +svm_fifo_init_ooo_lookup (svm_fifo_t * f, u8 ooo_type) +{ + if (ooo_type == 0) + { + ASSERT (!rb_tree_is_init (&f->ooo_enq_lookup)); + rb_tree_init (&f->ooo_enq_lookup); + } + else + { + ASSERT (!rb_tree_is_init (&f->ooo_deq_lookup)); + rb_tree_init (&f->ooo_deq_lookup); + } +} + /** * Creates a fifo in the current heap. Fails vs blow up the process */ svm_fifo_t * -svm_fifo_create (u32 data_size_in_bytes) +svm_fifo_alloc (u32 data_size_in_bytes) { u32 rounded_data_size; svm_fifo_chunk_t *c; @@ -454,13 +439,13 @@ svm_fifo_create (u32 data_size_in_bytes) return 0; } - c->next = c; + clib_memset (c, 0, sizeof (*c)); c->start_byte = 0; c->length = data_size_in_bytes; - c->rb_index = RBTREE_TNIL_INDEX; + c->enq_rb_index = RBTREE_TNIL_INDEX; + c->deq_rb_index = RBTREE_TNIL_INDEX; f->start_chunk = f->end_chunk = c; - svm_fifo_init (f, data_size_in_bytes); return f; } @@ -485,14 +470,73 @@ svm_fifo_chunk_alloc (u32 size) return c; } -static inline u8 -svm_fifo_chunk_includes_pos (svm_fifo_chunk_t * c, u32 pos) +/** + * Find chunk for given byte position + * + * @param f fifo + * @param pos normalized position in fifo + * + * @return chunk that includes given position or 0 + */ +static svm_fifo_chunk_t * +svm_fifo_find_chunk (svm_fifo_t * f, u32 pos) +{ + svm_fifo_chunk_t *c; + + c = f->start_chunk; + while (c && !f_chunk_includes_pos (c, pos)) + c = c->next; + + return c; +} + +static svm_fifo_chunk_t * +svm_fifo_find_next_chunk (svm_fifo_t * f, svm_fifo_chunk_t * start, u32 pos) +{ + svm_fifo_chunk_t *c; + + ASSERT (start != 0); + + c = start; + while (c && !f_chunk_includes_pos (c, pos)) + c = c->next; + + return c; +} + +u32 +svm_fifo_max_read_chunk (svm_fifo_t * f) +{ + u32 head, tail, end_chunk; + + f_load_head_tail_cons (f, &head, &tail); + ASSERT (!f->head_chunk || f_chunk_includes_pos (f->head_chunk, head)); + + if (!f->head_chunk) + { + f->head_chunk = svm_fifo_find_chunk (f, head); + if (PREDICT_FALSE (!f->head_chunk)) + return 0; + } + + end_chunk = f_chunk_end (f->head_chunk); + + return f_pos_lt (end_chunk, tail) ? end_chunk - head : tail - head; +} + +u32 +svm_fifo_max_write_chunk (svm_fifo_t * f) { - return (pos >= c->start_byte && pos < c->start_byte + c->length); + u32 head, tail; + + f_load_head_tail_prod (f, &head, &tail); + ASSERT (!f->tail_chunk || f_chunk_includes_pos (f->tail_chunk, tail)); + + return f->tail_chunk ? 
f_chunk_end (f->tail_chunk) - tail : 0; } static rb_node_t * -svm_fifo_find_node_rbtree (rb_tree_t * rt, u32 pos) +f_find_node_rbtree (rb_tree_t * rt, u32 pos) { rb_node_t *cur, *prev; @@ -503,7 +547,7 @@ svm_fifo_find_node_rbtree (rb_tree_t * rt, u32 pos) while (pos != cur->key) { prev = cur; - if (pos < cur->key) + if (f_pos_lt (pos, cur->key)) { cur = rb_node_left (rt, cur); if (rb_node_is_tnil (rt, cur)) @@ -530,501 +574,206 @@ svm_fifo_find_node_rbtree (rb_tree_t * rt, u32 pos) } static svm_fifo_chunk_t * -svm_fifo_find_chunk_rbtree (rb_tree_t * rt, u32 pos) +f_find_chunk_rbtree (rb_tree_t * rt, u32 pos) { svm_fifo_chunk_t *c; rb_node_t *n; - n = svm_fifo_find_node_rbtree (rt, pos); + if (!rb_tree_is_init (rt)) + return 0; + + n = f_find_node_rbtree (rt, pos); if (!n) return 0; c = uword_to_pointer (n->opaque, svm_fifo_chunk_t *); - if (svm_fifo_chunk_includes_pos (c, pos)) + if (f_chunk_includes_pos (c, pos)) return c; return 0; } -/** - * Find chunk for given byte position - * - * @param f fifo - * @param pos normalized position in fifo - * - * @return chunk that includes given position or 0 - */ -static svm_fifo_chunk_t * -svm_fifo_find_chunk (svm_fifo_t * f, u32 pos) -{ - svm_fifo_chunk_t *c; - - c = f->start_chunk; - do - { - if (svm_fifo_chunk_includes_pos (c, pos)) - return c; - c = c->next; - } - while (c != f->start_chunk); - - return 0; -} - static void -svm_fifo_update_ooo_enq (svm_fifo_t * f, u32 ref_pos, u32 start_pos, - u32 end_pos) +f_update_ooo_enq (svm_fifo_t * f, u32 start_pos, u32 end_pos) { rb_tree_t *rt = &f->ooo_enq_lookup; svm_fifo_chunk_t *c; rb_node_t *cur; - if (svm_fifo_chunk_includes_pos (f->ooo_enq, start_pos) - && svm_fifo_chunk_includes_pos (f->ooo_enq, end_pos) - && ref_pos < start_pos) - return; + /* Use linear search if rbtree is not initialized */ + if (PREDICT_FALSE (!rb_tree_is_init (rt))) + { + f->ooo_enq = svm_fifo_find_next_chunk (f, f->tail_chunk, start_pos); + return; + } if (rt->root == RBTREE_TNIL_INDEX) { c = f->tail_chunk; - c->rb_index = rb_tree_add2 (rt, c->start_byte, pointer_to_uword (c)); + ASSERT (c->enq_rb_index == RBTREE_TNIL_INDEX); + c->enq_rb_index = rb_tree_add_custom (rt, c->start_byte, + pointer_to_uword (c), f_pos_lt); } else { - cur = svm_fifo_find_node_rbtree (rt, start_pos); + cur = f_find_node_rbtree (rt, start_pos); c = uword_to_pointer (cur->opaque, svm_fifo_chunk_t *); - if (ref_pos > start_pos && c->start_byte > start_pos) - { - c = f->end_chunk; - ASSERT (c->rb_index != RBTREE_TNIL_INDEX); - } + ASSERT (f_pos_leq (c->start_byte, start_pos)); } - if (svm_fifo_chunk_includes_pos (c, start_pos)) + if (f_chunk_includes_pos (c, start_pos)) f->ooo_enq = c; - if (svm_fifo_chunk_includes_pos (c, end_pos) && ref_pos < end_pos) + if (f_chunk_includes_pos (c, end_pos)) return; do { c = c->next; - if (c->rb_index != RBTREE_TNIL_INDEX) + if (!c || c->enq_rb_index != RBTREE_TNIL_INDEX) break; - c->rb_index = rb_tree_add2 (rt, c->start_byte, pointer_to_uword (c)); + c->enq_rb_index = rb_tree_add_custom (rt, c->start_byte, + pointer_to_uword (c), f_pos_lt); - if (svm_fifo_chunk_includes_pos (c, start_pos)) + if (f_chunk_includes_pos (c, start_pos)) f->ooo_enq = c; - } - while (!svm_fifo_chunk_includes_pos (c, end_pos)); + while (!f_chunk_includes_pos (c, end_pos)); } static void -svm_fifo_update_ooo_deq (svm_fifo_t * f, u32 ref_pos, u32 start_pos, - u32 end_pos) +f_update_ooo_deq (svm_fifo_t * f, u32 start_pos, u32 end_pos) { rb_tree_t *rt = &f->ooo_deq_lookup; rb_node_t *cur; svm_fifo_chunk_t *c; - if (svm_fifo_chunk_includes_pos 
(f->ooo_deq, start_pos) - && svm_fifo_chunk_includes_pos (f->ooo_deq, end_pos) - && ref_pos < start_pos) - return; + /* Use linear search if rbtree is not initialized */ + if (PREDICT_FALSE (!rb_tree_is_init (rt))) + { + f->ooo_deq = svm_fifo_find_chunk (f, start_pos); + return; + } if (rt->root == RBTREE_TNIL_INDEX) { - c = f->head_chunk; - c->rb_index = rb_tree_add2 (rt, c->start_byte, pointer_to_uword (c)); + c = f->start_chunk; + ASSERT (c->deq_rb_index == RBTREE_TNIL_INDEX); + c->deq_rb_index = rb_tree_add_custom (rt, c->start_byte, + pointer_to_uword (c), f_pos_lt); } else { - cur = svm_fifo_find_node_rbtree (rt, start_pos); + cur = f_find_node_rbtree (rt, start_pos); c = uword_to_pointer (cur->opaque, svm_fifo_chunk_t *); - if (ref_pos > start_pos && c->start_byte > start_pos) - { - c = f->end_chunk; - ASSERT (c->rb_index != RBTREE_TNIL_INDEX); - } + ASSERT (f_pos_leq (c->start_byte, start_pos)); } - if (svm_fifo_chunk_includes_pos (c, start_pos)) + if (f_chunk_includes_pos (c, start_pos)) f->ooo_deq = c; - if (svm_fifo_chunk_includes_pos (c, end_pos) && ref_pos < end_pos) + if (f_chunk_includes_pos (c, end_pos)) return; do { c = c->next; - if (c->rb_index != RBTREE_TNIL_INDEX) + if (!c || c->deq_rb_index != RBTREE_TNIL_INDEX) break; - c->rb_index = rb_tree_add2 (rt, c->start_byte, pointer_to_uword (c)); + c->deq_rb_index = rb_tree_add_custom (rt, c->start_byte, + pointer_to_uword (c), f_pos_lt); - if (svm_fifo_chunk_includes_pos (c, start_pos)) + if (f_chunk_includes_pos (c, start_pos)) f->ooo_deq = c; - } - while (!svm_fifo_chunk_includes_pos (c, end_pos)); + while (!f_chunk_includes_pos (c, end_pos)); } -void -svm_fifo_ooo_deq_track (svm_fifo_t * f, u32 start_pos, u32 end_pos) +static svm_fifo_chunk_t * +f_lookup_clear_enq_chunks (svm_fifo_t * f, svm_fifo_chunk_t * start, + u32 end_pos) { - rb_tree_t *rt = &f->ooo_deq_lookup; + rb_tree_t *rt = &f->ooo_enq_lookup; svm_fifo_chunk_t *c; + rb_node_t *n; - if (svm_fifo_chunk_includes_pos (f->ooo_deq, end_pos) - && start_pos < end_pos) - return; - - c = f->ooo_deq->next; - do + c = start; + while (c && !f_chunk_includes_pos (c, end_pos)) { - ASSERT (c->rb_index == RBTREE_TNIL_INDEX); - rb_tree_add2 (rt, c->start_byte, pointer_to_uword (c)); + if (c->enq_rb_index != RBTREE_TNIL_INDEX) + { + n = rb_node (rt, c->enq_rb_index); + rb_tree_del_node (rt, n); + c->enq_rb_index = RBTREE_TNIL_INDEX; + } c = c->next; } - while (!svm_fifo_chunk_includes_pos (c, end_pos)); + + /* No ooo segments left, so make sure the current chunk + * is not tracked in the enq rbtree */ + if (f->ooos_list_head == OOO_SEGMENT_INVALID_INDEX + && c && c->enq_rb_index != RBTREE_TNIL_INDEX) + { + n = rb_node (rt, c->enq_rb_index); + rb_tree_del_node (rt, n); + c->enq_rb_index = RBTREE_TNIL_INDEX; + } + + return c; } static svm_fifo_chunk_t * -svm_fifo_lookup_clear_chunks (svm_fifo_t * f, rb_tree_t * rt, - svm_fifo_chunk_t * start, u32 start_pos, - u32 end_pos) +f_lookup_clear_deq_chunks (svm_fifo_t * f, svm_fifo_chunk_t * start, + u32 end_pos) { + rb_tree_t *rt = &f->ooo_deq_lookup; svm_fifo_chunk_t *c; rb_node_t *n; - /* Nothing to do if still in the same chunk and not wrapped */ - if (svm_fifo_chunk_includes_pos (start, end_pos) && start_pos < end_pos) - return start; - c = start; - do + while (c && !f_chunk_includes_pos (c, end_pos)) { - if (c->rb_index == RBTREE_TNIL_INDEX) + if (c->deq_rb_index != RBTREE_TNIL_INDEX) { - c = c->next; - continue; + n = rb_node (rt, c->deq_rb_index); + rb_tree_del_node (rt, n); + c->deq_rb_index = RBTREE_TNIL_INDEX; } - n = rb_node 
(rt, c->rb_index); - rb_tree_del_node (rt, n); - c->rb_index = RBTREE_TNIL_INDEX; c = c->next; } - while (!svm_fifo_chunk_includes_pos (c, end_pos)); return c; } -static inline void -svm_fifo_grow (svm_fifo_t * f, svm_fifo_chunk_t * c) -{ - svm_fifo_chunk_t *prev; - u32 add_bytes = 0; - - if (!c) - return; - - f->end_chunk->next = c; - while (c) - { - add_bytes += c->length; - prev = c; - c = c->next; - } - f->end_chunk = prev; - prev->next = f->start_chunk; - f->size += add_bytes; - f->nitems = f->size - 1; - f->new_chunks = 0; -} - -static void -svm_fifo_try_grow (svm_fifo_t * f, u32 new_head) -{ - if (new_head > f->tail) - return; - - svm_fifo_grow (f, f->new_chunks); - f->flags &= ~SVM_FIFO_F_GROW; -} - void svm_fifo_add_chunk (svm_fifo_t * f, svm_fifo_chunk_t * c) { svm_fifo_chunk_t *cur, *prev; - /* Initialize rbtree if needed and add default chunk to it. Expectation is - * that this is called with the heap where the rbtree's pool is pushed. */ - if (!(f->flags & SVM_FIFO_F_MULTI_CHUNK)) - { - ASSERT (f->start_chunk->next == f->start_chunk); - rb_tree_init (&f->ooo_enq_lookup); - rb_tree_init (&f->ooo_deq_lookup); - f->flags |= SVM_FIFO_F_MULTI_CHUNK; - } - - /* If fifo is not wrapped, update the size now */ - if (!svm_fifo_is_wrapped (f)) - { - /* Initialize chunks and add to lookup rbtree */ - cur = c; - if (f->new_chunks) - { - prev = f->new_chunks; - while (prev->next) - prev = prev->next; - prev->next = c; - } - else - prev = f->end_chunk; - - while (cur) - { - cur->start_byte = prev->start_byte + prev->length; - cur->rb_index = RBTREE_TNIL_INDEX; - prev = cur; - cur = cur->next; - } - - ASSERT (!f->new_chunks); - svm_fifo_grow (f, c); - return; - } - - /* Wrapped */ - if (f->flags & SVM_FIFO_F_SINGLE_THREAD_OWNED) - { - ASSERT (f->master_thread_index == os_get_thread_index ()); - - if (!f->new_chunks && f->head_chunk != f->tail_chunk) - { - u32 head = 0, tail = 0; - f_load_head_tail_cons (f, &head, &tail); - - svm_fifo_chunk_t *tmp = f->tail_chunk->next; - - prev = f->tail_chunk; - u32 add_bytes = 0; - cur = prev->next; - while (cur != f->start_chunk) - { - /* remove any existing rb_tree entry */ - if (cur->rb_index != RBTREE_TNIL_INDEX) - { - rb_tree_del (&f->ooo_enq_lookup, cur->start_byte); - rb_tree_del (&f->ooo_deq_lookup, cur->start_byte); - } - cur->rb_index = RBTREE_TNIL_INDEX; - cur = cur->next; - } - - /* insert new chunk after the tail_chunk */ - f->tail_chunk->next = c; - while (c) - { - add_bytes += c->length; - c->start_byte = prev->start_byte + prev->length; - cur->rb_index = RBTREE_TNIL_INDEX; - - prev = c; - c = c->next; - } - prev->next = tmp; - - /* shift existing chunks along */ - cur = tmp; - while (cur != f->start_chunk) - { - cur->start_byte = prev->start_byte + prev->length; - prev = cur; - cur = cur->next; - } - - f->size += add_bytes; - f->nitems = f->size - 1; - f->new_chunks = 0; - head += add_bytes; - - clib_atomic_store_rel_n (&f->head, head); - ASSERT (svm_fifo_is_sane (f)); - - return; - } - } - - /* Wrapped, and optimization of single-thread-owned fifo cannot be applied */ - /* Initialize chunks and add to lookup rbtree */ cur = c; - if (f->new_chunks) - { - prev = f->new_chunks; - while (prev->next) - prev = prev->next; - prev->next = c; - } - else - prev = f->end_chunk; + prev = f->end_chunk; while (cur) { cur->start_byte = prev->start_byte + prev->length; - cur->rb_index = RBTREE_TNIL_INDEX; - prev = cur; - cur = cur->next; - } - - /* Postpone size update */ - if (!f->new_chunks) - { - f->new_chunks = c; - f->flags |= SVM_FIFO_F_GROW; - } -} 
- -/** - * Removes chunks that are after fifo end byte - */ -svm_fifo_chunk_t * -svm_fifo_collect_chunks (svm_fifo_t * f) -{ - svm_fifo_chunk_t *list, *cur; - - f->flags &= ~SVM_FIFO_F_COLLECT_CHUNKS; - - list = f->new_chunks; - f->new_chunks = 0; - cur = list; - while (cur) - { - if (cur->rb_index != RBTREE_TNIL_INDEX) - { - rb_tree_del (&f->ooo_enq_lookup, cur->start_byte); - rb_tree_del (&f->ooo_deq_lookup, cur->start_byte); - } - cur = cur->next; - } - - return list; -} - -void -svm_fifo_try_shrink (svm_fifo_t * f, u32 head, u32 tail) -{ - u32 len_to_shrink = 0, tail_pos, len, last_pos; - svm_fifo_chunk_t *cur, *prev, *next, *start; - - tail_pos = tail; - if (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX) - { - ooo_segment_t *last = ooo_segment_last (f); - tail_pos = ooo_segment_end_pos (f, last); - } - - if (f->size_decrement) - { - /* Figure out available free space considering that there may be - * ooo segments */ - len = clib_min (f->size_decrement, f_free_count (f, head, tail_pos)); - f->nitems -= len; - f->size_decrement -= len; - } - - /* Remove tail chunks if the following hold: - * - not wrapped - * - last used byte less than start of last chunk - */ - if (tail_pos >= head && tail_pos < f->end_chunk->start_byte) - { - /* Lookup the last position not to be removed. Since size still needs - * to be nitems + 1, nitems must fall within the usable space. Also, - * first segment is not removable, so tail_pos can be 0. */ - last_pos = tail_pos > 0 ? tail_pos - 1 : tail_pos; - prev = svm_fifo_find_chunk (f, clib_max (f->nitems, last_pos)); - next = prev->next; - /* If tail_pos is first position in next, skip the chunk, otherwise, - * we must update the tail and, if fifo size is 0, even the head. - * We should not invalidate the tail for the caller and must not change - * consumer owned variables from code that's typically called by the - * producer */ - if (next->start_byte == tail_pos) - { - prev = next; - next = next->next; - } - while (next != f->start_chunk) - { - cur = next; - next = cur->next; - len_to_shrink += cur->length; - } - if (len_to_shrink) - { - f->size -= len_to_shrink; - start = prev->next; - prev->next = f->start_chunk; - f->end_chunk = prev; - cur->next = f->new_chunks; - f->new_chunks = start; - } - } - - if (!f->size_decrement && f->size == f->nitems + 1) - { - f->flags &= ~SVM_FIFO_F_SHRINK; - f->flags |= SVM_FIFO_F_COLLECT_CHUNKS; - if (f->start_chunk == f->start_chunk->next) - f->flags &= ~SVM_FIFO_F_MULTI_CHUNK; - } -} + cur->enq_rb_index = RBTREE_TNIL_INDEX; + cur->deq_rb_index = RBTREE_TNIL_INDEX; -/** - * Request to reduce fifo size by amount of bytes - */ -int -svm_fifo_reduce_size (svm_fifo_t * f, u32 len, u8 try_shrink) -{ - svm_fifo_chunk_t *cur; - u32 actual_len = 0; - - /* Abort if trying to reduce by more than fifo size or if - * fifo is undergoing resizing already */ - if (len >= f->size || f->size > f->nitems + 1 - || (f->flags & SVM_FIFO_F_SHRINK) || (f->flags & SVM_FIFO_F_GROW)) - return 0; - - /* last chunk that will not be removed */ - cur = svm_fifo_find_chunk (f, f->nitems - len); - - /* sum length of chunks that will be removed */ - cur = cur->next; - while (cur != f->start_chunk) - { - actual_len += cur->length; + prev = cur; cur = cur->next; } - ASSERT (actual_len <= len); - if (!actual_len) - return 0; - - f->size_decrement = actual_len; - f->flags |= SVM_FIFO_F_SHRINK; + prev->next = 0; + f->end_chunk->next = c; + f->end_chunk = prev; - if (try_shrink) - { - u32 head, tail; - f_load_head_tail_prod (f, &head, &tail); - 
svm_fifo_try_shrink (f, head, tail); - } + if (!f->tail_chunk) + f->tail_chunk = c; - return actual_len; + return; } void @@ -1054,9 +803,13 @@ svm_fifo_overwrite_head (svm_fifo_t * f, u8 * src, u32 len) u32 head, tail, head_idx; svm_fifo_chunk_t *c; - ASSERT (len <= f->nitems); + ASSERT (len <= f->size); f_load_head_tail_cons (f, &head, &tail); + + if (!f->head_chunk) + f->head_chunk = svm_fifo_find_chunk (f, head); + c = f->head_chunk; head_idx = head - c->start_byte; n_chunk = c->length - head_idx; @@ -1064,30 +817,65 @@ svm_fifo_overwrite_head (svm_fifo_t * f, u8 * src, u32 len) clib_memcpy_fast (&c->data[head_idx], src, len); else { + ASSERT (len - n_chunk <= c->next->length); clib_memcpy_fast (&c->data[head_idx], src, n_chunk); clib_memcpy_fast (&c->next->data[0], src + n_chunk, len - n_chunk); } } +static int +f_try_grow (svm_fifo_t * f, u32 head, u32 tail, u32 len) +{ + svm_fifo_chunk_t *c; + u32 alloc_size, free_alloced; + + free_alloced = f_chunk_end (f->end_chunk) - tail; + ASSERT (free_alloced < len); + + alloc_size = clib_min (f->min_alloc, f->size - (tail - head)); + alloc_size = clib_max (alloc_size, len - free_alloced); + + c = fsh_alloc_chunk (f->fs_hdr, f->slice_index, alloc_size); + if (PREDICT_FALSE (!c)) + return -1; + + svm_fifo_add_chunk (f, c); + return 0; +} + int svm_fifo_enqueue (svm_fifo_t * f, u32 len, const u8 * src) { u32 tail, head, free_count; + svm_fifo_chunk_t *old_tail_c; + + f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; f_load_head_tail_prod (f, &head, &tail); /* free space in fifo can only increase during enqueue: SPSC */ free_count = f_free_count (f, head, tail); - f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; - if (PREDICT_FALSE (free_count == 0)) return SVM_FIFO_EFULL; /* number of bytes we're going to copy */ len = clib_min (free_count, len); + + if (f_pos_gt (tail + len, f_chunk_end (f->end_chunk))) + { + if (PREDICT_FALSE (f_try_grow (f, head, tail, len))) + { + len = f_chunk_end (f->end_chunk) - tail; + if (!len) + return SVM_FIFO_EGROW; + } + } + + old_tail_c = f->tail_chunk; + svm_fifo_copy_to_chunk (f, f->tail_chunk, tail, src, len, &f->tail_chunk); - tail = (tail + len) % f->size; + tail = tail + len; svm_fifo_trace_add (f, head, len, 2); @@ -1095,10 +883,9 @@ svm_fifo_enqueue (svm_fifo_t * f, u32 len, const u8 * src) if (PREDICT_FALSE (f->ooos_list_head != OOO_SEGMENT_INVALID_INDEX)) { len += ooo_segment_try_collect (f, len, &tail); - if (f->flags & SVM_FIFO_F_MULTI_CHUNK) - f->tail_chunk = svm_fifo_lookup_clear_chunks (f, &f->ooo_enq_lookup, - f->tail_chunk, f->tail, - tail); + /* Tail chunk might've changed even if nothing was collected */ + f->tail_chunk = f_lookup_clear_enq_chunks (f, old_tail_c, tail); + f->ooo_enq = 0; } /* store-rel: producer owned index (paired with load-acq in consumer) */ @@ -1117,30 +904,33 @@ svm_fifo_enqueue (svm_fifo_t * f, u32 len, const u8 * src) int svm_fifo_enqueue_with_offset (svm_fifo_t * f, u32 offset, u32 len, u8 * src) { - u32 tail, head, free_count, tail_idx; + u32 tail, head, free_count, enq_pos; f_load_head_tail_prod (f, &head, &tail); - if (PREDICT_FALSE (f->flags & SVM_FIFO_F_SHRINK)) - svm_fifo_try_shrink (f, head, tail); - /* free space in fifo can only increase during enqueue: SPSC */ free_count = f_free_count (f, head, tail); + f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; /* will this request fit? 
*/ if ((len + offset) > free_count) return SVM_FIFO_EFULL; - f->ooos_newest = OOO_SEGMENT_INVALID_INDEX; + enq_pos = tail + offset; + + if (f_pos_gt (enq_pos + len, f_chunk_end (f->end_chunk))) + { + if (PREDICT_FALSE (f_try_grow (f, head, tail, offset + len))) + return SVM_FIFO_EGROW; + } + svm_fifo_trace_add (f, offset, len, 1); ooo_segment_add (f, offset, head, tail, len); - tail_idx = (tail + offset) % f->size; - if (f->flags & SVM_FIFO_F_MULTI_CHUNK) - svm_fifo_update_ooo_enq (f, f->tail, tail_idx, - (tail_idx + len) % f->size); + if (!f->ooo_enq || !f_chunk_includes_pos (f->ooo_enq, enq_pos)) + f_update_ooo_enq (f, enq_pos, enq_pos + len); - svm_fifo_copy_to_chunk (f, f->ooo_enq, tail_idx, src, len, &f->ooo_enq); + svm_fifo_copy_to_chunk (f, f->ooo_enq, enq_pos, src, len, &f->ooo_enq); return 0; } @@ -1156,17 +946,74 @@ svm_fifo_enqueue_nocopy (svm_fifo_t * f, u32 len) ASSERT (len <= svm_fifo_max_enqueue_prod (f)); /* load-relaxed: producer owned index */ tail = f->tail; - tail = (tail + len) % f->size; + tail = tail + len; - if (f->flags & SVM_FIFO_F_MULTI_CHUNK) - f->tail_chunk = svm_fifo_lookup_clear_chunks (f, &f->ooo_enq_lookup, - f->tail_chunk, f->tail, - tail); + if (rb_tree_is_init (&f->ooo_enq_lookup)) + { + f->tail_chunk = f_lookup_clear_enq_chunks (f, f->tail_chunk, tail); + f->ooo_enq = 0; + } + else + { + f->tail_chunk = svm_fifo_find_next_chunk (f, f->tail_chunk, tail); + } /* store-rel: producer owned index (paired with load-acq in consumer) */ clib_atomic_store_rel_n (&f->tail, tail); } +always_inline svm_fifo_chunk_t * +f_unlink_chunks (svm_fifo_t * f, u32 end_pos, u8 maybe_ooo) +{ + svm_fifo_chunk_t *start, *prev = 0, *c; + rb_tree_t *rt; + rb_node_t *n; + + ASSERT (!f_chunk_includes_pos (f->start_chunk, end_pos)); + + if (maybe_ooo) + rt = &f->ooo_deq_lookup; + + c = f->start_chunk; + + do + { + if (maybe_ooo && c->deq_rb_index != RBTREE_TNIL_INDEX) + { + n = rb_node (rt, c->deq_rb_index); + ASSERT (n == f_find_node_rbtree (rt, c->start_byte)); + rb_tree_del_node (rt, n); + c->deq_rb_index = RBTREE_TNIL_INDEX; + } + if (!c->next) + break; + prev = c; + c = c->next; + } + while (!f_chunk_includes_pos (c, end_pos)); + + if (maybe_ooo) + { + if (f->ooo_deq && f_pos_lt (f->ooo_deq->start_byte, f_chunk_end (c))) + f->ooo_deq = 0; + } + else + { + if (PREDICT_FALSE (f->ooo_deq != 0)) + f->ooo_deq = 0; + } + + /* Avoid unlinking the last chunk */ + if (!prev) + return 0; + + prev->next = 0; + start = f->start_chunk; + f->start_chunk = c; + + return start; +} + int svm_fifo_dequeue (svm_fifo_t * f, u32 len, u8 * dst) { @@ -1181,11 +1028,16 @@ svm_fifo_dequeue (svm_fifo_t * f, u32 len, u8 * dst) return SVM_FIFO_EEMPTY; len = clib_min (cursize, len); + + if (!f->head_chunk) + f->head_chunk = svm_fifo_find_chunk (f, head); + svm_fifo_copy_from_chunk (f, f->head_chunk, head, dst, len, &f->head_chunk); - head = (head + len) % f->size; + head = head + len; - if (PREDICT_FALSE (f->flags & SVM_FIFO_F_GROW)) - svm_fifo_try_grow (f, head); + if (f_pos_geq (head, f_chunk_end (f->start_chunk))) + fsh_collect_chunks (f->fs_hdr, f->slice_index, + f_unlink_chunks (f, head, 0)); /* store-rel: consumer owned index (paired with load-acq in producer) */ clib_atomic_store_rel_n (&f->head, head); @@ -1207,10 +1059,10 @@ svm_fifo_peek (svm_fifo_t * f, u32 offset, u32 len, u8 * dst) return SVM_FIFO_EEMPTY; len = clib_min (cursize - offset, len); - head_idx = (head + offset) % f->size; + head_idx = head + offset; - if (f->flags & SVM_FIFO_F_MULTI_CHUNK) - svm_fifo_update_ooo_deq (f, head, 
head_idx, (head_idx + len) % f->size); + if (!f->ooo_deq || !f_chunk_includes_pos (f->ooo_deq, head_idx)) + f_update_ooo_deq (f, head_idx, head_idx + len); svm_fifo_copy_from_chunk (f, f->ooo_deq, head_idx, dst, len, &f->ooo_deq); return len; @@ -1234,15 +1086,15 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 len) svm_fifo_trace_add (f, tail, total_drop_bytes, 3); /* move head */ - head = (head + total_drop_bytes) % f->size; - - if (f->flags & SVM_FIFO_F_MULTI_CHUNK) - f->head_chunk = svm_fifo_lookup_clear_chunks (f, &f->ooo_deq_lookup, - f->head_chunk, f->head, - head); + head = head + total_drop_bytes; - if (PREDICT_FALSE (f->flags & SVM_FIFO_F_GROW)) - svm_fifo_try_grow (f, head); + if (f_pos_geq (head, f_chunk_end (f->start_chunk))) + { + fsh_collect_chunks (f->fs_hdr, f->slice_index, + f_unlink_chunks (f, head, 1)); + f->head_chunk = + f_chunk_includes_pos (f->start_chunk, head) ? f->start_chunk : 0; + } /* store-rel: consumer owned index (paired with load-acq in producer) */ clib_atomic_store_rel_n (&f->head, head); @@ -1250,25 +1102,48 @@ svm_fifo_dequeue_drop (svm_fifo_t * f, u32 len) return total_drop_bytes; } +/** + * Drop all data from fifo + * + * Should be called only from vpp side because of lookup cleanup + */ void svm_fifo_dequeue_drop_all (svm_fifo_t * f) { - /* consumer foreign index */ - u32 tail = clib_atomic_load_acq_n (&f->tail); + u32 head, tail; + + f_load_head_tail_all_acq (f, &head, &tail); - if (f->flags & SVM_FIFO_F_MULTI_CHUNK) - f->head_chunk = svm_fifo_lookup_clear_chunks (f, &f->ooo_deq_lookup, - f->head_chunk, tail, - tail - 1); + if (!f->head_chunk || !f_chunk_includes_pos (f->head_chunk, head)) + f->head_chunk = svm_fifo_find_chunk (f, head); - if (PREDICT_FALSE (f->flags & SVM_FIFO_F_GROW)) - svm_fifo_try_grow (f, tail); + f->head_chunk = f_lookup_clear_deq_chunks (f, f->head_chunk, tail); + + if (f_pos_geq (tail, f_chunk_end (f->start_chunk))) + fsh_collect_chunks (f->fs_hdr, f->slice_index, + f_unlink_chunks (f, tail, 0)); /* store-rel: consumer owned index (paired with load-acq in producer) */ clib_atomic_store_rel_n (&f->head, tail); } int +svm_fifo_fill_chunk_list (svm_fifo_t * f) +{ + u32 head, tail; + + f_load_head_tail_prod (f, &head, &tail); + + if (f_chunk_end (f->end_chunk) - head >= f->size) + return 0; + + if (f_try_grow (f, head, tail, f->size - (tail - head))) + return SVM_FIFO_EGROW; + + return 0; +} + +int svm_fifo_segments (svm_fifo_t * f, svm_fifo_seg_t * fs) { u32 cursize, head, tail, head_idx; @@ -1309,7 +1184,7 @@ svm_fifo_segments_free (svm_fifo_t * f, svm_fifo_seg_t * fs) head = f->head; ASSERT (fs[0].data == f->head_chunk->data + head); - head = (head + fs[0].len + fs[1].len) % f->size; + head = (head + fs[0].len + fs[1].len); /* store-rel: consumer owned index (paired with load-acq in producer) */ clib_atomic_store_rel_n (&f->head, head); } @@ -1325,6 +1200,10 @@ void svm_fifo_clone (svm_fifo_t * df, svm_fifo_t * sf) { u32 head, tail; + + /* Support only single chunk clones for now */ + ASSERT (svm_fifo_n_chunks (sf) == 1); + clib_memcpy_fast (df->head_chunk->data, sf->head_chunk->data, sf->size); f_load_head_tail_all_acq (sf, &head, &tail); @@ -1350,20 +1229,17 @@ svm_fifo_first_ooo_segment (svm_fifo_t * f) void svm_fifo_init_pointers (svm_fifo_t * f, u32 head, u32 tail) { - head = head % f->size; - tail = tail % f->size; + svm_fifo_chunk_t *c; + clib_atomic_store_rel_n (&f->head, head); clib_atomic_store_rel_n (&f->tail, tail); - if (f->flags & SVM_FIFO_F_MULTI_CHUNK) - { - svm_fifo_chunk_t *c; - c = svm_fifo_find_chunk (f, 
head); - ASSERT (c != 0); - f->head_chunk = f->ooo_deq = c; - c = svm_fifo_find_chunk (f, tail); - ASSERT (c != 0); - f->tail_chunk = f->ooo_enq = c; - } + + c = svm_fifo_find_chunk (f, head); + ASSERT (c != 0); + f->head_chunk = f->ooo_deq = c; + c = svm_fifo_find_chunk (f, tail); + ASSERT (c != 0); + f->tail_chunk = f->ooo_enq = c; } void @@ -1392,20 +1268,54 @@ svm_fifo_del_subscriber (svm_fifo_t * f, u8 subscriber) u8 svm_fifo_is_sane (svm_fifo_t * f) { - if (f->size - 1 != f->nitems && !(f->flags & SVM_FIFO_F_SHRINK)) - return 0; - if (!svm_fifo_chunk_includes_pos (f->head_chunk, f->head)) + svm_fifo_chunk_t *tmp; + + if (f->head_chunk && !f_chunk_includes_pos (f->head_chunk, f->head)) return 0; - if (!svm_fifo_chunk_includes_pos (f->tail_chunk, f->tail)) + if (f->tail_chunk && !f_chunk_includes_pos (f->tail_chunk, f->tail)) return 0; + if (f->ooo_deq) + { + if (rb_tree_is_init (&f->ooo_deq_lookup)) + { + if (f_pos_lt (f->ooo_deq->start_byte, f->start_chunk->start_byte) + || f_pos_gt (f->ooo_deq->start_byte, + f_chunk_end (f->end_chunk))) + return 0; - if (f->start_chunk->next != f->start_chunk) + tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup, + f->ooo_deq->start_byte); + } + else + tmp = svm_fifo_find_chunk (f, f->ooo_deq->start_byte); + if (tmp != f->ooo_deq) + return 0; + } + if (f->ooo_enq) { - svm_fifo_chunk_t *c, *prev = 0, *tmp; - u32 size = 0; + if (rb_tree_is_init (&f->ooo_enq_lookup)) + { + if (f_pos_lt (f->ooo_enq->start_byte, f->start_chunk->start_byte) + || f_pos_gt (f->ooo_enq->start_byte, + f_chunk_end (f->end_chunk))) + return 0; - if (!(f->flags & SVM_FIFO_F_MULTI_CHUNK)) + tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup, + f->ooo_enq->start_byte); + } + else + { + tmp = svm_fifo_find_next_chunk (f, f->tail_chunk, + f->ooo_enq->start_byte); + } + if (tmp != f->ooo_enq) return 0; + } + + if (f->start_chunk->next) + { + svm_fifo_chunk_t *c, *prev = 0, *tmp; + u32 chunks_bytes = 0; c = f->start_chunk; do @@ -1416,75 +1326,61 @@ svm_fifo_is_sane (svm_fifo_t * f) if (prev && (prev->start_byte + prev->length != c->start_byte)) return 0; - if (c->rb_index != RBTREE_TNIL_INDEX) + if (c->enq_rb_index != RBTREE_TNIL_INDEX) { - u8 found = 0; - - tmp = svm_fifo_find_chunk_rbtree (&f->ooo_enq_lookup, - c->start_byte); + tmp = f_find_chunk_rbtree (&f->ooo_enq_lookup, c->start_byte); if (tmp) { - found = 1; if (tmp != c) return 0; } - - tmp = svm_fifo_find_chunk_rbtree (&f->ooo_deq_lookup, - c->start_byte); + } + if (c->deq_rb_index != RBTREE_TNIL_INDEX) + { + tmp = f_find_chunk_rbtree (&f->ooo_deq_lookup, c->start_byte); if (tmp) { - if (found) - return 0; - - found = 1; if (tmp != c) return 0; } - if (!found) - return 0; } - size += c->length; + chunks_bytes += c->length; prev = c; c = c->next; } - while (c != f->start_chunk); + while (c); - if (size != f->size) + if (chunks_bytes < f->tail - f->head) return 0; } return 1; } -u8 -svm_fifo_set_single_thread_owned (svm_fifo_t * f) +u32 +svm_fifo_n_chunks (svm_fifo_t * f) { - if (f->flags & SVM_FIFO_F_SINGLE_THREAD_OWNED) - { - if (f->master_thread_index == os_get_thread_index ()) - { - /* just a duplicate call */ - return 0; - } + svm_fifo_chunk_t *c; + int n_chunks = 0; - /* already owned by another thread */ - return 1; + c = f->start_chunk; + while (c) + { + n_chunks++; + c = c->next; } - f->flags |= SVM_FIFO_F_SINGLE_THREAD_OWNED; - return 0; + return n_chunks; } u8 * format_ooo_segment (u8 * s, va_list * args) { - svm_fifo_t *f = va_arg (*args, svm_fifo_t *); + svm_fifo_t __clib_unused *f = va_arg (*args, svm_fifo_t *); 
ooo_segment_t *seg = va_arg (*args, ooo_segment_t *); - u32 normalized_start = (seg->start + f->nitems - f->tail) % f->size; - s = format (s, "[%u, %u], len %u, next %d, prev %d", normalized_start, - (normalized_start + seg->length) % f->size, seg->length, - seg->next, seg->prev); + s = format (s, "[%u, %u], len %u, next %d, prev %d", seg->start, + seg->start + seg->length, seg->length, seg->next, seg->prev); return s; } @@ -1532,9 +1428,10 @@ svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose) trace_len = 0; #endif - dummy_fifo = svm_fifo_create (f->size); - clib_memset (f->head_chunk->data, 0xFF, f->nitems); - vec_validate (data, f->nitems); + dummy_fifo = svm_fifo_alloc (f->size); + svm_fifo_init (f, f->size); + clib_memset (f->head_chunk->data, 0xFF, f->size); + vec_validate (data, f->size); for (i = 0; i < vec_len (data); i++) data[i] = i; @@ -1545,7 +1442,7 @@ svm_fifo_replay (u8 * s, svm_fifo_t * f, u8 no_read, u8 verbose) { if (verbose) s = format (s, "adding [%u, %u]:", trace[i].offset, - (trace[i].offset + trace[i].len) % dummy_fifo->size); + (trace[i].offset + trace[i].len)); svm_fifo_enqueue_with_offset (dummy_fifo, trace[i].offset, trace[i].len, &data[offset]); } @@ -1600,11 +1497,10 @@ format_svm_fifo (u8 * s, va_list * args) return s; indent = format_get_indent (s); - s = format (s, "cursize %u nitems %u has_event %d\n", - svm_fifo_max_dequeue (f), f->nitems, f->has_event); + s = format (s, "cursize %u nitems %u has_event %d min_alloc %u\n", + svm_fifo_max_dequeue (f), f->size, f->has_event, f->min_alloc); s = format (s, "%Uhead %u tail %u segment manager %u\n", format_white_space, - indent, (f->head % f->size), (f->tail % f->size), - f->segment_manager); + indent, f->head, f->tail, f->segment_manager); if (verbose > 1) s = format (s, "%Uvpp session %d thread %d app session %d thread %d\n", |