summaryrefslogtreecommitdiffstats
path: root/src/vlib/buffer_node.h
diff options
context:
space:
mode:
authorDamjan Marion <damarion@cisco.com>2018-07-20 18:47:05 +0200
committerDamjan Marion <damarion@cisco.com>2018-07-20 19:43:36 +0200
commit78fd7e810c24b9d638ac7c7f08edabf692543743 (patch)
treec676d0f4bc9a7471fd5c60cb02fb995868fa65d6 /src/vlib/buffer_node.h
parent508498f74d2df98e70a961d030cf0128a63a926d (diff)
Add congestion drop in interface handoff
This prevents deadlock in the case when worker A sends to B and worker B sends to A. Change-Id: Id9436960f932c58325fe4f5ef8ec67b50031aeda Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/vlib/buffer_node.h')
-rw-r--r--src/vlib/buffer_node.h53
1 file changed, 33 insertions, 20 deletions
diff --git a/src/vlib/buffer_node.h b/src/vlib/buffer_node.h
index cfdb0567a0c..93ffb1e9dce 100644
--- a/src/vlib/buffer_node.h
+++ b/src/vlib/buffer_node.h
@@ -443,28 +443,23 @@ vlib_buffer_enqueue_to_next (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
-static_always_inline void
+static_always_inline u32
vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
u32 * buffer_indices, u16 * thread_indices,
- u32 n_left)
+ u32 n_packets, int drop_on_congestion)
{
vlib_thread_main_t *tm = vlib_get_thread_main ();
- static __thread vlib_frame_queue_elt_t **handoff_queue_elt_by_thread_index =
- 0;
- static __thread vlib_frame_queue_t **congested_handoff_queue_by_thread_index
- = 0;
+ vlib_frame_queue_main_t *fqm;
+ vlib_frame_queue_per_thread_data_t *ptd;
+ u32 n_left = n_packets;
+ u32 drop_list[VLIB_FRAME_SIZE], *dbi = drop_list, n_drop = 0;
vlib_frame_queue_elt_t *hf = 0;
u32 n_left_to_next_thread = 0, *to_next_thread = 0;
u32 next_thread_index, current_thread_index = ~0;
int i;
- if (PREDICT_FALSE (handoff_queue_elt_by_thread_index == 0))
- {
- vec_validate (handoff_queue_elt_by_thread_index, tm->n_vlib_mains - 1);
- vec_validate_init_empty (congested_handoff_queue_by_thread_index,
- tm->n_vlib_mains - 1,
- (vlib_frame_queue_t *) (~0));
- }
+ fqm = vec_elt_at_index (tm->frame_queue_mains, frame_queue_index);
+ ptd = vec_elt_at_index (fqm->per_thread_data, vm->thread_index);
while (n_left)
{
@@ -472,12 +467,24 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
if (next_thread_index != current_thread_index)
{
+
+ if (drop_on_congestion &&
+ is_vlib_frame_queue_congested
+ (frame_queue_index, next_thread_index, fqm->queue_hi_thresh,
+ ptd->congested_handoff_queue_by_thread_index))
+ {
+ dbi[0] = buffer_indices[0];
+ dbi++;
+ n_drop++;
+ goto next;
+ }
+
if (hf)
hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
hf = vlib_get_worker_handoff_queue_elt (frame_queue_index,
next_thread_index,
- handoff_queue_elt_by_thread_index);
+ ptd->handoff_queue_elt_by_thread_index);
n_left_to_next_thread = VLIB_FRAME_SIZE - hf->n_vectors;
to_next_thread = &hf->buffer_index[hf->n_vectors];
@@ -493,11 +500,12 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
hf->n_vectors = VLIB_FRAME_SIZE;
vlib_put_frame_queue_elt (hf);
current_thread_index = ~0;
- handoff_queue_elt_by_thread_index[next_thread_index] = 0;
+ ptd->handoff_queue_elt_by_thread_index[next_thread_index] = 0;
hf = 0;
}
/* next */
+ next:
thread_indices += 1;
buffer_indices += 1;
n_left -= 1;
@@ -507,11 +515,11 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
hf->n_vectors = VLIB_FRAME_SIZE - n_left_to_next_thread;
/* Ship frames to the thread nodes */
- for (i = 0; i < vec_len (handoff_queue_elt_by_thread_index); i++)
+ for (i = 0; i < vec_len (ptd->handoff_queue_elt_by_thread_index); i++)
{
- if (handoff_queue_elt_by_thread_index[i])
+ if (ptd->handoff_queue_elt_by_thread_index[i])
{
- hf = handoff_queue_elt_by_thread_index[i];
+ hf = ptd->handoff_queue_elt_by_thread_index[i];
/*
* It works better to let the handoff node
* rate-adapt, always ship the handoff queue element.
@@ -519,14 +527,19 @@ vlib_buffer_enqueue_to_thread (vlib_main_t * vm, u32 frame_queue_index,
if (1 || hf->n_vectors == hf->last_n_vectors)
{
vlib_put_frame_queue_elt (hf);
- handoff_queue_elt_by_thread_index[i] = 0;
+ ptd->handoff_queue_elt_by_thread_index[i] = 0;
}
else
hf->last_n_vectors = hf->n_vectors;
}
- congested_handoff_queue_by_thread_index[i] =
+ ptd->congested_handoff_queue_by_thread_index[i] =
(vlib_frame_queue_t *) (~0);
}
+
+ if (drop_on_congestion && n_drop)
+ vlib_buffer_free (vm, drop_list, n_drop);
+
+ return n_packets - n_drop;
}
#endif /* included_vlib_buffer_node_h */