diff options
author | Dave Barach <dave@barachs.net> | 2016-08-08 09:51:21 -0400 |
---|---|---|
committer | Keith Burns <alagalah@gmail.com> | 2016-08-08 15:25:14 +0000 |
commit | ba868bb7898edc46ad5f8cd4925af6c8b92e7c89 (patch) | |
tree | a6f4092a3373e0a8d4f4a67da6e60f78331fe37f /vnet/vnet/pipeline.h | |
parent | da62e1a1d84728094c427a4a7ec03e76e2df988c (diff) |
VPP-311 Coding standards cleanup for vnet/vnet/*.[ch]
Change-Id: I08ed983f594072bc8c72202e77205a7789eea599
Signed-off-by: Dave Barach <dave@barachs.net>
Diffstat (limited to 'vnet/vnet/pipeline.h')
-rw-r--r-- | vnet/vnet/pipeline.h | 639 |
1 file changed, 321 insertions, 318 deletions
diff --git a/vnet/vnet/pipeline.h b/vnet/vnet/pipeline.h index 5a0d4dcc616..a4aa5cf5277 100644 --- a/vnet/vnet/pipeline.h +++ b/vnet/vnet/pipeline.h @@ -23,14 +23,14 @@ * <Define pipeline stages> * * #include <vnet/pipeline.h> - * + * * static uword my_node_fn (vlib_main_t * vm, * vlib_node_runtime_t * node, * vlib_frame_t * frame) * { * return dispatch_pipeline (vm, node, frame); * } - * + * */ #ifndef NSTAGES @@ -41,20 +41,20 @@ #define STAGE_INLINE inline #endif -/* +/* * A prefetch stride of 2 is quasi-equivalent to doubling the number * of stages with every other pipeline stage empty. */ -/* - * This is a typical first pipeline stage, which prefetches - * buffer metadata and the first line of pkt data. +/* + * This is a typical first pipeline stage, which prefetches + * buffer metadata and the first line of pkt data. * To use it: * #define stage0 generic_stage0 */ -static STAGE_INLINE void generic_stage0 (vlib_main_t * vm, - vlib_node_runtime_t * node, - u32 buffer_index) +static STAGE_INLINE void +generic_stage0 (vlib_main_t * vm, + vlib_node_runtime_t * node, u32 buffer_index) { /* generic default stage 0 here */ vlib_buffer_t *b = vlib_get_buffer (vm, buffer_index); @@ -66,62 +66,61 @@ static STAGE_INLINE void generic_stage0 (vlib_main_t * vm, static STAGE_INLINE uword dispatch_pipeline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) + vlib_node_runtime_t * node, vlib_frame_t * frame) { - u32 * from = vlib_frame_vector_args (frame); - u32 n_left_from, n_left_to_next, * to_next, next_index, next0; + u32 *from = vlib_frame_vector_args (frame); + u32 n_left_from, n_left_to_next, *to_next, next_index, next0; int pi, pi_limit; - + n_left_from = frame->n_vectors; next_index = node->cached_next_index; - - while (n_left_from > 0) + + while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - + pi_limit = clib_min (n_left_from, n_left_to_next); - - for (pi = 0; pi < NSTAGES-1; pi++) - { - if(pi == 
pi_limit) - break; - stage0 (vm, node, from[pi]); - } - - for (; pi < pi_limit; pi++) - { - stage0 (vm, node, from[pi]); - to_next[0] = from [pi - 1]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 1]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 1], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - - for (; pi < (pi_limit + (NSTAGES-1)); pi++) - { - if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) - { - to_next[0] = from [pi - 1]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 1]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 1], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - } + + for (pi = 0; pi < NSTAGES - 1; pi++) + { + if (pi == pi_limit) + break; + stage0 (vm, node, from[pi]); + } + + for (; pi < pi_limit; pi++) + { + stage0 (vm, node, from[pi]); + to_next[0] = from[pi - 1]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 1]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 1], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + + for (; pi < (pi_limit + (NSTAGES - 1)); pi++) + { + if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) + { + to_next[0] = from[pi - 1]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 1]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 1], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + } vlib_put_next_frame (vm, node, 
next_index, n_left_to_next); from += pi_limit; } @@ -132,69 +131,68 @@ dispatch_pipeline (vlib_main_t * vm, #if NSTAGES == 3 static STAGE_INLINE uword dispatch_pipeline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) + vlib_node_runtime_t * node, vlib_frame_t * frame) { - u32 * from = vlib_frame_vector_args (frame); - u32 n_left_from, n_left_to_next, * to_next, next_index, next0; + u32 *from = vlib_frame_vector_args (frame); + u32 n_left_from, n_left_to_next, *to_next, next_index, next0; int pi, pi_limit; - + n_left_from = frame->n_vectors; next_index = node->cached_next_index; - - while (n_left_from > 0) + + while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - + pi_limit = clib_min (n_left_from, n_left_to_next); - - for (pi = 0; pi < NSTAGES-1; pi++) - { - if(pi == pi_limit) - break; - stage0 (vm, node, from[pi]); - if (pi-1 >= 0) - stage1 (vm, node, from[pi-1]); - } - - for (; pi < pi_limit; pi++) - { - stage0 (vm, node, from[pi]); - stage1 (vm, node, from[pi-1]); - to_next[0] = from [pi - 2]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 2]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 2], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - - - for (; pi < (pi_limit + (NSTAGES-1)); pi++) - { - if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) - stage1 (vm, node, from[pi-1]); - if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) - { - to_next[0] = from[pi - 2]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 2]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 2], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - } - + + for (pi = 0; pi < NSTAGES 
- 1; pi++) + { + if (pi == pi_limit) + break; + stage0 (vm, node, from[pi]); + if (pi - 1 >= 0) + stage1 (vm, node, from[pi - 1]); + } + + for (; pi < pi_limit; pi++) + { + stage0 (vm, node, from[pi]); + stage1 (vm, node, from[pi - 1]); + to_next[0] = from[pi - 2]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 2]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 2], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + + + for (; pi < (pi_limit + (NSTAGES - 1)); pi++) + { + if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) + stage1 (vm, node, from[pi - 1]); + if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) + { + to_next[0] = from[pi - 2]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 2]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 2], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + } + vlib_put_next_frame (vm, node, next_index, n_left_to_next); from += pi_limit; } @@ -205,76 +203,75 @@ dispatch_pipeline (vlib_main_t * vm, #if NSTAGES == 4 static STAGE_INLINE uword dispatch_pipeline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) + vlib_node_runtime_t * node, vlib_frame_t * frame) { - u32 * from = vlib_frame_vector_args (frame); - u32 n_left_from, n_left_to_next, * to_next, next_index, next0; + u32 *from = vlib_frame_vector_args (frame); + u32 n_left_from, n_left_to_next, *to_next, next_index, next0; int pi, pi_limit; - + n_left_from = frame->n_vectors; next_index = node->cached_next_index; - - while (n_left_from > 0) + + while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - + pi_limit = clib_min (n_left_from, n_left_to_next); - - for (pi = 0; 
pi < NSTAGES-1; pi++) - { - if(pi == pi_limit) - break; - stage0 (vm, node, from[pi]); - if (pi-1 >= 0) - stage1 (vm, node, from[pi-1]); - if (pi-2 >= 0) - stage2 (vm, node, from[pi-2]); - } - - for (; pi < pi_limit; pi++) - { - stage0 (vm, node, from[pi]); - stage1 (vm, node, from[pi-1]); - stage2 (vm, node, from[pi-2]); - to_next[0] = from [pi - 3]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 3]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 3], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - - - for (; pi < (pi_limit + (NSTAGES-1)); pi++) - { - if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) - stage1 (vm, node, from[pi-1]); - if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) - stage2 (vm, node, from[pi-2]); - if (((pi - 3) >= 0) && ((pi - 3) < pi_limit)) - { - to_next[0] = from[pi - 3]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 3]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 3], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - from += pi_limit; + + for (pi = 0; pi < NSTAGES - 1; pi++) + { + if (pi == pi_limit) + break; + stage0 (vm, node, from[pi]); + if (pi - 1 >= 0) + stage1 (vm, node, from[pi - 1]); + if (pi - 2 >= 0) + stage2 (vm, node, from[pi - 2]); + } + + for (; pi < pi_limit; pi++) + { + stage0 (vm, node, from[pi]); + stage1 (vm, node, from[pi - 1]); + stage2 (vm, node, from[pi - 2]); + to_next[0] = from[pi - 3]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 3]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 3], next0); + n_left_from--; + 
if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + + + for (; pi < (pi_limit + (NSTAGES - 1)); pi++) + { + if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) + stage1 (vm, node, from[pi - 1]); + if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) + stage2 (vm, node, from[pi - 2]); + if (((pi - 3) >= 0) && ((pi - 3) < pi_limit)) + { + to_next[0] = from[pi - 3]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 3]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 3], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + from += pi_limit; } return frame->n_vectors; } @@ -284,81 +281,80 @@ dispatch_pipeline (vlib_main_t * vm, #if NSTAGES == 5 static STAGE_INLINE uword dispatch_pipeline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) + vlib_node_runtime_t * node, vlib_frame_t * frame) { - u32 * from = vlib_frame_vector_args (frame); - u32 n_left_from, n_left_to_next, * to_next, next_index, next0; + u32 *from = vlib_frame_vector_args (frame); + u32 n_left_from, n_left_to_next, *to_next, next_index, next0; int pi, pi_limit; - + n_left_from = frame->n_vectors; next_index = node->cached_next_index; - - while (n_left_from > 0) + + while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - + pi_limit = clib_min (n_left_from, n_left_to_next); - - for (pi = 0; pi < NSTAGES-1; pi++) - { - if(pi == pi_limit) - break; - stage0 (vm, node, from[pi]); - if (pi-1 >= 0) - stage1 (vm, node, from[pi-1]); - if (pi-2 >= 0) - stage2 (vm, node, from[pi-2]); - if (pi-3 >= 0) - stage3 (vm, node, from[pi-3]); - } - - for (; pi < pi_limit; pi++) - { - stage0 (vm, node, from[pi]); - stage1 (vm, node, from[pi-1]); - stage2 (vm, node, 
from[pi-2]); - stage3 (vm, node, from[pi-3]); - to_next[0] = from [pi - 4]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 4]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 4], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - - - for (; pi < (pi_limit + (NSTAGES-1)); pi++) - { - if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) - stage1 (vm, node, from[pi-1]); - if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) - stage2 (vm, node, from[pi - 2]); - if (((pi - 3) >= 0) && ((pi - 3) < pi_limit)) - stage3 (vm, node, from[pi - 3]); - if (((pi - 4) >= 0) && ((pi - 4) < pi_limit)) - { - to_next[0] = from[pi - 4]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 4]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 4], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - from += pi_limit; + + for (pi = 0; pi < NSTAGES - 1; pi++) + { + if (pi == pi_limit) + break; + stage0 (vm, node, from[pi]); + if (pi - 1 >= 0) + stage1 (vm, node, from[pi - 1]); + if (pi - 2 >= 0) + stage2 (vm, node, from[pi - 2]); + if (pi - 3 >= 0) + stage3 (vm, node, from[pi - 3]); + } + + for (; pi < pi_limit; pi++) + { + stage0 (vm, node, from[pi]); + stage1 (vm, node, from[pi - 1]); + stage2 (vm, node, from[pi - 2]); + stage3 (vm, node, from[pi - 3]); + to_next[0] = from[pi - 4]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 4]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 4], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + 
n_left_to_next); + } + + + for (; pi < (pi_limit + (NSTAGES - 1)); pi++) + { + if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) + stage1 (vm, node, from[pi - 1]); + if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) + stage2 (vm, node, from[pi - 2]); + if (((pi - 3) >= 0) && ((pi - 3) < pi_limit)) + stage3 (vm, node, from[pi - 3]); + if (((pi - 4) >= 0) && ((pi - 4) < pi_limit)) + { + to_next[0] = from[pi - 4]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 4]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 4], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + from += pi_limit; } return frame->n_vectors; } @@ -367,87 +363,94 @@ dispatch_pipeline (vlib_main_t * vm, #if NSTAGES == 6 static STAGE_INLINE uword dispatch_pipeline (vlib_main_t * vm, - vlib_node_runtime_t * node, - vlib_frame_t * frame) + vlib_node_runtime_t * node, vlib_frame_t * frame) { - u32 * from = vlib_frame_vector_args (frame); - u32 n_left_from, n_left_to_next, * to_next, next_index, next0; + u32 *from = vlib_frame_vector_args (frame); + u32 n_left_from, n_left_to_next, *to_next, next_index, next0; int pi, pi_limit; - + n_left_from = frame->n_vectors; next_index = node->cached_next_index; - - while (n_left_from > 0) + + while (n_left_from > 0) { vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next); - + pi_limit = clib_min (n_left_from, n_left_to_next); - - for (pi = 0; pi < NSTAGES-1; pi++) - { - if(pi == pi_limit) - break; - stage0 (vm, node, from[pi]); - if (pi-1 >= 0) - stage1 (vm, node, from[pi-1]); - if (pi-2 >= 0) - stage2 (vm, node, from[pi-2]); - if (pi-3 >= 0) - stage3 (vm, node, from[pi-3]); - if (pi-4 >= 0) - stage4 (vm, node, from[pi-4]); - } - - for (; pi < pi_limit; pi++) - { - stage0 (vm, node, from[pi]); - stage1 (vm, node, 
from[pi-1]); - stage2 (vm, node, from[pi-2]); - stage3 (vm, node, from[pi-3]); - stage4 (vm, node, from[pi-4]); - to_next[0] = from [pi - 5]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 5]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 5], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - - - for (; pi < (pi_limit + (NSTAGES-1)); pi++) - { - if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) - stage1 (vm, node, from[pi-1]); - if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) - stage2 (vm, node, from[pi - 2]); - if (((pi - 3) >= 0) && ((pi - 3) < pi_limit)) - stage3 (vm, node, from[pi - 3]); - if (((pi - 4) >= 0) && ((pi - 4) < pi_limit)) - stage4 (vm, node, from[pi - 4]); - if (((pi - 5) >= 0) && ((pi - 5) < pi_limit)) - { - to_next[0] = from[pi - 5]; - to_next++; - n_left_to_next--; - next0 = last_stage (vm, node, from [pi - 5]); - vlib_validate_buffer_enqueue_x1 (vm, node, next_index, - to_next, n_left_to_next, - from[pi - 5], next0); - n_left_from--; - if ((int) n_left_to_next < 0 && n_left_from > 0) - vlib_get_next_frame (vm, node, next_index, to_next, - n_left_to_next); - } - } - - vlib_put_next_frame (vm, node, next_index, n_left_to_next); - from += pi_limit; + + for (pi = 0; pi < NSTAGES - 1; pi++) + { + if (pi == pi_limit) + break; + stage0 (vm, node, from[pi]); + if (pi - 1 >= 0) + stage1 (vm, node, from[pi - 1]); + if (pi - 2 >= 0) + stage2 (vm, node, from[pi - 2]); + if (pi - 3 >= 0) + stage3 (vm, node, from[pi - 3]); + if (pi - 4 >= 0) + stage4 (vm, node, from[pi - 4]); + } + + for (; pi < pi_limit; pi++) + { + stage0 (vm, node, from[pi]); + stage1 (vm, node, from[pi - 1]); + stage2 (vm, node, from[pi - 2]); + stage3 (vm, node, from[pi - 3]); + stage4 (vm, node, from[pi - 4]); + to_next[0] = from[pi - 5]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi 
- 5]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 5], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + + + for (; pi < (pi_limit + (NSTAGES - 1)); pi++) + { + if (((pi - 1) >= 0) && ((pi - 1) < pi_limit)) + stage1 (vm, node, from[pi - 1]); + if (((pi - 2) >= 0) && ((pi - 2) < pi_limit)) + stage2 (vm, node, from[pi - 2]); + if (((pi - 3) >= 0) && ((pi - 3) < pi_limit)) + stage3 (vm, node, from[pi - 3]); + if (((pi - 4) >= 0) && ((pi - 4) < pi_limit)) + stage4 (vm, node, from[pi - 4]); + if (((pi - 5) >= 0) && ((pi - 5) < pi_limit)) + { + to_next[0] = from[pi - 5]; + to_next++; + n_left_to_next--; + next0 = last_stage (vm, node, from[pi - 5]); + vlib_validate_buffer_enqueue_x1 (vm, node, next_index, + to_next, n_left_to_next, + from[pi - 5], next0); + n_left_from--; + if ((int) n_left_to_next < 0 && n_left_from > 0) + vlib_get_next_frame (vm, node, next_index, to_next, + n_left_to_next); + } + } + + vlib_put_next_frame (vm, node, next_index, n_left_to_next); + from += pi_limit; } return frame->n_vectors; } #endif + +/* + * fd.io coding-style-patch-verification: ON + * + * Local Variables: + * eval: (c-set-style "gnu") + * End: + */ |