author    | Vratko Polak <vrpolak@cisco.com>   | 2023-08-17 16:15:38 +0200
committer | Fan Zhang <fanzhang.oss@gmail.com> | 2023-09-12 12:38:08 +0000
commit    | ceb64add2afe62c25a05be8c26cce5ea8d243b22 (patch)
tree      | 0d5081b665ba40f1e0404288b3e953c97c08ffa2 /src/plugins/crypto_sw_scheduler
parent    | 48cd559fb5f8e5bbe045f6fa2ca231ab0b0e6a18 (diff)
crypto-sw-scheduler: improve function indentation
The checkstyle --fix command remains confused
around the def/foreach/undef usage in convert_async_crypto_id,
but at least the other functions now look correctly indented to me.
Type: style
Change-Id: Ic8f7b580267386b7a6b07d33d9ba7ae9787c0e0a
Signed-off-by: Vratko Polak <vrpolak@cisco.com>
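The "def/foreach/undef" usage the message refers to is the pattern where a list macro is expanded under a temporary `#define _()` that emits one switch case per entry, and the temporary macro is then removed with `#undef _`. The following stand-alone sketch uses purely hypothetical names and entries (not code from this plugin) to show the shape of that pattern; because the `foreach_...` line sits inside a switch but expands to macro-generated cases, formatters such as clang-format tend to mis-indent it:

```c
/* Minimal sketch of the #define _ / foreach / #undef _ pattern.
 * All names (foreach_demo_alg, DEMO_ALG_*, demo_tag_len) are hypothetical. */
#include <stdio.h>

/* hypothetical algorithm list: _(name, tag_length) */
#define foreach_demo_alg                                                      \
  _ (AES_128_GCM, 16)                                                         \
  _ (AES_256_GCM, 16)

enum demo_alg_id
{
#define _(n, t) DEMO_ALG_##n,
  foreach_demo_alg
#undef _
    DEMO_ALG_N,
};

static int
demo_tag_len (enum demo_alg_id id)
{
  switch (id)
    {
/* temporary macro emits one "case" per list entry */
#define _(n, t)                                                               \
  case DEMO_ALG_##n:                                                          \
    return t;
      foreach_demo_alg
#undef _
    default:
      return -1;
    }
}

int
main (void)
{
  printf ("AES_128_GCM tag length: %d\n", demo_tag_len (DEMO_ALG_AES_128_GCM));
  return 0;
}
```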
Diffstat (limited to 'src/plugins/crypto_sw_scheduler')
-rw-r--r-- | src/plugins/crypto_sw_scheduler/main.c | 315
1 file changed, 156 insertions(+), 159 deletions(-)
diff --git a/src/plugins/crypto_sw_scheduler/main.c b/src/plugins/crypto_sw_scheduler/main.c
index 2b08cba6dbc..26ab02cdde5 100644
--- a/src/plugins/crypto_sw_scheduler/main.c
+++ b/src/plugins/crypto_sw_scheduler/main.c
@@ -346,68 +346,66 @@ crypto_sw_scheduler_process_aead (vlib_main_t *vm,
   process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks, &state);
   f->state = state;
-  }
+}
+
+static_always_inline void
+crypto_sw_scheduler_process_link (vlib_main_t *vm,
+                                  crypto_sw_scheduler_main_t *cm,
+                                  crypto_sw_scheduler_per_thread_data_t *ptd,
+                                  vnet_crypto_async_frame_t *f, u32 crypto_op,
+                                  u32 auth_op, u16 digest_len, u8 is_enc)
+{
+  vnet_crypto_async_frame_elt_t *fe;
+  u32 *bi;
+  u32 n_elts = f->n_elts;
+  u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
+
+  vec_reset_length (ptd->crypto_ops);
+  vec_reset_length (ptd->integ_ops);
+  vec_reset_length (ptd->chained_crypto_ops);
+  vec_reset_length (ptd->chained_integ_ops);
+  vec_reset_length (ptd->chunks);
+  fe = f->elts;
+  bi = f->buffer_indices;
-  static_always_inline void
-  crypto_sw_scheduler_process_link (
-    vlib_main_t *vm, crypto_sw_scheduler_main_t *cm,
-    crypto_sw_scheduler_per_thread_data_t *ptd, vnet_crypto_async_frame_t *f,
-    u32 crypto_op, u32 auth_op, u16 digest_len, u8 is_enc)
+  while (n_elts--)
     {
-      vnet_crypto_async_frame_elt_t *fe;
-      u32 *bi;
-      u32 n_elts = f->n_elts;
-      u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
-
-      vec_reset_length (ptd->crypto_ops);
-      vec_reset_length (ptd->integ_ops);
-      vec_reset_length (ptd->chained_crypto_ops);
-      vec_reset_length (ptd->chained_integ_ops);
-      vec_reset_length (ptd->chunks);
-      fe = f->elts;
-      bi = f->buffer_indices;
-
-      while (n_elts--)
-        {
-          if (n_elts > 1)
-            clib_prefetch_load (fe + 1);
-
-          crypto_sw_scheduler_convert_link_crypto (
-            vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0],
-            crypto_op, auth_op, digest_len, is_enc);
-          bi++;
-          fe++;
-        }
+      if (n_elts > 1)
+        clib_prefetch_load (fe + 1);
-      if (is_enc)
-        {
-          process_ops (vm, f, ptd->crypto_ops, &state);
-          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
-                               &state);
-          process_ops (vm, f, ptd->integ_ops, &state);
-          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
-                               &state);
-        }
-      else
-        {
-          process_ops (vm, f, ptd->integ_ops, &state);
-          process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
-                               &state);
-          process_ops (vm, f, ptd->crypto_ops, &state);
-          process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
-                               &state);
-        }
+      crypto_sw_scheduler_convert_link_crypto (
+        vm, ptd, cm->keys + fe->key_index, fe, fe - f->elts, bi[0], crypto_op,
+        auth_op, digest_len, is_enc);
+      bi++;
+      fe++;
+    }
-      f->state = state;
+  if (is_enc)
+    {
+      process_ops (vm, f, ptd->crypto_ops, &state);
+      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
+                           &state);
+      process_ops (vm, f, ptd->integ_ops, &state);
+      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks, &state);
     }
+  else
+    {
+      process_ops (vm, f, ptd->integ_ops, &state);
+      process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks, &state);
+      process_ops (vm, f, ptd->crypto_ops, &state);
+      process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
+                           &state);
+    }
+
+  f->state = state;
+}
-  static_always_inline int
-  convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id,
-                           u32 *crypto_op, u32 *auth_op_or_aad_len,
-                           u16 *digest_len, u8 *is_enc)
+static_always_inline int
+convert_async_crypto_id (vnet_crypto_async_op_id_t async_op_id, u32 *crypto_op,
+                         u32 *auth_op_or_aad_len, u16 *digest_len, u8 *is_enc)
+{
+  switch (async_op_id)
     {
-    switch (async_op_id)
-      {
 #define _(n, s, k, t, a)                                                      \
   case VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC:                            \
     *crypto_op = VNET_CRYPTO_OP_##n##_ENC;                                    \
@@ -421,7 +419,7 @@ crypto_sw_scheduler_process_aead (vlib_main_t *vm,
     *digest_len = t;                                                          \
     *is_enc = 0;                                                              \
     return 1;
-      foreach_crypto_aead_async_alg
+  foreach_crypto_aead_async_alg
 #undef _
 
 #define _(c, h, s, k, d)                                                      \
@@ -437,140 +435,139 @@ crypto_sw_scheduler_process_aead (vlib_main_t *vm,
     *digest_len = d;                                                          \
     *is_enc = 0;                                                              \
     return 0;
-      foreach_crypto_link_async_alg
+  foreach_crypto_link_async_alg
 #undef _
-      default : return -1;
-    }
-
-  return -1;
+    default : return -1;
   }
-  static_always_inline vnet_crypto_async_frame_t *
-  crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
-                               u32 *enqueue_thread_idx)
-  {
-    crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
-    crypto_sw_scheduler_per_thread_data_t *ptd =
-      cm->per_thread_data + vm->thread_index;
-    vnet_crypto_async_frame_t *f = 0;
-    crypto_sw_scheduler_queue_t *current_queue = 0;
-    u32 tail, head;
-    u8 found = 0;
-
-    /* get a pending frame to process */
-    if (ptd->self_crypto_enabled)
-      {
-        u32 i = ptd->last_serve_lcore_id + 1;
+  return -1;
+}
-        while (1)
-          {
-            crypto_sw_scheduler_per_thread_data_t *st;
-            u32 j;
+static_always_inline vnet_crypto_async_frame_t *
+crypto_sw_scheduler_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
+                             u32 *enqueue_thread_idx)
+{
+  crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+  crypto_sw_scheduler_per_thread_data_t *ptd =
+    cm->per_thread_data + vm->thread_index;
+  vnet_crypto_async_frame_t *f = 0;
+  crypto_sw_scheduler_queue_t *current_queue = 0;
+  u32 tail, head;
+  u8 found = 0;
+
+  /* get a pending frame to process */
+  if (ptd->self_crypto_enabled)
+    {
+      u32 i = ptd->last_serve_lcore_id + 1;
-            if (i >= vec_len (cm->per_thread_data))
-              i = 0;
+      while (1)
+        {
+          crypto_sw_scheduler_per_thread_data_t *st;
+          u32 j;
-            st = cm->per_thread_data + i;
+          if (i >= vec_len (cm->per_thread_data))
+            i = 0;
-            if (ptd->last_serve_encrypt)
-              current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
-            else
-              current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
+          st = cm->per_thread_data + i;
-            tail = current_queue->tail;
-            head = current_queue->head;
+          if (ptd->last_serve_encrypt)
+            current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
+          else
+            current_queue = &st->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
-            /* Skip this queue unless tail < head or head has overflowed
-             * and tail has not. At the point where tail overflows (== 0),
-             * the largest possible value of head is (queue size - 1).
-             * Prior to that, the largest possible value of head is
-             * (queue size - 2).
-             */
-            if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
-              goto skip_queue;
+          tail = current_queue->tail;
+          head = current_queue->head;
-            for (j = tail; j != head; j++)
-              {
+          /* Skip this queue unless tail < head or head has overflowed
+           * and tail has not. At the point where tail overflows (== 0),
+           * the largest possible value of head is (queue size - 1).
+           * Prior to that, the largest possible value of head is
+           * (queue size - 2).
+           */
+          if ((tail > head) && (head >= CRYPTO_SW_SCHEDULER_QUEUE_MASK))
+            goto skip_queue;
-                f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
+          for (j = tail; j != head; j++)
+            {
-                if (!f)
-                  continue;
+              f = current_queue->jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
-                if (clib_atomic_bool_cmp_and_swap (
-                      &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
-                      VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
-                  {
-                    found = 1;
-                    break;
-                  }
-              }
+              if (!f)
+                continue;
-          skip_queue:
-            if (found || i == ptd->last_serve_lcore_id)
+              if (clib_atomic_bool_cmp_and_swap (
+                    &f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
+                    VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
                {
-                CLIB_MEMORY_STORE_BARRIER ();
-                ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
+                  found = 1;
                  break;
                }
+            }
-            i++;
+        skip_queue:
+          if (found || i == ptd->last_serve_lcore_id)
+            {
+              CLIB_MEMORY_STORE_BARRIER ();
+              ptd->last_serve_encrypt = !ptd->last_serve_encrypt;
+              break;
            }
-          ptd->last_serve_lcore_id = i;
+          i++;
        }
-    if (found)
-      {
-        u32 crypto_op, auth_op_or_aad_len;
-        u16 digest_len;
-        u8 is_enc;
-        int ret;
-
-        ret = convert_async_crypto_id (
-          f->op, &crypto_op, &auth_op_or_aad_len, &digest_len, &is_enc);
-
-        if (ret == 1)
-          crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
-                                            auth_op_or_aad_len, digest_len);
-        else if (ret == 0)
-          crypto_sw_scheduler_process_link (vm, cm, ptd, f, crypto_op,
-                                            auth_op_or_aad_len, digest_len,
-                                            is_enc);
-
-        *enqueue_thread_idx = f->enqueue_thread_index;
-        *nb_elts_processed = f->n_elts;
-      }
+      ptd->last_serve_lcore_id = i;
+    }
-    if (ptd->last_return_queue)
-      {
-        current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
-        ptd->last_return_queue = 0;
-      }
-    else
-      {
-        current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
-        ptd->last_return_queue = 1;
-      }
+  if (found)
+    {
+      u32 crypto_op, auth_op_or_aad_len;
+      u16 digest_len;
+      u8 is_enc;
+      int ret;
+
+      ret = convert_async_crypto_id (f->op, &crypto_op, &auth_op_or_aad_len,
+                                     &digest_len, &is_enc);
+
+      if (ret == 1)
+        crypto_sw_scheduler_process_aead (vm, ptd, f, crypto_op,
+                                          auth_op_or_aad_len, digest_len);
+      else if (ret == 0)
+        crypto_sw_scheduler_process_link (
+          vm, cm, ptd, f, crypto_op, auth_op_or_aad_len, digest_len, is_enc);
+
+      *enqueue_thread_idx = f->enqueue_thread_index;
+      *nb_elts_processed = f->n_elts;
+    }
-    tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;
+  if (ptd->last_return_queue)
+    {
+      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_DECRYPT];
+      ptd->last_return_queue = 0;
+    }
+  else
+    {
+      current_queue = &ptd->queue[CRYPTO_SW_SCHED_QUEUE_TYPE_ENCRYPT];
+      ptd->last_return_queue = 1;
+    }
-    if (current_queue->jobs[tail] &&
-        current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
-      {
+  tail = current_queue->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK;
-        CLIB_MEMORY_STORE_BARRIER ();
-        current_queue->tail++;
-        f = current_queue->jobs[tail];
-        current_queue->jobs[tail] = 0;
+  if (current_queue->jobs[tail] &&
+      current_queue->jobs[tail]->state >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
+    {
-        return f;
-      }
+      CLIB_MEMORY_STORE_BARRIER ();
+      current_queue->tail++;
+      f = current_queue->jobs[tail];
+      current_queue->jobs[tail] = 0;
-    return 0;
+      return f;
    }
+  return 0;
+}
+
 static clib_error_t *
 sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
                                 vlib_cli_command_t * cmd)
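The dequeue loop in crypto_sw_scheduler_dequeue above indexes its job array with free-running head/tail counters masked into a power-of-two queue (jobs[j & CRYPTO_SW_SCHEDULER_QUEUE_MASK]). The sketch below is a generic, single-threaded illustration of that masked ring-buffer technique; the type, queue size, and function names are illustrative assumptions, and the cross-thread atomics and memory barriers the plugin relies on are deliberately omitted:

```c
/* Generic masked ring buffer sketch (not VPP code): head and tail are
 * free-running u32 counters; an index is mapped into the power-of-two
 * array with (counter & MASK). Wrap-around of the u32 counters is safe
 * because the full/empty checks use counter differences. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_QUEUE_SIZE 8u /* must be a power of two */
#define DEMO_QUEUE_MASK (DEMO_QUEUE_SIZE - 1)

typedef struct
{
  uint32_t head; /* next slot a producer writes */
  uint32_t tail; /* next slot a consumer reads */
  int jobs[DEMO_QUEUE_SIZE];
} demo_queue_t;

static int
demo_enqueue (demo_queue_t *q, int job)
{
  if (q->head - q->tail >= DEMO_QUEUE_SIZE) /* full */
    return -1;
  q->jobs[q->head & DEMO_QUEUE_MASK] = job;
  q->head++;
  return 0;
}

static int
demo_dequeue (demo_queue_t *q, int *job)
{
  if (q->tail == q->head) /* empty */
    return -1;
  *job = q->jobs[q->tail & DEMO_QUEUE_MASK];
  q->tail++;
  return 0;
}

int
main (void)
{
  demo_queue_t q = { 0 };
  int v;

  demo_enqueue (&q, 42);
  demo_enqueue (&q, 43);
  while (demo_dequeue (&q, &v) == 0)
    printf ("dequeued %d\n", v);
  return 0;
}
```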