author     PiotrX Kleski <piotrx.kleski@intel.com>   2020-07-08 14:36:34 +0200
committer  Damjan Marion <dmarion@me.com>            2020-09-03 14:23:51 +0000
commit     2284817eae67d78f3a9afffed9d830da658dd568 (patch)
tree       0d108b262c42caa5b70d065dd5596f368a79a795 /src
parent     56230097e2a642740a1a00483e54419edc7fc2ba (diff)
crypto: SW scheduler async crypto engine
Type: feature

This patch adds a new sw_scheduler async crypto engine. The engine
transforms async frames into sync crypto ops and delegates them to the
active sync engines. With this patch it is possible to increase
single-worker crypto throughput by offloading the crypto workload to
multiple workers.

By default, all workers in the system take part in crypto workload
processing, but a worker's available cycles are limited. To free up
cycles on a worker for other work (e.g. the worker core that handles
the RX/TX and IPsec stack processing), a CLI command is added that
removes a worker from (or adds it back to) the heavy crypto workload,
leaving the crypto processing to the remaining workers:

- set sw_scheduler worker <idx> crypto <on|off>

The patch also adds an interrupt mode to the async crypto dispatch
node. In this mode the node is signalled when new frames are enqueued,
as opposed to polling mode, which calls the dispatch node continuously.
New CLI commands:

- set crypto async dispatch [polling|interrupt]
- show crypto async status (displays the mode and the nodes' states)

Signed-off-by: PiotrX Kleski <piotrx.kleski@intel.com>
Signed-off-by: DariuszX Kazimierski <dariuszx.kazimierski@intel.com>
Reviewed-by: Fan Zhang <roy.fan.zhang@intel.com>
Change-Id: I332655f347bb9e3bc9c64166e86e393e911bdb39
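For illustration, a vppctl session exercising the new commands could look like
the following. Worker names and counts are examples only; the column layout
follows the format strings added in cli.c and main.c below, and the INTERRUPT
node states assume an async-capable feature is already enabled (otherwise the
nodes would show DISABLED).

    vpp# show sw_scheduler workers
    ID     Name                Crypto
    0      vpp_wk_0            on
    1      vpp_wk_1            on
    vpp# set sw_scheduler worker 0 crypto off
    vpp# set crypto async dispatch interrupt
    vpp# show crypto async status
    Crypto async dispatch mode: INTERRUPT
    threadId: 1      INTERRUPT
    threadId: 2      INTERRUPT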
Diffstat (limited to 'src')
-rw-r--r--  src/plugins/crypto_sw_scheduler/CMakeLists.txt          17
-rw-r--r--  src/plugins/crypto_sw_scheduler/crypto_sw_scheduler.h   61
-rw-r--r--  src/plugins/crypto_sw_scheduler/main.c                 713
-rw-r--r--  src/plugins/dpdk/cryptodev/cryptodev.c                   6
-rw-r--r--  src/vnet/crypto/cli.c                                   73
-rw-r--r--  src/vnet/crypto/crypto.c                                82
-rw-r--r--  src/vnet/crypto/crypto.h                                44
-rw-r--r--  src/vnet/crypto/node.c                                 102
8 files changed, 1027 insertions, 71 deletions
diff --git a/src/plugins/crypto_sw_scheduler/CMakeLists.txt b/src/plugins/crypto_sw_scheduler/CMakeLists.txt
new file mode 100644
index 00000000000..b94b8f8804c
--- /dev/null
+++ b/src/plugins/crypto_sw_scheduler/CMakeLists.txt
@@ -0,0 +1,17 @@
+# Copyright (c) 2020 Intel and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(crypto_sw_scheduler
+ SOURCES
+ main.c
+)
diff --git a/src/plugins/crypto_sw_scheduler/crypto_sw_scheduler.h b/src/plugins/crypto_sw_scheduler/crypto_sw_scheduler.h
new file mode 100644
index 00000000000..9db42ba18ce
--- /dev/null
+++ b/src/plugins/crypto_sw_scheduler/crypto_sw_scheduler.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (c) 2020 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/crypto/crypto.h>
+
+#ifndef __crypto_sw_scheduler_h__
+#define __crypto_sw_scheduler_h__
+
+#define CRYPTO_SW_SCHEDULER_QUEUE_SIZE 64
+#define CRYPTO_SW_SCHEDULER_QUEUE_MASK (CRYPTO_SW_SCHEDULER_QUEUE_SIZE - 1)
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 head;
+ u32 tail;
+ vnet_crypto_async_frame_t *jobs[0];
+} crypto_sw_scheduler_queue_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ crypto_sw_scheduler_queue_t *queues[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ vnet_crypto_op_t *crypto_ops;
+ vnet_crypto_op_t *integ_ops;
+ vnet_crypto_op_t *chained_crypto_ops;
+ vnet_crypto_op_t *chained_integ_ops;
+ vnet_crypto_op_chunk_t *chunks;
+ u8 self_crypto_enabled;
+} crypto_sw_scheduler_per_thread_data_t;
+
+typedef struct
+{
+ u32 crypto_engine_index;
+ crypto_sw_scheduler_per_thread_data_t *per_thread_data;
+ vnet_crypto_key_t *keys;
+} crypto_sw_scheduler_main_t;
+
+extern crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
+
+#endif // __crypto_sw_scheduler_h__
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
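The queue declared in this header is a fixed-size ring: head and tail are
free-running counters and a slot is selected by masking with
CRYPTO_SW_SCHEDULER_QUEUE_MASK, which works because the queue size is a power
of two. Below is a minimal standalone sketch of that indexing scheme
(illustrative only, not part of the patch; the real enqueue in main.c
additionally issues CLIB_MEMORY_STORE_BARRIER() before publishing the new
head):

#include <stddef.h>
#include <stdint.h>

#define DEMO_QUEUE_SIZE 64              /* must be a power of two */
#define DEMO_QUEUE_MASK (DEMO_QUEUE_SIZE - 1)

typedef struct
{
  uint32_t head;                        /* advanced by the enqueuing thread */
  uint32_t tail;                        /* advanced as frames are collected */
  void *jobs[DEMO_QUEUE_SIZE];
} demo_queue_t;

/* returns 0 on success, -1 if the next slot is still occupied (queue full) */
static int
demo_enqueue (demo_queue_t * q, void *frame)
{
  uint32_t slot = q->head & DEMO_QUEUE_MASK;
  if (q->jobs[slot])
    return -1;
  q->jobs[slot] = frame;
  q->head++;                            /* free-running, wraps naturally */
  return 0;
}

/* returns the oldest frame, or NULL if the ring is empty */
static void *
demo_dequeue (demo_queue_t * q)
{
  uint32_t slot = q->tail & DEMO_QUEUE_MASK;
  void *frame = q->jobs[slot];
  if (!frame)
    return NULL;
  q->jobs[slot] = NULL;
  q->tail++;
  return frame;
}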
diff --git a/src/plugins/crypto_sw_scheduler/main.c b/src/plugins/crypto_sw_scheduler/main.c
new file mode 100644
index 00000000000..8f27fefe29a
--- /dev/null
+++ b/src/plugins/crypto_sw_scheduler/main.c
@@ -0,0 +1,713 @@
+/*
+ * Copyright (c) 2020 Intel and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
+#include "crypto_sw_scheduler.h"
+
+int
+crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
+{
+ crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ crypto_sw_scheduler_per_thread_data_t *ptd = 0;
+ u32 count = 0, i = vlib_num_workers () > 0;
+
+ if (worker_idx >= vlib_num_workers ())
+ {
+ return VNET_API_ERROR_INVALID_VALUE;
+ }
+
+ for (; i < tm->n_vlib_mains; i++)
+ {
+ ptd = cm->per_thread_data + i;
+ count += ptd->self_crypto_enabled;
+ }
+
+ if (enabled || count > 1)
+ {
+ cm->per_thread_data[vlib_get_worker_thread_index
+ (worker_idx)].self_crypto_enabled = enabled;
+ }
+ else /* cannot disable all crypto workers */
+ {
+ return VNET_API_ERROR_INVALID_VALUE_2;
+ }
+ return 0;
+}
+
+static void
+crypto_sw_scheduler_key_handler (vlib_main_t * vm, vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx)
+{
+ crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+ vnet_crypto_key_t *key = vnet_crypto_get_key (idx);
+
+ vec_validate (cm->keys, idx);
+
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ {
+ if (kop == VNET_CRYPTO_KEY_OP_DEL)
+ {
+ cm->keys[idx].index_crypto = UINT32_MAX;
+ cm->keys[idx].index_integ = UINT32_MAX;
+ }
+ else
+ {
+ cm->keys[idx] = *key;
+ }
+ }
+}
+
+static int
+crypto_sw_scheduler_frame_enqueue (vlib_main_t * vm,
+ vnet_crypto_async_frame_t * frame)
+{
+ crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+ crypto_sw_scheduler_per_thread_data_t *ptd
+ = vec_elt_at_index (cm->per_thread_data, vm->thread_index);
+ crypto_sw_scheduler_queue_t *q = ptd->queues[frame->op];
+ u64 head = q->head;
+
+ if (q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK])
+ {
+ u32 n_elts = frame->n_elts, i;
+ for (i = 0; i < n_elts; i++)
+ frame->elts[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+ frame->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
+ return -1;
+ }
+ frame->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+ q->jobs[head & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = frame;
+ head += 1;
+ CLIB_MEMORY_STORE_BARRIER ();
+ q->head = head;
+ return 0;
+}
+
+static_always_inline vnet_crypto_async_frame_t *
+crypto_sw_scheduler_get_pending_frame (crypto_sw_scheduler_queue_t * q)
+{
+ vnet_crypto_async_frame_t *f;
+ u32 i;
+ u32 tail = q->tail;
+ u32 head = q->head;
+
+ for (i = tail; i < head; i++)
+ {
+ f = q->jobs[i & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
+ if (!f)
+ continue;
+ if (clib_atomic_bool_cmp_and_swap
+ (&f->state, VNET_CRYPTO_FRAME_STATE_PENDING,
+ VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS))
+ {
+ return f;
+ }
+ }
+ return NULL;
+}
+
+static_always_inline vnet_crypto_async_frame_t *
+crypto_sw_scheduler_get_completed_frame (crypto_sw_scheduler_queue_t * q)
+{
+ vnet_crypto_async_frame_t *f = 0;
+ if (q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]
+ && q->jobs[q->tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK]->state
+ >= VNET_CRYPTO_FRAME_STATE_SUCCESS)
+ {
+ u32 tail = q->tail;
+ CLIB_MEMORY_STORE_BARRIER ();
+ q->tail++;
+ f = q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK];
+ q->jobs[tail & CRYPTO_SW_SCHEDULER_QUEUE_MASK] = 0;
+ }
+ return f;
+}
+
+static_always_inline void
+cryptodev_sw_scheduler_sgl (vlib_main_t * vm,
+ crypto_sw_scheduler_per_thread_data_t * ptd,
+ vlib_buffer_t * b, vnet_crypto_op_t * op,
+ i32 offset, i32 len)
+{
+ vnet_crypto_op_chunk_t *ch;
+ vlib_buffer_t *nb = b;
+ u32 n_chunks = 0;
+ u32 chunk_index = vec_len (ptd->chunks);
+
+ op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
+
+ while (len)
+ {
+ if (nb->current_data + nb->current_length > offset)
+ {
+ vec_add2 (ptd->chunks, ch, 1);
+ ch->src = ch->dst = nb->data + offset;
+ ch->len
+ = clib_min (nb->current_data + nb->current_length - offset, len);
+ len -= ch->len;
+ offset = 0;
+ n_chunks++;
+ if (!len)
+ break;
+ }
+ if (offset)
+ offset -= nb->current_data + nb->current_length;
+ if (nb->flags & VLIB_BUFFER_NEXT_PRESENT)
+ nb = vlib_get_buffer (vm, nb->next_buffer);
+ else
+ break;
+ }
+
+ ASSERT (offset == 0 && len == 0);
+ op->chunk_index = chunk_index;
+ op->n_chunks = n_chunks;
+}
+
+static_always_inline void
+crypto_sw_scheduler_convert_aead (vlib_main_t * vm,
+ crypto_sw_scheduler_per_thread_data_t * ptd,
+ vnet_crypto_async_frame_elt_t * fe,
+ u32 index, u32 bi,
+ vnet_crypto_op_id_t op_id, u16 aad_len,
+ u8 tag_len)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vnet_crypto_op_t *op = 0;
+
+ if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vec_add2 (ptd->chained_crypto_ops, op, 1);
+ cryptodev_sw_scheduler_sgl (vm, ptd, b, op, fe->crypto_start_offset,
+ fe->crypto_total_length);
+ }
+ else
+ {
+ vec_add2 (ptd->crypto_ops, op, 1);
+ op->src = op->dst = b->data + fe->crypto_start_offset;
+ op->len = fe->crypto_total_length;
+ }
+
+ op->op = op_id;
+ op->tag = fe->tag;
+ op->flags = fe->flags;
+ op->key_index = fe->key_index;
+ op->iv = fe->iv;
+ op->aad = fe->aad;
+ op->aad_len = aad_len;
+ op->tag_len = tag_len;
+ op->user_data = index;
+}
+
+static_always_inline void
+crypto_sw_scheduler_convert_link_crypto (vlib_main_t * vm,
+ crypto_sw_scheduler_per_thread_data_t
+ * ptd, vnet_crypto_key_t * key,
+ vnet_crypto_async_frame_elt_t * fe,
+ u32 index, u32 bi,
+ vnet_crypto_op_id_t crypto_op_id,
+ vnet_crypto_op_id_t integ_op_id,
+ u32 digest_len, u8 is_enc)
+{
+ vlib_buffer_t *b = vlib_get_buffer (vm, bi);
+ vnet_crypto_op_t *crypto_op = 0, *integ_op = 0;
+
+ if (fe->flags & VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS)
+ {
+ vec_add2 (ptd->chained_crypto_ops, crypto_op, 1);
+ vec_add2 (ptd->chained_integ_ops, integ_op, 1);
+ cryptodev_sw_scheduler_sgl (vm, ptd, b, crypto_op,
+ fe->crypto_start_offset,
+ fe->crypto_total_length);
+ cryptodev_sw_scheduler_sgl (vm, ptd, b, integ_op,
+ fe->integ_start_offset,
+ fe->crypto_total_length +
+ fe->integ_length_adj);
+ }
+ else
+ {
+ vec_add2 (ptd->crypto_ops, crypto_op, 1);
+ vec_add2 (ptd->integ_ops, integ_op, 1);
+ crypto_op->src = crypto_op->dst = b->data + fe->crypto_start_offset;
+ crypto_op->len = fe->crypto_total_length;
+ integ_op->src = integ_op->dst = b->data + fe->integ_start_offset;
+ integ_op->len = fe->crypto_total_length + fe->integ_length_adj;
+ }
+
+ crypto_op->op = crypto_op_id;
+ crypto_op->iv = fe->iv;
+ crypto_op->key_index = key->index_crypto;
+ crypto_op->user_data = 0;
+ integ_op->op = integ_op_id;
+ integ_op->digest = fe->digest;
+ integ_op->digest_len = digest_len;
+ integ_op->key_index = key->index_integ;
+ if (is_enc)
+ crypto_op->flags |= VNET_CRYPTO_OP_FLAG_INIT_IV;
+ else
+ integ_op->flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
+ crypto_op->user_data = integ_op->user_data = index;
+}
+
+static_always_inline void
+process_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
+ vnet_crypto_op_t * ops, u8 * state)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+
+ if (n_ops == 0)
+ return;
+
+ n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
+
+ while (n_fail)
+ {
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ f->elts[op->user_data].status = op->status;
+ *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
+ n_fail--;
+ }
+ op++;
+ }
+}
+
+static_always_inline void
+process_chained_ops (vlib_main_t * vm, vnet_crypto_async_frame_t * f,
+ vnet_crypto_op_t * ops, vnet_crypto_op_chunk_t * chunks,
+ u8 * state)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+
+ if (n_ops == 0)
+ return;
+
+ n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops);
+
+ while (n_fail)
+ {
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ f->elts[op->user_data].status = op->status;
+ *state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
+ n_fail--;
+ }
+ op++;
+ }
+}
+
+static_always_inline vnet_crypto_async_frame_t *
+crypto_sw_scheduler_dequeue_aead (vlib_main_t * vm,
+ vnet_crypto_async_op_id_t async_op_id,
+ vnet_crypto_op_id_t sync_op_id, u8 tag_len,
+ u8 aad_len, u32 * nb_elts_processed,
+ u32 * enqueue_thread_idx)
+{
+ crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+ crypto_sw_scheduler_per_thread_data_t *ptd = 0;
+ crypto_sw_scheduler_queue_t *q = 0;
+ vnet_crypto_async_frame_t *f = 0;
+ vnet_crypto_async_frame_elt_t *fe;
+ u32 *bi;
+ u32 n_elts;
+ int i = 0;
+ u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
+
+ if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
+ {
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, cm->per_thread_data)
+ {
+ ptd = cm->per_thread_data + i;
+ q = ptd->queues[async_op_id];
+ f = crypto_sw_scheduler_get_pending_frame (q);
+ if (f)
+ break;
+ }
+ /* *INDENT-ON* */
+ }
+
+ ptd = cm->per_thread_data + vm->thread_index;
+
+ if (f)
+ {
+ *nb_elts_processed = n_elts = f->n_elts;
+ fe = f->elts;
+ bi = f->buffer_indices;
+
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chunks);
+
+ while (n_elts--)
+ {
+ if (n_elts > 1)
+ CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ crypto_sw_scheduler_convert_aead (vm, ptd, fe, fe - f->elts, bi[0],
+ sync_op_id, aad_len, tag_len);
+ bi++;
+ fe++;
+ }
+
+ process_ops (vm, f, ptd->crypto_ops, &state);
+ process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
+ &state);
+ f->state = state;
+ *enqueue_thread_idx = f->enqueue_thread_index;
+ }
+
+ return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
+}
+
+static_always_inline vnet_crypto_async_frame_t *
+crypto_sw_scheduler_dequeue_link (vlib_main_t * vm,
+ vnet_crypto_async_op_id_t async_op_id,
+ vnet_crypto_op_id_t sync_crypto_op_id,
+ vnet_crypto_op_id_t sync_integ_op_id,
+ u16 digest_len, u8 is_enc,
+ u32 * nb_elts_processed,
+ u32 * enqueue_thread_idx)
+{
+ crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+ crypto_sw_scheduler_per_thread_data_t *ptd = 0;
+ crypto_sw_scheduler_queue_t *q = 0;
+ vnet_crypto_async_frame_t *f = 0;
+ vnet_crypto_async_frame_elt_t *fe;
+ u32 *bi;
+ u32 n_elts;
+ int i = 0;
+ u8 state = VNET_CRYPTO_FRAME_STATE_SUCCESS;
+
+ if (cm->per_thread_data[vm->thread_index].self_crypto_enabled)
+ {
+ /* *INDENT-OFF* */
+ vec_foreach_index (i, cm->per_thread_data)
+ {
+ ptd = cm->per_thread_data + i;
+ q = ptd->queues[async_op_id];
+ f = crypto_sw_scheduler_get_pending_frame (q);
+ if (f)
+ break;
+ }
+ /* *INDENT-ON* */
+ }
+
+ ptd = cm->per_thread_data + vm->thread_index;
+
+ if (f)
+ {
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
+ vec_reset_length (ptd->chunks);
+
+ *nb_elts_processed = n_elts = f->n_elts;
+ fe = f->elts;
+ bi = f->buffer_indices;
+
+ while (n_elts--)
+ {
+ if (n_elts > 1)
+ CLIB_PREFETCH (fe + 1, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ crypto_sw_scheduler_convert_link_crypto (vm, ptd,
+ cm->keys + fe->key_index,
+ fe, fe - f->elts, bi[0],
+ sync_crypto_op_id,
+ sync_integ_op_id,
+ digest_len, is_enc);
+ bi++;
+ fe++;
+ }
+
+ if (is_enc)
+ {
+ process_ops (vm, f, ptd->crypto_ops, &state);
+ process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
+ &state);
+ process_ops (vm, f, ptd->integ_ops, &state);
+ process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
+ &state);
+ }
+ else
+ {
+ process_ops (vm, f, ptd->integ_ops, &state);
+ process_chained_ops (vm, f, ptd->chained_integ_ops, ptd->chunks,
+ &state);
+ process_ops (vm, f, ptd->crypto_ops, &state);
+ process_chained_ops (vm, f, ptd->chained_crypto_ops, ptd->chunks,
+ &state);
+ }
+
+ f->state = state;
+ *enqueue_thread_idx = f->enqueue_thread_index;
+ }
+
+ return crypto_sw_scheduler_get_completed_frame (ptd->queues[async_op_id]);
+}
+
+static clib_error_t *
+sw_scheduler_set_worker_crypto (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 worker_index;
+ u8 crypto_enable;
+ int rv;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "worker %u", &worker_index))
+ {
+ if (unformat (line_input, "crypto"))
+ {
+ if (unformat (line_input, "on"))
+ crypto_enable = 1;
+ else if (unformat (line_input, "off"))
+ crypto_enable = 0;
+ else
+ return (clib_error_return (0, "unknown input '%U'",
+ format_unformat_error,
+ line_input));
+ }
+ else
+ return (clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input));
+ }
+ else
+ return (clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input));
+ }
+
+ rv = crypto_sw_scheduler_set_worker_crypto (worker_index, crypto_enable);
+ if (rv == VNET_API_ERROR_INVALID_VALUE)
+ {
+ return (clib_error_return (0, "invalid worker idx: %d", worker_index));
+ }
+ else if (rv == VNET_API_ERROR_INVALID_VALUE_2)
+ {
+ return (clib_error_return (0, "cannot disable all crypto workers"));
+ }
+ return 0;
+}
+
+/*?
+ * This command sets whether a worker will do crypto processing.
+ *
+ * @cliexpar
+ * Example of how to set worker crypto processing off:
+ * @cliexstart{set sw_scheduler worker 0 crypto off}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_set_sw_scheduler_worker_crypto, static) = {
+ .path = "set sw_scheduler",
+ .short_help = "set sw_scheduler worker <idx> crypto <on|off>",
+ .function = sw_scheduler_set_worker_crypto,
+ .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+sw_scheduler_show_workers (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+ u32 i;
+
+ vlib_cli_output (vm, "%-7s%-20s%-8s", "ID", "Name", "Crypto");
+ for (i = vlib_num_workers () >= 0; i < vlib_thread_main.n_vlib_mains; i++)
+ {
+ vlib_cli_output (vm, "%-7d%-20s%-8s", vlib_get_worker_index (i),
+ (vlib_worker_threads + i)->name,
+ cm->
+ per_thread_data[i].self_crypto_enabled ? "on" : "off");
+ }
+
+ return 0;
+}
+
+/*?
+ * This command displays sw_scheduler workers.
+ *
+ * @cliexpar
+ * Example of how to show workers:
+ * @cliexstart{show sw_scheduler workers}
+ * @cliexend
+ ?*/
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (cmd_show_sw_scheduler_workers, static) = {
+ .path = "show sw_scheduler workers",
+ .short_help = "show sw_scheduler workers",
+ .function = sw_scheduler_show_workers,
+ .is_mp_safe = 1,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+sw_scheduler_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (sw_scheduler_cli_init);
+
+/* *INDENT-OFF* */
+#define _(n, s, k, t, a) \
+ static vnet_crypto_async_frame_t \
+ *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc ( \
+ vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
+ { \
+ return crypto_sw_scheduler_dequeue_aead ( \
+ vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
+ VNET_CRYPTO_OP_##n##_ENC, t, a, nb_elts_processed, thread_idx); \
+ } \
+ static vnet_crypto_async_frame_t \
+ *crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec ( \
+ vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
+ { \
+ return crypto_sw_scheduler_dequeue_aead ( \
+ vm, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
+ VNET_CRYPTO_OP_##n##_DEC, t, a, nb_elts_processed, thread_idx); \
+ }
+foreach_crypto_aead_async_alg
+#undef _
+
+#define _(c, h, s, k, d) \
+ static vnet_crypto_async_frame_t \
+ *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc ( \
+ vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
+ { \
+ return crypto_sw_scheduler_dequeue_link ( \
+ vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
+ VNET_CRYPTO_OP_##c##_ENC, VNET_CRYPTO_OP_##h##_HMAC, d, 1, \
+ nb_elts_processed, thread_idx); \
+ } \
+ static vnet_crypto_async_frame_t \
+ *crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec ( \
+ vlib_main_t *vm, u32 *nb_elts_processed, u32 * thread_idx) \
+ { \
+ return crypto_sw_scheduler_dequeue_link ( \
+ vm, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
+ VNET_CRYPTO_OP_##c##_DEC, VNET_CRYPTO_OP_##h##_HMAC, d, 0, \
+ nb_elts_processed, thread_idx); \
+ }
+ foreach_crypto_link_async_alg
+#undef _
+ /* *INDENT-ON* */
+
+crypto_sw_scheduler_main_t crypto_sw_scheduler_main;
+clib_error_t *
+crypto_sw_scheduler_init (vlib_main_t * vm)
+{
+ crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ clib_error_t *error = 0;
+ crypto_sw_scheduler_per_thread_data_t *ptd;
+
+ u32 queue_size = CRYPTO_SW_SCHEDULER_QUEUE_SIZE * sizeof (void *)
+ + sizeof (crypto_sw_scheduler_queue_t);
+
+ vec_validate_aligned (cm->per_thread_data, tm->n_vlib_mains - 1,
+ CLIB_CACHE_LINE_BYTES);
+
+ vec_foreach (ptd, cm->per_thread_data)
+ {
+ ptd->self_crypto_enabled = 1;
+ u32 i;
+ for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
+ {
+ crypto_sw_scheduler_queue_t *q
+ = clib_mem_alloc_aligned (queue_size, CLIB_CACHE_LINE_BYTES);
+ ASSERT (q != 0);
+ ptd->queues[i] = q;
+ clib_memset_u8 (q, 0, queue_size);
+ }
+ }
+
+ cm->crypto_engine_index =
+ vnet_crypto_register_engine (vm, "sw_scheduler", 100,
+ "SW Scheduler Async Engine");
+
+ vnet_crypto_register_key_handler (vm, cm->crypto_engine_index,
+ crypto_sw_scheduler_key_handler);
+
+ /* *INDENT-OFF* */
+#define _(n, s, k, t, a) \
+ vnet_crypto_register_async_handler ( \
+ vm, cm->crypto_engine_index, \
+ VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
+ crypto_sw_scheduler_frame_enqueue, \
+ crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_enc); \
+ vnet_crypto_register_async_handler ( \
+ vm, cm->crypto_engine_index, \
+ VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
+ crypto_sw_scheduler_frame_enqueue, \
+ crypto_sw_scheduler_frame_dequeue_##n##_TAG_##t##_AAD_##a##_dec);
+ foreach_crypto_aead_async_alg
+#undef _
+
+#define _(c, h, s, k, d) \
+ vnet_crypto_register_async_handler ( \
+ vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
+ crypto_sw_scheduler_frame_enqueue, \
+ crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_enc); \
+ vnet_crypto_register_async_handler ( \
+ vm, cm->crypto_engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
+ crypto_sw_scheduler_frame_enqueue, \
+ crypto_sw_scheduler_frame_dequeue_##c##_##h##_TAG##d##_dec);
+ foreach_crypto_link_async_alg
+#undef _
+ /* *INDENT-ON* */
+
+ if (error)
+ vec_free (cm->per_thread_data);
+
+ return error;
+}
+
+/* *INDENT-OFF* */
+VLIB_INIT_FUNCTION (crypto_sw_scheduler_init) = {
+ .runs_after = VLIB_INITS ("vnet_crypto_init"),
+};
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "SW Scheduler Crypto Async Engine plugin",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/dpdk/cryptodev/cryptodev.c b/src/plugins/dpdk/cryptodev/cryptodev.c
index 2ae09ce226e..6fc09c3813e 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev.c
@@ -706,7 +706,8 @@ cryptodev_get_ring_head (struct rte_ring * ring)
}
static_always_inline vnet_crypto_async_frame_t *
-cryptodev_frame_dequeue (vlib_main_t * vm)
+cryptodev_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
+ u32 * enqueue_thread_idx)
{
cryptodev_main_t *cmt = &cryptodev_main;
cryptodev_numa_data_t *numa = cmt->per_numa_data + vm->numa_node;
@@ -768,7 +769,8 @@ cryptodev_frame_dequeue (vlib_main_t * vm)
VNET_CRYPTO_FRAME_STATE_SUCCESS : VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
rte_mempool_put_bulk (numa->cop_pool, (void **) cet->cops, frame->n_elts);
-
+ *nb_elts_processed = frame->n_elts;
+ *enqueue_thread_idx = frame->enqueue_thread_index;
return frame;
}
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c
index f6778930ef7..cef779ab25e 100644
--- a/src/vnet/crypto/cli.c
+++ b/src/vnet/crypto/cli.c
@@ -311,6 +311,48 @@ VLIB_CLI_COMMAND (show_crypto_async_handlers_command, static) =
static clib_error_t *
+show_crypto_async_status_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ u32 skip_master = vlib_num_workers () > 0;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ unformat_input_t _line_input, *line_input = &_line_input;
+ int i;
+
+ if (unformat_user (input, unformat_line_input, line_input))
+ unformat_free (line_input);
+
+ vlib_cli_output (vm, "Crypto async dispatch mode: %s",
+ cm->dispatch_mode ==
+ VNET_CRYPTO_ASYNC_DISPATCH_POLLING ? "POLLING" :
+ "INTERRUPT");
+
+ for (i = skip_master; i < tm->n_vlib_mains; i++)
+ {
+ vlib_node_state_t state =
+ vlib_node_get_state (vlib_mains[i], cm->crypto_node_index);
+ if (state == VLIB_NODE_STATE_POLLING)
+ vlib_cli_output (vm, "threadId: %-6d POLLING", i);
+ if (state == VLIB_NODE_STATE_INTERRUPT)
+ vlib_cli_output (vm, "threadId: %-6d INTERRUPT", i);
+ if (state == VLIB_NODE_STATE_DISABLED)
+ vlib_cli_output (vm, "threadId: %-6d DISABLED", i);
+ }
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (show_crypto_async_status_command, static) =
+{
+ .path = "show crypto async status",
+ .short_help = "show crypto async status",
+ .function = show_crypto_async_status_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
set_crypto_async_handler_command_fn (vlib_main_t * vm,
unformat_input_t * input,
vlib_cli_command_t * cmd)
@@ -393,6 +435,37 @@ VLIB_CLI_COMMAND (set_crypto_async_handler_command, static) =
};
/* *INDENT-ON* */
+static clib_error_t *
+set_crypto_async_dispatch_polling_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_crypto_set_async_dispatch_mode (VNET_CRYPTO_ASYNC_DISPATCH_POLLING);
+ return 0;
+}
+
+static clib_error_t *
+set_crypto_async_dispatch_interrupt_command_fn (vlib_main_t * vm,
+ unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ vnet_crypto_set_async_dispatch_mode (VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT);
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (set_crypto_async_dispatch_polling_command, static) =
+{
+ .path = "set crypto async dispatch polling",
+ .short_help = "set crypto async dispatch polling|interrupt",
+ .function = set_crypto_async_dispatch_polling_command_fn,
+};
+VLIB_CLI_COMMAND (set_crypto_async_dispatch_interrupt_command, static) =
+{
+ .path = "set crypto async dispatch interrupt",
+ .short_help = "set crypto async dispatch polling|interrupt",
+ .function = set_crypto_async_dispatch_interrupt_command_fn,
+};
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c
index 288e227821b..b877d9a5f03 100644
--- a/src/vnet/crypto/crypto.c
+++ b/src/vnet/crypto/crypto.c
@@ -446,18 +446,20 @@ vnet_crypto_key_add_linked (vlib_main_t * vm,
clib_error_t *
crypto_dispatch_enable_disable (int is_enable)
{
- vlib_main_t *vm = vlib_get_main ();
- vlib_thread_main_t *tm = vlib_get_thread_main ();
- vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "crypto-dispatch");
vnet_crypto_main_t *cm = &crypto_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
u32 skip_master = vlib_num_workers () > 0, i;
- u32 state_change = 0;
- vlib_node_state_t state;
+ vlib_node_state_t state = VLIB_NODE_STATE_DISABLED;
+ u8 state_change = 0;
+ CLIB_MEMORY_STORE_BARRIER ();
if (is_enable && cm->async_refcnt > 0)
{
state_change = 1;
- state = VLIB_NODE_STATE_POLLING;
+ state =
+ cm->dispatch_mode ==
+ VNET_CRYPTO_ASYNC_DISPATCH_POLLING ? VLIB_NODE_STATE_POLLING :
+ VLIB_NODE_STATE_INTERRUPT;
}
if (!is_enable && cm->async_refcnt == 0)
@@ -468,8 +470,11 @@ crypto_dispatch_enable_disable (int is_enable)
if (state_change)
for (i = skip_master; i < tm->n_vlib_mains; i++)
- vlib_node_set_state (vlib_mains[i], node->index, state);
-
+ {
+ if (state !=
+ vlib_node_get_state (vlib_mains[i], cm->crypto_node_index))
+ vlib_node_set_state (vlib_mains[i], cm->crypto_node_index, state);
+ }
return 0;
}
@@ -553,20 +558,20 @@ vnet_crypto_register_post_node (vlib_main_t * vm, char *post_node_name)
void
vnet_crypto_request_async_mode (int is_enable)
{
- vlib_main_t *vm = vlib_get_main ();
- vlib_thread_main_t *tm = vlib_get_thread_main ();
- vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "crypto-dispatch");
vnet_crypto_main_t *cm = &crypto_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
u32 skip_master = vlib_num_workers () > 0, i;
- u32 state_change = 0;
- vlib_node_state_t state;
+ vlib_node_state_t state = VLIB_NODE_STATE_DISABLED;
+ u8 state_change = 0;
+ CLIB_MEMORY_STORE_BARRIER ();
if (is_enable && cm->async_refcnt == 0)
{
state_change = 1;
- state = VLIB_NODE_STATE_POLLING;
+ state =
+ cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_POLLING ?
+ VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_INTERRUPT;
}
-
if (!is_enable && cm->async_refcnt == 1)
{
state_change = 1;
@@ -575,7 +580,11 @@ vnet_crypto_request_async_mode (int is_enable)
if (state_change)
for (i = skip_master; i < tm->n_vlib_mains; i++)
- vlib_node_set_state (vlib_mains[i], node->index, state);
+ {
+ if (state !=
+ vlib_node_get_state (vlib_mains[i], cm->crypto_node_index))
+ vlib_node_set_state (vlib_mains[i], cm->crypto_node_index, state);
+ }
if (is_enable)
cm->async_refcnt += 1;
@@ -583,6 +592,40 @@ vnet_crypto_request_async_mode (int is_enable)
cm->async_refcnt -= 1;
}
+void
+vnet_crypto_set_async_dispatch_mode (u8 mode)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ u32 skip_master = vlib_num_workers () > 0, i;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vlib_node_state_t state = VLIB_NODE_STATE_DISABLED;
+
+ CLIB_MEMORY_STORE_BARRIER ();
+ cm->dispatch_mode = mode;
+ if (mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
+ {
+ state =
+ cm->async_refcnt == 0 ?
+ VLIB_NODE_STATE_DISABLED : VLIB_NODE_STATE_INTERRUPT;
+ }
+ else if (mode == VNET_CRYPTO_ASYNC_DISPATCH_POLLING)
+ {
+ state =
+ cm->async_refcnt == 0 ?
+ VLIB_NODE_STATE_DISABLED : VLIB_NODE_STATE_POLLING;
+ }
+
+ for (i = skip_master; i < tm->n_vlib_mains; i++)
+ {
+ if (state != vlib_node_get_state (vlib_mains[i], cm->crypto_node_index))
+ vlib_node_set_state (vlib_mains[i], cm->crypto_node_index, state);
+ }
+ clib_warning ("Switching dispatch mode might not work is some situations.");
+ clib_warning
+ ("Use 'show crypto async status' to verify that the nodes' states were set");
+ clib_warning ("and if not, set 'crypto async dispatch' mode again.");
+}
+
int
vnet_crypto_is_set_async_handler (vnet_crypto_async_op_id_t op)
{
@@ -663,6 +706,8 @@ vnet_crypto_init (vlib_main_t * vm)
vnet_crypto_main_t *cm = &crypto_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
vnet_crypto_thread_t *ct = 0;
+
+ cm->dispatch_mode = VNET_CRYPTO_ASYNC_DISPATCH_POLLING;
cm->engine_index_by_name = hash_create_string ( /* size */ 0,
sizeof (uword));
cm->alg_index_by_name = hash_create_string (0, sizeof (uword));
@@ -705,7 +750,10 @@ vnet_crypto_init (vlib_main_t * vm)
s);
foreach_crypto_link_async_alg
#undef _
- return 0;
+ cm->crypto_node_index =
+ vlib_get_node_by_name (vm, (u8 *) "crypto-dispatch")->index;
+
+ return 0;
}
VLIB_INIT_FUNCTION (vnet_crypto_init);
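Taken together, crypto_dispatch_enable_disable(), vnet_crypto_request_async_mode()
and the new vnet_crypto_set_async_dispatch_mode() apply one rule to the
crypto-dispatch node on every worker: disabled while nothing holds an async
reference, otherwise polling or interrupt depending on the configured dispatch
mode. A condensed sketch of that rule (assumes <vnet/crypto/crypto.h> is
included; this helper does not exist in the patch):

/* what state should the crypto-dispatch node be in for a given
 * refcount and dispatch mode? */
static vlib_node_state_t
demo_crypto_dispatch_node_state (u32 async_refcnt, u8 dispatch_mode)
{
  if (async_refcnt == 0)
    return VLIB_NODE_STATE_DISABLED;    /* nobody needs async crypto */
  return dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_POLLING ?
    VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_INTERRUPT;
}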
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 777923a8be7..a4a82d6118c 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -18,7 +18,7 @@
#include <vlib/vlib.h>
-#define VNET_CRYPTO_FRAME_SIZE 32
+#define VNET_CRYPTO_FRAME_SIZE 64
/* CRYPTO_ID, PRETTY_NAME, KEY_LENGTH_IN_BYTES */
#define foreach_crypto_cipher_alg \
@@ -322,15 +322,17 @@ typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
#define VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED 0
-#define VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS 1
-#define VNET_CRYPTO_FRAME_STATE_SUCCESS 2
-#define VNET_CRYPTO_FRAME_STATE_ELT_ERROR 3
+#define VNET_CRYPTO_FRAME_STATE_PENDING 1 /* frame waiting to be processed */
+#define VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS 2
+#define VNET_CRYPTO_FRAME_STATE_SUCCESS 3
+#define VNET_CRYPTO_FRAME_STATE_ELT_ERROR 4
u8 state;
vnet_crypto_async_op_id_t op:8;
u16 n_elts;
vnet_crypto_async_frame_elt_t elts[VNET_CRYPTO_FRAME_SIZE];
u32 buffer_indices[VNET_CRYPTO_FRAME_SIZE];
u16 next_node_index[VNET_CRYPTO_FRAME_SIZE];
+ u32 enqueue_thread_index;
} vnet_crypto_async_frame_t;
typedef struct
@@ -357,13 +359,16 @@ typedef void (vnet_crypto_key_handler_t) (vlib_main_t * vm,
vnet_crypto_key_index_t idx);
/** async crypto function handlers **/
-typedef int (vnet_crypto_frame_enqueue_t) (vlib_main_t * vm,
- vnet_crypto_async_frame_t * frame);
+typedef int
+ (vnet_crypto_frame_enqueue_t) (vlib_main_t * vm,
+ vnet_crypto_async_frame_t * frame);
typedef vnet_crypto_async_frame_t *
- (vnet_crypto_frame_dequeue_t) (vlib_main_t * vm);
+ (vnet_crypto_frame_dequeue_t) (vlib_main_t * vm, u32 * nb_elts_processed,
+ u32 * enqueue_thread_idx);
-u32 vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
- char *desc);
+u32
+vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
+ char *desc);
void vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index,
vnet_crypto_op_id_t opt,
@@ -431,6 +436,10 @@ typedef struct
vnet_crypto_async_alg_data_t *async_algs;
u32 async_refcnt;
vnet_crypto_async_next_node_t *next_nodes;
+ u32 crypto_node_index;
+#define VNET_CRYPTO_ASYNC_DISPATCH_POLLING 0
+#define VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT 1
+ u8 dispatch_mode;
} vnet_crypto_main_t;
extern vnet_crypto_main_t crypto_main;
@@ -466,6 +475,8 @@ int vnet_crypto_is_set_async_handler (vnet_crypto_async_op_id_t opt);
void vnet_crypto_request_async_mode (int is_enable);
+void vnet_crypto_set_async_dispatch_mode (u8 mode);
+
vnet_crypto_async_alg_t vnet_crypto_link_algs (vnet_crypto_alg_t crypto_alg,
vnet_crypto_alg_t integ_alg);
@@ -551,14 +562,18 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
vnet_crypto_async_frame_t * frame)
{
vnet_crypto_main_t *cm = &crypto_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
vnet_crypto_async_op_id_t opt = frame->op;
+ u32 i = vlib_num_workers () > 0;
+
int ret = (cm->enqueue_handlers[frame->op]) (vm, frame);
+ frame->enqueue_thread_index = vm->thread_index;
clib_bitmap_set_no_check (cm->async_active_ids, opt, 1);
if (PREDICT_TRUE (ret == 0))
{
vnet_crypto_async_frame_t *nf = 0;
- frame->state = VNET_CRYPTO_FRAME_STATE_WORK_IN_PROGRESS;
+ frame->state = VNET_CRYPTO_FRAME_STATE_PENDING;
pool_get_aligned (ct->frame_pool, nf, CLIB_CACHE_LINE_BYTES);
if (CLIB_DEBUG > 0)
clib_memset (nf, 0xfe, sizeof (*nf));
@@ -567,6 +582,15 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
nf->n_elts = 0;
ct->frames[opt] = nf;
}
+
+ if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
+ {
+ for (; i < tm->n_vlib_mains; i++)
+ {
+ vlib_node_set_interrupt_pending (vlib_mains[i],
+ cm->crypto_node_index);
+ }
+ }
return ret;
}
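With the widened vnet_crypto_frame_dequeue_t signature above, an engine's
dequeue handler now also reports how many elements it just finished and which
thread enqueued the frame, so the dispatch node can signal that thread in
interrupt mode (this is what the cryptodev change earlier in this patch does).
A hypothetical engine handler would take the following shape;
demo_engine_pop_completed() is a made-up placeholder, not a VPP function:

static vnet_crypto_async_frame_t *
demo_engine_frame_dequeue (vlib_main_t * vm, u32 * nb_elts_processed,
                           u32 * enqueue_thread_idx)
{
  /* fetch a frame this engine has finished, if any (hypothetical helper) */
  vnet_crypto_async_frame_t *f = demo_engine_pop_completed (vm);

  if (f)
    {
      *nb_elts_processed = f->n_elts;
      *enqueue_thread_idx = f->enqueue_thread_index;
    }
  return f;
}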
diff --git a/src/vnet/crypto/node.c b/src/vnet/crypto/node.c
index 51ee63d1d62..12e6033ad80 100644
--- a/src/vnet/crypto/node.c
+++ b/src/vnet/crypto/node.c
@@ -74,60 +74,78 @@ vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
static_always_inline u32
crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
vnet_crypto_thread_t * ct,
- vnet_crypto_frame_dequeue_t * hdl,
- u32 n_cache, u32 * n_total)
+ vnet_crypto_frame_dequeue_t * hdl, u32 n_cache,
+ u32 * n_total)
{
- vnet_crypto_async_frame_t *cf = (hdl) (vm);
+ vnet_crypto_main_t *cm = &crypto_main;
+ u32 n_elts = 0;
+ u32 enqueue_thread_idx = ~0;
+ vnet_crypto_async_frame_t *cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
+ *n_total += n_elts;
- while (cf)
+ while (cf || n_elts)
{
- vec_validate (ct->buffer_indice, n_cache + cf->n_elts);
- vec_validate (ct->nexts, n_cache + cf->n_elts);
- clib_memcpy_fast (ct->buffer_indice + n_cache, cf->buffer_indices,
- sizeof (u32) * cf->n_elts);
- if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
- {
- clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
- sizeof (u16) * cf->n_elts);
- }
- else
+ if (cf)
{
- u32 i;
- for (i = 0; i < cf->n_elts; i++)
+ vec_validate (ct->buffer_indice, n_cache + cf->n_elts);
+ vec_validate (ct->nexts, n_cache + cf->n_elts);
+ clib_memcpy_fast (ct->buffer_indice + n_cache, cf->buffer_indices,
+ sizeof (u32) * cf->n_elts);
+ if (cf->state == VNET_CRYPTO_FRAME_STATE_SUCCESS)
+ {
+ clib_memcpy_fast (ct->nexts + n_cache, cf->next_node_index,
+ sizeof (u16) * cf->n_elts);
+ }
+ else
{
- if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ u32 i;
+ for (i = 0; i < cf->n_elts; i++)
{
- ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
- vlib_node_increment_counter (vm, node->node_index,
- cf->elts[i].status, 1);
+ if (cf->elts[i].status != VNET_CRYPTO_OP_STATUS_COMPLETED)
+ {
+ ct->nexts[i + n_cache] = CRYPTO_DISPATCH_NEXT_ERR_DROP;
+ vlib_node_increment_counter (vm, node->node_index,
+ cf->elts[i].status, 1);
+ }
+ else
+ ct->nexts[i + n_cache] = cf->next_node_index[i];
}
- else
- ct->nexts[i + n_cache] = cf->next_node_index[i];
}
- }
- n_cache += cf->n_elts;
- *n_total += cf->n_elts;
- if (n_cache >= VLIB_FRAME_SIZE)
- {
- vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice, ct->nexts,
- n_cache);
- n_cache = 0;
- }
-
- if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
- {
- u32 i;
+ n_cache += cf->n_elts;
+ if (n_cache >= VLIB_FRAME_SIZE)
+ {
+ vlib_buffer_enqueue_to_next (vm, node, ct->buffer_indice,
+ ct->nexts, n_cache);
+ n_cache = 0;
+ }
- for (i = 0; i < cf->n_elts; i++)
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
{
- vlib_buffer_t *b = vlib_get_buffer (vm, cf->buffer_indices[i]);
- if (b->flags & VLIB_BUFFER_IS_TRACED)
- vnet_crypto_async_add_trace (vm, node, b, cf->op,
- cf->elts[i].status);
+ u32 i;
+
+ for (i = 0; i < cf->n_elts; i++)
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm,
+ cf->buffer_indices[i]);
+ if (b->flags & VLIB_BUFFER_IS_TRACED)
+ vnet_crypto_async_add_trace (vm, node, b, cf->op,
+ cf->elts[i].status);
+ }
}
+ vnet_crypto_async_free_frame (vm, cf);
+ }
+ /* signal enqueue-thread to dequeue the processed frame (n_elts>0) */
+ if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT
+ && n_elts > 0)
+ {
+ vlib_node_set_interrupt_pending (vlib_mains[enqueue_thread_idx],
+ cm->crypto_node_index);
}
- vnet_crypto_async_free_frame (vm, cf);
- cf = (hdl) (vm);
+
+ n_elts = 0;
+ enqueue_thread_idx = 0;
+ cf = (hdl) (vm, &n_elts, &enqueue_thread_idx);
+ *n_total += n_elts;
}
return n_cache;