Diffstat (limited to 'src/plugins/dev_octeon')
-rw-r--r--  src/plugins/dev_octeon/CMakeLists.txt      1
-rw-r--r--  src/plugins/dev_octeon/crypto.c         1782
-rw-r--r--  src/plugins/dev_octeon/crypto.h          184
-rw-r--r--  src/plugins/dev_octeon/flow.c            114
-rw-r--r--  src/plugins/dev_octeon/init.c            116
-rw-r--r--  src/plugins/dev_octeon/octeon.h            5
-rw-r--r--  src/plugins/dev_octeon/port.c            164
-rw-r--r--  src/plugins/dev_octeon/roc_helper.c        7
-rw-r--r--  src/plugins/dev_octeon/rx_node.c          24
-rw-r--r--  src/plugins/dev_octeon/tx_node.c          12
10 files changed, 2328 insertions, 81 deletions
diff --git a/src/plugins/dev_octeon/CMakeLists.txt b/src/plugins/dev_octeon/CMakeLists.txt
index c6271ecdfba..6109de57a7d 100644
--- a/src/plugins/dev_octeon/CMakeLists.txt
+++ b/src/plugins/dev_octeon/CMakeLists.txt
@@ -36,6 +36,7 @@ add_vpp_plugin(dev_octeon
tx_node.c
flow.c
counter.c
+ crypto.c
MULTIARCH_SOURCES
rx_node.c
diff --git a/src/plugins/dev_octeon/crypto.c b/src/plugins/dev_octeon/crypto.c
new file mode 100644
index 00000000000..7d3790f3ec9
--- /dev/null
+++ b/src/plugins/dev_octeon/crypto.c
@@ -0,0 +1,1782 @@
+/*
+ * Copyright (c) 2024 Marvell.
+ * SPDX-License-Identifier: Apache-2.0
+ * https://spdx.org/licenses/Apache-2.0.html
+ */
+
+#include <vnet/dev/dev.h>
+#include <vnet/devices/devices.h>
+#include <dev_octeon/octeon.h>
+#include <dev_octeon/crypto.h>
+#include <base/roc_api.h>
+#include <common.h>
+
+oct_crypto_main_t oct_crypto_main;
+oct_crypto_dev_t oct_crypto_dev;
+
+VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
+ .class_name = "octeon",
+ .subclass_name = "crypto",
+};
+
+static_always_inline void
+oct_map_keyindex_to_session (oct_crypto_sess_t *sess, u32 key_index, u8 type)
+{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ oct_crypto_key_t *ckey;
+
+ ckey = vec_elt_at_index (ocm->keys[type], key_index);
+
+ ckey->sess = sess;
+ sess->key_index = key_index;
+}
+
+static_always_inline oct_crypto_sess_t *
+oct_crypto_session_alloc (vlib_main_t *vm, u8 type)
+{
+ extern oct_plt_init_param_t oct_plt_init_param;
+ oct_crypto_sess_t *addr = NULL;
+ oct_crypto_main_t *ocm;
+ oct_crypto_dev_t *ocd;
+ u32 size;
+
+ ocm = &oct_crypto_main;
+ ocd = ocm->crypto_dev[type];
+
+ size = sizeof (oct_crypto_sess_t);
+
+ addr = oct_plt_init_param.oct_plt_zmalloc (size, CLIB_CACHE_LINE_BYTES);
+ if (addr == NULL)
+ {
+ log_err (ocd->dev, "Failed to allocate crypto session memory");
+ return NULL;
+ }
+
+ return addr;
+}
+
+static_always_inline i32
+oct_crypto_session_create (vlib_main_t *vm, vnet_crypto_key_index_t key_index,
+ int op_type)
+{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ oct_crypto_sess_t *session;
+ vnet_crypto_key_t *key;
+ oct_crypto_key_t *ckey;
+
+ key = vnet_crypto_get_key (key_index);
+
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ {
+ /*
+       * Read the crypto or integ key session and map the link key index to it.
+ */
+ if (key->index_crypto != UINT32_MAX)
+ {
+ ckey = vec_elt_at_index (ocm->keys[op_type], key->index_crypto);
+ session = ckey->sess;
+ }
+ else if (key->index_integ != UINT32_MAX)
+ {
+ ckey = vec_elt_at_index (ocm->keys[op_type], key->index_integ);
+ session = ckey->sess;
+ }
+ else
+ return -1;
+ }
+ else
+ {
+ session = oct_crypto_session_alloc (vm, op_type);
+ if (session == NULL)
+ return -1;
+ }
+
+ oct_map_keyindex_to_session (session, key_index, op_type);
+ return 0;
+}
+
+void
+oct_crypto_key_del_handler (vlib_main_t *vm, vnet_crypto_key_index_t key_index)
+{
+ extern oct_plt_init_param_t oct_plt_init_param;
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ oct_crypto_key_t *ckey_linked;
+ oct_crypto_key_t *ckey;
+
+ vec_validate (ocm->keys[VNET_CRYPTO_OP_TYPE_ENCRYPT], key_index);
+
+ ckey = vec_elt_at_index (ocm->keys[VNET_CRYPTO_OP_TYPE_ENCRYPT], key_index);
+ if (ckey->sess)
+ {
+ /*
+       * If a linked algo points to the same session, reset its pointer.
+ */
+ if (ckey->sess->key_index != key_index)
+ {
+ ckey_linked = vec_elt_at_index (
+ ocm->keys[VNET_CRYPTO_OP_TYPE_ENCRYPT], ckey->sess->key_index);
+ ckey_linked->sess = NULL;
+ }
+ oct_plt_init_param.oct_plt_free (ckey->sess);
+ ckey->sess = NULL;
+ }
+
+ ckey = vec_elt_at_index (ocm->keys[VNET_CRYPTO_OP_TYPE_DECRYPT], key_index);
+ if (ckey->sess)
+ {
+ /*
+       * If a linked algo points to the same session, reset its pointer.
+ */
+ if (ckey->sess->key_index != key_index)
+ {
+ ckey_linked = vec_elt_at_index (
+ ocm->keys[VNET_CRYPTO_OP_TYPE_DECRYPT], ckey->sess->key_index);
+ ckey_linked->sess = NULL;
+ }
+
+ oct_plt_init_param.oct_plt_free (ckey->sess);
+ ckey->sess = NULL;
+ }
+}
+
+void
+oct_crypto_key_add_handler (vlib_main_t *vm, vnet_crypto_key_index_t key_index)
+{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ oct_crypto_key_t *ckey;
+ oct_crypto_dev_t *ocd = &oct_crypto_dev;
+
+ vec_validate (ocm->keys[VNET_CRYPTO_OP_TYPE_ENCRYPT], key_index);
+ ckey = vec_elt_at_index (ocm->keys[VNET_CRYPTO_OP_TYPE_ENCRYPT], key_index);
+ if (ckey->sess == NULL)
+ {
+ if (oct_crypto_session_create (vm, key_index,
+ VNET_CRYPTO_OP_TYPE_ENCRYPT))
+ {
+ log_err (ocd->dev, "Unable to create crypto session");
+ return;
+ }
+ }
+
+ vec_validate (ocm->keys[VNET_CRYPTO_OP_TYPE_DECRYPT], key_index);
+ ckey = vec_elt_at_index (ocm->keys[VNET_CRYPTO_OP_TYPE_DECRYPT], key_index);
+ if (ckey->sess == NULL)
+ {
+ if (oct_crypto_session_create (vm, key_index,
+ VNET_CRYPTO_OP_TYPE_DECRYPT))
+ {
+ log_err (ocd->dev, "Unable to create crypto session");
+ return;
+ }
+ }
+}
+
+void
+oct_crypto_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx)
+{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+
+ if (kop == VNET_CRYPTO_KEY_OP_DEL)
+ {
+ oct_crypto_key_del_handler (vm, idx);
+ return;
+ }
+ oct_crypto_key_add_handler (vm, idx);
+
+ ocm->started = 1;
+}
+
+static_always_inline void
+oct_crypto_session_free (vlib_main_t *vm, oct_crypto_sess_t *sess)
+{
+ extern oct_plt_init_param_t oct_plt_init_param;
+
+ oct_plt_init_param.oct_plt_free (sess);
+ return;
+}
+
+#ifdef PLATFORM_OCTEON9
+static inline void
+oct_cpt_inst_submit (struct cpt_inst_s *inst, uint64_t lmtline,
+ uint64_t io_addr)
+{
+ uint64_t lmt_status;
+
+ do
+ {
+ /* Copy CPT command to LMTLINE */
+ roc_lmt_mov64 ((void *) lmtline, inst);
+
+ /*
+ * Make sure compiler does not reorder memcpy and ldeor.
+ * LMTST transactions are always flushed from the write
+       * buffer immediately, so a DMB is not required to push out
+ * LMTSTs.
+ */
+ asm volatile ("dmb oshst" : : : "memory");
+ lmt_status = roc_lmt_submit_ldeor (io_addr);
+ }
+ while (lmt_status == 0);
+}
+#endif
+
+static_always_inline void
+oct_crypto_burst_submit (oct_crypto_dev_t *crypto_dev, struct cpt_inst_s *inst,
+ u32 n_left)
+{
+ u64 lmt_base;
+ u64 io_addr;
+ u32 count;
+
+#ifdef PLATFORM_OCTEON9
+ lmt_base = crypto_dev->lf.lmt_base;
+ io_addr = crypto_dev->lf.io_addr;
+
+ for (count = 0; count < n_left; count++)
+ oct_cpt_inst_submit (inst + count, lmt_base, io_addr);
+#else
+ u64 *lmt_line[OCT_MAX_LMT_SZ];
+ u64 lmt_arg, core_lmt_id;
+
+ lmt_base = crypto_dev->lmtline.lmt_base;
+ io_addr = crypto_dev->lmtline.io_addr;
+
+ ROC_LMT_CPT_BASE_ID_GET (lmt_base, core_lmt_id);
+
+ for (count = 0; count < 16; count++)
+ {
+ lmt_line[count] = OCT_CPT_LMT_GET_LINE_ADDR (lmt_base, count);
+ }
+
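+  /*
+   * Submit instructions in bursts of up to OCT_MAX_LMT_SZ (16): copy one
+   * cpt_inst_s per LMT line, then issue a single STEORL with the LMTST
+   * count minus one encoded from bit 12 of lmt_arg.
+   */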
+ while (n_left > OCT_MAX_LMT_SZ)
+ {
+
+ /*
+ * Add a memory barrier so that LMTLINEs from the previous iteration
+ * can be reused for a subsequent transfer.
+ */
+ asm volatile ("dmb oshst" ::: "memory");
+
+ lmt_arg = ROC_CN10K_CPT_LMT_ARG | (u64) core_lmt_id;
+
+ for (count = 0; count < 16; count++)
+ {
+ roc_lmt_mov_seg ((void *) lmt_line[count], inst + count,
+ CPT_LMT_SIZE_COPY);
+ }
+
+ /* Set number of LMTSTs, excluding the first */
+ lmt_arg |= (OCT_MAX_LMT_SZ - 1) << 12;
+
+ roc_lmt_submit_steorl (lmt_arg, io_addr);
+
+ inst += OCT_MAX_LMT_SZ;
+ n_left -= OCT_MAX_LMT_SZ;
+ }
+
+ if (n_left > 0)
+ {
+ /*
+ * Add a memory barrier so that LMTLINEs from the previous iteration
+ * can be reused for a subsequent transfer.
+ */
+ asm volatile ("dmb oshst" ::: "memory");
+
+ lmt_arg = ROC_CN10K_CPT_LMT_ARG | (u64) core_lmt_id;
+
+ for (count = 0; count < n_left; count++)
+ {
+ roc_lmt_mov_seg ((void *) lmt_line[count], inst + count,
+ CPT_LMT_SIZE_COPY);
+ }
+
+ /* Set number of LMTSTs, excluding the first */
+ lmt_arg |= (n_left - 1) << 12;
+
+ roc_lmt_submit_steorl (lmt_arg, io_addr);
+ }
+#endif
+}
+
+static_always_inline uint32_t
+oct_crypto_fill_sg_comp_from_iov (struct roc_sglist_comp *list, uint32_t i,
+ struct roc_se_iov_ptr *from,
+ uint32_t from_offset, uint32_t *psize,
+ struct roc_se_buf_ptr *extra_buf,
+ uint32_t extra_offset)
+{
+ uint32_t extra_len = extra_buf ? extra_buf->size : 0;
+ uint32_t size = *psize;
+ int32_t j;
+
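+  /*
+   * Each roc_sglist_comp packs four (length, pointer) entries, hence the
+   * list[i >> 2] / i % 4 indexing; lengths and pointers are stored
+   * big-endian via clib_host_to_net_*().
+   */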
+ for (j = 0; j < from->buf_cnt; j++)
+ {
+ struct roc_sglist_comp *to = &list[i >> 2];
+ uint32_t buf_sz = from->bufs[j].size;
+ void *vaddr = from->bufs[j].vaddr;
+ uint64_t e_vaddr;
+ uint32_t e_len;
+
+ if (PREDICT_FALSE (from_offset))
+ {
+ if (from_offset >= buf_sz)
+ {
+ from_offset -= buf_sz;
+ continue;
+ }
+ e_vaddr = (uint64_t) vaddr + from_offset;
+ e_len = clib_min ((buf_sz - from_offset), size);
+ from_offset = 0;
+ }
+ else
+ {
+ e_vaddr = (uint64_t) vaddr;
+ e_len = clib_min (buf_sz, size);
+ }
+
+ to->u.s.len[i % 4] = clib_host_to_net_u16 (e_len);
+ to->ptr[i % 4] = clib_host_to_net_u64 (e_vaddr);
+
+ if (extra_len && (e_len >= extra_offset))
+ {
+ /* Break the data at given offset */
+ uint32_t next_len = e_len - extra_offset;
+ uint64_t next_vaddr = e_vaddr + extra_offset;
+
+ if (!extra_offset)
+ {
+ i--;
+ }
+ else
+ {
+ e_len = extra_offset;
+ size -= e_len;
+ to->u.s.len[i % 4] = clib_host_to_net_u16 (e_len);
+ }
+
+ extra_len = clib_min (extra_len, size);
+ /* Insert extra data ptr */
+ if (extra_len)
+ {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] = clib_host_to_net_u16 (extra_len);
+ to->ptr[i % 4] =
+ clib_host_to_net_u64 ((uint64_t) extra_buf->vaddr);
+ size -= extra_len;
+ }
+
+ next_len = clib_min (next_len, size);
+ /* insert the rest of the data */
+ if (next_len)
+ {
+ i++;
+ to = &list[i >> 2];
+ to->u.s.len[i % 4] = clib_host_to_net_u16 (next_len);
+ to->ptr[i % 4] = clib_host_to_net_u64 (next_vaddr);
+ size -= next_len;
+ }
+ extra_len = 0;
+ }
+ else
+ {
+ size -= e_len;
+ }
+ if (extra_offset)
+ extra_offset -= size;
+ i++;
+
+ if (PREDICT_FALSE (!size))
+ break;
+ }
+
+ *psize = size;
+ return (uint32_t) i;
+}
+
+static_always_inline u32
+oct_crypto_fill_sg2_comp_from_iov (struct roc_sg2list_comp *list, u32 i,
+ struct roc_se_iov_ptr *from,
+ u32 from_offset, u32 *psize,
+ struct roc_se_buf_ptr *extra_buf,
+ u32 extra_offset)
+{
+ u32 extra_len = extra_buf ? extra_buf->size : 0;
+ u32 size = *psize, buf_sz, e_len, next_len;
+ struct roc_sg2list_comp *to;
+ u64 e_vaddr, next_vaddr;
+ void *vaddr;
+ i32 j;
+
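+  /*
+   * SG2 components hold three (length, pointer) entries each, hence the
+   * list[i / 3] / i % 3 indexing; valid_segs tracks how many entries of
+   * the current component are in use, and values stay in native byte order.
+   */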
+ for (j = 0; j < from->buf_cnt; j++)
+ {
+ to = &list[i / 3];
+ buf_sz = from->bufs[j].size;
+ vaddr = from->bufs[j].vaddr;
+
+ if (PREDICT_FALSE (from_offset))
+ {
+ if (from_offset >= buf_sz)
+ {
+ from_offset -= buf_sz;
+ continue;
+ }
+ e_vaddr = (u64) vaddr + from_offset;
+ e_len = clib_min ((buf_sz - from_offset), size);
+ from_offset = 0;
+ }
+ else
+ {
+ e_vaddr = (u64) vaddr;
+ e_len = clib_min (buf_sz, size);
+ }
+
+ to->u.s.len[i % 3] = (e_len);
+ to->ptr[i % 3] = (e_vaddr);
+ to->u.s.valid_segs = (i % 3) + 1;
+
+ if (extra_len && (e_len >= extra_offset))
+ {
+ /* Break the data at given offset */
+ next_len = e_len - extra_offset;
+ next_vaddr = e_vaddr + extra_offset;
+
+ if (!extra_offset)
+ i--;
+ else
+ {
+ e_len = extra_offset;
+ size -= e_len;
+ to->u.s.len[i % 3] = (e_len);
+ }
+
+ extra_len = clib_min (extra_len, size);
+ /* Insert extra data ptr */
+ if (extra_len)
+ {
+ i++;
+ to = &list[i / 3];
+ to->u.s.len[i % 3] = (extra_len);
+ to->ptr[i % 3] = ((u64) extra_buf->vaddr);
+ to->u.s.valid_segs = (i % 3) + 1;
+ size -= extra_len;
+ }
+
+ next_len = clib_min (next_len, size);
+ /* insert the rest of the data */
+ if (next_len)
+ {
+ i++;
+ to = &list[i / 3];
+ to->u.s.len[i % 3] = (next_len);
+ to->ptr[i % 3] = (next_vaddr);
+ to->u.s.valid_segs = (i % 3) + 1;
+ size -= next_len;
+ }
+ extra_len = 0;
+ }
+ else
+ size -= e_len;
+
+ if (extra_offset)
+ extra_offset -= size;
+
+ i++;
+
+ if (PREDICT_FALSE (!size))
+ break;
+ }
+
+ *psize = size;
+ return (u32) i;
+}
+
+static_always_inline uint32_t
+oct_crypto_fill_sg_comp_from_buf (struct roc_sglist_comp *list, uint32_t i,
+ struct roc_se_buf_ptr *from)
+{
+ struct roc_sglist_comp *to = &list[i >> 2];
+
+ to->u.s.len[i % 4] = clib_host_to_net_u16 (from->size);
+ to->ptr[i % 4] = clib_host_to_net_u64 ((uint64_t) from->vaddr);
+ return ++i;
+}
+
+static_always_inline uint32_t
+oct_crypto_fill_sg_comp (struct roc_sglist_comp *list, uint32_t i,
+ uint64_t dma_addr, uint32_t size)
+{
+ struct roc_sglist_comp *to = &list[i >> 2];
+
+ to->u.s.len[i % 4] = clib_host_to_net_u16 (size);
+ to->ptr[i % 4] = clib_host_to_net_u64 (dma_addr);
+ return ++i;
+}
+
+static_always_inline u32
+oct_crypto_fill_sg2_comp (struct roc_sg2list_comp *list, u32 index,
+ u64 dma_addr, u32 size)
+{
+ struct roc_sg2list_comp *to = &list[index / 3];
+
+ to->u.s.len[index % 3] = (size);
+ to->ptr[index % 3] = (dma_addr);
+ to->u.s.valid_segs = (index % 3) + 1;
+ return ++index;
+}
+
+static_always_inline u32
+oct_crypto_fill_sg2_comp_from_buf (struct roc_sg2list_comp *list, u32 index,
+ struct roc_se_buf_ptr *from)
+{
+ struct roc_sg2list_comp *to = &list[index / 3];
+
+ to->u.s.len[index % 3] = (from->size);
+ to->ptr[index % 3] = ((u64) from->vaddr);
+ to->u.s.valid_segs = (index % 3) + 1;
+ return ++index;
+}
+
+static_always_inline int __attribute__ ((unused))
+oct_crypto_sg_inst_prep (struct roc_se_fc_params *params,
+ struct cpt_inst_s *inst, uint64_t offset_ctrl,
+ const uint8_t *iv_s, int iv_len, uint8_t pack_iv,
+ uint8_t pdcp_alg_type, int32_t inputlen,
+ int32_t outputlen, uint32_t passthrough_len,
+ uint32_t req_flags, int pdcp_flag, int decrypt)
+{
+ struct roc_sglist_comp *gather_comp, *scatter_comp;
+ void *m_vaddr = params->meta_buf.vaddr;
+ struct roc_se_buf_ptr *aad_buf = NULL;
+ uint32_t mac_len = 0, aad_len = 0;
+ struct roc_se_ctx *se_ctx;
+ uint32_t i, g_size_bytes;
+ uint64_t *offset_vaddr;
+ uint32_t s_size_bytes;
+ uint8_t *in_buffer;
+ uint32_t size;
+ uint8_t *iv_d;
+ int ret = 0;
+
+ se_ctx = params->ctx;
+ mac_len = se_ctx->mac_len;
+
+ if (PREDICT_FALSE (req_flags & ROC_SE_VALID_AAD_BUF))
+ {
+ /* We don't support both AAD and auth data separately */
+ aad_len = params->aad_buf.size;
+ aad_buf = &params->aad_buf;
+ }
+
+ /* save space for iv */
+ offset_vaddr = m_vaddr;
+
+ m_vaddr =
+ (uint8_t *) m_vaddr + ROC_SE_OFF_CTRL_LEN + PLT_ALIGN_CEIL (iv_len, 8);
+
+ inst->w4.s.opcode_major |= (uint64_t) ROC_DMA_MODE_SG;
+
+ /* iv offset is 0 */
+ *offset_vaddr = offset_ctrl;
+
+ iv_d = ((uint8_t *) offset_vaddr + ROC_SE_OFF_CTRL_LEN);
+
+ if (PREDICT_TRUE (iv_len))
+ memcpy (iv_d, iv_s, iv_len);
+
+ /* DPTR has SG list */
+
+ /* TODO Add error check if space will be sufficient */
+ gather_comp = (struct roc_sglist_comp *) ((uint8_t *) m_vaddr + 8);
+
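+  /*
+   * m_vaddr now points to the 8-byte SG header (gather/scatter entry
+   * counts, filled in below via in_buffer), followed by the gather
+   * component array and then the scatter component array.
+   */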
+ /*
+ * Input Gather List
+ */
+ i = 0;
+
+ /* Offset control word followed by iv */
+
+ i = oct_crypto_fill_sg_comp (gather_comp, i, (uint64_t) offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ if (decrypt && (req_flags & ROC_SE_VALID_MAC_BUF))
+ {
+ size = inputlen - iv_len - mac_len;
+
+ if (PREDICT_TRUE (size))
+ {
+ uint32_t aad_offset = aad_len ? passthrough_len : 0;
+ i = oct_crypto_fill_sg_comp_from_iov (
+ gather_comp, i, params->src_iov, 0, &size, aad_buf, aad_offset);
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ if (mac_len)
+ i =
+ oct_crypto_fill_sg_comp_from_buf (gather_comp, i, &params->mac_buf);
+ }
+ else
+ {
+ /* input data */
+ size = inputlen - iv_len;
+ if (size)
+ {
+ uint32_t aad_offset = aad_len ? passthrough_len : 0;
+ i = oct_crypto_fill_sg_comp_from_iov (
+ gather_comp, i, params->src_iov, 0, &size, aad_buf, aad_offset);
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+
+ in_buffer = m_vaddr;
+ ((uint16_t *) in_buffer)[0] = 0;
+ ((uint16_t *) in_buffer)[1] = 0;
+ ((uint16_t *) in_buffer)[2] = clib_host_to_net_u16 (i);
+
+ g_size_bytes = ((i + 3) / 4) * sizeof (struct roc_sglist_comp);
+ /*
+ * Output Scatter List
+ */
+
+ i = 0;
+ scatter_comp =
+ (struct roc_sglist_comp *) ((uint8_t *) gather_comp + g_size_bytes);
+
+ i = oct_crypto_fill_sg_comp (
+ scatter_comp, i, (uint64_t) offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
+
+ /* Add output data */
+ if ((!decrypt) && (req_flags & ROC_SE_VALID_MAC_BUF))
+ {
+ size = outputlen - iv_len - mac_len;
+ if (size)
+ {
+
+ uint32_t aad_offset = aad_len ? passthrough_len : 0;
+
+ i = oct_crypto_fill_sg_comp_from_iov (
+ scatter_comp, i, params->dst_iov, 0, &size, aad_buf, aad_offset);
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ /* mac data */
+ if (mac_len)
+ i =
+ oct_crypto_fill_sg_comp_from_buf (scatter_comp, i, &params->mac_buf);
+ }
+ else
+ {
+ /* Output including mac */
+ size = outputlen - iv_len;
+
+ if (size)
+ {
+ uint32_t aad_offset = aad_len ? passthrough_len : 0;
+
+ i = oct_crypto_fill_sg_comp_from_iov (
+ scatter_comp, i, params->dst_iov, 0, &size, aad_buf, aad_offset);
+
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+ ((uint16_t *) in_buffer)[3] = clib_host_to_net_u16 (i);
+ s_size_bytes = ((i + 3) / 4) * sizeof (struct roc_sglist_comp);
+
+ size = g_size_bytes + s_size_bytes + ROC_SG_LIST_HDR_SIZE;
+
+ /* This is DPTR len in case of SG mode */
+ inst->w4.s.dlen = size;
+
+ if (PREDICT_FALSE (size > ROC_SG_MAX_DLEN_SIZE))
+ {
+ clib_warning (
+ "Cryptodev: Exceeds max supported components. Reduce segments");
+ ret = -1;
+ }
+
+ inst->dptr = (uint64_t) in_buffer;
+ return ret;
+}
+
+static_always_inline int __attribute__ ((unused))
+oct_crypto_sg2_inst_prep (struct roc_se_fc_params *params,
+ struct cpt_inst_s *inst, u64 offset_ctrl,
+ const u8 *iv_s, int iv_len, u8 pack_iv,
+ u8 pdcp_alg_type, i32 inputlen, i32 outputlen,
+ u32 passthrough_len, u32 req_flags, int pdcp_flag,
+ int decrypt)
+{
+ u32 mac_len = 0, aad_len = 0, size, index, g_size_bytes;
+ struct roc_sg2list_comp *gather_comp, *scatter_comp;
+ void *m_vaddr = params->meta_buf.vaddr;
+ struct roc_se_buf_ptr *aad_buf = NULL;
+ union cpt_inst_w5 cpt_inst_w5;
+ union cpt_inst_w6 cpt_inst_w6;
+ u16 scatter_sz, gather_sz;
+ struct roc_se_ctx *se_ctx;
+ u64 *offset_vaddr;
+ int ret = 0;
+ u8 *iv_d;
+
+ se_ctx = params->ctx;
+ mac_len = se_ctx->mac_len;
+
+ if (PREDICT_FALSE (req_flags & ROC_SE_VALID_AAD_BUF))
+ {
+ /* We don't support both AAD and auth data separately */
+ aad_len = params->aad_buf.size;
+ aad_buf = &params->aad_buf;
+ }
+
+ /* save space for iv */
+ offset_vaddr = m_vaddr;
+
+ m_vaddr = (u8 *) m_vaddr + ROC_SE_OFF_CTRL_LEN + PLT_ALIGN_CEIL (iv_len, 8);
+
+ inst->w4.s.opcode_major |= (u64) ROC_DMA_MODE_SG;
+
+ /* This is DPTR len in case of SG mode */
+ inst->w4.s.dlen = inputlen + ROC_SE_OFF_CTRL_LEN;
+
+ /* iv offset is 0 */
+ *offset_vaddr = offset_ctrl;
+ iv_d = ((u8 *) offset_vaddr + ROC_SE_OFF_CTRL_LEN);
+
+ if (PREDICT_TRUE (iv_len))
+ clib_memcpy (iv_d, iv_s, iv_len);
+
+ /* DPTR has SG list */
+
+ gather_comp = (struct roc_sg2list_comp *) ((u8 *) m_vaddr);
+
+ /*
+ * Input Gather List
+ */
+ index = 0;
+
+ /* Offset control word followed by iv */
+
+ index = oct_crypto_fill_sg2_comp (gather_comp, index, (u64) offset_vaddr,
+ ROC_SE_OFF_CTRL_LEN + iv_len);
+
+ /* Add input data */
+ if (decrypt && (req_flags & ROC_SE_VALID_MAC_BUF))
+ {
+ size = inputlen - iv_len - mac_len;
+ if (size)
+ {
+ /* input data only */
+ u32 aad_offset = aad_len ? passthrough_len : 0;
+
+ index = oct_crypto_fill_sg2_comp_from_iov (gather_comp, index,
+ params->src_iov, 0, &size,
+ aad_buf, aad_offset);
+
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer"
+ " space, size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ /* mac data */
+ if (mac_len)
+ index = oct_crypto_fill_sg2_comp_from_buf (gather_comp, index,
+ &params->mac_buf);
+ }
+ else
+ {
+ /* input data */
+ size = inputlen - iv_len;
+ if (size)
+ {
+ u32 aad_offset = aad_len ? passthrough_len : 0;
+
+ index = oct_crypto_fill_sg2_comp_from_iov (gather_comp, index,
+ params->src_iov, 0, &size,
+ aad_buf, aad_offset);
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+
+ gather_sz = (index + 2) / 3;
+ g_size_bytes = gather_sz * sizeof (struct roc_sg2list_comp);
+
+ /*
+ * Output Scatter List
+ */
+
+ index = 0;
+ scatter_comp =
+ (struct roc_sg2list_comp *) ((u8 *) gather_comp + g_size_bytes);
+
+ index = oct_crypto_fill_sg2_comp (
+ scatter_comp, index, (u64) offset_vaddr + ROC_SE_OFF_CTRL_LEN, iv_len);
+
+ /* Add output data */
+ if ((!decrypt) && (req_flags & ROC_SE_VALID_MAC_BUF))
+ {
+ size = outputlen - iv_len - mac_len;
+ if (size)
+ {
+
+ u32 aad_offset = aad_len ? passthrough_len : 0;
+
+ index = oct_crypto_fill_sg2_comp_from_iov (scatter_comp, index,
+ params->dst_iov, 0, &size,
+ aad_buf, aad_offset);
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+
+ /* mac data */
+ if (mac_len)
+ index = oct_crypto_fill_sg2_comp_from_buf (scatter_comp, index,
+ &params->mac_buf);
+ }
+ else
+ {
+ /* Output including mac */
+ size = outputlen - iv_len;
+ if (size)
+ {
+ u32 aad_offset = aad_len ? passthrough_len : 0;
+
+ index = oct_crypto_fill_sg2_comp_from_iov (scatter_comp, index,
+ params->dst_iov, 0, &size,
+ aad_buf, aad_offset);
+
+ if (PREDICT_FALSE (size))
+ {
+ clib_warning ("Cryptodev: Insufficient buffer space,"
+ " size %d needed",
+ size);
+ return -1;
+ }
+ }
+ }
+
+ scatter_sz = (index + 2) / 3;
+
+ cpt_inst_w5.s.gather_sz = gather_sz;
+ cpt_inst_w6.s.scatter_sz = scatter_sz;
+
+ cpt_inst_w5.s.dptr = (u64) gather_comp;
+ cpt_inst_w6.s.rptr = (u64) scatter_comp;
+
+ inst->w5.u64 = cpt_inst_w5.u64;
+ inst->w6.u64 = cpt_inst_w6.u64;
+
+ if (PREDICT_FALSE ((scatter_sz >> 4) || (gather_sz >> 4)))
+ {
+ clib_warning (
+ "Cryptodev: Exceeds max supported components. Reduce segments");
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static_always_inline int
+oct_crypto_cpt_hmac_prep (u32 flags, u64 d_offs, u64 d_lens,
+ struct roc_se_fc_params *fc_params,
+ struct cpt_inst_s *inst, u8 is_decrypt)
+{
+ u32 encr_data_len, auth_data_len, aad_len = 0;
+ i32 inputlen, outputlen, enc_dlen, auth_dlen;
+ u32 encr_offset, auth_offset, iv_offset = 0;
+ union cpt_inst_w4 cpt_inst_w4;
+ u32 cipher_type;
+ struct roc_se_ctx *se_ctx;
+ u32 passthrough_len = 0;
+ const u8 *src = NULL;
+ u64 offset_ctrl;
+ u8 iv_len = 16;
+ u8 op_minor;
+ u32 mac_len;
+ int ret;
+
+ encr_offset = ROC_SE_ENCR_OFFSET (d_offs);
+ auth_offset = ROC_SE_AUTH_OFFSET (d_offs);
+ encr_data_len = ROC_SE_ENCR_DLEN (d_lens);
+ auth_data_len = ROC_SE_AUTH_DLEN (d_lens);
+
+ if (PREDICT_FALSE (flags & ROC_SE_VALID_AAD_BUF))
+ {
+ /* We don't support both AAD and auth data separately */
+ auth_data_len = 0;
+ auth_offset = 0;
+ aad_len = fc_params->aad_buf.size;
+ }
+
+ se_ctx = fc_params->ctx;
+ cipher_type = se_ctx->enc_cipher;
+ mac_len = se_ctx->mac_len;
+ cpt_inst_w4.u64 = se_ctx->template_w4.u64;
+ op_minor = cpt_inst_w4.s.opcode_minor;
+
+ if (PREDICT_FALSE (flags & ROC_SE_VALID_AAD_BUF))
+ {
+ /*
+       * When AAD is given, data above encr_offset is passed through.
+       * Since AAD is given as a separate pointer and not as an offset,
+       * this is a special case: we need to fragment the input data
+       * into passthrough + encr_data and then insert the AAD in between.
+ */
+ passthrough_len = encr_offset;
+ auth_offset = passthrough_len + iv_len;
+ encr_offset = passthrough_len + aad_len + iv_len;
+ auth_data_len = aad_len + encr_data_len;
+ }
+ else
+ {
+ encr_offset += iv_len;
+ auth_offset += iv_len;
+ }
+
+ auth_dlen = auth_offset + auth_data_len;
+ enc_dlen = encr_data_len + encr_offset;
+
+ cpt_inst_w4.s.opcode_major = ROC_SE_MAJOR_OP_FC;
+
+ if (is_decrypt)
+ {
+ cpt_inst_w4.s.opcode_minor |= ROC_SE_FC_MINOR_OP_DECRYPT;
+
+ if (auth_dlen > enc_dlen)
+ {
+ inputlen = auth_dlen + mac_len;
+ outputlen = auth_dlen;
+ }
+ else
+ {
+ inputlen = enc_dlen + mac_len;
+ outputlen = enc_dlen;
+ }
+ }
+ else
+ {
+ cpt_inst_w4.s.opcode_minor |= ROC_SE_FC_MINOR_OP_ENCRYPT;
+
+      /* Round up to 16-byte alignment */
+ if (PREDICT_FALSE (encr_data_len & 0xf))
+ {
+ if (PREDICT_TRUE (cipher_type == ROC_SE_AES_CBC) ||
+ (cipher_type == ROC_SE_DES3_CBC))
+ enc_dlen = PLT_ALIGN_CEIL (encr_data_len, 8) + encr_offset;
+ }
+
+ /*
+       * auth_dlen is larger than enc_dlen in authentication-only
+       * cases such as AES-GMAC.
+ */
+ if (PREDICT_FALSE (auth_dlen > enc_dlen))
+ {
+ inputlen = auth_dlen;
+ outputlen = auth_dlen + mac_len;
+ }
+ else
+ {
+ inputlen = enc_dlen;
+ outputlen = enc_dlen + mac_len;
+ }
+ }
+
+ if (op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST)
+ outputlen = enc_dlen;
+
+ cpt_inst_w4.s.param1 = encr_data_len;
+ cpt_inst_w4.s.param2 = auth_data_len;
+
+ if (PREDICT_FALSE ((encr_offset >> 16) || (iv_offset >> 8) ||
+ (auth_offset >> 8)))
+ {
+ clib_warning ("Cryptodev: Offset not supported");
+ clib_warning (
+ "Cryptodev: enc_offset: %d, iv_offset : %d, auth_offset: %d",
+ encr_offset, iv_offset, auth_offset);
+ return -1;
+ }
+
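+  /*
+   * Offset control word (stored big-endian): encrypt offset in bits 16+,
+   * IV offset in bits 8-15, auth offset in bits 0-7.
+   */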
+ offset_ctrl = clib_host_to_net_u64 (
+ ((u64) encr_offset << 16) | ((u64) iv_offset << 8) | ((u64) auth_offset));
+
+ src = fc_params->iv_buf;
+
+ inst->w4.u64 = cpt_inst_w4.u64;
+
+#ifdef PLATFORM_OCTEON9
+ ret = oct_crypto_sg_inst_prep (fc_params, inst, offset_ctrl, src, iv_len, 0,
+ 0, inputlen, outputlen, passthrough_len,
+ flags, 0, is_decrypt);
+#else
+ ret = oct_crypto_sg2_inst_prep (fc_params, inst, offset_ctrl, src, iv_len, 0,
+ 0, inputlen, outputlen, passthrough_len,
+ flags, 0, is_decrypt);
+#endif
+
+ if (PREDICT_FALSE (ret))
+ return -1;
+
+ return 0;
+}
+
+static_always_inline int
+oct_crypto_fill_fc_params (oct_crypto_sess_t *sess, struct cpt_inst_s *inst,
+ const bool is_aead, u8 aad_length, u8 *payload,
+ vnet_crypto_async_frame_elt_t *elts, void *mdata,
+ u32 cipher_data_length, u32 cipher_data_offset,
+ u32 auth_data_length, u32 auth_data_offset,
+ vlib_buffer_t *b, u16 adj_len)
+{
+ struct roc_se_fc_params fc_params = { 0 };
+ struct roc_se_ctx *ctx = &sess->cpt_ctx;
+ u64 d_offs = 0, d_lens = 0;
+ vlib_buffer_t *buffer = b;
+ u32 flags = 0, index = 0;
+ u8 op_minor = 0, cpt_op;
+ char src[SRC_IOV_SIZE];
+ u32 *iv_buf;
+
+ cpt_op = sess->cpt_op;
+
+ if (is_aead)
+ {
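+      /*
+       * Set the last 32-bit word of the 16-byte IV buffer to 1; for
+       * AES-GCM this forms the initial counter block.
+       */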
+ flags |= ROC_SE_VALID_IV_BUF;
+ iv_buf = (u32 *) elts->iv;
+ iv_buf[3] = clib_host_to_net_u32 (0x1);
+ fc_params.iv_buf = elts->iv;
+
+ d_offs = cipher_data_offset;
+ d_offs = d_offs << 16;
+
+ d_lens = cipher_data_length;
+ d_lens = d_lens << 32;
+
+ fc_params.aad_buf.vaddr = elts->aad;
+ fc_params.aad_buf.size = aad_length;
+ flags |= ROC_SE_VALID_AAD_BUF;
+
+ if (sess->cpt_ctx.mac_len)
+ {
+ flags |= ROC_SE_VALID_MAC_BUF;
+ fc_params.mac_buf.size = sess->cpt_ctx.mac_len;
+ fc_params.mac_buf.vaddr = elts->tag;
+ }
+ }
+ else
+ {
+ op_minor = ctx->template_w4.s.opcode_minor;
+
+ flags |= ROC_SE_VALID_IV_BUF;
+
+ fc_params.iv_buf = elts->iv;
+
+ d_offs = cipher_data_offset;
+ d_offs = (d_offs << 16) | auth_data_offset;
+
+ d_lens = cipher_data_length;
+ d_lens = (d_lens << 32) | auth_data_length;
+
+ if (PREDICT_TRUE (sess->cpt_ctx.mac_len))
+ {
+ if (!(op_minor & ROC_SE_FC_MINOR_OP_HMAC_FIRST))
+ {
+ flags |= ROC_SE_VALID_MAC_BUF;
+ fc_params.mac_buf.size = sess->cpt_ctx.mac_len;
+ fc_params.mac_buf.vaddr = elts->digest;
+ }
+ }
+ }
+
+ fc_params.ctx = &sess->cpt_ctx;
+
+ fc_params.src_iov = (void *) src;
+
+ fc_params.src_iov->bufs[index].vaddr = payload;
+ fc_params.src_iov->bufs[index].size = b->current_length - adj_len;
+ index++;
+
+ while (buffer->flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+ buffer = vlib_get_buffer (vlib_get_main (), buffer->next_buffer);
+ fc_params.src_iov->bufs[index].vaddr =
+ buffer->data + buffer->current_data;
+ fc_params.src_iov->bufs[index].size = buffer->current_length;
+ index++;
+ }
+
+ fc_params.src_iov->buf_cnt = index;
+
+ fc_params.dst_iov = (void *) src;
+
+ fc_params.meta_buf.vaddr = mdata;
+ fc_params.meta_buf.size = OCT_SCATTER_GATHER_BUFFER_SIZE;
+
+ return oct_crypto_cpt_hmac_prep (flags, d_offs, d_lens, &fc_params, inst,
+ cpt_op);
+}
+
+static_always_inline u64
+oct_cpt_inst_w7_get (oct_crypto_sess_t *sess, struct roc_cpt *roc_cpt)
+{
+ union cpt_inst_w7 inst_w7;
+
+ inst_w7.u64 = 0;
+ inst_w7.s.cptr = (u64) &sess->cpt_ctx.se_ctx.fctx;
+ /* Set the engine group */
+ inst_w7.s.egrp = roc_cpt->eng_grp[CPT_ENG_TYPE_IE];
+
+ return inst_w7.u64;
+}
+
+static_always_inline i32
+oct_crypto_link_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
+ u32 key_index, u8 type)
+{
+ vnet_crypto_key_t *crypto_key, *auth_key;
+ roc_se_cipher_type enc_type = 0;
+ roc_se_auth_type auth_type = 0;
+ vnet_crypto_key_t *key;
+ u32 digest_len = ~0;
+ i32 rv = 0;
+
+ key = vnet_crypto_get_key (key_index);
+
+ switch (key->async_alg)
+ {
+ case VNET_CRYPTO_ALG_AES_128_CBC_SHA1_TAG12:
+ case VNET_CRYPTO_ALG_AES_192_CBC_SHA1_TAG12:
+ case VNET_CRYPTO_ALG_AES_256_CBC_SHA1_TAG12:
+ enc_type = ROC_SE_AES_CBC;
+ auth_type = ROC_SE_SHA1_TYPE;
+ digest_len = 12;
+ break;
+ case VNET_CRYPTO_ALG_AES_128_CBC_SHA224_TAG14:
+ case VNET_CRYPTO_ALG_AES_192_CBC_SHA224_TAG14:
+ case VNET_CRYPTO_ALG_AES_256_CBC_SHA224_TAG14:
+ enc_type = ROC_SE_AES_CBC;
+ auth_type = ROC_SE_SHA2_SHA224;
+ digest_len = 14;
+ break;
+ case VNET_CRYPTO_ALG_AES_128_CBC_SHA256_TAG16:
+ case VNET_CRYPTO_ALG_AES_192_CBC_SHA256_TAG16:
+ case VNET_CRYPTO_ALG_AES_256_CBC_SHA256_TAG16:
+ enc_type = ROC_SE_AES_CBC;
+ auth_type = ROC_SE_SHA2_SHA256;
+ digest_len = 16;
+ break;
+ case VNET_CRYPTO_ALG_AES_128_CBC_SHA384_TAG24:
+ case VNET_CRYPTO_ALG_AES_192_CBC_SHA384_TAG24:
+ case VNET_CRYPTO_ALG_AES_256_CBC_SHA384_TAG24:
+ enc_type = ROC_SE_AES_CBC;
+ auth_type = ROC_SE_SHA2_SHA384;
+ digest_len = 24;
+ break;
+ case VNET_CRYPTO_ALG_AES_128_CBC_SHA512_TAG32:
+ case VNET_CRYPTO_ALG_AES_192_CBC_SHA512_TAG32:
+ case VNET_CRYPTO_ALG_AES_256_CBC_SHA512_TAG32:
+ enc_type = ROC_SE_AES_CBC;
+ auth_type = ROC_SE_SHA2_SHA512;
+ digest_len = 32;
+ break;
+ case VNET_CRYPTO_ALG_AES_128_CBC_MD5_TAG12:
+ case VNET_CRYPTO_ALG_AES_192_CBC_MD5_TAG12:
+ case VNET_CRYPTO_ALG_AES_256_CBC_MD5_TAG12:
+ enc_type = ROC_SE_AES_CBC;
+ auth_type = ROC_SE_MD5_TYPE;
+ digest_len = 12;
+ break;
+ case VNET_CRYPTO_ALG_AES_128_CTR_SHA1_TAG12:
+ case VNET_CRYPTO_ALG_AES_192_CTR_SHA1_TAG12:
+ case VNET_CRYPTO_ALG_AES_256_CTR_SHA1_TAG12:
+ enc_type = ROC_SE_AES_CTR;
+ auth_type = ROC_SE_SHA1_TYPE;
+ digest_len = 12;
+ break;
+ case VNET_CRYPTO_ALG_3DES_CBC_MD5_TAG12:
+ enc_type = ROC_SE_DES3_CBC;
+ auth_type = ROC_SE_MD5_TYPE;
+ digest_len = 12;
+ break;
+ case VNET_CRYPTO_ALG_3DES_CBC_SHA1_TAG12:
+ enc_type = ROC_SE_DES3_CBC;
+ auth_type = ROC_SE_SHA1_TYPE;
+ digest_len = 12;
+ break;
+ case VNET_CRYPTO_ALG_3DES_CBC_SHA224_TAG14:
+ enc_type = ROC_SE_DES3_CBC;
+ auth_type = ROC_SE_SHA2_SHA224;
+ digest_len = 14;
+ break;
+ case VNET_CRYPTO_ALG_3DES_CBC_SHA256_TAG16:
+ enc_type = ROC_SE_DES3_CBC;
+ auth_type = ROC_SE_SHA2_SHA256;
+ digest_len = 16;
+ break;
+ case VNET_CRYPTO_ALG_3DES_CBC_SHA384_TAG24:
+ enc_type = ROC_SE_DES3_CBC;
+ auth_type = ROC_SE_SHA2_SHA384;
+ digest_len = 24;
+ break;
+ case VNET_CRYPTO_ALG_3DES_CBC_SHA512_TAG32:
+ enc_type = ROC_SE_DES3_CBC;
+ auth_type = ROC_SE_SHA2_SHA512;
+ digest_len = 32;
+ break;
+ default:
+ clib_warning (
+ "Cryptodev: Undefined link algo %u specified. Key index %u",
+ key->async_alg, key_index);
+ return -1;
+ }
+
+ if (type == VNET_CRYPTO_OP_TYPE_ENCRYPT)
+ sess->cpt_ctx.ciph_then_auth = true;
+ else
+ sess->cpt_ctx.auth_then_ciph = true;
+
+ sess->iv_length = 16;
+ sess->cpt_op = type;
+
+ crypto_key = vnet_crypto_get_key (key->index_crypto);
+ rv = roc_se_ciph_key_set (&sess->cpt_ctx, enc_type, crypto_key->data,
+ vec_len (crypto_key->data));
+ if (rv)
+ {
+ clib_warning ("Cryptodev: Error in setting cipher key for enc type %u",
+ enc_type);
+ return -1;
+ }
+
+ auth_key = vnet_crypto_get_key (key->index_integ);
+
+ rv = roc_se_auth_key_set (&sess->cpt_ctx, auth_type, auth_key->data,
+ vec_len (auth_key->data), digest_len);
+ if (rv)
+ {
+ clib_warning ("Cryptodev: Error in setting auth key for auth type %u",
+ auth_type);
+ return -1;
+ }
+
+ return 0;
+}
+
+static_always_inline i32
+oct_crypto_aead_session_update (vlib_main_t *vm, oct_crypto_sess_t *sess,
+ u32 key_index, u8 type)
+{
+ vnet_crypto_key_t *key = vnet_crypto_get_key (key_index);
+ roc_se_cipher_type enc_type = 0;
+ roc_se_auth_type auth_type = 0;
+ u32 digest_len = ~0;
+ i32 rv = 0;
+
+ switch (key->async_alg)
+ {
+ case VNET_CRYPTO_ALG_AES_128_GCM:
+ case VNET_CRYPTO_ALG_AES_192_GCM:
+ case VNET_CRYPTO_ALG_AES_256_GCM:
+ enc_type = ROC_SE_AES_GCM;
+ sess->aes_gcm = 1;
+ sess->iv_offset = 0;
+ sess->iv_length = 16;
+ sess->cpt_ctx.mac_len = 16;
+ sess->cpt_op = type;
+ digest_len = 16;
+ break;
+ case VNET_CRYPTO_ALG_CHACHA20_POLY1305:
+ enc_type = ROC_SE_CHACHA20;
+ auth_type = ROC_SE_POLY1305;
+ break;
+ default:
+ clib_warning (
+ "Cryptodev: Undefined cipher algo %u specified. Key index %u",
+ key->async_alg, key_index);
+ return -1;
+ }
+
+ rv = roc_se_ciph_key_set (&sess->cpt_ctx, enc_type, key->data,
+ vec_len (key->data));
+ if (rv)
+ {
+ clib_warning ("Cryptodev: Error in setting cipher key for enc type %u",
+ enc_type);
+ return -1;
+ }
+
+ rv = roc_se_auth_key_set (&sess->cpt_ctx, auth_type, NULL, 0, digest_len);
+ if (rv)
+ {
+ clib_warning ("Cryptodev: Error in setting auth key for auth type %u",
+ auth_type);
+ return -1;
+ }
+
+ if (enc_type == ROC_SE_CHACHA20)
+ sess->cpt_ctx.template_w4.s.opcode_minor |= BIT (5);
+
+ return 0;
+}
+
+static_always_inline i32
+oct_crypto_session_init (vlib_main_t *vm, oct_crypto_sess_t *session,
+ vnet_crypto_key_index_t key_index, int op_type)
+{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ vnet_crypto_key_t *key;
+ oct_crypto_dev_t *ocd;
+ i32 rv = 0;
+
+ ocd = ocm->crypto_dev[op_type];
+
+ key = vnet_crypto_get_key (key_index);
+
+ if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
+ rv = oct_crypto_link_session_update (vm, session, key_index, op_type);
+ else
+ rv = oct_crypto_aead_session_update (vm, session, key_index, op_type);
+
+ if (rv)
+ {
+ oct_crypto_session_free (vm, session);
+ return -1;
+ }
+
+ session->crypto_dev = ocd;
+
+ session->cpt_inst_w7 =
+ oct_cpt_inst_w7_get (session, session->crypto_dev->roc_cpt);
+
+ session->initialised = 1;
+
+ return 0;
+}
+
+static_always_inline void
+oct_crypto_update_frame_error_status (vnet_crypto_async_frame_t *f, u32 index,
+ vnet_crypto_op_status_t s)
+{
+ u32 i;
+
+ for (i = index; i < f->n_elts; i++)
+ f->elts[i].status = s;
+
+ if (index == 0)
+ f->state = VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED;
+}
+
+static_always_inline int
+oct_crypto_enqueue_enc_dec (vlib_main_t *vm, vnet_crypto_async_frame_t *frame,
+ const u8 is_aead, u8 aad_len, const u8 type)
+{
+ u32 i, enq_tail, enc_auth_len, buffer_index, nb_infl_allowed;
+ struct cpt_inst_s inst[VNET_CRYPTO_FRAME_SIZE];
+ u32 crypto_start_offset, integ_start_offset;
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ vnet_crypto_async_frame_elt_t *elts;
+ oct_crypto_dev_t *crypto_dev = NULL;
+ oct_crypto_inflight_req_t *infl_req;
+ oct_crypto_pending_queue_t *pend_q;
+ u64 dptr_start_ptr, curr_ptr;
+ oct_crypto_sess_t *sess;
+ u32 crypto_total_length;
+ oct_crypto_key_t *key;
+ vlib_buffer_t *buffer;
+ u16 adj_len;
+ int ret;
+
+  /* GCM packets have 8 bytes of AAD and 8 bytes of IV */
+ u8 aad_iv = 8 + 8;
+
+ pend_q = &ocm->pend_q[vlib_get_thread_index ()];
+
+ enq_tail = pend_q->enq_tail;
+
+ nb_infl_allowed = pend_q->n_desc - pend_q->n_crypto_inflight;
+ if (PREDICT_FALSE (nb_infl_allowed == 0))
+ {
+ oct_crypto_update_frame_error_status (
+ frame, 0, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ return -1;
+ }
+
+ infl_req = &pend_q->req_queue[enq_tail];
+ infl_req->frame = frame;
+
+ for (i = 0; i < frame->n_elts; i++)
+ {
+ elts = &frame->elts[i];
+ buffer_index = frame->buffer_indices[i];
+ key = vec_elt_at_index (ocm->keys[type], elts->key_index);
+
+ if (PREDICT_FALSE (!key->sess))
+ {
+ oct_crypto_update_frame_error_status (
+ frame, i, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ return -1;
+ }
+
+ sess = key->sess;
+
+ if (PREDICT_FALSE (!sess->initialised))
+ oct_crypto_session_init (vm, sess, elts->key_index, type);
+
+ crypto_dev = sess->crypto_dev;
+
+ clib_memset (inst + i, 0, sizeof (struct cpt_inst_s));
+
+ buffer = vlib_get_buffer (vm, buffer_index);
+
+ if (is_aead)
+ {
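+	  /*
+	   * Start DPTR aad_iv bytes before the cipher payload so the
+	   * in-buffer AAD and IV precede the encrypted data; adj_len is
+	   * the distance from the buffer's current data pointer to DPTR
+	   * and is subtracted from the first source segment length.
+	   */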
+ dptr_start_ptr =
+ (u64) (buffer->data + (elts->crypto_start_offset - aad_iv));
+ curr_ptr = (u64) (buffer->data + buffer->current_data);
+ adj_len = (u16) (dptr_start_ptr - curr_ptr);
+
+ crypto_total_length = elts->crypto_total_length;
+ crypto_start_offset = aad_iv;
+ integ_start_offset = 0;
+
+ ret = oct_crypto_fill_fc_params (
+ sess, inst + i, is_aead, aad_len, (u8 *) dptr_start_ptr, elts,
+ (oct_crypto_scatter_gather_t *) (infl_req->sg_data) + i,
+ crypto_total_length /* cipher_len */,
+ crypto_start_offset /* cipher_offset */, 0 /* auth_len */,
+ integ_start_offset /* auth_off */, buffer, adj_len);
+ if (PREDICT_FALSE (ret < 0))
+ {
+ oct_crypto_update_frame_error_status (
+ frame, i, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ return -1;
+ }
+ }
+ else
+ {
+ dptr_start_ptr = (u64) (buffer->data + elts->integ_start_offset);
+
+ enc_auth_len = elts->crypto_total_length + elts->integ_length_adj;
+
+ curr_ptr = (u64) (buffer->data + buffer->current_data);
+ adj_len = (u16) (dptr_start_ptr - curr_ptr);
+
+ crypto_total_length = elts->crypto_total_length;
+ crypto_start_offset =
+ elts->crypto_start_offset - elts->integ_start_offset;
+ integ_start_offset = 0;
+
+ ret = oct_crypto_fill_fc_params (
+ sess, inst + i, is_aead, aad_len, (u8 *) dptr_start_ptr, elts,
+ (oct_crypto_scatter_gather_t *) (infl_req->sg_data) + i,
+ crypto_total_length /* cipher_len */,
+ crypto_start_offset /* cipher_offset */,
+ enc_auth_len /* auth_len */, integ_start_offset /* auth_off */,
+ buffer, adj_len);
+ if (PREDICT_FALSE (ret < 0))
+ {
+ oct_crypto_update_frame_error_status (
+ frame, i, VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR);
+ return -1;
+ }
+ }
+
+ inst[i].w7.u64 = sess->cpt_inst_w7;
+ inst[i].res_addr = (u64) &infl_req->res[i];
+ }
+
+ oct_crypto_burst_submit (crypto_dev, inst, frame->n_elts);
+
+ infl_req->elts = frame->n_elts;
+ OCT_MOD_INC (pend_q->enq_tail, pend_q->n_desc);
+ pend_q->n_crypto_inflight++;
+
+ return 0;
+}
+
+int
+oct_crypto_enqueue_linked_alg_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_enc_dec (
+ vm, frame, 0 /* is_aead */, 0 /* aad_len */, VNET_CRYPTO_OP_TYPE_ENCRYPT);
+}
+
+int
+oct_crypto_enqueue_linked_alg_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_enc_dec (
+ vm, frame, 0 /* is_aead */, 0 /* aad_len */, VNET_CRYPTO_OP_TYPE_DECRYPT);
+}
+
+int
+oct_crypto_enqueue_aead_aad_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame, u8 aad_len)
+{
+ return oct_crypto_enqueue_enc_dec (vm, frame, 1 /* is_aead */, aad_len,
+ VNET_CRYPTO_OP_TYPE_ENCRYPT);
+}
+
+static_always_inline int
+oct_crypto_enqueue_aead_aad_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame, u8 aad_len)
+{
+ return oct_crypto_enqueue_enc_dec (vm, frame, 1 /* is_aead */, aad_len,
+ VNET_CRYPTO_OP_TYPE_DECRYPT);
+}
+
+int
+oct_crypto_enqueue_aead_aad_8_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_aead_aad_enc (vm, frame, 8);
+}
+
+int
+oct_crypto_enqueue_aead_aad_12_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_aead_aad_enc (vm, frame, 12);
+}
+
+int
+oct_crypto_enqueue_aead_aad_0_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_aead_aad_enc (vm, frame, 0);
+}
+
+int
+oct_crypto_enqueue_aead_aad_8_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_aead_aad_dec (vm, frame, 8);
+}
+
+int
+oct_crypto_enqueue_aead_aad_12_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_aead_aad_dec (vm, frame, 12);
+}
+
+int
+oct_crypto_enqueue_aead_aad_0_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame)
+{
+ return oct_crypto_enqueue_aead_aad_dec (vm, frame, 0);
+}
+
+vnet_crypto_async_frame_t *
+oct_crypto_frame_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
+ u32 *enqueue_thread_idx)
+{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ u32 deq_head, status = VNET_CRYPTO_OP_STATUS_COMPLETED;
+ vnet_crypto_async_frame_elt_t *fe = NULL;
+ oct_crypto_inflight_req_t *infl_req;
+ oct_crypto_pending_queue_t *pend_q;
+ vnet_crypto_async_frame_t *frame;
+ volatile union cpt_res_s *res;
+ int i;
+
+ pend_q = &ocm->pend_q[vlib_get_thread_index ()];
+
+ if (!pend_q->n_crypto_inflight)
+ return NULL;
+
+ deq_head = pend_q->deq_head;
+ infl_req = &pend_q->req_queue[deq_head];
+ frame = infl_req->frame;
+
+ fe = frame->elts;
+
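+  /*
+   * Poll the completion words written back by the CPT; if any element is
+   * still pending, return NULL and resume from deq_elts on the next call.
+   */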
+ for (i = infl_req->deq_elts; i < infl_req->elts; ++i)
+ {
+ res = &infl_req->res[i];
+
+ if (PREDICT_FALSE (res->cn10k.compcode == CPT_COMP_NOT_DONE))
+ return NULL;
+
+ if (PREDICT_FALSE (res->cn10k.uc_compcode))
+ {
+ if (res->cn10k.uc_compcode == ROC_SE_ERR_GC_ICV_MISCOMPARE)
+ status = fe[i].status = VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC;
+ else
+ status = fe[i].status = VNET_CRYPTO_OP_STATUS_FAIL_ENGINE_ERR;
+ }
+
+ infl_req->deq_elts++;
+ }
+
+ clib_memset ((void *) infl_req->res, 0,
+ sizeof (union cpt_res_s) * VNET_CRYPTO_FRAME_SIZE);
+
+ OCT_MOD_INC (pend_q->deq_head, pend_q->n_desc);
+ pend_q->n_crypto_inflight--;
+
+ frame->state = status == VNET_CRYPTO_OP_STATUS_COMPLETED ?
+ VNET_CRYPTO_FRAME_STATE_SUCCESS :
+ VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
+
+ *nb_elts_processed = frame->n_elts;
+ *enqueue_thread_idx = frame->enqueue_thread_index;
+
+ infl_req->deq_elts = 0;
+ infl_req->elts = 0;
+
+ return frame;
+}
+
+int
+oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ u32 engine_index;
+
+ engine_index = vnet_crypto_register_engine (vm, "oct_cryptodev", 100,
+ "OCT Cryptodev Engine");
+
+#define _(n, k, t, a) \
+ vnet_crypto_register_enqueue_handler ( \
+ vm, engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
+ oct_crypto_enqueue_aead_aad_##a##_enc); \
+ vnet_crypto_register_enqueue_handler ( \
+ vm, engine_index, VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
+ oct_crypto_enqueue_aead_aad_##a##_dec);
+ foreach_oct_crypto_aead_async_alg
+#undef _
+
+#define _(c, h, k, d) \
+ vnet_crypto_register_enqueue_handler ( \
+ vm, engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
+ oct_crypto_enqueue_linked_alg_enc); \
+ vnet_crypto_register_enqueue_handler ( \
+ vm, engine_index, VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
+ oct_crypto_enqueue_linked_alg_dec);
+ foreach_oct_crypto_link_async_alg;
+#undef _
+
+ vnet_crypto_register_dequeue_handler (vm, engine_index,
+ oct_crypto_frame_dequeue);
+
+ vnet_crypto_register_key_handler (vm, engine_index, oct_crypto_key_handler);
+
+ return 0;
+}
+
+int
+oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ vlib_thread_main_t *tm = vlib_get_thread_main ();
+ extern oct_plt_init_param_t oct_plt_init_param;
+ oct_crypto_inflight_req_t *infl_req_queue;
+ u32 n_inflight_req;
+ int i, j = 0;
+
+ ocm->pend_q = oct_plt_init_param.oct_plt_zmalloc (
+ tm->n_vlib_mains * sizeof (oct_crypto_pending_queue_t),
+ CLIB_CACHE_LINE_BYTES);
+ if (ocm->pend_q == NULL)
+ {
+ log_err (dev, "Failed to allocate memory for crypto pending queue");
+ return -1;
+ }
+
+ /*
+   * Each pending queue gets (number of CPT descriptors / number of cores),
+   * and that descriptor count is shared across its inflight entries.
+ */
+ n_inflight_req =
+ (OCT_CPT_LF_MAX_NB_DESC / tm->n_vlib_mains) / VNET_CRYPTO_FRAME_SIZE;
+
+ for (i = 0; i < tm->n_vlib_mains; ++i)
+ {
+ ocm->pend_q[i].n_desc = n_inflight_req;
+
+ ocm->pend_q[i].req_queue = oct_plt_init_param.oct_plt_zmalloc (
+ ocm->pend_q[i].n_desc * sizeof (oct_crypto_inflight_req_t),
+ CLIB_CACHE_LINE_BYTES);
+ if (ocm->pend_q[i].req_queue == NULL)
+ {
+ log_err (dev,
+ "Failed to allocate memory for crypto inflight request");
+ goto free;
+ }
+
+      for (j = 0; j < ocm->pend_q[i].n_desc; ++j)
+ {
+ infl_req_queue = &ocm->pend_q[i].req_queue[j];
+
+ infl_req_queue->sg_data = oct_plt_init_param.oct_plt_zmalloc (
+ OCT_SCATTER_GATHER_BUFFER_SIZE * VNET_CRYPTO_FRAME_SIZE,
+ CLIB_CACHE_LINE_BYTES);
+ if (infl_req_queue->sg_data == NULL)
+ {
+ log_err (dev, "Failed to allocate crypto scatter gather memory");
+ goto free;
+ }
+ }
+ }
+ return 0;
+free:
+ for (; i >= 0; i--)
+ {
+ if (ocm->pend_q[i].req_queue == NULL)
+ continue;
+ for (; j >= 0; j--)
+ {
+ infl_req_queue = &ocm->pend_q[i].req_queue[j];
+
+ if (infl_req_queue->sg_data == NULL)
+ continue;
+
+ oct_plt_init_param.oct_plt_free (infl_req_queue->sg_data);
+ }
+ oct_plt_init_param.oct_plt_free (ocm->pend_q[i].req_queue);
+ }
+ oct_plt_init_param.oct_plt_free (ocm->pend_q);
+
+ return -1;
+}
diff --git a/src/plugins/dev_octeon/crypto.h b/src/plugins/dev_octeon/crypto.h
new file mode 100644
index 00000000000..27e1f600c68
--- /dev/null
+++ b/src/plugins/dev_octeon/crypto.h
@@ -0,0 +1,184 @@
+/*
+ * Copyright (c) 2024 Marvell.
+ * SPDX-License-Identifier: Apache-2.0
+ * https://spdx.org/licenses/Apache-2.0.html
+ */
+
+#ifndef _CRYPTO_H_
+#define _CRYPTO_H_
+#include <vnet/crypto/crypto.h>
+#include <vnet/ip/ip.h>
+
+#define OCT_MAX_N_CPT_DEV 2
+
+#define OCT_CPT_LF_MAX_NB_DESC 128000
+
+/* CRYPTO_ID, KEY_LENGTH_IN_BYTES, TAG_LEN, AAD_LEN */
+#define foreach_oct_crypto_aead_async_alg \
+ _ (AES_128_GCM, 16, 16, 8) \
+ _ (AES_128_GCM, 16, 16, 12) \
+ _ (AES_192_GCM, 24, 16, 8) \
+ _ (AES_192_GCM, 24, 16, 12) \
+ _ (AES_256_GCM, 32, 16, 8) \
+ _ (AES_256_GCM, 32, 16, 12) \
+ _ (CHACHA20_POLY1305, 32, 16, 8) \
+ _ (CHACHA20_POLY1305, 32, 16, 12) \
+ _ (CHACHA20_POLY1305, 32, 16, 0)
+
+/* CRYPTO_ID, INTEG_ID, KEY_LENGTH_IN_BYTES, DIGEST_LEN */
+#define foreach_oct_crypto_link_async_alg \
+ _ (AES_128_CBC, SHA1, 16, 12) \
+ _ (AES_192_CBC, SHA1, 24, 12) \
+ _ (AES_256_CBC, SHA1, 32, 12) \
+ _ (AES_128_CBC, SHA256, 16, 16) \
+ _ (AES_192_CBC, SHA256, 24, 16) \
+ _ (AES_256_CBC, SHA256, 32, 16) \
+ _ (AES_128_CBC, SHA384, 16, 24) \
+ _ (AES_192_CBC, SHA384, 24, 24) \
+ _ (AES_256_CBC, SHA384, 32, 24) \
+ _ (AES_128_CBC, SHA512, 16, 32) \
+ _ (AES_192_CBC, SHA512, 24, 32) \
+ _ (AES_256_CBC, SHA512, 32, 32) \
+ _ (AES_128_CBC, MD5, 16, 12) \
+ _ (AES_192_CBC, MD5, 24, 12) \
+ _ (AES_256_CBC, MD5, 32, 12) \
+ _ (3DES_CBC, MD5, 24, 12) \
+ _ (3DES_CBC, SHA1, 24, 12) \
+ _ (3DES_CBC, SHA256, 24, 16) \
+ _ (3DES_CBC, SHA384, 24, 24) \
+ _ (3DES_CBC, SHA512, 24, 32) \
+ _ (AES_128_CTR, SHA1, 16, 12) \
+ _ (AES_192_CTR, SHA1, 24, 12) \
+ _ (AES_256_CTR, SHA1, 32, 12)
+
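+/* Advance ring index i, wrapping to 0 after (l - 1) */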
+#define OCT_MOD_INC(i, l) ((i) == (l - 1) ? (i) = 0 : (i)++)
+
+#define OCT_SCATTER_GATHER_BUFFER_SIZE 1024
+
+#define CPT_LMT_SIZE_COPY (sizeof (struct cpt_inst_s) / 16)
+#define OCT_MAX_LMT_SZ 16
+
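+/*
+ * Scratch size for an on-stack roc_se_iov_ptr holding up to ROC_MAX_SG_CNT
+ * buffer pointers (used for the source/destination IOVs).
+ */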
+#define SRC_IOV_SIZE \
+ (sizeof (struct roc_se_iov_ptr) + \
+ (sizeof (struct roc_se_buf_ptr) * ROC_MAX_SG_CNT))
+
+#define OCT_CPT_LMT_GET_LINE_ADDR(lmt_addr, lmt_num) \
+ (void *) ((u64) (lmt_addr) + ((u64) (lmt_num) << ROC_LMT_LINE_SIZE_LOG2))
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ struct roc_cpt *roc_cpt;
+ struct roc_cpt_lmtline lmtline;
+ struct roc_cpt_lf lf;
+ vnet_dev_t *dev;
+} oct_crypto_dev_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ /** CPT opcode */
+ u16 cpt_op : 4;
+ /** Flag for AES GCM */
+ u16 aes_gcm : 1;
+ /** IV length in bytes */
+ u8 iv_length;
+ /** Auth IV length in bytes */
+ u8 auth_iv_length;
+ /** IV offset in bytes */
+ u16 iv_offset;
+ /** Auth IV offset in bytes */
+ u16 auth_iv_offset;
+ /** CPT inst word 7 */
+ u64 cpt_inst_w7;
+  /** Initialised as part of the first packet */
+ u8 initialised;
+  /** Link key index, stored for linked algorithms */
+ vnet_crypto_key_index_t key_index;
+ oct_crypto_dev_t *crypto_dev;
+ struct roc_se_ctx cpt_ctx;
+} oct_crypto_sess_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ oct_crypto_sess_t *sess;
+ oct_crypto_dev_t *crypto_dev;
+} oct_crypto_key_t;
+
+typedef struct oct_crypto_scatter_gather
+{
+ u8 buf[OCT_SCATTER_GATHER_BUFFER_SIZE];
+} oct_crypto_scatter_gather_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ /** Result data of all entries in the frame */
+ volatile union cpt_res_s res[VNET_CRYPTO_FRAME_SIZE];
+ /** Scatter gather data */
+ void *sg_data;
+ /** Frame pointer */
+ vnet_crypto_async_frame_t *frame;
+ /** Number of async elements in frame */
+ u16 elts;
+  /** Next entry in the frame to read on dequeue */
+ u16 deq_elts;
+} oct_crypto_inflight_req_t;
+
+typedef struct
+{
+  /** Array of pending requests */
+ oct_crypto_inflight_req_t *req_queue;
+ /** Number of inflight operations in queue */
+ u32 n_crypto_inflight;
+ /** Tail of queue to be used for enqueue */
+ u16 enq_tail;
+ /** Head of queue to be used for dequeue */
+ u16 deq_head;
+ /** Number of descriptors */
+ u16 n_desc;
+} oct_crypto_pending_queue_t;
+
+typedef struct
+{
+ oct_crypto_dev_t *crypto_dev[OCT_MAX_N_CPT_DEV];
+ oct_crypto_key_t *keys[VNET_CRYPTO_ASYNC_OP_N_TYPES];
+ oct_crypto_pending_queue_t *pend_q;
+ int n_cpt;
+ u8 started;
+} oct_crypto_main_t;
+
+extern oct_crypto_main_t oct_crypto_main;
+
+void oct_crypto_key_del_handler (vlib_main_t *vm,
+ vnet_crypto_key_index_t key_index);
+
+void oct_crypto_key_add_handler (vlib_main_t *vm,
+ vnet_crypto_key_index_t key_index);
+
+void oct_crypto_key_handler (vlib_main_t *vm, vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx);
+
+int oct_crypto_enqueue_linked_alg_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+int oct_crypto_enqueue_linked_alg_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+int oct_crypto_enqueue_aead_aad_8_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+int oct_crypto_enqueue_aead_aad_12_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+int oct_crypto_enqueue_aead_aad_0_enc (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+int oct_crypto_enqueue_aead_aad_8_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+int oct_crypto_enqueue_aead_aad_12_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+int oct_crypto_enqueue_aead_aad_0_dec (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
+vnet_crypto_async_frame_t *oct_crypto_frame_dequeue (vlib_main_t *vm,
+ u32 *nb_elts_processed,
+ u32 *enqueue_thread_idx);
+int oct_init_crypto_engine_handlers (vlib_main_t *vm, vnet_dev_t *dev);
+int oct_conf_sw_queue (vlib_main_t *vm, vnet_dev_t *dev);
+#endif /* _CRYPTO_H_ */
diff --git a/src/plugins/dev_octeon/flow.c b/src/plugins/dev_octeon/flow.c
index 35aabde76a7..e86425ec85d 100644
--- a/src/plugins/dev_octeon/flow.c
+++ b/src/plugins/dev_octeon/flow.c
@@ -131,6 +131,7 @@ oct_flow_validate_params (vlib_main_t *vm, vnet_dev_port_t *port,
vnet_dev_port_cfg_type_t type, u32 flow_index,
uword *priv_data)
{
+ vnet_dev_port_interfaces_t *ifs = port->interfaces;
vnet_flow_t *flow = vnet_get_flow (flow_index);
u32 last_queue;
u32 qid;
@@ -151,11 +152,11 @@ oct_flow_validate_params (vlib_main_t *vm, vnet_dev_port_t *port,
if (flow->actions & VNET_FLOW_ACTION_REDIRECT_TO_QUEUE)
{
qid = flow->redirect_queue;
- if (qid > port->intf.num_rx_queues - 1 || qid < 0)
+ if (qid > ifs->num_rx_queues - 1 || qid < 0)
{
log_err (port->dev,
"Given Q(%d) is invalid, supported range is %d-%d", qid, 0,
- port->intf.num_rx_queues - 1);
+ ifs->num_rx_queues - 1);
return VNET_DEV_ERR_NOT_SUPPORTED;
}
}
@@ -163,12 +164,12 @@ oct_flow_validate_params (vlib_main_t *vm, vnet_dev_port_t *port,
if (flow->actions & VNET_FLOW_ACTION_RSS)
{
last_queue = flow->queue_index + flow->queue_num;
- if (last_queue > port->intf.num_rx_queues - 1)
+ if (last_queue > ifs->num_rx_queues - 1)
{
log_err (port->dev,
"Given Q range(%d-%d) is invalid, supported range is %d-%d",
flow->queue_index, flow->queue_index + flow->queue_num, 0,
- port->intf.num_rx_queues - 1);
+ ifs->num_rx_queues - 1);
return VNET_DEV_ERR_NOT_SUPPORTED;
}
}
@@ -189,6 +190,14 @@ oct_flow_rule_create (vnet_dev_port_t *port, struct roc_npc_action *actions,
npc = &oct_port->npc;
+ for (int i = 0; item_info[i].type != ROC_NPC_ITEM_TYPE_END; i++)
+ {
+ log_debug (port->dev, "Flow[%d] Item[%d] type %d spec 0x%U mask 0x%U",
+ flow->index, i, item_info[i].type, format_hex_bytes,
+ item_info[i].spec, item_info[i].size, format_hex_bytes,
+ item_info[i].mask, item_info[i].size);
+ }
+
npc_flow =
roc_npc_flow_create (npc, &attr, item_info, actions, npc->pf_func, &rv);
if (rv)
@@ -530,6 +539,15 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
struct roc_npc_item_info item_info[ROC_NPC_ITEM_TYPE_END] = {};
struct roc_npc_action actions[ROC_NPC_ITEM_TYPE_END] = {};
oct_port_t *oct_port = vnet_dev_get_port_data (port);
+ vnet_dev_port_interfaces_t *ifs = port->interfaces;
+ ethernet_header_t eth_spec = {}, eth_mask = {};
+ sctp_header_t sctp_spec = {}, sctp_mask = {};
+ gtpu_header_t gtpu_spec = {}, gtpu_mask = {};
+ ip4_header_t ip4_spec = {}, ip4_mask = {};
+ ip6_header_t ip6_spec = {}, ip6_mask = {};
+ udp_header_t udp_spec = {}, udp_mask = {};
+ tcp_header_t tcp_spec = {}, tcp_mask = {};
+ esp_header_t esp_spec = {}, esp_mask = {};
u16 l4_src_port = 0, l4_dst_port = 0;
u16 l4_src_mask = 0, l4_dst_mask = 0;
struct roc_npc_action_rss rss_conf = {};
@@ -537,6 +555,7 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
struct roc_npc_action_mark mark = {};
struct roc_npc *npc = &oct_port->npc;
u8 *flow_spec = 0, *flow_mask = 0;
+ u8 *drv_spec = 0, *drv_mask = 0;
vnet_dev_rv_t rv = VNET_DEV_OK;
int layer = 0, index = 0;
u16 *queues = NULL;
@@ -546,7 +565,6 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
if (FLOW_IS_GENERIC_TYPE (flow))
{
- u8 drv_item_spec[1024] = { 0 }, drv_item_mask[1024] = { 0 };
unformat_input_t input;
int rc;
@@ -562,11 +580,13 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
unformat_user (&input, unformat_hex_string, &flow_mask);
unformat_free (&input);
+ vec_validate (drv_spec, 1024);
+ vec_validate (drv_mask, 1024);
oct_flow_parse_state pst = {
.nxt_proto = 0,
.port = port,
.items = item_info,
- .oct_drv = { .spec = drv_item_spec, .mask = drv_item_mask },
+ .oct_drv = { .spec = drv_spec, .mask = drv_mask },
.generic = { .spec = flow_spec,
.mask = flow_mask,
.len = vec_len (flow_spec) },
@@ -577,6 +597,8 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
{
vec_free (flow_spec);
vec_free (flow_mask);
+ vec_free (drv_spec);
+ vec_free (drv_mask);
return VNET_DEV_ERR_NOT_SUPPORTED;
}
@@ -585,9 +607,8 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
if (FLOW_IS_ETHERNET_CLASS (flow))
{
- ethernet_header_t eth_spec = { .type = clib_host_to_net_u16 (
- flow->ethernet.eth_hdr.type) },
- eth_mask = { .type = 0xFFFF };
+ eth_spec.type = clib_host_to_net_u16 (flow->ethernet.eth_hdr.type);
+ eth_mask.type = 0xFFFF;
item_info[layer].spec = (void *) &eth_spec;
item_info[layer].mask = (void *) &eth_mask;
@@ -600,10 +621,11 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
{
vnet_flow_ip4_t *ip4_hdr = &flow->ip4;
proto = ip4_hdr->protocol.prot;
- ip4_header_t ip4_spec = { .src_address = ip4_hdr->src_addr.addr,
- .dst_address = ip4_hdr->dst_addr.addr },
- ip4_mask = { .src_address = ip4_hdr->src_addr.mask,
- .dst_address = ip4_hdr->dst_addr.mask };
+
+ ip4_spec.src_address = ip4_hdr->src_addr.addr;
+ ip4_spec.dst_address = ip4_hdr->dst_addr.addr;
+ ip4_mask.src_address = ip4_hdr->src_addr.mask;
+ ip4_mask.dst_address = ip4_hdr->dst_addr.mask;
item_info[layer].spec = (void *) &ip4_spec;
item_info[layer].mask = (void *) &ip4_mask;
@@ -625,10 +647,11 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
{
vnet_flow_ip6_t *ip6_hdr = &flow->ip6;
proto = ip6_hdr->protocol.prot;
- ip6_header_t ip6_spec = { .src_address = ip6_hdr->src_addr.addr,
- .dst_address = ip6_hdr->dst_addr.addr },
- ip6_mask = { .src_address = ip6_hdr->src_addr.mask,
- .dst_address = ip6_hdr->dst_addr.mask };
+
+ ip6_spec.src_address = ip6_hdr->src_addr.addr;
+ ip6_spec.dst_address = ip6_hdr->dst_addr.addr;
+ ip6_mask.src_address = ip6_hdr->src_addr.mask;
+ ip6_mask.dst_address = ip6_hdr->dst_addr.mask;
item_info[layer].spec = (void *) &ip6_spec;
item_info[layer].mask = (void *) &ip6_mask;
@@ -653,16 +676,15 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
switch (proto)
{
case IP_PROTOCOL_UDP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_UDP;
-
- udp_header_t udp_spec = { .src_port = l4_src_port,
- .dst_port = l4_dst_port },
- udp_mask = { .src_port = l4_src_mask,
- .dst_port = l4_dst_mask };
+ udp_spec.src_port = l4_src_port;
+ udp_spec.dst_port = l4_dst_port;
+ udp_mask.src_port = l4_src_mask;
+ udp_mask.dst_port = l4_dst_mask;
item_info[layer].spec = (void *) &udp_spec;
item_info[layer].mask = (void *) &udp_mask;
item_info[layer].size = sizeof (udp_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_UDP;
layer++;
if (FLOW_IS_L4_TUNNEL_TYPE (flow))
@@ -670,14 +692,13 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
switch (flow->type)
{
case VNET_FLOW_TYPE_IP4_GTPU:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_GTPU;
- gtpu_header_t gtpu_spec = { .teid = clib_host_to_net_u32 (
- flow->ip4_gtpu.teid) },
- gtpu_mask = { .teid = 0XFFFFFFFF };
+ gtpu_spec.teid = clib_host_to_net_u32 (flow->ip4_gtpu.teid);
+ gtpu_mask.teid = 0XFFFFFFFF;
item_info[layer].spec = (void *) &gtpu_spec;
item_info[layer].mask = (void *) &gtpu_mask;
item_info[layer].size = sizeof (gtpu_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_GTPU;
layer++;
break;
@@ -689,42 +710,39 @@ oct_flow_add (vlib_main_t *vm, vnet_dev_port_t *port, vnet_flow_t *flow,
break;
case IP_PROTOCOL_TCP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_TCP;
-
- tcp_header_t tcp_spec = { .src_port = l4_src_port,
- .dst_port = l4_dst_port },
- tcp_mask = { .src_port = l4_src_mask,
- .dst_port = l4_dst_mask };
+ tcp_spec.src_port = l4_src_port;
+ tcp_spec.dst_port = l4_dst_port;
+ tcp_mask.src_port = l4_src_mask;
+ tcp_mask.dst_port = l4_dst_mask;
item_info[layer].spec = (void *) &tcp_spec;
item_info[layer].mask = (void *) &tcp_mask;
item_info[layer].size = sizeof (tcp_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_TCP;
layer++;
break;
case IP_PROTOCOL_SCTP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_SCTP;
-
- sctp_header_t sctp_spec = { .src_port = l4_src_port,
- .dst_port = l4_dst_port },
- sctp_mask = { .src_port = l4_src_mask,
- .dst_port = l4_dst_mask };
+ sctp_spec.src_port = l4_src_port;
+ sctp_spec.dst_port = l4_dst_port;
+ sctp_mask.src_port = l4_src_mask;
+ sctp_mask.dst_port = l4_dst_mask;
item_info[layer].spec = (void *) &sctp_spec;
item_info[layer].mask = (void *) &sctp_mask;
item_info[layer].size = sizeof (sctp_header_t);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_SCTP;
layer++;
break;
case IP_PROTOCOL_IPSEC_ESP:
- item_info[layer].type = ROC_NPC_ITEM_TYPE_ESP;
- esp_header_t esp_spec = { .spi = clib_host_to_net_u32 (
- flow->ip4_ipsec_esp.spi) },
- esp_mask = { .spi = 0xFFFFFFFF };
+ esp_spec.spi = clib_host_to_net_u32 (flow->ip4_ipsec_esp.spi);
+ esp_mask.spi = 0xFFFFFFFF;
item_info[layer].spec = (void *) &esp_spec;
item_info[layer].mask = (void *) &esp_mask;
item_info[layer].size = sizeof (u32);
+ item_info[layer].type = ROC_NPC_ITEM_TYPE_ESP;
layer++;
break;
@@ -759,7 +777,7 @@ parse_flow_actions:
log_err (port->dev, "RSS action has no queues");
return VNET_DEV_ERR_NOT_SUPPORTED;
}
- queues = clib_mem_alloc (sizeof (u16) * port->intf.num_rx_queues);
+ queues = clib_mem_alloc (sizeof (u16) * ifs->num_rx_queues);
for (index = 0; index < flow->queue_num; index++)
queues[index] = flow->queue_index++;
@@ -803,10 +821,10 @@ parse_flow_actions:
if (queues)
clib_mem_free (queues);
- if (flow_spec)
- vec_free (flow_spec);
- if (flow_mask)
- vec_free (flow_mask);
+ vec_free (flow_spec);
+ vec_free (flow_mask);
+ vec_free (drv_spec);
+ vec_free (drv_mask);
return rv;
}
diff --git a/src/plugins/dev_octeon/init.c b/src/plugins/dev_octeon/init.c
index fd65ce6d9e2..99cadddfc24 100644
--- a/src/plugins/dev_octeon/init.c
+++ b/src/plugins/dev_octeon/init.c
@@ -10,6 +10,7 @@
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <dev_octeon/octeon.h>
+#include <dev_octeon/crypto.h>
#include <base/roc_api.h>
#include <common.h>
@@ -54,7 +55,9 @@ static struct
_ (0xa064, RVU_VF, "Marvell Octeon Resource Virtualization Unit VF"),
_ (0xa0f8, LBK_VF, "Marvell Octeon Loopback Unit VF"),
_ (0xa0f7, SDP_VF, "Marvell Octeon System DPI Packet Interface Unit VF"),
- _ (0xa0f3, CPT_VF, "Marvell Octeon Cryptographic Accelerator Unit VF"),
+ _ (0xa0f3, O10K_CPT_VF,
+ "Marvell Octeon-10 Cryptographic Accelerator Unit VF"),
+ _ (0xa0fe, O9K_CPT_VF, "Marvell Octeon-9 Cryptographic Accelerator Unit VF"),
#undef _
};
@@ -110,6 +113,7 @@ oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev)
.reta_sz = ROC_NIX_RSS_RETA_SZ_256,
.max_sqb_count = 512,
.pci_dev = &cd->plt_pci_dev,
+ .hw_vlan_ins = true,
};
if ((rrv = roc_nix_dev_init (cd->nix)))
@@ -131,6 +135,9 @@ oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev)
.rx_offloads = {
.ip4_cksum = 1,
},
+ .tx_offloads = {
+ .ip4_cksum = 1,
+ },
},
.ops = {
.init = oct_port_init,
@@ -187,17 +194,113 @@ oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev)
return vnet_dev_port_add (vm, dev, 0, &port_add_args);
}
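+/*
+ * Attach the SE (symmetric) and IE (IPsec) crypto engine groups to this CPT
+ * VF, verify that they land in the default ROC engine group slots, and
+ * configure the requested number of logical functions.
+ */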
+static int
+oct_conf_cpt (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd,
+ int nb_lf)
+{
+ struct roc_cpt *roc_cpt = ocd->roc_cpt;
+ int rrv;
+
+ if ((rrv = roc_cpt_eng_grp_add (roc_cpt, CPT_ENG_TYPE_SE)) < 0)
+ {
+ log_err (dev, "Could not add CPT SE engines");
+ return cnx_return_roc_err (dev, rrv, "roc_cpt_eng_grp_add");
+ }
+ if ((rrv = roc_cpt_eng_grp_add (roc_cpt, CPT_ENG_TYPE_IE)) < 0)
+ {
+ log_err (dev, "Could not add CPT IE engines");
+ return cnx_return_roc_err (dev, rrv, "roc_cpt_eng_grp_add");
+ }
+ if (roc_cpt->eng_grp[CPT_ENG_TYPE_IE] != ROC_CPT_DFLT_ENG_GRP_SE_IE)
+ {
+ log_err (dev, "Invalid CPT IE engine group configuration");
+ return -1;
+ }
+ if (roc_cpt->eng_grp[CPT_ENG_TYPE_SE] != ROC_CPT_DFLT_ENG_GRP_SE)
+ {
+ log_err (dev, "Invalid CPT SE engine group configuration");
+ return -1;
+ }
+ if ((rrv = roc_cpt_dev_configure (roc_cpt, nb_lf, false, 0)) < 0)
+ {
+      log_err (dev, "Could not configure crypto device %U",
+ format_vlib_pci_addr, roc_cpt->pci_dev->addr);
+ return cnx_return_roc_err (dev, rrv, "roc_cpt_dev_configure");
+ }
+ return 0;
+}
+
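+/*
+ * Bring up a single CPT logical function (instruction queue) and the LMT
+ * line used to submit crypto instructions to it.
+ */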
+static vnet_dev_rv_t
+oct_conf_cpt_queue (vlib_main_t *vm, vnet_dev_t *dev, oct_crypto_dev_t *ocd)
+{
+ struct roc_cpt *roc_cpt = ocd->roc_cpt;
+ struct roc_cpt_lmtline *cpt_lmtline;
+ struct roc_cpt_lf *cpt_lf;
+ int rrv;
+
+ cpt_lf = &ocd->lf;
+ cpt_lmtline = &ocd->lmtline;
+
+ cpt_lf->nb_desc = OCT_CPT_LF_MAX_NB_DESC;
+ cpt_lf->lf_id = 0;
+ if ((rrv = roc_cpt_lf_init (roc_cpt, cpt_lf)) < 0)
+ return cnx_return_roc_err (dev, rrv, "roc_cpt_lf_init");
+
+ roc_cpt_iq_enable (cpt_lf);
+
+  if ((rrv = roc_cpt_lmtline_init (roc_cpt, cpt_lmtline, 0)) < 0)
+ return cnx_return_roc_err (dev, rrv, "roc_cpt_lmtline_init");
+
+ return 0;
+}
+
static vnet_dev_rv_t
oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
{
+ oct_crypto_main_t *ocm = &oct_crypto_main;
+ extern oct_plt_init_param_t oct_plt_init_param;
oct_device_t *cd = vnet_dev_get_data (dev);
+ oct_crypto_dev_t *ocd = NULL;
int rrv;
- struct roc_cpt cpt = {
- .pci_dev = &cd->plt_pci_dev,
- };
- if ((rrv = roc_cpt_dev_init (&cpt)))
+ if (ocm->n_cpt == OCT_MAX_N_CPT_DEV || ocm->started)
+ return VNET_DEV_ERR_NOT_SUPPORTED;
+
+ ocd = oct_plt_init_param.oct_plt_zmalloc (sizeof (oct_crypto_dev_t),
+ CLIB_CACHE_LINE_BYTES);
+
+ ocd->roc_cpt = oct_plt_init_param.oct_plt_zmalloc (sizeof (struct roc_cpt),
+ CLIB_CACHE_LINE_BYTES);
+ ocd->roc_cpt->pci_dev = &cd->plt_pci_dev;
+
+ ocd->dev = dev;
+
+ if ((rrv = roc_cpt_dev_init (ocd->roc_cpt)))
return cnx_return_roc_err (dev, rrv, "roc_cpt_dev_init");
+
+ if ((rrv = oct_conf_cpt (vm, dev, ocd, 1)))
+ return rrv;
+
+ if ((rrv = oct_conf_cpt_queue (vm, dev, ocd)))
+ return rrv;
+
+ if (!ocm->n_cpt)
+ {
+ /*
+ * Initialize s/w queues, which are common across multiple
+ * crypto devices
+ */
+ oct_conf_sw_queue (vm, dev);
+
+ ocm->crypto_dev[0] = ocd;
+ }
+
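+  /*
+   * Slot 0 keeps the first CPT device that was probed; slot 1 always tracks
+   * the most recently probed one.
+   */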
+ ocm->crypto_dev[1] = ocd;
+
+ oct_init_crypto_engine_handlers (vm, dev);
+
+ ocm->n_cpt++;
+
return VNET_DEV_OK;
}
@@ -252,7 +355,8 @@ oct_init (vlib_main_t *vm, vnet_dev_t *dev)
case OCT_DEVICE_TYPE_SDP_VF:
return oct_init_nix (vm, dev);
- case OCT_DEVICE_TYPE_CPT_VF:
+ case OCT_DEVICE_TYPE_O10K_CPT_VF:
+ case OCT_DEVICE_TYPE_O9K_CPT_VF:
return oct_init_cpt (vm, dev);
default:
diff --git a/src/plugins/dev_octeon/octeon.h b/src/plugins/dev_octeon/octeon.h
index a87a5e3e1ed..ccf8f62880d 100644
--- a/src/plugins/dev_octeon/octeon.h
+++ b/src/plugins/dev_octeon/octeon.h
@@ -30,7 +30,8 @@ typedef enum
OCT_DEVICE_TYPE_RVU_VF,
OCT_DEVICE_TYPE_LBK_VF,
OCT_DEVICE_TYPE_SDP_VF,
- OCT_DEVICE_TYPE_CPT_VF,
+ OCT_DEVICE_TYPE_O10K_CPT_VF,
+ OCT_DEVICE_TYPE_O9K_CPT_VF,
} __clib_packed oct_device_type_t;
typedef struct
@@ -41,7 +42,6 @@ typedef struct
u8 full_duplex : 1;
u32 speed;
struct plt_pci_device plt_pci_dev;
- struct roc_cpt cpt;
struct roc_nix *nix;
} oct_device_t;
@@ -102,7 +102,6 @@ typedef struct
u64 aura_handle;
u64 io_addr;
void *lmt_addr;
-
oct_npa_batch_alloc_cl128_t *ba_buffer;
u8 ba_first_cl;
u8 ba_num_cl;
diff --git a/src/plugins/dev_octeon/port.c b/src/plugins/dev_octeon/port.c
index 8ba9041f858..f8a7d6ba7db 100644
--- a/src/plugins/dev_octeon/port.c
+++ b/src/plugins/dev_octeon/port.c
@@ -53,28 +53,117 @@ oct_roc_err (vnet_dev_t *dev, int rv, char *fmt, ...)
}
vnet_dev_rv_t
+oct_port_pause_flow_control_init (vlib_main_t *vm, vnet_dev_port_t *port)
+{
+ vnet_dev_t *dev = port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ struct roc_nix *nix = cd->nix;
+ struct roc_nix_fc_cfg fc_cfg;
+ struct roc_nix_sq *sq;
+ struct roc_nix_cq *cq;
+ struct roc_nix_rq *rq;
+ int rrv;
+
+ /* pause flow control is not supported on SDP/LBK devices */
+ if (roc_nix_is_sdp (nix) || roc_nix_is_lbk (nix))
+ {
+ log_notice (dev,
+ "pause flow control is not supported on SDP/LBK devices");
+ return VNET_DEV_OK;
+ }
+
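+  /*
+   * Flow control is enabled in three steps: on the RX channel, per RQ using
+   * each CQ's drop threshold, and per SQ through the traffic manager.
+   */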
+ fc_cfg.type = ROC_NIX_FC_RXCHAN_CFG;
+ fc_cfg.rxchan_cfg.enable = true;
+ rrv = roc_nix_fc_config_set (nix, &fc_cfg);
+ if (rrv)
+ return oct_roc_err (dev, rrv, "roc_nix_fc_config_set failed");
+
+ memset (&fc_cfg, 0, sizeof (struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_RQ_CFG;
+ fc_cfg.rq_cfg.enable = true;
+ fc_cfg.rq_cfg.tc = 0;
+
+ foreach_vnet_dev_port_rx_queue (rxq, port)
+ {
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+
+ rq = &crq->rq;
+ cq = &crq->cq;
+
+ fc_cfg.rq_cfg.rq = rq->qid;
+ fc_cfg.rq_cfg.cq_drop = cq->drop_thresh;
+
+ rrv = roc_nix_fc_config_set (nix, &fc_cfg);
+ if (rrv)
+ return oct_roc_err (dev, rrv, "roc_nix_fc_config_set failed");
+ }
+
+ memset (&fc_cfg, 0, sizeof (struct roc_nix_fc_cfg));
+ fc_cfg.type = ROC_NIX_FC_TM_CFG;
+ fc_cfg.tm_cfg.tc = 0;
+ fc_cfg.tm_cfg.enable = true;
+
+ foreach_vnet_dev_port_tx_queue (txq, port)
+ {
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+
+ sq = &ctq->sq;
+
+ fc_cfg.tm_cfg.sq = sq->qid;
+ rrv = roc_nix_fc_config_set (nix, &fc_cfg);
+ if (rrv)
+ return oct_roc_err (dev, rrv, "roc_nix_fc_config_set failed");
+ }
+
+ /* By default, enable pause flow control */
+ rrv = roc_nix_fc_mode_set (nix, ROC_NIX_FC_FULL);
+ if (rrv)
+ return oct_roc_err (dev, rrv, "roc_nix_fc_mode_set failed");
+
+ return VNET_DEV_OK;
+}
+
+vnet_dev_rv_t
oct_port_init (vlib_main_t *vm, vnet_dev_port_t *port)
{
vnet_dev_t *dev = port->dev;
oct_device_t *cd = vnet_dev_get_data (dev);
oct_port_t *cp = vnet_dev_get_port_data (port);
+ vnet_dev_port_interfaces_t *ifs = port->interfaces;
+ u8 mac_addr[PLT_ETHER_ADDR_LEN];
struct roc_nix *nix = cd->nix;
vnet_dev_rv_t rv;
int rrv;
log_debug (dev, "port init: port %u", port->port_id);
- if ((rrv = roc_nix_lf_alloc (nix, port->intf.num_rx_queues,
- port->intf.num_tx_queues, rxq_cfg)))
+ if ((rrv = roc_nix_lf_alloc (nix, ifs->num_rx_queues, ifs->num_tx_queues,
+ rxq_cfg)))
{
oct_port_deinit (vm, port);
return oct_roc_err (
dev, rrv,
"roc_nix_lf_alloc(nb_rxq = %u, nb_txq = %d, rxq_cfg=0x%lx) failed",
- port->intf.num_rx_queues, port->intf.num_tx_queues, rxq_cfg);
+ ifs->num_rx_queues, ifs->num_tx_queues, rxq_cfg);
}
cp->lf_allocated = 1;
+ if (!roc_nix_is_vf_or_sdp (nix))
+ {
+ if ((rrv = roc_nix_npc_mac_addr_get (nix, mac_addr)))
+ {
+ oct_port_deinit (vm, port);
+ return oct_roc_err (dev, rrv, "roc_nix_npc_mac_addr_get failed");
+ }
+
+ /* Sync MAC address to CGX/RPM table */
+ if ((rrv = roc_nix_mac_addr_set (nix, mac_addr)))
+ {
+ oct_port_deinit (vm, port);
+ return oct_roc_err (dev, rrv, "roc_nix_mac_addr_set failed");
+ }
+ }
+
if ((rrv = roc_nix_tm_init (nix)))
{
oct_port_deinit (vm, port);
@@ -125,6 +214,19 @@ oct_port_init (vlib_main_t *vm, vnet_dev_port_t *port)
oct_port_add_counters (vm, port);
+  if ((rrv = roc_nix_mac_mtu_set (nix, port->max_rx_frame_size)))
+    {
+      oct_port_deinit (vm, port);
+      rv = oct_roc_err (dev, rrv, "roc_nix_mac_mtu_set() failed");
+      return rv;
+    }
+
+  /* Configure pause frame flow control */
+ if ((rv = oct_port_pause_flow_control_init (vm, port)))
+ {
+ oct_port_deinit (vm, port);
+ return rv;
+ }
+
return VNET_DEV_OK;
}
@@ -188,7 +290,7 @@ oct_port_poll (vlib_main_t *vm, vnet_dev_port_t *port)
return;
}
- if (roc_nix_is_lbk (nix))
+ if (roc_nix_is_lbk (nix) || roc_nix_is_sdp (nix))
{
link_info.status = 1;
link_info.full_duplex = 1;
@@ -327,6 +429,7 @@ oct_port_start (vlib_main_t *vm, vnet_dev_port_t *port)
{
vnet_dev_t *dev = port->dev;
oct_device_t *cd = vnet_dev_get_data (dev);
+ oct_port_t *cp = vnet_dev_get_port_data (port);
struct roc_nix *nix = cd->nix;
struct roc_nix_eeprom_info eeprom_info = {};
vnet_dev_rv_t rv;
@@ -344,15 +447,15 @@ oct_port_start (vlib_main_t *vm, vnet_dev_port_t *port)
ctq->n_enq = 0;
}
- if ((rrv = roc_nix_mac_mtu_set (nix, 9200)))
+ if ((rrv = roc_nix_npc_rx_ena_dis (nix, true)))
{
- rv = oct_roc_err (dev, rrv, "roc_nix_mac_mtu_set() failed");
+ rv = oct_roc_err (dev, rrv, "roc_nix_npc_rx_ena_dis() failed");
goto done;
}
- if ((rrv = roc_nix_npc_rx_ena_dis (nix, true)))
+ if ((rrv = roc_npc_mcam_enable_all_entries (&cp->npc, true)))
{
- rv = oct_roc_err (dev, rrv, "roc_nix_npc_rx_ena_dis() failed");
+ rv = oct_roc_err (dev, rrv, "roc_npc_mcam_enable_all_entries() failed");
goto done;
}
@@ -374,6 +477,7 @@ oct_port_stop (vlib_main_t *vm, vnet_dev_port_t *port)
{
vnet_dev_t *dev = port->dev;
oct_device_t *cd = vnet_dev_get_data (dev);
+ oct_port_t *cp = vnet_dev_get_port_data (port);
struct roc_nix *nix = cd->nix;
int rrv;
@@ -381,6 +485,14 @@ oct_port_stop (vlib_main_t *vm, vnet_dev_port_t *port)
vnet_dev_poll_port_remove (vm, port, oct_port_poll);
+ /* Disable all the NPC entries */
+ rrv = roc_npc_mcam_enable_all_entries (&cp->npc, false);
+ if (rrv)
+ {
+ oct_roc_err (dev, rrv, "roc_npc_mcam_enable_all_entries() failed");
+ return;
+ }
+
rrv = roc_nix_npc_rx_ena_dis (nix, false);
if (rrv)
{
@@ -393,6 +505,18 @@ oct_port_stop (vlib_main_t *vm, vnet_dev_port_t *port)
foreach_vnet_dev_port_tx_queue (q, port)
oct_txq_stop (vm, q);
+
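+  /* Notify the stack that the link is now down. */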
+ vnet_dev_port_state_change (vm, port,
+ (vnet_dev_port_state_changes_t){
+ .change.link_state = 1,
+ .change.link_speed = 1,
+ .link_speed = 0,
+ .link_state = 0,
+ });
+
+ /* Update the device status */
+ cd->status = 0;
+ cd->speed = 0;
}
vnet_dev_rv_t
@@ -445,7 +569,6 @@ oct_port_add_del_eth_addr (vlib_main_t *vm, vnet_dev_port_t *port,
oct_device_t *cd = vnet_dev_get_data (dev);
struct roc_nix *nix = cd->nix;
vnet_dev_rv_t rv = VNET_DEV_OK;
-
i32 rrv;
if (is_primary)
@@ -469,8 +592,30 @@ oct_port_add_del_eth_addr (vlib_main_t *vm, vnet_dev_port_t *port,
rv = oct_roc_err (dev, rrv, "roc_nix_mac_addr_set() failed");
}
}
+
+ rrv = roc_nix_rss_default_setup (nix, default_rss_flowkey);
+ if (rrv)
+ rv = oct_roc_err (dev, rrv, "roc_nix_rss_default_setup() failed");
}
}
+
+ return rv;
+}
+
+vnet_dev_rv_t
+oct_op_config_max_rx_len (vlib_main_t *vm, vnet_dev_port_t *port,
+ u32 rx_frame_size)
+{
+ vnet_dev_t *dev = port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ struct roc_nix *nix = cd->nix;
+ vnet_dev_rv_t rv = VNET_DEV_OK;
+ i32 rrv;
+
+ rrv = roc_nix_mac_max_rx_len_set (nix, rx_frame_size);
+ if (rrv)
+ rv = oct_roc_err (dev, rrv, "roc_nix_mac_max_rx_len_set() failed");
+
return rv;
}
@@ -535,6 +680,7 @@ oct_port_cfg_change (vlib_main_t *vm, vnet_dev_port_t *port,
break;
case VNET_DEV_PORT_CFG_MAX_RX_FRAME_SIZE:
+ rv = oct_op_config_max_rx_len (vm, port, req->max_rx_frame_size);
break;
case VNET_DEV_PORT_CFG_ADD_RX_FLOW:
diff --git a/src/plugins/dev_octeon/roc_helper.c b/src/plugins/dev_octeon/roc_helper.c
index 16e0a871a9d..c1166b654cf 100644
--- a/src/plugins/dev_octeon/roc_helper.c
+++ b/src/plugins/dev_octeon/roc_helper.c
@@ -75,13 +75,12 @@ oct_drv_physmem_alloc (vlib_main_t *vm, u32 size, u32 align)
if (align)
{
- /* Force cache line alloc in case alignment is less than cache line */
- align = align < CLIB_CACHE_LINE_BYTES ? CLIB_CACHE_LINE_BYTES : align;
+      /* Round the alignment up to ROC_ALIGN if a smaller alignment was requested */
+ align = align < ROC_ALIGN ? ROC_ALIGN : align;
mem = vlib_physmem_alloc_aligned_on_numa (vm, size, align, 0);
}
else
- mem =
- vlib_physmem_alloc_aligned_on_numa (vm, size, CLIB_CACHE_LINE_BYTES, 0);
+ mem = vlib_physmem_alloc_aligned_on_numa (vm, size, ROC_ALIGN, 0);
if (!mem)
return NULL;
diff --git a/src/plugins/dev_octeon/rx_node.c b/src/plugins/dev_octeon/rx_node.c
index 1f8d5d93fa3..833227eeea8 100644
--- a/src/plugins/dev_octeon/rx_node.c
+++ b/src/plugins/dev_octeon/rx_node.c
@@ -103,8 +103,10 @@ oct_rx_batch (vlib_main_t *vm, oct_rx_node_ctx_t *ctx,
vnet_dev_rx_queue_t *rxq, u32 n)
{
oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
- vlib_buffer_template_t bt = rxq->buffer_template;
- u32 n_left;
+ vlib_buffer_template_t bt = vnet_dev_get_rx_queue_if_buffer_template (rxq);
+ u32 b0_err_flags = 0, b1_err_flags = 0;
+ u32 b2_err_flags = 0, b3_err_flags = 0;
+ u32 n_left, err_flags = 0;
oct_nix_rx_cqe_desc_t *d = ctx->next_desc;
vlib_buffer_t *b[4];
@@ -145,6 +147,13 @@ oct_rx_batch (vlib_main_t *vm, oct_rx_node_ctx_t *ctx,
oct_rx_attach_tail (vm, ctx, b[2], d + 2);
oct_rx_attach_tail (vm, ctx, b[3], d + 3);
}
+
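+      /* Collect per-descriptor error bits (parse word 0, bits 20-31). */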
+ b0_err_flags = (d[0].parse.w[0] >> 20) & 0xFFF;
+ b1_err_flags = (d[1].parse.w[0] >> 20) & 0xFFF;
+ b2_err_flags = (d[2].parse.w[0] >> 20) & 0xFFF;
+ b3_err_flags = (d[3].parse.w[0] >> 20) & 0xFFF;
+
+ err_flags |= b0_err_flags | b1_err_flags | b2_err_flags | b3_err_flags;
}
for (; n_left; d += 1, n_left -= 1, ctx->to_next += 1)
@@ -157,11 +166,16 @@ oct_rx_batch (vlib_main_t *vm, oct_rx_node_ctx_t *ctx,
ctx->n_segs += 1;
if (d[0].sg0.segs > 1)
oct_rx_attach_tail (vm, ctx, b[0], d + 0);
+
+ err_flags |= ((d[0].parse.w[0] >> 20) & 0xFFF);
}
plt_write64 ((crq->cq.wdata | n), crq->cq.door);
ctx->n_rx_pkts += n;
ctx->n_left_to_next -= n;
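+  /* Record the accumulated error bits at their parse word 0 position. */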
+ if (err_flags)
+ ctx->parse_w0_or = (err_flags << 20);
+
return n;
}
@@ -333,9 +347,9 @@ oct_rx_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
oct_nix_rx_cqe_desc_t *descs = crq->cq.desc_base;
oct_nix_lf_cq_op_status_t status;
oct_rx_node_ctx_t _ctx = {
- .next_index = rxq->next_index,
- .sw_if_index = port->intf.sw_if_index,
- .hw_if_index = port->intf.hw_if_index,
+    .next_index = vnet_dev_get_rx_queue_if_next_index (rxq),
+ .sw_if_index = vnet_dev_get_rx_queue_if_sw_if_index (rxq),
+ .hw_if_index = vnet_dev_get_rx_queue_if_hw_if_index (rxq),
}, *ctx = &_ctx;
/* get head and tail from NIX_LF_CQ_OP_STATUS */
diff --git a/src/plugins/dev_octeon/tx_node.c b/src/plugins/dev_octeon/tx_node.c
index 0907493814d..f42f18d989b 100644
--- a/src/plugins/dev_octeon/tx_node.c
+++ b/src/plugins/dev_octeon/tx_node.c
@@ -255,19 +255,18 @@ oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b,
if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
{
d.hdr_w1.ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
- d.hdr_w1.ol3ptr = vnet_buffer (b)->l3_hdr_offset;
- d.hdr_w1.ol4ptr =
- vnet_buffer (b)->l3_hdr_offset + sizeof (ip4_header_t);
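+      /* The send header expects L3/L4 offsets relative to the start of the
+         packet data, while l3/l4_hdr_offset are measured from the buffer
+         start, so rebase them by current_data. */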
+ d.hdr_w1.ol3ptr = vnet_buffer (b)->l3_hdr_offset - b->current_data;
+ d.hdr_w1.ol4ptr = d.hdr_w1.ol3ptr + sizeof (ip4_header_t);
}
if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
{
d.hdr_w1.ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
- d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
+ d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset - b->current_data;
}
else if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
{
d.hdr_w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
- d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
+ d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset - b->current_data;
}
}
@@ -301,7 +300,8 @@ oct_tx_enq16 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq,
{
u8 dwords_per_line[16], *dpl = dwords_per_line;
u64 __attribute__ ((unused)) lmt_arg, ioaddr, n_lines;
- u32 n_left, or_flags_16 = 0, n = 0;
+ u32 __attribute__ ((unused)) or_flags_16 = 0;
+ u32 n_left, n = 0;
const u32 not_simple_flags =
VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD;
lmt_line_t *l = ctx->lmt_lines;