path: root/src/plugins
author    Damjan Marion <damarion@cisco.com>  2023-10-23 18:36:18 +0200
committer Damjan Marion <damarion@cisco.com>  2024-01-17 20:44:10 +0100
commit    01fe7ab88efe1771618358ee5e90f56996ba909e (patch)
tree      be82513c2c07c6febe8e305d8c2e9f19af1a3508 /src/plugins
parent    dc26d50426792954e372cb7949b94fd3eb573942 (diff)
octeon: native driver for Marvell Octeon SoC
Type: feature
Change-Id: I6898625c4e8854f777407dac3159e4c639a54860
Signed-off-by: Monendra Singh Kushwaha <kmonendra@marvell.com>
Signed-off-by: Damjan Marion <damarion@cisco.com>
Diffstat (limited to 'src/plugins')
-rw-r--r--  src/plugins/dev_octeon/CMakeLists.txt    41
-rw-r--r--  src/plugins/dev_octeon/common.h           29
-rw-r--r--  src/plugins/dev_octeon/format.c          164
-rw-r--r--  src/plugins/dev_octeon/hw_defs.h          98
-rw-r--r--  src/plugins/dev_octeon/init.c            301
-rw-r--r--  src/plugins/dev_octeon/octeon.h          154
-rw-r--r--  src/plugins/dev_octeon/port.c            418
-rw-r--r--  src/plugins/dev_octeon/queue.c           303
-rw-r--r--  src/plugins/dev_octeon/roc_helper.c      181
-rw-r--r--  src/plugins/dev_octeon/rx_node.c         385
-rw-r--r--  src/plugins/dev_octeon/tx_node.c         427
11 files changed, 2501 insertions, 0 deletions
diff --git a/src/plugins/dev_octeon/CMakeLists.txt b/src/plugins/dev_octeon/CMakeLists.txt
new file mode 100644
index 00000000000..b7c25fe0404
--- /dev/null
+++ b/src/plugins/dev_octeon/CMakeLists.txt
@@ -0,0 +1,41 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright(c) 2022 Cisco Systems, Inc.
+
+if (NOT VPP_PLATFORM_NAME STREQUAL "octeon10")
+ return()
+endif()
+
+# Find OCTEON roc files
+vpp_find_path(OCTEON_ROC_DIR PATH_SUFFIXES octeon-roc NAMES platform.h)
+vpp_plugin_find_library(dev-octeon OCTEON_ROC_LIB "libocteon-roc.a")
+
+if (NOT OCTEON_ROC_DIR)
+ message("OCTEON ROC files not found - Marvell OCTEON device plugin disabled")
+ return()
+endif()
+
+if (NOT OCTEON_ROC_LIB)
+ message("OCTEON ROC library (libocteon-roc.a) not found - Marvell OCTEON device plugin disabled")
+ return ()
+endif()
+
+include_directories (${OCTEON_ROC_DIR}/)
+
+add_vpp_plugin(dev_octeon
+ SOURCES
+ init.c
+ format.c
+ port.c
+ queue.c
+ roc_helper.c
+ rx_node.c
+ tx_node.c
+
+ MULTIARCH_SOURCES
+ rx_node.c
+ tx_node.c
+
+ LINK_LIBRARIES
+ ${OCTEON_ROC_LIB}
+)
+
diff --git a/src/plugins/dev_octeon/common.h b/src/plugins/dev_octeon/common.h
new file mode 100644
index 00000000000..a7a051526d2
--- /dev/null
+++ b/src/plugins/dev_octeon/common.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#ifndef _OCT_COMMON_H_
+#define _OCT_COMMON_H_
+
+#include <vppinfra/clib.h>
+#include <vppinfra/format.h>
+#include <vnet/vnet.h>
+#include <vnet/dev/dev.h>
+#include <base/roc_api.h>
+
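+/* Pop every pointer currently held by an NPA aura and return the
+ * corresponding buffers to the VPP buffer pool. roc_npa_aura_op_alloc()
+ * returns 0 once the aura is empty, which ends the loop; hdr_off translates
+ * the address stored in the aura back to the vlib_buffer_t header. */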
+static_always_inline u32
+oct_aura_free_all_buffers (vlib_main_t *vm, u64 aura_handle, u16 hdr_off)
+{
+ u32 n = 0;
+ u64 iova;
+
+ while ((iova = roc_npa_aura_op_alloc (aura_handle, 0)))
+ {
+ vlib_buffer_t *b = (void *) iova + hdr_off;
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
+ n++;
+ }
+ return n;
+}
+
+#endif /* _OCT_COMMON_H_ */
diff --git a/src/plugins/dev_octeon/format.c b/src/plugins/dev_octeon/format.c
new file mode 100644
index 00000000000..5ee956ad4f4
--- /dev/null
+++ b/src/plugins/dev_octeon/format.c
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#include "vlib/pci/pci.h"
+#include "vnet/error.h"
+#include "vppinfra/error.h"
+#include <vnet/vnet.h>
+#include <vnet/dev/dev.h>
+#include <dev_octeon/octeon.h>
+
+u8 *
+format_oct_port_status (u8 *s, va_list *args)
+{
+ return s;
+}
+
+u8 *
+format_oct_nix_rx_cqe_desc (u8 *s, va_list *args)
+{
+ oct_nix_rx_cqe_desc_t *d = va_arg (*args, oct_nix_rx_cqe_desc_t *);
+ u32 indent = format_get_indent (s);
+ typeof (d->hdr) *h = &d->hdr;
+ typeof (d->parse.f) *p = &d->parse.f;
+ typeof (d->sg0) *sg0 = &d->sg0;
+ typeof (d->sg0) *sg1 = &d->sg1;
+
+  s = format (s, "hdr: cqe_type %u node %u q %u tag 0x%x", h->cqe_type,
+ h->node, h->q, h->tag);
+ s = format (s, "\n%Uparse:", format_white_space, indent);
+#define _(n, f) s = format (s, " " #n " " f, p->n)
+ _ (chan, "%u");
+ _ (errcode, "%u");
+ _ (errlev, "%u");
+ _ (desc_sizem1, "%u");
+ _ (pkt_lenm1, "%u");
+ _ (pkind, "%u");
+ s = format (s, "\n%U ", format_white_space, indent);
+ _ (nix_idx, "%u");
+ _ (color, "%u");
+ _ (flow_key_alg, "%u");
+ _ (eoh_ptr, "%u");
+ _ (match_id, "0x%x");
+ s = format (s, "\n%U ", format_white_space, indent);
+ _ (wqe_aura, "0x%x");
+ _ (pb_aura, "0x%x");
+ _ (imm_copy, "%u");
+ _ (express, "%u");
+ _ (wqwd, "%u");
+ _ (l2m, "%u");
+ _ (l2b, "%u");
+ _ (l3m, "%u");
+ _ (l3b, "%u");
+#undef _
+ s = format (s, "\n%U ", format_white_space, indent);
+ s = format (s, "layer: a b c d e f g h");
+ s = format (s, "\n%U ", format_white_space, indent);
+ s = format (s, "type: %3u %3u %3u %3u %3u %3u %3u %3u", p->latype,
+ p->lbtype, p->lctype, p->ldtype, p->letype, p->lftype, p->lgtype,
+ p->lhtype);
+ s = format (s, "\n%U ", format_white_space, indent);
+ s = format (
+ s, "flags: 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x",
+ p->laflags, p->lbflags, p->lcflags, p->ldflags, p->leflags, p->lfflags,
+ p->lgflags, p->lhflags);
+ s = format (s, "\n%U ", format_white_space, indent);
+ s = format (s, "ptr: %3u %3u %3u %3u %3u %3u %3u %3u", p->laptr,
+ p->lbptr, p->lcptr, p->ldptr, p->leptr, p->lfptr, p->lgptr,
+ p->lhptr);
+
+ if (sg0->subdc != 0x4)
+ return format (s, "\n%Usg0: unexpected subdc %x", format_white_space,
+ indent, sg0->subdc);
+
+ s = format (s,
+ "\n%Usg0: segs %u seg1_sz %u seg2_sz %u seg3_sz %u seg1 "
+ "%p seg2 %p seg3 %p",
+ format_white_space, indent, sg0->segs, sg0->seg1_size,
+ sg0->seg2_size, sg0->seg3_size, d->segs0[0], d->segs0[1],
+ d->segs0[2]);
+
+ if (sg1->subdc != 0x4 && sg1->subdc != 0)
+ return format (s, "\n%Usg1: unexpected subdc %x", format_white_space,
+ indent, sg1->subdc);
+
+ if (sg1->subdc == 4)
+ s = format (s,
+ "\n%Usg1: segs %u seg1_sz %u seg2_sz %u seg3_sz %u seg1 "
+ "%p seg2 %p seg3 %p",
+ format_white_space, indent, sg1->segs, sg1->seg1_size,
+ sg1->seg2_size, sg1->seg3_size, d->segs1[0], d->segs1[1],
+ d->segs1[2]);
+
+ return s;
+}
+
+u8 *
+format_oct_rx_trace (u8 *s, va_list *args)
+{
+ vlib_main_t *vm = va_arg (*args, vlib_main_t *);
+ vlib_node_t *node = va_arg (*args, vlib_node_t *);
+ oct_rx_trace_t *t = va_arg (*args, oct_rx_trace_t *);
+ u32 indent = format_get_indent (s);
+
+ s = format (s, "octeon-rx: next-node %U sw_if_index %u",
+ format_vlib_next_node_name, vm, node->index, t->next_index,
+ t->sw_if_index);
+ s = format (s, "\n%U%U", format_white_space, indent + 2,
+ format_oct_nix_rx_cqe_desc, &t->desc);
+ return s;
+}
+
+u8 *
+format_oct_tx_trace (u8 *s, va_list *args)
+{
+ va_arg (*args, vlib_main_t *);
+ va_arg (*args, vlib_node_t *);
+ oct_tx_trace_t *t = va_arg (*args, oct_tx_trace_t *);
+ u32 indent = format_get_indent (s);
+
+ s = format (s, "octeon-tx: sw_if_index %u", t->sw_if_index);
+ s = format (s, "\n%Uhdr[0]:", format_white_space, indent + 2);
+#define _(n, f) s = format (s, " " #n " " f, t->desc.hdr_w0.n)
+ _ (total, "%u");
+ _ (df, "%u");
+ _ (aura, "0x%x");
+ _ (sizem1, "%u");
+ _ (pnc, "%u");
+ _ (sq, "%u");
+#undef _
+ s = format (s, "\n%Uhdr[1]:", format_white_space, indent + 2);
+#define _(n, f) s = format (s, " " #n " " f, t->desc.hdr_w1.n)
+ _ (ol3ptr, "%u");
+ _ (ol4ptr, "%u");
+ _ (il3ptr, "%u");
+ _ (il4ptr, "%u");
+ _ (ol3type, "%u");
+ _ (ol4type, "%u");
+ _ (il3type, "%u");
+ _ (il4type, "%u");
+ _ (sqe_id, "%u");
+#undef _
+
+ foreach_int (j, 0, 4)
+ {
+ s = format (s, "\n%Usg[%u]:", format_white_space, indent + 2, j);
+#define _(n, f) s = format (s, " " #n " " f, t->desc.sg[j].n)
+ _ (subdc, "%u");
+ _ (segs, "%u");
+ _ (seg1_size, "%u");
+ _ (seg2_size, "%u");
+ _ (seg3_size, "%u");
+ _ (i1, "%u");
+ _ (i2, "%u");
+ _ (i3, "%u");
+ _ (ld_type, "%u");
+#undef _
+ for (int i = 1; i < 4; i++)
+ s = format (s, "\n%Usg[%u]: %p", format_white_space, indent + 2, i + j,
+ t->desc.sg[i + j]);
+ }
+
+ return s;
+}
diff --git a/src/plugins/dev_octeon/hw_defs.h b/src/plugins/dev_octeon/hw_defs.h
new file mode 100644
index 00000000000..ab0fc7bd8da
--- /dev/null
+++ b/src/plugins/dev_octeon/hw_defs.h
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#ifndef _OCT_HW_DEFS_H_
+#define _OCT_HW_DEFS_H_
+
+#include <vppinfra/clib.h>
+#include <base/roc_api.h>
+
+typedef union
+{
+ struct
+ {
+ u64 tail : 20;
+ u64 head : 20;
+ u64 resv40 : 6;
+ u64 cq_err : 1;
+ u64 resv47 : 16;
+ u64 op_err : 1;
+ };
+ u64 as_u64;
+} oct_nix_lf_cq_op_status_t;
+
+STATIC_ASSERT_SIZEOF (oct_nix_lf_cq_op_status_t, 8);
+
+typedef union
+{
+ struct
+ {
+ u64 aura : 20;
+    u64 _reserved20 : 12;
+ u64 count_eot : 1;
+ u64 _reserved33 : 30;
+ u64 fabs : 1;
+ };
+ u64 as_u64;
+} oct_npa_lf_aura_batch_free0_t;
+
+STATIC_ASSERT_SIZEOF (oct_npa_lf_aura_batch_free0_t, 8);
+
+typedef struct
+{
+ oct_npa_lf_aura_batch_free0_t w0;
+ u64 data[15];
+} oct_npa_lf_aura_batch_free_line_t;
+
+STATIC_ASSERT_SIZEOF (oct_npa_lf_aura_batch_free_line_t, 128);
+
+typedef union
+{
+ struct npa_batch_alloc_compare_s compare_s;
+ u64 as_u64;
+} oct_npa_batch_alloc_compare_t;
+
+typedef union
+{
+ struct
+ {
+ union nix_send_hdr_w0_u hdr_w0;
+ union nix_send_hdr_w1_u hdr_w1;
+ union nix_send_sg_s sg[8];
+ };
+ u128 as_u128[5];
+} oct_tx_desc_t;
+
+STATIC_ASSERT_SIZEOF (oct_tx_desc_t, 80);
+
+typedef union
+{
+ u128 dwords[8];
+ u64 words[16];
+} lmt_line_t;
+
+STATIC_ASSERT_SIZEOF (lmt_line_t, 1 << ROC_LMT_LINE_SIZE_LOG2);
+
+typedef union
+{
+ union nix_rx_parse_u f;
+ u64 w[7];
+} oct_nix_rx_parse_t;
+
+STATIC_ASSERT_SIZEOF (oct_nix_rx_parse_t, 56);
+
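+/* Software view of one 128-byte NIX completion queue entry: CQE header,
+ * parse results and up to two scatter/gather sub-descriptors, each followed
+ * by three segment pointers. The field layout is assumed to match the CQE
+ * format produced with the RQ/CQ settings programmed in queue.c. */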
+typedef struct
+{
+ CLIB_ALIGN_MARK (desc, 128);
+ struct nix_cqe_hdr_s hdr;
+ oct_nix_rx_parse_t parse;
+ struct nix_rx_sg_s sg0;
+ void *segs0[3];
+ struct nix_rx_sg_s sg1;
+ void *segs1[3];
+} oct_nix_rx_cqe_desc_t;
+
+STATIC_ASSERT_SIZEOF (oct_nix_rx_cqe_desc_t, 128);
+
+#endif /* _OCT_HW_DEFS_H_ */
diff --git a/src/plugins/dev_octeon/init.c b/src/plugins/dev_octeon/init.c
new file mode 100644
index 00000000000..bee449f2123
--- /dev/null
+++ b/src/plugins/dev_octeon/init.c
@@ -0,0 +1,301 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dev/dev.h>
+#include <vnet/dev/pci.h>
+#include <vnet/dev/counters.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+#include <dev_octeon/octeon.h>
+
+#include <base/roc_api.h>
+#include <common.h>
+
+struct roc_model oct_model;
+
+VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
+ .class_name = "octeon",
+ .subclass_name = "init",
+};
+
+#define _(f, n, s, d) \
+ { .name = #n, .desc = d, .severity = VL_COUNTER_SEVERITY_##s },
+
+vlib_error_desc_t oct_tx_node_counters[] = { foreach_oct_tx_node_counter };
+#undef _
+
+vnet_dev_node_t oct_rx_node = {
+ .format_trace = format_oct_rx_trace,
+};
+
+vnet_dev_node_t oct_tx_node = {
+ .format_trace = format_oct_tx_trace,
+ .error_counters = oct_tx_node_counters,
+ .n_error_counters = ARRAY_LEN (oct_tx_node_counters),
+};
+
+static struct
+{
+ u16 device_id;
+ oct_device_type_t type;
+ char *description;
+} oct_dev_types[] = {
+
+#define _(id, device_type, desc) \
+ { \
+ .device_id = (id), .type = OCT_DEVICE_TYPE_##device_type, \
+ .description = (desc) \
+ }
+
+ _ (0xa063, RVU_PF, "Marvell Octeon Resource Virtualization Unit PF"),
+ _ (0xa0f3, CPT_VF, "Marvell Octeon Cryptographic Accelerator Unit VF"),
+#undef _
+};
+
+static u8 *
+oct_probe (vlib_main_t *vm, vnet_dev_bus_index_t bus_index, void *dev_info)
+{
+ vnet_dev_bus_pci_device_info_t *di = dev_info;
+
+ if (di->vendor_id != 0x177d) /* Cavium */
+ return 0;
+
+ FOREACH_ARRAY_ELT (dt, oct_dev_types)
+ {
+ if (dt->device_id == di->device_id)
+ return format (0, "%s", dt->description);
+ }
+
+ return 0;
+}
+
+vnet_dev_rv_t
+cnx_return_roc_err (vnet_dev_t *dev, int rrv, char *fmt, ...)
+{
+ va_list va;
+ va_start (va, fmt);
+ u8 *s = va_format (0, fmt, &va);
+ va_end (va);
+
+ log_err (dev, "%v: %s [%d]", s, roc_error_msg_get (rrv), rrv);
+ vec_free (s);
+
+ return VNET_DEV_ERR_UNSUPPORTED_DEVICE;
+}
+
+static vnet_dev_rv_t
+oct_alloc (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ cd->nix =
+ clib_mem_alloc_aligned (sizeof (struct roc_nix), CLIB_CACHE_LINE_BYTES);
+ return VNET_DEV_OK;
+}
+
+static vnet_dev_rv_t
+oct_init_nix (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ u8 mac_addr[6];
+ int rrv;
+ oct_port_t oct_port = {};
+
+ *cd->nix = (struct roc_nix){
+ .reta_sz = ROC_NIX_RSS_RETA_SZ_256,
+ .max_sqb_count = 512,
+ .pci_dev = &cd->plt_pci_dev,
+ };
+
+ if ((rrv = roc_nix_dev_init (cd->nix)))
+ return cnx_return_roc_err (dev, rrv, "roc_nix_dev_init");
+
+  if ((rrv = roc_nix_npc_mac_addr_get (cd->nix, mac_addr)))
+ return cnx_return_roc_err (dev, rrv, "roc_nix_npc_mac_addr_get");
+
+ vnet_dev_port_add_args_t port_add_args = {
+ .port = {
+ .attr = {
+ .type = VNET_DEV_PORT_TYPE_ETHERNET,
+ .max_rx_queues = 64,
+ .max_tx_queues = 64,
+ .max_supported_rx_frame_size = roc_nix_max_pkt_len (cd->nix),
+ .caps = {
+ .rss = 1,
+ },
+ .rx_offloads = {
+ .ip4_cksum = 1,
+ },
+ },
+ .ops = {
+ .init = oct_port_init,
+ .deinit = oct_port_deinit,
+ .start = oct_port_start,
+ .stop = oct_port_stop,
+ .config_change = oct_port_cfg_change,
+ .format_status = format_oct_port_status,
+ },
+ .data_size = sizeof (oct_port_t),
+ .initial_data = &oct_port,
+ },
+ .rx_node = &oct_rx_node,
+ .tx_node = &oct_tx_node,
+ .rx_queue = {
+ .config = {
+ .data_size = sizeof (oct_rxq_t),
+ .default_size = 1024,
+ .multiplier = 32,
+ .min_size = 256,
+ .max_size = 16384,
+ },
+ .ops = {
+ .alloc = oct_rx_queue_alloc,
+ .free = oct_rx_queue_free,
+ .format_info = format_oct_rxq_info,
+ },
+ },
+ .tx_queue = {
+ .config = {
+ .data_size = sizeof (oct_txq_t),
+ .default_size = 1024,
+ .multiplier = 32,
+ .min_size = 256,
+ .max_size = 16384,
+ },
+ .ops = {
+ .alloc = oct_tx_queue_alloc,
+ .free = oct_tx_queue_free,
+ .format_info = format_oct_txq_info,
+ },
+ },
+ };
+
+ vnet_dev_set_hw_addr_eth_mac (&port_add_args.port.attr.hw_addr, mac_addr);
+
+ log_info (dev, "MAC address is %U", format_ethernet_address, mac_addr);
+
+ return vnet_dev_port_add (vm, dev, 0, &port_add_args);
+}
+
+static vnet_dev_rv_t
+oct_init_cpt (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ int rrv;
+ struct roc_cpt cpt = {
+ .pci_dev = &cd->plt_pci_dev,
+ };
+
+ if ((rrv = roc_cpt_dev_init (&cpt)))
+ return cnx_return_roc_err (dev, rrv, "roc_cpt_dev_init");
+ return VNET_DEV_OK;
+}
+
+static vnet_dev_rv_t
+oct_init (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ vlib_pci_config_hdr_t pci_hdr;
+ vnet_dev_rv_t rv;
+
+ rv = vnet_dev_pci_read_config_header (vm, dev, &pci_hdr);
+ if (rv != VNET_DEV_OK)
+ return rv;
+
+ if (pci_hdr.vendor_id != 0x177d)
+ return VNET_DEV_ERR_UNSUPPORTED_DEVICE;
+
+ FOREACH_ARRAY_ELT (dt, oct_dev_types)
+ {
+ if (dt->device_id == pci_hdr.device_id)
+ cd->type = dt->type;
+ }
+
+ if (cd->type == OCT_DEVICE_TYPE_UNKNOWN)
+    return VNET_DEV_ERR_UNSUPPORTED_DEVICE;
+
+ rv = VNET_DEV_ERR_UNSUPPORTED_DEVICE;
+
+ cd->plt_pci_dev = (struct plt_pci_device){
+ .id.vendor_id = pci_hdr.vendor_id,
+ .id.device_id = pci_hdr.device_id,
+ .id.class_id = pci_hdr.class << 16 | pci_hdr.subclass,
+ .pci_handle = vnet_dev_get_pci_handle (dev),
+ };
+
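+  /* Map PCI BARs 2 and 4 only - the regions the ROC platform layer is
+   * assumed to need for device CSR and mailbox/LMT access. */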
+ foreach_int (i, 2, 4)
+ {
+ rv = vnet_dev_pci_map_region (vm, dev, i,
+ &cd->plt_pci_dev.mem_resource[i].addr);
+ if (rv != VNET_DEV_OK)
+ return rv;
+ }
+
+ strncpy ((char *) cd->plt_pci_dev.name, dev->device_id,
+ sizeof (cd->plt_pci_dev.name) - 1);
+
+ if (cd->type == OCT_DEVICE_TYPE_RVU_PF)
+ return oct_init_nix (vm, dev);
+ else if (cd->type == OCT_DEVICE_TYPE_CPT_VF)
+ return oct_init_cpt (vm, dev);
+ else
+ return VNET_DEV_ERR_UNSUPPORTED_DEVICE;
+
+ return 0;
+}
+
+static void
+oct_deinit (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ oct_device_t *cd = vnet_dev_get_data (dev);
+
+ if (cd->nix_initialized)
+ roc_nix_dev_fini (cd->nix);
+}
+
+static void
+oct_free (vlib_main_t *vm, vnet_dev_t *dev)
+{
+ oct_device_t *cd = vnet_dev_get_data (dev);
+
+ if (cd->nix_initialized)
+ roc_nix_dev_fini (cd->nix);
+}
+
+VNET_DEV_REGISTER_DRIVER (octeon) = {
+ .name = "octeon",
+ .bus = "pci",
+ .device_data_sz = sizeof (oct_device_t),
+ .ops = {
+ .alloc = oct_alloc,
+ .init = oct_init,
+ .deinit = oct_deinit,
+ .free = oct_free,
+ .probe = oct_probe,
+ },
+};
+
+static clib_error_t *
+oct_plugin_init (vlib_main_t *vm)
+{
+ int rv;
+ extern oct_plt_init_param_t oct_plt_init_param;
+
+ rv = oct_plt_init (&oct_plt_init_param);
+ if (rv)
+ return clib_error_return (0, "oct_plt_init failed");
+
+ rv = roc_model_init (&oct_model);
+ if (rv)
+ return clib_error_return (0, "roc_model_init failed");
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (oct_plugin_init);
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "dev_octeon",
+};
diff --git a/src/plugins/dev_octeon/octeon.h b/src/plugins/dev_octeon/octeon.h
new file mode 100644
index 00000000000..a87330c6f90
--- /dev/null
+++ b/src/plugins/dev_octeon/octeon.h
@@ -0,0 +1,154 @@
+
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+#ifndef _OCTEON_H_
+#define _OCTEON_H_
+#include <vppinfra/clib.h>
+#include <vppinfra/error_bootstrap.h>
+#include <vppinfra/format.h>
+#include <vnet/vnet.h>
+#include <vnet/dev/dev.h>
+#include <base/roc_api.h>
+#include <dev_octeon/hw_defs.h>
+
+typedef enum
+{
+ OCT_DEVICE_TYPE_UNKNOWN = 0,
+ OCT_DEVICE_TYPE_RVU_PF,
+ OCT_DEVICE_TYPE_CPT_VF,
+} __clib_packed oct_device_type_t;
+
+typedef struct
+{
+ oct_device_type_t type;
+ u8 nix_initialized : 1;
+ u8 status : 1;
+ u8 full_duplex : 1;
+ u32 speed;
+ struct plt_pci_device plt_pci_dev;
+ struct roc_cpt cpt;
+ struct roc_nix *nix;
+} oct_device_t;
+
+typedef struct
+{
+ u8 lf_allocated : 1;
+ u8 tm_initialized : 1;
+ u8 npc_initialized : 1;
+ struct roc_npc npc;
+} oct_port_t;
+
+typedef struct
+{
+ u8 npa_pool_initialized : 1;
+ u8 cq_initialized : 1;
+ u8 rq_initialized : 1;
+ u16 hdr_off;
+ u32 n_enq;
+ u64 aura_handle;
+ u64 aura_batch_free_ioaddr;
+ u64 lmt_base_addr;
+ CLIB_CACHE_LINE_ALIGN_MARK (data0);
+ struct roc_nix_cq cq;
+ struct roc_nix_rq rq;
+} oct_rxq_t;
+
+typedef struct
+{
+ CLIB_ALIGN_MARK (cl, 128);
+ union
+ {
+ struct npa_batch_alloc_status_s status;
+ u64 iova[16];
+ };
+} oct_npa_batch_alloc_cl128_t;
+
+STATIC_ASSERT_SIZEOF (oct_npa_batch_alloc_cl128_t, 128);
+
+typedef struct
+{
+ u8 sq_initialized : 1;
+ u8 npa_pool_initialized : 1;
+ u16 hdr_off;
+ u32 n_enq;
+ u64 aura_handle;
+ u64 io_addr;
+ void *lmt_addr;
+
+ oct_npa_batch_alloc_cl128_t *ba_buffer;
+ u8 ba_first_cl;
+ u8 ba_num_cl;
+ CLIB_CACHE_LINE_ALIGN_MARK (data0);
+ struct roc_nix_sq sq;
+} oct_txq_t;
+
+/* format.c */
+format_function_t format_oct_port_status;
+format_function_t format_oct_rx_trace;
+format_function_t format_oct_tx_trace;
+
+/* port.c */
+vnet_dev_rv_t oct_port_init (vlib_main_t *, vnet_dev_port_t *);
+vnet_dev_rv_t oct_port_start (vlib_main_t *, vnet_dev_port_t *);
+void oct_port_stop (vlib_main_t *, vnet_dev_port_t *);
+void oct_port_deinit (vlib_main_t *, vnet_dev_port_t *);
+vnet_dev_rv_t oct_port_cfg_change (vlib_main_t *, vnet_dev_port_t *,
+ vnet_dev_port_cfg_change_req_t *);
+
+/* queue.c */
+vnet_dev_rv_t oct_rx_queue_alloc (vlib_main_t *, vnet_dev_rx_queue_t *);
+vnet_dev_rv_t oct_tx_queue_alloc (vlib_main_t *, vnet_dev_tx_queue_t *);
+void oct_rx_queue_free (vlib_main_t *, vnet_dev_rx_queue_t *);
+void oct_tx_queue_free (vlib_main_t *, vnet_dev_tx_queue_t *);
+vnet_dev_rv_t oct_rxq_init (vlib_main_t *, vnet_dev_rx_queue_t *);
+vnet_dev_rv_t oct_txq_init (vlib_main_t *, vnet_dev_tx_queue_t *);
+void oct_rxq_deinit (vlib_main_t *, vnet_dev_rx_queue_t *);
+void oct_txq_deinit (vlib_main_t *, vnet_dev_tx_queue_t *);
+format_function_t format_oct_rxq_info;
+format_function_t format_oct_txq_info;
+
+#define log_debug(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_DEBUG, oct_log.class, "%U: " f, \
+ format_vnet_dev_addr, (dev), ##__VA_ARGS__)
+#define log_info(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_INFO, oct_log.class, "%U: " f, \
+ format_vnet_dev_addr, (dev), ##__VA_ARGS__)
+#define log_notice(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_NOTICE, oct_log.class, "%U: " f, \
+ format_vnet_dev_addr, (dev), ##__VA_ARGS__)
+#define log_warn(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_WARNING, oct_log.class, "%U: " f, \
+ format_vnet_dev_addr, (dev), ##__VA_ARGS__)
+#define log_err(dev, f, ...) \
+ vlib_log (VLIB_LOG_LEVEL_ERR, oct_log.class, "%U: " f, \
+ format_vnet_dev_addr, (dev), ##__VA_ARGS__)
+
+#define foreach_oct_tx_node_counter \
+ _ (CHAIN_TOO_LONG, chain_too_long, ERROR, "drop due to buffer chain > 6") \
+ _ (NO_FREE_SLOTS, no_free_slots, ERROR, "no free tx slots") \
+ _ (AURA_BATCH_ALLOC_ISSUE_FAIL, aura_batch_alloc_issue_fail, ERROR, \
+ "aura batch alloc issue failed") \
+ _ (AURA_BATCH_ALLOC_NOT_READY, aura_batch_alloc_not_ready, ERROR, \
+ "aura batch alloc not ready")
+
+typedef enum
+{
+#define _(f, n, s, d) OCT_TX_NODE_CTR_##f,
+ foreach_oct_tx_node_counter
+#undef _
+} oct_tx_node_counter_t;
+
+typedef struct
+{
+ u32 sw_if_index;
+ u32 next_index;
+ oct_nix_rx_cqe_desc_t desc;
+} oct_rx_trace_t;
+
+typedef struct
+{
+ u32 sw_if_index;
+ oct_tx_desc_t desc;
+} oct_tx_trace_t;
+#endif /* _OCTEON_H_ */
diff --git a/src/plugins/dev_octeon/port.c b/src/plugins/dev_octeon/port.c
new file mode 100644
index 00000000000..5857bc15f77
--- /dev/null
+++ b/src/plugins/dev_octeon/port.c
@@ -0,0 +1,418 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dev/dev.h>
+#include <vnet/dev/pci.h>
+#include <vnet/dev/counters.h>
+#include <dev_octeon/octeon.h>
+#include <dev_octeon/common.h>
+#include <vnet/ethernet/ethernet.h>
+
+VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
+ .class_name = "octeon",
+ .subclass_name = "port",
+};
+
+static const u8 default_rss_key[] = {
+ 0xfe, 0xed, 0x0b, 0xad, 0xfe, 0xed, 0x0b, 0xad, 0xad, 0x0b, 0xed, 0xfe,
+ 0xad, 0x0b, 0xed, 0xfe, 0x13, 0x57, 0x9b, 0xef, 0x24, 0x68, 0xac, 0x0e,
+ 0x91, 0x72, 0x53, 0x11, 0x82, 0x64, 0x20, 0x44, 0x12, 0xef, 0x34, 0xcd,
+ 0x56, 0xbc, 0x78, 0x9a, 0x9a, 0x78, 0xbc, 0x56, 0xcd, 0x34, 0xef, 0x12
+};
+
+static const u32 default_rss_flowkey =
+ (FLOW_KEY_TYPE_IPV4 | FLOW_KEY_TYPE_IPV6 | FLOW_KEY_TYPE_TCP |
+ FLOW_KEY_TYPE_UDP | FLOW_KEY_TYPE_SCTP);
+
+static const u64 rxq_cfg =
+ ROC_NIX_LF_RX_CFG_DIS_APAD | ROC_NIX_LF_RX_CFG_IP6_UDP_OPT |
+ ROC_NIX_LF_RX_CFG_L2_LEN_ERR | ROC_NIX_LF_RX_CFG_DROP_RE |
+ ROC_NIX_LF_RX_CFG_CSUM_OL4 | ROC_NIX_LF_RX_CFG_CSUM_IL4 |
+ ROC_NIX_LF_RX_CFG_LEN_OL3 | ROC_NIX_LF_RX_CFG_LEN_OL4 |
+ ROC_NIX_LF_RX_CFG_LEN_IL3 | ROC_NIX_LF_RX_CFG_LEN_IL4;
+
+static vnet_dev_rv_t
+oct_roc_err (vnet_dev_t *dev, int rv, char *fmt, ...)
+{
+ u8 *s = 0;
+ va_list va;
+
+ va_start (va, fmt);
+ s = va_format (s, fmt, &va);
+ va_end (va);
+
+ log_err (dev, "%v - ROC error %s (%d)", s, roc_error_msg_get (rv), rv);
+
+ vec_free (s);
+ return VNET_DEV_ERR_INTERNAL;
+}
+
+vnet_dev_rv_t
+oct_port_init (vlib_main_t *vm, vnet_dev_port_t *port)
+{
+ vnet_dev_t *dev = port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ oct_port_t *cp = vnet_dev_get_port_data (port);
+ struct roc_nix *nix = cd->nix;
+ vnet_dev_rv_t rv;
+ int rrv;
+
+ log_debug (dev, "port init: port %u", port->port_id);
+
+ if ((rrv = roc_nix_lf_alloc (nix, port->intf.num_rx_queues,
+ port->intf.num_tx_queues, rxq_cfg)))
+ {
+ oct_port_deinit (vm, port);
+ return oct_roc_err (
+ dev, rrv,
+ "roc_nix_lf_alloc(nb_rxq = %u, nb_txq = %d, rxq_cfg=0x%lx) failed",
+ port->intf.num_rx_queues, port->intf.num_tx_queues, rxq_cfg);
+ }
+ cp->lf_allocated = 1;
+
+ if ((rrv = roc_nix_tm_init (nix)))
+ {
+ oct_port_deinit (vm, port);
+ return oct_roc_err (dev, rrv, "roc_nix_tm_init() failed");
+ }
+ cp->tm_initialized = 1;
+
+ if ((rrv = roc_nix_tm_hierarchy_enable (nix, ROC_NIX_TM_DEFAULT,
+ /* xmit_enable */ 0)))
+ {
+ oct_port_deinit (vm, port);
+ return oct_roc_err (dev, rrv, "roc_nix_tm_hierarchy_enable() failed");
+ }
+
+ if ((rrv = roc_nix_rss_default_setup (nix, default_rss_flowkey)))
+ {
+ oct_port_deinit (vm, port);
+ return oct_roc_err (dev, rrv, "roc_nix_rss_default_setup() failed");
+ }
+
+ roc_nix_rss_key_set (nix, default_rss_key);
+
+ cp->npc.roc_nix = nix;
+ if ((rrv = roc_npc_init (&cp->npc)))
+ {
+ oct_port_deinit (vm, port);
+ return oct_roc_err (dev, rrv, "roc_npc_init() failed");
+ }
+ cp->npc_initialized = 1;
+
+ foreach_vnet_dev_port_rx_queue (q, port)
+ if (q->enabled)
+ if ((rv = oct_rxq_init (vm, q)))
+ {
+ oct_port_deinit (vm, port);
+ return rv;
+ }
+
+ foreach_vnet_dev_port_tx_queue (q, port)
+ if (q->enabled)
+ if ((rv = oct_txq_init (vm, q)))
+ {
+ oct_port_deinit (vm, port);
+ return rv;
+ }
+
+ return VNET_DEV_OK;
+}
+
+void
+oct_port_deinit (vlib_main_t *vm, vnet_dev_port_t *port)
+{
+ vnet_dev_t *dev = port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ oct_port_t *cp = vnet_dev_get_port_data (port);
+ struct roc_nix *nix = cd->nix;
+ int rrv;
+
+ foreach_vnet_dev_port_rx_queue (q, port)
+ oct_rxq_deinit (vm, q);
+ foreach_vnet_dev_port_tx_queue (q, port)
+ oct_txq_deinit (vm, q);
+
+ if (cp->npc_initialized)
+ {
+ if ((rrv = roc_npc_fini (&cp->npc)))
+ oct_roc_err (dev, rrv, "roc_npc_fini() failed");
+ cp->npc_initialized = 0;
+ }
+
+ if (cp->tm_initialized)
+ {
+ roc_nix_tm_fini (nix);
+ cp->tm_initialized = 0;
+ }
+
+ if (cp->lf_allocated)
+ {
+ if ((rrv = roc_nix_lf_free (nix)))
+ oct_roc_err (dev, rrv, "roc_nix_lf_free() failed");
+ cp->lf_allocated = 0;
+ }
+}
+
+void
+oct_port_poll (vlib_main_t *vm, vnet_dev_port_t *port)
+{
+ vnet_dev_t *dev = port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ struct roc_nix *nix = cd->nix;
+ struct roc_nix_link_info link_info = {};
+ vnet_dev_port_state_changes_t changes = {};
+ int rrv;
+
+ rrv = roc_nix_mac_link_info_get (nix, &link_info);
+ if (rrv)
+ return;
+
+ if (cd->status != link_info.status)
+ {
+ changes.change.link_state = 1;
+ changes.link_state = link_info.status;
+ cd->status = link_info.status;
+ }
+
+ if (cd->full_duplex != link_info.full_duplex)
+ {
+ changes.change.link_duplex = 1;
+ changes.full_duplex = link_info.full_duplex;
+ cd->full_duplex = link_info.full_duplex;
+ }
+
+ if (cd->speed != link_info.speed)
+ {
+ changes.change.link_speed = 1;
+ changes.link_speed = link_info.speed;
+ cd->speed = link_info.speed;
+ }
+
+ if (changes.change.any == 0)
+ return;
+
+ log_debug (dev,
+ "status %u full_duplex %u speed %u port %u lmac_type_id %u "
+	     "fec %u autoneg %u",
+ link_info.status, link_info.full_duplex, link_info.speed,
+ link_info.port, link_info.lmac_type_id, link_info.fec,
+ link_info.autoneg);
+ vnet_dev_port_state_change (vm, port, changes);
+}
+
+vnet_dev_rv_t
+oct_rxq_start (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ vnet_dev_t *dev = rxq->port->dev;
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ u32 buffer_indices[rxq->size], n_alloc;
+ u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);
+ int rrv;
+
+ n_alloc = vlib_buffer_alloc_from_pool (vm, buffer_indices, rxq->size, bpi);
+
+ for (int i = 0; i < n_alloc; i++)
+ roc_npa_aura_op_free (
+ crq->aura_handle, 0,
+ pointer_to_uword (vlib_get_buffer (vm, buffer_indices[i])) -
+ crq->hdr_off);
+
+ crq->n_enq = n_alloc;
+
+ if (roc_npa_aura_op_available (crq->aura_handle) != rxq->size)
+    log_warn (rxq->port->dev, "rx queue %u aura not filled completely",
+ rxq->queue_id);
+
+ if ((rrv = roc_nix_rq_ena_dis (&crq->rq, 1)))
+ return oct_roc_err (dev, rrv, "roc_nix_rq_ena_dis() failed");
+
+ return VNET_DEV_OK;
+}
+void
+oct_rxq_stop (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ vnet_dev_t *dev = rxq->port->dev;
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ int rrv;
+ u32 n;
+
+ if ((rrv = roc_nix_rq_ena_dis (&crq->rq, 0)))
+ oct_roc_err (dev, rrv, "roc_nix_rq_ena_dis() failed");
+
+ n = oct_aura_free_all_buffers (vm, crq->aura_handle, crq->hdr_off);
+
+ if (crq->n_enq - n > 0)
+ log_err (dev, "%u buffers leaked on rx queue %u stop", crq->n_enq - n,
+ rxq->queue_id);
+ else
+ log_debug (dev, "%u buffers freed from rx queue %u", n, rxq->queue_id);
+
+ crq->n_enq = 0;
+}
+
+void
+oct_txq_stop (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
+{
+ vnet_dev_t *dev = txq->port->dev;
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ oct_npa_batch_alloc_cl128_t *cl;
+ u32 n, off = ctq->hdr_off;
+
+ n = oct_aura_free_all_buffers (vm, ctq->aura_handle, off);
+ ctq->n_enq -= n;
+
+ if (ctq->n_enq > 0 && ctq->ba_num_cl > 0)
+ for (n = ctq->ba_num_cl, cl = ctq->ba_buffer + ctq->ba_first_cl; n;
+ cl++, n--)
+ {
+ if (cl->status.ccode != 0)
+ for (u32 i = 0; i < cl->status.count; i++)
+ {
+ vlib_buffer_t *b = (vlib_buffer_t *) (cl->iova[i] + off);
+ vlib_buffer_free_one (vm, vlib_get_buffer_index (vm, b));
+ ctq->n_enq--;
+ }
+ }
+
+ if (ctq->n_enq > 0)
+ log_err (dev, "%u buffers leaked on tx queue %u stop", ctq->n_enq,
+ txq->queue_id);
+ else
+ log_debug (dev, "%u buffers freed from tx queue %u", n, txq->queue_id);
+
+ ctq->n_enq = 0;
+}
+
+vnet_dev_rv_t
+oct_port_start (vlib_main_t *vm, vnet_dev_port_t *port)
+{
+ vnet_dev_t *dev = port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ struct roc_nix *nix = cd->nix;
+ struct roc_nix_eeprom_info eeprom_info = {};
+  vnet_dev_rv_t rv = VNET_DEV_OK;
+ int rrv;
+
+ log_debug (port->dev, "port start: port %u", port->port_id);
+
+ foreach_vnet_dev_port_rx_queue (q, port)
+ if ((rv = oct_rxq_start (vm, q)) != VNET_DEV_OK)
+ goto done;
+
+ foreach_vnet_dev_port_tx_queue (q, port)
+ {
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (q);
+ ctq->n_enq = 0;
+ }
+
+ if ((rrv = roc_nix_mac_mtu_set (nix, 9200)))
+ {
+ rv = oct_roc_err (dev, rrv, "roc_nix_mac_mtu_set() failed");
+ goto done;
+ }
+
+ if ((rrv = roc_nix_npc_rx_ena_dis (nix, true)))
+ {
+ rv = oct_roc_err (dev, rrv, "roc_nix_npc_rx_ena_dis() failed");
+ goto done;
+ }
+
+ vnet_dev_poll_port_add (vm, port, 0.5, oct_port_poll);
+
+ if (roc_nix_eeprom_info_get (nix, &eeprom_info) == 0)
+ {
+ log_debug (dev, "sff_id %u data %U", eeprom_info.sff_id, format_hexdump,
+ eeprom_info.buf, sizeof (eeprom_info.buf));
+ }
+done:
+ if (rv != VNET_DEV_OK)
+ oct_port_stop (vm, port);
+  return rv;
+}
+
+void
+oct_port_stop (vlib_main_t *vm, vnet_dev_port_t *port)
+{
+ vnet_dev_t *dev = port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ struct roc_nix *nix = cd->nix;
+ int rrv;
+
+ log_debug (port->dev, "port stop: port %u", port->port_id);
+
+ vnet_dev_poll_port_remove (vm, port, oct_port_poll);
+
+ rrv = roc_nix_npc_rx_ena_dis (nix, false);
+ if (rrv)
+ {
+ oct_roc_err (dev, rrv, "roc_nix_npc_rx_ena_dis() failed");
+ return;
+ }
+
+ foreach_vnet_dev_port_rx_queue (q, port)
+ oct_rxq_stop (vm, q);
+
+ foreach_vnet_dev_port_tx_queue (q, port)
+ oct_txq_stop (vm, q);
+}
+
+vnet_dev_rv_t
+oct_port_cfg_change_precheck (vlib_main_t *vm, vnet_dev_port_t *port,
+ vnet_dev_port_cfg_change_req_t *req)
+{
+ vnet_dev_rv_t rv = VNET_DEV_OK;
+
+ switch (req->type)
+ {
+ case VNET_DEV_PORT_CFG_MAX_RX_FRAME_SIZE:
+ if (port->started)
+ rv = VNET_DEV_ERR_PORT_STARTED;
+ break;
+
+ case VNET_DEV_PORT_CFG_PROMISC_MODE:
+ case VNET_DEV_PORT_CFG_CHANGE_PRIMARY_HW_ADDR:
+ case VNET_DEV_PORT_CFG_ADD_SECONDARY_HW_ADDR:
+ case VNET_DEV_PORT_CFG_REMOVE_SECONDARY_HW_ADDR:
+ break;
+
+ default:
+ rv = VNET_DEV_ERR_NOT_SUPPORTED;
+ };
+
+ return rv;
+}
+
+vnet_dev_rv_t
+oct_port_cfg_change (vlib_main_t *vm, vnet_dev_port_t *port,
+ vnet_dev_port_cfg_change_req_t *req)
+{
+ vnet_dev_rv_t rv = VNET_DEV_OK;
+
+ switch (req->type)
+ {
+ case VNET_DEV_PORT_CFG_PROMISC_MODE:
+ {
+ }
+ break;
+
+ case VNET_DEV_PORT_CFG_CHANGE_PRIMARY_HW_ADDR:
+ break;
+
+ case VNET_DEV_PORT_CFG_ADD_SECONDARY_HW_ADDR:
+ break;
+
+ case VNET_DEV_PORT_CFG_REMOVE_SECONDARY_HW_ADDR:
+ break;
+
+ case VNET_DEV_PORT_CFG_MAX_RX_FRAME_SIZE:
+ break;
+
+ default:
+ return VNET_DEV_ERR_NOT_SUPPORTED;
+ };
+
+ return rv;
+}
diff --git a/src/plugins/dev_octeon/queue.c b/src/plugins/dev_octeon/queue.c
new file mode 100644
index 00000000000..9378fc3b7c7
--- /dev/null
+++ b/src/plugins/dev_octeon/queue.c
@@ -0,0 +1,303 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/dev/dev.h>
+#include <vnet/dev/pci.h>
+#include <vnet/dev/counters.h>
+#include <dev_octeon/octeon.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
+VLIB_REGISTER_LOG_CLASS (oct_log, static) = {
+ .class_name = "octeon",
+ .subclass_name = "queue",
+};
+
+static vnet_dev_rv_t
+oct_roc_err (vnet_dev_t *dev, int rv, char *fmt, ...)
+{
+ u8 *s = 0;
+ va_list va;
+
+ va_start (va, fmt);
+ s = va_format (s, fmt, &va);
+ va_end (va);
+
+ log_err (dev, "%v - ROC error %s (%d)", s, roc_error_msg_get (rv), rv);
+
+ vec_free (s);
+ return VNET_DEV_ERR_INTERNAL;
+}
+
+vnet_dev_rv_t
+oct_rx_queue_alloc (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ vnet_dev_port_t *port = rxq->port;
+ vnet_dev_t *dev = port->dev;
+
+  log_debug (dev, "rx_queue_alloc: queue %u allocated", rxq->queue_id);
+ return VNET_DEV_OK;
+}
+
+void
+oct_rx_queue_free (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ vnet_dev_port_t *port = rxq->port;
+ vnet_dev_t *dev = port->dev;
+
+ log_debug (dev, "rx_queue_free: queue %u", rxq->queue_id);
+}
+
+vnet_dev_rv_t
+oct_tx_queue_alloc (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
+{
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ vnet_dev_port_t *port = txq->port;
+ vnet_dev_t *dev = port->dev;
+
+  log_debug (dev, "tx_queue_alloc: queue %u allocated", txq->queue_id);
+
+ return vnet_dev_dma_mem_alloc (
+ vm, dev, sizeof (void *) * ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS, 128,
+ (void **) &ctq->ba_buffer);
+}
+
+void
+oct_tx_queue_free (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
+{
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ vnet_dev_port_t *port = txq->port;
+ vnet_dev_t *dev = port->dev;
+
+ log_debug (dev, "tx_queue_free: queue %u", txq->queue_id);
+
+ vnet_dev_dma_mem_free (vm, dev, ctq->ba_buffer);
+}
+
+vnet_dev_rv_t
+oct_rxq_init (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ vnet_dev_t *dev = rxq->port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ vlib_buffer_pool_t *bp =
+ vlib_get_buffer_pool (vm, vnet_dev_get_rx_queue_buffer_pool_index (rxq));
+ struct roc_nix *nix = cd->nix;
+ int rrv;
+
+ struct npa_aura_s aura = {};
+ struct npa_pool_s npapool = { .nat_align = 1 };
+
+ if ((rrv = roc_npa_pool_create (&crq->aura_handle, bp->alloc_size, rxq->size,
+ &aura, &npapool, 0)))
+ {
+ oct_rxq_deinit (vm, rxq);
+ return oct_roc_err (dev, rrv, "roc_npa_pool_create() failed");
+ }
+
+ crq->npa_pool_initialized = 1;
+ log_notice (dev, "NPA pool created, aura_handle = 0x%lx", crq->aura_handle);
+
+ crq->cq = (struct roc_nix_cq){
+ .nb_desc = rxq->size,
+ .qid = rxq->queue_id,
+ };
+
+ if ((rrv = roc_nix_cq_init (nix, &crq->cq)))
+ {
+ oct_rxq_deinit (vm, rxq);
+ return oct_roc_err (dev, rrv,
+ "roc_nix_cq_init(qid = %u, nb_desc = %u) failed",
+			  crq->cq.qid, crq->cq.nb_desc);
+ }
+
+ crq->cq_initialized = 1;
+ log_debug (dev, "CQ %u initialised (qmask 0x%x wdata 0x%lx)", crq->cq.qid,
+ crq->cq.qmask, crq->cq.wdata);
+
+ crq->hdr_off = vm->buffer_main->ext_hdr_size;
+
+ crq->rq = (struct roc_nix_rq){
+ .qid = rxq->queue_id,
+ .cqid = crq->cq.qid,
+ .aura_handle = crq->aura_handle,
+ .first_skip = crq->hdr_off + sizeof (vlib_buffer_t),
+ .later_skip = crq->hdr_off + sizeof (vlib_buffer_t),
+ .lpb_size = bp->data_size + crq->hdr_off + sizeof (vlib_buffer_t),
+ .flow_tag_width = 32,
+ };
+
+ if ((rrv = roc_nix_rq_init (nix, &crq->rq, 1 /* disable */)))
+ {
+ oct_rxq_deinit (vm, rxq);
+ return oct_roc_err (dev, rrv, "roc_nix_rq_init(qid = %u) failed",
+ crq->rq.qid);
+ }
+
+ crq->rq_initialized = 1;
+ crq->lmt_base_addr = roc_idev_lmt_base_addr_get ();
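+  /* I/O address used by the rx node for LMTST batch-free of buffer
+   * pointers; the low bits OR-ed in (0x7 << 4) appear to encode the LMTST
+   * store size, following the convention used elsewhere in the ROC code. */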
+ crq->aura_batch_free_ioaddr =
+ (roc_npa_aura_handle_to_base (crq->aura_handle) +
+ NPA_LF_AURA_BATCH_FREE0) |
+ (0x7 << 4);
+
+  log_debug (dev, "RQ %u initialised", crq->rq.qid);
+
+ return VNET_DEV_OK;
+}
+
+void
+oct_rxq_deinit (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq)
+{
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ vnet_dev_t *dev = rxq->port->dev;
+ int rrv;
+
+ if (crq->rq_initialized)
+ {
+ rrv = roc_nix_rq_fini (&crq->rq);
+ if (rrv)
+ oct_roc_err (dev, rrv, "roc_nix_rq_fini() failed");
+ crq->rq_initialized = 0;
+ }
+
+ if (crq->cq_initialized)
+ {
+ rrv = roc_nix_cq_fini (&crq->cq);
+ if (rrv)
+ oct_roc_err (dev, rrv, "roc_nix_cq_fini() failed");
+ crq->cq_initialized = 0;
+ }
+
+ if (crq->npa_pool_initialized)
+ {
+ rrv = roc_npa_pool_destroy (crq->aura_handle);
+ if (rrv)
+ oct_roc_err (dev, rrv, "roc_npa_pool_destroy() failed");
+ crq->npa_pool_initialized = 0;
+ }
+}
+
+vnet_dev_rv_t
+oct_txq_init (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
+{
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ vnet_dev_t *dev = txq->port->dev;
+ oct_device_t *cd = vnet_dev_get_data (dev);
+ struct roc_nix *nix = cd->nix;
+ struct npa_aura_s aura = {};
+ struct npa_pool_s npapool = { .nat_align = 1 };
+ int rrv;
+ vlib_buffer_pool_t *bp = vlib_get_buffer_pool (vm, 0);
+
+ if ((rrv = roc_npa_pool_create (
+ &ctq->aura_handle, bp->alloc_size,
+ txq->size * 6 /* worst case - two SG with 3 segs each = 6 */, &aura,
+ &npapool, 0)))
+ {
+ oct_txq_deinit (vm, txq);
+ return oct_roc_err (dev, rrv, "roc_npa_pool_create() failed");
+ }
+
+ ctq->npa_pool_initialized = 1;
+ log_notice (dev, "NPA pool created, aura_handle = 0x%lx", ctq->aura_handle);
+
+ ctq->sq = (struct roc_nix_sq){
+ .nb_desc = txq->size,
+ .qid = txq->queue_id,
+ .max_sqe_sz = NIX_MAXSQESZ_W16,
+ };
+
+ if ((rrv = roc_nix_sq_init (nix, &ctq->sq)))
+ {
+ oct_txq_deinit (vm, txq);
+ return oct_roc_err (
+ dev, rrv,
+ "roc_nix_sq_init(qid = %u, nb_desc = %u, max_sqe_sz = %u) failed",
+	ctq->sq.qid, ctq->sq.nb_desc, ctq->sq.max_sqe_sz);
+ }
+
+ ctq->sq_initialized = 1;
+ log_debug (dev, "SQ initialised, qid %u, nb_desc %u, max_sqe_sz %u",
+ ctq->sq.qid, ctq->sq.nb_desc, ctq->sq.max_sqe_sz);
+
+ ctq->hdr_off = vm->buffer_main->ext_hdr_size;
+
+ if (ctq->sq.lmt_addr == 0)
+ ctq->sq.lmt_addr = (void *) nix->lmt_base;
+ ctq->io_addr = ctq->sq.io_addr & ~0x7fULL;
+ ctq->lmt_addr = ctq->sq.lmt_addr;
+
+ return VNET_DEV_OK;
+}
+
+void
+oct_txq_deinit (vlib_main_t *vm, vnet_dev_tx_queue_t *txq)
+{
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ vnet_dev_t *dev = txq->port->dev;
+ int rrv;
+
+ if (ctq->sq_initialized)
+ {
+ rrv = roc_nix_sq_fini (&ctq->sq);
+ if (rrv)
+ oct_roc_err (dev, rrv, "roc_nix_sq_fini() failed");
+ ctq->sq_initialized = 0;
+ }
+
+ if (ctq->npa_pool_initialized)
+ {
+ rrv = roc_npa_pool_destroy (ctq->aura_handle);
+ if (rrv)
+ oct_roc_err (dev, rrv, "roc_npa_pool_destroy() failed");
+ ctq->npa_pool_initialized = 0;
+ }
+}
+
+u8 *
+format_oct_rxq_info (u8 *s, va_list *args)
+{
+ vnet_dev_format_args_t *a = va_arg (*args, vnet_dev_format_args_t *);
+ vnet_dev_rx_queue_t *rxq = va_arg (*args, vnet_dev_rx_queue_t *);
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ u32 indent = format_get_indent (s);
+
+ if (a->debug)
+ {
+ s = format (s, "n_enq %u cq_nb_desc %u", crq->n_enq, crq->cq.nb_desc);
+ s = format (s, "\n%Uaura: id 0x%x count %u limit %u avail %u",
+ format_white_space, indent,
+ roc_npa_aura_handle_to_aura (crq->aura_handle),
+ roc_npa_aura_op_cnt_get (crq->aura_handle),
+ roc_npa_aura_op_limit_get (crq->aura_handle),
+ roc_npa_aura_op_available (crq->aura_handle));
+ }
+ return s;
+}
+
+u8 *
+format_oct_txq_info (u8 *s, va_list *args)
+{
+ vnet_dev_format_args_t *a = va_arg (*args, vnet_dev_format_args_t *);
+ vnet_dev_tx_queue_t *txq = va_arg (*args, vnet_dev_tx_queue_t *);
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ u32 indent = format_get_indent (s);
+
+ if (a->debug)
+ {
+ s = format (s, "n_enq %u sq_nb_desc %u io_addr %p lmt_addr %p",
+ ctq->n_enq, ctq->sq.nb_desc, ctq->io_addr, ctq->lmt_addr);
+ s = format (s, "\n%Uaura: id 0x%x count %u limit %u avail %u",
+ format_white_space, indent,
+ roc_npa_aura_handle_to_aura (ctq->aura_handle),
+ roc_npa_aura_op_cnt_get (ctq->aura_handle),
+ roc_npa_aura_op_limit_get (ctq->aura_handle),
+ roc_npa_aura_op_available (ctq->aura_handle));
+ }
+
+ return s;
+}
diff --git a/src/plugins/dev_octeon/roc_helper.c b/src/plugins/dev_octeon/roc_helper.c
new file mode 100644
index 00000000000..f10c2cb578b
--- /dev/null
+++ b/src/plugins/dev_octeon/roc_helper.c
@@ -0,0 +1,181 @@
+/*
+ * Copyright (c) 2023 Marvell.
+ * SPDX-License-Identifier: Apache-2.0
+ * https://spdx.org/licenses/Apache-2.0.html
+ */
+
+#include <vnet/vnet.h>
+#include <vlib/pci/pci.h>
+#include <vlib/linux/vfio.h>
+#include <base/roc_api.h>
+#include <common.h>
+
+static oct_plt_memzone_list_t memzone_list;
+
+static inline void
+oct_plt_log (oct_plt_log_level_t level, oct_plt_log_class_t cls, char *fmt,
+ ...)
+{
+ vlib_log ((vlib_log_level_t) level, cls, fmt);
+}
+
+static inline void
+oct_plt_spinlock_init (oct_plt_spinlock_t *p)
+{
+ clib_spinlock_init ((clib_spinlock_t *) p);
+}
+
+static void
+oct_plt_spinlock_lock (oct_plt_spinlock_t *p)
+{
+ clib_spinlock_lock ((clib_spinlock_t *) p);
+}
+
+static void
+oct_plt_spinlock_unlock (oct_plt_spinlock_t *p)
+{
+ clib_spinlock_unlock ((clib_spinlock_t *) p);
+}
+
+static int
+oct_plt_spinlock_trylock (oct_plt_spinlock_t *p)
+{
+ return clib_spinlock_trylock ((clib_spinlock_t *) p);
+}
+
+static u64
+oct_plt_get_thread_index (void)
+{
+ return __os_thread_index;
+}
+
+static void
+oct_drv_physmem_free (vlib_main_t *vm, void *mem)
+{
+ if (!mem)
+ {
+ clib_warning ("Invalid address %p", mem);
+ return;
+ }
+
+ vlib_physmem_free (vm, mem);
+}
+
+static void *
+oct_drv_physmem_alloc (vlib_main_t *vm, u32 size, u32 align)
+{
+ clib_error_t *error = NULL;
+ uword *mem = NULL;
+
+ if (align)
+ {
+ /* Force cache line alloc in case alignment is less than cache line */
+ align = align < CLIB_CACHE_LINE_BYTES ? CLIB_CACHE_LINE_BYTES : align;
+ mem = vlib_physmem_alloc_aligned_on_numa (vm, size, align, 0);
+ }
+ else
+ mem =
+ vlib_physmem_alloc_aligned_on_numa (vm, size, CLIB_CACHE_LINE_BYTES, 0);
+ if (!mem)
+ return NULL;
+
+ error = vfio_map_physmem_page (vm, mem);
+ if (error)
+ goto report_error;
+
+ clib_memset (mem, 0, size);
+ return mem;
+
+report_error:
+ clib_error_report (error);
+ oct_drv_physmem_free (vm, mem);
+
+ return NULL;
+}
+
+static void
+oct_plt_free (void *addr)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ oct_drv_physmem_free ((void *) vm, addr);
+}
+
+static void *
+oct_plt_zmalloc (u32 size, u32 align)
+{
+ vlib_main_t *vm = vlib_get_main ();
+
+ return oct_drv_physmem_alloc (vm, size, align);
+}
+
+static oct_plt_memzone_t *
+memzone_get (u32 index)
+{
+ if (index == ((u32) ~0))
+ return 0;
+
+ return pool_elt_at_index (memzone_list.mem_pool, index);
+}
+
+static int
+oct_plt_memzone_free (const oct_plt_memzone_t *name)
+{
+ uword *p;
+ p = hash_get_mem (memzone_list.memzone_by_name, name);
+
+  if (!p || p[0] == ((u32) ~0))
+ return -EINVAL;
+
+ hash_unset_mem (memzone_list.memzone_by_name, name);
+
+ pool_put_index (memzone_list.mem_pool, p[0]);
+
+ return 0;
+}
+
+static oct_plt_memzone_t *
+oct_plt_memzone_lookup (const char *name)
+{
+ uword *p;
+ p = hash_get_mem (memzone_list.memzone_by_name, name);
+ if (p)
+ return memzone_get (p[0]);
+
+ return 0;
+}
+
+static oct_plt_memzone_t *
+oct_plt_memzone_reserve_aligned (const char *name, u64 len, u8 socket,
+ u32 flags, u32 align)
+{
+ oct_plt_memzone_t *mem_pool;
+ void *p = NULL;
+
+ pool_get_zero (memzone_list.mem_pool, mem_pool);
+
+ p = oct_plt_zmalloc (len, align);
+ if (!p)
+ return NULL;
+
+ mem_pool->addr = p;
+ mem_pool->index = mem_pool - memzone_list.mem_pool;
+ hash_set_mem (memzone_list.memzone_by_name, name, mem_pool->index);
+
+ return mem_pool;
+}
+
+oct_plt_init_param_t oct_plt_init_param = {
+ .oct_plt_log_reg_class = vlib_log_register_class,
+ .oct_plt_log = oct_plt_log,
+ .oct_plt_free = oct_plt_free,
+ .oct_plt_zmalloc = oct_plt_zmalloc,
+ .oct_plt_memzone_free = oct_plt_memzone_free,
+ .oct_plt_memzone_lookup = oct_plt_memzone_lookup,
+ .oct_plt_memzone_reserve_aligned = oct_plt_memzone_reserve_aligned,
+ .oct_plt_spinlock_init = oct_plt_spinlock_init,
+ .oct_plt_spinlock_lock = oct_plt_spinlock_lock,
+ .oct_plt_spinlock_unlock = oct_plt_spinlock_unlock,
+ .oct_plt_spinlock_trylock = oct_plt_spinlock_trylock,
+ .oct_plt_get_thread_index = oct_plt_get_thread_index,
+};
diff --git a/src/plugins/dev_octeon/rx_node.c b/src/plugins/dev_octeon/rx_node.c
new file mode 100644
index 00000000000..c1c4771795e
--- /dev/null
+++ b/src/plugins/dev_octeon/rx_node.c
@@ -0,0 +1,385 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/dev/dev.h>
+#include <vnet/ethernet/ethernet.h>
+#include <dev_octeon/octeon.h>
+#include <dev_octeon/hw_defs.h>
+
+typedef struct
+{
+ u32 next_index;
+ u32 sw_if_index;
+ u32 hw_if_index;
+ u32 trace_count;
+ u32 n_traced;
+ oct_nix_rx_cqe_desc_t *next_desc;
+ u64 parse_w0_or;
+ u32 n_left_to_next;
+ u32 *to_next;
+ u32 n_rx_pkts;
+ u32 n_rx_bytes;
+ u32 n_segs;
+} oct_rx_node_ctx_t;
+
+static_always_inline vlib_buffer_t *
+oct_seg_to_bp (void *p)
+{
+ return (vlib_buffer_t *) p - 1;
+}
+
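+/* Attach the 2nd..7th segments described by a CQE's two SG sub-descriptors
+ * to the head buffer as a vlib buffer chain, accumulating the tail byte and
+ * segment counts into the node context. */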
+static_always_inline void
+oct_rx_attach_tail (vlib_main_t *vm, oct_rx_node_ctx_t *ctx, vlib_buffer_t *h,
+ oct_nix_rx_cqe_desc_t *d)
+{
+ u32 tail_sz = 0, n_tail_segs = 0;
+ vlib_buffer_t *p, *b;
+ u8 segs0 = d->sg0.segs, segs1 = 0;
+
+ if (segs0 < 2)
+ return;
+
+ b = oct_seg_to_bp (d->segs0[1]);
+ h->next_buffer = vlib_get_buffer_index (vm, b);
+ tail_sz += b->current_length = d->sg0.seg2_size;
+ n_tail_segs++;
+
+ if (segs0 == 2)
+ goto done;
+
+ p = b;
+ p->flags = VLIB_BUFFER_NEXT_PRESENT;
+ b = oct_seg_to_bp (d->segs0[2]);
+ p->next_buffer = vlib_get_buffer_index (vm, b);
+ tail_sz += b->current_length = d->sg0.seg3_size;
+ n_tail_segs++;
+
+ if (d->sg1.subdc != NIX_SUBDC_SG)
+ goto done;
+
+ segs1 = d->sg1.segs;
+ if (segs1 == 0)
+ goto done;
+
+ p = b;
+ p->flags = VLIB_BUFFER_NEXT_PRESENT;
+ b = oct_seg_to_bp (d->segs1[0]);
+ p->next_buffer = vlib_get_buffer_index (vm, b);
+ tail_sz += b->current_length = d->sg1.seg1_size;
+ n_tail_segs++;
+
+ if (segs1 == 1)
+ goto done;
+
+ p = b;
+ p->flags = VLIB_BUFFER_NEXT_PRESENT;
+ b = oct_seg_to_bp (d->segs1[1]);
+ p->next_buffer = vlib_get_buffer_index (vm, b);
+ tail_sz += b->current_length = d->sg1.seg2_size;
+ n_tail_segs++;
+
+ if (segs1 == 2)
+ goto done;
+
+ p = b;
+ p->flags = VLIB_BUFFER_NEXT_PRESENT;
+ b = oct_seg_to_bp (d->segs1[2]);
+ p->next_buffer = vlib_get_buffer_index (vm, b);
+ tail_sz += b->current_length = d->sg1.seg3_size;
+ n_tail_segs++;
+
+done:
+ b->flags = 0;
+ h->total_length_not_including_first_buffer = tail_sz;
+ h->flags |= VLIB_BUFFER_NEXT_PRESENT | VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ ctx->n_rx_bytes += tail_sz;
+ ctx->n_segs += n_tail_segs;
+}
+
+static_always_inline u32
+oct_rx_batch (vlib_main_t *vm, oct_rx_node_ctx_t *ctx,
+ vnet_dev_rx_queue_t *rxq, u32 n)
+{
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ vlib_buffer_template_t bt = rxq->buffer_template;
+ u32 n_left;
+ oct_nix_rx_cqe_desc_t *d = ctx->next_desc;
+ vlib_buffer_t *b[4];
+
+ for (n_left = n; n_left >= 8; d += 4, n_left -= 4, ctx->to_next += 4)
+ {
+ u32 segs = 0;
+ clib_prefetch_store (oct_seg_to_bp (d[4].segs0[0]));
+ clib_prefetch_store (oct_seg_to_bp (d[5].segs0[0]));
+ b[0] = oct_seg_to_bp (d[0].segs0[0]);
+ clib_prefetch_store (oct_seg_to_bp (d[6].segs0[0]));
+ b[1] = oct_seg_to_bp (d[1].segs0[0]);
+ clib_prefetch_store (oct_seg_to_bp (d[7].segs0[0]));
+ b[2] = oct_seg_to_bp (d[2].segs0[0]);
+ b[3] = oct_seg_to_bp (d[3].segs0[0]);
+ ctx->to_next[0] = vlib_get_buffer_index (vm, b[0]);
+ ctx->to_next[1] = vlib_get_buffer_index (vm, b[1]);
+ ctx->to_next[2] = vlib_get_buffer_index (vm, b[2]);
+ ctx->to_next[3] = vlib_get_buffer_index (vm, b[3]);
+ b[0]->template = bt;
+ b[1]->template = bt;
+ b[2]->template = bt;
+ b[3]->template = bt;
+ ctx->n_rx_bytes += b[0]->current_length = d[0].sg0.seg1_size;
+ ctx->n_rx_bytes += b[1]->current_length = d[1].sg0.seg1_size;
+ ctx->n_rx_bytes += b[2]->current_length = d[2].sg0.seg1_size;
+ ctx->n_rx_bytes += b[3]->current_length = d[3].sg0.seg1_size;
+ ctx->n_segs += 4;
+ segs = d[0].sg0.segs + d[1].sg0.segs + d[2].sg0.segs + d[3].sg0.segs;
+
+ if (PREDICT_FALSE (segs > 4))
+ {
+ oct_rx_attach_tail (vm, ctx, b[0], d + 0);
+ oct_rx_attach_tail (vm, ctx, b[1], d + 1);
+ oct_rx_attach_tail (vm, ctx, b[2], d + 2);
+ oct_rx_attach_tail (vm, ctx, b[3], d + 3);
+ }
+ }
+
+ for (; n_left; d += 1, n_left -= 1, ctx->to_next += 1)
+ {
+ b[0] = (vlib_buffer_t *) d->segs0[0] - 1;
+ ctx->to_next[0] = vlib_get_buffer_index (vm, b[0]);
+ b[0]->template = bt;
+ ctx->n_rx_bytes += b[0]->current_length = d[0].sg0.seg1_size;
+ ctx->n_segs += 1;
+ if (d[0].sg0.segs > 1)
+ oct_rx_attach_tail (vm, ctx, b[0], d + 0);
+ }
+
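+  /* Writing wdata | count to the CQ doorbell acknowledges the n CQEs just
+   * consumed so the hardware can reuse those slots. */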
+ plt_write64 ((crq->cq.wdata | n), crq->cq.door);
+ ctx->n_rx_pkts += n;
+ ctx->n_left_to_next -= n;
+ return n;
+}
+
+static_always_inline void
+oct_rxq_refill_batch (vlib_main_t *vm, u64 lmt_id, u64 addr,
+ oct_npa_lf_aura_batch_free_line_t *lines, u32 *bi,
+ oct_npa_lf_aura_batch_free0_t w0, u64 n_lines)
+{
+ u64 data;
+
+ for (u32 i = 0; i < n_lines; i++, bi += 15)
+ {
+ lines[i].w0 = w0;
+ vlib_get_buffers (vm, bi, (vlib_buffer_t **) lines[i].data, 15);
+ }
+
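+  /* Build the LMTST dispatch word: LMT line id in the low bits, the number
+   * of extra lines at bit 12 and per-line size bits from bit 19 (one 3-bit
+   * field per used line, all set here) - interpretation based on how the
+   * ROC code issues steorl-based LMT stores. */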
+ data = lmt_id | ((n_lines - 1) << 12) | ((1ULL << (n_lines * 3)) - 1) << 19;
+ roc_lmt_submit_steorl (data, addr);
+
+ /* Data Store Memory Barrier - outer shareable domain */
+ asm volatile("dmb oshst" ::: "memory");
+}
+
+static_always_inline u32
+oct_rxq_refill (vlib_main_t *vm, vnet_dev_rx_queue_t *rxq, u16 n_refill)
+{
+ const u32 batch_max_lines = 16;
+ const u32 bufs_per_line = 15;
+ const u32 batch_max_bufs = 15 * 16;
+
+ u32 batch_bufs, n_lines, n_alloc;
+ u32 buffer_indices[batch_max_bufs];
+ u64 lmt_addr, lmt_id, addr, n_enq = 0;
+ u8 bpi = vnet_dev_get_rx_queue_buffer_pool_index (rxq);
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ oct_npa_lf_aura_batch_free_line_t *lines;
+
+ if (n_refill < bufs_per_line)
+ return 0;
+
+ n_lines = n_refill / bufs_per_line;
+
+ addr = crq->aura_batch_free_ioaddr;
+ lmt_addr = crq->lmt_base_addr;
+ lmt_id = vm->thread_index << ROC_LMT_LINES_PER_CORE_LOG2;
+ lmt_addr += lmt_id << ROC_LMT_LINE_SIZE_LOG2;
+ lines = (oct_npa_lf_aura_batch_free_line_t *) lmt_addr;
+
+ oct_npa_lf_aura_batch_free0_t w0 = {
+ .aura = roc_npa_aura_handle_to_aura (crq->aura_handle),
+ .count_eot = 1,
+ };
+
+ while (n_lines >= batch_max_lines)
+ {
+ n_alloc = vlib_buffer_alloc (vm, buffer_indices, batch_max_bufs);
+ if (PREDICT_FALSE (n_alloc < batch_max_bufs))
+ goto alloc_fail;
+ oct_rxq_refill_batch (vm, lmt_id, addr, lines, buffer_indices, w0,
+ batch_max_lines);
+ n_lines -= batch_max_lines;
+ n_enq += batch_max_bufs;
+ }
+
+ if (n_lines == 0)
+ return n_enq;
+
+ batch_bufs = n_lines * bufs_per_line;
+ n_alloc = vlib_buffer_alloc_from_pool (vm, buffer_indices, batch_bufs, bpi);
+
+ if (PREDICT_FALSE (n_alloc < batch_bufs))
+ {
+ alloc_fail:
+ if (n_alloc >= bufs_per_line)
+ {
+ u32 n_unalloc;
+ n_lines = n_alloc / bufs_per_line;
+ batch_bufs = n_lines * bufs_per_line;
+ n_unalloc = n_alloc - batch_bufs;
+
+ if (n_unalloc)
+ vlib_buffer_unalloc_to_pool (vm, buffer_indices + batch_bufs,
+ n_unalloc, bpi);
+ }
+ else
+ {
+ if (n_alloc)
+ vlib_buffer_unalloc_to_pool (vm, buffer_indices, n_alloc, bpi);
+ return n_enq;
+ }
+ }
+
+ oct_rxq_refill_batch (vm, lmt_id, addr, lines, buffer_indices, w0, n_lines);
+ n_enq += batch_bufs;
+
+ return n_enq;
+}
+
+static_always_inline void
+oct_rx_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
+ oct_rx_node_ctx_t *ctx, oct_nix_rx_cqe_desc_t *d, u32 n_desc)
+{
+ u32 i = 0;
+ if (PREDICT_TRUE (ctx->trace_count == 0))
+ return;
+
+ while (ctx->n_traced < ctx->trace_count && i < n_desc)
+ {
+ vlib_buffer_t *b = (vlib_buffer_t *) d[i].segs0[0] - 1;
+
+ if (PREDICT_TRUE (vlib_trace_buffer (vm, node, ctx->next_index, b,
+ /* follow_chain */ 0)))
+ {
+ oct_rx_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
+ tr->next_index = ctx->next_index;
+ tr->sw_if_index = ctx->sw_if_index;
+ tr->desc = d[i];
+ ctx->n_traced++;
+ }
+ i++;
+ }
+}
+
+static_always_inline uword
+oct_rx_node_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, vnet_dev_port_t *port,
+ vnet_dev_rx_queue_t *rxq, int with_flows)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ u32 thr_idx = vlib_get_thread_index ();
+ oct_rxq_t *crq = vnet_dev_get_rx_queue_data (rxq);
+ u32 n_desc, head, n, n_enq;
+ u32 cq_size = crq->cq.nb_desc;
+ u32 cq_mask = crq->cq.qmask;
+ oct_nix_rx_cqe_desc_t *descs = crq->cq.desc_base;
+ oct_nix_lf_cq_op_status_t status;
+ oct_rx_node_ctx_t _ctx = {
+ .next_index = rxq->next_index,
+ .sw_if_index = port->intf.sw_if_index,
+ .hw_if_index = port->intf.hw_if_index,
+ }, *ctx = &_ctx;
+
+ /* get head and tail from NIX_LF_CQ_OP_STATUS */
+ status.as_u64 = roc_atomic64_add_sync (crq->cq.wdata, crq->cq.status);
+ if (status.cq_err || status.op_err)
+ return 0;
+
+ head = status.head;
+ n_desc = (status.tail - head) & cq_mask;
+
+ if (n_desc == 0)
+ return 0;
+
+ vlib_get_new_next_frame (vm, node, ctx->next_index, ctx->to_next,
+ ctx->n_left_to_next);
+
+ ctx->trace_count = vlib_get_trace_count (vm, node);
+
+ while (1)
+ {
+ ctx->next_desc = descs + head;
+ n = clib_min (cq_size - head, clib_min (n_desc, ctx->n_left_to_next));
+ n = oct_rx_batch (vm, ctx, rxq, n);
+ oct_rx_trace (vm, node, ctx, descs + head, n);
+
+ if (ctx->n_left_to_next == 0)
+ break;
+
+ status.as_u64 = roc_atomic64_add_sync (crq->cq.wdata, crq->cq.status);
+ if (status.cq_err || status.op_err)
+ break;
+
+ head = status.head;
+ n_desc = (status.tail - head) & cq_mask;
+ if (n_desc == 0)
+ break;
+ }
+
+ if (ctx->n_traced)
+ vlib_set_trace_count (vm, node, ctx->trace_count - ctx->n_traced);
+
+ if (PREDICT_TRUE (ctx->next_index == VNET_DEV_ETH_RX_PORT_NEXT_ETH_INPUT))
+ {
+ vlib_next_frame_t *nf;
+ vlib_frame_t *f;
+ ethernet_input_frame_t *ef;
+ oct_nix_rx_parse_t p = { .w[0] = ctx->parse_w0_or };
+ nf = vlib_node_runtime_get_next_frame (vm, node, ctx->next_index);
+ f = vlib_get_frame (vm, nf->frame);
+ f->flags = ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX;
+
+ ef = vlib_frame_scalar_args (f);
+ ef->sw_if_index = ctx->sw_if_index;
+ ef->hw_if_index = ctx->hw_if_index;
+
+ if (p.f.errcode == 0 && p.f.errlev == 0)
+ f->flags |= ETH_INPUT_FRAME_F_IP4_CKSUM_OK;
+
+ vlib_frame_no_append (f);
+ }
+
+ vlib_put_next_frame (vm, node, ctx->next_index, ctx->n_left_to_next);
+
+ n_enq = crq->n_enq - ctx->n_segs;
+ n_enq += oct_rxq_refill (vm, rxq, rxq->size - n_enq);
+ crq->n_enq = n_enq;
+
+ vlib_increment_combined_counter (
+ vnm->interface_main.combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX,
+ thr_idx, ctx->hw_if_index, ctx->n_rx_pkts, ctx->n_rx_bytes);
+
+ return ctx->n_rx_pkts;
+}
+
+VNET_DEV_NODE_FN (oct_rx_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ u32 n_rx = 0;
+ foreach_vnet_dev_rx_queue_runtime (rxq, node)
+ {
+ vnet_dev_port_t *port = rxq->port;
+ n_rx += oct_rx_node_inline (vm, node, frame, port, rxq, 0);
+ }
+
+ return n_rx;
+}
diff --git a/src/plugins/dev_octeon/tx_node.c b/src/plugins/dev_octeon/tx_node.c
new file mode 100644
index 00000000000..5deaa82a0c1
--- /dev/null
+++ b/src/plugins/dev_octeon/tx_node.c
@@ -0,0 +1,427 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2023 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vppinfra/ring.h>
+#include <vppinfra/vector/ip_csum.h>
+
+#include <vnet/dev/dev.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/ip/ip4_packet.h>
+#include <vnet/ip/ip6_packet.h>
+#include <vnet/udp/udp_packet.h>
+#include <vnet/tcp/tcp_packet.h>
+
+#include <dev_octeon/octeon.h>
+
+typedef struct
+{
+ union nix_send_hdr_w0_u hdr_w0_teplate;
+ vlib_node_runtime_t *node;
+ u32 n_tx_bytes;
+ u32 n_drop;
+ vlib_buffer_t *drop[VLIB_FRAME_SIZE];
+ u32 batch_alloc_not_ready;
+ u32 batch_alloc_issue_fail;
+ u16 lmt_id;
+ u64 lmt_ioaddr;
+ lmt_line_t *lmt_lines;
+} oct_tx_ctx_t;
+
+static_always_inline u32
+oct_batch_free (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq)
+{
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ u8 num_cl;
+ u64 ah;
+ u32 n_freed = 0, n;
+ oct_npa_batch_alloc_cl128_t *cl;
+
+ num_cl = ctq->ba_num_cl;
+ if (num_cl)
+ {
+ u16 off = ctq->hdr_off;
+ u32 *bi = (u32 *) ctq->ba_buffer;
+
+ for (cl = ctq->ba_buffer + ctq->ba_first_cl; num_cl > 0; num_cl--, cl++)
+ {
+ u8 count;
+ if (cl->status.ccode == ALLOC_CCODE_INVAL)
+ {
+ ctx->batch_alloc_not_ready++;
+ n_freed = bi - (u32 *) ctq->ba_buffer;
+ if (n_freed > 0)
+ {
+ vlib_buffer_free_no_next (vm, (u32 *) ctq->ba_buffer,
+ n_freed);
+ ctq->ba_num_cl = num_cl;
+ ctq->ba_first_cl = cl - ctq->ba_buffer;
+ return n_freed;
+ }
+
+ return 0;
+ }
+
+ count = cl->status.count;
+#if (CLIB_DEBUG > 0)
+ cl->status.count = cl->status.ccode = 0;
+#endif
+ if (PREDICT_TRUE (count == 16))
+ {
+ /* optimize for likely case where cacheline is full */
+ vlib_get_buffer_indices_with_offset (vm, (void **) cl, bi, 16,
+ off);
+ bi += 16;
+ }
+ else
+ {
+ vlib_get_buffer_indices_with_offset (vm, (void **) cl, bi, count,
+ off);
+ bi += count;
+ }
+ }
+
+ n_freed = bi - (u32 *) ctq->ba_buffer;
+ if (n_freed > 0)
+ vlib_buffer_free_no_next (vm, (u32 *) ctq->ba_buffer, n_freed);
+
+ /* clear status bits in each cacheline */
+ n = cl - ctq->ba_buffer;
+ for (u32 i = 0; i < n; i++)
+ ctq->ba_buffer[i].iova[0] = 0;
+
+ ctq->ba_num_cl = ctq->ba_first_cl = 0;
+ }
+
+ ah = ctq->aura_handle;
+
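+  /* if enough pointers are available in the aura, issue a new batch alloc
+     request; its results are collected on a subsequent call */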
+ if ((n = roc_npa_aura_op_available (ah)) >= 32)
+ {
+ u64 addr, res;
+
+ n = clib_min (n, ROC_CN10K_NPA_BATCH_ALLOC_MAX_PTRS);
+
+ oct_npa_batch_alloc_compare_t cmp = {
+ .compare_s = { .aura = roc_npa_aura_handle_to_aura (ah),
+ .stype = ALLOC_STYPE_STF,
+ .count = n }
+ };
+
+ addr = roc_npa_aura_handle_to_base (ah) + NPA_LF_AURA_BATCH_ALLOC;
+ res = roc_atomic64_casl (cmp.as_u64, (uint64_t) ctq->ba_buffer,
+ (i64 *) addr);
+ if (res == ALLOC_RESULT_ACCEPTED || res == ALLOC_RESULT_NOCORE)
+ {
+ ctq->ba_num_cl = (n + 15) / 16;
+ ctq->ba_first_cl = 0;
+ }
+ else
+ ctx->batch_alloc_issue_fail++;
+ }
+
+ return n_freed;
+}
+
+static_always_inline u8
+oct_tx_enq1 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vlib_buffer_t *b,
+ lmt_line_t *line, u32 flags, int simple, int trace)
+{
+ u8 n_dwords = 2;
+ u32 total_len = 0;
+ oct_tx_desc_t d = {
+    .hdr_w0 = ctx->hdr_w0_template,
+ .sg[0] = {
+ .segs = 1,
+ .subdc = NIX_SUBDC_SG,
+ },
+ .sg[4] = {
+ .subdc = NIX_SUBDC_SG,
+ },
+ };
+
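+  /* chained buffer - spread up to 5 tail segments across the two SG
+     subdescriptors (3 + 3 segments including the head) */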
+ if (!simple && flags & VLIB_BUFFER_NEXT_PRESENT)
+ {
+      u8 n_tail_segs = 0;
+      vlib_buffer_t *tail_segs[5], *t = b;
+
+      while (t->flags & VLIB_BUFFER_NEXT_PRESENT)
+	{
+	  /* hw descriptor holds at most 5 tail segments, drop the whole
+	     chain if it is longer */
+	  if (n_tail_segs >= 5)
+	    {
+	      ctx->drop[ctx->n_drop++] = b;
+	      return 0;
+	    }
+	  t = vlib_get_buffer (vm, t->next_buffer);
+	  tail_segs[n_tail_segs++] = t;
+	}
+
+ switch (n_tail_segs)
+ {
+ case 5:
+ d.sg[7].u = (u64) vlib_buffer_get_current (tail_segs[4]);
+ total_len += d.sg[4].seg3_size = tail_segs[4]->current_length;
+ d.sg[4].segs++;
+ case 4:
+ d.sg[6].u = (u64) vlib_buffer_get_current (tail_segs[3]);
+ total_len += d.sg[4].seg2_size = tail_segs[3]->current_length;
+ d.sg[4].segs++;
+ n_dwords++;
+ case 3:
+ d.sg[5].u = (u64) vlib_buffer_get_current (tail_segs[2]);
+ total_len += d.sg[4].seg1_size = tail_segs[2]->current_length;
+ d.sg[4].segs++;
+ n_dwords++;
+ case 2:
+ d.sg[3].u = (u64) vlib_buffer_get_current (tail_segs[1]);
+ total_len += d.sg[0].seg3_size = tail_segs[1]->current_length;
+ d.sg[0].segs++;
+ case 1:
+ d.sg[2].u = (u64) vlib_buffer_get_current (tail_segs[0]);
+ total_len += d.sg[0].seg2_size = tail_segs[0]->current_length;
+ d.sg[0].segs++;
+ n_dwords++;
+ default:
+ break;
+	}
+ d.hdr_w0.sizem1 = n_dwords - 1;
+ }
+
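+  /* translate checksum offload requests into NIX send header fields */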
+ if (!simple && flags & VNET_BUFFER_F_OFFLOAD)
+ {
+ vnet_buffer_oflags_t oflags = vnet_buffer (b)->oflags;
+ if (oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM)
+ {
+ d.hdr_w1.ol3type = NIX_SENDL3TYPE_IP4_CKSUM;
+ d.hdr_w1.ol3ptr = vnet_buffer (b)->l3_hdr_offset;
+ d.hdr_w1.ol4ptr =
+ vnet_buffer (b)->l3_hdr_offset + sizeof (ip4_header_t);
+ }
+ if (oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM)
+ {
+ d.hdr_w1.ol4type = NIX_SENDL4TYPE_UDP_CKSUM;
+ d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
+ }
+ else if (oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM)
+ {
+ d.hdr_w1.ol4type = NIX_SENDL4TYPE_TCP_CKSUM;
+ d.hdr_w1.ol4ptr = vnet_buffer (b)->l4_hdr_offset;
+ }
+ }
+
+ total_len += d.sg[0].seg1_size = b->current_length;
+ d.hdr_w0.total = total_len;
+ d.sg[1].u = (u64) vlib_buffer_get_current (b);
+
+ if (trace && flags & VLIB_BUFFER_IS_TRACED)
+ {
+ oct_tx_trace_t *t = vlib_add_trace (vm, ctx->node, b, sizeof (*t));
+ t->desc = d;
+ t->sw_if_index = vnet_buffer (b)->sw_if_index[VLIB_TX];
+ }
+
+ for (u32 i = 0; i < n_dwords; i++)
+ line->dwords[i] = d.as_u128[i];
+
+ return n_dwords;
+}
+
+static_always_inline u32
+oct_tx_enq16 (vlib_main_t *vm, oct_tx_ctx_t *ctx, vnet_dev_tx_queue_t *txq,
+ vlib_buffer_t **b, u32 n_pkts, int trace)
+{
+ u8 dwords_per_line[16], *dpl = dwords_per_line;
+ u64 lmt_arg, ioaddr, n_lines;
+ u32 n_left, or_flags_16 = 0;
+ const u32 not_simple_flags =
+ VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD;
+ lmt_line_t *l = ctx->lmt_lines;
+
+ /* Data Store Memory Barrier - outer shareable domain */
+ asm volatile("dmb oshst" ::: "memory");
+
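+  /* process 8 packets per iteration while prefetching the next 8 buffer
+     headers; if none of the 8 is chained or needs offload, take the
+     fixed 2-dword descriptor path */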
+ for (n_left = n_pkts; n_left >= 8; n_left -= 8, b += 8, l += 8)
+ {
+ u32 f0, f1, f2, f3, f4, f5, f6, f7, or_f = 0;
+ vlib_prefetch_buffer_header (b[8], LOAD);
+ or_f |= f0 = b[0]->flags;
+ or_f |= f1 = b[1]->flags;
+ vlib_prefetch_buffer_header (b[9], LOAD);
+ or_f |= f2 = b[2]->flags;
+ or_f |= f3 = b[3]->flags;
+ vlib_prefetch_buffer_header (b[10], LOAD);
+ or_f |= f4 = b[4]->flags;
+ or_f |= f5 = b[5]->flags;
+ vlib_prefetch_buffer_header (b[11], LOAD);
+ or_f |= f6 = b[6]->flags;
+ or_f |= f7 = b[7]->flags;
+ vlib_prefetch_buffer_header (b[12], LOAD);
+ or_flags_16 |= or_f;
+
+ if ((or_f & not_simple_flags) == 0)
+ {
+ int simple = 1;
+ oct_tx_enq1 (vm, ctx, b[0], l, f0, simple, trace);
+ oct_tx_enq1 (vm, ctx, b[1], l + 1, f1, simple, trace);
+ vlib_prefetch_buffer_header (b[13], LOAD);
+ oct_tx_enq1 (vm, ctx, b[2], l + 2, f2, simple, trace);
+ oct_tx_enq1 (vm, ctx, b[3], l + 3, f3, simple, trace);
+ vlib_prefetch_buffer_header (b[14], LOAD);
+ oct_tx_enq1 (vm, ctx, b[4], l + 4, f4, simple, trace);
+ oct_tx_enq1 (vm, ctx, b[5], l + 5, f5, simple, trace);
+ vlib_prefetch_buffer_header (b[15], LOAD);
+ oct_tx_enq1 (vm, ctx, b[6], l + 6, f6, simple, trace);
+ oct_tx_enq1 (vm, ctx, b[7], l + 7, f7, simple, trace);
+ dpl[0] = dpl[1] = dpl[2] = dpl[3] = 2;
+ dpl[4] = dpl[5] = dpl[6] = dpl[7] = 2;
+ }
+ else
+ {
+ int simple = 0;
+ dpl[0] = oct_tx_enq1 (vm, ctx, b[0], l, f0, simple, trace);
+ dpl[1] = oct_tx_enq1 (vm, ctx, b[1], l + 1, f1, simple, trace);
+ vlib_prefetch_buffer_header (b[13], LOAD);
+ dpl[2] = oct_tx_enq1 (vm, ctx, b[2], l + 2, f2, simple, trace);
+ dpl[3] = oct_tx_enq1 (vm, ctx, b[3], l + 3, f3, simple, trace);
+ vlib_prefetch_buffer_header (b[14], LOAD);
+ dpl[4] = oct_tx_enq1 (vm, ctx, b[4], l + 4, f4, simple, trace);
+ dpl[5] = oct_tx_enq1 (vm, ctx, b[5], l + 5, f5, simple, trace);
+ vlib_prefetch_buffer_header (b[15], LOAD);
+ dpl[6] = oct_tx_enq1 (vm, ctx, b[6], l + 6, f6, simple, trace);
+ dpl[7] = oct_tx_enq1 (vm, ctx, b[7], l + 7, f7, simple, trace);
+ }
+ dpl += 8;
+ }
+
+ for (; n_left > 0; n_left -= 1, b += 1, l += 1)
+ {
+ u32 f0 = b[0]->flags;
+ dpl++[0] = oct_tx_enq1 (vm, ctx, b[0], l, f0, 0, trace);
+ or_flags_16 |= f0;
+ }
+
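+  /* encode the number of LMT lines and each line's size in dwords into
+     the LMTST arguments before the submit */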
+ lmt_arg = ctx->lmt_id;
+ ioaddr = ctx->lmt_ioaddr;
+ n_lines = n_pkts;
+
+ if (PREDICT_FALSE (or_flags_16 & VLIB_BUFFER_NEXT_PRESENT))
+ {
+ dpl = dwords_per_line;
+ ioaddr |= (dpl[0] - 1) << 4;
+
+ if (n_lines > 1)
+ {
+ lmt_arg |= (--n_lines) << 12;
+
+ for (u8 bit_off = 19; n_lines; n_lines--, bit_off += 3, dpl++)
+ lmt_arg |= ((u64) dpl[1] - 1) << bit_off;
+ }
+ }
+ else
+ {
+ const u64 n_dwords = 2;
+ ioaddr |= (n_dwords - 1) << 4;
+
+ if (n_lines > 1)
+ {
+ lmt_arg |= (--n_lines) << 12;
+
+ for (u8 bit_off = 19; n_lines; n_lines--, bit_off += 3)
+ lmt_arg |= (n_dwords - 1) << bit_off;
+ }
+ }
+
+ roc_lmt_submit_steorl (lmt_arg, ioaddr);
+
+ return n_pkts;
+}
+
+VNET_DEV_NODE_FN (oct_tx_node)
+(vlib_main_t *vm, vlib_node_runtime_t *node, vlib_frame_t *frame)
+{
+ vnet_dev_tx_node_runtime_t *rt = vnet_dev_get_tx_node_runtime (node);
+ vnet_dev_tx_queue_t *txq = rt->tx_queue;
+ oct_txq_t *ctq = vnet_dev_get_tx_queue_data (txq);
+ u32 node_index = node->node_index;
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n, n_enq, n_left, n_pkts = frame->n_vectors;
+ vlib_buffer_t *buffers[VLIB_FRAME_SIZE + 8], **b = buffers;
+ u64 lmt_id = vm->thread_index << ROC_LMT_LINES_PER_CORE_LOG2;
+
+ oct_tx_ctx_t ctx = {
+ .node = node,
+    .hdr_w0_template = {
+ .aura = roc_npa_aura_handle_to_aura (ctq->aura_handle),
+ .sq = ctq->sq.qid,
+ .sizem1 = 1,
+ },
+ .lmt_id = lmt_id,
+ .lmt_ioaddr = ctq->io_addr,
+ .lmt_lines = ctq->lmt_addr + (lmt_id << ROC_LMT_LINE_SIZE_LOG2),
+ };
+
+ vlib_get_buffers (vm, vlib_frame_vector_args (frame), b, n_pkts);
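+  /* duplicate the last buffer pointer so prefetches past the end of the
+     vector stay within valid memory */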
+ for (int i = 0; i < 8; i++)
+ b[n_pkts + i] = b[n_pkts - 1];
+
+ vnet_dev_tx_queue_lock_if_needed (txq);
+
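+  /* reclaim buffers already transmitted by the hardware before computing
+     available ring space */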
+ n_enq = ctq->n_enq;
+ n_enq -= oct_batch_free (vm, &ctx, txq);
+
+ if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
+ {
+ for (n_left = clib_min (n_pkts, txq->size - n_enq), n = 0; n_left >= 16;
+ n_left -= 16, b += 16)
+ n += oct_tx_enq16 (vm, &ctx, txq, b, 16, /* trace */ 1);
+
+ if (n_left)
+ n += oct_tx_enq16 (vm, &ctx, txq, b, n_left, /* trace */ 1);
+ }
+ else
+ {
+ for (n_left = clib_min (n_pkts, txq->size - n_enq), n = 0; n_left >= 16;
+ n_left -= 16, b += 16)
+ n += oct_tx_enq16 (vm, &ctx, txq, b, 16, /* trace */ 0);
+
+ if (n_left)
+ n += oct_tx_enq16 (vm, &ctx, txq, b, n_left, /* trace */ 0);
+ }
+
+ ctq->n_enq = n_enq + n;
+
+  if (n < n_pkts)
+    {
+      u32 n_unsent = n_pkts - n;
+      /* not all packets fit into the ring, free the unsent tail of the
+	 frame and report lack of free slots */
+      vlib_buffer_free (vm, from + n, n_unsent);
+      vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_NO_FREE_SLOTS,
+			n_unsent);
+      n_pkts -= n_unsent;
+    }
+
+ if (ctx.n_drop)
+ vlib_error_count (vm, node->node_index, OCT_TX_NODE_CTR_CHAIN_TOO_LONG,
+ ctx.n_drop);
+
+ if (ctx.batch_alloc_not_ready)
+ vlib_error_count (vm, node_index,
+ OCT_TX_NODE_CTR_AURA_BATCH_ALLOC_NOT_READY,
+ ctx.batch_alloc_not_ready);
+
+ if (ctx.batch_alloc_issue_fail)
+ vlib_error_count (vm, node_index,
+ OCT_TX_NODE_CTR_AURA_BATCH_ALLOC_ISSUE_FAIL,
+ ctx.batch_alloc_issue_fail);
+
+ vnet_dev_tx_queue_unlock_if_needed (txq);
+
+ if (ctx.n_drop)
+ {
+ u32 bi[VLIB_FRAME_SIZE];
+ vlib_get_buffer_indices (vm, ctx.drop, bi, ctx.n_drop);
+ vlib_buffer_free (vm, bi, ctx.n_drop);
+ n_pkts -= ctx.n_drop;
+ }
+
+ return n_pkts;
+}