aboutsummaryrefslogtreecommitdiffstats
path: root/src/plugins
diff options
context:
space:
mode:
Diffstat (limited to 'src/plugins')
-rw-r--r--src/plugins/Makefile.am4
-rw-r--r--src/plugins/avf.am57
-rw-r--r--src/plugins/avf/README.md94
-rw-r--r--src/plugins/avf/avf.h265
-rw-r--r--src/plugins/avf/cli.c200
-rw-r--r--src/plugins/avf/device.c1243
-rw-r--r--src/plugins/avf/format.c145
-rw-r--r--src/plugins/avf/input.c439
-rw-r--r--src/plugins/avf/output.c183
-rw-r--r--src/plugins/avf/plugin.c35
-rw-r--r--src/plugins/avf/virtchnl.h342
11 files changed, 3007 insertions, 0 deletions
diff --git a/src/plugins/Makefile.am b/src/plugins/Makefile.am
index 03a39dfcd7a..d87d74f8402 100644
--- a/src/plugins/Makefile.am
+++ b/src/plugins/Makefile.am
@@ -34,6 +34,10 @@ if ENABLE_ACL_PLUGIN
include acl.am
endif
+if ENABLE_AVF_PLUGIN
+include avf.am
+endif
+
if ENABLE_CDP_PLUGIN
include cdp.am
endif
diff --git a/src/plugins/avf.am b/src/plugins/avf.am
new file mode 100644
index 00000000000..76231a16bcb
--- /dev/null
+++ b/src/plugins/avf.am
@@ -0,0 +1,57 @@
+# Copyright (c) 2018 Cisco Systems, Inc.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+vppplugins_LTLIBRARIES += avf_plugin.la
+
+avf_plugin_la_LIBADD =
+avf_plugin_la_SOURCES = \
+ avf/cli.c \
+ avf/device.c \
+ avf/format.c \
+ avf/input.c \
+ avf/output.c \
+ avf/plugin.c
+
+noinst_HEADERS += avf/avf.h
+
+if CPU_X86_64
+avf_multiversioning_files = \
+ avf/input.c \
+ avf/output.c
+
+if CC_SUPPORTS_AVX2
+###############################################################
+# AVX2
+###############################################################
+libavf_plugin_avx2_la_SOURCES = $(avf_multiversioning_files)
+libavf_plugin_avx2_la_CFLAGS = \
+ $(AM_CFLAGS) @CPU_AVX2_FLAGS@ \
+ -DCLIB_MULTIARCH_VARIANT=avx2
+noinst_LTLIBRARIES += libavf_plugin_avx2.la
+avf_plugin_la_LIBADD += libavf_plugin_avx2.la
+endif
+
+if CC_SUPPORTS_AVX512
+###############################################################
+# AVX512
+###############################################################
+libavf_plugin_avx512_la_SOURCES = $(avf_multiversioning_files)
+libavf_plugin_avx512_la_CFLAGS = \
+ $(AM_CFLAGS) @CPU_AVX512_FLAGS@ \
+ -DCLIB_MULTIARCH_VARIANT=avx512
+noinst_LTLIBRARIES += libavf_plugin_avx512.la
+avf_plugin_la_LIBADD += libavf_plugin_avx512.la
+endif
+endif
+
+# vi:syntax=automake
diff --git a/src/plugins/avf/README.md b/src/plugins/avf/README.md
new file mode 100644
index 00000000000..422a6c07fe8
--- /dev/null
+++ b/src/plugins/avf/README.md
@@ -0,0 +1,94 @@
+# Intel AVF device plugin for VPP
+
+## Overview
+This plugin provides native device support for the Intel Adaptive Virtual
+Function (AVF). AVF is a driver specification for current and future
+Intel Virtual Function devices. AVF defines a communication channel between
+the Physical Function (PF) and VF.
+In essence, today this driver can be used only with
+Intel XL710 / X710 / XXV710 adapters.
+
+## Prerequisites
+* Driver requires a newer i40e PF Linux driver to be installed on the system,
+which supports the virtchnl interface. This code is tested with i40e driver
+version 2.4.6.
+
+* Driver requires MSI-X interrupt support, which is not supported by
+uio_pci_generic driver, so vfio-pci needs to be used. On systems without IOMMU
+vfio driver can still be used with recent kernels which support no-iommu mode.
+
+## Known issues
+This driver is still in the experimental phase; however, it shows very good
+performance numbers. The following items are not implemented (yet).
+
+* Jumbo MTU support
+* Interrupt and adaptive mode
+* NUMA support
+
+## Usage
+### System setup
+
+1. load VFIO driver
+```
+sudo modprobe vfio-pci
+```
+
+2. (systems without IOMMU only) enable unsafe NOIOMMU mode
+```
+echo Y | sudo tee /sys/module/vfio/parameters/enable_unsafe_noiommu_mode
+```
+
+3. Create and bind SR-IOV virtual function(s)
+
+Following script creates VF, assigns MAC address and binds VF to vfio-pci
+```bash
+#!/bin/bash
+
+if [ $USER != "root" ] ; then
+ echo "Restarting script with sudo..."
+ sudo $0 ${*}
+ exit
+fi
+
+setup () {
+ cd /sys/bus/pci/devices/${1}
+ driver=$(basename $(readlink driver))
+ if [ "${driver}" != "i40e" ]; then
+ echo ${1} | tee driver/unbind
+ echo ${1} | tee /sys/bus/pci/drivers/i40e/bind
+ fi
+ ifname=$(basename net/*)
+ echo 0 | tee sriov_numvfs > /dev/null
+ echo 1 | tee sriov_numvfs > /dev/null
+ ip link set dev ${ifname} vf 0 mac ${2}
+ ip link show dev ${ifname}
+ vf=$(basename $(readlink virtfn0))
+ echo ${vf} | tee virtfn0/driver/unbind
+ echo vfio-pci | tee virtfn0/driver_override
+ echo ${vf} | sudo tee /sys/bus/pci/drivers/vfio-pci/bind
+ echo | tee virtfn0/driver_override
+}
+
+# Setup one VF on PF 0000:3b:00.0 and assign MAC address
+setup 0000:3b:00.0 00:11:22:33:44:00
+# Setup one VF on PF 0000:3b:00.1 and assign MAC address
+setup 0000:3b:00.1 00:11:22:33:44:01
+```
+
+### Interface Creation
+Interfaces can be dynamically created by using the following CLI:
+```
+create interface avf 0000:3b:02.0
+set int state AVF0/3b/2/0 up
+```
+
+### Interface Deletion
+Interface can be deleted with following CLI:
+```
+delete interface avf <interface name>
+```
+
+### Interface Statistics
+Interface statistics can be displayed with `sh hardware-interface <if-name>`
+command.
+
diff --git a/src/plugins/avf/avf.h b/src/plugins/avf/avf.h
new file mode 100644
index 00000000000..71e6f09e824
--- /dev/null
+++ b/src/plugins/avf/avf.h
@@ -0,0 +1,265 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <avf/virtchnl.h>
+
+#define foreach_avf_device_flags \
+ _(0, INITIALIZED, "initialized") \
+ _(1, ERROR, "error") \
+ _(2, ADMIN_UP, "admin-up") \
+ _(3, IOVA, "iova") \
+ _(4, LINK_UP, "link-up") \
+ _(5, SHARED_TXQ_LOCK, "shared-txq-lock") \
+ _(6, ELOG, "elog")
+
+enum
+{
+#define _(a, b, c) AVF_DEVICE_F_##b = (1 << a),
+ foreach_avf_device_flags
+#undef _
+};
+
+typedef struct
+{
+ u64 qword[4];
+} avf_rx_desc_t;
+
+STATIC_ASSERT_SIZEOF (avf_rx_desc_t, 32);
+
+typedef struct
+{
+ union
+ {
+ u64 qword[2];
+ u64x2 as_u64x2;
+ };
+} avf_tx_desc_t;
+
+STATIC_ASSERT_SIZEOF (avf_tx_desc_t, 16);
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 *qrx_tail;
+ u16 next;
+ u16 size;
+ avf_rx_desc_t *descs;
+ u32 *bufs;
+ u16 n_bufs;
+} avf_rxq_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ volatile u32 *qtx_tail;
+ u16 next;
+ u16 size;
+ clib_spinlock_t lock;
+ avf_tx_desc_t *descs;
+ u32 *bufs;
+ u16 n_bufs;
+} avf_txq_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 flags;
+ u32 per_interface_next_index;
+
+ u32 dev_instance;
+ u32 sw_if_index;
+ u32 hw_if_index;
+ vlib_pci_dev_handle_t pci_dev_handle;
+ void *bar0;
+
+ /* queues */
+ avf_rxq_t *rxqs;
+ avf_txq_t *txqs;
+
+ /* Admin queues */
+ avf_aq_desc_t *atq;
+ avf_aq_desc_t *arq;
+ void *atq_bufs;
+ void *arq_bufs;
+ u64 atq_bufs_pa;
+ u64 arq_bufs_pa;
+ u16 atq_next_slot;
+ u16 arq_next_slot;
+ virtchnl_pf_event_t *events;
+
+ u16 vsi_id;
+ u32 feature_bitmap;
+ u8 hwaddr[6];
+ u16 num_queue_pairs;
+ u16 max_vectors;
+ u16 max_mtu;
+ u32 rss_key_size;
+ u32 rss_lut_size;
+ virtchnl_link_speed_t link_speed;
+
+ /* stats */
+ virtchnl_eth_stats_t eth_stats;
+
+ /* error */
+ clib_error_t *error;
+} avf_device_t;
+
+typedef struct
+{
+ u32 bi;
+ u32 status;
+ u16 length;
+ u16 tail_length;
+ u8 ptype;
+ u8 error;
+} avf_rx_vector_entry_t;
+
+STATIC_ASSERT_SIZEOF (avf_rx_vector_entry_t, 16);
+
+#define AVF_RX_VECTOR_SZ VLIB_FRAME_SIZE
+
+enum
+{
+ AVF_PROCESS_EVENT_START = 1,
+ AVF_PROCESS_EVENT_STOP = 2,
+ AVF_PROCESS_EVENT_AQ_INT = 3,
+} avf_process_event_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ avf_rx_vector_entry_t rx_vector[AVF_RX_VECTOR_SZ];
+ u32 *to_free;
+} avf_per_thread_data_t;
+
+typedef struct
+{
+ u8 next_node;
+ i8 buffer_advance;
+ u32 flags;
+} avf_ptype_t;
+
+STATIC_ASSERT (VNET_DEVICE_INPUT_N_NEXT_NODES < 256, "too many next nodes");
+
+typedef struct
+{
+ avf_device_t *devices;
+ avf_per_thread_data_t *per_thread_data;
+ vlib_physmem_region_index_t physmem_region;
+ int physmem_region_alloc;
+
+ /* 256 element array for ptype based lookup */
+ avf_ptype_t *ptypes;
+} avf_main_t;
+
+extern avf_main_t avf_main;
+
+typedef struct
+{
+ vlib_pci_addr_t addr;
+ int enable_elog;
+ /* return */
+ int rv;
+ clib_error_t *error;
+} avf_create_if_args_t;
+
+void avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args);
+void avf_delete_if (vlib_main_t * vm, avf_device_t * ad);
+
+extern vlib_node_registration_t avf_input_node;
+extern vnet_device_class_t avf_device_class;
+uword avf_interface_tx (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame);
+
+/* format.c */
+format_function_t format_avf_device;
+format_function_t format_avf_device_name;
+format_function_t format_avf_input_trace;
+
+static inline u32
+avf_get_u32 (void *start, int offset)
+{
+ return *(u32 *) (((u8 *) start) + offset);
+}
+
+static inline u64
+avf_get_u64 (void *start, int offset)
+{
+ return *(u64 *) (((u8 *) start) + offset);
+}
+
+static inline u32
+avf_get_u32_bits (void *start, int offset, int first, int last)
+{
+ u32 value = avf_get_u32 (start, offset);
+ if ((last == 0) && (first == 31))
+ return value;
+ value >>= last;
+ value &= (1 << (first - last + 1)) - 1;
+ return value;
+}
+
+static inline u64
+avf_get_u64_bits (void *start, int offset, int first, int last)
+{
+ u64 value = avf_get_u64 (start, offset);
+ if ((last == 0) && (first == 63))
+ return value;
+ value >>= last;
+ value &= (1 << (first - last + 1)) - 1;
+ return value;
+}
+
+static inline void
+avf_set_u32 (void *start, int offset, u32 value)
+{
+ (*(u32 *) (((u8 *) start) + offset)) = value;
+}
+
+static inline void
+avf_reg_write (avf_device_t * ad, u32 addr, u32 val)
+{
+ *(volatile u32 *) ((u8 *) ad->bar0 + addr) = val;
+}
+
+static inline u32
+avf_reg_read (avf_device_t * ad, u32 addr)
+{
+ return *(volatile u32 *) (ad->bar0 + addr);
+}
+
+static inline void
+avf_reg_flush (avf_device_t * ad)
+{
+ avf_reg_read (ad, AVFGEN_RSTAT);
+ asm volatile ("":::"memory");
+}
+
+typedef struct
+{
+ u32 next_index;
+ u32 hw_if_index;
+ avf_rx_vector_entry_t rxve;
+} avf_input_trace_t;
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/cli.c b/src/plugins/avf/cli.c
new file mode 100644
index 00000000000..ba9f5600f99
--- /dev/null
+++ b/src/plugins/avf/cli.c
@@ -0,0 +1,200 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+#include <stdint.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <inttypes.h>
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <avf/avf.h>
+
+static clib_error_t *
+avf_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ avf_create_if_args_t args;
+
+ memset (&args, 0, sizeof (avf_create_if_args_t));
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "%U", unformat_vlib_pci_addr, &args.addr))
+ ;
+ else if (unformat (line_input, "elog"))
+ args.enable_elog = 1;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+
+ avf_create_if (vm, &args);
+
+ return args.error;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (avf_create_command, static) = {
+ .path = "create interface avf",
+ .short_help = "create interface avf <pci-address>",
+ .function = avf_create_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+avf_delete_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index = ~0;
+ vnet_hw_interface_t *hw;
+ avf_main_t *am = &avf_main;
+ avf_device_t *ad;
+ vnet_main_t *vnm = vnet_get_main ();
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "sw_if_index %d", &sw_if_index))
+ ;
+ else if (unformat (line_input, "%U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0,
+ "please specify interface name or sw_if_index");
+
+ hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ if (hw == NULL || avf_device_class.index != hw->dev_class_index)
+ return clib_error_return (0, "not a AVF interface");
+
+ ad = pool_elt_at_index (am->devices, hw->dev_instance);
+
+ avf_delete_if (vm, ad);
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (avf_delete_command, static) = {
+ .path = "delete interface avf",
+ .short_help = "delete interface avf "
+ "{<interface> | sw_if_index <sw_idx>}",
+ .function = avf_delete_command_fn,
+};
+/* *INDENT-ON* */
+
+static clib_error_t *
+avf_test_command_fn (vlib_main_t * vm, unformat_input_t * input,
+ vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index = ~0;
+ vnet_hw_interface_t *hw;
+ avf_main_t *am = &avf_main;
+ avf_device_t *ad;
+ vnet_main_t *vnm = vnet_get_main ();
+ int test_irq = 0, enable_elog = 0, disable_elog = 0;
+
+ /* Get a line of input. */
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "sw_if_index %d", &sw_if_index))
+ ;
+ else if (unformat (line_input, "irq"))
+ test_irq = 1;
+ else if (unformat (line_input, "elog-on"))
+ enable_elog = 1;
+ else if (unformat (line_input, "elog-off"))
+ disable_elog = 1;
+ else if (unformat (line_input, "%U", unformat_vnet_sw_interface,
+ vnm, &sw_if_index))
+ ;
+ else
+ return clib_error_return (0, "unknown input `%U'",
+ format_unformat_error, input);
+ }
+ unformat_free (line_input);
+
+ if (sw_if_index == ~0)
+ return clib_error_return (0,
+ "please specify interface name or sw_if_index");
+
+ hw = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ if (hw == NULL || avf_device_class.index != hw->dev_class_index)
+ return clib_error_return (0, "not a AVF interface");
+
+ ad = pool_elt_at_index (am->devices, hw->dev_instance);
+
+ if (enable_elog)
+ ad->flags |= AVF_DEVICE_F_ELOG;
+
+ if (disable_elog)
+ ad->flags &= ~AVF_DEVICE_F_ELOG;
+
+ if (test_irq)
+ avf_reg_write (ad, AVFINT_DYN_CTL0, (1 << 0) | (3 << 3) | (1 << 2));
+
+ return 0;
+}
+
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (avf_test_command, static) = {
+ .path = "test avf",
+ .short_help = "test avf [<interface> | sw_if_index <sw_idx>] [irq] "
+ "[elog-on] [elog-off]",
+ .function = avf_test_command_fn,
+};
+/* *INDENT-ON* */
+
+clib_error_t *
+avf_cli_init (vlib_main_t * vm)
+{
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (avf_cli_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/device.c b/src/plugins/avf/device.c
new file mode 100644
index 00000000000..4a0fed19d44
--- /dev/null
+++ b/src/plugins/avf/device.c
@@ -0,0 +1,1243 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <avf/avf.h>
+
+#define AVF_MBOX_LEN 64
+#define AVF_MBOX_BUF_SZ 512
+#define AVF_RXQ_SZ 512
+#define AVF_TXQ_SZ 512
+#define AVF_ITR_INT 8160
+
+#define PCI_VENDOR_ID_INTEL 0x8086
+#define PCI_DEVICE_ID_INTEL_AVF 0x1889
+#define PCI_DEVICE_ID_INTEL_X710_VF 0x154c
+#define PCI_DEVICE_ID_INTEL_X722_VF 0x37cd
+
+avf_main_t avf_main;
+
+static pci_device_id_t avf_pci_device_ids[] = {
+ {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_AVF},
+ {.vendor_id = PCI_VENDOR_ID_INTEL,.device_id = PCI_DEVICE_ID_INTEL_X710_VF},
+ {0},
+};
+
+//#define avf_log_debug(fmt, ...) fformat(stderr, "%s: " fmt "\n", __func__, __VA_ARGS__)
+#define avf_log_debug(fmt, ...)
+
+static inline void
+avf_irq_0_disable (avf_device_t * ad)
+{
+ u32 dyn_ctl0 = 0, icr0_ena = 0;
+
+ dyn_ctl0 |= (3 << 3); /* 11b = No ITR update */
+
+ avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
+ avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
+ avf_reg_flush (ad);
+}
+
+static inline void
+avf_irq_0_enable (avf_device_t * ad)
+{
+ u32 dyn_ctl0 = 0, icr0_ena = 0;
+
+ icr0_ena |= (1 << 30); /* [30] Admin Queue Enable */
+
+ dyn_ctl0 |= (1 << 0); /* [0] Interrupt Enable */
+ dyn_ctl0 |= (1 << 1); /* [1] Clear PBA */
+ //dyn_ctl0 |= (3 << 3); /* [4:3] ITR Index, 11b = No ITR update */
+ dyn_ctl0 |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
+
+ avf_irq_0_disable (ad);
+ avf_reg_write (ad, AVFINT_ICR0_ENA1, icr0_ena);
+ avf_reg_write (ad, AVFINT_DYN_CTL0, dyn_ctl0);
+ avf_reg_flush (ad);
+}
+
+static inline void
+avf_irq_n_disable (avf_device_t * ad, u8 line)
+{
+ u32 dyn_ctln = 0;
+
+ avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
+ avf_reg_flush (ad);
+}
+
+static inline void
+avf_irq_n_enable (avf_device_t * ad, u8 line)
+{
+ u32 dyn_ctln = 0;
+
+ dyn_ctln |= (1 << 0); /* [0] Interrupt Enable */
+ dyn_ctln |= (1 << 1); /* [1] Clear PBA */
+ dyn_ctln |= ((AVF_ITR_INT / 2) << 5); /* [16:5] ITR Interval in 2us steps */
+
+ avf_irq_n_disable (ad, line);
+ avf_reg_write (ad, AVFINT_DYN_CTLN (line), dyn_ctln);
+ avf_reg_flush (ad);
+}
+
+
+clib_error_t *
+avf_aq_desc_enq (vlib_main_t * vm, avf_device_t * ad, avf_aq_desc_t * dt,
+ void *data, int len)
+{
+ clib_error_t *err = 0;
+ avf_aq_desc_t *d, dc;
+ int n_retry = 5;
+
+ d = &ad->atq[ad->atq_next_slot];
+ clib_memcpy (d, dt, sizeof (avf_aq_desc_t));
+ d->flags |= AVF_AQ_F_RD | AVF_AQ_F_SI;
+ if (len)
+ d->datalen = len;
+ if (len)
+ {
+ u64 pa;
+ pa = ad->atq_bufs_pa + ad->atq_next_slot * AVF_MBOX_BUF_SZ;
+ d->addr_hi = (u32) (pa >> 32);
+ d->addr_lo = (u32) pa;
+ clib_memcpy (ad->atq_bufs + ad->atq_next_slot * AVF_MBOX_BUF_SZ, data,
+ len);
+ d->flags |= AVF_AQ_F_BUF;
+ }
+
+ if (ad->flags & AVF_DEVICE_F_ELOG)
+ clib_memcpy (&dc, d, sizeof (avf_aq_desc_t));
+
+ CLIB_MEMORY_BARRIER ();
+ avf_log_debug ("%U", format_hexdump, data, len);
+ ad->atq_next_slot = (ad->atq_next_slot + 1) % AVF_MBOX_LEN;
+ avf_reg_write (ad, AVF_ATQT, ad->atq_next_slot);
+ avf_reg_flush (ad);
+
+retry:
+ vlib_process_suspend (vm, 10e-6);
+
+ if (((d->flags & AVF_AQ_F_DD) == 0) || ((d->flags & AVF_AQ_F_CMP) == 0))
+ {
+ if (--n_retry == 0)
+ {
+ err = clib_error_return (0, "adminq enqueue timeout [opcode 0x%x]",
+ d->opcode);
+ goto done;
+ }
+ goto retry;
+ }
+
+ clib_memcpy (dt, d, sizeof (avf_aq_desc_t));
+ if (d->flags & AVF_AQ_F_ERR)
+ return clib_error_return (0, "adminq enqueue error [opcode 0x%x, retval "
+ "%d]", d->opcode, d->retval);
+
+done:
+ if (ad->flags & AVF_DEVICE_F_ELOG)
+ {
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (el) =
+ {
+ .format = "avf[%d] aq enq: s_flags 0x%x r_flags 0x%x opcode 0x%x "
+ "datalen %d retval %d",
+ .format_args = "i4i2i2i2i2i2",
+ };
+ struct
+ {
+ u32 dev_instance;
+ u16 s_flags;
+ u16 r_flags;
+ u16 opcode;
+ u16 datalen;
+ u16 retval;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, el);
+ ed->dev_instance = ad->dev_instance;
+ ed->s_flags = dc.flags;
+ ed->r_flags = d->flags;
+ ed->opcode = dc.opcode;
+ ed->datalen = dc.datalen;
+ ed->retval = d->retval;
+ /* *INDENT-ON* */
+ }
+
+ return err;
+}
+
+clib_error_t *
+avf_cmd_rx_ctl_reg_write (vlib_main_t * vm, avf_device_t * ad, u32 reg,
+ u32 val)
+{
+ clib_error_t *err;
+ avf_aq_desc_t d = {.opcode = 0x207,.param1 = reg,.param3 = val };
+ err = avf_aq_desc_enq (vm, ad, &d, 0, 0);
+
+ if (ad->flags & AVF_DEVICE_F_ELOG)
+ {
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (el) =
+ {
+ .format = "avf[%d] rx ctl reg write: reg 0x%x val 0x%x ",
+ .format_args = "i4i4i4",
+ };
+ struct
+ {
+ u32 dev_instance;
+ u32 reg;
+ u32 val;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, el);
+ ed->dev_instance = ad->dev_instance;
+ ed->reg = reg;
+ ed->val = val;
+ /* *INDENT-ON* */
+ }
+ return err;
+}
+
+clib_error_t *
+avf_rxq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid)
+{
+ avf_main_t *am = &avf_main;
+ avf_rxq_t *rxq;
+ clib_error_t *error = 0;
+ u32 n_alloc, i;
+
+ vec_validate_aligned (ad->rxqs, qid, CLIB_CACHE_LINE_BYTES);
+ rxq = vec_elt_at_index (ad->rxqs, qid);
+ rxq->size = AVF_RXQ_SZ;
+ rxq->next = 0;
+ rxq->descs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
+ rxq->size * sizeof (avf_rx_desc_t),
+ 2 * CLIB_CACHE_LINE_BYTES);
+ memset (rxq->descs, 0, rxq->size * sizeof (avf_rx_desc_t));
+ vec_validate_aligned (rxq->bufs, rxq->size, CLIB_CACHE_LINE_BYTES);
+ rxq->qrx_tail = ad->bar0 + AVF_QRX_TAIL (qid);
+
+ n_alloc = vlib_buffer_alloc (vm, rxq->bufs, rxq->size - 8);
+
+ if (n_alloc == 0)
+ return clib_error_return (0, "buffer allocation error");
+
+ rxq->n_bufs = n_alloc;
+ avf_rx_desc_t *d = rxq->descs;
+ for (i = 0; i < n_alloc; i++)
+ {
+ if (ad->flags & AVF_DEVICE_F_IOVA)
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[i]);
+ d->qword[0] = pointer_to_uword (b->data);
+ }
+ else
+ d->qword[0] =
+ vlib_get_buffer_data_physical_address (vm, rxq->bufs[i]);
+ d++;
+ }
+ return 0;
+}
+
+clib_error_t *
+avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid)
+{
+ avf_main_t *am = &avf_main;
+ avf_txq_t *txq;
+ clib_error_t *error = 0;
+
+ if (qid >= ad->num_queue_pairs)
+ {
+ qid = qid % ad->num_queue_pairs;
+ txq = vec_elt_at_index (ad->txqs, qid);
+ if (txq->lock == 0)
+ clib_spinlock_init (&txq->lock);
+ ad->flags |= AVF_DEVICE_F_SHARED_TXQ_LOCK;
+ return 0;
+ }
+
+ vec_validate_aligned (ad->txqs, qid, CLIB_CACHE_LINE_BYTES);
+ txq = vec_elt_at_index (ad->txqs, qid);
+ txq->size = AVF_TXQ_SZ;
+ txq->next = 0;
+ txq->descs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
+ txq->size * sizeof (avf_tx_desc_t),
+ 2 * CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (txq->bufs, txq->size, CLIB_CACHE_LINE_BYTES);
+ txq->qtx_tail = ad->bar0 + AVF_QTX_TAIL (qid);
+ return 0;
+}
+
+typedef struct
+{
+ u16 vsi_id;
+ u16 flags;
+} virtchnl_promisc_info_t;
+
+void
+avf_arq_slot_init (avf_device_t * ad, u16 slot)
+{
+ avf_aq_desc_t *d;
+ u64 pa = ad->arq_bufs_pa + slot * AVF_MBOX_BUF_SZ;
+ d = &ad->arq[slot];
+ memset (d, 0, sizeof (avf_aq_desc_t));
+ d->flags = AVF_AQ_F_BUF;
+ d->datalen = AVF_MBOX_BUF_SZ;
+ d->addr_hi = (u32) (pa >> 32);
+ d->addr_lo = (u32) pa;
+}
+
+static inline uword
+avf_dma_addr (vlib_main_t * vm, avf_device_t * ad, void *p)
+{
+ avf_main_t *am = &avf_main;
+ return (ad->flags & AVF_DEVICE_F_IOVA) ?
+ pointer_to_uword (p) :
+ vlib_physmem_virtual_to_physical (vm, am->physmem_region, p);
+}
+
+static void
+avf_adminq_init (vlib_main_t * vm, avf_device_t * ad)
+{
+ u64 pa;
+ int i;
+
+ /* VF MailBox Transmit */
+ memset (ad->atq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
+ ad->atq_bufs_pa = avf_dma_addr (vm, ad, ad->atq_bufs);
+
+ pa = avf_dma_addr (vm, ad, ad->atq);
+ avf_reg_write (ad, AVF_ATQT, 0); /* Tail */
+ avf_reg_write (ad, AVF_ATQH, 0); /* Head */
+ avf_reg_write (ad, AVF_ATQLEN, AVF_MBOX_LEN | (1 << 31)); /* len & ena */
+ avf_reg_write (ad, AVF_ATQBAL, (u32) pa); /* Base Address Low */
+ avf_reg_write (ad, AVF_ATQBAH, (u32) (pa >> 32)); /* Base Address High */
+
+ /* VF MailBox Receive */
+ memset (ad->arq, 0, sizeof (avf_aq_desc_t) * AVF_MBOX_LEN);
+ ad->arq_bufs_pa = avf_dma_addr (vm, ad, ad->arq_bufs);
+
+ for (i = 0; i < AVF_MBOX_LEN; i++)
+ avf_arq_slot_init (ad, i);
+
+ pa = avf_dma_addr (vm, ad, ad->arq);
+
+ avf_reg_write (ad, AVF_ARQH, 0); /* Head */
+  avf_reg_write (ad, AVF_ARQT, 0); /* Tail */
+ avf_reg_write (ad, AVF_ARQLEN, AVF_MBOX_LEN | (1 << 31)); /* len & ena */
+ avf_reg_write (ad, AVF_ARQBAL, (u32) pa); /* Base Address Low */
+ avf_reg_write (ad, AVF_ARQBAH, (u32) (pa >> 32)); /* Base Address High */
+ avf_reg_write (ad, AVF_ARQT, AVF_MBOX_LEN - 1); /* Tail */
+
+ ad->atq_next_slot = 0;
+ ad->arq_next_slot = 0;
+}
+
+clib_error_t *
+avf_send_to_pf (vlib_main_t * vm, avf_device_t * ad, virtchnl_ops_t op,
+ void *in, int in_len, void *out, int out_len)
+{
+ clib_error_t *err;
+ avf_aq_desc_t *d, dt = {.opcode = 0x801,.v_opcode = op };
+ u32 head;
+ int n_retry = 5;
+
+
+  /* suppress interrupt in the next adminq receive slot
+     as we are going to wait for the response;
+     we only need interrupts when an event is received */
+ d = &ad->arq[ad->arq_next_slot];
+ d->flags |= AVF_AQ_F_SI;
+
+ if ((err = avf_aq_desc_enq (vm, ad, &dt, in, in_len)))
+ return err;
+
+retry:
+ head = avf_get_u32 (ad->bar0, AVF_ARQH);
+
+ if (ad->arq_next_slot == head)
+ {
+ if (--n_retry == 0)
+ return clib_error_return (0, "timeout");
+ vlib_process_suspend (vm, 10e-3);
+ goto retry;
+ }
+
+ d = &ad->arq[ad->arq_next_slot];
+
+ if (d->v_opcode == VIRTCHNL_OP_EVENT)
+ {
+ void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
+ virtchnl_pf_event_t *e;
+
+ if ((d->datalen != sizeof (virtchnl_pf_event_t)) ||
+ ((d->flags & AVF_AQ_F_BUF) == 0))
+ return clib_error_return (0, "event message error");
+
+ vec_add2 (ad->events, e, 1);
+ clib_memcpy (e, buf, sizeof (virtchnl_pf_event_t));
+ avf_arq_slot_init (ad, ad->arq_next_slot);
+ ad->arq_next_slot++;
+ n_retry = 5;
+ goto retry;
+ }
+
+ if (d->v_opcode != op)
+ {
+ err = clib_error_return (0, "unexpected message receiver [v_opcode = %u"
+ "expected %u]", d->v_opcode, op);
+ goto done;
+ }
+
+ if (d->v_retval)
+ {
+ err = clib_error_return (0, "error [v_opcode = %u, v_retval %d]",
+ d->v_opcode, d->v_retval);
+ goto done;
+ }
+
+ if (d->flags & AVF_AQ_F_BUF)
+ {
+ void *buf = ad->arq_bufs + ad->arq_next_slot * AVF_MBOX_BUF_SZ;
+ clib_memcpy (out, buf, out_len);
+ }
+
+ avf_arq_slot_init (ad, ad->arq_next_slot);
+ avf_reg_write (ad, AVF_ARQT, ad->arq_next_slot);
+ avf_reg_flush (ad);
+ ad->arq_next_slot = (ad->arq_next_slot + 1) % AVF_MBOX_LEN;
+
+done:
+
+ if (ad->flags & AVF_DEVICE_F_ELOG)
+ {
+ /* *INDENT-OFF* */
+ ELOG_TYPE_DECLARE (el) =
+ {
+ .format = "avf[%d] send to pf: v_opcode %s (%d) v_retval 0x%x",
+ .format_args = "i4t4i4i4",
+ .n_enum_strings = VIRTCHNL_N_OPS,
+ .enum_strings = {
+#define _(v, n) [v] = #n,
+ foreach_virtchnl_op
+#undef _
+ },
+ };
+ struct
+ {
+ u32 dev_instance;
+ u32 v_opcode;
+ u32 v_opcode_val;
+ u32 v_retval;
+ } *ed;
+ ed = ELOG_DATA (&vm->elog_main, el);
+ ed->dev_instance = ad->dev_instance;
+ ed->v_opcode = op;
+ ed->v_opcode_val = op;
+ ed->v_retval = d->v_retval;
+ /* *INDENT-ON* */
+ }
+ return err;
+}
+
+clib_error_t *
+avf_op_version (vlib_main_t * vm, avf_device_t * ad,
+ virtchnl_version_info_t * ver)
+{
+ clib_error_t *err = 0;
+ virtchnl_version_info_t myver = {
+ .major = VIRTCHNL_VERSION_MAJOR,
+ .minor = VIRTCHNL_VERSION_MINOR,
+ };
+
+ err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_VERSION, &myver,
+ sizeof (virtchnl_version_info_t), ver,
+ sizeof (virtchnl_version_info_t));
+
+ if (err)
+ return err;
+
+ return err;
+}
+
+clib_error_t *
+avf_op_get_vf_resources (vlib_main_t * vm, avf_device_t * ad,
+ virtchnl_vf_resource_t * res)
+{
+ clib_error_t *err = 0;
+ u32 bitmap = (VIRTCHNL_VF_OFFLOAD_L2 | VIRTCHNL_VF_OFFLOAD_RSS_AQ |
+ VIRTCHNL_VF_OFFLOAD_RSS_REG | VIRTCHNL_VF_OFFLOAD_WB_ON_ITR |
+ VIRTCHNL_VF_OFFLOAD_VLAN | VIRTCHNL_VF_OFFLOAD_RX_POLLING);
+
+ err = avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_VF_RESOURCES, &bitmap,
+ sizeof (u32), res, sizeof (virtchnl_vf_resource_t));
+
+ if (err)
+ return err;
+
+ return err;
+}
+
+clib_error_t *
+avf_op_disable_vlan_stripping (vlib_main_t * vm, avf_device_t * ad)
+{
+ return avf_send_to_pf (vm, ad, VIRTCHNL_OP_DISABLE_VLAN_STRIPPING, 0, 0, 0,
+ 0);
+}
+
+clib_error_t *
+avf_config_promisc_mode (vlib_main_t * vm, avf_device_t * ad)
+{
+ virtchnl_promisc_info_t pi = { 0 };
+
+ pi.vsi_id = ad->vsi_id;
+ pi.flags = 1;
+ return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, &pi,
+ sizeof (virtchnl_promisc_info_t), 0, 0);
+}
+
+
+clib_error_t *
+avf_op_config_vsi_queues (vlib_main_t * vm, avf_device_t * ad)
+{
+ int i;
+ int n_qp = clib_max (vec_len (ad->rxqs), vec_len (ad->txqs));
+ int msg_len = sizeof (virtchnl_vsi_queue_config_info_t) + n_qp *
+ sizeof (virtchnl_queue_pair_info_t);
+ u8 msg[msg_len];
+ virtchnl_vsi_queue_config_info_t *ci;
+
+ memset (msg, 0, msg_len);
+ ci = (virtchnl_vsi_queue_config_info_t *) msg;
+ ci->vsi_id = ad->vsi_id;
+ ci->num_queue_pairs = n_qp;
+
+ for (i = 0; i < n_qp; i++)
+ {
+ virtchnl_txq_info_t *txq = &ci->qpair[i].txq;
+ virtchnl_rxq_info_t *rxq = &ci->qpair[i].rxq;
+
+ rxq->vsi_id = ad->vsi_id;
+ rxq->queue_id = i;
+ rxq->max_pkt_size = 1518;
+ if (i < vec_len (ad->rxqs))
+ {
+ avf_rxq_t *q = vec_elt_at_index (ad->rxqs, i);
+ rxq->ring_len = q->size;
+ rxq->databuffer_size = VLIB_BUFFER_DEFAULT_FREE_LIST_BYTES;
+ rxq->dma_ring_addr = avf_dma_addr (vm, ad, q->descs);
+ avf_reg_write (ad, AVF_QRX_TAIL (i), q->size - 1);
+ }
+
+ avf_txq_t *q = vec_elt_at_index (ad->txqs, i);
+ txq->vsi_id = ad->vsi_id;
+ if (i < vec_len (ad->txqs))
+ {
+ txq->queue_id = i;
+ txq->ring_len = q->size;
+ txq->dma_ring_addr = avf_dma_addr (vm, ad, q->descs);
+ }
+ }
+
+ return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_VSI_QUEUES, msg, msg_len,
+ 0, 0);
+}
+
clib_error_t *
avf_op_config_irq_map (vlib_main_t * vm, avf_device_t * ad)
{
  /* Map device queues to MSI-X vectors; a single vector map is
     configured here. */
  int count = 1;
  int msg_len = sizeof (virtchnl_irq_map_info_t) +
    count * sizeof (virtchnl_vector_map_t);
  u8 msg[msg_len];
  virtchnl_irq_map_info_t *imi;

  memset (msg, 0, msg_len);
  imi = (virtchnl_irq_map_info_t *) msg;
  imi->num_vectors = count;

  /* NOTE(review): vector 0 appears to be reserved for the admin queue
     interrupt (see avf_irq_0_handler), so rx traffic is steered to
     vector 1; rxq_map = 1 presumably selects queue 0 as a bitmap --
     confirm against the virtchnl spec. */
  imi->vecmap[0].vector_id = 1;
  imi->vecmap[0].vsi_id = ad->vsi_id;
  imi->vecmap[0].rxq_map = 1;
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_CONFIG_IRQ_MAP, msg, msg_len, 0,
			 0);
}
+
+clib_error_t *
+avf_op_add_eth_addr (vlib_main_t * vm, avf_device_t * ad, u8 count, u8 * macs)
+{
+ int msg_len =
+ sizeof (virtchnl_ether_addr_list_t) +
+ count * sizeof (virtchnl_ether_addr_t);
+ u8 msg[msg_len];
+ virtchnl_ether_addr_list_t *al;
+ int i;
+
+ memset (msg, 0, msg_len);
+ al = (virtchnl_ether_addr_list_t *) msg;
+ al->vsi_id = ad->vsi_id;
+ al->num_elements = count;
+ for (i = 0; i < count; i++)
+ clib_memcpy (&al->list[i].addr, macs + i * 6, 6);
+ return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ADD_ETH_ADDR, msg, msg_len, 0,
+ 0);
+}
+
clib_error_t *
avf_op_enable_queues (vlib_main_t * vm, avf_device_t * ad, u32 rx, u32 tx)
{
  /* Enable the rx and tx queues selected by the 'rx'/'tx' bitmaps. */
  virtchnl_queue_select_t qs = { 0 };
  qs.vsi_id = ad->vsi_id;
  qs.rx_queues = rx;
  qs.tx_queues = tx;
  /* NOTE(review): the tail pointer of rx queue 0 is bumped on every call,
     even when only tx queues are being enabled -- confirm this is
     intentional and that rxqs[0] always exists when this runs. */
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, 0);
  avf_reg_write (ad, AVF_QRX_TAIL (0), rxq->n_bufs);
  return avf_send_to_pf (vm, ad, VIRTCHNL_OP_ENABLE_QUEUES, &qs,
			 sizeof (virtchnl_queue_select_t), 0, 0);
}
+
+clib_error_t *
+avf_op_get_stats (vlib_main_t * vm, avf_device_t * ad,
+ virtchnl_eth_stats_t * es)
+{
+ virtchnl_queue_select_t qs = { 0 };
+ qs.vsi_id = ad->vsi_id;
+ return avf_send_to_pf (vm, ad, VIRTCHNL_OP_GET_STATS,
+ &qs, sizeof (virtchnl_queue_select_t),
+ es, sizeof (virtchnl_eth_stats_t));
+}
+
+clib_error_t *
+avf_device_reset (vlib_main_t * vm, avf_device_t * ad)
+{
+ avf_aq_desc_t d = { 0 };
+ clib_error_t *error;
+ u32 rstat;
+ int n_retry = 20;
+
+ d.opcode = 0x801;
+ d.v_opcode = VIRTCHNL_OP_RESET_VF;
+ if ((error = avf_aq_desc_enq (vm, ad, &d, 0, 0)))
+ return error;
+
+retry:
+ vlib_process_suspend (vm, 10e-3);
+ rstat = avf_get_u32 (ad->bar0, AVFGEN_RSTAT);
+
+ if (rstat == 2 || rstat == 3)
+ return 0;
+
+ if (--n_retry == 0)
+ return clib_error_return (0, "reset failed (timeout)");
+
+ goto retry;
+}
+
clib_error_t *
avf_device_init (vlib_main_t * vm, avf_device_t * ad)
{
  /* Bring a freshly opened VF to an operational state.  The sequence is
     order-sensitive: adminq init, VF reset, adminq re-init, virtchnl
     version/resource negotiation, queue setup, irq mapping, MAC
     registration and finally queue enable. */
  virtchnl_version_info_t ver = { 0 };
  virtchnl_vf_resource_t res = { 0 };
  clib_error_t *error;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  int i;

  avf_adminq_init (vm, ad);

  if ((error = avf_device_reset (vm, ad)))
    return error;

  /* the reset invalidates the admin queues, so set them up again */
  avf_adminq_init (vm, ad);

  /*
   * OP_VERSION
   */
  if ((error = avf_op_version (vm, ad, &ver)))
    return error;

  if (ver.major != VIRTCHNL_VERSION_MAJOR ||
      ver.minor != VIRTCHNL_VERSION_MINOR)
    return clib_error_return (0, "incompatible protocol version "
			      "(remote %d.%d)", ver.major, ver.minor);

  /*
   * OP_GET_VF_RESOURCES
   */
  if ((error = avf_op_get_vf_resources (vm, ad, &res)))
    return error;

  /* exactly one SR-IOV VSI is expected for a VF */
  if (res.num_vsis != 1 || res.vsi_res[0].vsi_type != VIRTCHNL_VSI_SRIOV)
    return clib_error_return (0, "unexpected GET_VF_RESOURCE reply received");

  ad->vsi_id = res.vsi_res[0].vsi_id;
  ad->feature_bitmap = res.vf_offload_flags;
  ad->num_queue_pairs = res.num_queue_pairs;
  ad->max_vectors = res.max_vectors;
  ad->max_mtu = res.max_mtu;
  ad->rss_key_size = res.rss_key_size;
  ad->rss_lut_size = res.rss_lut_size;

  clib_memcpy (ad->hwaddr, res.vsi_res[0].default_mac_addr, 6);

  /*
   * Disable VLAN stripping
   */
  if ((error = avf_op_disable_vlan_stripping (vm, ad)))
    return error;

  if ((error = avf_config_promisc_mode (vm, ad)))
    return error;

  /* NOTE(review): 0xc400/0xc404 are magic rx-control register offsets
     being cleared here -- confirm their meaning against the datasheet. */
  if ((error = avf_cmd_rx_ctl_reg_write (vm, ad, 0xc400, 0)))
    return error;

  if ((error = avf_cmd_rx_ctl_reg_write (vm, ad, 0xc404, 0)))
    return error;

  /*
   * Init Queues: one rx queue, one tx queue per vlib thread
   */
  if ((error = avf_rxq_init (vm, ad, 0)))
    return error;

  for (i = 0; i < tm->n_vlib_mains; i++)
    if ((error = avf_txq_init (vm, ad, i)))
      return error;

  if ((error = avf_op_config_vsi_queues (vm, ad)))
    return error;

  if ((error = avf_op_config_irq_map (vm, ad)))
    return error;

  avf_irq_0_enable (ad);
  avf_irq_n_enable (ad, 0);

  if ((error = avf_op_add_eth_addr (vm, ad, 1, ad->hwaddr)))
    return error;

  /* enable rx queue 0, then tx queue 0 (queue-select bitmaps) */
  if ((error = avf_op_enable_queues (vm, ad, 1, 0)))
    return error;

  if ((error = avf_op_enable_queues (vm, ad, 0, 1)))
    return error;

  ad->flags |= AVF_DEVICE_F_INITIALIZED;
  return error;			/* error is 0 here */
}
+
void
avf_process_one_device (vlib_main_t * vm, avf_device_t * ad, int is_irq)
{
  /* Periodic / irq-driven housekeeping for one device: sanity-check the
     admin queues, refresh stats (on the periodic path only) and apply
     any queued PF events (currently link changes) to the vnet
     interface state.  On a fatal condition the device is parked in the
     ERROR state with ad->error describing the cause. */
  vnet_main_t *vnm = vnet_get_main ();
  virtchnl_pf_event_t *e;
  u32 r;

  if (ad->flags & AVF_DEVICE_F_ERROR)
    return;

  if ((ad->flags & AVF_DEVICE_F_INITIALIZED) == 0)
    return;

  ASSERT (ad->error == 0);

  /* the top nibble of ARQLEN/ATQLEN must contain only the enable bit
     (bit 31); anything else means the queue was disabled or faulted */
  r = avf_get_u32 (ad->bar0, AVF_ARQLEN);
  if ((r & 0xf0000000) != (1 << 31))
    {
      ad->error = clib_error_return (0, "arq not enabled, arqlen = 0x%x", r);
      goto error;
    }

  r = avf_get_u32 (ad->bar0, AVF_ATQLEN);
  if ((r & 0xf0000000) != (1 << 31))
    {
      ad->error = clib_error_return (0, "atq not enabled, atqlen = 0x%x", r);
      goto error;
    }

  /* stats are only polled on the periodic (non-irq) invocation */
  if (is_irq == 0)
    avf_op_get_stats (vm, ad, &ad->eth_stats);

  /* *INDENT-OFF* */
  vec_foreach (e, ad->events)
    {
      if (e->event == VIRTCHNL_EVENT_LINK_CHANGE)
	{
	  int link_up = e->event_data.link_event.link_status;
	  virtchnl_link_speed_t speed = e->event_data.link_event.link_speed;
	  u32 flags = 0;

	  /* only act on actual transitions of the cached link state */
	  if (link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) == 0)
	    {
	      ad->flags |= AVF_DEVICE_F_LINK_UP;
	      flags |= (VNET_HW_INTERFACE_FLAG_FULL_DUPLEX |
			VNET_HW_INTERFACE_FLAG_LINK_UP);
	      if (speed == VIRTCHNL_LINK_SPEED_40GB)
		flags |= VNET_HW_INTERFACE_FLAG_SPEED_40G;
	      else if (speed == VIRTCHNL_LINK_SPEED_25GB)
		flags |= VNET_HW_INTERFACE_FLAG_SPEED_25G;
	      else if (speed == VIRTCHNL_LINK_SPEED_10GB)
		flags |= VNET_HW_INTERFACE_FLAG_SPEED_10G;
	      else if (speed == VIRTCHNL_LINK_SPEED_1GB)
		flags |= VNET_HW_INTERFACE_FLAG_SPEED_1G;
	      else if (speed == VIRTCHNL_LINK_SPEED_100MB)
		flags |= VNET_HW_INTERFACE_FLAG_SPEED_100M;
	      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, flags);
	      ad->link_speed = speed;
	    }
	  else if (!link_up && (ad->flags & AVF_DEVICE_F_LINK_UP) != 0)
	    {
	      ad->flags &= ~AVF_DEVICE_F_LINK_UP;
	      ad->link_speed = 0;
	    }

	  if (ad->flags & AVF_DEVICE_F_ELOG)
	    {
	      ELOG_TYPE_DECLARE (el) =
		{
		  .format = "avf[%d] link change: link_status %d "
		    "link_speed %d",
		  .format_args = "i4i1i1",
		};
	      struct
		{
		  u32 dev_instance;
		  u8 link_status;
		  u8 link_speed;
		} *ed;
	      ed = ELOG_DATA (&vm->elog_main, el);
	      ed->dev_instance = ad->dev_instance;
	      ed->link_status = link_up;
	      ed->link_speed = speed;
	    }
	}
      else
	{
	  /* unrecognized event types are only logged, never acted on */
	  if (ad->flags & AVF_DEVICE_F_ELOG)
	    {
	      ELOG_TYPE_DECLARE (el) =
		{
		  .format = "avf[%d] unknown event: event %d severity %d",
		  .format_args = "i4i4i1i1",
		};
	      struct
		{
		  u32 dev_instance;
		  u32 event;
		  u32 severity;
		} *ed;
	      ed = ELOG_DATA (&vm->elog_main, el);
	      ed->dev_instance = ad->dev_instance;
	      ed->event = e->event;
	      ed->severity = e->severity;
	    }
	}
    }
  /* *INDENT-ON* */
  vec_reset_length (ad->events);

  return;

error:
  ad->flags |= AVF_DEVICE_F_ERROR;
  ASSERT (ad->error != 0);
}
+
+static u32
+avf_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hw, u32 flags)
+{
+ clib_warning ("TODO");
+ return 0;
+}
+
static uword
avf_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  /* Background process: once started it wakes up either on an explicit
     event (start/stop/admin-queue irq) or on a ~5 s periodic timer and
     runs avf_process_one_device() over every device in the pool. */
  avf_main_t *am = &avf_main;
  avf_device_t *ad;
  uword *event_data = 0, event_type;
  int enabled = 0, irq;
  f64 last_run_duration = 0;
  f64 last_periodic_time = 0;

  while (1)
    {
      /* subtract the previous iteration's cost to keep the period
         close to 5 seconds */
      if (enabled)
	vlib_process_wait_for_event_or_clock (vm, 5.0 - last_run_duration);
      else
	vlib_process_wait_for_event (vm);

      event_type = vlib_process_get_events (vm, &event_data);
      vec_reset_length (event_data);
      irq = 0;

      switch (event_type)
	{
	case ~0:
	  /* timer expired -- periodic poll */
	  last_periodic_time = vlib_time_now (vm);
	  break;
	case AVF_PROCESS_EVENT_START:
	  enabled = 1;
	  break;
	case AVF_PROCESS_EVENT_STOP:
	  enabled = 0;
	  continue;
	case AVF_PROCESS_EVENT_AQ_INT:
	  irq = 1;
	  break;
	default:
	  ASSERT (0);
	}

      /* *INDENT-OFF* */
      pool_foreach (ad, am->devices,
        {
	  avf_process_one_device (vm, ad, irq);
        });
      /* *INDENT-ON* */
      last_run_duration = vlib_time_now (vm) - last_periodic_time;
    }
  return 0;			/* never reached */
}
+
/* *INDENT-OFF* */
/* Process node that owns the periodic stats poll and admin queue event
   handling for all AVF devices. */
VLIB_REGISTER_NODE (avf_process_node, static)  = {
  .function = avf_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "avf-process",
};
/* *INDENT-ON* */
+
static void
avf_irq_0_handler (vlib_pci_dev_handle_t h, u16 line)
{
  /* MSI-X vector 0 handler: reads and acks ICR0, re-enables the
     interrupt and, if the admin queue bit is set, signals the avf
     process to service it. */
  vlib_main_t *vm = vlib_get_main ();
  avf_main_t *am = &avf_main;
  uword pd = vlib_pci_get_private_data (h);
  avf_device_t *ad = pool_elt_at_index (am->devices, pd);
  u32 icr0;

  /* reading ICR0 also clears the latched cause bits */
  icr0 = avf_reg_read (ad, AVFINT_ICR0);

  if (ad->flags & AVF_DEVICE_F_ELOG)
    {
      /* *INDENT-OFF* */
      ELOG_TYPE_DECLARE (el) =
	{
	  .format = "avf[%d] irq 0: icr0 0x%x",
	  .format_args = "i4i4",
	};
      /* *INDENT-ON* */
      struct
      {
	u32 dev_instance;
	u32 icr0;
      } *ed;

      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->icr0 = icr0;
    }

  avf_irq_0_enable (ad);

  /* bit 30 - Send/Receive Admin queue interrupt indication */
  if (icr0 & (1 << 30))
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_AQ_INT, 0);
}
+
static void
avf_irq_n_handler (vlib_pci_dev_handle_t h, u16 line)
{
  /* MSI-X data-path vector handler: only logs (when elog is on) and
     re-arms the interrupt; actual rx work is done by the polling
     avf-input node. */
  vlib_main_t *vm = vlib_get_main ();
  avf_main_t *am = &avf_main;
  uword pd = vlib_pci_get_private_data (h);
  avf_device_t *ad = pool_elt_at_index (am->devices, pd);

  if (ad->flags & AVF_DEVICE_F_ELOG)
    {
      /* *INDENT-OFF* */
      ELOG_TYPE_DECLARE (el) =
	{
	  .format = "avf[%d] irq %d: received",
	  .format_args = "i4i2",
	};
      /* *INDENT-ON* */
      struct
      {
	u32 dev_instance;
	u16 line;
      } *ed;

      ed = ELOG_DATA (&vm->elog_main, el);
      ed->dev_instance = ad->dev_instance;
      ed->line = line;
    }

  avf_irq_n_enable (ad, 0);
}
+
void
avf_delete_if (vlib_main_t * vm, avf_device_t * ad)
{
  /* Tear down a device: remove the ethernet interface (if one was
     created), close the PCI device, free all DMA rings/buffers and
     return the device slot to the pool.  Also used as the error-path
     cleanup for a partially constructed device (see avf_create_if). */
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;
  int i;

  /* NOTE(review): hw_if_index == 0 is treated as "no interface created
     yet"; confirm 0 can never be a valid hw interface index here. */
  if (ad->hw_if_index)
    {
      vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
      vnet_hw_interface_unassign_rx_thread (vnm, ad->hw_if_index, 0);
      ethernet_delete_interface (vnm, ad->hw_if_index);
    }

  vlib_pci_device_close (ad->pci_dev_handle);

  /* admin queue rings and their buffers */
  vlib_physmem_free (vm, am->physmem_region, ad->atq);
  vlib_physmem_free (vm, am->physmem_region, ad->arq);
  vlib_physmem_free (vm, am->physmem_region, ad->atq_bufs);
  vlib_physmem_free (vm, am->physmem_region, ad->arq_bufs);

  /* *INDENT-OFF* */
  vec_foreach_index (i, ad->rxqs)
    {
      avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, i);
      vlib_physmem_free (vm, am->physmem_region, rxq->descs);
      if (rxq->n_bufs)
	vlib_buffer_free_from_ring (vm, rxq->bufs, rxq->next, rxq->size,
				    rxq->n_bufs);
      vec_free (rxq->bufs);
    }
  /* *INDENT-ON* */
  vec_free (ad->rxqs);

  /* *INDENT-OFF* */
  vec_foreach_index (i, ad->txqs)
    {
      avf_txq_t *txq = vec_elt_at_index (ad->txqs, i);
      vlib_physmem_free (vm, am->physmem_region, txq->descs);
      if (txq->n_bufs)
	{
	  /* oldest in-flight buffer sits n_bufs slots behind 'next' */
	  u16 first = (txq->next - txq->n_bufs) & (txq->size -1);
	  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
				      txq->n_bufs);
	}
      vec_free (txq->bufs);
    }
  /* *INDENT-ON* */
  vec_free (ad->txqs);

  clib_error_free (ad->error);
  memset (ad, 0, sizeof (*ad));
  pool_put (am->devices, ad);
}
+
void
avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
{
  /* Create an AVF interface for the PCI device in args->addr: open the
     device, set up MSI-X handlers, allocate admin queue DMA memory,
     initialize the VF and register the ethernet interface.  Any failure
     jumps to the cleanup path, which deletes the partially built device
     and reports the error through args->rv / args->error. */
  vnet_main_t *vnm = vnet_get_main ();
  avf_main_t *am = &avf_main;
  avf_device_t *ad;
  vlib_pci_dev_handle_t h;
  clib_error_t *error = 0;

  pool_get (am->devices, ad);
  ad->dev_instance = ad - am->devices;
  ad->per_interface_next_index = ~0;	/* ~0 = use ptype-based next node */

  if (args->enable_elog)
    ad->flags |= AVF_DEVICE_F_ELOG;

  if ((error = vlib_pci_device_open (&args->addr, avf_pci_device_ids, &h)))
    goto error;
  ad->pci_dev_handle = h;

  /* let the irq handlers map the pci handle back to the device */
  vlib_pci_set_private_data (h, ad->dev_instance);

  if ((error = vlib_pci_bus_master_enable (h)))
    goto error;

  if ((error = vlib_pci_map_region (h, 0, &ad->bar0)))
    goto error;

  /* vector 0: admin queue / misc, vector 1: data path */
  if ((error = vlib_pci_register_msix_handler (h, 0, 1, &avf_irq_0_handler)))
    goto error;

  if ((error = vlib_pci_register_msix_handler (h, 1, 1, &avf_irq_n_handler)))
    goto error;

  if ((error = vlib_pci_enable_msix_irq (h, 0, 2)))
    goto error;

  /* one shared physmem region for all devices, allocated lazily */
  if (am->physmem_region_alloc == 0)
    {
      u32 flags = VLIB_PHYSMEM_F_INIT_MHEAP | VLIB_PHYSMEM_F_HUGETLB;
      error = vlib_physmem_region_alloc (vm, "avf descriptors", 4 << 20, 0,
					 flags, &am->physmem_region);
      if (error)
	goto error;
      am->physmem_region_alloc = 1;
    }
  ad->atq = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					sizeof (avf_aq_desc_t) * AVF_MBOX_LEN,
					64);
  if (error)
    goto error;

  ad->arq = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					sizeof (avf_aq_desc_t) * AVF_MBOX_LEN,
					64);
  if (error)
    goto error;

  ad->atq_bufs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					     AVF_MBOX_BUF_SZ * AVF_MBOX_LEN,
					     64);
  if (error)
    goto error;

  ad->arq_bufs = vlib_physmem_alloc_aligned (vm, am->physmem_region, &error,
					     AVF_MBOX_BUF_SZ * AVF_MBOX_LEN,
					     64);
  if (error)
    goto error;

  if ((error = vlib_pci_intr_enable (h)))
    goto error;

  /* FIXME detect */
  ad->flags |= AVF_DEVICE_F_IOVA;

  if ((error = avf_device_init (vm, ad)))
    goto error;

  /* create interface */
  error = ethernet_register_interface (vnm, avf_device_class.index,
				       ad->dev_instance, ad->hwaddr,
				       &ad->hw_if_index, avf_flag_change);

  if (error)
    goto error;

  vnet_sw_interface_t *sw = vnet_get_hw_sw_interface (vnm, ad->hw_if_index);
  ad->sw_if_index = sw->sw_if_index;

  vnet_hw_interface_set_input_node (vnm, ad->hw_if_index,
				    avf_input_node.index);

  /* first device kicks off the background process */
  if (pool_elts (am->devices) == 1)
    vlib_process_signal_event (vm, avf_process_node.index,
			       AVF_PROCESS_EVENT_START, 0);

  return;

error:
  avf_delete_if (vm, ad);
  args->rv = VNET_API_ERROR_INVALID_INTERFACE;
  args->error = clib_error_return (error, "pci-addr %U",
				   format_vlib_pci_addr, &args->addr);
}
+
+static clib_error_t *
+avf_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
+{
+ vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, hw_if_index);
+ avf_main_t *am = &avf_main;
+ avf_device_t *ad = vec_elt_at_index (am->devices, hi->dev_instance);
+ uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
+
+ if (ad->flags & AVF_DEVICE_F_ERROR)
+ return clib_error_return (0, "device is in error state");
+
+ if (is_up)
+ {
+ vnet_hw_interface_set_flags (vnm, ad->hw_if_index,
+ VNET_HW_INTERFACE_FLAG_LINK_UP);
+ ad->flags |= AVF_DEVICE_F_ADMIN_UP;
+ vnet_hw_interface_assign_rx_thread (vnm, ad->hw_if_index, 0, ~0);
+ }
+ else
+ {
+ vnet_hw_interface_set_flags (vnm, ad->hw_if_index, 0);
+ ad->flags &= ~AVF_DEVICE_F_ADMIN_UP;
+ }
+ return 0;
+}
+
/* *INDENT-OFF* */
/* Device class registration: wires tx, device formatting and admin
   up/down handling into vnet for AVF interfaces. */
VNET_DEVICE_CLASS (avf_device_class,) =
{
  .name = "Adaptive Virtual Function (AVF) interface",
  .tx_function = avf_interface_tx,
  .format_device = format_avf_device,
  .format_device_name = format_avf_device_name,
  .admin_up_down_function = avf_interface_admin_up_down,
};
/* *INDENT-ON* */
+
clib_error_t *
avf_init (vlib_main_t * vm)
{
  /* Plugin init: allocate per-thread data and build the 256-entry
     packet-type lookup table that maps the rx descriptor ptype field to
     a next node, buffer flags and a buffer advance. */
  avf_main_t *am = &avf_main;
  clib_error_t *error;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  int i;

  if ((error = vlib_call_init_function (vm, pci_bus_init)))
    return error;

  vec_validate_aligned (am->per_thread_data, tm->n_vlib_mains - 1,
			CLIB_CACHE_LINE_BYTES);

  /* initialize ptype based lookup table */
  vec_validate_aligned (am->ptypes, 255, CLIB_CACHE_LINE_BYTES);

  /* *INDENT-OFF* */
  vec_foreach_index (i, am->ptypes)
    {
      avf_ptype_t *p = vec_elt_at_index (am->ptypes, i);
      /* NOTE(review): the 22-87 and 88-153 ranges presumably come from
         the hardware packet-type table (IPv4 vs IPv6 ptypes) -- confirm
         against the datasheet. */
      if ((i >= 22) && (i <= 87))
	{
	  p->next_node = VNET_DEVICE_INPUT_NEXT_IP4_NCS_INPUT;
	  p->flags = VNET_BUFFER_F_IS_IP4;
	}
      else if ((i >= 88) && (i <= 153))
	{
	  p->next_node = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
	  p->flags = VNET_BUFFER_F_IS_IP6;
	}
      else
	p->next_node = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
      p->buffer_advance = device_input_next_node_advance[p->next_node];
      p->flags |= VLIB_BUFFER_TOTAL_LENGTH_VALID;
    }
  /* *INDENT-ON* */

  return 0;
}
+
+VLIB_INIT_FUNCTION (avf_init);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/format.c b/src/plugins/avf/format.c
new file mode 100644
index 00000000000..32b9d3f14a6
--- /dev/null
+++ b/src/plugins/avf/format.c
@@ -0,0 +1,145 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+
+#include <avf/avf.h>
+
+u8 *
+format_avf_device_name (u8 * s, va_list * args)
+{
+ u32 i = va_arg (*args, u32);
+ avf_main_t *am = &avf_main;
+ avf_device_t *ad = vec_elt_at_index (am->devices, i);
+ vlib_pci_addr_t *addr = vlib_pci_get_addr (ad->pci_dev_handle);
+
+ s = format (s, "AVF%x/%x/%x/%x",
+ addr->domain, addr->bus, addr->slot, addr->function);
+ return s;
+}
+
u8 *
format_avf_device_flags (u8 * s, va_list * args)
{
  /* Render the set bits of ad->flags as a space separated list of the
     names from foreach_avf_device_flags. */
  avf_device_t *ad = va_arg (*args, avf_device_t *);
  u8 *t = 0;

#define _(a, b, c) if (ad->flags & (1 << a)) \
t = format (t, "%s%s", t ? " ":"", c);
  foreach_avf_device_flags
#undef _
  s = format (s, "%v", t);
  vec_free (t);
  return s;
}
+
u8 *
format_avf_vf_cap_flags (u8 * s, va_list * args)
{
  /* Render a VF offload capability bitmap as a space separated list of
     the names from foreach_avf_vf_cap_flag. */
  u32 flags = va_arg (*args, u32);
  u8 *t = 0;

#define _(a, b, c) if (flags & (1 << a)) \
  t = format (t, "%s%s", t ? " ":"", c);
  foreach_avf_vf_cap_flag;
#undef _
  s = format (s, "%v", t);
  vec_free (t);
  return s;
}
+
static u8 *
format_virtchnl_link_speed (u8 * s, va_list * args)
{
  /* Print a virtchnl link speed as a human readable string; zero (or an
     unmatched value) prints as "unknown" / nothing. */
  virtchnl_link_speed_t speed = va_arg (*args, virtchnl_link_speed_t);

  if (speed == 0)
    return format (s, "unknown");
#define _(a, b, c) \
  else if (speed == VIRTCHNL_LINK_SPEED_##b) \
    return format (s, c);
  foreach_virtchnl_link_speed;
#undef _
  return s;
}
+
u8 *
format_avf_device (u8 * s, va_list * args)
{
  /* Multi-line device dump for "show hardware": flags, offload
     capabilities, resource limits, link speed, last error and any
     non-zero ethernet stats. */
  u32 i = va_arg (*args, u32);
  avf_main_t *am = &avf_main;
  avf_device_t *ad = vec_elt_at_index (am->devices, i);
  u32 indent = format_get_indent (s);
  u8 *a = 0;

  s = format (s, "flags: %U", format_avf_device_flags, ad);
  s = format (s, "\n%Uoffload features: %U", format_white_space, indent,
	      format_avf_vf_cap_flags, ad->feature_bitmap);

  s = format (s, "\n%Unum-queue-pairs %d max-vectors %u max-mtu %u "
	      "rss-key-size %u rss-lut-size %u", format_white_space, indent,
	      ad->num_queue_pairs, ad->max_vectors, ad->max_mtu,
	      ad->rss_key_size, ad->rss_lut_size);
  s = format (s, "\n%Uspeed %U", format_white_space, indent,
	      format_virtchnl_link_speed, ad->link_speed);
  if (ad->error)
    s = format (s, "\n%Uerror %U", format_white_space, indent,
		format_clib_error, ad->error);

  /* collect only the non-zero counters into 'a', then append them */
#define _(c) if (ad->eth_stats.c) \
  a = format (a, "\n%U%-20U %u", format_white_space, indent + 2, \
	      format_c_identifier, #c, ad->eth_stats.c);
  foreach_virtchnl_eth_stats;
#undef _
  if (a)
    s = format (s, "\n%Ustats:%v", format_white_space, indent, a);

  vec_free (a);
  return s;
}
+
u8 *
format_avf_input_trace (u8 * s, va_list * args)
{
  /* Packet trace formatter for avf-input: interface, chosen next node
     and the raw rx descriptor fields captured in the rx vector entry. */
  vlib_main_t *vm = va_arg (*args, vlib_main_t *);
  vlib_node_t *node = va_arg (*args, vlib_node_t *);
  avf_input_trace_t *t = va_arg (*args, avf_input_trace_t *);
  vnet_main_t *vnm = vnet_get_main ();
  vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, t->hw_if_index);
  u32 indent = format_get_indent (s);
  avf_rx_vector_entry_t *rxve = &t->rxve;

  s = format (s, "avf: %v (%d) next-node %U",
	      hi->name, t->hw_if_index, format_vlib_next_node_name, vm,
	      node->index, t->next_index);
  s = format (s, "\n%Ustatus 0x%x error 0x%x ptype 0x%x length %u",
	      format_white_space, indent + 2, rxve->status, rxve->error,
	      rxve->ptype, rxve->length);

  return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/input.c b/src/plugins/avf/input.c
new file mode 100644
index 00000000000..a769bf79646
--- /dev/null
+++ b/src/plugins/avf/input.c
@@ -0,0 +1,439 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+
+#include <avf/avf.h>
+
/* Per-node error counters reported by "show errors". */
#define foreach_avf_input_error \
  _(BUFFER_ALLOC, "buffer alloc error") \
  _(RX_PACKET_ERROR, "Rx packet errors")

typedef enum
{
#define _(f,s) AVF_INPUT_ERROR_##f,
  foreach_avf_input_error
#undef _
  AVF_INPUT_N_ERROR,
} avf_input_error_t;

static __clib_unused char *avf_input_error_strings[] = {
#define _(n,s) s,
  foreach_avf_input_error
#undef _
};
+
/* Rx descriptor status bits (low bits of qword[1]).  The macro argument
   is fully parenthesized so expressions expand correctly. */
#define AVF_RX_DESC_STATUS(x) (1 << (x))
#define AVF_RX_DESC_STATUS_DD AVF_RX_DESC_STATUS(0)	/* descriptor done */
#define AVF_RX_DESC_STATUS_EOP AVF_RX_DESC_STATUS(1)	/* end of packet */
+
static_always_inline void
avf_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node, u32 next0,
		 vlib_buffer_t * b0, uword * n_trace, avf_device_t * ad,
		 avf_rx_vector_entry_t * rxve)
{
  /* Record one packet into the vlib trace buffer and decrement the
     caller's remaining-trace counter. */
  avf_input_trace_t *tr;
  vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
  vlib_set_trace_count (vm, node, --(*n_trace));
  tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
  tr->next_index = next0;
  tr->hw_if_index = ad->hw_if_index;
  clib_memcpy (&tr->rxve, rxve, sizeof (avf_rx_vector_entry_t));
}
+
+#define AVF_INPUT_REFILL_TRESHOLD 32
+static_always_inline void
+avf_rxq_refill (vlib_main_t * vm, vlib_node_runtime_t * node, avf_rxq_t * rxq,
+ int use_iova)
+{
+ u16 n_refill, mask, n_alloc, slot;
+ avf_rx_desc_t *d;
+
+ n_refill = rxq->size - 1 - rxq->n_bufs;
+ if (PREDICT_TRUE (n_refill <= AVF_INPUT_REFILL_TRESHOLD))
+ return;
+
+ mask = rxq->size - 1;
+ slot = (rxq->next - n_refill - 1) & mask;
+
+ n_refill &= ~7; /* round to 8 */
+ n_alloc = vlib_buffer_alloc_to_ring (vm, rxq->bufs, slot, rxq->size,
+ n_refill);
+
+ if (PREDICT_FALSE (n_alloc != n_refill))
+ {
+ vlib_error_count (vm, node->node_index,
+ AVF_INPUT_ERROR_BUFFER_ALLOC, 1);
+ if (n_alloc)
+ vlib_buffer_free (vm, rxq->bufs + slot, n_alloc);
+ return;
+ }
+
+ rxq->n_bufs += n_alloc;
+
+ while (n_alloc--)
+ {
+ u64 addr;
+ d = ((avf_rx_desc_t *) rxq->descs) + slot;
+ if (use_iova)
+ {
+ vlib_buffer_t *b = vlib_get_buffer (vm, rxq->bufs[slot]);
+ addr = pointer_to_uword (b->data);
+ }
+ else
+ addr = vlib_get_buffer_data_physical_address (vm, rxq->bufs[slot]);
+ d->qword[0] = addr;
+ d->qword[1] = 0;
+ slot = (slot + 1) & mask;
+ }
+
+ CLIB_MEMORY_BARRIER ();
+ *(rxq->qrx_tail) = slot;
+}
+
+static_always_inline void
+avf_check_for_error (vlib_node_runtime_t * node, avf_rx_vector_entry_t * rxve,
+ vlib_buffer_t * b, u32 * next)
+{
+ avf_main_t *am = &avf_main;
+ avf_ptype_t *ptype;
+ if (PREDICT_FALSE (rxve->error))
+ {
+ b->error = node->errors[AVF_INPUT_ERROR_RX_PACKET_ERROR];
+ ptype = am->ptypes + rxve->ptype;
+ /* retract */
+ vlib_buffer_advance (b, --ptype->buffer_advance);
+ *next = VNET_DEVICE_INPUT_NEXT_DROP;
+ }
+}
+
static_always_inline u32
avf_find_next (avf_rx_vector_entry_t * rxve, vlib_buffer_t * b,
	       int maybe_tagged)
{
  /* Pick the next node from the descriptor's packet type.  VLAN-tagged
     frames always go to ethernet-input untouched; otherwise the
     per-ptype buffer advance and flags are applied. */
  avf_main_t *am = &avf_main;
  ethernet_header_t *e = (ethernet_header_t *) b->data;
  avf_ptype_t *ptype;
  if (maybe_tagged && ethernet_frame_is_tagged (e->type))
    return VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  ptype = am->ptypes + rxve->ptype;
  vlib_buffer_advance (b, ptype->buffer_advance);
  b->flags |= ptype->flags;
  return ptype->next_node;
}
+
+
static_always_inline uword
avf_device_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
			 vlib_frame_t * frame, avf_device_t * ad, u16 qid,
			 int with_features_or_tracing)
{
  /* Rx path for one queue: drain completed descriptors into a local rx
     vector, refill the ring, then enqueue buffers to their next nodes
     in quads with a scalar tail loop.  'with_features_or_tracing' is a
     compile-time variant selector that adds feature-arc and trace
     handling. */
  avf_main_t *am = &avf_main;
  vnet_main_t *vnm = vnet_get_main ();
  u32 thr_idx = vlib_get_thread_index ();
  avf_per_thread_data_t *ptd =
    vec_elt_at_index (am->per_thread_data, thr_idx);
  avf_rxq_t *rxq = vec_elt_at_index (ad->rxqs, qid);
  avf_rx_vector_entry_t *rxve;
  uword n_trace = vlib_get_trace_count (vm, node);
  u32 next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
  avf_rx_desc_t *d;
  u32 *to_next = 0;
  u32 n_rx_packets = 0;
  u32 n_rx_bytes = 0;
  u32 sw_if_idx[VLIB_N_RX_TX] = {[VLIB_RX] = ad->sw_if_index,[VLIB_TX] = ~0 };
  u16 mask = rxq->size - 1;
  u16 n_rxv = 0;
  u8 maybe_error = 0;		/* OR of all per-packet error fields */

  /* fetch up to AVF_RX_VECTOR_SZ from the rx ring, unflatten them and
     copy needed data from descriptor to rx vector */
  d = rxq->descs + rxq->next;
  while ((d->qword[1] & AVF_RX_DESC_STATUS_DD) && n_rxv < AVF_RX_VECTOR_SZ)
    {
      u16 next_pf = (rxq->next + 8) & mask;
      CLIB_PREFETCH (rxq->descs + next_pf, CLIB_CACHE_LINE_BYTES, LOAD);
      rxve = ptd->rx_vector + n_rxv;
      rxve->bi = rxq->bufs[rxq->next];
      /* bit ranges within qword[1] of the writeback descriptor */
      rxve->status = avf_get_u64_bits (d, 8, 18, 0);
      rxve->error = avf_get_u64_bits (d, 8, 26, 19);
      rxve->ptype = avf_get_u64_bits (d, 8, 37, 30);
      rxve->length = avf_get_u64_bits (d, 8, 63, 38);
      maybe_error |= rxve->error;

      /* deal with chained buffers */
      while (PREDICT_FALSE ((d->qword[1] & AVF_RX_DESC_STATUS_EOP) == 0))
	{
	  /* multi-descriptor packets are not supported yet */
	  clib_error ("fixme");
	}

      /* next */
      rxq->next = (rxq->next + 1) & mask;
      d = rxq->descs + rxq->next;
      n_rxv++;
      rxq->n_bufs--;
    }

  if (n_rxv == 0)
    return 0;

  /* refill rx ring */
  if (ad->flags & AVF_DEVICE_F_IOVA)
    avf_rxq_refill (vm, node, rxq, 1 /* use_iova */ );
  else
    avf_rxq_refill (vm, node, rxq, 0 /* use_iova */ );

  n_rx_packets = n_rxv;
  rxve = ptd->rx_vector;
  while (n_rxv)
    {
      u32 n_left_to_next;
      u32 bi0, bi1, bi2, bi3;
      vlib_buffer_t *b0, *b1, *b2, *b3;
      u32 next0, next1, next2, next3;

      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);

      /* quad loop; needs 12 entries so the 4 being processed can
         prefetch the following 8 */
      while (n_rxv >= 12 && n_left_to_next >= 4)
	{
	  vlib_buffer_t *p;
	  p = vlib_get_buffer (vm, rxve[8].bi);
	  vlib_prefetch_buffer_header (p, LOAD);
	  CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);

	  p = vlib_get_buffer (vm, rxve[9].bi);
	  vlib_prefetch_buffer_header (p, LOAD);
	  CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);

	  p = vlib_get_buffer (vm, rxve[10].bi);
	  vlib_prefetch_buffer_header (p, LOAD);
	  CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);

	  p = vlib_get_buffer (vm, rxve[11].bi);
	  vlib_prefetch_buffer_header (p, LOAD);
	  CLIB_PREFETCH (p->data, CLIB_CACHE_LINE_BYTES, LOAD);

	  to_next[0] = bi0 = rxve[0].bi;
	  to_next[1] = bi1 = rxve[1].bi;
	  to_next[2] = bi2 = rxve[2].bi;
	  to_next[3] = bi3 = rxve[3].bi;

	  b0 = vlib_get_buffer (vm, bi0);
	  b1 = vlib_get_buffer (vm, bi1);
	  b2 = vlib_get_buffer (vm, bi2);
	  b3 = vlib_get_buffer (vm, bi3);

	  b0->current_length = rxve[0].length;
	  b1->current_length = rxve[1].length;
	  b2->current_length = rxve[2].length;
	  b3->current_length = rxve[3].length;

	  n_rx_bytes += b0->current_length;
	  n_rx_bytes += b1->current_length;
	  n_rx_bytes += b2->current_length;
	  n_rx_bytes += b3->current_length;

	  if (PREDICT_TRUE (ad->per_interface_next_index == ~0))
	    {
	      ethernet_header_t *e0, *e1, *e2, *e3;

	      e0 = (ethernet_header_t *) b0->data;
	      e1 = (ethernet_header_t *) b1->data;
	      e2 = (ethernet_header_t *) b2->data;
	      e3 = (ethernet_header_t *) b3->data;

	      /* the x4 tag check lets the untagged common case take the
	         cheaper per-packet path */
	      if (ethernet_frame_is_any_tagged_x4 (e0->type, e1->type,
						   e2->type, e3->type))
		{
		  next0 = avf_find_next (rxve, b0, 1);
		  next1 = avf_find_next (rxve + 1, b1, 1);
		  next2 = avf_find_next (rxve + 2, b2, 1);
		  next3 = avf_find_next (rxve + 3, b3, 1);
		}
	      else
		{
		  next0 = avf_find_next (rxve, b0, 0);
		  next1 = avf_find_next (rxve + 1, b1, 0);
		  next2 = avf_find_next (rxve + 2, b2, 0);
		  next3 = avf_find_next (rxve + 3, b3, 0);
		}

	      if (with_features_or_tracing)
		vnet_feature_start_device_input_x4 (ad->sw_if_index, &next0,
						    &next1, &next2, &next3,
						    b0, b1, b2, b3);

	      if (PREDICT_FALSE (maybe_error))
		{
		  avf_check_for_error (node, rxve + 0, b0, &next0);
		  avf_check_for_error (node, rxve + 1, b1, &next1);
		  avf_check_for_error (node, rxve + 2, b2, &next2);
		  avf_check_for_error (node, rxve + 3, b3, &next3);
		}
	    }
	  else
	    next0 = next1 = next2 = next3 = ad->per_interface_next_index;

	  clib_memcpy (vnet_buffer (b0)->sw_if_index, sw_if_idx,
		       sizeof (sw_if_idx));
	  clib_memcpy (vnet_buffer (b1)->sw_if_index, sw_if_idx,
		       sizeof (sw_if_idx));
	  clib_memcpy (vnet_buffer (b2)->sw_if_index, sw_if_idx,
		       sizeof (sw_if_idx));
	  clib_memcpy (vnet_buffer (b3)->sw_if_index, sw_if_idx,
		       sizeof (sw_if_idx));

	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b1);
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b2);
	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b3);

	  if (with_features_or_tracing && PREDICT_FALSE (n_trace))
	    {
	      avf_input_trace (vm, node, next0, b0, &n_trace, ad, rxve);
	      if (n_trace)
		avf_input_trace (vm, node, next1, b1, &n_trace, ad, rxve + 1);
	      if (n_trace)
		avf_input_trace (vm, node, next2, b2, &n_trace, ad, rxve + 2);
	      if (n_trace)
		avf_input_trace (vm, node, next3, b3, &n_trace, ad, rxve + 3);
	    }

	  /* next */
	  to_next += 4;
	  n_left_to_next -= 4;
	  rxve += 4;
	  n_rxv -= 4;

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x4 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, bi1, bi2, bi3,
					   next0, next1, next2, next3);
	}
      /* scalar tail loop */
      while (n_rxv && n_left_to_next)
	{
	  bi0 = rxve[0].bi;
	  to_next[0] = bi0;
	  b0 = vlib_get_buffer (vm, bi0);

	  b0->current_length = rxve->length;
	  n_rx_bytes += b0->current_length;

	  if (PREDICT_TRUE (ad->per_interface_next_index == ~0))
	    {
	      next0 = avf_find_next (rxve, b0, 1);
	      if (with_features_or_tracing)
		vnet_feature_start_device_input_x1 (ad->sw_if_index, &next0,
						    b0);
	      avf_check_for_error (node, rxve + 0, b0, &next0);
	    }
	  else
	    next0 = ad->per_interface_next_index;

	  clib_memcpy (vnet_buffer (b0)->sw_if_index, sw_if_idx,
		       sizeof (sw_if_idx));

	  VLIB_BUFFER_TRACE_TRAJECTORY_INIT (b0);
	  if (with_features_or_tracing && PREDICT_FALSE (n_trace > 0))
	    avf_input_trace (vm, node, next0, b0, &n_trace, ad, rxve);

	  /* next */
	  to_next += 1;
	  n_left_to_next -= 1;
	  rxve += 1;
	  n_rxv -= 1;

	  /* enqueue */
	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
					   n_left_to_next, bi0, next0);
	}
      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
    }

  /* NOTE(review): combined_sw_if_counters are normally indexed by sw
     interface index; ad->hw_if_index is passed here -- confirm the two
     are interchangeable for this device or whether this should be
     ad->sw_if_index. */
  vlib_increment_combined_counter (vnm->interface_main.combined_sw_if_counters
				   + VNET_INTERFACE_COUNTER_RX, thr_idx,
				   ad->hw_if_index, n_rx_packets, n_rx_bytes);

  return n_rx_packets;
}
+
uword
CLIB_MULTIARCH_FN (avf_input) (vlib_main_t * vm, vlib_node_runtime_t * node,
			       vlib_frame_t * frame)
{
  /* Input node entry point: poll every (device, queue) assigned to this
     thread, dispatching to the feature/trace-aware variant only when
     needed. */
  u32 n_rx = 0;
  avf_main_t *am = &avf_main;
  vnet_device_input_runtime_t *rt = (void *) node->runtime_data;
  vnet_device_and_queue_t *dq;

  foreach_device_and_queue (dq, rt->devices_and_queues)
  {
    avf_device_t *ad;
    ad = vec_elt_at_index (am->devices, dq->dev_instance);
    if ((ad->flags & AVF_DEVICE_F_ADMIN_UP) == 0)
      continue;
    if (vnet_device_input_have_features (ad->sw_if_index) ||
	vlib_get_trace_count (vm, node))
      n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id, 1);
    else
      n_rx += avf_device_input_inline (vm, node, frame, ad, dq->queue_id, 0);
  }
  return n_rx;
}
+
#ifndef CLIB_MULTIARCH_VARIANT
/* *INDENT-OFF* */
/* Polling input node registration; errors come from the
   foreach_avf_input_error table above. */
VLIB_REGISTER_NODE (avf_input_node) = {
  .function = avf_input,
  .name = "avf-input",
  .sibling_of = "device-input",
  .format_trace = format_avf_input_trace,
  .type = VLIB_NODE_TYPE_INPUT,
  .state = VLIB_NODE_STATE_POLLING,
  .n_errors = AVF_INPUT_N_ERROR,
  .error_strings = avf_input_error_strings,
};

#if __x86_64__
/* at startup, swap in the most capable SIMD variant the CPU supports;
   the weak symbols resolve to 0 when the variant was not built */
vlib_node_function_t __clib_weak avf_input_avx512;
vlib_node_function_t __clib_weak avf_input_avx2;
static void __clib_constructor
avf_input_multiarch_select (void)
{
  if (avf_input_avx512 && clib_cpu_supports_avx512f ())
    avf_input_node.function = avf_input_avx512;
  else if (avf_input_avx2 && clib_cpu_supports_avx2 ())
    avf_input_node.function = avf_input_avx2;
}

#endif
#endif

/* *INDENT-ON* */
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/output.c b/src/plugins/avf/output.c
new file mode 100644
index 00000000000..8e5fa6a43f8
--- /dev/null
+++ b/src/plugins/avf/output.c
@@ -0,0 +1,183 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vlib/pci/pci.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/devices/devices.h>
+
+#include <avf/avf.h>
+
+/* Build a tx descriptor command bit: the CMD field starts at bit 4 of
+   qword[1].  Argument parenthesized so expression arguments expand
+   correctly. */
+#define AVF_TXQ_DESC_CMD(x)             (1 << ((x) + 4))
+#define AVF_TXQ_DESC_CMD_EOP            AVF_TXQ_DESC_CMD(0)
+#define AVF_TXQ_DESC_CMD_RS             AVF_TXQ_DESC_CMD(1)
+
+/* Return the DTYPE field (low 4 bits of qword[1]) of a tx descriptor. */
+static_always_inline u8
+avf_tx_desc_get_dtyp (avf_tx_desc_t * d)
+{
+  u64 qw1 = d->qword[1];
+  return (u8) (qw1 & 0x0f);
+}
+
+/*
+ * Device class TX function: enqueue every packet in 'frame' on the tx
+ * ring owned by the calling thread (queue = thread_index modulo
+ * num_queue_pairs), then write the ring tail register to hand the
+ * descriptors to the hw.  Completed descriptors from earlier calls are
+ * reclaimed first.
+ *
+ * NOTE(review): there is no free-slot check before enqueueing, so a frame
+ * larger than the available ring space would overwrite descriptors still
+ * owned by the hw - TODO: drop and count excess packets.
+ */
+uword
+CLIB_MULTIARCH_FN (avf_interface_tx) (vlib_main_t * vm,
+				      vlib_node_runtime_t * node,
+				      vlib_frame_t * frame)
+{
+  avf_main_t *am = &avf_main;
+  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
+  avf_device_t *ad = pool_elt_at_index (am->devices, rd->dev_instance);
+  u32 thread_index = vlib_get_thread_index ();
+  u8 qid = thread_index;
+  avf_txq_t *txq = vec_elt_at_index (ad->txqs, qid % ad->num_queue_pairs);
+  avf_tx_desc_t *d0, *d1, *d2, *d3;
+  u32 *buffers = vlib_frame_args (frame);
+  u32 bi0, bi1, bi2, bi3;
+  u16 n_left = frame->n_vectors;
+  vlib_buffer_t *b0, *b1, *b2, *b3;
+  u16 mask = txq->size - 1;
+
+  clib_spinlock_lock_if_init (&txq->lock);
+
+  /* release consumed bufs: descriptors the hw has written back carry
+     DTYPE 0xF, so walk forward from the oldest outstanding slot */
+  if (txq->n_bufs)
+    {
+      u16 first, slot, n_free = 0;
+      first = slot = (txq->next - txq->n_bufs) & mask;
+      d0 = txq->descs + slot;
+      while (n_free < txq->n_bufs && avf_tx_desc_get_dtyp (d0) == 0x0F)
+	{
+	  n_free++;
+	  slot = (slot + 1) & mask;
+	  d0 = txq->descs + slot;
+	}
+
+      if (n_free)
+	{
+	  txq->n_bufs -= n_free;
+	  vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
+				      n_free);
+	}
+    }
+
+  /* quad loop: requires 8 packets left so the buffers[4..7] prefetches
+     below stay inside the frame (the previous 'n_left >= 7' condition
+     read one element past the end when exactly 7 remained) */
+  while (n_left >= 8)
+    {
+      u16 slot0, slot1, slot2, slot3;
+
+      vlib_prefetch_buffer_with_index (vm, buffers[4], LOAD);
+      vlib_prefetch_buffer_with_index (vm, buffers[5], LOAD);
+      vlib_prefetch_buffer_with_index (vm, buffers[6], LOAD);
+      vlib_prefetch_buffer_with_index (vm, buffers[7], LOAD);
+
+      slot0 = txq->next;
+      slot1 = (txq->next + 1) & mask;
+      slot2 = (txq->next + 2) & mask;
+      slot3 = (txq->next + 3) & mask;
+
+      d0 = txq->descs + slot0;
+      d1 = txq->descs + slot1;
+      d2 = txq->descs + slot2;
+      d3 = txq->descs + slot3;
+
+      bi0 = buffers[0];
+      bi1 = buffers[1];
+      bi2 = buffers[2];
+      bi3 = buffers[3];
+
+      /* remember buffer indices so they can be freed after hw writeback */
+      txq->bufs[slot0] = bi0;
+      txq->bufs[slot1] = bi1;
+      txq->bufs[slot2] = bi2;
+      txq->bufs[slot3] = bi3;
+      b0 = vlib_get_buffer (vm, bi0);
+      b1 = vlib_get_buffer (vm, bi1);
+      b2 = vlib_get_buffer (vm, bi2);
+      b3 = vlib_get_buffer (vm, bi3);
+
+      /* NOTE(review): descriptors are programmed with buffer virtual
+	 addresses - presumably this relies on a VFIO/IOMMU identity
+	 mapping; otherwise vlib_get_buffer_data_physical_address ()
+	 translation is needed - confirm */
+      d0->qword[0] = pointer_to_uword (b0->data);
+      d1->qword[0] = pointer_to_uword (b1->data);
+      d2->qword[0] = pointer_to_uword (b2->data);
+      d3->qword[0] = pointer_to_uword (b3->data);
+
+      /* qword[1]: buffer length shifted to bit 34, EOP + RS commands set */
+      u64 bits = AVF_TXQ_DESC_CMD_EOP | AVF_TXQ_DESC_CMD_RS;
+      d0->qword[1] = ((u64) b0->current_length) << 34 | bits;
+      d1->qword[1] = ((u64) b1->current_length) << 34 | bits;
+      d2->qword[1] = ((u64) b2->current_length) << 34 | bits;
+      d3->qword[1] = ((u64) b3->current_length) << 34 | bits;
+
+      txq->next = (txq->next + 4) & mask;
+      txq->n_bufs += 4;
+      buffers += 4;
+      n_left -= 4;
+    }
+
+  /* single loop for the remaining packets */
+  while (n_left)
+    {
+      d0 = txq->descs + txq->next;
+      bi0 = buffers[0];
+      txq->bufs[txq->next] = bi0;
+      b0 = vlib_get_buffer (vm, bi0);
+
+      /* see VA/PA note in the quad loop above */
+      d0->qword[0] = pointer_to_uword (b0->data);
+      d0->qword[1] = ((u64) b0->current_length) << 34;
+      d0->qword[1] |= AVF_TXQ_DESC_CMD_EOP | AVF_TXQ_DESC_CMD_RS;
+
+      txq->next = (txq->next + 1) & mask;
+      txq->n_bufs++;
+      buffers++;
+      n_left--;
+    }
+  /* ensure descriptor writes are globally visible before the tail bump */
+  CLIB_MEMORY_BARRIER ();
+  *(txq->qtx_tail) = txq->next;
+
+  clib_spinlock_unlock_if_init (&txq->lock);
+
+  /* n_left is always 0 here - every packet in the frame was queued */
+  return frame->n_vectors - n_left;
+}
+
+#ifndef CLIB_MULTIARCH_VARIANT
+#if __x86_64__
+/* weak symbols: non-NULL only when the AVX2/AVX512 variant objects are
+   linked in */
+vlib_node_function_t __clib_weak avf_interface_tx_avx512;
+vlib_node_function_t __clib_weak avf_interface_tx_avx2;
+/* constructor: at load time, select the best tx variant the CPU supports */
+static void __clib_constructor
+avf_interface_tx_multiarch_select (void)
+{
+  if (avf_interface_tx_avx512 && clib_cpu_supports_avx512f ())
+    avf_device_class.tx_function = avf_interface_tx_avx512;
+  else if (avf_interface_tx_avx2 && clib_cpu_supports_avx2 ())
+    avf_device_class.tx_function = avf_interface_tx_avx2;
+}
+#endif
+#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/plugin.c b/src/plugins/avf/plugin.c
new file mode 100644
index 00000000000..d2a043ac2cb
--- /dev/null
+++ b/src/plugins/avf/plugin.c
@@ -0,0 +1,35 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
+/* *INDENT-OFF* */
+/* Register this shared object as a VPP plugin. */
+VLIB_PLUGIN_REGISTER () = {
+  .version = VPP_BUILD_VER,
+  .description = "Intel Adaptive Virtual Function (AVF) Device Plugin",
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/avf/virtchnl.h b/src/plugins/avf/virtchnl.h
new file mode 100644
index 00000000000..671da959c2b
--- /dev/null
+++ b/src/plugins/avf/virtchnl.h
@@ -0,0 +1,342 @@
+/*
+ *------------------------------------------------------------------
+ * Copyright (c) 2018 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+/* virtchnl protocol version implemented by this driver */
+#define VIRTCHNL_VERSION_MAJOR 1
+#define VIRTCHNL_VERSION_MINOR 1
+
+
+/* VF BAR0 register offsets (interrupt control, admin queues, queue tail
+   pointers).  Macro arguments parenthesized so expression arguments such
+   as AVFINT_DYN_CTLN (n - 1) expand correctly. */
+#define AVFINT_DYN_CTLN(x) (0x00003800 + (0x4 * (x)))
+#define AVFINT_ICR0 0x00004800
+#define AVFINT_ICR0_ENA1 0x00005000
+#define AVFINT_DYN_CTL0 0x00005C00
+#define AVF_ARQBAH 0x00006000
+#define AVF_ATQH 0x00006400
+#define AVF_ATQLEN 0x00006800
+#define AVF_ARQBAL 0x00006C00
+#define AVF_ARQT 0x00007000
+#define AVF_ARQH 0x00007400
+#define AVF_ATQBAH 0x00007800
+#define AVF_ATQBAL 0x00007C00
+#define AVF_ARQLEN 0x00008000
+#define AVF_ATQT 0x00008400
+#define AVFGEN_RSTAT 0x00008800
+#define AVF_QTX_TAIL(q) (0x00000000 + (0x4 * (q)))
+#define AVF_QRX_TAIL(q) (0x00002000 + (0x4 * (q)))
+
+/* admin queue descriptor 'flags' field bits */
+#define AVF_AQ_F_DD (1 << 0)
+#define AVF_AQ_F_CMP (1 << 1)
+#define AVF_AQ_F_ERR (1 << 2)
+#define AVF_AQ_F_VFE (1 << 3)
+#define AVF_AQ_F_LB (1 << 9)
+#define AVF_AQ_F_RD (1 << 10)
+#define AVF_AQ_F_VFC (1 << 11)
+#define AVF_AQ_F_BUF (1 << 12)
+#define AVF_AQ_F_SI (1 << 13)
+#define AVF_AQ_F_EI (1 << 14)
+#define AVF_AQ_F_FE (1 << 15)
+
+
+/* virtchnl operation codes: _(value, name); expanded below into the
+   virtchnl_ops_t enum and reusable elsewhere for format functions */
+#define foreach_virtchnl_op \
+  _(0, UNKNOWN) \
+  _(1, VERSION) \
+  _(2, RESET_VF) \
+  _(3, GET_VF_RESOURCES) \
+  _(4, CONFIG_TX_QUEUE) \
+  _(5, CONFIG_RX_QUEUE) \
+  _(6, CONFIG_VSI_QUEUES) \
+  _(7, CONFIG_IRQ_MAP) \
+  _(8, ENABLE_QUEUES) \
+  _(9, DISABLE_QUEUES) \
+  _(10, ADD_ETH_ADDR) \
+  _(11, DEL_ETH_ADDR) \
+  _(12, ADD_VLAN) \
+  _(13, DEL_VLAN) \
+  _(14, CONFIG_PROMISCUOUS_MODE) \
+  _(15, GET_STATS) \
+  _(16, RSVD) \
+  _(17, EVENT) \
+  _(18, UNDEF_18) \
+  _(19, UNDEF_19) \
+  _(20, IWARP) \
+  _(21, CONFIG_IWARP_IRQ_MAP) \
+  _(22, RELEASE_IWARP_IRQ_MAP) \
+  _(23, CONFIG_RSS_KEY) \
+  _(24, CONFIG_RSS_LUT) \
+  _(25, GET_RSS_HENA_CAPS) \
+  _(26, SET_RSS_HENA) \
+  _(27, ENABLE_VLAN_STRIPPING) \
+  _(28, DISABLE_VLAN_STRIPPING) \
+  _(29, REQUEST_QUEUES)
+
+typedef enum
+{
+#define _(v,n) VIRTCHNL_OP_##n = v,
+  foreach_virtchnl_op
+#undef _
+  VIRTCHNL_N_OPS,
+} virtchnl_ops_t;
+
+/* status codes carried in the v_retval field of admin queue replies;
+   negative values indicate errors */
+typedef enum
+{
+  VIRTCHNL_STATUS_SUCCESS = 0,
+  VIRTCHNL_ERR_PARAM = -5,
+  VIRTCHNL_STATUS_ERR_OPCODE_MISMATCH = -38,
+  VIRTCHNL_STATUS_ERR_CQP_COMPL_ERROR = -39,
+  VIRTCHNL_STATUS_ERR_INVALID_VF_ID = -40,
+  VIRTCHNL_STATUS_NOT_SUPPORTED = -64,
+} virtchnl_status_code_t;
+
+/* VF capability/offload flag bits: _(bit, name, format string); the
+   format strings are presumably consumed by a format function elsewhere */
+#define foreach_avf_vf_cap_flag \
+  _( 0, L2, "l2") \
+  _( 1, IWARP, "iwarp") \
+  _( 2, RSVD, "rsvd") \
+  _( 3, RSS_AQ, "rss-aq") \
+  _( 4, RSS_REG, "rss-reg") \
+  _( 5, WB_ON_ITR, "wb-on-itr") \
+  _( 6, REQ_QUEUES, "req-queues") \
+  _(16, VLAN, "vlan") \
+  _(17, RX_POLLING, "rx-polling") \
+  _(18, RSS_PCTYPE_V2, "rss-pctype-v2") \
+  _(19, RSS_PF, "rss-pf") \
+  _(20, ENCAP, "encap") \
+  _(21, ENCAP_CSUM, "encap-csum") \
+  _(22, RX_ENCAP_CSUM, "rx-encap-csum")
+
+typedef enum
+{
+#define _(a, b, c) VIRTCHNL_VF_OFFLOAD_##b = (1 << a),
+  foreach_avf_vf_cap_flag
+#undef _
+} avf_vf_cap_flag_t;
+
+typedef enum
+{
+  VIRTCHNL_VSI_TYPE_INVALID = 0,
+  VIRTCHNL_VSI_SRIOV = 6,
+} virtchnl_vsi_type_t;
+
+/* per-VSI resource description returned by the PF */
+typedef struct
+{
+  u16 vsi_id;
+  u16 num_queue_pairs;
+  virtchnl_vsi_type_t vsi_type;
+  u16 qset_handle;
+  u8 default_mac_addr[6];
+} virtchnl_vsi_resource_t;
+
+/* reply payload for VIRTCHNL_OP_GET_VF_RESOURCES; vsi_res is a
+   variable-length trailing array (declared [1] to match the wire format) */
+typedef struct
+{
+  u16 num_vsis;
+  u16 num_queue_pairs;
+  u16 max_vectors;
+  u16 max_mtu;
+  u32 vf_offload_flags;
+  u32 rss_key_size;
+  u32 rss_lut_size;
+  virtchnl_vsi_resource_t vsi_res[1];
+} virtchnl_vf_resource_t;
+
+/* event codes delivered by the PF via VIRTCHNL_OP_EVENT */
+typedef enum
+{
+  VIRTCHNL_EVENT_UNKNOWN = 0,
+  VIRTCHNL_EVENT_LINK_CHANGE,
+  VIRTCHNL_EVENT_RESET_IMPENDING,
+  VIRTCHNL_EVENT_PF_DRIVER_CLOSE,
+} virtchnl_event_codes_t;
+
+/* link speeds: _(bit, name, description) - expanded into one-hot enum
+   values below */
+#define foreach_virtchnl_link_speed \
+  _(1, 100MB, "100 Mbps") \
+  _(2, 1GB, "1 Gbps") \
+  _(3, 10GB, "10 Gbps") \
+  _(4, 40GB, "40 Gbps") \
+  _(5, 20GB, "20 Gbps") \
+  _(6, 25GB, "25 Gbps")
+
+typedef enum
+{
+  VIRTCHNL_LINK_SPEED_UNKNOWN = 0,
+#define _(a,b,c) VIRTCHNL_LINK_SPEED_##b = (1 << a),
+  foreach_virtchnl_link_speed
+#undef _
+} virtchnl_link_speed_t;
+
+/* payload of a PF event message; size pinned to 16 bytes below to match
+   the wire format */
+typedef struct
+{
+  virtchnl_event_codes_t event;
+  union
+  {
+    struct
+    {
+      virtchnl_link_speed_t link_speed;
+      _Bool link_status;
+    } link_event;
+  } event_data;
+  int severity;
+} virtchnl_pf_event_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl_pf_event_t, 16);
+
+/* payload of VIRTCHNL_OP_VERSION exchange */
+typedef struct
+{
+  u32 major;
+  u32 minor;
+} virtchnl_version_info_t;
+
+/* admin queue descriptor (32 bytes, asserted below).  The cookie fields
+   double as virtchnl opcode/status, and param2/param3 double as the
+   high/low halves of an indirect buffer address. */
+typedef struct
+{
+  u16 flags;
+  u16 opcode;
+  u16 datalen;
+  u16 retval;
+  union
+  {
+    u32 cookie_hi;
+    virtchnl_ops_t v_opcode;
+  };
+  union
+  {
+    u32 cookie_lo;
+    virtchnl_status_code_t v_retval;
+  };
+  u32 param0;
+  u32 param1;
+  union
+  {
+    u32 param2;
+    u32 addr_hi;
+  };
+  union
+  {
+    u32 param3;
+    u32 addr_lo;
+  };
+} avf_aq_desc_t;
+
+STATIC_ASSERT_SIZEOF (avf_aq_desc_t, 32);
+
+/* tx queue configuration (VIRTCHNL_OP_CONFIG_TX_QUEUE); the 24-byte size
+   asserted below includes 2 bytes of implicit padding before
+   dma_ring_addr */
+typedef struct
+{
+  u16 vsi_id;
+  u16 queue_id;
+  u16 ring_len;
+  u64 dma_ring_addr;
+  u64 dma_headwb_addr;
+} virtchnl_txq_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl_txq_info_t, 24);
+
+/* rx queue configuration (VIRTCHNL_OP_CONFIG_RX_QUEUE) */
+typedef struct
+{
+  u16 vsi_id;
+  u16 queue_id;
+  u32 ring_len;
+  u16 hdr_size;
+  u16 splithdr_enabled;
+  u32 databuffer_size;
+  u32 max_pkt_size;
+  u32 pad1;
+  u64 dma_ring_addr;
+  int rx_split_pos;
+  u32 pad2;
+} virtchnl_rxq_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl_rxq_info_t, 40);
+
+typedef struct
+{
+  virtchnl_txq_info_t txq;
+  virtchnl_rxq_info_t rxq;
+} virtchnl_queue_pair_info_t;
+
+/* payload of VIRTCHNL_OP_CONFIG_VSI_QUEUES; qpair is a variable-length
+   trailing array (declared [1] to match the wire format) */
+typedef struct
+{
+  u16 vsi_id;
+  u16 num_queue_pairs;
+  u32 pad;
+  virtchnl_queue_pair_info_t qpair[1];
+} virtchnl_vsi_queue_config_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl_vsi_queue_config_info_t, 72);
+
+/* queue bitmaps for VIRTCHNL_OP_ENABLE_QUEUES / DISABLE_QUEUES */
+typedef struct
+{
+  u16 vsi_id;
+  u16 pad;
+  u32 rx_queues;
+  u32 tx_queues;
+} virtchnl_queue_select_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl_queue_select_t, 12);
+
+/* single interrupt-vector-to-queue mapping entry */
+typedef struct
+{
+  u16 vsi_id;
+  u16 vector_id;
+  u16 rxq_map;
+  u16 txq_map;
+  u16 rxitr_idx;
+  u16 txitr_idx;
+} virtchnl_vector_map_t;
+
+/* payload of VIRTCHNL_OP_CONFIG_IRQ_MAP; vecmap is a variable-length
+   trailing array (declared [1] to match the wire format) */
+typedef struct
+{
+  u16 num_vectors;
+  virtchnl_vector_map_t vecmap[1];
+} virtchnl_irq_map_info_t;
+
+STATIC_ASSERT_SIZEOF (virtchnl_irq_map_info_t, 14);
+
+/* MAC address entry, padded to 8 bytes */
+typedef struct
+{
+  u8 addr[6];
+  u8 pad[2];
+} virtchnl_ether_addr_t;
+
+/* payload of VIRTCHNL_OP_ADD_ETH_ADDR / DEL_ETH_ADDR; list is a
+   variable-length trailing array */
+typedef struct
+{
+  u16 vsi_id;
+  u16 num_elements;
+  virtchnl_ether_addr_t list[1];
+} virtchnl_ether_addr_list_t;
+
+/* counters returned by VIRTCHNL_OP_GET_STATS; the list is reusable
+   elsewhere (e.g. for formatting each counter by name) */
+#define foreach_virtchnl_eth_stats \
+  _(rx_bytes) \
+  _(rx_unicast) \
+  _(rx_multicast) \
+  _(rx_broadcast) \
+  _(rx_discards) \
+  _(rx_unknown_protocol)\
+  _(tx_bytes) \
+  _(tx_unicast) \
+  _(tx_multicast) \
+  _(tx_broadcast) \
+  _(tx_discards) \
+  _(tx_errors)
+
+/* one u64 field per counter above, in wire order */
+typedef struct
+{
+#define _(s) u64 s;
+  foreach_virtchnl_eth_stats
+#undef _
+} virtchnl_eth_stats_t;
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */