summaryrefslogtreecommitdiffstats
path: root/src/plugins/af_packet
diff options
context:
space:
mode:
Diffstat (limited to 'src/plugins/af_packet')
-rw-r--r--src/plugins/af_packet/CMakeLists.txt36
-rw-r--r--src/plugins/af_packet/FEATURE.yaml16
-rw-r--r--src/plugins/af_packet/af_packet.api192
-rw-r--r--src/plugins/af_packet/af_packet.c879
-rw-r--r--src/plugins/af_packet/af_packet.h179
-rw-r--r--src/plugins/af_packet/af_packet_api.c253
-rw-r--r--src/plugins/af_packet/cli.c295
-rw-r--r--src/plugins/af_packet/device.c836
-rw-r--r--src/plugins/af_packet/dir.dox29
-rw-r--r--src/plugins/af_packet/node.c845
-rw-r--r--src/plugins/af_packet/plugin.c22
11 files changed, 3582 insertions, 0 deletions
diff --git a/src/plugins/af_packet/CMakeLists.txt b/src/plugins/af_packet/CMakeLists.txt
new file mode 100644
index 00000000000..4b79615cae7
--- /dev/null
+++ b/src/plugins/af_packet/CMakeLists.txt
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: Apache-2.0
+# Copyright (c) 2022 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+add_vpp_plugin(af_packet
+ SOURCES
+ plugin.c
+ af_packet.c
+ device.c
+ node.c
+ cli.c
+ af_packet_api.c
+
+ MULTIARCH_SOURCES
+ node.c
+ device.c
+
+ INSTALL_HEADERS
+ af_packet.h
+
+ API_FILES
+ af_packet.api
+
+ # API_TEST_SOURCES
+ #af_packet_test_api.c
+)
diff --git a/src/plugins/af_packet/FEATURE.yaml b/src/plugins/af_packet/FEATURE.yaml
new file mode 100644
index 00000000000..4a11ea2beb5
--- /dev/null
+++ b/src/plugins/af_packet/FEATURE.yaml
@@ -0,0 +1,16 @@
+---
+name: host-interface Device AF_PACKET
+maintainer: Damjan Marion <damarion@cisco.com>
+features:
+ - L4 checksum offload
+ - GSO offload
+description: "Create a host interface that will attach to a linux AF_PACKET
+ interface, one side of a veth pair. The veth pair must
+ already exist. Once created, a new host interface will
+ exist in VPP with the name 'host-<ifname>', where '<ifname>'
+ is the name of the specified veth pair. Use the 'show interface'
+ command to display host interface details."
+missing:
+ - API dump details beyond sw_if_index and name
+state: production
+properties: [API, CLI, STATS, MULTITHREAD]
diff --git a/src/plugins/af_packet/af_packet.api b/src/plugins/af_packet/af_packet.api
new file mode 100644
index 00000000000..a12da37796a
--- /dev/null
+++ b/src/plugins/af_packet/af_packet.api
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+option version = "2.0.0";
+
+import "vnet/interface_types.api";
+import "vnet/ethernet/ethernet_types.api";
+
+/** \brief Create host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use random generated MAC
+ @param host_if_name - interface name
+*/
+define af_packet_create
+{
+ u32 client_index;
+ u32 context;
+
+ vl_api_mac_address_t hw_addr;
+ bool use_random_hw_addr;
+ string host_if_name[64];
+};
+
+/** \brief Create host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define af_packet_create_reply
+{
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief Create host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use random generated MAC
+ @param host_if_name - interface name
+ @param rx_frame_size - frame size for RX
+ @param tx_frame_size - frame size for TX
+ @param rx_frames_per_block - frames per block for RX
+ @param tx_frames_per_block - frames per block for TX
+ @param flags - flags for the af_packet interface creation
+ @param num_rx_queues - number of rx queues
+*/
+define af_packet_create_v2
+{
+ u32 client_index;
+ u32 context;
+
+ vl_api_mac_address_t hw_addr;
+ bool use_random_hw_addr;
+ string host_if_name[64];
+ u32 rx_frame_size;
+ u32 tx_frame_size;
+ u32 rx_frames_per_block;
+ u32 tx_frames_per_block;
+ u32 flags;
+ u16 num_rx_queues [default=1];
+};
+
+/** \brief Create host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define af_packet_create_v2_reply
+{
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+};
+
+enum af_packet_mode {
+ AF_PACKET_API_MODE_ETHERNET = 1, /* mode ethernet */
+ AF_PACKET_API_MODE_IP = 2, /* mode ip */
+};
+
+enum af_packet_flags {
+ AF_PACKET_API_FLAG_QDISC_BYPASS = 1, /* enable the qdisc bypass */
+ AF_PACKET_API_FLAG_CKSUM_GSO = 2, /* enable checksum/gso */
+ AF_PACKET_API_FLAG_VERSION_2 = 8 [backwards_compatible], /* af packet v2, default is v3 */
+};
+
+/** \brief Create host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param mode - 1 - Ethernet, 2 - IP
+ @param hw_addr - interface MAC
+ @param use_random_hw_addr - use random generated MAC
+ @param host_if_name - interface name
+ @param rx_frame_size - frame size for RX
+ @param tx_frame_size - frame size for TX
+ @param rx_frames_per_block - frames per block for RX
+ @param tx_frames_per_block - frames per block for TX
+ @param flags - flags for the af_packet interface creation
+ @param num_rx_queues - number of rx queues
+ @param num_tx_queues - number of tx queues
+*/
+define af_packet_create_v3
+{
+ u32 client_index;
+ u32 context;
+
+ vl_api_af_packet_mode_t mode;
+ vl_api_mac_address_t hw_addr;
+ bool use_random_hw_addr;
+ string host_if_name[64];
+ u32 rx_frame_size;
+ u32 tx_frame_size;
+ u32 rx_frames_per_block;
+ u32 tx_frames_per_block;
+ vl_api_af_packet_flags_t flags;
+ u16 num_rx_queues [default=1];
+ u16 num_tx_queues [default=1];
+};
+
+/** \brief Create host-interface response
+ @param context - sender context, to match reply w/ request
+ @param retval - return value for request
+*/
+define af_packet_create_v3_reply
+{
+ u32 context;
+ i32 retval;
+ vl_api_interface_index_t sw_if_index;
+};
+
+/** \brief Delete host-interface
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param host_if_name - interface name
+*/
+autoreply define af_packet_delete
+{
+ u32 client_index;
+ u32 context;
+
+ string host_if_name[64];
+};
+
+/** \brief Set l4 offload checksum calculation
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+*/
+autoreply define af_packet_set_l4_cksum_offload
+{
+ u32 client_index;
+ u32 context;
+
+ vl_api_interface_index_t sw_if_index;
+ bool set;
+};
+
+/** \brief Dump af_packet interfaces request */
+define af_packet_dump
+{
+ u32 client_index;
+ u32 context;
+};
+
+/** \brief Reply for af_packet dump request
+ @param sw_if_index - software index of af_packet interface
+ @param host_if_name - interface name
+*/
+define af_packet_details
+{
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+ string host_if_name[64];
+};
+
+/*
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/af_packet/af_packet.c b/src/plugins/af_packet/af_packet.c
new file mode 100644
index 00000000000..b2f860e658d
--- /dev/null
+++ b/src/plugins/af_packet/af_packet.c
@@ -0,0 +1,879 @@
+/*
+ *------------------------------------------------------------------
+ * af_packet.c - linux kernel packet interface
+ *
+ * Copyright (c) 2016 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *------------------------------------------------------------------
+ */
+
+#include <linux/if_ether.h>
+#include <linux/if_packet.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <dirent.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+#include <vppinfra/linux/sysfs.h>
+#include <vlib/vlib.h>
+#include <vlib/unix/unix.h>
+#include <vnet/ip/ip.h>
+#include <vnet/devices/netlink.h>
+#include <vnet/ethernet/ethernet.h>
+#include <vnet/interface/rx_queue_funcs.h>
+#include <vnet/interface/tx_queue_funcs.h>
+
+#include <af_packet/af_packet.h>
+
+af_packet_main_t af_packet_main;
+
+VNET_HW_INTERFACE_CLASS (af_packet_ip_device_hw_interface_class, static) = {
+ .name = "af-packet-ip-device",
+ .flags = VNET_HW_INTERFACE_CLASS_FLAG_P2P,
+};
+
+#define AF_PACKET_DEFAULT_TX_FRAMES_PER_BLOCK 1024
+#define AF_PACKET_DEFAULT_TX_FRAME_SIZE (2048 * 33) // GSO packet of 64KB
+#define AF_PACKET_TX_BLOCK_NR 1
+
+#define AF_PACKET_DEFAULT_RX_FRAMES_PER_BLOCK_V2 1024
+#define AF_PACKET_DEFAULT_RX_FRAME_SIZE_V2 (2048 * 33) // GSO packet of 64KB
+#define AF_PACKET_RX_BLOCK_NR_V2 1
+
+#define AF_PACKET_DEFAULT_RX_FRAMES_PER_BLOCK 32
+#define AF_PACKET_DEFAULT_RX_FRAME_SIZE 2048
+#define AF_PACKET_RX_BLOCK_NR 160
+
+/*defined in net/if.h but clashes with dpdk headers */
+unsigned int if_nametoindex (const char *ifname);
+
+static clib_error_t *
+af_packet_eth_set_max_frame_size (vnet_main_t *vnm, vnet_hw_interface_t *hi,
+ u32 frame_size)
+{
+ clib_error_t *error, *rv;
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_if_t *apif = pool_elt_at_index (apm->interfaces, hi->dev_instance);
+
+ error = vnet_netlink_set_link_mtu (apif->host_if_index,
+ frame_size + hi->frame_overhead);
+
+ if (error)
+ {
+ vlib_log_err (apm->log_class, "netlink failed to change MTU: %U",
+ format_clib_error, error);
+ rv = vnet_error (VNET_ERR_SYSCALL_ERROR_1, "netlink error: %U",
+ format_clib_error, error);
+ clib_error_free (error);
+ return rv;
+ }
+ else
+ apif->host_mtu = frame_size + hi->frame_overhead;
+ return 0;
+}
+
+static int
+af_packet_read_mtu (af_packet_if_t *apif)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ clib_error_t *error;
+ error = vnet_netlink_get_link_mtu (apif->host_if_index, &apif->host_mtu);
+ if (error)
+ {
+ vlib_log_err (apm->log_class, "netlink failed to get MTU: %U",
+ format_clib_error, error);
+ clib_error_free (error);
+ return VNET_API_ERROR_SYSCALL_ERROR_1;
+ }
+ return 0;
+}
+
+static clib_error_t *
+af_packet_fd_read_ready (clib_file_t * uf)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+
+ /* Schedule the rx node */
+ vnet_hw_if_rx_queue_set_int_pending (vnm, uf->private_data);
+ return 0;
+}
+
+static int
+is_bridge (const u8 * host_if_name)
+{
+ u8 *s;
+ DIR *dir = NULL;
+
+ s = format (0, "/sys/class/net/%s/bridge%c", host_if_name, 0);
+ dir = opendir ((char *) s);
+ vec_free (s);
+
+ if (dir)
+ {
+ closedir (dir);
+ return 0;
+ }
+
+ return -1;
+}
+
+static void
+af_packet_set_rx_queues (vlib_main_t *vm, af_packet_if_t *apif)
+{
+ vnet_main_t *vnm = vnet_get_main ();
+ af_packet_queue_t *rx_queue;
+
+ vnet_hw_if_set_input_node (vnm, apif->hw_if_index,
+ af_packet_input_node.index);
+
+ vec_foreach (rx_queue, apif->rx_queues)
+ {
+ rx_queue->queue_index = vnet_hw_if_register_rx_queue (
+ vnm, apif->hw_if_index, rx_queue->queue_id, VNET_HW_IF_RXQ_THREAD_ANY);
+
+ {
+ clib_file_t template = { 0 };
+ template.read_function = af_packet_fd_read_ready;
+ template.file_descriptor = rx_queue->fd;
+ template.private_data = rx_queue->queue_index;
+ template.flags = UNIX_FILE_EVENT_EDGE_TRIGGERED;
+ template.description =
+ format (0, "%U queue %u", format_af_packet_device_name,
+ apif->dev_instance, rx_queue->queue_id);
+ rx_queue->clib_file_index = clib_file_add (&file_main, &template);
+ }
+ vnet_hw_if_set_rx_queue_file_index (vnm, rx_queue->queue_index,
+ rx_queue->clib_file_index);
+ vnet_hw_if_set_rx_queue_mode (vnm, rx_queue->queue_index,
+ VNET_HW_IF_RX_MODE_INTERRUPT);
+ rx_queue->mode = VNET_HW_IF_RX_MODE_INTERRUPT;
+ }
+ vnet_hw_if_update_runtime_data (vnm, apif->hw_if_index);
+}
+
+static void
@media only all and (prefers-color-scheme: dark) { .highlight .hll { background-color: #49483e } .highlight .c { color: #75715e } /* Comment */ .highlight .err { color: #960050; background-color: #1e0010 } /* Error */ .highlight .k { color: #66d9ef } /* Keyword */ .highlight .l { color: #ae81ff } /* Literal */ .highlight .n { color: #f8f8f2 } /* Name */ .highlight .o { color: #f92672 } /* Operator */ .highlight .p { color: #f8f8f2 } /* Punctuation */ .highlight .ch { color: #75715e } /* Comment.Hashbang */ .highlight .cm { color: #75715e } /* Comment.Multiline */ .highlight .cp { color: #75715e } /* Comment.Preproc */ .highlight .cpf { color: #75715e } /* Comment.PreprocFile */ .highlight .c1 { color: #75715e } /* Comment.Single */ .highlight .cs { color: #75715e } /* Comment.Special */ .highlight .gd { color: #f92672 } /* Generic.Deleted */ .highlight .ge { font-style: italic } /* Generic.Emph */ .highlight .gi { color: #a6e22e } /* Generic.Inserted */ .highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { color: #75715e } /* Generic.Subheading */ .highlight .kc { color: #66d9ef } /* Keyword.Constant */ .highlight .kd { color: #66d9ef } /* Keyword.Declaration */ .highlight .kn { color: #f92672 } /* Keyword.Namespace */ .highlight .kp { color: #66d9ef } /* Keyword.Pseudo */ .highlight .kr { color: #66d9ef } /* Keyword.Reserved */ .highlight .kt { color: #66d9ef } /* Keyword.Type */ .highlight .ld { color: #e6db74 } /* Literal.Date */ .highlight .m { color: #ae81ff } /* Literal.Number */ .highlight .s { color: #e6db74 } /* Literal.String */ .highlight .na { color: #a6e22e } /* Name.Attribute */ .highlight .nb { color: #f8f8f2 } /* Name.Builtin */ .highlight .nc { color: #a6e22e } /* Name.Class */ .highlight .no { color: #66d9ef } /* Name.Constant */ .highlight .nd { color: #a6e22e } /* Name.Decorator */ .highlight .ni { color: #f8f8f2 } /* Name.Entity */ .highlight .ne { color: #a6e22e } /* Name.Exception */ .highlight .nf { color: #a6e22e } /* 
Name.Function */ .highlight .nl { color: #f8f8f2 } /* Name.Label */ .highlight .nn { color: #f8f8f2 } /* Name.Namespace */ .highlight .nx { color: #a6e22e } /* Name.Other */ .highlight .py { color: #f8f8f2 } /* Name.Property */ .highlight .nt { color: #f92672 } /* Name.Tag */ .highlight .nv { color: #f8f8f2 } /* Name.Variable */ .highlight .ow { color: #f92672 } /* Operator.Word */ .highlight .w { color: #f8f8f2 } /* Text.Whitespace */ .highlight .mb { color: #ae81ff } /* Literal.Number.Bin */ .highlight .mf { color: #ae81ff } /* Literal.Number.Float */ .highlight .mh { color: #ae81ff } /* Literal.Number.Hex */ .highlight .mi { color: #ae81ff } /* Literal.Number.Integer */ .highlight .mo { color: #ae81ff } /* Literal.Number.Oct */ .highlight .sa { color: #e6db74 } /* Literal.String.Affix */ .highlight .sb { color: #e6db74 } /* Literal.String.Backtick */ .highlight .sc { color: #e6db74 } /* Literal.String.Char */ .highlight .dl { color: #e6db74 } /* Literal.String.Delimiter */ .highlight .sd { color: #e6db74 } /* Literal.String.Doc */ .highlight .s2 { color: #e6db74 } /* Literal.String.Double */ .highlight .se { color: #ae81ff } /* Literal.String.Escape */ .highlight .sh { color: #e6db74 } /* Literal.String.Heredoc */ .highlight .si { color: #e6db74 } /* Literal.String.Interpol */ .highlight .sx { color: #e6db74 } /* Literal.String.Other */ .highlight .sr { color: #e6db74 } /* Literal.String.Regex */ .highlight .s1 { color: #e6db74 } /* Literal.String.Single */ .highlight .ss { color: #e6db74 } /* Literal.String.Symbol */ .highlight .bp { color: #f8f8f2 } /* Name.Builtin.Pseudo */ .highlight .fm { color: #a6e22e } /* Name.Function.Magic */ .highlight .vc { color: #f8f8f2 } /* Name.Variable.Class */ .highlight .vg { color: #f8f8f2 } /* Name.Variable.Global */ .highlight .vi { color: #f8f8f2 } /* Name.Variable.Instance */ .highlight .vm { color: #f8f8f2 } /* Name.Variable.Magic */ .highlight .il { color: #ae81ff } /* Literal.Number.Integer.Long */ } @media 
(prefers-color-scheme: light) { .highlight .hll { background-color: #ffffcc } .highlight .c { color: #888888 } /* Comment */ .highlight .err { color: #a61717; background-color: #e3d2d2 } /* Error */ .highlight .k { color: #008800; font-weight: bold } /* Keyword */ .highlight .ch { color: #888888 } /* Comment.Hashbang */ .highlight .cm { color: #888888 } /* Comment.Multiline */ .highlight .cp { color: #cc0000; font-weight: bold } /* Comment.Preproc */ .highlight .cpf { color: #888888 } /* Comment.PreprocFile */ .highlight .c1 { color: #888888 } /* Comment.Single */ .highlight .cs { color: #cc0000; font-weight: bold; background-color: #fff0f0 } /* Comment.Special */ .highlight .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */ .highlight .ge { font-style: italic } /* Generic.Emph */ .highlight .gr { color: #aa0000 } /* Generic.Error */ .highlight .gh { color: #333333 } /* Generic.Heading */ .highlight .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */ .highlight .go { color: #888888 } /* Generic.Output */ .highlight .gp { color: #555555 } /* Generic.Prompt */ .highlight .gs { font-weight: bold } /* Generic.Strong */ .highlight .gu { color: #666666 } /* Generic.Subheading */ .highlight .gt { color: #aa0000 } /* Generic.Traceback */ .highlight .kc { color: #008800; font-weight: bold } /* Keyword.Constant */ .highlight .kd { color: #008800; font-weight: bold } /* Keyword.Declaration */ .highlight .kn { color: #008800; font-weight: bold } /* Keyword.Namespace */ .highlight .kp { color: #008800 } /* Keyword.Pseudo */ .highlight .kr { color: #008800; font-weight: bold } /* Keyword.Reserved */ .highlight .kt { color: #888888; font-weight: bold } /* Keyword.Type */ .highlight .m { color: #0000DD; font-weight: bold } /* Literal.Number */ .highlight .s { color: #dd2200; background-color: #fff0f0 } /* Literal.String */ .highlight .na { color: #336699 } /* Name.Attribute */ .highlight .nb { color: #003388 } /* Name.Builtin */ .highlight 
.nc { color: #bb0066; font-weight: bold } /* Name.Class */ .highlight .no { color: #003366; font-weight: bold } /* Name.Constant */ .highlight .nd { color: #555555 } /* Name.Decorator */ .highlight .ne { color: #bb0066; font-weight: bold } /* Name.Exception */ .highlight .nf { color: #0066bb; font-weight: bold } /* Name.Function */ .highlight .nl { color: #336699; font-style: italic } /* Name.Label */ .highlight .nn { color: #bb0066; font-weight: bold } /* Name.Namespace */ .highlight .py { color: #336699; font-weight: bold } /* Name.Property */ .highlight .nt { color: #bb0066; font-weight: bold } /* Name.Tag */ .highlight .nv { color: #336699 } /* Name.Variable */ .highlight .ow { color: #008800 } /* Operator.Word */ .highlight .w { color: #bbbbbb } /* Text.Whitespace */ .highlight .mb { color: #0000DD; font-weight: bold } /* Literal.Number.Bin */ .highlight .mf { color: #0000DD; font-weight: bold } /* Literal.Number.Float */ .highlight .mh { color: #0000DD; font-weight: bold } /* Literal.Number.Hex */ .highlight .mi { color: #0000DD; font-weight: bold } /* Literal.Number.Integer */ .highlight .mo { color: #0000DD; font-weight: bold } /* Literal.Number.Oct */ .highlight .sa { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Affix */ .highlight .sb { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Backtick */ .highlight .sc { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Char */ .highlight .dl { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Delimiter */ .highlight .sd { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Doc */ .highlight .s2 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Double */ .highlight .se { color: #0044dd; background-color: #fff0f0 } /* Literal.String.Escape */ .highlight .sh { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Heredoc */ .highlight .si { color: #3333bb; background-color: #fff0f0 } /* Literal.String.Interpol */ .highlight .sx { 
color: #22bb22; background-color: #f0fff0 } /* Literal.String.Other */ .highlight .sr { color: #008800; background-color: #fff0ff } /* Literal.String.Regex */ .highlight .s1 { color: #dd2200; background-color: #fff0f0 } /* Literal.String.Single */ .highlight .ss { color: #aa6600; background-color: #fff0f0 } /* Literal.String.Symbol */ .highlight .bp { color: #003388 } /* Name.Builtin.Pseudo */ .highlight .fm { color: #0066bb; font-weight: bold } /* Name.Function.Magic */ .highlight .vc { color: #336699 } /* Name.Variable.Class */ .highlight .vg { color: #dd7700 } /* Name.Variable.Global */ .highlight .vi { color: #3333bb } /* Name.Variable.Instance */ .highlight .vm { color: #336699 } /* Name.Variable.Magic */ .highlight .il { color: #0000DD; font-weight: bold } /* Literal.Number.Integer.Long */ }
# Copyright (c) 2017 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

*** Settings ***
| Resource | resources/libraries/robot/performance.robot
| Library | resources.libraries.python.NodePath
| Force Tags | 3_NODE_SINGLE_LINK_TOPO | PERFTEST | HW_ENV | NDRPDRDISC
| ...        | NIC_Intel-X520-DA2 | DOT1Q | L2XCFWD | BASE | VHOST | VM
| Suite Setup | 3-node Performance Suite Setup with DUT's NIC model
| ... | L2 | Intel-X520-DA2
| Suite Teardown | 3-node Performance Suite Teardown
| Test Setup | Setup all DUTs before test
| Test Teardown | Run Keywords
| ...           | Run Keyword If Test Failed
| ...           | Traffic should pass with no loss | 10
| ...           | ${min_rate}pps | ${framesize} | 3-node-bridge
| ...           | fail_on_loss=${False}
| ...           | AND | Show Vpp Vhost On All DUTs
| ...           | AND |  Remove startup configuration of VPP from all DUTs
| ...           | AND | Guest VM with dpdk-testpmd Teardown | ${dut1}
| ...                 | ${dut1_vm_refs}
| ...           | AND | Guest VM with dpdk-testpmd Teardown | ${dut2}
| ...                 | ${dut2_vm_refs}
| Documentation | *RFC2544: Pkt throughput L2XC test cases with vhost*
| ...
| ... | *[Top] Network Topologies:* TG-DUT1-DUT2-TG 3-node circular topology
| ... | with single links between nodes.
| ... | *[Enc] Packet Encapsulations:* Eth-IPv4 for L2 cross connect. 802.1q
| ... | tagging is applied on link between DUT1 and DUT2.
| ... | *[Cfg] DUT configuration:* DUT1 and DUT2 are configured with L2 cross-
| ... | connect. Qemu Guest is connected to VPP via vhost-user interfaces.
| ... | Guest is running DPDK testpmd interconnecting vhost-user interfaces
| ... | using 5 cores pinned to cpus 5-9 and 2048M memory. Testpmd is using
| ... | socket-mem=1024M (512x2M hugepages), 5 cores (1 main core and 4 cores
| ... | dedicated for io), forwarding mode is set to io, rxd/txd=256,
| ... | burst=64. DUT1, DUT2 are tested with 2p10GE NIC X520 Niantic by Intel.
| ... | *[Ver] TG verification:* TG finds and reports throughput NDR (Non Drop
| ... | Rate) with zero packet loss tolerance or throughput PDR (Partial Drop
| ... | Rate) with non-zero packet loss tolerance (LT) expressed in percentage
| ... | of packets transmitted. NDR and PDR are discovered for different
| ... | Ethernet L2 frame sizes using either binary search or linear search
| ... | algorithms with configured starting rate and final step that determines
| ... | throughput measurement resolution. Test packets are generated by TG on
| ... | links to DUTs. TG traffic profile contains two L3 flow-groups
| ... | (flow-group per direction, 253 flows per flow-group) with all packets
| ... | containing Ethernet header, IPv4 header with IP protocol=61 and static
| ... | payload. MAC addresses are matching MAC addresses of the TG node
| ... | interfaces.
| ... | *[Ref] Applicable standard specifications:* RFC2544.

*** Variables ***
| ${subid}= | 10
| ${tag_rewrite}= | pop-1
| ${vlan_overhead}= | ${4}
# Socket names
| ${sock1}= | /tmp/sock-1
| ${sock2}= | /tmp/sock-2
# X520-DA2 bandwidth limit
| ${s_limit}= | ${10000000000}

*** Test Cases ***
| tc01-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find NDR for 64 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps.
| | ...
| | [Tags] | 1T1C | STHREAD | NDRDISC
| | ...
| | ${framesize}= | Set Variable | ${64}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '1' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc02-64B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2BD switching config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find PDR for 64 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps, LT=0.5%.
| | ...
| | [Tags] | 1T1C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | ${framesize}= | Set Variable | ${64}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '1' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc03-1518B-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find NDR for 1518 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps.
| | ...
| | [Tags] | 1T1C | STHREAD | NDRDISC
| | ...
| | ${framesize}= | Set Variable | ${1518}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '1' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc04-1518B-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find PDR for 1518 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps, LT=0.5%.
| | ...
| | [Tags] | 1T1C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${1518}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '1' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc05-IMIX-1t1c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find NDR for IMIX_v4_1 framesize using binary search start at\
| | ... | 10GE linerate, step 10kpps.
| | ... | IMIX_v4_1 = (28x64B; 16x570B; 4x1518B)
| | ...
| | [Tags] | 1T1C | STHREAD | NDRDISC
| | ...
| | # IMIX_v4_1 averages 353.833B; 357.833 presumably adds a 4B VLAN tag - confirm.
| | ${framesize}= | Set Variable | IMIX_v4_1
| | ${avg_framesize}= | Set Variable | ${357.833}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${avg_framesize}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '1' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc06-IMIX-1t1c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 1 thread, 1 phy core,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find PDR for IMIX_v4_1 framesize using binary search start at\
| | ... | 10GE linerate, step 10kpps, LT=0.5%.
| | ... | IMIX_v4_1 = (28x64B; 16x570B; 4x1518B)
| | ...
| | [Tags] | 1T1C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # IMIX_v4_1 averages 353.833B; 357.833 presumably adds a 4B VLAN tag - confirm.
| | ${framesize}= | Set Variable | IMIX_v4_1
| | ${avg_framesize}= | Set Variable | ${357.833}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${avg_framesize}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '1' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc07-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find NDR for 64 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps.
| | ...
| | [Tags] | 2T2C | STHREAD | NDRDISC
| | ...
| | # NOTE(review): 2-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${64}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '2' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc08-64B-2t2c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find PDR for 64 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps, LT=0.5%.
| | ...
| | [Tags] | 2T2C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${64}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '2' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc09-1518B-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find NDR for 1518 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps.
| | ...
| | [Tags] | 2T2C | STHREAD | NDRDISC | SKIP_PATCH
| | ...
| | # NOTE(review): 2-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${1518}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '2' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc10-1518B-2t2c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find PDR for 1518 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps, LT=0.5%.
| | ...
| | [Tags] | 2T2C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${1518}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '2' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc11-IMIX-2t2c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find NDR for IMIX_v4_1 framesize using binary search start at\
| | ... | 10GE linerate, step 10kpps.
| | ... | IMIX_v4_1 = (28x64B; 16x570B; 4x1518B)
| | ...
| | [Tags] | 2T2C | STHREAD | NDRDISC | SKIP_PATCH
| | ...
| | # IMIX_v4_1 averages 353.833B; 357.833 presumably adds a 4B VLAN tag - confirm.
| | ${framesize}= | Set Variable | IMIX_v4_1
| | ${avg_framesize}= | Set Variable | ${357.833}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${avg_framesize}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '2' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc12-IMIX-2t2c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 2 threads, 2 phy cores,\
| | ... | 1 receive queue per NIC port.
| | ... | [Ver] Find PDR for IMIX_v4_1 framesize using binary search start at\
| | ... | 10GE linerate, step 10kpps, LT=0.5%.
| | ... | IMIX_v4_1 = (28x64B; 16x570B; 4x1518B)
| | ...
| | [Tags] | 2T2C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # IMIX_v4_1 averages 353.833B; 357.833 presumably adds a 4B VLAN tag - confirm.
| | ${framesize}= | Set Variable | IMIX_v4_1
| | ${avg_framesize}= | Set Variable | ${357.833}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${avg_framesize}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '2' worker threads and rxqueues '1' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc13-64B-4t4c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Find NDR for 64 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps.
| | ...
| | [Tags] | 4T4C | STHREAD | NDRDISC
| | ...
| | # NOTE(review): 4-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${64}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '4' worker threads and rxqueues '2' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc14-64B-4t4c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Find PDR for 64 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps, LT=0.5%.
| | ...
| | [Tags] | 4T4C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # NOTE(review): 4-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${64}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '4' worker threads and rxqueues '2' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc15-1518B-4t4c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Find NDR for 1518 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps.
| | ...
| | [Tags] | 4T4C | STHREAD | NDRDISC | SKIP_PATCH
| | ...
| | # NOTE(review): 4-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${1518}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '4' worker threads and rxqueues '2' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc16-1518B-4t4c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Find PDR for 1518 Byte frames using binary search start at 10GE\
| | ... | linerate, step 10kpps, LT=0.5%.
| | ...
| | [Tags] | 4T4C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # NOTE(review): 4-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # Max rate accounts for ${vlan_overhead} extra bytes per frame (VLAN tag).
| | ${framesize}= | Set Variable | ${1518}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${framesize + ${vlan_overhead}}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '4' worker threads and rxqueues '2' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}

| tc17-IMIX-4t4c-eth-l2xcbase-eth-2vhost-1vm-ndrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Find NDR for IMIX_v4_1 framesize using binary search start at\
| | ... | 10GE linerate, step 10kpps.
| | ... | IMIX_v4_1 = (28x64B; 16x570B; 4x1518B)
| | ...
| | [Tags] | 4T4C | STHREAD | NDRDISC | SKIP_PATCH
| | ...
| | # NOTE(review): 4-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # IMIX_v4_1 averages 353.833B; 357.833 presumably adds a 4B VLAN tag - confirm.
| | ${framesize}= | Set Variable | IMIX_v4_1
| | ${avg_framesize}= | Set Variable | ${357.833}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${avg_framesize}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '4' worker threads and rxqueues '2' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find NDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold}

| tc18-IMIX-4t4c-eth-l2xcbase-eth-2vhost-1vm-pdrdisc
| | [Documentation]
| | ... | [Cfg] DUT runs L2XC switching config with 4 threads, 4 phy cores,\
| | ... | 2 receive queues per NIC port.
| | ... | [Ver] Find PDR for IMIX_v4_1 framesize using binary search start at\
| | ... | 10GE linerate, step 10kpps, LT=0.5%.
| | ... | IMIX_v4_1 = (28x64B; 16x570B; 4x1518B)
| | ...
| | [Tags] | 4T4C | STHREAD | PDRDISC | SKIP_PATCH
| | ...
| | # NOTE(review): 4-worker test tagged STHREAD; sibling suites tag multi-worker
| | # tests MTHREAD - confirm intended tag.
| | # IMIX_v4_1 averages 353.833B; 357.833 presumably adds a 4B VLAN tag - confirm.
| | ${framesize}= | Set Variable | IMIX_v4_1
| | ${avg_framesize}= | Set Variable | ${357.833}
| | ${min_rate}= | Set Variable | ${10000}
| | ${max_rate}= | Calculate pps | ${s_limit} | ${avg_framesize}
| | ${binary_min}= | Set Variable | ${min_rate}
| | ${binary_max}= | Set Variable | ${max_rate}
| | ${threshold}= | Set Variable | ${min_rate}
| | ${dut1_vm_refs}= | Create Dictionary
| | ${dut2_vm_refs}= | Create Dictionary
| | Given Add '4' worker threads and rxqueues '2' in 3-node single-link topo
| | And Add PCI devices to DUTs from 3-node single link topology
| | And Add No Multi Seg to all DUTs
| | And Apply startup configuration on all VPP DUTs
| | When L2 xconnect with Vhost-User and VLAN initialized in a 3-node circular topology
| | ... | ${sock1} | ${sock2} | ${subid} | ${tag_rewrite}
| | ${vm1}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut1} | ${sock1} | ${sock2} | DUT1_VM1
| | Set To Dictionary | ${dut1_vm_refs} | DUT1_VM1 | ${vm1}
| | ${vm2}= | And Guest VM with dpdk-testpmd connected via vhost-user is setup
| | ... | ${dut2} | ${sock1} | ${sock2} | DUT2_VM1
| | Set To Dictionary | ${dut2_vm_refs} | DUT2_VM1 | ${vm2}
| | And Setup scheduler policy for VPP on all DUTs
| | Then Find PDR using binary search and pps | ${framesize} | ${binary_min}
| | ... | ${binary_max} | 3-node-bridge | ${min_rate} | ${max_rate}
| | ... | ${threshold} | ${glob_loss_acceptance} | ${glob_loss_acceptance_type}
*l4_hdr_sz, u8 is_ip)
+{
+ vnet_buffer_oflags_t oflags = 0;
+ u16 l2hdr_sz = 0;
+ u16 ethertype = 0;
+ u8 l4_proto = 0;
+
+ if (is_ip)
+ {
+ switch (b->data[0] & 0xf0)
+ {
+ case 0x40:
+ ethertype = ETHERNET_TYPE_IP4;
+ break;
+ case 0x60:
+ ethertype = ETHERNET_TYPE_IP6;
+ break;
+ }
+ }
+ else
+ {
+ ethernet_header_t *eth = (ethernet_header_t *) b->data;
+ ethertype = clib_net_to_host_u16 (eth->type);
+ l2hdr_sz = sizeof (ethernet_header_t);
+ if (ethernet_frame_is_tagged (ethertype))
+ {
+ ethernet_vlan_header_t *vlan = (ethernet_vlan_header_t *) (eth + 1);
+
+ ethertype = clib_net_to_host_u16 (vlan->type);
+ l2hdr_sz += sizeof (*vlan);
+ if (ethertype == ETHERNET_TYPE_VLAN)
+ {
+ vlan++;
+ ethertype = clib_net_to_host_u16 (vlan->type);
+ l2hdr_sz += sizeof (*vlan);
+ }
+ }
+ }
+
+ vnet_buffer (b)->l2_hdr_offset = 0;
+ vnet_buffer (b)->l3_hdr_offset = l2hdr_sz;
+
+ if (ethertype == ETHERNET_TYPE_IP4)
+ {
+ ip4_header_t *ip4 = (ip4_header_t *) (b->data + l2hdr_sz);
+ vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip4_header_bytes (ip4);
+ b->flags |= (VNET_BUFFER_F_IS_IP4 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
+
+ l4_proto = ip4->protocol;
+ }
+ else if (ethertype == ETHERNET_TYPE_IP6)
+ {
+ ip6_header_t *ip6 = (ip6_header_t *) (b->data + l2hdr_sz);
+ b->flags |= (VNET_BUFFER_F_IS_IP6 | VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L3_HDR_OFFSET_VALID |
+ VNET_BUFFER_F_L4_HDR_OFFSET_VALID);
+ u16 ip6_hdr_len = sizeof (ip6_header_t);
+
+ if (ip6_ext_hdr (ip6->protocol))
+ {
+ ip6_ext_header_t *p = (void *) (ip6 + 1);
+ ip6_hdr_len += ip6_ext_header_len (p);
+ while (ip6_ext_hdr (p->next_hdr))
+ {
+ ip6_hdr_len += ip6_ext_header_len (p);
+ p = ip6_ext_next_header (p);
+ }
+ l4_proto = p->next_hdr;
+ }
+ else
+ l4_proto = ip6->protocol;
+ vnet_buffer (b)->l4_hdr_offset = l2hdr_sz + ip6_hdr_len;
+ }
+
+ if (l4_proto == IP_PROTOCOL_TCP)
+ {
+ oflags |= VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
+ tcp_header_t *tcp =
+ (tcp_header_t *) (b->data + vnet_buffer (b)->l4_hdr_offset);
+ *l4_hdr_sz = tcp_header_bytes (tcp);
+ }
+ else if (l4_proto == IP_PROTOCOL_UDP)
+ {
+ oflags |= VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;
+ *l4_hdr_sz = sizeof (udp_header_t);
+ }
+
+ if (oflags)
+ vnet_buffer_offload_flags_set (b, oflags);
+}
+
+always_inline uword
+af_packet_v3_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_frame_t *frame, af_packet_if_t *apif,
+ u16 queue_id, u8 is_cksum_gso_enabled)
+{
+ af_packet_main_t *apm = &af_packet_main;
+ af_packet_queue_t *rx_queue = vec_elt_at_index (apif->rx_queues, queue_id);
+ tpacket3_hdr_t *tph;
+ u32 next_index;
+ u32 n_free_bufs;
+ u32 n_rx_packets = 0;
+ u32 n_rx_bytes = 0;
+ u32 timedout_blk = 0;
+ u32 total = 0;
+ u32 *to_next = 0;
+ u32 block = rx_queue->next_rx_block;
+ u32 block_nr = rx_queue->rx_req->req3.tp_block_nr;
+ u8 *block_start = 0;
+ uword n_trace = vlib_get_trace_count (vm, node);
+ u32 thread_index = vm->thread_index;
+ u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
+ u32 min_bufs = rx_queue->rx_req->req3.tp_frame_size / n_buffer_bytes;
+ u32 num_pkts = 0;
+ u32 rx_frame_offset = 0;
+ block_desc_t *bd = 0;
+ vlib_buffer_t bt = {};
+ u8 is_ip = (apif->mode == AF_PACKET_IF_MODE_IP);
+
+ if (is_ip)
+ next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+ else
+ {
+ next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+ if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
+ next_index = apif->per_interface_next_index;
+
+ /* redirect if feature path enabled */
+ vnet_feature_start_device_input_x1 (apif->sw_if_index, &next_index, &bt);
+ }
+
+ if ((((block_desc_t *) (block_start = rx_queue->rx_ring[block]))
+ ->hdr.bh1.block_status &
+ TP_STATUS_USER) != 0)
+ {
+ u32 n_required = 0;
+ bd = (block_desc_t *) block_start;
+
+ if (PREDICT_FALSE (rx_queue->is_rx_pending))
+ {
+ num_pkts = rx_queue->num_rx_pkts;
+ rx_frame_offset = rx_queue->rx_frame_offset;
+ rx_queue->is_rx_pending = 0;
+ }
+ else
+ {
+ num_pkts = bd->hdr.bh1.num_pkts;
+ rx_frame_offset = sizeof (block_desc_t);
+ total++;
+
+ if (TP_STATUS_BLK_TMO & bd->hdr.bh1.block_status)
+ timedout_blk++;
+ }
+
+ n_required = clib_max (num_pkts, VLIB_FRAME_SIZE);
+ n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
+ if (PREDICT_FALSE (n_free_bufs < n_required))
+ {
+ vec_validate (apm->rx_buffers[thread_index],
+ n_required + n_free_bufs - 1);
+ n_free_bufs += vlib_buffer_alloc (
+ vm, &apm->rx_buffers[thread_index][n_free_bufs], n_required);
+ vec_set_len (apm->rx_buffers[thread_index], n_free_bufs);
+ }
+
+ while (num_pkts && (n_free_bufs >= min_bufs))
+ {
+ u32 next0 = next_index;
+ u32 n_left_to_next;
+
+ vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+ while (num_pkts && n_left_to_next && (n_free_bufs >= min_bufs))
+ {
+ tph = (tpacket3_hdr_t *) (block_start + rx_frame_offset);
+
+ if (num_pkts > 1)
+ CLIB_PREFETCH (block_start + rx_frame_offset +
+ tph->tp_next_offset,
+ 2 * CLIB_CACHE_LINE_BYTES, LOAD);
+
+ vlib_buffer_t *b0 = 0, *first_b0 = 0, *prev_b0 = 0;
+ vnet_virtio_net_hdr_t *vnet_hdr = 0;
+ u32 data_len = tph->tp_snaplen;
+ u32 offset = 0;
+ u32 bi0 = ~0, first_bi0 = ~0;
+ u8 l4_hdr_sz = 0;
+
+ if (is_cksum_gso_enabled)
+ vnet_hdr =
+ (vnet_virtio_net_hdr_t *) ((u8 *) tph + tph->tp_mac -
+ sizeof (vnet_virtio_net_hdr_t));
+
+ // save current state and return
+ if (PREDICT_FALSE (((data_len / n_buffer_bytes) + 1) >
+ vec_len (apm->rx_buffers[thread_index])))
+ {
+ rx_queue->rx_frame_offset = rx_frame_offset;
+ rx_queue->num_rx_pkts = num_pkts;
+ rx_queue->is_rx_pending = 1;
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ goto done;
+ }
+
+ while (data_len)
+ {
+ /* grab free buffer */
+ u32 last_empty_buffer =
+ vec_len (apm->rx_buffers[thread_index]) - 1;
+ bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
+ vec_set_len (apm->rx_buffers[thread_index],
+ last_empty_buffer);
+ n_free_bufs--;
+
+ /* copy data */
+ u32 bytes_to_copy =
+ data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+ u32 vlan_len = 0;
+ u32 bytes_copied = 0;
+
+ b0 = vlib_get_buffer (vm, bi0);
+ b0->current_data = 0;
+
+ /* Kernel removes VLAN headers, so reconstruct VLAN */
+ if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
+ {
+ if (PREDICT_TRUE (offset == 0))
+ {
+ clib_memcpy_fast (vlib_buffer_get_current (b0),
+ (u8 *) tph + tph->tp_mac,
+ sizeof (ethernet_header_t));
+ ethernet_header_t *eth =
+ vlib_buffer_get_current (b0);
+ ethernet_vlan_header_t *vlan =
+ (ethernet_vlan_header_t *) (eth + 1);
+ vlan->priority_cfi_and_id =
+ clib_host_to_net_u16 (tph->hv1.tp_vlan_tci);
+ vlan->type = eth->type;
+ eth->type =
+ clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+ vlan_len = sizeof (ethernet_vlan_header_t);
+ bytes_copied = sizeof (ethernet_header_t);
+ }
+ }
+ clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
+ bytes_copied + vlan_len,
+ (u8 *) tph + tph->tp_mac + offset +
+ bytes_copied,
+ (bytes_to_copy - bytes_copied));
+
+ /* fill buffer header */
+ b0->current_length = bytes_to_copy + vlan_len;
+
+ if (offset == 0)
+ {
+ b0->total_length_not_including_first_buffer = 0;
+ b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ vnet_buffer (b0)->sw_if_index[VLIB_RX] =
+ apif->sw_if_index;
+ vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
+ first_b0 = b0;
+ first_bi0 = bi0;
+ if (is_cksum_gso_enabled)
+ {
+ if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+ fill_cksum_offload (first_b0, &l4_hdr_sz, is_ip);
+ if (vnet_hdr->gso_type & (VIRTIO_NET_HDR_GSO_TCPV4 |
+ VIRTIO_NET_HDR_GSO_TCPV6))
+ fill_gso_offload (first_b0, vnet_hdr->gso_size,
+ l4_hdr_sz);
+ }
+ }
+ else
+ buffer_add_to_chain (b0, first_b0, prev_b0, bi0);
+
+ prev_b0 = b0;
+ offset += bytes_to_copy;
+ data_len -= bytes_to_copy;
+ }
+ n_rx_packets++;
+ n_rx_bytes += tph->tp_snaplen;
+ to_next[0] = first_bi0;
+ to_next += 1;
+ n_left_to_next--;
+
+ /* drop partial packets */
+ if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
+ {
+ next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+ first_b0->error =
+ node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
+ }
+ else
+ {
+ if (PREDICT_FALSE (apif->mode == AF_PACKET_IF_MODE_IP))
+ {
+ switch (first_b0->data[0] & 0xf0)
+ {
+ case 0x40:
+ next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+ break;
+ case 0x60:
+ next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
+ break;
+ default:
+ next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+ break;
+ }
+ if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
+ next0 = apif->per_interface_next_index;
+ }
+ else
+ {
+ /* copy feature arc data from template */
+ first_b0->current_config_index = bt.current_config_index;
+ vnet_buffer (first_b0)->feature_arc_index =
+ vnet_buffer (&bt)->feature_arc_index;
+ }
+ }
+
+ /* trace */
+ if (PREDICT_FALSE (n_trace > 0 &&
+ vlib_trace_buffer (vm, node, next0, first_b0,
+ /* follow_chain */ 0)))
+ {
+ af_packet_input_trace_t *tr;
+ vlib_set_trace_count (vm, node, --n_trace);
+ tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+ tr->is_v3 = 1;
+ tr->next_index = next0;
+ tr->hw_if_index = apif->hw_if_index;
+ tr->queue_id = queue_id;
+ tr->block = block;
+ tr->block_start = bd;
+ tr->pkt_num = bd->hdr.bh1.num_pkts - num_pkts;
+ clib_memcpy_fast (&tr->bd, bd, sizeof (block_desc_t));
+ clib_memcpy_fast (&tr->tph3, tph, sizeof (tpacket3_hdr_t));
+ if (is_cksum_gso_enabled)
+ clib_memcpy_fast (&tr->vnet_hdr, vnet_hdr,
+ sizeof (vnet_virtio_net_hdr_t));
+ else
+ clib_memset_u8 (&tr->vnet_hdr, 0,
+ sizeof (vnet_virtio_net_hdr_t));
+ }
+
+ /* enque and take next packet */
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, first_bi0,
+ next0);
+
+ /* next packet */
+ num_pkts--;
+ rx_frame_offset += tph->tp_next_offset;
+ }
+
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ }
+
+ if (PREDICT_TRUE (num_pkts == 0))
+ {
+ bd->hdr.bh1.block_status = TP_STATUS_KERNEL;
+ block = (block + 1) % block_nr;
+ }
+ else
+ {
+ rx_queue->rx_frame_offset = rx_frame_offset;
+ rx_queue->num_rx_pkts = num_pkts;
+ rx_queue->is_rx_pending = 1;
+ }
+ }
+
+ rx_queue->next_rx_block = block;
+
+done:
+
+ if (apm->polling_count == 0)
+ {
+ if ((((block_desc_t *) (block_start = rx_queue->rx_ring[block]))
+ ->hdr.bh1.block_status &
+ TP_STATUS_USER) != 0)
+ vlib_node_set_state (vm, node->node_index, VLIB_NODE_STATE_POLLING);
+ else
+ vlib_node_set_state (vm, node->node_index, VLIB_NODE_STATE_INTERRUPT);
+ }
+
+ vlib_error_count (vm, node->node_index, AF_PACKET_INPUT_ERROR_TOTAL_RECV_BLK,
+ total);
+ vlib_error_count (vm, node->node_index, AF_PACKET_INPUT_ERROR_TIMEDOUT_BLK,
+ timedout_blk);
+
+ vlib_increment_combined_counter
+ (vnet_get_main ()->interface_main.combined_sw_if_counters
+ + VNET_INTERFACE_COUNTER_RX,
+ vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes);
+
+ vnet_device_increment_rx_packets (thread_index, n_rx_packets);
+ return n_rx_packets;
+}
+
+/*
+ * Receive packets from a TPACKET_V2 kernel ring (one ring frame per packet).
+ *
+ * Walks ring frames currently owned by user space (TP_STATUS_USER), copies
+ * each packet into one or more vlib buffers (chaining buffers when the
+ * packet exceeds the default buffer size), reconstructs the VLAN header the
+ * kernel stripped (TP_STATUS_VLAN_VALID), and — when checksum/GSO is
+ * enabled — applies offload metadata from the virtio net header that the
+ * TX side places just before tp_mac.  Each frame is handed back to the
+ * kernel by resetting tp_status to TP_STATUS_KERNEL.
+ *
+ * @param vm                   vlib main for this thread
+ * @param node                 this input node's runtime
+ * @param frame                unused (buffers are enqueued to next frames)
+ * @param apif                 af_packet interface owning the ring
+ * @param queue_id             RX queue index within apif
+ * @param is_cksum_gso_enabled compile-time specialized offload flag
+ * @return number of packets received on this invocation
+ */
+always_inline uword
+af_packet_v2_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+			      vlib_frame_t *frame, af_packet_if_t *apif,
+			      u16 queue_id, u8 is_cksum_gso_enabled)
+{
+  af_packet_main_t *apm = &af_packet_main;
+  af_packet_queue_t *rx_queue = vec_elt_at_index (apif->rx_queues, queue_id);
+  tpacket2_hdr_t *tph;
+  u32 next_index;
+  u32 block = 0;
+  u32 rx_frame;
+  u32 n_free_bufs;
+  u32 n_rx_packets = 0;
+  u32 n_rx_bytes = 0;
+  u32 *to_next = 0;
+  u32 frame_size = rx_queue->rx_req->req.tp_frame_size;
+  u32 frame_num = rx_queue->rx_req->req.tp_frame_nr;
+  u8 *block_start = rx_queue->rx_ring[block];
+  uword n_trace = vlib_get_trace_count (vm, node);
+  u32 thread_index = vm->thread_index;
+  u32 n_buffer_bytes = vlib_buffer_get_default_data_size (vm);
+  /* minimum buffers needed to hold one maximally-sized ring frame */
+  u32 min_bufs = rx_queue->rx_req->req.tp_frame_size / n_buffer_bytes;
+  u8 is_ip = (apif->mode == AF_PACKET_IF_MODE_IP);
+  vlib_buffer_t bt = {};
+
+  if (is_ip)
+    {
+      /* IP mode: provisional next; fixed up per packet from the version
+	 nibble below */
+      next_index = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+    }
+  else
+    {
+      next_index = VNET_DEVICE_INPUT_NEXT_ETHERNET_INPUT;
+      if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
+	next_index = apif->per_interface_next_index;
+
+      /* redirect if feature path enabled */
+      vnet_feature_start_device_input_x1 (apif->sw_if_index, &next_index, &bt);
+    }
+
+  /* top up the per-thread free-buffer cache to at least one full frame */
+  n_free_bufs = vec_len (apm->rx_buffers[thread_index]);
+  if (PREDICT_FALSE (n_free_bufs < VLIB_FRAME_SIZE))
+    {
+      vec_validate (apm->rx_buffers[thread_index],
+		    VLIB_FRAME_SIZE + n_free_bufs - 1);
+      n_free_bufs += vlib_buffer_alloc (
+	vm, &apm->rx_buffers[thread_index][n_free_bufs], VLIB_FRAME_SIZE);
+      vec_set_len (apm->rx_buffers[thread_index], n_free_bufs);
+    }
+
+  rx_frame = rx_queue->next_rx_frame;
+  tph = (tpacket2_hdr_t *) (block_start + rx_frame * frame_size);
+  while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs))
+    {
+      vlib_buffer_t *b0 = 0, *first_b0 = 0, *prev_b0 = 0;
+      u32 next0 = next_index;
+
+      u32 n_left_to_next;
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+      while ((tph->tp_status & TP_STATUS_USER) && (n_free_bufs > min_bufs) &&
+	     n_left_to_next)
+	{
+	  /* BUGFIX: reset per packet. Previously next0 kept its value across
+	     packets, so after a partial-packet drop every following packet
+	     in this frame was also dropped in ethernet mode (that path never
+	     reassigns next0). */
+	  next0 = next_index;
+
+	  vnet_virtio_net_hdr_t *vnet_hdr = 0;
+	  u32 data_len = tph->tp_snaplen;
+	  u32 offset = 0;
+	  u32 bi0 = 0, first_bi0 = 0;
+	  u8 l4_hdr_sz = 0;
+
+	  if (is_cksum_gso_enabled)
+	    /* virtio net header sits immediately before the packet data */
+	    vnet_hdr =
+	      (vnet_virtio_net_hdr_t *) ((u8 *) tph + tph->tp_mac -
+					 sizeof (vnet_virtio_net_hdr_t));
+	  while (data_len)
+	    {
+	      /* grab free buffer */
+	      u32 last_empty_buffer =
+		vec_len (apm->rx_buffers[thread_index]) - 1;
+	      bi0 = apm->rx_buffers[thread_index][last_empty_buffer];
+	      b0 = vlib_get_buffer (vm, bi0);
+	      vec_set_len (apm->rx_buffers[thread_index], last_empty_buffer);
+	      n_free_bufs--;
+
+	      /* copy data */
+	      u32 bytes_to_copy =
+		data_len > n_buffer_bytes ? n_buffer_bytes : data_len;
+	      u32 vlan_len = 0;
+	      u32 bytes_copied = 0;
+	      b0->current_data = 0;
+	      /* Kernel removes VLAN headers, so reconstruct VLAN */
+	      if (PREDICT_FALSE (tph->tp_status & TP_STATUS_VLAN_VALID))
+		{
+		  if (PREDICT_TRUE (offset == 0))
+		    {
+		      /* copy the ethernet header, then splice the VLAN tag
+			 between it and the original ethertype */
+		      clib_memcpy_fast (vlib_buffer_get_current (b0),
+					(u8 *) tph + tph->tp_mac,
+					sizeof (ethernet_header_t));
+		      ethernet_header_t *eth = vlib_buffer_get_current (b0);
+		      ethernet_vlan_header_t *vlan =
+			(ethernet_vlan_header_t *) (eth + 1);
+		      vlan->priority_cfi_and_id =
+			clib_host_to_net_u16 (tph->tp_vlan_tci);
+		      vlan->type = eth->type;
+		      eth->type = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
+		      vlan_len = sizeof (ethernet_vlan_header_t);
+		      bytes_copied = sizeof (ethernet_header_t);
+		    }
+		}
+	      /* copy the (rest of the) payload after any reconstructed tag */
+	      clib_memcpy_fast (((u8 *) vlib_buffer_get_current (b0)) +
+				  bytes_copied + vlan_len,
+				(u8 *) tph + tph->tp_mac + offset +
+				  bytes_copied,
+				(bytes_to_copy - bytes_copied));
+
+	      /* fill buffer header */
+	      b0->current_length = bytes_to_copy + vlan_len;
+
+	      if (offset == 0)
+		{
+		  /* head buffer: interface metadata + offload flags */
+		  b0->total_length_not_including_first_buffer = 0;
+		  b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+		  vnet_buffer (b0)->sw_if_index[VLIB_RX] = apif->sw_if_index;
+		  vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~0;
+		  first_bi0 = bi0;
+		  first_b0 = vlib_get_buffer (vm, first_bi0);
+
+		  if (is_cksum_gso_enabled)
+		    {
+		      if (vnet_hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)
+			fill_cksum_offload (first_b0, &l4_hdr_sz, is_ip);
+		      if (vnet_hdr->gso_type & (VIRTIO_NET_HDR_GSO_TCPV4 |
+						VIRTIO_NET_HDR_GSO_TCPV6))
+			fill_gso_offload (first_b0, vnet_hdr->gso_size,
+					  l4_hdr_sz);
+		    }
+		}
+	      else
+		/* continuation buffer: link into the chain */
+		buffer_add_to_chain (b0, first_b0, prev_b0, bi0);
+
+	      prev_b0 = b0;
+	      offset += bytes_to_copy;
+	      data_len -= bytes_to_copy;
+	    }
+	  n_rx_packets++;
+	  n_rx_bytes += tph->tp_snaplen;
+	  to_next[0] = first_bi0;
+	  to_next += 1;
+	  n_left_to_next--;
+
+	  /* drop partial packets */
+	  if (PREDICT_FALSE (tph->tp_len != tph->tp_snaplen))
+	    {
+	      next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+	      first_b0->error =
+		node->errors[AF_PACKET_INPUT_ERROR_PARTIAL_PKT];
+	    }
+	  else
+	    {
+	      if (PREDICT_FALSE (is_ip))
+		{
+		  /* no L2 header in IP mode: dispatch on the IP version
+		     nibble of the first payload byte */
+		  switch (first_b0->data[0] & 0xf0)
+		    {
+		    case 0x40:
+		      next0 = VNET_DEVICE_INPUT_NEXT_IP4_INPUT;
+		      break;
+		    case 0x60:
+		      next0 = VNET_DEVICE_INPUT_NEXT_IP6_INPUT;
+		      break;
+		    default:
+		      next0 = VNET_DEVICE_INPUT_NEXT_DROP;
+		      break;
+		    }
+		  if (PREDICT_FALSE (apif->per_interface_next_index != ~0))
+		    next0 = apif->per_interface_next_index;
+		}
+	      else
+		{
+		  /* copy feature arc data from template */
+		  first_b0->current_config_index = bt.current_config_index;
+		  vnet_buffer (first_b0)->feature_arc_index =
+		    vnet_buffer (&bt)->feature_arc_index;
+		}
+	    }
+
+	  /* trace */
+	  if (PREDICT_FALSE (n_trace > 0 &&
+			     vlib_trace_buffer (vm, node, next0, first_b0,
+						/* follow_chain */ 0)))
+	    {
+	      af_packet_input_trace_t *tr;
+	      vlib_set_trace_count (vm, node, --n_trace);
+	      tr = vlib_add_trace (vm, node, first_b0, sizeof (*tr));
+	      tr->is_v3 = 0;
+	      tr->next_index = next0;
+	      tr->hw_if_index = apif->hw_if_index;
+	      tr->queue_id = queue_id;
+	      clib_memcpy_fast (&tr->tph2, tph, sizeof (struct tpacket2_hdr));
+	      if (is_cksum_gso_enabled)
+		clib_memcpy_fast (&tr->vnet_hdr, vnet_hdr,
+				  sizeof (vnet_virtio_net_hdr_t));
+	      else
+		clib_memset_u8 (&tr->vnet_hdr, 0,
+				sizeof (vnet_virtio_net_hdr_t));
+	    }
+
+	  /* enque and take next packet */
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+					   n_left_to_next, first_bi0, next0);
+
+	  /* next packet: return the frame to the kernel and advance */
+	  tph->tp_status = TP_STATUS_KERNEL;
+	  rx_frame = (rx_frame + 1) % frame_num;
+	  tph = (struct tpacket2_hdr *) (block_start + rx_frame * frame_size);
+	}
+
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+
+  rx_queue->next_rx_frame = rx_frame;
+
+  vlib_increment_combined_counter (
+    vnet_get_main ()->interface_main.combined_sw_if_counters +
+      VNET_INTERFACE_COUNTER_RX,
+    vlib_get_thread_index (), apif->hw_if_index, n_rx_packets, n_rx_bytes);
+
+  vnet_device_increment_rx_packets (thread_index, n_rx_packets);
+  return n_rx_packets;
+}
+
+/* Dispatch to the ring-version-specific input routine.  The interface is
+ * created with either a TPACKET_V3 or TPACKET_V2 kernel ring; everything
+ * else about the call is forwarded unchanged. */
+always_inline uword
+af_packet_device_input_fn (vlib_main_t *vm, vlib_node_runtime_t *node,
+			   vlib_frame_t *frame, af_packet_if_t *apif,
+			   u16 queue_id, u8 is_cksum_gso_enabled)
+{
+  return (apif->version == TPACKET_V3) ?
+	   af_packet_v3_device_input_fn (vm, node, frame, apif, queue_id,
+					 is_cksum_gso_enabled) :
+	   af_packet_v2_device_input_fn (vm, node, frame, apif, queue_id,
+					 is_cksum_gso_enabled);
+}
+
+/* Input node dispatch function: walk the RX-queue poll vector assigned to
+ * this worker and run the device input routine for every admin-up
+ * interface.  The checksum/GSO flag is passed as a literal constant so the
+ * always_inline workers are specialized at compile time. */
+VLIB_NODE_FN (af_packet_input_node) (vlib_main_t * vm,
+				     vlib_node_runtime_t * node,
+				     vlib_frame_t * frame)
+{
+  af_packet_main_t *apm = &af_packet_main;
+  vnet_hw_if_rxq_poll_vector_t *pv =
+    vnet_hw_if_get_rxq_poll_vector (vm, node);
+  u32 n_rx_packets = 0;
+
+  for (int i = 0; i < vec_len (pv); i++)
+    {
+      af_packet_if_t *apif =
+	vec_elt_at_index (apm->interfaces, pv[i].dev_instance);
+
+      /* skip interfaces that are administratively down */
+      if (!apif->is_admin_up)
+	continue;
+
+      if (apif->is_cksum_gso_enabled)
+	n_rx_packets +=
+	  af_packet_device_input_fn (vm, node, frame, apif, pv[i].queue_id, 1);
+      else
+	n_rx_packets +=
+	  af_packet_device_input_fn (vm, node, frame, apif, pv[i].queue_id, 0);
+    }
+  return n_rx_packets;
+}
+
+/*
+ * af-packet input node registration.  Registered as a sibling of
+ * "device-input" so it shares that node's next-node arcs.  Starts in
+ * interrupt mode; the input path switches it to polling while ring
+ * blocks are pending and back again when the ring drains.
+ */
+VLIB_REGISTER_NODE (af_packet_input_node) = {
+  .name = "af-packet-input",
+  .flags = VLIB_NODE_FLAG_TRACE_SUPPORTED,
+  .sibling_of = "device-input",
+  .format_trace = format_af_packet_input_trace,
+  .type = VLIB_NODE_TYPE_INPUT,
+  .state = VLIB_NODE_STATE_INTERRUPT,
+  .n_errors = AF_PACKET_INPUT_N_ERROR,
+  .error_strings = af_packet_input_error_strings,
+};
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/af_packet/plugin.c b/src/plugins/af_packet/plugin.c
new file mode 100644
index 00000000000..0146dd3e740
--- /dev/null
+++ b/src/plugins/af_packet/plugin.c
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2022 Cisco Systems, Inc.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/plugin/plugin.h>
+#include <vpp/app/version.h>
+
+VLIB_PLUGIN_REGISTER () = {
+ .version = VPP_BUILD_VER,
+ .description = "af-packet",
+};