From 7b53c036e6bf56623b8273018ff1c8cc62847857 Mon Sep 17 00:00:00 2001
From: Ricardo Salveti
Date: Mon, 25 Jul 2016 13:22:22 -0300
Subject: Imported Upstream version 16.07-rc4

Change-Id: Ic57f6a3726f2dbd1682223648d91310f45705327
Signed-off-by: Ricardo Salveti
---
 doc/guides/howto/flow_bifurcation.rst              | 299 +++++++++
 doc/guides/howto/img/flow_bifurcation_overview.svg | 544 ++++++++++++++++
 doc/guides/howto/img/ixgbe_bifu_queue_idx.svg      | 101 +++
 doc/guides/howto/img/lm_bond_virtio_sriov.svg      | 666 +++++++++++++++++++
 doc/guides/howto/img/lm_vhost_user.svg             | 644 +++++++++++++++++++
 doc/guides/howto/index.rst                         |  40 ++
 doc/guides/howto/lm_bond_virtio_sriov.rst          | 713 +++++++++++++++++++++
 doc/guides/howto/lm_virtio_vhost_user.rst          | 469 ++++++++++++++
 8 files changed, 3476 insertions(+)
 create mode 100644 doc/guides/howto/flow_bifurcation.rst
 create mode 100644 doc/guides/howto/img/flow_bifurcation_overview.svg
 create mode 100644 doc/guides/howto/img/ixgbe_bifu_queue_idx.svg
 create mode 100644 doc/guides/howto/img/lm_bond_virtio_sriov.svg
 create mode 100644 doc/guides/howto/img/lm_vhost_user.svg
 create mode 100644 doc/guides/howto/index.rst
 create mode 100644 doc/guides/howto/lm_bond_virtio_sriov.rst
 create mode 100644 doc/guides/howto/lm_virtio_vhost_user.rst

diff --git a/doc/guides/howto/flow_bifurcation.rst b/doc/guides/howto/flow_bifurcation.rst
new file mode 100644
index 00000000..a1c6262b
--- /dev/null
+++ b/doc/guides/howto/flow_bifurcation.rst
@@ -0,0 +1,299 @@
+.. BSD LICENSE
+    Copyright(c) 2016 Intel Corporation. All rights reserved.
+    All rights reserved.
+
+    Redistribution and use in source and binary forms, with or without
+    modification, are permitted provided that the following conditions
+    are met:
+
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in
+      the documentation and/or other materials provided with the
+      distribution.
+    * Neither the name of Intel Corporation nor the names of its
+      contributors may be used to endorse or promote products derived
+      from this software without specific prior written permission.
+
+    THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+    "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+    LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+    A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+    OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+    SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+    LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+    DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+    THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+    (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+    OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+Flow Bifurcation How-to Guide
+=============================
+
+Flow Bifurcation is a mechanism that uses hardware-capable Ethernet devices
+to split traffic between Linux user space and kernel space. Since it is a
+hardware-assisted feature, this approach can provide line-rate processing
+capability.
+Other than :ref:`KNI <kni>`, the software is only required to enable device
+configuration; there is no need to take care of the packet movement during
+the traffic split. This can yield better performance with less CPU overhead.
+
+Flow Bifurcation splits the incoming data traffic between user space
+applications (such as DPDK applications) and/or kernel space programs (such
+as the Linux kernel stack). It can direct some traffic, for example data
+plane traffic, to DPDK, while directing some other traffic, for example
+control plane traffic, to the traditional Linux networking stack.
+
+There are a number of technical options to achieve this. A typical example is
+to combine the technology of SR-IOV and packet classification filtering.
+
+SR-IOV is a PCI standard that allows the same physical adapter to be split
+into multiple virtual functions. Each virtual function (VF) has queues that
+are separate from those of the physical function (PF). The network adapter
+directs traffic to a virtual function with a matching destination MAC
+address. In a sense, SR-IOV has the capability for queue division.
+
+Packet classification filtering is a hardware capability available on most
+network adapters. Filters can be configured to direct specific flows to a
+given receive queue in hardware. Different NICs may have different filter
+types to direct flows to a Virtual Function or to a queue that belongs to it.
+
+In this way the Linux networking stack can receive specific traffic through
+the kernel driver, while a DPDK application can receive specific traffic
+bypassing the Linux kernel by using drivers like VFIO or the DPDK
+``igb_uio`` module.
+
+.. _figure_flow_bifurcation_overview:
+
+.. figure:: img/flow_bifurcation_overview.*
+
+   Flow Bifurcation Overview
+
+
+Using Flow Bifurcation on IXGBE in Linux
+----------------------------------------
+
+On Intel 82599 10 Gigabit Ethernet Controller series NICs, Flow Bifurcation
+can be achieved by the SR-IOV and Intel Flow Director technologies. Traffic
+can be directed to queues by the Flow Director capability, typically by
+matching the 5-tuple of UDP/TCP packets.
+
+The typical procedure to achieve this is as follows:
+
+#. Boot the system without iommu, or with ``iommu=pt``.
+
+#. Create Virtual Functions:
+
+   .. code-block:: console
+
+       echo 2 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+
+#. Enable and set flow filters:
+
+   .. code-block:: console
+
+       ethtool -K eth1 ntuple on
+       ethtool -N eth1 flow-type udp4 src-ip 192.0.2.2 dst-ip 198.51.100.2 \
+               action $queue_index_in_VF0
+       ethtool -N eth1 flow-type udp4 src-ip 198.51.100.2 dst-ip 192.0.2.2 \
+               action $queue_index_in_VF1
+
+   Where:
+
+   * ``$queue_index_in_VFn``: Bits 39:32 of the value define the VF ID + 1;
+     the lower 32 bits indicate the queue index of the VF (see the shell
+     sketch below). Thus:
+
+     * ``$queue_index_in_VF0`` = ``((0x1 & 0xFF) << 32) + [queue index]``.
+
+     * ``$queue_index_in_VF1`` = ``((0x2 & 0xFF) << 32) + [queue index]``.
+
+   .. _figure_ixgbe_bifu_queue_idx:
+
+   .. figure:: img/ixgbe_bifu_queue_idx.*
+
+#. Compile the DPDK application and insert ``igb_uio`` or probe the
+   ``vfio-pci`` kernel modules as normal.
+
+#. Bind the virtual functions:
+
+   .. code-block:: console
+
+       modprobe vfio-pci
+       dpdk_nic_bind.py -b vfio-pci 01:10.0
+       dpdk_nic_bind.py -b vfio-pci 01:10.1
+
+#. Run a DPDK application on the VFs:
+
+   .. code-block:: console
+
+       testpmd -c 0xff -n 4 -w 01:10.0 -w 01:10.1 -- -i --forward-mode=mac
+
+In this example, traffic matching the filter rules goes to the VF queues
+specified in those rules, while all other, non-matching traffic goes to the
+default queues (or is scaled across queues) in the PF. That is to say, UDP
+packets with the specified source and destination IP addresses go to the
+DPDK application, while all other traffic, from different hosts or carrying
+different protocols, goes to the Linux networking stack.
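+The 64-bit ``action`` value for these ``ethtool`` rules can be computed with
+ordinary shell arithmetic. Below is a minimal sketch, assuming bash and
+illustrative VF/queue numbers:
+
+.. code-block:: sh
+
+    vf=0        # target VF; bits 39:32 of the action carry the VF ID + 1
+    queue=1     # queue index inside that VF, carried in the lower 32 bits
+    printf 'action 0x%x\n' $(( ((vf + 1) & 0xFF) << 32 | queue ))
+    # For VF 0, queue 1 this prints: action 0x100000001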
+.. note::
+
+    * The above steps work on the Linux kernel v4.2.
+
+    * The Flow Bifurcation is implemented in the Linux kernel and the ixgbe
+      kernel driver using the following patches:
+
+      * `ethtool: Add helper routines to pass vf to rx_flow_spec `_
+
+      * `ixgbe: Allow flow director to use entire queue space `_
+
+    * The Ethtool version used in this example is 3.18.
+
+
+Using Flow Bifurcation on I40E in Linux
+---------------------------------------
+
+On Intel X710/XL710 series Ethernet Controllers, Flow Bifurcation can be
+achieved by SR-IOV, the Cloud Filter and the L3 VEB switch. Traffic can be
+directed to queues by the Cloud Filter and the L3 VEB switch matching rules.
+
+* L3 VEB filters work for non-tunneled packets. They can direct a packet to a
+  queue in a VF based just on its destination IP address.
+
+* Cloud filters work for the following types of tunneled packets:
+
+  * Inner MAC.
+
+  * Inner MAC + VNI.
+
+  * Outer MAC + Inner MAC + VNI.
+
+  * Inner MAC + Inner VLAN + VNI.
+
+  * Inner MAC + Inner VLAN.
+
+The typical procedure to achieve this is as follows:
+
+#. Boot the system without iommu, or with ``iommu=pt``.
+
+#. Build and insert the ``i40e.ko`` module.
+
+#. Create Virtual Functions:
+
+   .. code-block:: console
+
+       echo 2 > /sys/bus/pci/devices/0000:01:00.0/sriov_numvfs
+
+#. Add UDP port offload to the NIC if using a cloud filter:
+
+   .. code-block:: console
+
+       ip li add vxlan0 type vxlan id 42 group 239.1.1.1 local 10.16.43.214 dev <netdev>
+       ifconfig vxlan0 up
+       ip -d li show vxlan0
+
+   .. note::
+
+       Output such as ``add vxlan port 8472, index 0 success`` should be
+       found in the system log.
+
+#. Examples of enabling and setting flow filters:
+
+   * An L3 VEB filter, for a route whose destination IP is 192.168.50.108, to
+     VF 0's queue 2:
+
+     .. code-block:: console
+
+         ethtool -N <dev_name> flow-type ip4 dst-ip 192.168.50.108 \
+                 user-def 0xffffffff00000000 action 2 loc 8
+
+   * Inner MAC, for a route whose inner destination MAC is 00:00:00:00:09:00,
+     to the PF's queue 6:
+
+     .. code-block:: console
+
+         ethtool -N <dev_name> flow-type ether dst 00:00:00:00:00:00 \
+                 m ff:ff:ff:ff:ff:ff src 00:00:00:00:09:00 m 00:00:00:00:00:00 \
+                 user-def 0xffffffff00000003 action 6 loc 1
+
+   * Inner MAC + VNI, for a route whose inner destination MAC is
+     00:00:00:00:09:00 and whose VNI is 8, to the PF's queue 4:
+
+     .. code-block:: console
+
+         ethtool -N <dev_name> flow-type ether dst 00:00:00:00:00:00 \
+                 m ff:ff:ff:ff:ff:ff src 00:00:00:00:09:00 m 00:00:00:00:00:00 \
+                 user-def 0x800000003 action 4 loc 4
+
+   * Outer MAC + Inner MAC + VNI, for a route whose outer MAC is
+     68:05:ca:24:03:8b, whose inner destination MAC is c2:1a:e1:53:bc:57, and
+     whose VNI is 8, to the PF's queue 2:
+
+     .. code-block:: console
+
+         ethtool -N <dev_name> flow-type ether dst 68:05:ca:24:03:8b \
+                 m 00:00:00:00:00:00 src c2:1a:e1:53:bc:57 m 00:00:00:00:00:00 \
+                 user-def 0x800000003 action 2 loc 2
+
+   * Inner MAC + Inner VLAN + VNI, for a route whose inner destination MAC is
+     00:00:00:00:20:00, whose inner VLAN is 10, and whose VNI is 8, to VF 0's
+     queue 1:
+
+     .. code-block:: console
+
+         ethtool -N <dev_name> flow-type ether dst 00:00:00:00:01:00 \
+                 m ff:ff:ff:ff:ff:ff src 00:00:00:00:20:00 m 00:00:00:00:00:00 \
+                 vlan 10 user-def 0x800000000 action 1 loc 5
+
+   * Inner MAC + Inner VLAN, for a route whose inner destination MAC is
+     00:00:00:00:20:00 and whose inner VLAN is 10, to VF 0's queue 1:
+
+     .. code-block:: console
+
+         ethtool -N <dev_name> flow-type ether dst 00:00:00:00:01:00 \
+                 m ff:ff:ff:ff:ff:ff src 00:00:00:00:20:00 m 00:00:00:00:00:00 \
+                 vlan 10 user-def 0xffffffff00000000 action 1 loc 5
+
+   .. note::
+
+       * If the upper 32 bits of ``user-def`` are ``0xffffffff``, then the
+         filter can be used for programming an L3 VEB filter; otherwise the
+         upper 32 bits of ``user-def`` can carry the tenant ID/VNI if
+         specified/required.
+
+       * Cloud filters can be defined with inner MAC, outer MAC, inner IP,
+         inner VLAN and VNI as part of the cloud tuple. It is always the
+         destination (not source) MAC/IP that these filters use. In all of
+         these examples the ``dst`` and ``src`` MAC address fields are
+         overloaded: ``dst`` holds the outer MAC and ``src`` holds the inner
+         MAC.
+
+       * The filter directs a packet matching the rule to the VF ID specified
+         in the lower 32 bits of ``user-def``, to the queue specified by
+         ``action`` (see the shell sketch at the end of this section).
+
+       * If the VF ID specified by the lower 32 bits of ``user-def`` is
+         greater than or equal to ``max_vfs``, then the filter is for the PF
+         queues.
+
+#. Compile the DPDK application and insert ``igb_uio`` or probe the
+   ``vfio-pci`` kernel modules as normal.
+
+#. Bind the virtual functions:
+
+   .. code-block:: console
+
+       modprobe vfio-pci
+       dpdk_nic_bind.py -b vfio-pci 01:10.0
+       dpdk_nic_bind.py -b vfio-pci 01:10.1
+
+#. Run a DPDK application on the VFs:
+
+   .. code-block:: console
+
+       testpmd -c 0xff -n 4 -w 01:10.0 -w 01:10.1 -- -i --forward-mode=mac
+
+.. note::
+
+    * The above steps work on the i40e Linux kernel driver v1.5.16.
+
+    * The Ethtool version used in this example is 3.18. The mask ``ff`` means
+      'not involved', while ``00`` or no mask means 'involved'.
+
+    * For more details of the configuration, refer to the
+      `cloud filter test plan `_.
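+As with the ixgbe ``action`` value above, the 64-bit ``user-def`` value can
+be built with shell arithmetic. A minimal sketch, assuming bash, reproducing
+the ``0x800000003`` value used by the cloud filter examples above:
+
+.. code-block:: sh
+
+    vni=8      # tenant ID / VNI, carried in the upper 32 bits
+    vf_id=3    # lower 32 bits; a value >= max_vfs selects the PF queues
+    printf 'user-def 0x%x\n' $(( (vni << 32) | vf_id ))
+    # Prints: user-def 0x800000003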
diff --git a/doc/guides/howto/img/flow_bifurcation_overview.svg b/doc/guides/howto/img/flow_bifurcation_overview.svg
new file mode 100644
index 00000000..4fa27648
--- /dev/null
+++ b/doc/guides/howto/img/flow_bifurcation_overview.svg
@@ -0,0 +1,544 @@
[SVG figure, markup omitted: Flow Bifurcation overview — a NIC whose filters
support traffic steering to a VF; flows go either to the PF Rx queues (0-N)
handled by the Linux kernel PF driver and socket interface, or to the Rx
queues (0-M) of VF 0 consumed by DPDK; tools program the filters to direct
flows to a queue index in a specified VF.]
\ No newline at end of file
diff --git a/doc/guides/howto/img/ixgbe_bifu_queue_idx.svg b/doc/guides/howto/img/ixgbe_bifu_queue_idx.svg
new file mode 100644
index 00000000..f7e2bd80
--- /dev/null
+++ b/doc/guides/howto/img/ixgbe_bifu_queue_idx.svg
@@ -0,0 +1,101 @@
[SVG figure, markup omitted: layout of the 64-bit queue index value — bits
63:40 are 0x000000, bits 39:32 hold "VF ID + 1", bits 31:0 hold the queue
index.]
diff --git a/doc/guides/howto/img/lm_bond_virtio_sriov.svg b/doc/guides/howto/img/lm_bond_virtio_sriov.svg
new file mode 100644
index 00000000..d913ae01
--- /dev/null
+++ b/doc/guides/howto/img/lm_bond_virtio_sriov.svg
@@ -0,0 +1,666 @@
[SVG figure, markup omitted: live migration test setup — Server 1 and
Server 2 (Linux, KVM, QEMU), each hosting a VM running the DPDK testpmd app
with a bonded device (virtio and VF slaves), the kernel PF driver, and a SW
bridge with tap and PF connected; 10 Gb NICs on both servers connect to a
switch with 10 Gb ports and a 10 Gb traffic generator; an NFS server holds
the VM disk image; a 10 Gb migration link joins the servers.]
diff --git a/doc/guides/howto/img/lm_vhost_user.svg b/doc/guides/howto/img/lm_vhost_user.svg
new file mode 100644
index 00000000..3601cf11
--- /dev/null
+++ b/doc/guides/howto/img/lm_vhost_user.svg
@@ -0,0 +1,644 @@
[SVG figure, markup omitted: live migration test setup with vhost_user —
Server 1 and Server 2 (Linux, KVM, QEMU 2.5), each hosting a VM running the
DPDK testpmd app over DPDK virtio PMDs, with the DPDK PF PMD and vhost_user
on the host; 10 Gb NICs connect to a switch with 10 Gb ports and a 10 Gb
traffic generator; an NFS server holds the VM disk image; a 10 Gb migration
link joins the servers.]
diff --git a/doc/guides/howto/index.rst b/doc/guides/howto/index.rst
new file mode 100644
index 00000000..aa6d3e2b
--- /dev/null
+++ b/doc/guides/howto/index.rst
@@ -0,0 +1,40 @@
+.. BSD LICENSE
+    Copyright(c) 2016 Intel Corporation. All rights reserved.
+    All rights reserved.
+ + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +How To User Guides +================== + +.. toctree:: + :maxdepth: 2 + :numbered: + + lm_bond_virtio_sriov + lm_virtio_vhost_user + flow_bifurcation diff --git a/doc/guides/howto/lm_bond_virtio_sriov.rst b/doc/guides/howto/lm_bond_virtio_sriov.rst new file mode 100644 index 00000000..49666f13 --- /dev/null +++ b/doc/guides/howto/lm_bond_virtio_sriov.rst @@ -0,0 +1,713 @@ +.. BSD LICENSE + Copyright(c) 2016 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+Live Migration of VM with SR-IOV VF
+===================================
+
+Overview
+--------
+
+It is not possible to migrate a Virtual Machine which has an SR-IOV Virtual
+Function (VF). To get around this problem the bonding PMD is used.
+
+The following sections show an example of how to do this.
+
+Test Setup
+----------
+
+A bonded device is created in the VM. The virtio and VF PMDs are added as
+slaves to the bonded device. The VF is set as the primary slave of the
+bonded device.
+
+A bridge must be set up on the host connecting the tap device (which is the
+backend of the Virtio device) and the Physical Function (PF) device.
+
+To test the Live Migration two servers with identical operating systems
+installed are used. KVM and QEMU 2.3 are also required on the servers.
+
+In this example, the servers have Niantic and/or Fortville NICs installed.
+The NICs on both servers are connected to a switch which is also connected
+to the traffic generator.
+
+The switch is configured to broadcast traffic on all the NIC ports.
+A :ref:`Sample switch configuration <lm_bond_virtio_sriov_switch_conf>`
+can be found in this section.
+
+The host is running the Kernel PF driver (ixgbe or i40e).
+
+The IP address of host_server_1 is 10.237.212.46.
+
+The IP address of host_server_2 is 10.237.212.131.
+
+.. _figure_lm_bond_virtio_sriov:
+
+.. figure:: img/lm_bond_virtio_sriov.*
+
+Live Migration steps
+--------------------
+
+The sample scripts mentioned in the steps below can be found in the
+:ref:`Sample host scripts <lm_bond_virtio_sriov_host_scripts>` and
+:ref:`Sample VM scripts <lm_bond_virtio_sriov_vm_scripts>` sections.
+
+On host_server_1: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./setup_vf_on_212_46.sh
+
+For the Fortville NIC.
+
+.. code-block:: console
+
+    ./vm_virtio_vf_i40e_212_46.sh
+
+For the Niantic NIC.
+
+.. code-block:: console
+
+    ./vm_virtio_vf_one_212_46.sh
+
+On host_server_1: Terminal 2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./setup_bridge_on_212_46.sh
+    ./connect_to_qemu_mon_on_host.sh
+    (qemu)
+
+On host_server_1: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**In VM on host_server_1:**
+
+.. code-block:: console
+
+    cd /root/dpdk/vm_scripts
+    ./setup_dpdk_in_vm.sh
+    ./run_testpmd_bonding_in_vm.sh
+
+    testpmd> show port info all
+
+The ``mac_addr`` command only works with the kernel PF for Niantic.
+
+.. code-block:: console
+
+    testpmd> mac_addr add port 1 vf 0 AA:BB:CC:DD:EE:FF
+
+The syntax of the ``testpmd`` command is ``create bonded device (mode)
+(socket)``. Mode 1 is active backup.
+
+Virtio is port 0 (P0). The VF is port 1 (P1). Bonding is port 2 (P2).
+
+.. code-block:: console
+
+    testpmd> create bonded device 1 0
+    Created new bonded device eth_bond_testpmd_0 on (port 2).
+    testpmd> add bonding slave 0 2
+    testpmd> add bonding slave 1 2
+    testpmd> show bonding config 2
+
+The syntax of the ``testpmd`` command is ``set bonding primary (slave id)
+(port id)``. Set the primary to P1 before starting the bonding port.
+
+.. code-block:: console
+
+    testpmd> set bonding primary 1 2
+    testpmd> show bonding config 2
+    testpmd> port start 2
+    Port 2: 02:09:C0:68:99:A5
+    Checking link statuses...
+    Port 0 Link Up - speed 10000 Mbps - full-duplex
+    Port 1 Link Up - speed 10000 Mbps - full-duplex
+    Port 2 Link Up - speed 10000 Mbps - full-duplex
+
+    testpmd> show bonding config 2
+
+Primary is now P1. There are 2 active slaves.
+
+Use P2 only for forwarding.
+
+.. 
code-block:: console + + testpmd> set portlist 2 + testpmd> show config fwd + testpmd> set fwd mac + testpmd> start + testpmd> show bonding config 2 + +Primary is now P1. There are 2 active slaves. + +.. code-block:: console + + testpmd> show port stats all + +VF traffic is seen at P1 and P2. + +.. code-block:: console + + testpmd> clear port stats all + testpmd> set bonding primary 0 2 + testpmd> remove bonding slave 1 2 + testpmd> show bonding config 2 + +Primary is now P0. There is 1 active slave. + +.. code-block:: console + + testpmd> clear port stats all + testpmd> show port stats all + +No VF traffic is seen at P0 and P2, VF MAC address still present. + +.. code-block:: console + + testpmd> port stop 1 + testpmd> port close 1 + +Port close should remove VF MAC address, it does not remove perm_addr. + +The ``mac_addr`` command only works with the kernel PF for Niantic. + +.. code-block:: console + + testpmd> mac_addr remove 1 AA:BB:CC:DD:EE:FF + testpmd> port detach 1 + Port '0000:00:04.0' is detached. Now total ports is 2 + testpmd> show port stats all + +No VF traffic is seen at P0 and P2. + +On host_server_1: Terminal 2 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: console + + (qemu) device_del vf1 + + +On host_server_1: Terminal 1 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**In VM on host_server_1:** + +.. code-block:: console + + testpmd> show bonding config 2 + +Primary is now P0. There is 1 active slave. + +.. code-block:: console + + testpmd> show port info all + testpmd> show port stats all + +On host_server_2: Terminal 1 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: console + + cd /root/dpdk/host_scripts + ./setup_vf_on_212_131.sh + ./vm_virtio_one_migrate.sh + +On host_server_2: Terminal 2 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: console + + ./setup_bridge_on_212_131.sh + ./connect_to_qemu_mon_on_host.sh + (qemu) info status + VM status: paused (inmigrate) + (qemu) + +On host_server_1: Terminal 2 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Check that the switch is up before migrating. + +.. code-block:: console + + (qemu) migrate tcp:10.237.212.131:5555 + (qemu) info status + VM status: paused (postmigrate) + +For the Niantic NIC. + +.. code-block:: console + + (qemu) info migrate + capabilities: xbzrle: off rdma-pin-all: off auto-converge: off zero-blocks: off + Migration status: completed + total time: 11834 milliseconds + downtime: 18 milliseconds + setup: 3 milliseconds + transferred ram: 389137 kbytes + throughput: 269.49 mbps + remaining ram: 0 kbytes + total ram: 1590088 kbytes + duplicate: 301620 pages + skipped: 0 pages + normal: 96433 pages + normal bytes: 385732 kbytes + dirty sync count: 2 + (qemu) quit + +For the Fortville NIC. + +.. code-block:: console + + (qemu) info migrate + capabilities: xbzrle: off rdma-pin-all: off auto-converge: off zero-blocks: off + Migration status: completed + total time: 11619 milliseconds + downtime: 5 milliseconds + setup: 7 milliseconds + transferred ram: 379699 kbytes + throughput: 267.82 mbps + remaining ram: 0 kbytes + total ram: 1590088 kbytes + duplicate: 303985 pages + skipped: 0 pages + normal: 94073 pages + normal bytes: 376292 kbytes + dirty sync count: 2 + (qemu) quit + +On host_server_2: Terminal 1 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +**In VM on host_server_2:** + + Hit Enter key. This brings the user to the testpmd prompt. + +.. code-block:: console + + testpmd> + +On host_server_2: Terminal 2 +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. code-block:: console + + (qemu) info status + VM status: running + +For the Niantic NIC. + +.. 
code-block:: console
+
+    (qemu) device_add pci-assign,host=06:10.0,id=vf1
+
+For the Fortville NIC.
+
+.. code-block:: console
+
+    (qemu) device_add pci-assign,host=03:02.0,id=vf1
+
+On host_server_2: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**In VM on host_server_2:**
+
+.. code-block:: console
+
+    testpmd> show port info all
+    testpmd> show port stats all
+    testpmd> show bonding config 2
+    testpmd> port attach 0000:00:04.0
+    Port 1 is attached. Now total ports is 3
+    Done
+
+    testpmd> port start 1
+
+The ``mac_addr`` command only works with the Kernel PF for Niantic.
+
+.. code-block:: console
+
+    testpmd> mac_addr add port 1 vf 0 AA:BB:CC:DD:EE:FF
+    testpmd> show port stats all
+    testpmd> show config fwd
+    testpmd> show bonding config 2
+    testpmd> add bonding slave 1 2
+    testpmd> set bonding primary 1 2
+    testpmd> show bonding config 2
+    testpmd> show port stats all
+
+VF traffic is seen at P1 (VF) and P2 (Bonded device).
+
+.. code-block:: console
+
+    testpmd> remove bonding slave 0 2
+    testpmd> show bonding config 2
+    testpmd> port stop 0
+    testpmd> port close 0
+    testpmd> port detach 0
+    Port '0000:00:03.0' is detached. Now total ports is 2
+
+    testpmd> show port info all
+    testpmd> show config fwd
+    testpmd> show port stats all
+
+VF traffic is seen at P1 (VF) and P2 (Bonded device).
+
+.. _lm_bond_virtio_sriov_host_scripts:
+
+Sample host scripts
+-------------------
+
+setup_vf_on_212_46.sh
+~~~~~~~~~~~~~~~~~~~~~
+
+Set up the Virtual Functions on host_server_1:
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # This script is run on the host 10.237.212.46 to set up the VFs
+
+    # set up Niantic VF
+    cat /sys/bus/pci/devices/0000\:09\:00.0/sriov_numvfs
+    echo 1 > /sys/bus/pci/devices/0000\:09\:00.0/sriov_numvfs
+    cat /sys/bus/pci/devices/0000\:09\:00.0/sriov_numvfs
+    rmmod ixgbevf
+
+    # set up Fortville VF
+    cat /sys/bus/pci/devices/0000\:02\:00.0/sriov_numvfs
+    echo 1 > /sys/bus/pci/devices/0000\:02\:00.0/sriov_numvfs
+    cat /sys/bus/pci/devices/0000\:02\:00.0/sriov_numvfs
+    rmmod i40evf
+
+vm_virtio_vf_one_212_46.sh
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up the Virtual Machine on host_server_1:
+
+.. code-block:: sh
+
+    #!/bin/sh
+
+    # Path to KVM tool
+    KVM_PATH="/usr/bin/qemu-system-x86_64"
+
+    # Guest Disk image
+    DISK_IMG="/home/username/disk_image/virt1_sml.disk"
+
+    # Number of guest cpus
+    VCPUS_NR="4"
+
+    # Memory
+    MEM=1536
+
+    taskset -c 1-5 $KVM_PATH \
+     -enable-kvm \
+     -m $MEM \
+     -smp $VCPUS_NR \
+     -cpu host \
+     -name VM1 \
+     -no-reboot \
+     -net none \
+     -vnc none -nographic \
+     -hda $DISK_IMG \
+     -netdev type=tap,id=net1,script=no,downscript=no,ifname=tap1 \
+     -device virtio-net-pci,netdev=net1,mac=CC:BB:BB:BB:BB:BB \
+     -device pci-assign,host=09:10.0,id=vf1 \
+     -monitor telnet::3333,server,nowait
+
+setup_bridge_on_212_46.sh
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up the bridge on host_server_1:
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # This script is run on the host 10.237.212.46 to set up the bridge
+    # for the Tap device and the PF device.
+    # This enables traffic to go from the PF to the Tap to the Virtio PMD in the VM.
+
+    # ens3f0 is the Niantic NIC
+    # ens6f0 is the Fortville NIC
+
+    ifconfig ens3f0 down
+    ifconfig tap1 down
+    ifconfig ens6f0 down
+    ifconfig virbr0 down
+
+    brctl show virbr0
+    brctl addif virbr0 ens3f0
+    brctl addif virbr0 ens6f0
+    brctl addif virbr0 tap1
+    brctl show virbr0
+
+    ifconfig ens3f0 up
+    ifconfig tap1 up
+    ifconfig ens6f0 up
+    ifconfig virbr0 up
+
+connect_to_qemu_mon_on_host.sh
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. 
code-block:: sh + + #!/bin/sh + # This script is run on both hosts when the VM is up, + # to connect to the Qemu Monitor. + + telnet 0 3333 + +setup_vf_on_212_131.sh +~~~~~~~~~~~~~~~~~~~~~~ + +Set up Virtual Functions on host_server_2 + +.. code-block:: sh + + #!/bin/sh + # This script is run on the host 10.237.212.131 to setup the VF + + # set up Niantic VF + cat /sys/bus/pci/devices/0000\:06\:00.0/sriov_numvfs + echo 1 > /sys/bus/pci/devices/0000\:06\:00.0/sriov_numvfs + cat /sys/bus/pci/devices/0000\:06\:00.0/sriov_numvfs + rmmod ixgbevf + + # set up Fortville VF + cat /sys/bus/pci/devices/0000\:03\:00.0/sriov_numvfs + echo 1 > /sys/bus/pci/devices/0000\:03\:00.0/sriov_numvfs + cat /sys/bus/pci/devices/0000\:03\:00.0/sriov_numvfs + rmmod i40evf + +vm_virtio_one_migrate.sh +~~~~~~~~~~~~~~~~~~~~~~~~ + +Setup Virtual Machine on host_server_2 + +.. code-block:: sh + + #!/bin/sh + # Start the VM on host_server_2 with the same parameters except without the VF + # parameters, as the VM on host_server_1, in migration-listen mode + # (-incoming tcp:0:5555) + + # Path to KVM tool + KVM_PATH="/usr/bin/qemu-system-x86_64" + + # Guest Disk image + DISK_IMG="/home/username/disk_image/virt1_sml.disk" + + # Number of guest cpus + VCPUS_NR="4" + + # Memory + MEM=1536 + + taskset -c 1-5 $KVM_PATH \ + -enable-kvm \ + -m $MEM \ + -smp $VCPUS_NR \ + -cpu host \ + -name VM1 \ + -no-reboot \ + -net none \ + -vnc none -nographic \ + -hda $DISK_IMG \ + -netdev type=tap,id=net1,script=no,downscript=no,ifname=tap1 \ + -device virtio-net-pci,netdev=net1,mac=CC:BB:BB:BB:BB:BB \ + -incoming tcp:0:5555 \ + -monitor telnet::3333,server,nowait + +setup_bridge_on_212_131.sh +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Setup bridge on host_server_2 + +.. code-block:: sh + + #!/bin/sh + # This script is run on the host to setup the bridge + # for the Tap device and the PF device. + # This enables traffic to go from the PF to the Tap to the Virtio PMD in the VM. + + # ens4f0 is the Niantic NIC + # ens5f0 is the Fortville NIC + + ifconfig ens4f0 down + ifconfig tap1 down + ifconfig ens5f0 down + ifconfig virbr0 down + + brctl show virbr0 + brctl addif virbr0 ens4f0 + brctl addif virbr0 ens5f0 + brctl addif virbr0 tap1 + brctl show virbr0 + + ifconfig ens4f0 up + ifconfig tap1 up + ifconfig ens5f0 up + ifconfig virbr0 up + +.. _lm_bond_virtio_sriov_vm_scripts: + +Sample VM scripts +----------------- + +setup_dpdk_in_vm.sh +~~~~~~~~~~~~~~~~~~~ + +Set up DPDK in the Virtual Machine + +.. code-block:: sh + + #!/bin/sh + # this script matches the vm_virtio_vf_one script + # virtio port is 03 + # vf port is 04 + + cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages + echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages + cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages + + ifconfig -a + /root/dpdk/tools/dpdk_nic_bind.py --status + + rmmod virtio-pci ixgbevf + + modprobe uio + insmod /root/dpdk/x86_64-default-linuxapp-gcc/kmod/igb_uio.ko + + /root/dpdk/tools/dpdk_nic_bind.py -b igb_uio 0000:00:03.0 + /root/dpdk/tools/dpdk_nic_bind.py -b igb_uio 0000:00:04.0 + + /root/dpdk/tools/dpdk_nic_bind.py --status + +run_testpmd_bonding_in_vm.sh +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Run testpmd in the Virtual Machine. + +.. code-block:: sh + + #!/bin/sh + # Run testpmd in the VM + + # The test system has 8 cpus (0-7), use cpus 2-7 for VM + # Use taskset -pc + + # use for bonding of virtio and vf tests in VM + + /root/dpdk/x86_64-default-linuxapp-gcc/app/testpmd \ + -c f -n 4 --socket-mem 350 -- --i --port-topology=chained + +.. 
_lm_bond_virtio_sriov_switch_conf: + +Sample switch configuration +--------------------------- + +The Intel switch is used to connect the traffic generator to the +NIC's on host_server_1 and host_server_2. + +In order to run the switch configuration two console windows are required. + +Log in as root in both windows. + +TestPointShared, run_switch.sh and load /root/switch_config must be executed +in the sequence below. + +On Switch: Terminal 1 +~~~~~~~~~~~~~~~~~~~~~ + +run TestPointShared + +.. code-block:: console + + /usr/bin/TestPointShared + +On Switch: Terminal 2 +~~~~~~~~~~~~~~~~~~~~~ + +execute run_switch.sh + +.. code-block:: console + + /root/run_switch.sh + +On Switch: Terminal 1 +~~~~~~~~~~~~~~~~~~~~~ + +load switch configuration + +.. code-block:: console + + load /root/switch_config + +Sample switch configuration script +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +The ``/root/switch_config`` script: + +.. code-block:: sh + + # TestPoint History + show port 1,5,9,13,17,21,25 + set port 1,5,9,13,17,21,25 up + show port 1,5,9,13,17,21,25 + del acl 1 + create acl 1 + create acl-port-set + create acl-port-set + add port port-set 1 0 + add port port-set 5,9,13,17,21,25 1 + create acl-rule 1 1 + add acl-rule condition 1 1 port-set 1 + add acl-rule action 1 1 redirect 1 + apply acl + create vlan 1000 + add vlan port 1000 1,5,9,13,17,21,25 + set vlan tagging 1000 1,5,9,13,17,21,25 tag + set switch config flood_ucast fwd + show port stats all 1,5,9,13,17,21,25 diff --git a/doc/guides/howto/lm_virtio_vhost_user.rst b/doc/guides/howto/lm_virtio_vhost_user.rst new file mode 100644 index 00000000..fad1f2a8 --- /dev/null +++ b/doc/guides/howto/lm_virtio_vhost_user.rst @@ -0,0 +1,469 @@ +.. BSD LICENSE + Copyright(c) 2016 Intel Corporation. All rights reserved. + All rights reserved. + + Redistribution and use in source and binary forms, with or without + modification, are permitted provided that the following conditions + are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + * Neither the name of Intel Corporation nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + + THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+
+Live Migration of VM with Virtio on host running vhost_user
+===========================================================
+
+Overview
+--------
+
+This guide covers Live Migration of a VM which uses the DPDK Virtio PMD, on
+a host which is running the Vhost sample application (vhost-switch) and
+using the DPDK PF PMD (ixgbe or i40e).
+
+The Vhost sample application uses VMDQ, so SR-IOV must be disabled on the
+NICs.
+
+The following sections show an example of how to do this migration.
+
+Test Setup
+----------
+
+To test the Live Migration two servers with identical operating systems
+installed are used. KVM and QEMU are also required on the servers.
+
+QEMU 2.5 is required for Live Migration of a VM with vhost_user running on
+the hosts.
+
+In this example, the servers have Niantic and/or Fortville NICs installed.
+The NICs on both servers are connected to a switch which is also connected
+to the traffic generator.
+
+The switch is configured to broadcast traffic on all the NIC ports.
+
+The IP address of host_server_1 is 10.237.212.46.
+
+The IP address of host_server_2 is 10.237.212.131.
+
+.. _figure_lm_vhost_user:
+
+.. figure:: img/lm_vhost_user.*
+
+Live Migration steps
+--------------------
+
+The sample scripts mentioned in the steps below can be found in the
+:ref:`Sample host scripts <lm_virtio_vhost_user_host_scripts>` and
+:ref:`Sample VM scripts <lm_virtio_vhost_user_vm_scripts>` sections.
+
+On host_server_1: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up DPDK on host_server_1:
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./setup_dpdk_on_host.sh
+
+On host_server_1: Terminal 2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Bind the Niantic or Fortville NIC to igb_uio on host_server_1.
+
+For the Fortville NIC.
+
+.. code-block:: console
+
+    cd /root/dpdk/tools
+    ./dpdk_nic_bind.py -b igb_uio 0000:02:00.0
+
+For the Niantic NIC.
+
+.. code-block:: console
+
+    cd /root/dpdk/tools
+    ./dpdk_nic_bind.py -b igb_uio 0000:09:00.0
+
+On host_server_1: Terminal 3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For the Fortville and Niantic NICs reset SR-IOV and run the
+vhost_user sample application (vhost-switch) on host_server_1:
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./reset_vf_on_212_46.sh
+    ./run_vhost_switch_on_host.sh
+
+On host_server_1: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Start the VM on host_server_1:
+
+.. code-block:: console
+
+    ./vm_virtio_vhost_user.sh
+
+On host_server_1: Terminal 4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Connect to the QEMU monitor on host_server_1:
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./connect_to_qemu_mon_on_host.sh
+    (qemu)
+
+On host_server_1: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**In VM on host_server_1:**
+
+Set up DPDK in the VM and run testpmd in the VM:
+
+.. code-block:: console
+
+    cd /root/dpdk/vm_scripts
+    ./setup_dpdk_in_vm.sh
+    ./run_testpmd_in_vm.sh
+
+    testpmd> show port info all
+    testpmd> set fwd mac retry
+    testpmd> start tx_first
+    testpmd> show port stats all
+
+Virtio traffic is seen at P1 and P2.
+
+On host_server_2: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Set up DPDK on host_server_2:
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./setup_dpdk_on_host.sh
+
+On host_server_2: Terminal 2
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Bind the Niantic or Fortville NIC to igb_uio on host_server_2.
+
+For the Fortville NIC.
+
+.. code-block:: console
+
+    cd /root/dpdk/tools
+    ./dpdk_nic_bind.py -b igb_uio 0000:03:00.0
+
+For the Niantic NIC.
+
+.. code-block:: console
+
+    cd /root/dpdk/tools
+    ./dpdk_nic_bind.py -b igb_uio 0000:06:00.0
+
+On host_server_2: Terminal 3
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For the Fortville and Niantic NICs reset SR-IOV, and run
+the vhost_user sample application on host_server_2:
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./reset_vf_on_212_131.sh
+    ./run_vhost_switch_on_host.sh
+
+On host_server_2: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Start the VM on host_server_2:
+
+.. code-block:: console
+
+    ./vm_virtio_vhost_user_migrate.sh
+
+On host_server_2: Terminal 4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Connect to the QEMU monitor on host_server_2:
+
+.. code-block:: console
+
+    cd /root/dpdk/host_scripts
+    ./connect_to_qemu_mon_on_host.sh
+    (qemu) info status
+    VM status: paused (inmigrate)
+    (qemu)
+
+On host_server_1: Terminal 4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Check that the switch is up before migrating the VM:
+
+.. code-block:: console
+
+    (qemu) migrate tcp:10.237.212.131:5555
+    (qemu) info status
+    VM status: paused (postmigrate)
+
+    (qemu) info migrate
+    capabilities: xbzrle: off rdma-pin-all: off auto-converge: off zero-blocks: off
+    Migration status: completed
+    total time: 11619 milliseconds
+    downtime: 5 milliseconds
+    setup: 7 milliseconds
+    transferred ram: 379699 kbytes
+    throughput: 267.82 mbps
+    remaining ram: 0 kbytes
+    total ram: 1590088 kbytes
+    duplicate: 303985 pages
+    skipped: 0 pages
+    normal: 94073 pages
+    normal bytes: 376292 kbytes
+    dirty sync count: 2
+    (qemu) quit
+
+On host_server_2: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**In VM on host_server_2:**
+
+Hit the Enter key. This brings the user to the testpmd prompt:
+
+.. code-block:: console
+
+    testpmd>
+
+On host_server_2: Terminal 4
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**In QEMU monitor on host_server_2**
+
+.. code-block:: console
+
+    (qemu) info status
+    VM status: running
+
+On host_server_2: Terminal 1
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+**In VM on host_server_2:**
+
+.. code-block:: console
+
+    testpmd> show port info all
+    testpmd> show port stats all
+
+Virtio traffic is seen at P0 and P1.
+
+
+.. _lm_virtio_vhost_user_host_scripts:
+
+Sample host scripts
+-------------------
+
+reset_vf_on_212_46.sh
+~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # This script is run on the host 10.237.212.46 to reset SRIOV
+
+    # BDF for Fortville NIC is 0000:02:00.0
+    cat /sys/bus/pci/devices/0000\:02\:00.0/max_vfs
+    echo 0 > /sys/bus/pci/devices/0000\:02\:00.0/max_vfs
+    cat /sys/bus/pci/devices/0000\:02\:00.0/max_vfs
+
+    # BDF for Niantic NIC is 0000:09:00.0
+    cat /sys/bus/pci/devices/0000\:09\:00.0/max_vfs
+    echo 0 > /sys/bus/pci/devices/0000\:09\:00.0/max_vfs
+    cat /sys/bus/pci/devices/0000\:09\:00.0/max_vfs
+
+vm_virtio_vhost_user.sh
+~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # Script for use with the vhost_user sample application
+    # The host system has 8 cpus (0-7)
+
+    # Path to KVM tool
+    KVM_PATH="/usr/bin/qemu-system-x86_64"
+
+    # Guest Disk image
+    DISK_IMG="/home/user/disk_image/virt1_sml.disk"
+
+    # Number of guest cpus
+    VCPUS_NR="6"
+
+    # Memory
+    MEM=1024
+
+    VIRTIO_OPTIONS="csum=off,gso=off,guest_tso4=off,guest_tso6=off,guest_ecn=off"
+
+    # Socket Path
+    SOCKET_PATH="/root/dpdk/host_scripts/usvhost"
+
+    taskset -c 2-7 $KVM_PATH \
+     -enable-kvm \
+     -m $MEM \
+     -smp $VCPUS_NR \
+     -object memory-backend-file,id=mem,size=1024M,mem-path=/mnt/huge,share=on \
+     -numa node,memdev=mem,nodeid=0 \
+     -cpu host \
+     -name VM1 \
+     -no-reboot \
+     -net none \
+     -vnc none \
+     -nographic \
+     -hda $DISK_IMG \
+     -chardev socket,id=chr0,path=$SOCKET_PATH \
+     -netdev type=vhost-user,id=net1,chardev=chr0,vhostforce \
+     -device virtio-net-pci,netdev=net1,mac=CC:BB:BB:BB:BB:BB,$VIRTIO_OPTIONS \
+     -chardev socket,id=chr1,path=$SOCKET_PATH \
+     -netdev type=vhost-user,id=net2,chardev=chr1,vhostforce \
+     -device virtio-net-pci,netdev=net2,mac=DD:BB:BB:BB:BB:BB,$VIRTIO_OPTIONS \
+     -monitor telnet::3333,server,nowait
+
+connect_to_qemu_mon_on_host.sh
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # This script is run on both hosts when the VM is up,
+    # to connect to the Qemu Monitor.
+
+    telnet 0 3333
+
+reset_vf_on_212_131.sh
+~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # This script is run on the host 10.237.212.131 to reset SRIOV
+
+    # BDF for Niantic NIC is 0000:06:00.0
+    cat /sys/bus/pci/devices/0000\:06\:00.0/max_vfs
+    echo 0 > /sys/bus/pci/devices/0000\:06\:00.0/max_vfs
+    cat /sys/bus/pci/devices/0000\:06\:00.0/max_vfs
+
+    # BDF for Fortville NIC is 0000:03:00.0
+    cat /sys/bus/pci/devices/0000\:03\:00.0/max_vfs
+    echo 0 > /sys/bus/pci/devices/0000\:03\:00.0/max_vfs
+    cat /sys/bus/pci/devices/0000\:03\:00.0/max_vfs
+
+vm_virtio_vhost_user_migrate.sh
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # Script for use with the vhost_user sample application
+    # The host system has 8 cpus (0-7)
+
+    # Path to KVM tool
+    KVM_PATH="/usr/bin/qemu-system-x86_64"
+
+    # Guest Disk image
+    DISK_IMG="/home/user/disk_image/virt1_sml.disk"
+
+    # Number of guest cpus
+    VCPUS_NR="6"
+
+    # Memory
+    MEM=1024
+
+    VIRTIO_OPTIONS="csum=off,gso=off,guest_tso4=off,guest_tso6=off,guest_ecn=off"
+
+    # Socket Path
+    SOCKET_PATH="/root/dpdk/host_scripts/usvhost"
+
+    taskset -c 2-7 $KVM_PATH \
+     -enable-kvm \
+     -m $MEM \
+     -smp $VCPUS_NR \
+     -object memory-backend-file,id=mem,size=1024M,mem-path=/mnt/huge,share=on \
+     -numa node,memdev=mem,nodeid=0 \
+     -cpu host \
+     -name VM1 \
+     -no-reboot \
+     -net none \
+     -vnc none \
+     -nographic \
+     -hda $DISK_IMG \
+     -chardev socket,id=chr0,path=$SOCKET_PATH \
+     -netdev type=vhost-user,id=net1,chardev=chr0,vhostforce \
+     -device virtio-net-pci,netdev=net1,mac=CC:BB:BB:BB:BB:BB,$VIRTIO_OPTIONS \
+     -chardev socket,id=chr1,path=$SOCKET_PATH \
+     -netdev type=vhost-user,id=net2,chardev=chr1,vhostforce \
+     -device virtio-net-pci,netdev=net2,mac=DD:BB:BB:BB:BB:BB,$VIRTIO_OPTIONS \
+     -incoming tcp:0:5555 \
+     -monitor telnet::3333,server,nowait
+
+.. _lm_virtio_vhost_user_vm_scripts:
+
+Sample VM scripts
+-----------------
+
+setup_dpdk_virtio_in_vm.sh
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # This script matches the vm_virtio_vhost_user script
+    # first virtio port is 03
+    # second virtio port is 04
+
+    cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+    echo 1024 > /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+    cat /sys/kernel/mm/hugepages/hugepages-2048kB/nr_hugepages
+
+    ifconfig -a
+    /root/dpdk/tools/dpdk_nic_bind.py --status
+
+    rmmod virtio-pci
+
+    modprobe uio
+    insmod /root/dpdk/x86_64-default-linuxapp-gcc/kmod/igb_uio.ko
+
+    /root/dpdk/tools/dpdk_nic_bind.py -b igb_uio 0000:00:03.0
+    /root/dpdk/tools/dpdk_nic_bind.py -b igb_uio 0000:00:04.0
+
+    /root/dpdk/tools/dpdk_nic_bind.py --status
+
+run_testpmd_in_vm.sh
+~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+    #!/bin/sh
+    # Run testpmd for use with the vhost_user sample app.
+    # The test system has 8 cpus (0-7); use cpus 2-7 for the VM.
+
+    /root/dpdk/x86_64-default-linuxapp-gcc/app/testpmd \
+    -c 3f -n 4 --socket-mem 350 -- --burst=64 --i --disable-hw-vlan-filter