 - KVM and VMware ESX SR-IOV modes are supported.
 - RSS hash result is supported.
 - Hardware TSO.
+- Hardware checksum TX offload for VXLAN and GRE.
 
 Limitations
 -----------
 
 - Inner RSS for VXLAN frames is not supported yet.
 - Port statistics through software counters only.
-- Hardware checksum offloads for VXLAN inner header are not supported yet.
+- Hardware checksum RX offloads for VXLAN inner header are not supported yet.
 - Secondary process RX is not supported.
 
 Configuration
diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c
index 03ed3b3..6f42948 100644
--- a/drivers/net/mlx5/mlx5.c
+++ b/drivers/net/mlx5/mlx5.c
@@ -375,6 +375,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	struct ibv_device_attr device_attr;
 	unsigned int sriov;
 	unsigned int mps;
+	unsigned int tunnel_en;
 	int idx;
 	int i;
 
@@ -429,12 +430,17 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		 * as all ConnectX-5 devices.
 		 */
 		switch (pci_dev->id.device_id) {
+		case PCI_DEVICE_ID_MELLANOX_CONNECTX4:
+			tunnel_en = 1;
+			mps = 0;
+			break;
 		case PCI_DEVICE_ID_MELLANOX_CONNECTX4LX:
 		case PCI_DEVICE_ID_MELLANOX_CONNECTX5:
 		case PCI_DEVICE_ID_MELLANOX_CONNECTX5VF:
 		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EX:
 		case PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF:
 			mps = 1;
+			tunnel_en = 1;
 			break;
 		default:
 			mps = 0;
@@ -539,6 +545,7 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 		priv->mtu = ETHER_MTU;
 		priv->mps = mps; /* Enable MPW by default if supported. */
 		priv->cqe_comp = 1; /* Enable compression by default. */
+		priv->tunnel_en = tunnel_en;
 		err = mlx5_args(priv, pci_dev->device.devargs);
 		if (err) {
 			ERROR("failed to process device arguments: %s",
diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h
index 93f129b..870e01f 100644
--- a/drivers/net/mlx5/mlx5.h
+++ b/drivers/net/mlx5/mlx5.h
@@ -127,6 +127,8 @@ struct priv {
 	unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */
 	unsigned int pending_alarm:1; /* An alarm is pending. */
 	unsigned int tso:1; /* Whether TSO is supported. */
+	unsigned int tunnel_en:1;
+	/* Whether Tx offloads for tunneled packets are supported. */
 	unsigned int max_tso_payload_sz; /* Maximum TCP payload for TSO. */
 	unsigned int txq_inline; /* Maximum packet size for inlining. */
 	unsigned int txqs_inline; /* Queue number threshold for inlining. */
diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c
index 5542193..8be9e77 100644
--- a/drivers/net/mlx5/mlx5_ethdev.c
+++ b/drivers/net/mlx5/mlx5_ethdev.c
@@ -695,6 +695,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info)
 			 DEV_TX_OFFLOAD_TCP_CKSUM);
 	if (priv->tso)
 		info->tx_offload_capa |= DEV_TX_OFFLOAD_TCP_TSO;
+	if (priv->tunnel_en)
+		info->tx_offload_capa |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
 	if (priv_get_ifname(priv, &ifname) == 0)
 		info->if_index = if_nametoindex(ifname);
 	/* FIXME: RETA update/query API expects the callee to know the size of
diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h
index 3318668..0a77f5b 100644
--- a/drivers/net/mlx5/mlx5_prm.h
+++ b/drivers/net/mlx5/mlx5_prm.h
@@ -120,6 +120,12 @@
 /* Tunnel packet bit in the CQE. */
 #define MLX5_CQE_RX_TUNNEL_PACKET (1u << 0)
 
+/* Inner L3 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L3_INNER_CSUM (1u << 4)
+
+/* Inner L4 checksum offload (Tunneled packets only). */
+#define MLX5_ETH_WQE_L4_INNER_CSUM (1u << 5)
+
 /* INVALID is used by packets matching no flow rules. */
 #define MLX5_FLOW_MARK_INVALID 0
diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c
index 98889f6..c2eb891 100644
--- a/drivers/net/mlx5/mlx5_rxtx.c
+++ b/drivers/net/mlx5/mlx5_rxtx.c
@@ -443,7 +443,19 @@ mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n)
 		/* Should we enable HW CKSUM offload */
 		if (buf->ol_flags &
 		    (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) {
-			cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM;
+			const uint64_t is_tunneled = buf->ol_flags &
+						     (PKT_TX_TUNNEL_GRE |
+						      PKT_TX_TUNNEL_VXLAN);
+
+			if (is_tunneled && txq->tunnel_en) {
+				cs_flags = MLX5_ETH_WQE_L3_INNER_CSUM |
+					   MLX5_ETH_WQE_L4_INNER_CSUM;
+				if (buf->ol_flags & PKT_TX_OUTER_IP_CKSUM)
+					cs_flags |= MLX5_ETH_WQE_L3_CSUM;
+			} else {
+				cs_flags = MLX5_ETH_WQE_L3_CSUM |
+					   MLX5_ETH_WQE_L4_CSUM;
+			}
 		}
 		raw = ((uint8_t *)(uintptr_t)wqe) + 2 * MLX5_WQE_DWORD_SIZE;
 		/* Replace the Ethernet type by the VLAN if necessary. */
diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h
index 6b328cf..9669564 100644
--- a/drivers/net/mlx5/mlx5_rxtx.h
+++ b/drivers/net/mlx5/mlx5_rxtx.h
@@ -256,6 +256,8 @@ struct txq {
 	uint16_t max_inline; /* Multiple of RTE_CACHE_LINE_SIZE to inline. */
 	uint16_t inline_en:1; /* When set inline is enabled. */
 	uint16_t tso_en:1; /* When set hardware TSO is enabled. */
+	uint16_t tunnel_en:1;
+	/* When set TX offload for tunneled packets are supported. */
 	uint32_t qp_num_8s; /* QP number shifted by 8. */
 	volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */
 	volatile void *wqes; /* Work queue (use volatile to write into). */
diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c
index 995b763..9d0c00f 100644
--- a/drivers/net/mlx5/mlx5_txq.c
+++ b/drivers/net/mlx5/mlx5_txq.c
@@ -356,6 +356,8 @@ txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl,
 						  max_tso_inline);
 		tmpl.txq.tso_en = 1;
 	}
+	if (priv->tunnel_en)
+		tmpl.txq.tunnel_en = 1;
 	tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init);
 	if (tmpl.qp == NULL) {
 		ret = (errno ? errno : EINVAL);
-- 
2.7.4
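
For reference, a minimal sketch of how an application might mark a VXLAN-encapsulated IPv4/TCP packet so that a PMD advertising these capabilities (such as mlx5 with this patch) computes the inner L3/L4 and outer IPv4 checksums in hardware. The helper name and header lengths below are illustrative assumptions, not part of this patch or of the mlx5 driver.

#include <rte_ether.h>
#include <rte_mbuf.h>

/* Illustrative helper (not part of the patch): request tunnel checksum
 * offloads for a VXLAN-encapsulated IPv4/TCP packet. Header lengths are
 * example values and must match the headers actually built in the mbuf. */
static void
request_vxlan_tx_csum(struct rte_mbuf *m)
{
	m->outer_l2_len = sizeof(struct ether_hdr);   /* outer Ethernet */
	m->outer_l3_len = 20;                         /* outer IPv4 */
	m->l2_len = 8 + 8 + sizeof(struct ether_hdr); /* UDP + VXLAN + inner Ethernet */
	m->l3_len = 20;                               /* inner IPv4 */
	m->l4_len = 20;                               /* inner TCP */

	/* Inner L3/L4 checksum plus outer IPv4 checksum offload. */
	m->ol_flags |= PKT_TX_TUNNEL_VXLAN |
		       PKT_TX_OUTER_IPV4 | PKT_TX_OUTER_IP_CKSUM |
		       PKT_TX_IPV4 | PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM;
}

With these flags set, the patched mlx5_tx_burst() selects MLX5_ETH_WQE_L3_INNER_CSUM and MLX5_ETH_WQE_L4_INNER_CSUM for the inner headers and, because PKT_TX_OUTER_IP_CKSUM is present, also sets MLX5_ETH_WQE_L3_CSUM for the outer header.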