commit    ce4083ce48958d9d3956e8317445a5552780af1a
tree      56ddc02d9a2d606a6b6c51197a8ea0b5c756f6de
parent    3b7ef512f190a506f62af53536b586b4800f66c1
author    Damjan Marion <damarion@cisco.com>  2022-01-09 14:05:27 +0100
committer Damjan Marion <dmarion@me.com>      2022-01-11 11:42:59 +0000
dpdk: offloads cleanup
Type: improvement
Change-Id: Ibf43aa483548e6055e4b851ad893371d7af3b018
Signed-off-by: Damjan Marion <damarion@cisco.com>
-rw-r--r--  src/plugins/dpdk/device/common.c    | 102
-rw-r--r--  src/plugins/dpdk/device/dpdk.h      |  14
-rw-r--r--  src/plugins/dpdk/device/dpdk_priv.h |   6
-rw-r--r--  src/plugins/dpdk/device/init.c      | 199
4 files changed, 122 insertions(+), 199 deletions(-)
diff --git a/src/plugins/dpdk/device/common.c b/src/plugins/dpdk/device/common.c
index 8313a293fe6..15424acf0b8 100644
--- a/src/plugins/dpdk/device/common.c
+++ b/src/plugins/dpdk/device/common.c
@@ -63,7 +63,7 @@ dpdk_device_setup (dpdk_device_t * xd)
   vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, xd->hw_if_index);
   vnet_hw_if_caps_change_t caps = {};
   struct rte_eth_dev_info dev_info;
-  u64 bitmap;
+  struct rte_eth_conf conf = {};
   u64 rxo, txo;
   u16 mtu;
   int rv;
@@ -80,44 +80,81 @@ dpdk_device_setup (dpdk_device_t * xd)
       dpdk_device_stop (xd);
     }
 
-  /* Enable flow director when flows exist */
-  if (xd->pmd == VNET_DPDK_PMD_I40E)
-    {
-      if ((xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0)
-        xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
-      else
-        xd->port_conf.fdir_conf.mode = RTE_FDIR_MODE_NONE;
-    }
-
   rte_eth_dev_info_get (xd->port_id, &dev_info);
 
-  bitmap = xd->port_conf.txmode.offloads & ~dev_info.tx_offload_capa;
-  if (bitmap)
+  /* create rx and tx offload wishlist */
+  rxo = DEV_RX_OFFLOAD_IPV4_CKSUM;
+  txo = 0;
+
+  if (xd->conf.enable_tcp_udp_checksum)
+    rxo |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;
+
+  if (xd->conf.disable_tx_checksum_offload == 0 &&
+      xd->conf.enable_outer_checksum_offload)
+    txo |= DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM | DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
+
+  if (xd->conf.disable_tx_checksum_offload == 0)
+    txo |= DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM |
+           DEV_TX_OFFLOAD_UDP_CKSUM;
+
+  if (xd->conf.disable_multi_seg == 0)
     {
-      dpdk_log_warn ("unsupported tx offloads requested on port %u: %U",
-                     xd->port_id, format_dpdk_tx_offload_caps, bitmap);
-      xd->port_conf.txmode.offloads ^= bitmap;
+      txo |= DEV_TX_OFFLOAD_MULTI_SEGS;
+      rxo |= DEV_RX_OFFLOAD_JUMBO_FRAME | DEV_RX_OFFLOAD_SCATTER;
     }
 
-  bitmap = xd->port_conf.rxmode.offloads & ~dev_info.rx_offload_capa;
-  if (bitmap)
+  if (xd->conf.enable_lro)
+    rxo |= DEV_RX_OFFLOAD_TCP_LRO;
+
+  /* per-device offload config */
+  if (xd->conf.enable_tso)
+    txo |= DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO |
+           DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
+
+  if (xd->conf.disable_rx_scatter)
+    rxo &= ~DEV_RX_OFFLOAD_SCATTER;
+
+  /* mask unsupported offloads */
+  rxo &= dev_info.rx_offload_capa;
+  txo &= dev_info.tx_offload_capa;
+
+  dpdk_log_debug ("[%u] Configured RX offloads: %U", xd->port_id,
+                  format_dpdk_rx_offload_caps, rxo);
+  dpdk_log_debug ("[%u] Configured TX offloads: %U", xd->port_id,
+                  format_dpdk_tx_offload_caps, txo);
+
+  /* Enable flow director when flows exist */
+  if (xd->supported_flow_actions &&
+      (xd->flags & DPDK_DEVICE_FLAG_RX_FLOW_OFFLOAD) != 0)
+    conf.fdir_conf.mode = RTE_FDIR_MODE_PERFECT;
+
+  /* finalize configuration */
+  conf.rxmode.offloads = rxo;
+  conf.txmode.offloads = txo;
+  if (rxo & DEV_RX_OFFLOAD_TCP_LRO)
+    conf.rxmode.max_lro_pkt_size = xd->conf.max_lro_pkt_size;
+
+  if (xd->conf.enable_lsc_int)
+    conf.intr_conf.lsc = 1;
+  if (xd->conf.enable_rxq_int)
+    conf.intr_conf.rxq = 1;
+
+  conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+  if (xd->conf.n_rx_queues > 1)
     {
-      dpdk_log_warn ("unsupported rx offloads requested on port %u: %U",
-                     xd->port_id, format_dpdk_rx_offload_caps, bitmap);
-      xd->port_conf.rxmode.offloads ^= bitmap;
+      if (xd->conf.disable_rss == 0)
+        {
+          conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
+          conf.rx_adv_conf.rss_conf.rss_hf = xd->conf.rss_hf;
+        }
     }
 
-  rxo = xd->port_conf.rxmode.offloads;
-  txo = xd->port_conf.txmode.offloads;
-
-  if (rxo & DEV_RX_OFFLOAD_JUMBO_FRAME)
-    xd->port_conf.rxmode.max_rx_pkt_len =
+  conf.rxmode.max_rx_pkt_len =
     clib_min (ETHERNET_MAX_PACKET_BYTES, dev_info.max_rx_pktlen);
-  else
-    xd->port_conf.rxmode.max_rx_pkt_len = 0;
 
   rv = rte_eth_dev_configure (xd->port_id, xd->conf.n_rx_queues,
-                              xd->conf.n_tx_queues, &xd->port_conf);
+                              xd->conf.n_tx_queues, &conf);
 
   if (rv < 0)
     {
@@ -194,6 +231,15 @@ dpdk_device_setup (dpdk_device_t * xd)
     xd->buffer_flags |=
       (VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
 
+  dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_RX_IP4_CKSUM,
+                        rxo & DEV_RX_OFFLOAD_IPV4_CKSUM);
+  dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_MAYBE_MULTISEG,
+                        rxo & DEV_RX_OFFLOAD_SCATTER);
+  dpdk_device_flag_set (
+    xd, DPDK_DEVICE_FLAG_TX_OFFLOAD,
+    (txo & (DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM)) ==
+      (DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_UDP_CKSUM));
+
   /* unconditionally set mac filtering cap */
   caps.val = caps.mask = VNET_HW_IF_CAP_MAC_FILTER;
@@ -254,7 +300,7 @@ dpdk_setup_interrupts (dpdk_device_t *xd)
   if (!hi)
     return;
 
-  if (!xd->port_conf.intr_conf.rxq)
+  if (!xd->conf.enable_rxq_int)
     return;
 
   /* Probe for interrupt support */
diff --git a/src/plugins/dpdk/device/dpdk.h b/src/plugins/dpdk/device/dpdk.h
index f400f4887cf..7a941e210ed 100644
--- a/src/plugins/dpdk/device/dpdk.h
+++ b/src/plugins/dpdk/device/dpdk.h
@@ -173,15 +173,22 @@ typedef union
 {
   struct
   {
-    u16 no_multi_seg : 1;
+    u16 disable_multi_seg : 1;
     u16 enable_lro : 1;
+    u16 enable_tso : 1;
     u16 enable_tcp_udp_checksum : 1;
     u16 enable_outer_checksum_offload : 1;
-    u16 no_tx_checksum_offload : 1;
+    u16 enable_lsc_int : 1;
+    u16 enable_rxq_int : 1;
+    u16 disable_tx_checksum_offload : 1;
+    u16 disable_rss : 1;
+    u16 disable_rx_scatter : 1;
     u16 n_rx_queues;
     u16 n_tx_queues;
     u16 n_rx_desc;
     u16 n_tx_desc;
+    u32 max_lro_pkt_size;
+    u64 rss_hf;
   };
   u64 as_u64[3];
 } dpdk_port_conf_t;
@@ -222,9 +229,6 @@ typedef struct
   /* number of sub-interfaces */
   u16 num_subifs;
 
-  /* PMD related */
-  struct rte_eth_conf port_conf;
-
   /* flow related */
   u32 supported_flow_actions;
   dpdk_flow_entry_t *flow_entries; /* pool */
diff --git a/src/plugins/dpdk/device/dpdk_priv.h b/src/plugins/dpdk/device/dpdk_priv.h
index d1cdea32453..535a532f0b3 100644
--- a/src/plugins/dpdk/device/dpdk_priv.h
+++ b/src/plugins/dpdk/device/dpdk_priv.h
@@ -42,6 +42,12 @@ _(iova-mode) \
 _(base-virtaddr)
 /* clang-format on */
 
+static_always_inline void
+dpdk_device_flag_set (dpdk_device_t *xd, __typeof__ (xd->flags) flag, int val)
+{
+  xd->flags = val ? xd->flags | flag : xd->flags & ~flag;
+}
+
 static inline void
 dpdk_get_xstats (dpdk_device_t * xd)
 {
diff --git a/src/plugins/dpdk/device/init.c b/src/plugins/dpdk/device/init.c
index 19f5a545dcd..3b0ce193baa 100644
--- a/src/plugins/dpdk/device/init.c
+++ b/src/plugins/dpdk/device/init.c
@@ -91,10 +91,10 @@ dpdk_flag_change (vnet_main_t * vnm, vnet_hw_interface_t * hi, u32 flags)
     {
     case ETHERNET_INTERFACE_FLAG_DEFAULT_L3:
       /* set to L3/non-promisc mode */
-      xd->flags &= ~DPDK_DEVICE_FLAG_PROMISC;
+      dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_PROMISC, 0);
       break;
     case ETHERNET_INTERFACE_FLAG_ACCEPT_ALL:
-      xd->flags |= DPDK_DEVICE_FLAG_PROMISC;
+      dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_PROMISC, 1);
      break;
     case ETHERNET_INTERFACE_FLAG_MTU:
       if (xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP)
@@ -161,15 +161,6 @@ check_l3cache ()
   return 0;
 }
 
-static void
-dpdk_enable_l4_csum_offload (dpdk_device_t * xd)
-{
-  xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-  xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-  xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
-               DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
-}
-
 static clib_error_t *
 dpdk_lib_init (dpdk_main_t * dm)
 {
@@ -211,6 +202,8 @@ dpdk_lib_init (dpdk_main_t * dm)
   dm->default_port_conf.n_tx_desc = DPDK_NB_TX_DESC_DEFAULT;
   dm->default_port_conf.n_rx_queues = 1;
   dm->default_port_conf.n_tx_queues = tm->n_vlib_mains;
+  dm->default_port_conf.rss_hf = ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
+  dm->default_port_conf.max_lro_pkt_size = DPDK_MAX_LRO_SIZE_DEFAULT;
 
   if ((clib_mem_get_default_hugepage_size () == 2 << 20) &&
       check_l3cache () == 0)
@@ -329,58 +322,8 @@ dpdk_lib_init (dpdk_main_t * dm)
       else
         last_pci_addr.as_u32 = ~0;
 
-      if (di.rx_offload_capa & DEV_RX_OFFLOAD_IPV4_CKSUM)
-        {
-          xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_IPV4_CKSUM;
-          xd->flags |= DPDK_DEVICE_FLAG_RX_IP4_CKSUM;
-        }
-
-      if (xd->conf.enable_tcp_udp_checksum)
-        {
-          if (di.rx_offload_capa & DEV_RX_OFFLOAD_UDP_CKSUM)
-            xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_UDP_CKSUM;
-          if (di.rx_offload_capa & DEV_RX_OFFLOAD_TCP_CKSUM)
-            xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_CKSUM;
-          if (di.tx_offload_capa & DEV_TX_OFFLOAD_IPV4_CKSUM)
-            xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-
-          if (xd->conf.enable_outer_checksum_offload)
-            {
-              if (di.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM)
-                xd->port_conf.txmode.offloads |=
-                  DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM;
-              if (di.tx_offload_capa & DEV_TX_OFFLOAD_OUTER_UDP_CKSUM)
-                xd->port_conf.txmode.offloads |=
-                  DEV_TX_OFFLOAD_OUTER_UDP_CKSUM;
-            }
-        }
-
-      if (xd->conf.enable_lro)
-        {
-          if (di.rx_offload_capa & DEV_RX_OFFLOAD_TCP_LRO)
-            {
-              xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_TCP_LRO;
-              if (devconf->max_lro_pkt_size)
-                xd->port_conf.rxmode.max_lro_pkt_size =
-                  devconf->max_lro_pkt_size;
-              else
-                xd->port_conf.rxmode.max_lro_pkt_size =
-                  DPDK_MAX_LRO_SIZE_DEFAULT;
-            }
-        }
-      if (xd->conf.no_multi_seg)
-        {
-          xd->port_conf.txmode.offloads &= ~DEV_TX_OFFLOAD_MULTI_SEGS;
-          xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_JUMBO_FRAME;
-          xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER;
-        }
-      else
-        {
-          xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
-          xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
-          xd->port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SCATTER;
-          xd->flags |= DPDK_DEVICE_FLAG_MAYBE_MULTISEG;
-        }
+      if (devconf->max_lro_pkt_size)
+        xd->conf.max_lro_pkt_size = devconf->max_lro_pkt_size;
 
       xd->conf.n_tx_queues = clib_min (di.max_tx_queues, xd->conf.n_tx_queues);
 
@@ -392,22 +335,17 @@ dpdk_lib_init (dpdk_main_t * dm)
           di.max_rx_queues >= devconf->num_rx_queues)
         {
           xd->conf.n_rx_queues = devconf->num_rx_queues;
-          xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
-          if (devconf->rss_fn == 0)
-            xd->port_conf.rx_adv_conf.rss_conf.rss_hf =
-              ETH_RSS_IP | ETH_RSS_UDP | ETH_RSS_TCP;
-          else
+          if (devconf->rss_fn)
             {
              u64 unsupported_bits;
-              xd->port_conf.rx_adv_conf.rss_conf.rss_hf = devconf->rss_fn;
-              unsupported_bits = xd->port_conf.rx_adv_conf.rss_conf.rss_hf;
+              xd->conf.rss_hf = devconf->rss_fn;
+              unsupported_bits = xd->conf.rss_hf;
              unsupported_bits &= ~di.flow_type_rss_offloads;
              if (unsupported_bits)
                dpdk_log_warn ("Unsupported RSS hash functions: %U",
                               format_dpdk_rss_hf_name, unsupported_bits);
            }
-          xd->port_conf.rx_adv_conf.rss_conf.rss_hf &=
-            di.flow_type_rss_offloads;
+          xd->conf.rss_hf &= di.flow_type_rss_offloads;
         }
 
       if (devconf->num_rx_desc)
@@ -443,123 +381,70 @@ dpdk_lib_init (dpdk_main_t * dm)
         {
          /* Drivers with valid speed_capa set */
        case VNET_DPDK_PMD_I40E:
-          xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
+          dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INT_UNMASKABLE, 1);
          /* fall through */
        case VNET_DPDK_PMD_E1000EM:
        case VNET_DPDK_PMD_IGB:
        case VNET_DPDK_PMD_IGC:
        case VNET_DPDK_PMD_IXGBE:
        case VNET_DPDK_PMD_ICE:
-          xd->port_type = port_type_from_speed_capa (&di);
          xd->supported_flow_actions =
            VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
            VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |
            VNET_FLOW_ACTION_BUFFER_ADVANCE | VNET_FLOW_ACTION_COUNT |
            VNET_FLOW_ACTION_DROP | VNET_FLOW_ACTION_RSS;
-
-          if (xd->conf.no_tx_checksum_offload == 0)
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-              xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
-                           DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
-            }
-
-          xd->port_conf.intr_conf.rxq = 1;
-          break;
+          dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, 1);
+          xd->conf.enable_rxq_int = 1;
+          /* fall through */
        case VNET_DPDK_PMD_MLX5:
-          if (xd->conf.no_tx_checksum_offload == 0)
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-              xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
-                           DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
-            }
-          xd->port_type = port_type_from_speed_capa (&di);
-          break;
        case VNET_DPDK_PMD_CXGBE:
        case VNET_DPDK_PMD_MLX4:
        case VNET_DPDK_PMD_QEDE:
        case VNET_DPDK_PMD_BNXT:
+        case VNET_DPDK_PMD_ENIC:
          xd->port_type = port_type_from_speed_capa (&di);
          break;
 
          /* SR-IOV VFs */
        case VNET_DPDK_PMD_I40EVF:
-          xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
+          dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INT_UNMASKABLE, 1);
          /* fall through */
        case VNET_DPDK_PMD_IGBVF:
        case VNET_DPDK_PMD_IXGBEVF:
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
-          if (xd->conf.no_tx_checksum_offload == 0)
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-              xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
-                           DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
-            }
+          dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, 1);
          /* DPDK bug in multiqueue... */
          /* xd->port_conf.intr_conf.rxq = 1; */
          break;
 
          /* iAVF */
        case VNET_DPDK_PMD_IAVF:
-          xd->flags |= DPDK_DEVICE_FLAG_INT_UNMASKABLE;
+          dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INT_UNMASKABLE, 1);
+          dpdk_device_flag_set (xd, DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM, 1);
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
          xd->supported_flow_actions =
            VNET_FLOW_ACTION_MARK | VNET_FLOW_ACTION_REDIRECT_TO_NODE |
            VNET_FLOW_ACTION_REDIRECT_TO_QUEUE |
            VNET_FLOW_ACTION_BUFFER_ADVANCE | VNET_FLOW_ACTION_COUNT |
            VNET_FLOW_ACTION_DROP | VNET_FLOW_ACTION_RSS;
-
-          if (xd->conf.no_tx_checksum_offload == 0)
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-              xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
-                           DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
-            }
          /* DPDK bug in multiqueue... */
          /* xd->port_conf.intr_conf.rxq = 1; */
          break;
 
        case VNET_DPDK_PMD_THUNDERX:
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
-
-          if (xd->conf.no_tx_checksum_offload == 0)
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-              xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD;
-            }
          break;
 
        case VNET_DPDK_PMD_ENA:
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
-          xd->port_conf.rxmode.offloads &= ~DEV_RX_OFFLOAD_SCATTER;
-          xd->port_conf.intr_conf.rxq = 1;
-          if (xd->conf.no_tx_checksum_offload == 0)
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-              xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD;
-            }
+          xd->conf.disable_rx_scatter = 1;
+          xd->conf.enable_rxq_int = 1;
          break;
 
        case VNET_DPDK_PMD_DPAA2:
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_10G;
          break;
 
-          /* Cisco VIC */
-        case VNET_DPDK_PMD_ENIC:
-          {
-            xd->port_type = port_type_from_speed_capa (&di);
-            if (xd->conf.enable_tcp_udp_checksum)
-              dpdk_enable_l4_csum_offload (xd);
-          }
-          break;
-
          /* Intel Red Rock Canyon */
        case VNET_DPDK_PMD_FM10K:
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_SWITCH;
@@ -567,7 +452,7 @@ dpdk_lib_init (dpdk_main_t * dm)
          /* virtio */
        case VNET_DPDK_PMD_VIRTIO:
-          xd->port_conf.rxmode.mq_mode = ETH_MQ_RX_NONE;
+          xd->conf.disable_rss = 1;
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
          xd->conf.n_rx_desc = DPDK_NB_RX_DESC_VIRTIO;
          xd->conf.n_tx_desc = DPDK_NB_TX_DESC_VIRTIO;
@@ -578,22 +463,12 @@ dpdk_lib_init (dpdk_main_t * dm)
           * use the same check that the virtio driver does. */
          if (pci_dev && rte_intr_cap_multiple (&pci_dev->intr_handle))
-            xd->port_conf.intr_conf.rxq = 1;
+            xd->conf.enable_rxq_int = 1;
          break;
 
          /* vmxnet3 */
        case VNET_DPDK_PMD_VMXNET3:
          xd->port_type = VNET_DPDK_PORT_TYPE_ETH_1G;
-          xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_MULTI_SEGS;
-          /* TCP csum offload not working although udp might work. Left
-           * disabled for now */
-          if (0 && (xd->conf.no_tx_checksum_offload == 0))
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_IPV4_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_CKSUM;
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_UDP_CKSUM;
-              xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD;
-            }
          break;
 
        case VNET_DPDK_PMD_AF_PACKET:
@@ -614,7 +489,7 @@ dpdk_lib_init (dpdk_main_t * dm)
        case VNET_DPDK_PMD_FAILSAFE:
          xd->port_type = VNET_DPDK_PORT_TYPE_FAILSAFE;
-          xd->port_conf.intr_conf.lsc = 1;
+          xd->conf.enable_lsc_int = 1;
          break;
 
        case VNET_DPDK_PMD_NETVSC:
@@ -692,24 +567,16 @@ dpdk_lib_init (dpdk_main_t * dm)
                                         ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
        }
 
-      if (devconf->tso == DPDK_DEVICE_TSO_ON && hi != NULL)
+      if (devconf->tso == DPDK_DEVICE_TSO_ON)
        {
          /*tcp_udp checksum must be enabled*/
-          if ((xd->conf.enable_tcp_udp_checksum) &&
-              (xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD))
-            {
-              xd->port_conf.txmode.offloads |= DEV_TX_OFFLOAD_TCP_TSO;
-
-              if (xd->conf.enable_outer_checksum_offload &&
-                  (di.tx_offload_capa & DEV_TX_OFFLOAD_VXLAN_TNL_TSO))
-                {
-                  xd->port_conf.txmode.offloads |=
-                    DEV_TX_OFFLOAD_VXLAN_TNL_TSO;
-                }
-            }
+          if (xd->conf.enable_tcp_udp_checksum == 0)
+            dpdk_log_warn ("[%u] TCP/UDP checksum offload must be enabled",
+                           xd->port_id);
+          else if ((di.tx_offload_capa & DEV_TX_OFFLOAD_TCP_TSO) == 0)
+            dpdk_log_warn ("[%u] TSO not supported by device", xd->port_id);
          else
-            clib_warning ("%s: TCP/UDP checksum offload must be enabled",
-                          hi->name);
+            xd->conf.enable_tso = 1;
        }
 
       dpdk_device_setup (xd);
@@ -1214,13 +1081,13 @@ dpdk_config (vlib_main_t * vm, unformat_input_t * input)
          dm->default_port_conf.enable_outer_checksum_offload = 1;
        }
       else if (unformat (input, "no-tx-checksum-offload"))
-        dm->default_port_conf.no_tx_checksum_offload = 1;
+        dm->default_port_conf.disable_tx_checksum_offload = 1;
       else if (unformat (input, "decimal-interface-names"))
        conf->interface_name_format_decimal = 1;
       else if (unformat (input, "no-multi-seg"))
-        dm->default_port_conf.no_multi_seg = 1;
+        dm->default_port_conf.disable_multi_seg = 1;
       else if (unformat (input, "enable-lro"))
        dm->default_port_conf.enable_lro = 1;
       else if (unformat (input, "max-simd-bitwidth %U",
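
The heart of the change is the new flow in dpdk_device_setup(): build an RX/TX offload wishlist from the dpdk_port_conf_t flags, mask it against the capabilities reported by rte_eth_dev_info_get(), and hand the result to rte_eth_dev_configure(), instead of letting every PMD case in init.c poke bits into xd->port_conf. The sketch below shows that pattern in isolation; it is not VPP code — the port_settings struct, the configure_port() helper and the fixed queue counts are invented for illustration, and it assumes the pre-22.11 DEV_RX_OFFLOAD_*/DEV_TX_OFFLOAD_* macro names that this patch uses.

/* Minimal sketch of the "wishlist, then mask" offload setup shown above.
 * Assumptions (not from the patch): port_settings, configure_port() and the
 * single rx/tx queue are made up for illustration. */
#include <stdint.h>
#include <rte_ethdev.h>

struct port_settings
{
  uint8_t enable_tcp_udp_checksum : 1;
  uint8_t disable_tx_checksum_offload : 1;
  uint8_t disable_multi_seg : 1;
  uint8_t enable_tso : 1;
};

static int
configure_port (uint16_t port_id, const struct port_settings *s)
{
  struct rte_eth_dev_info dev_info;
  struct rte_eth_conf conf = { 0 };
  uint64_t rxo, txo;
  int rv;

  rv = rte_eth_dev_info_get (port_id, &dev_info);
  if (rv < 0)
    return rv;

  /* 1. build the wishlist from local config flags */
  rxo = DEV_RX_OFFLOAD_IPV4_CKSUM;
  txo = 0;

  if (s->enable_tcp_udp_checksum)
    rxo |= DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM;

  if (s->disable_tx_checksum_offload == 0)
    txo |= DEV_TX_OFFLOAD_IPV4_CKSUM | DEV_TX_OFFLOAD_TCP_CKSUM |
           DEV_TX_OFFLOAD_UDP_CKSUM;

  if (s->disable_multi_seg == 0)
    {
      txo |= DEV_TX_OFFLOAD_MULTI_SEGS;
      rxo |= DEV_RX_OFFLOAD_SCATTER;
    }

  if (s->enable_tso)
    txo |= DEV_TX_OFFLOAD_TCP_CKSUM | DEV_TX_OFFLOAD_TCP_TSO;

  /* 2. silently drop anything the device cannot do */
  rxo &= dev_info.rx_offload_capa;
  txo &= dev_info.tx_offload_capa;

  /* 3. hand the final, already-valid set to the PMD */
  conf.rxmode.offloads = rxo;
  conf.txmode.offloads = txo;

  return rte_eth_dev_configure (port_id, 1 /* rxq */, 1 /* txq */, &conf);
}

Masking against the capability bitmaps replaces the old warn-and-XOR correction step: anything the hardware cannot do is simply never requested, so the per-driver code in init.c only has to express intent through small configuration flags.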