Diffstat (limited to 'src')
-rwxr-xr-xsrc/bp_gtest.cpp2
-rwxr-xr-xsrc/bp_sim.cpp29
-rwxr-xr-xsrc/bp_sim.h40
-rwxr-xr-xsrc/common/Network/Packet/IPHeader.h9
-rw-r--r--src/debug.cpp15
-rw-r--r--src/dpdk/drivers/net/enic/base/vnic_dev.c33
-rw-r--r--src/dpdk/drivers/net/enic/base/vnic_dev.h3
-rw-r--r--src/dpdk/drivers/net/enic/base/vnic_devcmd.h346
-rw-r--r--src/dpdk/drivers/net/enic/enic.h14
-rw-r--r--src/dpdk/drivers/net/enic/enic_clsf.c301
-rw-r--r--src/dpdk/drivers/net/enic/enic_ethdev.c25
-rw-r--r--src/dpdk/drivers/net/enic/enic_main.c3
-rw-r--r--src/dpdk/drivers/net/enic/enic_res.c5
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5.c20
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5.h31
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5_autoconf.h8
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5_fdir.c67
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5_rxq.c4
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5_rxtx.h4
-rw-r--r--src/dpdk/drivers/net/mlx5/mlx5_stats.c321
-rw-r--r--src/flow_stat_parser.cpp4
-rw-r--r--src/main_dpdk.cpp572
-rwxr-xr-xsrc/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h8
-rw-r--r--src/pkt_gen.cpp18
-rw-r--r--src/pre_test.cpp2
-rwxr-xr-xsrc/rx_check.cpp4
-rw-r--r--src/stateful_rx_core.cpp4
-rw-r--r--src/stateless/dp/trex_stateless_dp_core.cpp4
28 files changed, 1690 insertions, 206 deletions
diff --git a/src/bp_gtest.cpp b/src/bp_gtest.cpp
index ca514c88..11bd6235 100755
--- a/src/bp_gtest.cpp
+++ b/src/bp_gtest.cpp
@@ -196,7 +196,7 @@ public:
ports[i]=lpg->GenerateOneSourcePort();
}
}
-
+ CGlobalInfo::m_options.m_run_mode = CParserOption::RUN_MODE_BATCH;
lpt->start_generate_stateful(buf,CGlobalInfo::m_options.preview);
lpt->m_node_gen.DumpHist(stdout);
diff --git a/src/bp_sim.cpp b/src/bp_sim.cpp
index 62e8d822..c1df72fc 100755
--- a/src/bp_sim.cpp
+++ b/src/bp_sim.cpp
@@ -160,8 +160,8 @@ uint64_t CPlatformSocketInfoNoConfig::get_cores_mask(){
int i;
int offset=0;
/* master */
- uint32_t res=1;
- uint32_t mask=(1<<(offset+1));
+ uint64_t res=1;
+ uint64_t mask=(1LL<<(offset+1));
for (i=0; i<(cores_number-1); i++) {
res |= mask ;
mask = mask <<1;
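A minimal standalone sketch (illustrative, not from the patch) of the 32-bit shift overflow this hunk is concerned with: a plain 1<<i is evaluated as a 32-bit int, so core masks break once a thread id reaches 31, while 1LL<<i keeps the full 64-bit width.

    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
        int i = 40;                   /* hypothetical thread id > 31 */
        uint64_t narrow = 1 << i;     /* undefined: shifts a 32-bit int by 40 */
        uint64_t wide   = 1LL << i;   /* well-defined: bit 40 of a 64-bit mask */
        printf("narrow=0x%llx wide=0x%llx\n",
               (unsigned long long)narrow, (unsigned long long)wide);
        return 0;
    }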
@@ -238,6 +238,13 @@ bool CPlatformSocketInfoConfig::init(){
}
}
+ if (m_threads_per_dual_if > m_max_threads_per_dual_if) {
+ printf("ERROR: Maximum threads in platform section of config file is %d, unable to run with -c %d.\n",
+ m_max_threads_per_dual_if, m_threads_per_dual_if);
+ printf("Please increase the pool in config or use lower -c.\n");
+ exit(1);
+ }
+
int j;
for (j=0; j<m_threads_per_dual_if; j++) {
@@ -381,14 +388,14 @@ uint64_t CPlatformSocketInfoConfig::get_cores_mask(){
printf(" ERROR phy threads can't be higher than 64 \n");
exit(1);
}
- mask |=(1<<i);
+ mask |=(1LL<<i);
}
}
- mask |=(1<<m_platform->m_master_thread);
+ mask |=(1LL<<m_platform->m_master_thread);
assert(m_platform->m_master_thread<64);
if (m_rx_is_enabled) {
- mask |=(1<<m_platform->m_rx_thread);
+ mask |=(1LL<<m_platform->m_rx_thread);
assert(m_platform->m_rx_thread<64);
}
return (mask);
@@ -1668,7 +1675,8 @@ void CFlowPktInfo::do_generate_new_mbuf_rxcheck(rte_mbuf_t * m,
IPv6Header * ipv6=(IPv6Header *)(mp1 + 14);
uint8_t save_header= ipv6->getNextHdr();
ipv6->setNextHdr(RX_CHECK_V6_OPT_TYPE);
- ipv6->setHopLimit(TTL_RESERVE_DUPLICATE);
+ ipv6->setHopLimit(TTL_RESERVE_DUPLICATE);
+ ipv6->setTrafficClass(ipv6->getTrafficClass()|TOS_TTL_RESERVE_DUPLICATE);
ipv6->setPayloadLen( ipv6->getPayloadLen() +
sizeof(CRx_check_header));
rxhdr->m_option_type = save_header;
@@ -1678,6 +1686,8 @@ void CFlowPktInfo::do_generate_new_mbuf_rxcheck(rte_mbuf_t * m,
ipv4->setHeaderLength(current_opt_len+opt_len);
ipv4->setTotalLength(ipv4->getTotalLength()+opt_len);
ipv4->setTimeToLive(TTL_RESERVE_DUPLICATE);
+ ipv4->setTOS(ipv4->getTOS()|TOS_TTL_RESERVE_DUPLICATE);
+
rxhdr->m_option_type = RX_CHECK_V4_OPT_TYPE;
rxhdr->m_option_len = RX_CHECK_V4_OPT_LEN;
}
@@ -2148,6 +2158,7 @@ void CCapFileFlowInfo::update_info(){
lp = GetPacket(1);
assert(lp);
lp->m_pkt_indication.setTTL(TTL_RESERVE_DUPLICATE);
+ lp->m_pkt_indication.setTOSReserve();
}
}
@@ -2234,6 +2245,9 @@ enum CCapFileFlowInfo::load_cap_file_err CCapFileFlowInfo::load_cap_file(std::st
pkt_indication.setTTL(TTL_RESERVE_DUPLICATE-4);
}
+ pkt_indication.clearTOSReserve();
+
+
// Validation for first packet in flow
if (is_fif) {
lpflow->flow_id = m_total_flows;
@@ -4419,7 +4433,7 @@ void CFlowGenListPerThread::stop_stateless_simulation_file(){
}
void CFlowGenListPerThread::start_stateless_daemon_simulation(){
-
+ CGlobalInfo::m_options.m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
m_cur_time_sec = 0;
/* if no pending CP messages - the core will simply be stuck forever */
@@ -4438,6 +4452,7 @@ bool CFlowGenListPerThread::set_stateless_next_node( CGenNodeStateless * cur_nod
void CFlowGenListPerThread::start_stateless_daemon(CPreviewMode &preview){
+ CGlobalInfo::m_options.m_run_mode = CParserOption::RUN_MODE_INTERACTIVE;
m_cur_time_sec = 0;
/* set per thread global info, for performance */
m_preview_mode = preview;
diff --git a/src/bp_sim.h b/src/bp_sim.h
index cd0f6a13..0cf77437 100755
--- a/src/bp_sim.h
+++ b/src/bp_sim.h
@@ -80,6 +80,7 @@ typedef struct {
/* reserve both 0xFF and 0xFE; a router will decrement 0xFF by 1 */
#define TTL_RESERVE_DUPLICATE 0xff
+#define TOS_TTL_RESERVE_DUPLICATE 0x1
/*
* Length of string needed to hold the largest port (16-bit) address
@@ -824,6 +825,11 @@ public:
return ( (m_expected_portd>>1) * preview.getCores());
}
bool is_stateless(){
+ if (m_run_mode == RUN_MODE_INVALID) {
+ fprintf(stderr, "Internal bug: Calling is stateless before initializing run mode\n");
+ fprintf(stderr, "Try to put -i or -f <file> option as first in the option list\n");
+ exit(-1);
+ }
return (m_run_mode == RUN_MODE_INTERACTIVE ?true:false);
}
bool is_latency_enabled() {
@@ -1199,12 +1205,13 @@ public:
/* for simulation */
static void free_pools();
-
static inline rte_mbuf_t * pktmbuf_alloc_small(socket_id_t socket){
return ( m_mem_pool[socket].pktmbuf_alloc_small() );
}
-
+ static inline rte_mbuf_t * pktmbuf_alloc_small_by_port(uint8_t port_id) {
+ return ( m_mem_pool[m_socket.port_to_socket(port_id)].pktmbuf_alloc_small() );
+ }
/**
* try to allocate small buffers too
@@ -1222,6 +1229,13 @@ public:
return (m_mem_pool[socket].pktmbuf_alloc(size));
}
+ static inline rte_mbuf_t * pktmbuf_alloc_by_port(uint8_t port_id, uint16_t size){
+ socket_id_t socket = m_socket.port_to_socket(port_id);
+ if (size<FIRST_PKT_SIZE) {
+ return ( pktmbuf_alloc_small(socket));
+ }
+ return (m_mem_pool[socket].pktmbuf_alloc(size));
+ }
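A hypothetical usage sketch of the new per-port allocator: it resolves the port's NUMA socket first, so the mbuf memory is local to the NIC. The debug.cpp hunks below switch their allocations to this helper for that reason.

    /* sketch: allocate a 128-byte mbuf from the pool local to port 0 */
    rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc_by_port(0 /* port_id */, 128);
    assert(m != NULL);
    /* equivalent to pktmbuf_alloc(m_socket.port_to_socket(0), 128) */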
static inline bool is_learn_verify_mode(){
return ( (m_options.m_learn_mode != CParserOption::LEARN_MODE_DISABLED) && m_options.preview.get_learn_and_verify_mode_enable());
@@ -2682,6 +2696,26 @@ public:
return (0);
}
}
+
+
+ void setTOSReserve(){
+ BP_ASSERT(l3.m_ipv4);
+ if (is_ipv6()) {
+ l3.m_ipv6->setTrafficClass(l3.m_ipv6->getTrafficClass() | TOS_TTL_RESERVE_DUPLICATE );
+ }else{
+ l3.m_ipv4->setTOS(l3.m_ipv4->getTOS()| TOS_TTL_RESERVE_DUPLICATE );
+ }
+ }
+
+ void clearTOSReserve(){
+ BP_ASSERT(l3.m_ipv4);
+ if (is_ipv6()) {
+ l3.m_ipv6->setTrafficClass(l3.m_ipv6->getTrafficClass()& (~TOS_TTL_RESERVE_DUPLICATE) );
+ }else{
+ l3.m_ipv4->setTOS(l3.m_ipv4->getTOS() & (~TOS_TTL_RESERVE_DUPLICATE) );
+ }
+ }
+
uint8_t getTTL(){
BP_ASSERT(l3.m_ipv4);
if (is_ipv6()) {
@@ -3054,6 +3088,8 @@ inline void CFlowPktInfo::update_pkt_info(char *p,
printf(" %.3f : DP : learn packet !\n",now_sec());
#endif
ipv4->setTimeToLive(TTL_RESERVE_DUPLICATE);
+ ipv4->setTOS(ipv4->getTOS()|TOS_TTL_RESERVE_DUPLICATE);
+
/* first ipv4 option adds the info in case of a learn packet, usually only the first packet */
if (CGlobalInfo::is_learn_mode(CParserOption::LEARN_MODE_IP_OPTION)) {
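Taken together, these hunks mark generator-originated packets twice: the TTL is set to TTL_RESERVE_DUPLICATE (0xFF, with 0xFE reserved for the post-router decrement) and the low TOS/traffic-class bit is set as a second, decrement-proof marker. A receive-side sketch (hypothetical helper, using the accessors defined above):

    /* sketch: recognize a marked packet even after a router changed the TTL */
    static inline bool is_marked_pkt(IPHeader *ipv4) {
        return (ipv4->getTOS() & TOS_TTL_RESERVE_DUPLICATE) != 0;
    }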
diff --git a/src/common/Network/Packet/IPHeader.h b/src/common/Network/Packet/IPHeader.h
index da9ba52c..0bf97fbb 100755
--- a/src/common/Network/Packet/IPHeader.h
+++ b/src/common/Network/Packet/IPHeader.h
@@ -91,6 +91,15 @@ public:
*/
inline void setHeaderLength (uint8_t);
+ inline uint16_t getFirstWord(){
+ return PKT_NTOHS(*((uint16_t *)&myVer_HeaderLength));
+ }
+
+ inline void setFirstWord (uint16_t word){
+ *((uint16_t *)&myVer_HeaderLength) = PKT_NTOHS(word);
+ }
+
+
inline uint8_t getTOS ();
inline void setTOS (uint8_t);
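The new accessors expose the version/IHL byte and the TOS byte as one 16-bit word in host order, allowing a single read-modify-write. A usage sketch (ip is assumed to point at a valid IPv4 header):

    uint16_t w = ip->getFirstWord();   /* [ver|ihl][tos], host byte order */
    ip->setFirstWord((w & 0xff00) | TOS_TTL_RESERVE_DUPLICATE);  /* rewrite TOS */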
diff --git a/src/debug.cpp b/src/debug.cpp
index 3a9cd506..d0b7cf11 100644
--- a/src/debug.cpp
+++ b/src/debug.cpp
@@ -106,7 +106,7 @@ rte_mbuf_t *CTrexDebug::create_test_pkt(int ip_ver, uint16_t l4_proto, uint8_t t
0x07, 0x08, 0x50, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x08, 0x0a, 0x01, 0x02, 0x03, 0x04,
// bad - 0x03, 0x04, 0x06, 0x02, 0x20, 0x00, 0xBB, 0x79, 0x00, 0x00};
0x03, 0x04, 0x50, 0x02, 0x20, 0x00, 0xBB, 0x79, 0x00, 0x00};
- rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc(0, sizeof(test_pkt));
+ rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc_by_port(0, sizeof(test_pkt));
char *p = rte_pktmbuf_append(m, sizeof(test_pkt));
assert(p);
@@ -141,7 +141,12 @@ rte_mbuf_t *CTrexDebug::create_test_pkt(int ip_ver, uint16_t l4_proto, uint8_t t
}
pkt = CTestPktGen::create_test_pkt(l3_type, l4_proto, ttl, ip_id, flags, 1000, pkt_size);
- m = CGlobalInfo::pktmbuf_alloc(0, pkt_size);
+
+ /* DEBUG print the packet
+ utl_k12_pkt_format(stdout,pkt, pkt_size) ;
+ */
+
+ m = CGlobalInfo::pktmbuf_alloc_by_port(0, pkt_size);
if ( unlikely(m == 0) ) {
printf("ERROR no packets \n");
return (NULL);
@@ -156,7 +161,7 @@ rte_mbuf_t *CTrexDebug::create_test_pkt(int ip_ver, uint16_t l4_proto, uint8_t t
#endif
rte_mbuf_t *CTrexDebug::create_pkt(uint8_t *pkt, int pkt_size) {
- rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc(0, pkt_size);
+ rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc_by_port(0, pkt_size);
if ( unlikely(m == 0) ) {
printf("ERROR no packets \n");
return 0;
@@ -170,7 +175,7 @@ rte_mbuf_t *CTrexDebug::create_pkt(uint8_t *pkt, int pkt_size) {
}
rte_mbuf_t *CTrexDebug::create_pkt_indirect(rte_mbuf_t *m, uint32_t new_pkt_size){
- rte_mbuf_t *d = CGlobalInfo::pktmbuf_alloc(0, 60);
+ rte_mbuf_t *d = CGlobalInfo::pktmbuf_alloc_by_port(0, 60);
assert(d);
rte_pktmbuf_attach(d, m);
@@ -341,7 +346,7 @@ int CTrexDebug::verify_hw_rules(bool recv_all) {
rte_mbuf_t *m = NULL;
CPhyEthIF * lp;
rte_mbuf_t * rx_pkts[32];
- int sent_num = 20;
+ int sent_num = 8; /* reduced burst size; some drivers can handle only a burst of 8 on queue 0 */
int ret = 0;
for (int pkt_num = 0; pkt_num < sizeof(test_pkts) / sizeof (test_pkts[0]); pkt_num++) {
diff --git a/src/dpdk/drivers/net/enic/base/vnic_dev.c b/src/dpdk/drivers/net/enic/base/vnic_dev.c
index fc2e4cc3..713b6089 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_dev.c
+++ b/src/dpdk/drivers/net/enic/base/vnic_dev.c
@@ -462,6 +462,18 @@ int vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
}
}
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev)
+{
+ u64 a0 = (u32)CMD_ADD_ADV_FILTER, a1 = 0;
+ int wait = 1000;
+ int err;
+
+ err = vnic_dev_cmd(vdev, CMD_CAPABILITY, &a0, &a1, wait);
+ if (err)
+ return 0;
+ return (a1 >= (u32)FILTER_DPDK_1);
+}
+
static int vnic_dev_capable(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd)
{
u64 a0 = (u32)cmd, a1 = 0;
@@ -999,7 +1011,7 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
* @data: filter data
*/
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data)
+ struct filter_v2 *data)
{
u64 a0, a1;
int wait = 1000;
@@ -1008,11 +1020,20 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
struct filter_tlv *tlv, *tlv_va;
struct filter_action *action;
u64 tlv_size;
+ u32 filter_size;
static unsigned int unique_id;
char z_name[RTE_MEMZONE_NAMESIZE];
+ enum vnic_devcmd_cmd dev_cmd;
+
if (cmd == CLSF_ADD) {
- tlv_size = sizeof(struct filter) +
+ if (data->type == FILTER_DPDK_1)
+ dev_cmd = CMD_ADD_ADV_FILTER;
+ else
+ dev_cmd = CMD_ADD_FILTER;
+
+ filter_size = vnic_filter_size(data);
+ tlv_size = filter_size +
sizeof(struct filter_action) +
2*sizeof(struct filter_tlv);
snprintf((char *)z_name, sizeof(z_name),
@@ -1026,12 +1047,12 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
a1 = tlv_size;
memset(tlv, 0, tlv_size);
tlv->type = CLSF_TLV_FILTER;
- tlv->length = sizeof(struct filter);
- *(struct filter *)&tlv->val = *data;
+ tlv->length = filter_size;
+ memcpy(&tlv->val, (void *)data, filter_size);
tlv = (struct filter_tlv *)((char *)tlv +
sizeof(struct filter_tlv) +
- sizeof(struct filter));
+ filter_size);
tlv->type = CLSF_TLV_ACTION;
tlv->length = sizeof(struct filter_action);
@@ -1039,7 +1060,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
action->type = FILTER_ACTION_RQ_STEERING;
action->u.rq_idx = *entry;
- ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+ ret = vnic_dev_cmd(vdev, dev_cmd, &a0, &a1, wait);
*entry = (u16)a0;
vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa);
} else if (cmd == CLSF_DEL) {
diff --git a/src/dpdk/drivers/net/enic/base/vnic_dev.h b/src/dpdk/drivers/net/enic/base/vnic_dev.h
index 689442f3..06ebd4ce 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_dev.h
+++ b/src/dpdk/drivers/net/enic/base/vnic_dev.h
@@ -134,6 +134,7 @@ void vnic_dev_cmd_proxy_by_bdf_start(struct vnic_dev *vdev, u16 bdf);
void vnic_dev_cmd_proxy_end(struct vnic_dev *vdev);
int vnic_dev_fw_info(struct vnic_dev *vdev,
struct vnic_devcmd_fw_info **fw_info);
+int vnic_dev_capable_adv_filters(struct vnic_dev *vdev);
int vnic_dev_asic_info(struct vnic_dev *vdev, u16 *asic_type, u16 *asic_rev);
int vnic_dev_spec(struct vnic_dev *vdev, unsigned int offset, size_t size,
void *value);
@@ -201,7 +202,7 @@ int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
- struct filter *data);
+ struct filter_v2 *data);
#ifdef ENIC_VXLAN
int vnic_dev_overlay_offload_enable_disable(struct vnic_dev *vdev,
u8 overlay, u8 config);
diff --git a/src/dpdk/drivers/net/enic/base/vnic_devcmd.h b/src/dpdk/drivers/net/enic/base/vnic_devcmd.h
index b3d5a6cc..785fd6fd 100644
--- a/src/dpdk/drivers/net/enic/base/vnic_devcmd.h
+++ b/src/dpdk/drivers/net/enic/base/vnic_devcmd.h
@@ -1,5 +1,5 @@
/*
- * Copyright 2008-2010 Cisco Systems, Inc. All rights reserved.
+ * Copyright 2008-2016 Cisco Systems, Inc. All rights reserved.
* Copyright 2007 Nuova Systems, Inc. All rights reserved.
*
* Copyright (c) 2014, Cisco Systems, Inc.
@@ -126,7 +126,8 @@ enum vnic_devcmd_cmd {
/* dev-specific block member:
* in: (u16)a0=offset,(u8)a1=size
- * out: a0=value */
+ * out: a0=value
+ */
CMD_DEV_SPEC = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 2),
/* stats clear */
@@ -146,8 +147,9 @@ enum vnic_devcmd_cmd {
CMD_HANG_NOTIFY = _CMDC(_CMD_DIR_NONE, _CMD_VTYPE_ALL, 8),
/* MAC address in (u48)a0 */
- CMD_GET_MAC_ADDR = _CMDC(_CMD_DIR_READ,
+ CMD_MAC_ADDR = _CMDC(_CMD_DIR_READ,
_CMD_VTYPE_ENET | _CMD_VTYPE_FC, 9),
+#define CMD_GET_MAC_ADDR CMD_MAC_ADDR /* some uses are aliased */
/* add addr from (u48)a0 */
CMD_ADDR_ADD = _CMDCNW(_CMD_DIR_WRITE,
@@ -387,9 +389,8 @@ enum vnic_devcmd_cmd {
* Subvnic migration from MQ <--> VF.
* Enable the LIF migration from MQ to VF and vice versa. MQ and VF
* indexes are statically bound at the time of initialization.
- * Based on the
- * direction of migration, the resources of either MQ or the VF shall
- * be attached to the LIF.
+ * Based on the direction of migration, the resources of either MQ or
+ * the VF shall be attached to the LIF.
* in: (u32)a0=Direction of Migration
* 0=> Migrate to VF
* 1=> Migrate to MQ
@@ -397,7 +398,6 @@ enum vnic_devcmd_cmd {
*/
CMD_MIGRATE_SUBVNIC = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 53),
-
/*
* Register / Deregister the notification block for MQ subvnics
* in:
@@ -433,6 +433,10 @@ enum vnic_devcmd_cmd {
* in: (u64) a0= filter address
* (u32) a1= size of filter
* out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
*/
CMD_ADD_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 58),
@@ -471,23 +475,133 @@ enum vnic_devcmd_cmd {
CMD_QP_STATS_CLEAR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 63),
/*
- * Enable/Disable overlay offloads on the given vnic
+ * UEFI BOOT API: (u64)a0= UEFI FLS_CMD_xxx
+ * (ui64)a1= paddr for the info buffer
+ */
+ CMD_FC_REQ = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_FC, 64),
+
+ /*
+ * Return the iSCSI config details required by the EFI Option ROM
+ * in: (u32) a0=0 Get Boot Info for PXE eNIC as per pxe_boot_config_t
+ * a0=1 Get Boot info for iSCSI enic as per
+ * iscsi_boot_efi_cfg_t
+ * in: (u64) a1=Host address where iSCSI config info is returned
+ */
+ CMD_VNIC_BOOT_CONFIG_INFO = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ALL, 65),
+
+ /*
+ * Create a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u32) a1 = Remote QP
+ * (u32) a2 = RDMA-RQ
+ * (u16) a3 = RQ Res Group
+ * (u16) a4 = SQ Res Group
+ * (u32) a5 = Protection Domain
+ * (u64) a6 = Remote MAC
+ * (u32) a7 = start PSN
+ * (u16) a8 = MSS
+ * (u32) a9 = protocol version
+ */
+ CMD_RDMA_QP_CREATE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 66),
+
+ /*
+ * Delete a Queue Pair (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ */
+ CMD_RDMA_QP_DELETE = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 67),
+
+ /*
+ * Retrieve a Queue Pair's status information (RoCE)
+ * in: (u32) a0 = Queue Pair number
+ * (u64) a1 = host buffer addr for QP status struct
+ * (u32) a2 = length of the buffer
+ */
+ CMD_RDMA_QP_STATUS = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 68),
+
+ /*
+ * Use this devcmd for agreeing on the highest common version supported
+ * by both driver and fw, for features that need such a facility.
+ * in: (u64) a0 = feature (driver requests for the supported versions
+ * on this feature)
+ * out: (u64) a0 = bitmap of all supported versions for that feature
+ */
+ CMD_GET_SUPP_FEATURE_VER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 69),
+
+ /*
+ * Initialize the RDMA notification work queue
+ * in: (u64) a0 = host buffer address
+ * in: (u16) a1 = number of entries in buffer
+ * in: (u16) a2 = resource group number
+ * in: (u16) a3 = CQ number to post completion
+ */
+ CMD_RDMA_INIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 70),
+
+ /*
+ * De-init the RDMA notification work queue
+ * in: (u64) a0=resource group number
+ */
+ CMD_RDMA_DEINIT_INFO_BUF = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 71),
+
+ /*
+ * Control (Enable/Disable) overlay offloads on the given vnic
* in: (u8) a0 = OVERLAY_FEATURE_NVGRE : NVGRE
* a0 = OVERLAY_FEATURE_VXLAN : VxLAN
- * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable
- * a1 = OVERLAY_OFFLOAD_DISABLE : Disable
+ * in: (u8) a1 = OVERLAY_OFFLOAD_ENABLE : Enable or
+ * a1 = OVERLAY_OFFLOAD_DISABLE : Disable or
+ * a1 = OVERLAY_OFFLOAD_ENABLE_V2 : Enable with version 2
*/
- CMD_OVERLAY_OFFLOAD_ENABLE_DISABLE =
- _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
+ CMD_OVERLAY_OFFLOAD_CTRL =
+ _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 72),
/*
* Configuration of overlay offloads feature on a given vNIC
- * in: (u8) a0 = DEVCMD_OVERLAY_NVGRE : NVGRE
- * a0 = DEVCMD_OVERLAY_VXLAN : VxLAN
- * in: (u8) a1 = VXLAN_PORT_UPDATE : VxLAN
- * in: (u16) a2 = unsigned short int port information
+ * in: (u8) a0 = OVERLAY_CFG_VXLAN_PORT_UPDATE : VxLAN
+ * in: (u16) a1 = unsigned short int port information
*/
CMD_OVERLAY_OFFLOAD_CFG = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 73),
+
+ /*
+ * Return the configured name for the device
+ * in: (u64) a0=Host address where the name is copied
+ * (u32) a1=Size of the buffer
+ */
+ CMD_GET_CONFIG_NAME = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ALL, 74),
+
+ /*
+ * Enable group interrupt for the VF
+ * in: (u32) a0 = GRPINTR_ENABLE : enable
+ * a0 = GRPINTR_DISABLE : disable
+ * a0 = GRPINTR_UPD_VECT: update group vector addr
+ * in: (u32) a1 = interrupt group count
+ * in: (u64) a2 = Start of host buffer address for DMAing group
+ * vector bitmap
+ * in: (u64) a3 = Stride between group vectors
+ */
+ CMD_CONFIG_GRPINTR = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 75),
+
+ /*
+ * Set the CQ array base and size in a list of consecutive WQs and
+ * RQs for a device
+ * in: (u16) a0 = the wq relative index in the device.
+ * -1 indicates skipping wq configuration
+ * in: (u16) a1 = the wcq relative index in the device
+ * in: (u16) a2 = the rq relative index in the device
+ * -1 indicates skipping rq configuration
+ * in: (u16) a3 = the rcq relative index in the device
+ */
+ CMD_CONFIG_CQ_ARRAY = _CMDC(_CMD_DIR_WRITE, _CMD_VTYPE_ENET, 76),
+
+ /*
+ * Add an advanced filter.
+ * in: (u64) a0= filter address
+ * (u32) a1= size of filter
+ * out: (u32) a0=filter identifier
+ *
+ * Capability query:
+ * out: (u64) a0= 1 if capability query supported
+ * (u64) a1= MAX filter type supported
+ */
+ CMD_ADD_ADV_FILTER = _CMDC(_CMD_DIR_RW, _CMD_VTYPE_ENET, 77),
};
/* CMD_ENABLE2 flags */
@@ -520,6 +634,9 @@ enum vnic_devcmd_status {
STAT_NONE = 0,
STAT_BUSY = 1 << 0, /* cmd in progress */
STAT_ERROR = 1 << 1, /* last cmd caused error (code in a0) */
+ STAT_FAILOVER = 1 << 2, /* always set on vnics in pci standby state
+ * if a failover to the standby has happened
+ */
};
enum vnic_devcmd_error {
@@ -558,9 +675,9 @@ enum fwinfo_asic_type {
FWINFO_ASIC_TYPE_UNKNOWN,
FWINFO_ASIC_TYPE_PALO,
FWINFO_ASIC_TYPE_SERENO,
+ FWINFO_ASIC_TYPE_CRUZ,
};
-
struct vnic_devcmd_notify {
u32 csum; /* checksum over following words */
@@ -595,25 +712,16 @@ struct vnic_devcmd_provinfo {
*/
#define FILTER_FIELD_VALID(fld) (1 << (fld - 1))
-#define FILTER_FIELDS_USNIC (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4))
-
-#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2) | \
- FILTER_FIELD_VALID(3) | \
- FILTER_FIELD_VALID(4) | \
- FILTER_FIELD_VALID(5))
-
-#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VALID(1) | \
- FILTER_FIELD_VALID(2))
-
#define FILTER_FIELD_USNIC_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_USNIC_ETHTYPE FILTER_FIELD_VALID(2)
#define FILTER_FIELD_USNIC_PROTO FILTER_FIELD_VALID(3)
#define FILTER_FIELD_USNIC_ID FILTER_FIELD_VALID(4)
+#define FILTER_FIELDS_USNIC (FILTER_FIELD_USNIC_VLAN | \
+ FILTER_FIELD_USNIC_ETHTYPE | \
+ FILTER_FIELD_USNIC_PROTO | \
+ FILTER_FIELD_USNIC_ID)
+
struct filter_usnic_id {
u32 flags;
u16 vlan;
@@ -628,10 +736,18 @@ struct filter_usnic_id {
#define FILTER_FIELD_5TUP_SRC_PT FILTER_FIELD_VALID(4)
#define FILTER_FIELD_5TUP_DST_PT FILTER_FIELD_VALID(5)
+#define FILTER_FIELDS_IPV4_5TUPLE (FILTER_FIELD_5TUP_PROTO | \
+ FILTER_FIELD_5TUP_SRC_AD | \
+ FILTER_FIELD_5TUP_DST_AD | \
+ FILTER_FIELD_5TUP_SRC_PT | \
+ FILTER_FIELD_5TUP_DST_PT)
+
/* Enums for the protocol field. */
enum protocol_e {
PROTO_UDP = 0,
PROTO_TCP = 1,
+ PROTO_IPV4 = 2,
+ PROTO_IPV6 = 3
};
struct filter_ipv4_5tuple {
@@ -646,12 +762,78 @@ struct filter_ipv4_5tuple {
#define FILTER_FIELD_VMQ_VLAN FILTER_FIELD_VALID(1)
#define FILTER_FIELD_VMQ_MAC FILTER_FIELD_VALID(2)
+#define FILTER_FIELDS_MAC_VLAN (FILTER_FIELD_VMQ_VLAN | \
+ FILTER_FIELD_VMQ_MAC)
+
+#define FILTER_FIELDS_NVGRE FILTER_FIELD_VMQ_MAC
+
struct filter_mac_vlan {
u32 flags;
u16 vlan;
u8 mac_addr[6];
} __attribute__((packed));
+#define FILTER_FIELD_VLAN_IP_3TUP_VLAN FILTER_FIELD_VALID(1)
+#define FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO FILTER_FIELD_VALID(2)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_AD FILTER_FIELD_VALID(3)
+#define FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO FILTER_FIELD_VALID(4)
+#define FILTER_FIELD_VLAN_IP_3TUP_DST_PT FILTER_FIELD_VALID(5)
+
+#define FILTER_FIELDS_VLAN_IP_3TUP (FILTER_FIELD_VLAN_IP_3TUP_VLAN | \
+ FILTER_FIELD_VLAN_IP_3TUP_L3_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_AD | \
+ FILTER_FIELD_VLAN_IP_3TUP_L4_PROTO | \
+ FILTER_FIELD_VLAN_IP_3TUP_DST_PT)
+
+struct filter_vlan_ip_3tuple {
+ u32 flags;
+ u16 vlan;
+ u16 l3_protocol;
+ union {
+ u32 dst_addr_v4;
+ u8 dst_addr_v6[16];
+ } u;
+ u32 l4_protocol;
+ u16 dst_port;
+} __attribute__((packed));
+
+#define FILTER_GENERIC_1_BYTES 64
+
+enum filter_generic_1_layer {
+ FILTER_GENERIC_1_L2,
+ FILTER_GENERIC_1_L3,
+ FILTER_GENERIC_1_L4,
+ FILTER_GENERIC_1_L5,
+ FILTER_GENERIC_1_NUM_LAYERS
+};
+
+#define FILTER_GENERIC_1_IPV4 (1 << 0)
+#define FILTER_GENERIC_1_IPV6 (1 << 1)
+#define FILTER_GENERIC_1_UDP (1 << 2)
+#define FILTER_GENERIC_1_TCP (1 << 3)
+#define FILTER_GENERIC_1_TCP_OR_UDP (1 << 4)
+#define FILTER_GENERIC_1_IP4SUM_OK (1 << 5)
+#define FILTER_GENERIC_1_L4SUM_OK (1 << 6)
+#define FILTER_GENERIC_1_IPFRAG (1 << 7)
+
+#define FILTER_GENERIC_1_KEY_LEN 64
+
+/*
+ * Version 1 of generic filter specification
+ * position is only 16 bits, reserving positions > 64k to be used by firmware
+ */
+struct filter_generic_1 {
+ u16 position; /* lower position comes first */
+ u32 mask_flags;
+ u32 val_flags;
+ u16 mask_vlan;
+ u16 val_vlan;
+ struct {
+ u8 mask[FILTER_GENERIC_1_KEY_LEN]; /* 0 bit means "don't care" */
+ u8 val[FILTER_GENERIC_1_KEY_LEN];
+ } __attribute__((packed)) layer[FILTER_GENERIC_1_NUM_LAYERS];
+} __attribute__((packed));
+
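To illustrate the mask/val convention (a sketch, not code from this patch; udp_hdr per DPDK's rte_udp.h): matching only UDP destination port 5000, with everything else wildcarded, fills the L4 layer like so:

    struct filter_generic_1 gf;
    struct udp_hdr m, v;
    memset(&gf, 0, sizeof(gf));
    memset(&m, 0, sizeof(m));
    memset(&v, 0, sizeof(v));
    m.dst_port = 0xffff;                    /* compare all 16 bits */
    v.dst_port = rte_cpu_to_be_16(5000);    /* value in network order */
    gf.mask_flags = FILTER_GENERIC_1_UDP;
    gf.val_flags  = FILTER_GENERIC_1_UDP;
    memcpy(gf.layer[FILTER_GENERIC_1_L4].mask, &m, sizeof(m));
    memcpy(gf.layer[FILTER_GENERIC_1_L4].val,  &v, sizeof(v));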
/* Specifies the filter_action type. */
enum {
FILTER_ACTION_RQ_STEERING = 0,
@@ -670,6 +852,10 @@ enum filter_type {
FILTER_USNIC_ID = 0,
FILTER_IPV4_5TUPLE = 1,
FILTER_MAC_VLAN = 2,
+ FILTER_VLAN_IP_3TUPLE = 3,
+ FILTER_NVGRE_VMQ = 4,
+ FILTER_USNIC_IP = 5,
+ FILTER_DPDK_1 = 6,
FILTER_MAX
};
@@ -679,6 +865,27 @@ struct filter {
struct filter_usnic_id usnic;
struct filter_ipv4_5tuple ipv4;
struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ } u;
+} __attribute__((packed));
+
+/*
+ * This is a strict superset of "struct filter" and exists only
+ * because many drivers use "sizeof (struct filter)" in deciding TLV size.
+ * This new, larger struct filter would cause any code that uses that method
+ * to not work with older firmware, so we add filter_v2 to hold the
+ * new filter types. Drivers should use vnic_filter_size() to determine
+ * the TLV size instead of sizeof (struct filter_v2) to guard against future
+ * growth.
+ */
+struct filter_v2 {
+ u32 type;
+ union {
+ struct filter_usnic_id usnic;
+ struct filter_ipv4_5tuple ipv4;
+ struct filter_mac_vlan mac_vlan;
+ struct filter_vlan_ip_3tuple vlan_3tuple;
+ struct filter_generic_1 generic_1;
} u;
} __attribute__((packed));
@@ -687,14 +894,55 @@ enum {
CLSF_TLV_ACTION = 1,
};
-#define FILTER_MAX_BUF_SIZE 100 /* Maximum size of buffer to CMD_ADD_FILTER */
-
struct filter_tlv {
- uint32_t type;
- uint32_t length;
- uint32_t val[0];
+ u_int32_t type;
+ u_int32_t length;
+ u_int32_t val[0];
};
+/* Data for CMD_ADD_FILTER is 2 TLVs and the filter + action structs */
+#define FILTER_MAX_BUF_SIZE 100
+#define FILTER_V2_MAX_BUF_SIZE (sizeof(struct filter_v2) + \
+ sizeof(struct filter_action) + \
+ (2 * sizeof(struct filter_tlv)))
+
+/*
+ * Compute actual structure size given filter type. To be "future-proof,"
+ * drivers should use this instead of "sizeof (struct filter_v2)" when
+ * computing length for TLV.
+ */
+static inline u_int32_t
+vnic_filter_size(struct filter_v2 *fp)
+{
+ u_int32_t size;
+
+ switch (fp->type) {
+ case FILTER_USNIC_ID:
+ size = sizeof(fp->u.usnic);
+ break;
+ case FILTER_IPV4_5TUPLE:
+ size = sizeof(fp->u.ipv4);
+ break;
+ case FILTER_MAC_VLAN:
+ case FILTER_NVGRE_VMQ:
+ size = sizeof(fp->u.mac_vlan);
+ break;
+ case FILTER_VLAN_IP_3TUPLE:
+ size = sizeof(fp->u.vlan_3tuple);
+ break;
+ case FILTER_USNIC_IP:
+ case FILTER_DPDK_1:
+ size = sizeof(fp->u.generic_1);
+ break;
+ default:
+ size = sizeof(fp->u);
+ break;
+ }
+ size += sizeof(fp->type);
+ return size;
+}
+
+
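As the comment above prescribes, callers size the devcmd TLV buffer from the actual filter type rather than sizeof(struct filter_v2); a sketch of the computation (fltr being a populated struct filter_v2, mirroring vnic_dev_classifier above):

    u_int32_t tlv_size = vnic_filter_size(&fltr) +
                         sizeof(struct filter_action) +
                         2 * sizeof(struct filter_tlv);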
enum {
CLSF_ADD = 0,
CLSF_DEL = 1,
@@ -766,8 +1014,30 @@ typedef enum {
OVERLAY_FEATURE_MAX,
} overlay_feature_t;
-#define OVERLAY_OFFLOAD_ENABLE 0
-#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE 0
+#define OVERLAY_OFFLOAD_DISABLE 1
+#define OVERLAY_OFFLOAD_ENABLE_V2 2
#define OVERLAY_CFG_VXLAN_PORT_UPDATE 0
+
+/*
+ * Use this enum to get the supported versions for each of these features
+ * If you need to use the devcmd_get_supported_feature_version(), add
+ * the new feature into this enum and install function handler in devcmd.c
+ */
+typedef enum {
+ VIC_FEATURE_VXLAN,
+ VIC_FEATURE_RDMA,
+ VIC_FEATURE_MAX,
+} vic_feature_t;
+
+/*
+ * CMD_CONFIG_GRPINTR subcommands
+ */
+typedef enum {
+ GRPINTR_ENABLE = 1,
+ GRPINTR_DISABLE,
+ GRPINTR_UPD_VECT,
+} grpintr_subcmd_t;
+
#endif /* _VNIC_DEVCMD_H_ */
diff --git a/src/dpdk/drivers/net/enic/enic.h b/src/dpdk/drivers/net/enic/enic.h
index 4c16ef17..c00d3fb8 100644
--- a/src/dpdk/drivers/net/enic/enic.h
+++ b/src/dpdk/drivers/net/enic/enic.h
@@ -92,6 +92,12 @@ struct enic_fdir {
struct rte_eth_fdir_stats stats;
struct rte_hash *hash;
struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX];
+ u32 modes;
+ u32 types_mask;
+ void (*copy_fltr_fn)(struct filter_v2 *filt,
+ struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+
};
struct enic_soft_stats {
@@ -128,6 +134,7 @@ struct enic {
int link_status;
u8 hw_ip_checksum;
u16 max_mtu;
+ u16 adv_filters;
unsigned int flags;
unsigned int priv_flags;
@@ -273,4 +280,11 @@ uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
int enic_set_mtu(struct enic *enic, uint16_t new_mtu);
+void enic_fdir_info(struct enic *enic);
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *stats);
+void copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks);
+void copy_fltr_v2(__rte_unused struct filter_v2 *fltr,
+ __rte_unused struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks);
#endif /* _ENIC_H_ */
diff --git a/src/dpdk/drivers/net/enic/enic_clsf.c b/src/dpdk/drivers/net/enic/enic_clsf.c
index e6f57bea..23cb0124 100644
--- a/src/dpdk/drivers/net/enic/enic_clsf.c
+++ b/src/dpdk/drivers/net/enic/enic_clsf.c
@@ -38,6 +38,11 @@
#include <rte_malloc.h>
#include <rte_hash.h>
#include <rte_byteorder.h>
+#include <rte_ip.h>
+#include <rte_tcp.h>
+#include <rte_udp.h>
+#include <rte_sctp.h>
+#include <rte_eth_ctrl.h>
#include "enic_compat.h"
#include "enic.h"
@@ -67,6 +72,268 @@ void enic_fdir_stats_get(struct enic *enic, struct rte_eth_fdir_stats *stats)
*stats = enic->fdir.stats;
}
+void enic_fdir_info_get(struct enic *enic, struct rte_eth_fdir_info *info)
+{
+ info->mode = enic->fdir.modes;
+ info->flow_types_mask[0] = enic->fdir.types_mask;
+}
+
+void enic_fdir_info(struct enic *enic)
+{
+ enic->fdir.modes = (u32)RTE_FDIR_MODE_PERFECT;
+ enic->fdir.types_mask = 1 << RTE_ETH_FLOW_NONFRAG_IPV4_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_TCP;
+ if (enic->adv_filters) {
+ enic->fdir.types_mask |= 1 << RTE_ETH_FLOW_NONFRAG_IPV4_OTHER |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV4_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_UDP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_TCP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_SCTP |
+ 1 << RTE_ETH_FLOW_NONFRAG_IPV6_OTHER;
+ enic->fdir.copy_fltr_fn = copy_fltr_v2;
+ } else {
+ enic->fdir.copy_fltr_fn = copy_fltr_v1;
+ }
+}
+
+static void
+enic_set_layer(struct filter_generic_1 *gp, unsigned int flag,
+ enum filter_generic_1_layer layer, void *mask, void *val,
+ unsigned int len)
+{
+ gp->mask_flags |= flag;
+ gp->val_flags |= gp->mask_flags;
+ memcpy(gp->layer[layer].mask, mask, len);
+ memcpy(gp->layer[layer].val, val, len);
+}
+
+/* Copy Flow Director filter to a VIC ipv4 filter (for Cisco VICs
+ * without advanced filter support).
+ */
+void
+copy_fltr_v1(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ __rte_unused struct rte_eth_fdir_masks *masks)
+{
+ fltr->type = FILTER_IPV4_5TUPLE;
+ fltr->u.ipv4.src_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.src_ip);
+ fltr->u.ipv4.dst_addr = rte_be_to_cpu_32(
+ input->flow.ip4_flow.dst_ip);
+ fltr->u.ipv4.src_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.src_port);
+ fltr->u.ipv4.dst_port = rte_be_to_cpu_16(
+ input->flow.udp4_flow.dst_port);
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP)
+ fltr->u.ipv4.protocol = PROTO_TCP;
+ else
+ fltr->u.ipv4.protocol = PROTO_UDP;
+
+ fltr->u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+}
+
+/* Copy Flow Director filter to a VIC generic filter (requires advanced
+ * filter support).
+ */
+void
+copy_fltr_v2(struct filter_v2 *fltr, struct rte_eth_fdir_input *input,
+ struct rte_eth_fdir_masks *masks)
+{
+ struct filter_generic_1 *gp = &fltr->u.generic_1;
+ int i;
+
+ RTE_ASSERT(enic->adv_filters);
+
+ fltr->type = FILTER_DPDK_1;
+ memset(gp, 0, sizeof(*gp));
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp4_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp4_flow.src_port;
+ }
+ if (input->flow.udp4_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp4_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp4_flow.src_port;
+ }
+ if (input->flow.tcp4_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp4_flow.dst_port;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp4_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp4_flow.src_port;
+ }
+ if (input->flow.sctp4_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp4_flow.dst_port;
+ }
+ if (input->flow.sctp4_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp4_flow.verify_tag;
+ }
+
+ /* v4 proto should be 132, override ip4_flow.proto */
+ input->flow.ip4_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) {
+ struct ipv4_hdr ip4_mask, ip4_val;
+ memset(&ip4_mask, 0, sizeof(struct ipv4_hdr));
+ memset(&ip4_val, 0, sizeof(struct ipv4_hdr));
+
+ if (input->flow.ip4_flow.tos) {
+ ip4_mask.type_of_service = masks->ipv4_mask.tos;
+ ip4_val.type_of_service = input->flow.ip4_flow.tos;
+ }
+ if (input->flow.ip4_flow.ip_id) {
+ ip4_mask.packet_id = 0xffff;
+ ip4_val.packet_id = input->flow.ip4_flow.ip_id;
+ }
+ if (input->flow.ip4_flow.ttl) {
+ ip4_mask.time_to_live = 0xff;
+ ip4_val.time_to_live = input->flow.ip4_flow.ttl;
+ }
+ if (input->flow.ip4_flow.proto) {
+ ip4_mask.next_proto_id = 0xff;
+ ip4_val.next_proto_id = input->flow.ip4_flow.proto;
+ }
+ if (input->flow.ip4_flow.src_ip) {
+ ip4_mask.src_addr = masks->ipv4_mask.src_ip;
+ ip4_val.src_addr = input->flow.ip4_flow.src_ip;
+ }
+ if (input->flow.ip4_flow.dst_ip) {
+ ip4_mask.dst_addr = masks->ipv4_mask.dst_ip;
+ ip4_val.dst_addr = input->flow.ip4_flow.dst_ip;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV4, FILTER_GENERIC_1_L3,
+ &ip4_mask, &ip4_val, sizeof(struct ipv4_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP) {
+ struct udp_hdr udp_mask, udp_val;
+ memset(&udp_mask, 0, sizeof(udp_mask));
+ memset(&udp_val, 0, sizeof(udp_val));
+
+ if (input->flow.udp6_flow.src_port) {
+ udp_mask.src_port = masks->src_port_mask;
+ udp_val.src_port = input->flow.udp6_flow.src_port;
+ }
+ if (input->flow.udp6_flow.dst_port) {
+ udp_mask.dst_port = masks->dst_port_mask;
+ udp_val.dst_port = input->flow.udp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_UDP, FILTER_GENERIC_1_L4,
+ &udp_mask, &udp_val, sizeof(struct udp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP) {
+ struct tcp_hdr tcp_mask, tcp_val;
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ memset(&tcp_val, 0, sizeof(tcp_val));
+
+ if (input->flow.tcp6_flow.src_port) {
+ tcp_mask.src_port = masks->src_port_mask;
+ tcp_val.src_port = input->flow.tcp6_flow.src_port;
+ }
+ if (input->flow.tcp6_flow.dst_port) {
+ tcp_mask.dst_port = masks->dst_port_mask;
+ tcp_val.dst_port = input->flow.tcp6_flow.dst_port;
+ }
+ enic_set_layer(gp, FILTER_GENERIC_1_TCP, FILTER_GENERIC_1_L4,
+ &tcp_mask, &tcp_val, sizeof(struct tcp_hdr));
+ } else if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP) {
+ struct sctp_hdr sctp_mask, sctp_val;
+ memset(&sctp_mask, 0, sizeof(sctp_mask));
+ memset(&sctp_val, 0, sizeof(sctp_val));
+
+ if (input->flow.sctp6_flow.src_port) {
+ sctp_mask.src_port = masks->src_port_mask;
+ sctp_val.src_port = input->flow.sctp6_flow.src_port;
+ }
+ if (input->flow.sctp6_flow.dst_port) {
+ sctp_mask.dst_port = masks->dst_port_mask;
+ sctp_val.dst_port = input->flow.sctp6_flow.dst_port;
+ }
+ if (input->flow.sctp6_flow.verify_tag) {
+ sctp_mask.tag = 0xffffffff;
+ sctp_val.tag = input->flow.sctp6_flow.verify_tag;
+ }
+
+ /* v6 proto should be 132 (SCTP), override ipv6_flow.proto */
+ input->flow.ipv6_flow.proto = 132;
+
+ enic_set_layer(gp, 0, FILTER_GENERIC_1_L4, &sctp_mask,
+ &sctp_val, sizeof(struct sctp_hdr));
+ }
+
+ if (input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_UDP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_TCP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_SCTP ||
+ input->flow_type == RTE_ETH_FLOW_NONFRAG_IPV6_OTHER) {
+ struct ipv6_hdr ipv6_mask, ipv6_val;
+ memset(&ipv6_mask, 0, sizeof(struct ipv6_hdr));
+ memset(&ipv6_val, 0, sizeof(struct ipv6_hdr));
+
+ if (input->flow.ipv6_flow.proto) {
+ ipv6_mask.proto = masks->ipv6_mask.proto;
+ ipv6_val.proto = input->flow.ipv6_flow.proto;
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.src_addr[i * 4] =
+ masks->ipv6_mask.src_ip[i];
+ *(uint32_t *)&ipv6_val.src_addr[i * 4] =
+ input->flow.ipv6_flow.src_ip[i];
+ }
+ for (i = 0; i < 4; i++) {
+ *(uint32_t *)&ipv6_mask.dst_addr[i * 4] =
+ masks->ipv6_mask.dst_ip[i];
+ *(uint32_t *)&ipv6_val.dst_addr[i * 4] =
+ input->flow.ipv6_flow.dst_ip[i];
+ }
+ if (input->flow.ipv6_flow.tc) {
+ ipv6_mask.vtc_flow = ((uint32_t)masks->ipv6_mask.tc<<12);
+ ipv6_val.vtc_flow = input->flow.ipv6_flow.tc << 12;
+ }
+ if (input->flow.ipv6_flow.hop_limits) {
+ ipv6_mask.hop_limits = 0xff;
+ ipv6_val.hop_limits = input->flow.ipv6_flow.hop_limits;
+ }
+
+ enic_set_layer(gp, FILTER_GENERIC_1_IPV6, FILTER_GENERIC_1_L3,
+ &ipv6_mask, &ipv6_val, sizeof(struct ipv6_hdr));
+ }
+}
+
int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
int32_t pos;
@@ -97,7 +364,7 @@ int enic_fdir_del_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
{
struct enic_fdir_node *key;
- struct filter fltr = {0};
+ struct filter_v2 fltr;
int32_t pos;
u8 do_free = 0;
u16 old_fltr_id = 0;
@@ -105,9 +372,9 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
u16 flex_bytes;
u16 queue;
- flowtype_supported = (
- (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type) ||
- (RTE_ETH_FLOW_NONFRAG_IPV4_UDP == params->input.flow_type));
+ memset(&fltr, 0, sizeof(fltr));
+ flowtype_supported = enic->fdir.types_mask
+ & (1 << params->input.flow_type);
flex_bytes = ((params->input.flow_ext.flexbytes[1] << 8 & 0xFF00) |
(params->input.flow_ext.flexbytes[0] & 0xFF));
@@ -120,7 +387,12 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
return -ENOTSUP;
}
- queue = params->action.rx_queue;
+ /* Get the enicpmd RQ from the DPDK Rx queue */
+ queue = enic_sop_rq(params->action.rx_queue);
+
+ if (!enic->rq[queue].in_use)
+ return -EINVAL;
+
/* See if the key is already there in the table */
pos = rte_hash_del_key(enic->fdir.hash, params);
switch (pos) {
@@ -183,22 +455,8 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params)
key->filter = *params;
key->rq_index = queue;
- fltr.type = FILTER_IPV4_5TUPLE;
- fltr.u.ipv4.src_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.src_ip);
- fltr.u.ipv4.dst_addr = rte_be_to_cpu_32(
- params->input.flow.ip4_flow.dst_ip);
- fltr.u.ipv4.src_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.src_port);
- fltr.u.ipv4.dst_port = rte_be_to_cpu_16(
- params->input.flow.udp4_flow.dst_port);
-
- if (RTE_ETH_FLOW_NONFRAG_IPV4_TCP == params->input.flow_type)
- fltr.u.ipv4.protocol = PROTO_TCP;
- else
- fltr.u.ipv4.protocol = PROTO_UDP;
-
- fltr.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+ enic->fdir.copy_fltr_fn(&fltr, &params->input,
+ &enic->rte_dev->data->dev_conf.fdir_conf.mask);
if (!vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr)) {
key->fltr_id = queue;
@@ -238,6 +496,7 @@ void enic_clsf_destroy(struct enic *enic)
vnic_dev_classifier(enic->vdev, CLSF_DEL,
&key->fltr_id, NULL);
rte_free(key);
+ enic->fdir.nodes[index] = NULL;
}
}
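The net effect of the classifier changes, as a simplified sketch of the add path (names taken from the hunks above; error handling elided):

    struct filter_v2 fltr;
    memset(&fltr, 0, sizeof(fltr));
    /* copy_fltr_v2 (FILTER_DPDK_1) on adv-filter VICs, else copy_fltr_v1 */
    enic->fdir.copy_fltr_fn(&fltr, &params->input,
                            &enic->rte_dev->data->dev_conf.fdir_conf.mask);
    vnic_dev_classifier(enic->vdev, CLSF_ADD, &queue, &fltr);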
diff --git a/src/dpdk/drivers/net/enic/enic_ethdev.c b/src/dpdk/drivers/net/enic/enic_ethdev.c
index 47b07c92..c05476b2 100644
--- a/src/dpdk/drivers/net/enic/enic_ethdev.c
+++ b/src/dpdk/drivers/net/enic/enic_ethdev.c
@@ -95,10 +95,12 @@ enicpmd_fdir_ctrl_func(struct rte_eth_dev *eth_dev,
break;
case RTE_ETH_FILTER_FLUSH:
- case RTE_ETH_FILTER_INFO:
dev_warning(enic, "unsupported operation %u", filter_op);
ret = -ENOTSUP;
break;
+ case RTE_ETH_FILTER_INFO:
+ enic_fdir_info_get(enic, (struct rte_eth_fdir_info *)arg);
+ break;
default:
dev_err(enic, "unknown operation %u", filter_op);
ret = -EINVAL;
@@ -433,6 +435,25 @@ static void enicpmd_dev_stats_reset(struct rte_eth_dev *eth_dev)
enic_dev_stats_clear(enic);
}
+
+int enicpmd_dev_get_fw_support(int port_id,
+ uint32_t *ver){
+ struct rte_eth_dev *dev;
+
+ RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -EINVAL);
+
+ dev = &rte_eth_devices[port_id];
+ *ver=0;
+
+ struct enic *enic = pmd_priv(dev);
+ if (enic->adv_filters == 0) {
+ return (-1);
+ }
+ return (0);
+}
+
+
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
struct rte_eth_dev_info *device_info)
{
@@ -459,6 +480,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
device_info->default_rxconf = (struct rte_eth_rxconf) {
.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
};
+
+ device_info->speed_capa = ETH_LINK_SPEED_40G;
}
static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
diff --git a/src/dpdk/drivers/net/enic/enic_main.c b/src/dpdk/drivers/net/enic/enic_main.c
index b4ca3710..889bc692 100644
--- a/src/dpdk/drivers/net/enic/enic_main.c
+++ b/src/dpdk/drivers/net/enic/enic_main.c
@@ -1122,6 +1122,9 @@ static int enic_dev_init(struct enic *enic)
return err;
}
+ /* Get the supported filters */
+ enic_fdir_info(enic);
+
eth_dev->data->mac_addrs = rte_zmalloc("enic_mac_addr", ETH_ALEN, 0);
if (!eth_dev->data->mac_addrs) {
dev_err(enic, "mac addr storage alloc failed, aborting.\n");
diff --git a/src/dpdk/drivers/net/enic/enic_res.c b/src/dpdk/drivers/net/enic/enic_res.c
index 84c5d336..8a230a16 100644
--- a/src/dpdk/drivers/net/enic/enic_res.c
+++ b/src/dpdk/drivers/net/enic/enic_res.c
@@ -62,6 +62,7 @@ int enic_get_vnic_config(struct enic *enic)
return err;
}
+
#define GET_CONFIG(m) \
do { \
err = vnic_dev_spec(enic->vdev, \
@@ -98,6 +99,10 @@ int enic_get_vnic_config(struct enic *enic)
enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu,
max_t(u16, ENIC_MIN_MTU, c->mtu));
+ enic->adv_filters = vnic_dev_capable_adv_filters(enic->vdev);
+ dev_info(enic, "Advanced Filters %savailable\n", ((enic->adv_filters)
+ ? "" : "not "));
+
c->wq_desc_count =
min_t(u32, ENIC_MAX_WQ_DESCS,
max_t(u32, ENIC_MIN_WQ_DESCS,
diff --git a/src/dpdk/drivers/net/mlx5/mlx5.c b/src/dpdk/drivers/net/mlx5/mlx5.c
index d96a9aff..303b917b 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5.c
@@ -181,6 +181,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
}
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
+
+ mlx5_stats_free(dev);
+
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -366,6 +369,13 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
unsigned int mps;
int idx;
int i;
+ static int ibv_was_init = 0;
+
+ if (ibv_was_init == 0) {
+ ibv_fork_init();
+ ibv_was_init = 1;
+ }
+
(void)pci_drv;
assert(pci_drv == &mlx5_driver.pci_drv);
@@ -511,7 +521,16 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
+
+
err = mlx5_args(priv, pci_dev->devargs);
+
+ /* TREX PATCH */
+ /* set inline defaults for maximum performance */
+ priv->txq_inline = 128;
+ priv->txqs_inline = 4;
+
+
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
@@ -751,7 +770,6 @@ rte_mlx5_pmd_init(const char *name, const char *args)
* using this PMD, which is not supported in forked processes.
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
- ibv_fork_init();
rte_eal_pci_register(&mlx5_driver.pci_drv);
return 0;
}
diff --git a/src/dpdk/drivers/net/mlx5/mlx5.h b/src/dpdk/drivers/net/mlx5/mlx5.h
index 3a866098..68bad904 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5.h
@@ -84,6 +84,34 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
};
+struct mlx5_stats_priv {
+
+ struct rte_eth_stats m_shadow;
+ uint32_t n_stats; /* number of counters */
+
+ void * et_stats ;/* point to ethtool counter struct ethtool_stats*/
+
+ /* index into ethtool */
+ uint16_t inx_rx_vport_unicast_bytes;
+ uint16_t inx_rx_vport_multicast_bytes;
+ uint16_t inx_rx_vport_broadcast_bytes;
+ uint16_t inx_rx_vport_unicast_packets;
+ uint16_t inx_rx_vport_multicast_packets;
+ uint16_t inx_rx_vport_broadcast_packets;
+ uint16_t inx_tx_vport_unicast_bytes;
+ uint16_t inx_tx_vport_multicast_bytes;
+ uint16_t inx_tx_vport_broadcast_bytes;
+ uint16_t inx_tx_vport_unicast_packets;
+ uint16_t inx_tx_vport_multicast_packets;
+ uint16_t inx_tx_vport_broadcast_packets;
+ uint16_t inx_rx_wqe_err;
+ uint16_t inx_rx_crc_errors_phy;
+ uint16_t inx_rx_in_range_len_errors_phy;
+ uint16_t inx_rx_symbol_err_phy;
+ uint16_t inx_tx_errors_phy;
+};
+
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device. */
struct ibv_context *ctx; /* Verbs context. */
@@ -135,6 +163,7 @@ struct priv {
unsigned int reta_idx_n; /* RETA index size. */
struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
rte_spinlock_t lock; /* Lock for control functions. */
+ struct mlx5_stats_priv m_stats;
};
/* Local storage for secondary process data. */
@@ -243,6 +272,8 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *);
void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
void mlx5_stats_reset(struct rte_eth_dev *);
+void mlx5_stats_free(struct rte_eth_dev *dev);
+
/* mlx5_vlan.c */
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
new file mode 100644
index 00000000..9fdfff84
--- /dev/null
+++ b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
@@ -0,0 +1,8 @@
+#ifndef HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE
+#define HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE 1
+#endif /* HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE */
+
+#ifndef HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE
+#define HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE 1
+#endif /* HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE */
+
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
index 73eb00ec..84fb5d03 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
@@ -42,7 +42,7 @@
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
-#include <infiniband/verbs.h>
+#include <infiniband/verbs_exp.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -67,6 +67,10 @@ struct fdir_flow_desc {
uint16_t src_port;
uint32_t src_ip[4];
uint32_t dst_ip[4];
+ uint8_t tos;
+ uint8_t ip_id;
+ uint8_t proto;
+
uint8_t mac[6];
uint16_t vlan_tag;
enum hash_rxq_type type;
@@ -141,9 +145,13 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
+ desc->tos = fdir_filter->input.flow.ip4_flow.ttl; /* TTL is mapped to TOS */
+ desc->ip_id = fdir_filter->input.flow.ip4_flow.ip_id;
+ desc->proto = fdir_filter->input.flow.ip4_flow.proto;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
@@ -157,12 +165,17 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
rte_memcpy(desc->dst_ip,
fdir_filter->input.flow.ipv6_flow.dst_ip,
sizeof(desc->dst_ip));
+ desc->tos = (uint8_t)fdir_filter->input.flow.ipv6_flow.hop_limits; /* TTL is mapped to TOS */
+ desc->ip_id = (uint8_t)fdir_filter->input.flow.ipv6_flow.flow_label;
+ desc->proto = fdir_filter->input.flow.ipv6_flow.proto;
+
break;
default:
break;
}
}
+
/**
* Check if two flow descriptors overlap according to configured mask.
*
@@ -197,6 +210,12 @@ priv_fdir_overlap(const struct priv *priv,
((desc1->dst_port & mask->dst_port_mask) !=
(desc2->dst_port & mask->dst_port_mask)))
return 0;
+
+ if ( (desc1->tos != desc2->tos) ||
+ (desc1->ip_id != desc2->ip_id) ||
+ (desc1->proto != desc2->proto) )
+ return 0;
+
switch (desc1->type) {
case HASH_RXQ_IPV4:
case HASH_RXQ_UDPV4:
@@ -204,8 +223,9 @@ priv_fdir_overlap(const struct priv *priv,
if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
(desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
- (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
+ (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
return 0;
+
break;
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
@@ -251,8 +271,8 @@ priv_fdir_flow_add(struct priv *priv,
struct ibv_exp_flow_attr *attr = &data->attr;
uintptr_t spec_offset = (uintptr_t)&data->spec;
struct ibv_exp_flow_spec_eth *spec_eth;
- struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
- struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
+ struct ibv_exp_flow_spec_ipv4_ext *spec_ipv4;
+ struct ibv_exp_flow_spec_ipv6_ext *spec_ipv6;
struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
struct mlx5_fdir_filter *iter_fdir_filter;
unsigned int i;
@@ -264,8 +284,10 @@ priv_fdir_flow_add(struct priv *priv,
(iter_fdir_filter->flow != NULL) &&
(priv_fdir_overlap(priv,
&mlx5_fdir_filter->desc,
- &iter_fdir_filter->desc)))
- return EEXIST;
+ &iter_fdir_filter->desc))){
+ ERROR("overlap rules, please check your rules");
+ return EEXIST;
+ }
/*
* No padding must be inserted by the compiler between attr and spec.
@@ -305,10 +327,10 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_eth->size;
/* Set IP spec */
- spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
+ spec_ipv4 = (struct ibv_exp_flow_spec_ipv4_ext *)spec_offset;
/* The second specification must be IP. */
- assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
+ assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4_EXT);
assert(spec_ipv4->size == sizeof(*spec_ipv4));
spec_ipv4->val.src_ip =
@@ -318,6 +340,19 @@ priv_fdir_flow_add(struct priv *priv,
spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
+ /* PROTO */
+ spec_ipv4->val.proto = desc->proto & mask->ipv4_mask.proto;
+ spec_ipv4->mask.proto = mask->ipv4_mask.proto;
+
+ /* TOS */
+ if (desc->ip_id == 1) {
+ spec_ipv4->mask.tos = 0x1;
+ } else {
+ spec_ipv4->mask.tos = 0x0;
+ }
+ spec_ipv4->val.tos =
+ desc->tos & spec_ipv4->mask.tos;// & mask->ipv4_mask.tos;
+
/* Update priority */
attr->priority = 1;
@@ -332,10 +367,10 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_eth->size;
/* Set IP spec */
- spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
+ spec_ipv6 = (struct ibv_exp_flow_spec_ipv6_ext *)spec_offset;
/* The second specification must be IP. */
- assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
+ assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6_EXT);
assert(spec_ipv6->size == sizeof(*spec_ipv6));
for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
@@ -351,6 +386,18 @@ priv_fdir_flow_add(struct priv *priv,
mask->ipv6_mask.dst_ip,
sizeof(spec_ipv6->mask.dst_ip));
+ spec_ipv6->val.next_hdr = desc->proto & mask->ipv6_mask.proto;
+ spec_ipv6->mask.next_hdr = mask->ipv6_mask.proto;
+
+ /* TOS */
+ if (desc->ip_id == 1) {
+ spec_ipv6->mask.traffic_class = 0x1;
+ } else {
+ spec_ipv6->mask.traffic_class = 0x0;
+ }
+ spec_ipv6->val.traffic_class =
+ (desc->tos) & spec_ipv6->mask.traffic_class;// & mask->ipv4_mask.tos;
+
/* Update priority */
attr->priority = 1;
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
index 29c137cd..6be01d39 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
@@ -102,7 +102,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
ETH_RSS_FRAG_IPV4),
.flow_priority = 1,
.flow_spec.ipv4 = {
- .type = IBV_EXP_FLOW_SPEC_IPV4,
+ .type = IBV_EXP_FLOW_SPEC_IPV4_EXT,
.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
@@ -140,7 +140,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
ETH_RSS_FRAG_IPV6),
.flow_priority = 1,
.flow_spec.ipv6 = {
- .type = IBV_EXP_FLOW_SPEC_IPV6,
+ .type = IBV_EXP_FLOW_SPEC_IPV6_EXT,
.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
index f6e2cbac..d87dd19b 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
@@ -173,8 +173,8 @@ struct hash_rxq_init {
uint16_t size;
} hdr;
struct ibv_exp_flow_spec_tcp_udp tcp_udp;
- struct ibv_exp_flow_spec_ipv4 ipv4;
- struct ibv_exp_flow_spec_ipv6 ipv6;
+ struct ibv_exp_flow_spec_ipv4_ext ipv4;
+ struct ibv_exp_flow_spec_ipv6_ext ipv6;
struct ibv_exp_flow_spec_eth eth;
} flow_spec; /* Flow specification template. */
const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_stats.c b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
index 2d3cb519..788ef939 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_stats.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
@@ -44,6 +44,10 @@
#include "mlx5_rxtx.h"
#include "mlx5_defs.h"
+
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
/**
* DPDK callback to get device statistics.
*
@@ -52,60 +56,241 @@
* @param[out] stats
* Stats structure output buffer.
*/
+
+
+static void
+mlx5_stats_read_hw(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats){
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ unsigned int i;
+
+ struct rte_eth_stats tmp = {0};
+ struct ethtool_stats *et_stats = (struct ethtool_stats *)lps->et_stats;
+ struct ifreq ifr;
+
+ et_stats->cmd = ETHTOOL_GSTATS;
+ et_stats->n_stats = lps->n_stats;
+
+ ifr.ifr_data = (caddr_t) et_stats;
+
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic values for mlnx5 ");
+ }
+
+ tmp.ibytes += et_stats->data[lps->inx_rx_vport_unicast_bytes] +
+ et_stats->data[lps->inx_rx_vport_multicast_bytes] +
+ et_stats->data[lps->inx_rx_vport_broadcast_bytes];
+
+ tmp.ipackets += et_stats->data[lps->inx_rx_vport_unicast_packets] +
+ et_stats->data[lps->inx_rx_vport_multicast_packets] +
+ et_stats->data[lps->inx_rx_vport_broadcast_packets];
+
+ tmp.ierrors += (et_stats->data[lps->inx_rx_wqe_err] +
+ et_stats->data[lps->inx_rx_crc_errors_phy] +
+ et_stats->data[lps->inx_rx_in_range_len_errors_phy] +
+ et_stats->data[lps->inx_rx_symbol_err_phy]);
+
+ tmp.obytes += et_stats->data[lps->inx_tx_vport_unicast_bytes] +
+ et_stats->data[lps->inx_tx_vport_multicast_bytes] +
+ et_stats->data[lps->inx_tx_vport_broadcast_bytes];
+
+ tmp.opackets += (et_stats->data[lps->inx_tx_vport_unicast_packets] +
+ et_stats->data[lps->inx_tx_vport_multicast_packets] +
+ et_stats->data[lps->inx_tx_vport_broadcast_packets]);
+
+ tmp.oerrors += et_stats->data[lps->inx_tx_errors_phy];
+
+ /* SW Rx */
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ if (rxq) {
+ tmp.imissed += rxq->stats.idropped;
+ tmp.rx_nombuf += rxq->stats.rx_nombuf;
+ }
+ }
+
+ /*SW Tx */
+ for (i = 0; (i != priv->txqs_n); ++i) {
+ struct txq *txq = (*priv->txqs)[i];
+ if (txq) {
+ tmp.oerrors += txq->stats.odropped;
+ }
+ }
+
+ *stats = tmp;
+}
+
+void
+mlx5_stats_free(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+
+ if ( lps->et_stats ){
+ free(lps->et_stats);
+ lps->et_stats=0;
+ }
+}
+
+
+static void
+mlx5_stats_init(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ struct rte_eth_stats tmp = {0};
+
+ unsigned int i;
+ char ifname[IF_NAMESIZE];
+ struct ifreq ifr;
+
+ struct ethtool_stats *et_stats = NULL;
+ struct ethtool_drvinfo drvinfo;
+ struct ethtool_gstrings *strings = NULL;
+ unsigned int n_stats, sz_str, sz_stats;
+
+ if (priv_get_ifname(priv, &ifname)) {
+ WARN("unable to get interface name");
+ return;
+ }
+ /* How many statistics are available ? */
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr.ifr_data = (caddr_t) &drvinfo;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get driver info for %s", ifname);
+ return;
+ }
+
+ n_stats = drvinfo.n_stats;
+ if (n_stats < 1) {
+ WARN("no statistics available for %s", ifname);
+ return;
+ }
+ lps->n_stats = n_stats;
+
+ /* Allocate memory to grab stat names and values */
+ sz_str = n_stats * ETH_GSTRING_LEN;
+ sz_stats = n_stats * sizeof(uint64_t);
+ strings = calloc(1, sz_str + sizeof(struct ethtool_gstrings));
+ if (!strings) {
+ WARN("unable to allocate memory for strings");
+ return;
+ }
+
+ et_stats = calloc(1, sz_stats + sizeof(struct ethtool_stats));
+ if (!et_stats) {
+ free(strings);
+ WARN("unable to allocate memory for stats");
+ }
+
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = n_stats;
+ ifr.ifr_data = (caddr_t) strings;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic names for %s", ifname);
+ free(strings);
+ free(et_stats);
+ return;
+ }
+
+ for (i = 0; (i != n_stats); ++i) {
+
+ const char * curr_string = (const char*) &(strings->data[i * ETH_GSTRING_LEN]);
+
+ if (!strcmp("rx_vport_unicast_bytes", curr_string)) lps->inx_rx_vport_unicast_bytes = i;
+ if (!strcmp("rx_vport_multicast_bytes", curr_string)) lps->inx_rx_vport_multicast_bytes = i;
+ if (!strcmp("rx_vport_broadcast_bytes", curr_string)) lps->inx_rx_vport_broadcast_bytes = i;
+
+ if (!strcmp("rx_vport_unicast_packets", curr_string)) lps->inx_rx_vport_unicast_packets = i;
+ if (!strcmp("rx_vport_multicast_packets", curr_string)) lps->inx_rx_vport_multicast_packets = i;
+ if (!strcmp("rx_vport_broadcast_packets", curr_string)) lps->inx_rx_vport_broadcast_packets = i;
+
+ if (!strcmp("tx_vport_unicast_bytes", curr_string)) lps->inx_tx_vport_unicast_bytes = i;
+ if (!strcmp("tx_vport_multicast_bytes", curr_string)) lps->inx_tx_vport_multicast_bytes = i;
+ if (!strcmp("tx_vport_broadcast_bytes", curr_string)) lps->inx_tx_vport_broadcast_bytes = i;
+
+ if (!strcmp("tx_vport_unicast_packets", curr_string)) lps->inx_tx_vport_unicast_packets = i;
+ if (!strcmp("tx_vport_multicast_packets", curr_string)) lps->inx_tx_vport_multicast_packets = i;
+ if (!strcmp("tx_vport_broadcast_packets", curr_string)) lps->inx_tx_vport_broadcast_packets = i;
+
+ if (!strcmp("rx_wqe_err", curr_string)) lps->inx_rx_wqe_err = i;
+ if (!strcmp("rx_crc_errors_phy", curr_string)) lps->inx_rx_crc_errors_phy = i;
+ if (!strcmp("rx_in_range_len_errors_phy", curr_string)) lps->inx_rx_in_range_len_errors_phy = i;
+ if (!strcmp("rx_symbol_err_phy", curr_string)) lps->inx_rx_symbol_err_phy = i;
+
+ if (!strcmp("tx_errors_phy", curr_string)) lps->inx_tx_errors_phy = i;
+ }
+
+ lps->et_stats =(void *)et_stats;
+
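+ /* note: index 0 doubles as "not found" in the check below, so this assumes none of the required counters sits at ethtool index 0 */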
+ if (!lps->inx_rx_vport_unicast_bytes ||
+ !lps->inx_rx_vport_multicast_bytes ||
+ !lps->inx_rx_vport_broadcast_bytes ||
+ !lps->inx_rx_vport_unicast_packets ||
+ !lps->inx_rx_vport_multicast_packets ||
+ !lps->inx_rx_vport_broadcast_packets ||
+ !lps->inx_tx_vport_unicast_bytes ||
+ !lps->inx_tx_vport_multicast_bytes ||
+ !lps->inx_tx_vport_broadcast_bytes ||
+ !lps->inx_tx_vport_unicast_packets ||
+ !lps->inx_tx_vport_multicast_packets ||
+ !lps->inx_tx_vport_broadcast_packets ||
+ !lps->inx_rx_wqe_err ||
+ !lps->inx_rx_crc_errors_phy ||
+ !lps->inx_rx_in_range_len_errors_phy) {
+ WARN("Counters are not recognized %s", ifname);
+ return;
+ }
+
+ mlx5_stats_read_hw(dev,&tmp);
+
+ /* copy to the shadow on the first read */
+ lps->m_shadow = tmp;
+
+ free(strings);
+}
+
+
+static void
+mlx5_stats_diff(struct rte_eth_stats *a,
+ struct rte_eth_stats *b,
+ struct rte_eth_stats *c){
+ #define MLX5_DIFF(cnt) { a->cnt = (b->cnt - c->cnt); }
+
+ MLX5_DIFF(ipackets);
+ MLX5_DIFF(opackets);
+ MLX5_DIFF(ibytes);
+ MLX5_DIFF(obytes);
+ MLX5_DIFF(imissed);
+
+ MLX5_DIFF(ierrors);
+ MLX5_DIFF(oerrors);
+ MLX5_DIFF(rx_nombuf);
+}
+
+
void
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct priv *priv = mlx5_get_priv(dev);
- struct rte_eth_stats tmp = {0};
- unsigned int i;
- unsigned int idx;
-
- priv_lock(priv);
- /* Add software counters. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
-
- if (rxq == NULL)
- continue;
- idx = rxq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.q_ipackets[idx] += rxq->stats.ipackets;
- tmp.q_ibytes[idx] += rxq->stats.ibytes;
-#endif
- tmp.q_errors[idx] += (rxq->stats.idropped +
- rxq->stats.rx_nombuf);
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.ipackets += rxq->stats.ipackets;
- tmp.ibytes += rxq->stats.ibytes;
-#endif
- tmp.ierrors += rxq->stats.idropped;
- tmp.rx_nombuf += rxq->stats.rx_nombuf;
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
-
- if (txq == NULL)
- continue;
- idx = txq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.q_opackets[idx] += txq->stats.opackets;
- tmp.q_obytes[idx] += txq->stats.obytes;
-#endif
- tmp.q_errors[idx] += txq->stats.odropped;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.opackets += txq->stats.opackets;
- tmp.obytes += txq->stats.obytes;
-#endif
- tmp.oerrors += txq->stats.odropped;
- }
-#ifndef MLX5_PMD_SOFT_COUNTERS
- /* FIXME: retrieve and add hardware counters. */
-#endif
- *stats = tmp;
+
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ priv_lock(priv);
+
+ if (lps->et_stats == NULL) {
+ mlx5_stats_init(dev);
+ }
+ struct rte_eth_stats tmp = {0};
+
+ mlx5_stats_read_hw(dev,&tmp);
+
+ mlx5_stats_diff(stats,
+ &tmp,
+ &lps->m_shadow);
+
priv_unlock(priv);
}
@@ -119,26 +304,20 @@ void
mlx5_stats_reset(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- unsigned int i;
- unsigned int idx;
-
- priv_lock(priv);
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- idx = (*priv->rxqs)[i]->stats.idx;
- (*priv->rxqs)[i]->stats =
- (struct mlx5_rxq_stats){ .idx = idx };
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- if ((*priv->txqs)[i] == NULL)
- continue;
- idx = (*priv->txqs)[i]->stats.idx;
- (*priv->txqs)[i]->stats =
- (struct mlx5_txq_stats){ .idx = idx };
- }
-#ifndef MLX5_PMD_SOFT_COUNTERS
- /* FIXME: reset hardware counters. */
-#endif
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+
+ priv_lock(priv);
+
+ if (lps->et_stats == NULL) {
+ mlx5_stats_init(dev);
+ }
+ struct rte_eth_stats tmp = {0};
+
+
+ mlx5_stats_read_hw(dev,&tmp);
+
+ /* copy to shadow */
+ lps->m_shadow = tmp;
+
priv_unlock(priv);
}
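Note on the approach above: the NIC's ethtool counters are cumulative and cannot be cleared from here, so mlx5_stats_reset() emulates a reset by snapshotting them into a shadow copy, and mlx5_stats_get() reports the delta against that shadow. A minimal self-contained sketch of the same shadow-counter technique (all names below are illustrative, not part of the patch):

#include <cstdint>
#include <cstdio>

struct Counters { uint64_t packets = 0, bytes = 0; };

// Stand-in for the ethtool read: cumulative, monotonically increasing.
static Counters read_hw() {
    static Counters hw;
    hw.packets += 10;        // pretend 10 packets arrived since the last call
    hw.bytes   += 15000;
    return hw;
}

struct ShadowStats {
    Counters shadow;                         // snapshot taken at "reset" time
    void reset() { shadow = read_hw(); }     // counters read as zero from here on
    Counters get() const {
        Counters cur = read_hw(), out;
        out.packets = cur.packets - shadow.packets;   // delta since last reset
        out.bytes   = cur.bytes   - shadow.bytes;
        return out;
    }
};

int main() {
    ShadowStats s;
    s.reset();
    Counters c = s.get();
    std::printf("pkts=%llu bytes=%llu\n",
                (unsigned long long)c.packets, (unsigned long long)c.bytes);
    return 0;
}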
diff --git a/src/flow_stat_parser.cpp b/src/flow_stat_parser.cpp
index 7335a6a2..4a6722e6 100644
--- a/src/flow_stat_parser.cpp
+++ b/src/flow_stat_parser.cpp
@@ -27,6 +27,7 @@
#include "common/Network/Packet/TcpHeader.h"
#include "pkt_gen.h"
#include "flow_stat_parser.h"
+#include "bp_sim.h"
void CFlowStatParser::reset() {
m_start = 0;
@@ -120,12 +121,15 @@ int CFlowStatParser::get_ip_id(uint32_t &ip_id) {
int CFlowStatParser::set_ip_id(uint32_t new_id) {
if (m_ipv4) {
 // Updating the checksum, not recalculating it, so if someone put a bad checksum on purpose, it will stay bad
+ m_ipv4->updateCheckSum(PKT_NTOHS(m_ipv4->getFirstWord()), PKT_NTOHS(m_ipv4->getFirstWord() |TOS_TTL_RESERVE_DUPLICATE));
m_ipv4->updateCheckSum(PKT_NTOHS(m_ipv4->getId()), PKT_NTOHS(new_id));
m_ipv4->setId(new_id);
+ m_ipv4->setTOS(m_ipv4->getTOS()|TOS_TTL_RESERVE_DUPLICATE);
return 0;
}
if (m_ipv6) {
+ m_ipv6->setTrafficClass(m_ipv6->getTrafficClass()|TOS_TTL_RESERVE_DUPLICATE);
m_ipv6->setFlowLabel(new_id);
return 0;
}
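The updateCheckSum() calls above are incremental updates in the sense of RFC 1624: when a 16-bit header word m changes to m', the new checksum is HC' = ~(~HC + ~m + m'), which preserves any pre-existing corruption. A standalone sketch of that arithmetic (the function name is illustrative; all words are assumed to be in host byte order):

#include <cstdint>

// RFC 1624 incremental checksum update: HC' = ~(~HC + ~m + m').
static uint16_t cksum_update(uint16_t old_sum, uint16_t old_w, uint16_t new_w) {
    uint32_t sum = (uint16_t)~old_sum;        // ~HC
    sum += (uint16_t)~old_w;                  // + ~m
    sum += new_w;                             // + m'
    while (sum >> 16)                         // fold end-around carries
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

Because only the delta is folded in, a header that arrived with a wrong checksum leaves with an equally wrong one, which is exactly the property the comment above relies on.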
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index fed7a348..e799a5bd 100644
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -168,6 +168,14 @@ public:
virtual CFlowStatParser *get_flow_stat_parser();
virtual int set_rcv_all(CPhyEthIF * _if, bool set_on)=0;
virtual TRexPortAttr * create_port_attr(uint8_t port_id) = 0;
+
+ /* Does this NIC type support automatic packet dropping when a link is down?
+ If it does, packets are silently dropped; otherwise the TX queues build up back pressure.
+ This interface is a workaround that lets TRex run without a link in stateless mode. A driver that
+ does not support it will fail at init time, since a blocked TX queue hangs the sender and triggers the watchdog. */
+ virtual bool drop_packets_incase_of_linkdown() {
+ return (false);
+ }
};
@@ -359,10 +367,14 @@ public:
private:
virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
- , uint16_t ip_id, uint16_t l4_proto, int queue, uint16_t stat_idx);
+ , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
+ virtual bool drop_packets_incase_of_linkdown() {
+ return (true);
+ }
+
private:
uint8_t m_if_per_card;
};
@@ -370,6 +382,7 @@ private:
class CTRexExtendedDriverBaseVIC : public CTRexExtendedDriverBase40G {
public:
CTRexExtendedDriverBaseVIC(){
+ m_if_per_card=2;
}
TRexPortAttr * create_port_attr(uint8_t port_id) {
@@ -381,15 +394,113 @@ public:
}
virtual bool is_hardware_filter_is_supported(){
- return (false);
+ return (true);
}
- virtual int verify_fw_ver(int i) {return 0;}
+ virtual int verify_fw_ver(int i);
virtual void update_configuration(port_cfg_t * cfg);
+
+ virtual int configure_rx_filter_rules(CPhyEthIF * _if);
+ virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
+ , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
+
+
+ virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
+ virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
+ virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
+ virtual int get_rx_stat_capabilities() {
+ return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
+ }
+ virtual bool hw_rx_stat_supported(){return false;}
+ virtual CFlowStatParser *get_flow_stat_parser();
+ virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
+ virtual int set_rcv_all(CPhyEthIF * _if, bool set_on);
+
+private:
+
+ virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
+ , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx);
+ virtual int add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type);
+ virtual int configure_rx_filter_rules_statefull(CPhyEthIF * _if);
+
+private:
+ uint8_t m_if_per_card;
};
+class CTRexExtendedDriverBaseMlnx5G : public CTRexExtendedDriverBase10G {
+public:
+
+ CTRexExtendedDriverBaseMlnx5G(){
+ // Since we support only 128 counters per interface, it is OK to configure the number of
+ // interfaces per card statically here. To support more counters on cards with fewer
+ // interfaces, we would have to identify the number of interfaces dynamically.
+ m_if_per_card = 2;
+ }
+
+ TRexPortAttr * create_port_attr(uint8_t port_id) {
+ // disabling flow control on 40G links using the DPDK API causes the interface to malfunction
+ return new DpdkTRexPortAttr(port_id, false, false);
+ }
+
+
+ static CTRexExtendedDriverBase * create(){
+ return ( new CTRexExtendedDriverBaseMlnx5G() );
+ }
+
+ virtual void update_global_config_fdir(port_cfg_t * cfg){
+ }
+
+ virtual void update_configuration(port_cfg_t * cfg);
+
+ virtual int configure_rx_filter_rules(CPhyEthIF * _if);
+ virtual int add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
+ , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id);
+
+ virtual bool is_hardware_filter_is_supported(){
+ return (true);
+ }
+
+ virtual bool is_hardware_support_drop_queue(){
+ return(true);
+ }
+ virtual void get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats);
+ virtual void clear_extended_stats(CPhyEthIF * _if);
+ virtual void reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len);
+ virtual int get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts, uint32_t *bytes, uint32_t *prev_bytes, int min, int max);
+ virtual int dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd);
+ virtual int get_stat_counters_num() {return MAX_FLOW_STATS;}
+ virtual int get_rx_stat_capabilities() {
+ return TrexPlatformApi::IF_STAT_IPV4_ID | TrexPlatformApi::IF_STAT_PAYLOAD;
+ }
+ virtual int wait_for_stable_link();
+ // disabling flow control on 40G links using the DPDK API causes the interface to malfunction
+ virtual bool flow_control_disable_supported(){return false;}
+ virtual bool hw_rx_stat_supported(){return false;}
+ virtual CFlowStatParser *get_flow_stat_parser();
+ virtual int set_rcv_all(CPhyEthIF * _if, bool set_on){
+ /* TBD need to support that */
+ return (-1);
+ }
+
+
+private:
+ virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id,
+ uint16_t type, uint8_t ttl,
+ uint16_t ip_id,
+ uint8_t l4_proto,
+ int queue,
+ uint16_t stat_idx);
+ virtual int configure_rx_filter_rules_statfull(CPhyEthIF * _if);
+
+private:
+ uint8_t m_if_per_card;
+};
+
+
+
+
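One consequence of the m_if_per_card bookkeeping above is worth spelling out: ports on the same card share a single pool of hardware flow-stat counters, so each port owns a fixed slice of the index space (the commented-out reset code later in this patch uses the same formula). A sketch of that partitioning, with an assumed pool size:

#include <cstdint>

static const uint32_t MAX_FLOW_STATS_ASSUMED = 127;   // illustrative pool size per port

// First HW counter index owned by 'port_id' on a card whose 'if_per_card'
// interfaces share one pool; mirrors (port_id % m_if_per_card) * MAX_FLOW_STATS.
static uint32_t hw_counter_base(uint8_t port_id, uint8_t if_per_card) {
    return (uint32_t)(port_id % if_per_card) * MAX_FLOW_STATS_ASSUMED;
}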
typedef CTRexExtendedDriverBase * (*create_object_t) (void);
@@ -441,6 +552,8 @@ private:
register_driver(std::string("rte_igb_pmd"),CTRexExtendedDriverBase1G::create);
register_driver(std::string("rte_i40e_pmd"),CTRexExtendedDriverBase40G::create);
register_driver(std::string("rte_enic_pmd"),CTRexExtendedDriverBaseVIC::create);
+ register_driver(std::string("librte_pmd_mlx5"),CTRexExtendedDriverBaseMlnx5G::create);
+
/* virtual devices */
register_driver(std::string("rte_em_pmd"),CTRexExtendedDriverBase1GVm::create);
@@ -734,6 +847,7 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
bool latency_was_set=false;
(void)latency_was_set;
char ** rgpszArg = NULL;
+ bool opt_vlan_was_set = false;
int a=0;
int node_dump=0;
@@ -824,9 +938,7 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
po->preview.set_disable_flow_control_setting(true);
break;
case OPT_VLAN:
- if ( get_is_stateless() ) {
- po->preview.set_vlan_mode_enable(true);
- }
+ opt_vlan_was_set = true;
break;
case OPT_LIMT_NUM_OF_PORTS :
po->m_expected_portd =atoi(args.OptionArg());
@@ -860,7 +972,7 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
}
}
if (po->m_run_mode != CParserOption::RUN_MODE_INVALID) {
- parse_err("Please specify single run mode");
+ parse_err("Please specify single run mode (-i for stateless, or -f <file> for stateful");
}
po->m_run_mode = CParserOption::RUN_MODE_DUMP_INFO;
break;
@@ -957,12 +1069,12 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
if ((po->m_run_mode == CParserOption::RUN_MODE_INVALID) ) {
- parse_err("Please provide single run mode (e.g. batch or interactive)");
+ parse_err("Please provide single run mode. -f <file> for stateful or -i for stateless (interactive)");
}
if (CGlobalInfo::is_learn_mode() && po->preview.get_ipv6_mode_enable()) {
- parse_err("--learn mode is not supported with --ipv6, beacuse there is not such thing NAT66 ( ipv6-ipv6) \n" \
- "if you think it is important,open a defect \n");
+ parse_err("--learn mode is not supported with --ipv6, beacuse there is no such thing as NAT66 (ipv6 to ipv6 translation) \n" \
+ "If you think it is important, please open a defect or write to TRex mailing list\n");
}
if (po->preview.get_is_rx_check_enable() || po->is_latency_enabled() || CGlobalInfo::is_learn_mode()
@@ -979,7 +1091,7 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
uint32_t cores=po->preview.getCores();
if ( cores > ((BP_MAX_CORES)/2-1) ) {
- printf(" ERROR maximum supported cores are : %d \n",((BP_MAX_CORES)/2-1));
+ fprintf(stderr, " Error: maximum supported core number is: %d \n",((BP_MAX_CORES)/2-1));
return -1;
}
@@ -988,7 +1100,7 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
/* only first time read the configuration file */
if ( po->platform_cfg_file.length() >0 ) {
if ( node_dump ){
- printf("Loading platform configuration file from %s \n",po->platform_cfg_file.c_str());
+ printf("Using configuration file %s \n",po->platform_cfg_file.c_str());
}
global_platform_cfg_info.load_from_yaml_file(po->platform_cfg_file);
if ( node_dump ){
@@ -996,7 +1108,9 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
}
}else{
if ( utl_is_file_exists("/etc/trex_cfg.yaml") ){
- printf("found configuration file at /etc/trex_cfg.yaml \n");
+ if ( node_dump ){
+ printf("Using configuration file /etc/trex_cfg.yaml \n");
+ }
global_platform_cfg_info.load_from_yaml_file("/etc/trex_cfg.yaml");
if ( node_dump ){
global_platform_cfg_info.Dump(stdout);
@@ -1006,20 +1120,23 @@ static int parse_options(int argc, char *argv[], CParserOption* po, bool first_t
}
if ( get_is_stateless() ) {
+ if ( opt_vlan_was_set ) {
+ po->preview.set_vlan_mode_enable(true);
+ }
if ( po->m_duration ) {
- parse_err("Duration is not supported with interactive mode ");
+ parse_err("Duration is not supported with interactive (stateless) mode ");
}
if ( po->preview.get_is_rx_check_enable() ) {
- parse_err("Rx check is not supported with interactive mode ");
+ parse_err("Rx check is not supported with interactive (stateless) mode ");
}
if ( (po->is_latency_enabled()) || (po->preview.getOnlyLatency()) ){
- parse_err("Latency check is not supported with interactive mode ");
+ parse_err("Latency check is not supported with interactive (stateless) mode ");
}
if ( po->preview.getSingleCore() ){
- parse_err("Single core is not supported with interactive mode ");
+ parse_err("Single core is not supported with interactive (stateless) mode ");
}
}
@@ -3434,9 +3551,15 @@ int CGlobalTRex::ixgbe_start(void){
get_ex_drv()->wait_for_stable_link();
if ( !is_all_links_are_up(true) /*&& !get_is_stateless()*/ ){ // disable start with link down for now
- dump_links_status(stdout);
- rte_exit(EXIT_FAILURE, " "
- " one of the link is down \n");
+
+ /* Temporary solution for the trex-192 issue. It solves the case for X710/XL710 and works for both stateless and stateful modes */
+ if ( get_ex_drv()->drop_packets_incase_of_linkdown() ){
+ printf(" WARNING : there is no link on one of the ports, driver support auto drop in case of link down - continue\n");
+ }else{
+ dump_links_status(stdout);
+ rte_exit(EXIT_FAILURE, " "
+ " one of the link is down \n");
+ }
}
} else {
get_ex_drv()->wait_after_link_up();
@@ -5944,12 +6067,6 @@ void CTRexExtendedDriverBase40G::clear_extended_stats(CPhyEthIF * _if){
rte_eth_stats_reset(_if->get_port_id());
}
-void CTRexExtendedDriverBaseVIC::update_configuration(port_cfg_t * cfg){
- cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
- cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
- cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
- cfg->m_port_conf.rxmode.max_rx_pkt_len =9*1000-10;
-}
void CTRexExtendedDriverBase40G::update_configuration(port_cfg_t * cfg){
cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
@@ -5961,7 +6078,7 @@ void CTRexExtendedDriverBase40G::update_configuration(port_cfg_t * cfg){
// What is the type of the rule the respective hw_id counter counts.
struct fdir_hw_id_params_t {
uint16_t rule_type;
- uint16_t l4_proto;
+ uint8_t l4_proto;
};
static struct fdir_hw_id_params_t fdir_hw_id_rule_params[512];
@@ -5970,7 +6087,7 @@ static struct fdir_hw_id_params_t fdir_hw_id_rule_params[512];
 // ttl is used in stateful mode, and ip_id in stateless. We configure the driver registers so that only one of them applies.
 // So, the rule will apply if the packet has either the correct ttl or IP ID, depending on whether we are in stateful or stateless mode.
void CTRexExtendedDriverBase40G::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
- , uint16_t ip_id, uint16_t l4_proto, int queue, uint16_t stat_idx) {
+ , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx) {
int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
static int filter_soft_id = 0;
@@ -6308,6 +6425,405 @@ int CTRexExtendedDriverBase40G::set_rcv_all(CPhyEthIF * _if, bool set_on) {
}
/////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/* MLX5 */
+
+void CTRexExtendedDriverBaseMlnx5G::clear_extended_stats(CPhyEthIF * _if){
+ rte_eth_stats_reset(_if->get_port_id());
+}
+
+void CTRexExtendedDriverBaseMlnx5G::update_configuration(port_cfg_t * cfg){
+ cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
+ cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
+ cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
+ cfg->update_global_config_fdir_40g();
+ /* update mask */
+ cfg->m_port_conf.fdir_conf.mask.ipv4_mask.proto=0xff;
+ cfg->m_port_conf.fdir_conf.mask.ipv4_mask.tos=0x01;
+ cfg->m_port_conf.fdir_conf.mask.ipv6_mask.proto=0xff;
+ cfg->m_port_conf.fdir_conf.mask.ipv6_mask.tc=0x01;
+
+}
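The masks above are the core of the scheme: the hardware compares only the least-significant TOS/TC bit, and the TX-side changes in this same patch (pkt_gen.cpp, flow_stat_parser.cpp) set that bit on packets that must be steered to the RX queue. A minimal sketch of the resulting match logic, with illustrative constants:

#include <cstdint>
#include <cstdio>

static const uint8_t TOS_MARK_MASK = 0x01;   // mirrors fdir_conf.mask ipv4 tos / ipv6 tc above

// The hardware compares only the bits left unmasked.
static bool fdir_matches(uint8_t pkt_tos, uint8_t rule_tos) {
    return (pkt_tos & TOS_MARK_MASK) == (rule_tos & TOS_MARK_MASK);
}

int main() {
    uint8_t tos = 0x02;                                     // ordinary traffic, LSB clear
    std::printf("unmarked: %d\n", fdir_matches(tos, 0x01)); // prints 0
    tos |= TOS_MARK_MASK;                                   // what the TX path does to marked packets
    std::printf("marked:   %d\n", fdir_matches(tos, 0x01)); // prints 1
    return 0;
}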
+
+void CTRexExtendedDriverBaseMlnx5G::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type,
+ uint8_t ttl,
+ uint16_t ip_id,
+ uint8_t l4_proto,
+ int queue, uint16_t stat_idx) {
+ /* The Mellanox NIC cannot filter by TTL, so the low-level
+ driver translates these rules into a TOS match */
+
+ int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
+ static int filter_soft_id = 0;
+
+ if ( ret != 0 ){
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_supported "
+ "err=%d, port=%u \n",
+ ret, port_id);
+ }
+
+ struct rte_eth_fdir_filter filter;
+
+ memset(&filter,0,sizeof(struct rte_eth_fdir_filter));
+
+#if 0
+ printf("40g::%s rules: port:%d type:%d ttl:%d ip_id:%x l4:%d q:%d hw index:%d\n"
+ , (op == RTE_ETH_FILTER_ADD) ? "add" : "del"
+ , port_id, type, ttl, ip_id, l4_proto, queue, stat_idx);
+#endif
+
+ filter.action.rx_queue = queue;
+ filter.action.behavior =RTE_ETH_FDIR_ACCEPT;
+ filter.action.report_status =RTE_ETH_FDIR_NO_REPORT_STATUS;
+ filter.action.stat_count_index = stat_idx;
+ filter.soft_id = filter_soft_id++;
+ filter.input.flow_type = type;
+
+ if (op == RTE_ETH_FILTER_ADD) {
+ fdir_hw_id_rule_params[stat_idx].rule_type = type;
+ fdir_hw_id_rule_params[stat_idx].l4_proto = l4_proto;
+ }
+
+ switch (type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ filter.input.flow.ip4_flow.ttl=ttl;
+ filter.input.flow.ip4_flow.ip_id = ip_id;
+ if (l4_proto != 0)
+ filter.input.flow.ip4_flow.proto = l4_proto;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ filter.input.flow.ipv6_flow.hop_limits=ttl;
+ filter.input.flow.ipv6_flow.flow_label = ip_id;
+ filter.input.flow.ipv6_flow.proto = l4_proto;
+ break;
+ }
+
+ ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, op, (void*)&filter);
+ if ( ret != 0 ) {
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_ctrl: err=%d, port=%u\n",
+ ret, port_id);
+ }
+}
+
+int CTRexExtendedDriverBaseMlnx5G::add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
+ , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id) {
+
+ return 0;
+}
+
+
+int CTRexExtendedDriverBaseMlnx5G::configure_rx_filter_rules_statfull(CPhyEthIF * _if) {
+ uint32_t port_id = _if->get_port_id();
+ /* TTL filtering is emulated by a TOS match (see add_del_rules above) */
+
+ /* the fdir mask (set in update_configuration) is TOS=0x01/0x01, so a rule with ttl=0x1 matches any packet whose TOS LSB is set */
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 0x1, 1, 17, MAIN_DPDK_RX_Q, 0); /* UDP */
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, 0x1, 1, 6, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 0x1, 1, 132, MAIN_DPDK_RX_Q, 0); /*SCTP*/
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 0x1, 1, 1, MAIN_DPDK_RX_Q, 0); /*ICMP*/
+
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, 0x1, 1, 17, MAIN_DPDK_RX_Q, 0); /* UDP */
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_TCP, 0x1, 1, 6, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 0x1, 1, 132, MAIN_DPDK_RX_Q, 0); /*SCTP*/
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 0x1, 1, 1, MAIN_DPDK_RX_Q, 0); /*ICMP*/
+ return 0;
+}
+
+int CTRexExtendedDriverBaseMlnx5G::configure_rx_filter_rules(CPhyEthIF * _if) {
+ if (get_is_stateless()) {
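+ /* stateless and stateful currently use the same TOS-based rules; this may diverge in the future */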
+ return configure_rx_filter_rules_statfull(_if);
+ } else {
+ return configure_rx_filter_rules_statfull(_if);
+ }
+}
+
+void CTRexExtendedDriverBaseMlnx5G::reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {
+ //uint32_t port_id = _if->get_port_id();
+ //uint32_t rule_id = (port_id % m_if_per_card) * MAX_FLOW_STATS + min;
+
+ // Flow director counters do not wrap around as the data sheet promises; they get stuck at 0xffffffff.
+ // Resetting the HW value is disabled for now, so only the SW copies below are cleared.
+ //rte_eth_fdir_stats_reset(port_id, NULL, rule_id, len);
+
+ for (int i =0; i < len; i++) {
+ stats[i] = 0;
+ }
+}
+
+int CTRexExtendedDriverBaseMlnx5G::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts
+ ,uint32_t *bytes, uint32_t *prev_bytes, int min, int max) {
+ /* not supported yet */
+ return 0;
+}
+
+int CTRexExtendedDriverBaseMlnx5G::dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd)
+{
+ uint32_t port_id = _if->get_port_id();
+ struct rte_eth_fdir_stats stat;
+ int ret;
+
+ ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, RTE_ETH_FILTER_STATS, (void*)&stat);
+ if (ret == 0) {
+ if (fd)
+ fprintf(fd, "Num filters on guarant poll:%d, best effort poll:%d\n", stat.guarant_cnt, stat.best_cnt);
+ return (stat.guarant_cnt + stat.best_cnt);
+ } else {
+ if (fd)
+ fprintf(fd, "Failed reading fdir statistics\n");
+ return -1;
+ }
+}
+
+void CTRexExtendedDriverBaseMlnx5G::get_extended_stats(CPhyEthIF * _if,CPhyEthIFStats *stats){
+
+ struct rte_eth_stats stats1;
+ struct rte_eth_stats *prev_stats = &stats->m_prev_stats;
+ rte_eth_stats_get(_if->get_port_id(), &stats1);
+
+ stats->ipackets += stats1.ipackets - prev_stats->ipackets;
+ stats->ibytes += stats1.ibytes - prev_stats->ibytes +
+ + (stats1.ipackets << 2) - (prev_stats->ipackets << 2);
+ stats->opackets += stats1.opackets - prev_stats->opackets;
+ stats->obytes += stats1.obytes - prev_stats->obytes
+ + (stats1.opackets << 2) - (prev_stats->opackets << 2);
+ stats->f_ipackets += 0;
+ stats->f_ibytes += 0;
+ stats->ierrors += stats1.imissed + stats1.ierrors + stats1.rx_nombuf
+ - prev_stats->imissed - prev_stats->ierrors - prev_stats->rx_nombuf;
+ stats->oerrors += stats1.oerrors - prev_stats->oerrors;
+ stats->imcasts += 0;
+ stats->rx_nombuf += stats1.rx_nombuf - prev_stats->rx_nombuf;
+
+ prev_stats->ipackets = stats1.ipackets;
+ prev_stats->ibytes = stats1.ibytes;
+ prev_stats->opackets = stats1.opackets;
+ prev_stats->obytes = stats1.obytes;
+ prev_stats->imissed = stats1.imissed;
+ prev_stats->oerrors = stats1.oerrors;
+ prev_stats->ierrors = stats1.ierrors;
+ prev_stats->rx_nombuf = stats1.rx_nombuf;
+}
+
+int CTRexExtendedDriverBaseMlnx5G::wait_for_stable_link(){
+ delay(20);
+ return (0);
+}
+
+CFlowStatParser *CTRexExtendedDriverBaseMlnx5G::get_flow_stat_parser() {
+ CFlowStatParser *parser = new CFlowStatParser();
+ assert (parser);
+ return parser;
+}
+
+//////////////////////////////////////////////////////////////////////////////////
+/////////////////////////////////////////////////////////////////////
+/* VIC */
+
+void CTRexExtendedDriverBaseVIC::update_configuration(port_cfg_t * cfg){
+ cfg->m_tx_conf.tx_thresh.pthresh = TX_PTHRESH;
+ cfg->m_tx_conf.tx_thresh.hthresh = TX_HTHRESH;
+ cfg->m_tx_conf.tx_thresh.wthresh = TX_WTHRESH;
+ cfg->m_port_conf.rxmode.max_rx_pkt_len =9*1000-10;
+
+ if (get_is_stateless()) {
+ /* work in TOS mode */
+ cfg->m_port_conf.fdir_conf.mask.ipv4_mask.tos = 0x01;
+ cfg->m_port_conf.fdir_conf.mask.ipv6_mask.tc = 0x01;
+ }else{
+ #ifdef VIC_TTL_FILTER
+ cfg->m_port_conf.fdir_conf.mask.ipv4_mask.ttl = 0xff;
+ cfg->m_port_conf.fdir_conf.mask.ipv6_mask.hop_limits = 0xff;
+ #else
+ cfg->m_port_conf.fdir_conf.mask.ipv4_mask.tos = 0x01;
+ cfg->m_port_conf.fdir_conf.mask.ipv6_mask.tc = 0x01;
+ #endif
+ }
+}
+
+
+/* Add a rule to send packets with protocol 'type' and ttl 'ttl' to rx queue 1 */
+// ttl is used in stateful mode, and ip_id in stateless. We configure the driver registers so that only one of them applies.
+// So, the rule will apply if the packet has either the correct ttl or IP ID, depending on whether we are in stateful or stateless mode.
+void CTRexExtendedDriverBaseVIC::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
+ , uint16_t ip_id, uint8_t l4_proto, int queue, uint16_t stat_idx) {
+ int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
+ static int filter_soft_id = 0;
+
+ if ( ret != 0 ){
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_supported "
+ "err=%d, port=%u \n",
+ ret, port_id);
+ }
+
+ struct rte_eth_fdir_filter filter;
+
+ memset(&filter,0,sizeof(struct rte_eth_fdir_filter));
+
+#if 0
+ printf("40g::%s rules: port:%d type:%d ttl:%d ip_id:%x l4:%d q:%d hw index:%d\n"
+ , (op == RTE_ETH_FILTER_ADD) ? "add" : "del"
+ , port_id, type, ttl, ip_id, l4_proto, queue, stat_idx);
+#endif
+
+ filter.action.rx_queue = queue;
+ filter.action.behavior =RTE_ETH_FDIR_ACCEPT;
+ filter.action.report_status =RTE_ETH_FDIR_NO_REPORT_STATUS;
+ filter.action.stat_count_index = stat_idx;
+ filter.soft_id = filter_soft_id++;
+ filter.input.flow_type = type;
+
+ if (op == RTE_ETH_FILTER_ADD) {
+ fdir_hw_id_rule_params[stat_idx].rule_type = type;
+ fdir_hw_id_rule_params[stat_idx].l4_proto = l4_proto;
+ }
+
+ switch (type) {
+ case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
+ filter.input.flow.ip4_flow.tos=ttl;
+ filter.input.flow.ip4_flow.ip_id = ip_id;
+ if (l4_proto != 0)
+ filter.input.flow.ip4_flow.proto = l4_proto;
+ break;
+ case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
+ case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
+ filter.input.flow.ipv6_flow.tc=ttl;
+ filter.input.flow.ipv6_flow.flow_label = ip_id;
+ filter.input.flow.ipv6_flow.proto = l4_proto;
+ break;
+ }
+
+ ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR, op, (void*)&filter);
+ if ( ret != 0 ) {
+ rte_exit(EXIT_FAILURE, "rte_eth_dev_filter_ctrl: err=%d, port=%u\n",
+ ret, port_id);
+ }
+}
+
+int CTRexExtendedDriverBaseVIC::add_del_eth_type_rule(uint8_t port_id, enum rte_filter_op op, uint16_t eth_type) {
+ int ret;
+ struct rte_eth_ethertype_filter filter;
+
+ memset(&filter, 0, sizeof(filter));
+ filter.ether_type = eth_type;
+ filter.flags = 0;
+ filter.queue = MAIN_DPDK_RX_Q;
+ ret = rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_ETHERTYPE, op, (void *) &filter);
+
+ return ret;
+}
+
+extern "C" int rte_eth_fdir_stats_reset(uint8_t port_id, uint32_t *stats, uint32_t start, uint32_t len);
+
+// type - rule type. Currently we only support rules in IP ID.
+// proto - Packet protocol: UDP or TCP
+// id - Counter id in HW. We assume it is in the range 0..MAX_FLOW_STATS
+int CTRexExtendedDriverBaseVIC::add_del_rx_flow_stat_rule(uint8_t port_id, enum rte_filter_op op, uint16_t l3_proto
+ , uint8_t l4_proto, uint8_t ipv6_next_h, uint16_t id) {
+ return 0;
+}
+
+int CTRexExtendedDriverBaseVIC::configure_rx_filter_rules_statefull(CPhyEthIF * _if) {
+ uint32_t port_id = _if->get_port_id();
+#ifndef VIC_TTL_FILTER
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 0x1, 0, 17, MAIN_DPDK_RX_Q, 0); /* UDP */
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, 0x1, 0, 6, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, 0x1, 0, 132, MAIN_DPDK_RX_Q, 0); /*SCTP*/
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 0x1, 0, 1, MAIN_DPDK_RX_Q, 0); /*ICMP*/
+
+ /* Ipv6*/
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 0x1, 0, 0, MAIN_DPDK_RX_Q, 0); /* any protocol over IPv6 */
+#else
+ uint16_t hops = get_rx_check_hops();
+ int i;
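+ /* rx-check/latency packets carry one of two reserved TTL values; 'hops' accounts for TTL decrements along the path */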
+ for (i = 0; i < 2; i++) {
+ uint8_t ttl = TTL_RESERVE_DUPLICATE - i - hops;
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, ttl, 0, 17, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, ttl, 0, 6, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_TCP, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q, 0);
+ /* Rules for latency measurement packets */
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, ttl, 0, IPPROTO_ICMP, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, ttl, 0, 138, MAIN_DPDK_RX_Q, 0);
+ }
+#endif
+
+ return 0;
+}
+
+extern "C" int enicpmd_dev_get_fw_support(int port_id,
+ uint32_t *ver);
+
+
+int CTRexExtendedDriverBaseVIC::verify_fw_ver(int port_id) {
+
+ uint32_t ver;
+ int ret=enicpmd_dev_get_fw_support(port_id,&ver);
+
+ if (ret==0) {
+ if (CGlobalInfo::m_options.preview.getVMode() >= 1) {
+ printf("VIC port %d: FW support advanced filtering \n", port_id);
+ }
+ }else{
+ printf("Error: VIC firmware should upgrade to support advanced filtering \n");
+ printf(" Please refer to %s for upgrade instructions\n",
+ "https://trex-tgn.cisco.com/trex/doc/trex_manual.html");
+ exit(1);
+ }
+ return (0);
+}
+
+
+int CTRexExtendedDriverBaseVIC::configure_rx_filter_rules(CPhyEthIF * _if) {
+
+ if (get_is_stateless()) {
+ /* stateless and stateful currently work in the same way (TOS-based); this might change in the future */
+ return configure_rx_filter_rules_statefull(_if);
+ } else {
+ return configure_rx_filter_rules_statefull(_if);
+ }
+}
+
+void CTRexExtendedDriverBaseVIC::reset_rx_stats(CPhyEthIF * _if, uint32_t *stats, int min, int len) {
+}
+
+int CTRexExtendedDriverBaseVIC::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, uint32_t *prev_pkts
+ ,uint32_t *bytes, uint32_t *prev_bytes, int min, int max) {
+ printf(" NOT supported yet \n");
+ return 0;
+}
+
+// if fd != NULL, dump fdir stats of _if
+// return num of filters
+int CTRexExtendedDriverBaseVIC::dump_fdir_global_stats(CPhyEthIF * _if, FILE *fd)
+{
+ //printf(" NOT supported yet \n");
+ return (0);
+}
+
+
+CFlowStatParser *CTRexExtendedDriverBaseVIC::get_flow_stat_parser() {
+ CFlowStatParser *parser = new CFlowStatParser();
+ assert (parser);
+ return parser;
+}
+
+int CTRexExtendedDriverBaseVIC::set_rcv_all(CPhyEthIF * _if, bool set_on) {
+ //printf(" NOT supported yet \n");
+ return 0;
+}
+
+
+/////////////////////////////////////////////////////////////////////////////////////
void CTRexExtendedDriverBase1GVm::update_configuration(port_cfg_t * cfg){
diff --git a/src/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h b/src/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h
index fdb5b994..25f8b4fe 100755
--- a/src/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h
+++ b/src/pal/linux_dpdk/x86_64-default-linuxapp-gcc/include/rte_config.h
@@ -70,3 +70,11 @@
#undef RTE_TEST_PMD_RECORD_BURST_STATS
#undef RTE_LIBRTE_GCOV
#undef RTE_INSECURE_FUNCTION_WARNING
+
+
+//#undef RTE_LIBRTE_MLX5_PMD
+//#define RTE_LIBRTE_MLX5_PMD 1
+//#undef RTE_LIBRTE_MLX5_TX_MP_CACHE
+//#define RTE_LIBRTE_MLX5_TX_MP_CACHE 8
+//#define MLX5_FDIR_SUPPORT 1
+
diff --git a/src/pkt_gen.cpp b/src/pkt_gen.cpp
index eb9a26f9..45e3a298 100644
--- a/src/pkt_gen.cpp
+++ b/src/pkt_gen.cpp
@@ -30,7 +30,7 @@
#include <common/Network/Packet/Arp.h>
#include "rx_check_header.h"
#include "pkt_gen.h"
-
+#include "bp_sim.h"
// For use in tests
char *CTestPktGen::create_test_pkt(uint16_t l3_type, uint16_t l4_proto, uint8_t ttl, uint32_t ip_id, uint16_t flags
, uint16_t max_payload, int &pkt_size) {
@@ -52,7 +52,7 @@ char *CTestPktGen::create_test_pkt(uint16_t l3_type, uint16_t l4_proto, uint8_t
}
uint8_t ip_header[] = {
- 0x45,0x02,0x00,0x30,
+ 0x45,0x03,0x00,0x30,
0x00,0x00,0x40,0x00,
0xff,0x01,0xbd,0x04,
0x10,0x0,0x0,0x1, //SIP
@@ -60,7 +60,7 @@ char *CTestPktGen::create_test_pkt(uint16_t l3_type, uint16_t l4_proto, uint8_t
// 0x82, 0x0b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 // IP option. change 45 to 48 (header len) if using it.
};
uint8_t ipv6_header[] = {
- 0x60,0x00,0xff,0x30, // traffic class + flow label
+ 0x60,0x10,0xff,0x30, // traffic class + flow label
0x00,0x00,0x40,0x00, // payload len + next header + hop limit
0x10,0x0,0x0,0x1,0x10,0x0,0x0,0x1,0x10,0x0,0x0,0x1,0x10,0x0,0x0,0x1, //SIP
0x30,0x0,0x0,0x1,0x10,0x0,0x0,0x1,0x30,0x0,0x0,0x1,0x10,0x0,0x0,0x1, //DIP
@@ -222,10 +222,22 @@ char *CTestPktGen::create_test_pkt(uint16_t l3_type, uint16_t l4_proto, uint8_t
switch(l3_type) {
case EthernetHeader::Protocol::IP:
ip->setTimeToLive(ttl);
+ if (ttl==TTL_RESERVE_DUPLICATE || ttl==(TTL_RESERVE_DUPLICATE-1)) {
+ ip->setTOS(TOS_TTL_RESERVE_DUPLICATE);
+ }else{
+ ip->setTOS(0x2);
+ }
+
ip->updateCheckSum();
break;
case EthernetHeader::Protocol::IPv6:
ipv6->setHopLimit(ttl);
+ if (ttl==TTL_RESERVE_DUPLICATE || ttl==(TTL_RESERVE_DUPLICATE-1)) {
+ ipv6->setTrafficClass(TOS_TTL_RESERVE_DUPLICATE);
+ }else{
+ ipv6->setTrafficClass(0x2);
+ }
+
break;
}
diff --git a/src/pre_test.cpp b/src/pre_test.cpp
index 130d076d..76fa9a26 100644
--- a/src/pre_test.cpp
+++ b/src/pre_test.cpp
@@ -257,7 +257,7 @@ void CPretest::send_arp_req(uint16_t port_id, bool is_grat) {
int num_sent;
int verbose = CGlobalInfo::m_options.preview.getVMode();
- m[0] = CGlobalInfo::pktmbuf_alloc_small(0);
+ m[0] = CGlobalInfo::pktmbuf_alloc_small_by_port(port_id);
if ( unlikely(m[0] == 0) ) {
fprintf(stderr, "ERROR: Could not allocate mbuf for sending ARP to port:%d\n", port_id);
exit(1);
diff --git a/src/rx_check.cpp b/src/rx_check.cpp
index bfaa4ddb..d7eecede 100755
--- a/src/rx_check.cpp
+++ b/src/rx_check.cpp
@@ -255,7 +255,9 @@ bool RxCheckManager::Create(){
void RxCheckManager::handle_packet(CRx_check_header * rxh){
- //rxh->dump(stdout);
+ // m_stats.Dump(stdout);
+ //rxh->dump(stdout);
+
m_stats.m_total_rx++;
if ( rxh->m_magic != RX_CHECK_MAGIC ){
m_stats.m_err_no_magic++;
diff --git a/src/stateful_rx_core.cpp b/src/stateful_rx_core.cpp
index ebc51fcb..cbf62a17 100644
--- a/src/stateful_rx_core.cpp
+++ b/src/stateful_rx_core.cpp
@@ -33,7 +33,7 @@ const uint8_t sctp_pkt[]={
0x00,0x0e,0x2e,0x24,0x37,0x5f,
0x08,0x00,
- 0x45,0x02,0x00,0x30,
+ 0x45,0x03,0x00,0x30,
0x00,0x00,0x40,0x00,
0xff,0x84,0xbd,0x04,
0x9b,0xe6,0x18,0x9b, //sIP
@@ -57,7 +57,7 @@ const uint8_t icmp_pkt[]={
0x00,0x0e,0x2e,0x24,0x37,0x5f,
0x08,0x00,
- 0x45,0x02,0x00,0x30,
+ 0x45,0x03,0x00,0x30,
0x00,0x00,0x40,0x00,
0xff,0x01,0xbd,0x04,
0x9b,0xe6,0x18,0x9b, //SIP
diff --git a/src/stateless/dp/trex_stateless_dp_core.cpp b/src/stateless/dp/trex_stateless_dp_core.cpp
index 857ac8f9..485e8533 100644
--- a/src/stateless/dp/trex_stateless_dp_core.cpp
+++ b/src/stateless/dp/trex_stateless_dp_core.cpp
@@ -1304,9 +1304,7 @@ bool CGenNodePCAP::create(uint8_t port_id,
m_raw_packet = new CCapPktRaw();
if ( m_reader->ReadPacket(m_raw_packet) == false ){
- /* handle error */
- delete m_reader;
- return (false);
+ return false;
}
/* set the dir */