author Ido Barnea <ibarnea@cisco.com> 2016-08-02 13:45:19 +0300
committer Ido Barnea <ibarnea@cisco.com> 2016-08-03 16:35:11 +0300
commit 66c49a9d8ee5353dfa60eb90fc93eb4f4abf095e (patch)
tree cb714b8a628afaca0c8558ee9b60394ef8f9c785
parent c0b0c84099b91be79bdd7b53f74b2d504b9edd31 (diff)
IPv6 XL710 stateless support and stateful --rx-check
-rw-r--r-- src/debug.cpp                            | 375
-rw-r--r-- src/debug.h                              |   3
-rw-r--r-- src/dpdk/drivers/net/i40e/i40e_ethdev.c  |  42
-rw-r--r-- src/dpdk/drivers/net/i40e/i40e_fdir.c    |   6
-rw-r--r-- src/dpdk/lib/librte_ether/rte_eth_ctrl.h |   2
-rw-r--r-- src/main_dpdk.cpp                        |  76
6 files changed, 397 insertions, 107 deletions
diff --git a/src/debug.cpp b/src/debug.cpp
index 14e7002a..d0896290 100644
--- a/src/debug.cpp
+++ b/src/debug.cpp
@@ -27,9 +27,26 @@
#include <rte_pci.h>
#include <rte_ethdev.h>
#include <common/basic_utils.h>
+#include "rx_check_header.h"
#include "main_dpdk.h"
#include "debug.h"
+enum {
+ D_PKT_TYPE_ICMP = 1,
+ D_PKT_TYPE_UDP = 2,
+ D_PKT_TYPE_TCP = 3,
+ D_PKT_TYPE_9k_UDP = 4,
+ D_PKT_TYPE_IPV6 = 60,
+ D_PKT_TYPE_HW_VERIFY = 100,
+
+} debug_pkt_types_t;
+
+enum {
+ DPF_VLAN = 0x1,
+ DPF_QINQ = 0X2,
+ DPF_RXCHECK = 0x4
+} debug_pkt_flags;
+
const uint8_t udp_pkt[] = {
0x00,0x00,0x00,0x01,0x00,0x00,
0x00,0x00,0x00,0x01,0x00,0x00,
@@ -95,26 +112,49 @@ int CTrexDebug::rcv_send_all(int queue_id) {
return 0;
}
+#if 0
+rte_mbuf_t *CTrexDebug::create_test_pkt(int ip_ver, uint16_t l4_proto, uint8_t ttl, uint16_t ip_id) {
+ uint8_t test_pkt[] =
+ {0x74, 0xa2, 0xe6, 0xd5, 0x39, 0x25, 0xa0, 0x36, 0x9f, 0x38, 0xa4, 0x02, 0x86, 0xDD, 0x60, 0x00,
+ 0xff, 0x7f, 0x00, 0x14, 0x06, 0xff, 0x10, 0x00, 0x00, 0x01, 0x10, 0x00, 0x00, 0x01, 0x30, 0x00,
+ // 0x00, 0x01, 0x10, 0x00, 0x00, 0x01, 0x20, 0x01, 0x00, 0x00, 0x41, 0x37, 0x93, 0x50, 0x80, 0x00,
+ 0x00, 0x01, 0x10, 0x00, 0x00, 0x01, /* TCP: */ 0xab, 0xcd, 0x00, 0x80, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
+ 0x07, 0x08, 0x50, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00, 0x08, 0x0a, 0x01, 0x02, 0x03, 0x04,
+ // bad - 0x03, 0x04, 0x06, 0x02, 0x20, 0x00, 0xBB, 0x79, 0x00, 0x00};
+ 0x03, 0x04, 0x50, 0x02, 0x20, 0x00, 0xBB, 0x79, 0x00, 0x00};
+ rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc(0, sizeof(test_pkt));
+ char *p = rte_pktmbuf_append(m, sizeof(test_pkt));
+ assert(p);
+
+ /* set pkt data */
+ memcpy(p, test_pkt, sizeof(test_pkt));
+ return m;
+
+}
+
+#else
// For playing around, and testing packet sending in debug mode
-rte_mbuf_t *CTrexDebug::create_test_pkt(int pkt_type, uint8_t ttl, uint16_t ip_id) {
- uint8_t proto;
+rte_mbuf_t *CTrexDebug::create_test_pkt(int ip_ver, uint16_t l4_proto, uint8_t ttl
+ , uint32_t ip_id, uint16_t flags) {
int pkt_size = 0;
// ASA 2
uint8_t dst_mac[6] = {0x74, 0xa2, 0xe6, 0xd5, 0x39, 0x25};
uint8_t src_mac[6] = {0xa0, 0x36, 0x9f, 0x38, 0xa4, 0x02};
- // ASA 1
+ uint8_t vlan_header[4] = {0x0a, 0xbc, 0x00, 0x00}; // the type is filled in below according to whether the packet is IPv4 or IPv6
+ uint8_t vlan_header2[4] = {0x0a, 0xbc, 0x88, 0xa8};
+ uint16_t l2_proto;
+ // ASA 1A
// uint8_t dst_mac[6] = {0xd4, 0x8c, 0xb5, 0xc9, 0x54, 0x2b};
// uint8_t src_mac[6] = {0xa0, 0x36, 0x9f, 0x38, 0xa4, 0x0};
- //#define VLAN
-#ifdef VLAN
- uint16_t l2_proto = 0x0081;
- uint8_t vlan_header[4] = {0x0a, 0xbc, 0x08, 0x00};
-#ifdef QINQ
- uint8_t vlan_header2[4] = {0x0a, 0xbc, 0x88, 0xa8};
-#endif
-#else
- uint16_t l2_proto = 0x0008;
-#endif
+ if (flags & DPF_VLAN) {
+ l2_proto = 0x0081;
+ } else {
+ if (ip_ver == 4)
+ l2_proto = 0x0008;
+ else // IPV6
+ l2_proto = 0xdd86;
+ }
+
uint8_t ip_header[] = {
0x45,0x02,0x00,0x30,
0x00,0x00,0x40,0x00,
@@ -123,6 +163,12 @@ rte_mbuf_t *CTrexDebug::create_test_pkt(int pkt_type, uint8_t ttl, uint16_t ip_i
0x30,0x0,0x0,0x1, //DIP
// 0x82, 0x0b, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 // IP option. change 45 to 48 (header len) if using it.
};
+ uint8_t ipv6_header[] = {
+ 0x60,0x00,0xff,0x30, // traffic class + flow label
+ 0x00,0x00,0x40,0x00, // payload len + next header + hop limit
+ 0x10,0x0,0x0,0x1,0x10,0x0,0x0,0x1,0x10,0x0,0x0,0x1,0x10,0x0,0x0,0x1, //SIP
+ 0x30,0x0,0x0,0x1,0x10,0x0,0x0,0x1,0x30,0x0,0x0,0x1,0x10,0x0,0x0,0x1, //DIP
+ };
uint8_t udp_header[] = {0x11, 0x11, 0x11,0x11, 0x00, 0x6d, 0x00, 0x00};
uint8_t udp_data[] = {0x64,0x31,0x3a,0x61,
0x64,0x32,0x3a,0x69,0x64,
@@ -139,13 +185,15 @@ rte_mbuf_t *CTrexDebug::create_test_pkt(int pkt_type, uint8_t ttl, uint16_t ip_i
0xa5,0x31,0x3a,0x79,0x31,0x3a,0x71,0x65,0x87,0xa6,0x7d,
0xe7
};
+
uint8_t tcp_header[] = {0xab, 0xcd, 0x00, 0x80, // src, dst ports
0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, // seq num, ack num
0x50, 0x00, 0xff, 0xff, // Header size, flags, window size
0x00, 0x00, 0x00, 0x00, // checksum ,urgent pointer
};
- uint8_t tcp_data[] = {0x8, 0xa, 0x1, 0x2, 0x3, 0x4, 0x3, 0x4, 0x6, 0x5};
+ uint8_t tcp_data[] = {0x8, 0xa, 0x1, 0x2, 0x3, 0x4, 0x3, 0x4, 0x6, 0x5,
+ 0x8, 0xa, 0x1, 0x2, 0x3, 0x4, 0x3, 0x4, 0x6, 0x5};
uint8_t icmp_header[] = {
0x08, 0x00,
@@ -160,18 +208,31 @@ rte_mbuf_t *CTrexDebug::create_test_pkt(int pkt_type, uint8_t ttl, uint16_t ip_i
0x78, 0x56, 0x34, 0x12, 0x00, 0x00 // seq
};
- switch (pkt_type) {
- case 1:
- proto = IPPROTO_ICMP;
- pkt_size = 14 + sizeof(ip_header) + sizeof(icmp_header) + sizeof (icmp_data);
+ pkt_size = 14;
+ switch(ip_ver) {
+ case 4:
+ pkt_size += sizeof(ip_header);
break;
- case 2:
- proto = IPPROTO_UDP;
- pkt_size = 14 + sizeof(ip_header) + sizeof(udp_header) + sizeof (udp_data);
+ case 6:
+ pkt_size += sizeof(ipv6_header);
+ if (flags & DPF_RXCHECK) {
+ pkt_size += sizeof(struct CRx_check_header);
+ }
+ break;
+ default:
+ printf("Internal error. Wrong ip_ver\n");
+ exit(-1);
+ }
+
+ switch (l4_proto) {
+ case IPPROTO_ICMP:
+ pkt_size += sizeof(icmp_header) + sizeof (icmp_data);
+ break;
+ case IPPROTO_UDP:
+ pkt_size += sizeof(udp_header) + sizeof (udp_data);
break;
- case 3:
- proto = IPPROTO_TCP;
- pkt_size = 14 + sizeof(ip_header) + sizeof(tcp_header) + sizeof (tcp_data);
+ case IPPROTO_TCP:
+ pkt_size += sizeof(tcp_header) + sizeof (tcp_data);
break;
default:
return NULL;
@@ -189,45 +250,78 @@ rte_mbuf_t *CTrexDebug::create_test_pkt(int pkt_type, uint8_t ttl, uint16_t ip_i
memcpy(p, dst_mac, sizeof(dst_mac)); p += sizeof(dst_mac);
memcpy(p, src_mac, sizeof(src_mac)); p += sizeof(src_mac);
memcpy(p, &l2_proto, sizeof(l2_proto)); p += sizeof(l2_proto);
-#ifdef VLAN
-#ifdef QINQ
- memcpy(p, &vlan_header2, sizeof(vlan_header2)); p += sizeof(vlan_header2);
-#endif
- memcpy(p, &vlan_header, sizeof(vlan_header)); p += sizeof(vlan_header);
-#endif
+
+ if (flags & DPF_VLAN) {
+ if (flags & DPF_QINQ) {
+ memcpy(p, &vlan_header2, sizeof(vlan_header2)); p += sizeof(vlan_header2);
+ }
+ if (ip_ver == 4) {
+ vlan_header[2] = 0x08;
+ vlan_header[3] = 0x00;
+ } else {
+ vlan_header[2] = 0x86;
+ vlan_header[3] = 0xdd;
+ }
+ memcpy(p, &vlan_header, sizeof(vlan_header)); p += sizeof(vlan_header);
+ }
+
struct IPHeader *ip = (IPHeader *)p;
- memcpy(p, ip_header, sizeof(ip_header)); p += sizeof(ip_header);
- ip->setProtocol(proto);
- ip->setTotalLength(pkt_size - 14);
- ip->setId(ip_id);
+ struct IPv6Header *ipv6 = (IPv6Header *)p;
+ if (ip_ver == 4) {
+ memcpy(p, ip_header, sizeof(ip_header)); p += sizeof(ip_header);
+ ip->setProtocol(l4_proto);
+ ip->setTotalLength(pkt_size - 14);
+ ip->setId(ip_id);
+ } else {
+ memcpy(p, ipv6_header, sizeof(ipv6_header)); p += sizeof(ipv6_header);
+ if (flags & DPF_RXCHECK) {
+ // rx check header
+ ipv6->setNextHdr(RX_CHECK_V6_OPT_TYPE);
+ if (flags & DPF_RXCHECK) {
+ struct CRx_check_header *rxch = (struct CRx_check_header *)p;
+ p += sizeof(CRx_check_header);
+ rxch->m_option_type = l4_proto;
+ rxch->m_option_len = RX_CHECK_V6_OPT_LEN;
+ }
+ } else {
+ ipv6->setNextHdr(l4_proto);
+ }
+ ipv6->setPayloadLen(pkt_size - 14 - sizeof(ipv6_header));
+ ipv6->setFlowLabel(ip_id);
+ }
struct TCPHeader *tcp = (TCPHeader *)p;
struct ICMPHeader *icmp= (ICMPHeader *)p;
- switch (pkt_type) {
- case 1:
+ switch (l4_proto) {
+ case IPPROTO_ICMP:
memcpy(p, icmp_header, sizeof(icmp_header)); p += sizeof(icmp_header);
memcpy(p, icmp_data, sizeof(icmp_data)); p += sizeof(icmp_data);
icmp->updateCheckSum(sizeof(icmp_header) + sizeof(icmp_data));
break;
- case 2:
+ case IPPROTO_UDP:
memcpy(p, udp_header, sizeof(udp_header)); p += sizeof(udp_header);
memcpy(p, udp_data, sizeof(udp_data)); p += sizeof(udp_data);
break;
- case 3:
+ case IPPROTO_TCP:
memcpy(p, tcp_header, sizeof(tcp_header)); p += sizeof(tcp_header);
memcpy(p, tcp_data, sizeof(tcp_data)); p += sizeof(tcp_data);
- tcp->setSynFlag(true);
- printf("Sending TCP header:");
- tcp->dump(stdout);
+ tcp->setSynFlag(false);
+ // printf("Sending TCP header:");
+ //tcp->dump(stdout);
break;
default:
return NULL;
}
- ip->setTimeToLive(ttl);
- ip->updateCheckSum();
+ if (ip_ver == 4) {
+ ip->setTimeToLive(ttl);
+ ip->updateCheckSum();
+ } else {
+ ipv6->setHopLimit(ttl);
+ }
return m;
}
+#endif
rte_mbuf_t *CTrexDebug::create_pkt(uint8_t *pkt, int pkt_size) {
rte_mbuf_t *m = CGlobalInfo::pktmbuf_alloc(0, pkt_size);
@@ -336,25 +430,200 @@ static void rte_stats_dump(const struct rte_eth_stats &stats) {
rte_stat_dump_array(stats.q_errors, "queue dropped", RTE_ETHDEV_QUEUE_STAT_CNTRS);
}
+extern const uint32_t FLOW_STAT_PAYLOAD_IP_ID;
+
+typedef enum debug_expected_q_t_ {
+ ZERO, // always queue 0
+ ONE, // always queue 1
+ STL, // queue 1 on stateless. 0 on stateful
+ STF // queue 1 on stateful. 0 on stateless
+} debug_expected_q_t;
+
+struct pkt_params {
+ char name[100];
+ uint8_t ip_ver;
+ uint16_t l4_proto;
+ uint8_t ttl;
+ uint32_t ip_id;
+ uint16_t pkt_flags;
+ debug_expected_q_t expected_q;
+};
+
+struct pkt_params test_pkts[] = {
+ {"ipv4 TCP ttl 255", 4, IPPROTO_TCP, 255, 5, 0, STF},
+ {"ipv4 TCP ttl 254", 4, IPPROTO_TCP, 254, 5, 0, STF},
+ {"ipv4 TCP ttl 253", 4, IPPROTO_TCP, 253, 5, 0, ZERO},
+ {"ipv4 UDP ttl 255", 4, IPPROTO_UDP, 255, 5, 0, STF},
+ {"ipv4 UDP ttl 254", 4, IPPROTO_UDP, 254, 5, 0, STF},
+ {"ipv4 UDP ttl 253", 4, IPPROTO_UDP, 253, 5, 0, ZERO},
+ {"ipv4 ICMP ttl 255", 4, IPPROTO_ICMP, 255, 5, 0, STF},
+ {"ipv4 ICMP ttl 254", 4, IPPROTO_ICMP, 254, 5, 0, STF},
+ {"ipv4 ICMP ttl 253", 4, IPPROTO_ICMP, 253, 5, 0, ZERO},
+ {"ipv4 TCP latency flow stat", 4, IPPROTO_TCP, 253, FLOW_STAT_PAYLOAD_IP_ID, 0, STL},
+ {"ipv4 UDP latency flow stat", 4, IPPROTO_UDP, 253, FLOW_STAT_PAYLOAD_IP_ID, 0, STL},
+ {"vlan ipv4 TCP ttl 255", 4, IPPROTO_TCP, 255, 5, DPF_VLAN, STF},
+ {"vlan ipv4 TCP ttl 254", 4, IPPROTO_TCP, 254, 5, DPF_VLAN, STF},
+ {"vlan ipv4 TCP ttl 253", 4, IPPROTO_TCP, 253, 5, DPF_VLAN, ZERO},
+ {"vlan ipv4 UDP ttl 255", 4, IPPROTO_UDP, 255, 5, DPF_VLAN, STF},
+ {"vlan ipv4 UDP ttl 254", 4, IPPROTO_UDP, 254, 5, DPF_VLAN, STF},
+ {"vlan ipv4 UDP ttl 253", 4, IPPROTO_UDP, 253, 5, DPF_VLAN, ZERO},
+ {"vlan ipv4 ICMP ttl 255", 4, IPPROTO_ICMP, 255, 5, DPF_VLAN, STF},
+ {"vlan ipv4 ICMP ttl 254", 4, IPPROTO_ICMP, 254, 5, DPF_VLAN, STF},
+ {"vlan ipv4 ICMP ttl 253", 4, IPPROTO_ICMP, 253, 5, DPF_VLAN, ZERO},
+ {"vlan ipv4 TCP latency flow stat", 4, IPPROTO_TCP, 253, FLOW_STAT_PAYLOAD_IP_ID, DPF_VLAN, STL},
+ {"vlan ipv4 UDP latency flow stat", 4, IPPROTO_UDP, 253, FLOW_STAT_PAYLOAD_IP_ID, DPF_VLAN, STL},
+ {"ipv6 TCP ttl 255", 6, IPPROTO_TCP, 255, 5, DPF_RXCHECK, STF},
+ {"ipv6 TCP ttl 254", 6, IPPROTO_TCP, 254, 5, DPF_RXCHECK, STF},
+ {"ipv6 TCP ttl 253", 6, IPPROTO_TCP, 253, 5, DPF_RXCHECK, ZERO},
+ {"ipv6 UDP ttl 255", 6, IPPROTO_UDP, 255, 5, DPF_RXCHECK, STF},
+ {"ipv6 UDP ttl 254", 6, IPPROTO_UDP, 254, 5, DPF_RXCHECK, STF},
+ {"ipv6 UDP ttl 253", 6, IPPROTO_UDP, 253, 5, DPF_RXCHECK, ZERO},
+ {"ipv6 ICMP ttl 255", 6, IPPROTO_ICMP, 255, 5, DPF_RXCHECK, STF},
+ {"ipv6 ICMP ttl 254", 6, IPPROTO_ICMP, 254, 5, DPF_RXCHECK, STF},
+ {"ipv6 ICMP ttl 253", 6, IPPROTO_ICMP, 253, 5, DPF_RXCHECK, ZERO},
+ {"ipv6 TCP latency flow stat", 6, IPPROTO_TCP, 253, FLOW_STAT_PAYLOAD_IP_ID, 0, STL},
+ {"ipv6 UDP latency flow stat", 6, IPPROTO_UDP, 253, FLOW_STAT_PAYLOAD_IP_ID, 0, STL},
+ {"vlan ipv6 TCP ttl 255", 6, IPPROTO_TCP, 255, 5, DPF_VLAN | DPF_RXCHECK, STF},
+ {"vlan ipv6 TCP ttl 254", 6, IPPROTO_TCP, 254, 5, DPF_VLAN | DPF_RXCHECK, STF},
+ {"vlan ipv6 TCP ttl 253", 6, IPPROTO_TCP, 253, 5, DPF_VLAN | DPF_RXCHECK, ZERO},
+ {"vlan ipv6 UDP ttl 255", 6, IPPROTO_UDP, 255, 5, DPF_VLAN | DPF_RXCHECK, STF},
+ {"vlan ipv6 UDP ttl 254", 6, IPPROTO_UDP, 254, 5, DPF_VLAN | DPF_RXCHECK, STF},
+ {"vlan ipv6 UDP ttl 253", 6, IPPROTO_UDP, 253, 5, DPF_VLAN | DPF_RXCHECK, ZERO},
+ {"vlan ipv6 ICMP ttl 255", 6, IPPROTO_ICMP, 255, 5, DPF_VLAN | DPF_RXCHECK, STF},
+ {"vlan ipv6 ICMP ttl 254", 6, IPPROTO_ICMP, 254, 5, DPF_VLAN | DPF_RXCHECK, STF},
+ {"vlan ipv6 ICMP ttl 253", 6, IPPROTO_ICMP, 253, 5, DPF_VLAN | DPF_RXCHECK, ZERO},
+ {"vlan ipv6 TCP latency flow stat", 6, IPPROTO_TCP, 253, FLOW_STAT_PAYLOAD_IP_ID, DPF_VLAN, STL},
+ {"vlan ipv6 UDP latency flow stat", 6, IPPROTO_UDP, 253, FLOW_STAT_PAYLOAD_IP_ID, DPF_VLAN, STL},
+
+};
+
+// Unit test for verifying the HW queue rule configuration. Can be run by:
+// for stateful: --send-debug-pkt 100 -f cap2/dns.yaml -l 1
+// for stateless: --send-debug-pkt 100 -i
+int CTrexDebug::verify_hw_rules() {
+ rte_mbuf_t *m = NULL;
+ CPhyEthIF * lp;
+ rte_mbuf_t * rx_pkts[32];
+ int sent_num = 20;
+ int ret = 0;
+
+ for (int pkt_num = 0; pkt_num < sizeof(test_pkts) / sizeof (test_pkts[0]); pkt_num++) {
+ uint8_t ip_ver = test_pkts[pkt_num].ip_ver;
+ uint16_t l4_proto = test_pkts[pkt_num].l4_proto;
+ uint8_t ttl = test_pkts[pkt_num].ttl;
+ uint32_t ip_id = test_pkts[pkt_num].ip_id;
+ uint8_t exp_q;
+ uint16_t pkt_flags = test_pkts[pkt_num].pkt_flags;
+ debug_expected_q_t expected_q = test_pkts[pkt_num].expected_q;
+ switch (expected_q) {
+ case ZERO:
+ exp_q = 0;
+ break;
+ case ONE:
+ exp_q = 1;
+ break;
+ case STL:
+ if ( CGlobalInfo::m_options.is_stateless() ) {
+ exp_q = 1;
+ } else {
+ exp_q = 0;
+ }
+ break;
+ case STF:
+ if ( CGlobalInfo::m_options.is_stateless() ) {
+ exp_q = 0;
+ } else {
+ exp_q = 1;
+ }
+ break;
+ default:
+ exp_q = 0;
+ break;
+ }
+
+ m = create_test_pkt(ip_ver, l4_proto, ttl, ip_id, pkt_flags);
+ assert(m);
+ test_send_pkts(m, 0, sent_num, 0);
+
+ delay(100);
+
+ int pkt_per_q[2];
+ memset(pkt_per_q, 0, sizeof(pkt_per_q));
+ // We don't know which interfaces are connected where, so sum the packets of all queue 0s and all queue 1s across ports
+ for (int port = 0; port < m_max_ports; port++) {
+ for(int queue_id = 0; queue_id <= 1; queue_id++) {
+ lp = &m_ports[port];
+ uint16_t cnt = lp->rx_burst(queue_id, rx_pkts, 32);
+ pkt_per_q[queue_id] += cnt;
+
+ for (int i = 0; i < (int)cnt; i++) {
+ rte_mbuf_t * m = rx_pkts[i];
+ rte_pktmbuf_free(m);
+ }
+ }
+ }
+
+ if (pkt_per_q[exp_q] != sent_num) {
+ printf("Error:");
+ ret = 1;
+ } else {
+ printf ("OK:");
+ }
+ printf("%s q0: %d, q1:%d\n", test_pkts[pkt_num].name, pkt_per_q[0], pkt_per_q[1]);
+
+ }
+ return ret;
+}
+
int CTrexDebug::test_send(uint pkt_type) {
int port_id;
set_promisc_all(true);
- rte_mbuf_t *m, *d, *d2=NULL, *d3=NULL;
- if (pkt_type < 1 || pkt_type > 4) {
+ rte_mbuf_t *m, *d;
+
+ if (pkt_type == D_PKT_TYPE_HW_VERIFY) {
+ return verify_hw_rules();
+ }
+
+ if (! (pkt_type >= 1 && pkt_type <= 4) && !(pkt_type >= 61 && pkt_type <= 63)) {
printf("Unsupported packet type %d\n", pkt_type);
printf("Supported packet types are: %d(ICMP), %d(UDP), %d(TCP) %d(9k UDP)\n", 1, 2, 3, 4);
+ printf(" IPv6: %d(ICMP), %d(UDP), %d(TCP)\n", 61, 62, 63);
exit(-1);
}
- if (pkt_type == 4) {
+ if (pkt_type == D_PKT_TYPE_9k_UDP) {
m = create_udp_9k_pkt();
assert (m);
d = create_pkt_indirect(m, 9*1024+18);
} else {
- d = create_test_pkt(pkt_type, 255, 0xff35);
- // d2 = create_test_pkt(pkt_type, 253, 0xfe01);
- // d3 = create_test_pkt(pkt_type, 251, 0xfe02);
+ uint16_t l4_proto;
+ int ip_ver;
+
+ if (pkt_type > D_PKT_TYPE_IPV6) {
+ ip_ver = 6;
+ pkt_type -= D_PKT_TYPE_IPV6;
+ } else {
+ ip_ver = 4;
+ }
+ if (pkt_type > 3) {
+ printf("Packet type not supported\n");
+ exit(1);
+ }
+
+ switch(pkt_type) {
+ default:
+ case D_PKT_TYPE_ICMP:
+ l4_proto = IPPROTO_ICMP;
+ break;
+ case D_PKT_TYPE_UDP:
+ l4_proto = IPPROTO_UDP;
+ break;
+ case D_PKT_TYPE_TCP:
+ l4_proto = IPPROTO_TCP;
+ break;
+ }
+ d = create_test_pkt(ip_ver, l4_proto, 254, FLOW_STAT_PAYLOAD_IP_ID, 0);
}
if (d == NULL) {
printf("Packet creation failed\n");
@@ -371,14 +640,6 @@ int CTrexDebug::test_send(uint pkt_type) {
test_send_pkts(d, 0, 2, 0);
test_send_pkts(d, 0, 1, 1);
- if (d2) {
- test_send_pkts(d2, 0, 4, 0);
- test_send_pkts(d2, 0, 3, 1);
- }
- if (d3) {
- test_send_pkts(d3, 0, 6, 0);
- test_send_pkts(d3, 0, 5, 1);
- }
delay(1000);
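
For reference, here is a minimal standalone sketch of the packet-type numbering that the reworked test_send() above accepts from --send-debug-pkt (1-3 for IPv4 ICMP/UDP/TCP, 4 for the 9k UDP packet, 61-63 for IPv6 ICMP/UDP/TCP, 100 for the HW rule check). It is illustrative only: decode_debug_pkt_type() and its main() driver are not part of this commit, they simply mirror the switch logic added in debug.cpp.

/* Illustrative sketch, not code from the patch: decoding the debug
 * packet-type numbers introduced above (D_PKT_TYPE_IPV6 = 60 is used as
 * an offset for the IPv6 variants, D_PKT_TYPE_HW_VERIFY = 100). */
#include <netinet/in.h>  /* IPPROTO_ICMP, IPPROTO_UDP, IPPROTO_TCP */
#include <stdio.h>

static int decode_debug_pkt_type(int pkt_type, int *ip_ver, int *l4_proto)
{
    if (pkt_type == 100)          /* D_PKT_TYPE_HW_VERIFY: run verify_hw_rules() */
        return 1;
    if (pkt_type > 60) {          /* D_PKT_TYPE_IPV6 offset */
        *ip_ver = 6;
        pkt_type -= 60;
    } else {
        *ip_ver = 4;
    }
    switch (pkt_type) {
    case 1: *l4_proto = IPPROTO_ICMP; return 0;
    case 2: *l4_proto = IPPROTO_UDP;  return 0;
    case 3: *l4_proto = IPPROTO_TCP;  return 0;
    default: return -1;           /* 4 (9k UDP) is built separately; anything else is unsupported */
    }
}

int main(void)
{
    int ver, proto;
    if (decode_debug_pkt_type(62, &ver, &proto) == 0)
        printf("pkt type 62 -> IPv%d, L4 proto %d\n", ver, proto);
    return 0;
}
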
diff --git a/src/debug.h b/src/debug.h
index 4fc23d93..3ecc3604 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -32,7 +32,8 @@ class CTrexDebug {
rte_mbuf_t *create_udp_9k_pkt();
int set_promisc_all(bool enable);
int test_send_pkts(rte_mbuf_t *, uint16_t queue_id, int pkt, int port);
- rte_mbuf_t *create_test_pkt(int proto, uint8_t ttl, uint16_t ip_id);
+ rte_mbuf_t *create_test_pkt(int ip_ver, uint16_t l4_proto, uint8_t ttl, uint32_t ip_id, uint16_t flags);
+ int verify_hw_rules();
public:
CTrexDebug(CPhyEthIF *m_ports_arg, int max_ports);
diff --git a/src/dpdk/drivers/net/i40e/i40e_ethdev.c b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
index 0e66be74..d9d2b969 100644
--- a/src/dpdk/drivers/net/i40e/i40e_ethdev.c
+++ b/src/dpdk/drivers/net/i40e/i40e_ethdev.c
@@ -784,27 +784,33 @@ static inline void i40e_filter_fields_reg_init(struct i40e_hw *hw)
I40E_WRITE_REG(hw, I40E_GLQF_ORT(12), 0x00000062);
I40E_WRITE_REG(hw, I40E_GLQF_PIT(2), 0x000024A0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 0), 0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 0), 0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 0), 0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(41, 1), 0x00080000);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 0), 0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(43, 1), 0x00080000);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 0), 0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(34, 1), 0x00040000);
- // filter IP according to ttl and L4 protocol
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, 0), 0);
if (trex_mode == 1) {
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 1), 0x00100000);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 1), 0x00100000);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 1), 0x00100000);
+ // stateless
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, 1), 0x00100000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, 1), 0x00100000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, 1), 0x00100000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, 1), 0x0000000000200000ULL);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, 1), 0x0000000000200000ULL);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, 1), 0x0000000000200000ULL);
} else {
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(35, 1), 0x00040000);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(31, 1), 0x00040000);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(33, 1), 0x00040000);
+ //stateful
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_UDP, 1), 0x00040000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_TCP, 1), 0x00040000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER, 1), 0x00040000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_UDP, 1), 0x00080000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_TCP, 1), 0x00080000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER, 1), 0x00080000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP, 1), 0x00040000);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, 0), 0);
+ I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP, 1), 0x00080000);
}
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 0), 0);
- I40E_WRITE_REG(hw, I40E_PRTQF_FD_INSET(44, 1), 0x00080000);
I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0, 34), 0x000DFF00);
I40E_WRITE_REG(hw, I40E_GLQF_FD_MSK(0,44), 0x000C00FF);
I40E_WRITE_FLUSH(hw);
diff --git a/src/dpdk/drivers/net/i40e/i40e_fdir.c b/src/dpdk/drivers/net/i40e/i40e_fdir.c
index 990937ec..33cb6dab 100644
--- a/src/dpdk/drivers/net/i40e/i40e_fdir.c
+++ b/src/dpdk/drivers/net/i40e/i40e_fdir.c
@@ -755,7 +755,11 @@ i40e_fdir_fill_eth_ip_head(const struct rte_eth_fdir_input *fdir_input,
ip6->vtc_flow =
rte_cpu_to_be_32(I40E_FDIR_IPv6_DEFAULT_VTC_FLOW |
(fdir_input->flow.ipv6_flow.tc <<
- I40E_FDIR_IPv6_TC_OFFSET));
+ I40E_FDIR_IPv6_TC_OFFSET)
+#ifdef TREX_PATCH
+ | (fdir_input->flow.ipv6_flow.flow_label & 0x000fffff)
+#endif
+ );
ip6->payload_len =
rte_cpu_to_be_16(I40E_FDIR_IPv6_PAYLOAD_LEN);
ip6->proto = fdir_input->flow.ipv6_flow.proto ?
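
The TREX_PATCH hunk above ORs the 20-bit flow label into the IPv6 vtc_flow word before byte-swapping it to network order. A minimal standalone sketch of that packing, using local constants rather than the DPDK identifiers, purely for illustration:

/* Standalone illustration, not DPDK code: packing version, traffic class
 * and the 20-bit flow label into the 32-bit IPv6 vtc_flow word, as the
 * TREX_PATCH above does via rte_cpu_to_be_32(). */
#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>   /* htonl */

static uint32_t ipv6_vtc_flow(uint8_t tc, uint32_t flow_label)
{
    uint32_t w = (6u << 28)                      /* version: top 4 bits     */
               | ((uint32_t)tc << 20)            /* traffic class: 8 bits   */
               | (flow_label & 0x000fffffu);     /* flow label: low 20 bits */
    return htonl(w);                             /* big-endian on the wire  */
}

int main(void)
{
    printf("vtc_flow = 0x%08x\n", ipv6_vtc_flow(0, 0x12345));
    return 0;
}
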
diff --git a/src/dpdk/lib/librte_ether/rte_eth_ctrl.h b/src/dpdk/lib/librte_ether/rte_eth_ctrl.h
index 96145e86..563e80f8 100644
--- a/src/dpdk/lib/librte_ether/rte_eth_ctrl.h
+++ b/src/dpdk/lib/librte_ether/rte_eth_ctrl.h
@@ -464,6 +464,8 @@ struct rte_eth_ipv6_flow {
uint8_t tc; /**< Traffic class to match. */
uint8_t proto; /**< Protocol, next header to match. */
uint8_t hop_limits; /**< Hop limits to match. */
+ // TREX_PATCH (flow_label)
+ uint32_t flow_label; /**< Flow label to match. */
};
/**
diff --git a/src/main_dpdk.cpp b/src/main_dpdk.cpp
index d17fd66e..6aebe0e1 100644
--- a/src/main_dpdk.cpp
+++ b/src/main_dpdk.cpp
@@ -344,7 +344,8 @@ public:
virtual CFlowStatParser *get_flow_stat_parser();
private:
- virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl, uint16_t ip_id, int queue, uint16_t stat_idx);
+ virtual void add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
+ , uint16_t ip_id, uint16_t l4_proto, int queue, uint16_t stat_idx);
virtual int configure_rx_filter_rules_statfull(CPhyEthIF * _if);
private:
@@ -4163,7 +4164,6 @@ void CGlobalTRex::shutdown() {
for (int i = 0; i < m_max_ports; i++) {
m_ports[i].stop();
}
-
if (m_mark_for_shutdown != SHUTDOWN_TEST_ENDED) {
/* we should stop latency and exit to stop agents */
exit(-1);
@@ -5264,7 +5264,7 @@ int CTRexExtendedDriverBase1G::configure_rx_filter_rules_stateless(CPhyEthIF * _
_if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 4) , 0x00000081);
_if->pci_reg_write( (E1000_FHFT(rule_id)+(1*16) + 8) , 0x30); /* MASK */
// + bytes 16 + 17 (vlan type) should indicate IP.
- _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) , 0x00000080);
+ _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) ) , 0x00000008);
// Was written together with IP ID filter
// _if->pci_reg_write( (E1000_FHFT(rule_id)+(2*16) + 8) , 0x03); /* MASK */
// FLEX_PRIO[[18:16] = 1, RQUEUE[10:8] = 1
@@ -5553,12 +5553,18 @@ void CTRexExtendedDriverBase40G::update_configuration(port_cfg_t * cfg){
}
// What is the type of the rule the respective hw_id counter counts.
-static uint16_t fdir_hw_id_rule_type[512];
+struct fdir_hw_id_params_t {
+ uint16_t rule_type;
+ uint16_t l4_proto;
+};
+
+static struct fdir_hw_id_params_t fdir_hw_id_rule_params[512];
/* Add rule to send packets with protocol 'type', and ttl 'ttl' to rx queue 1 */
// ttl is used in stateful mode, and ip_id in stateless. We configure the driver registers so that only one of them applies.
// So, the rule will match if the packet has either the correct TTL or IP ID, depending on whether we are in stateful or stateless mode.
-void CTRexExtendedDriverBase40G::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl, uint16_t ip_id, int queue, uint16_t stat_idx) {
+void CTRexExtendedDriverBase40G::add_del_rules(enum rte_filter_op op, uint8_t port_id, uint16_t type, uint8_t ttl
+ , uint16_t ip_id, uint16_t l4_proto, int queue, uint16_t stat_idx) {
int ret=rte_eth_dev_filter_supported(port_id, RTE_ETH_FILTER_FDIR);
static int filter_soft_id = 0;
@@ -5585,28 +5591,27 @@ void CTRexExtendedDriverBase40G::add_del_rules(enum rte_filter_op op, uint8_t po
filter.input.flow_type = type;
if (op == RTE_ETH_FILTER_ADD) {
- fdir_hw_id_rule_type[stat_idx] = type;
+ fdir_hw_id_rule_params[stat_idx].rule_type = type;
+ fdir_hw_id_rule_params[stat_idx].l4_proto = l4_proto;
}
switch (type) {
- case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
- filter.input.flow.ip4_flow.ttl=ttl;
- filter.input.flow.ip4_flow.ip_id = ip_id;
- filter.input.flow.ip4_flow.proto = IPPROTO_ICMP; // In this case we want filter for icmp packets
- break;
case RTE_ETH_FLOW_NONFRAG_IPV4_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
case RTE_ETH_FLOW_NONFRAG_IPV4_SCTP:
+ case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
filter.input.flow.ip4_flow.ttl=ttl;
filter.input.flow.ip4_flow.ip_id = ip_id;
+ if (l4_proto != 0)
+ filter.input.flow.ip4_flow.proto = l4_proto;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
- filter.input.flow.ipv6_flow.hop_limits=ttl;
- break;
case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER:
filter.input.flow.ipv6_flow.hop_limits=ttl;
- filter.input.flow.ipv6_flow.proto = IPPROTO_ICMP; // In the future we want to support this
+ filter.input.flow.ipv6_flow.flow_label = ip_id;
+ if (l4_proto != 0)
+ filter.input.flow.ipv6_flow.proto = l4_proto;
break;
}
@@ -5640,7 +5645,7 @@ int CTRexExtendedDriverBase40G::add_del_rx_flow_stat_rule(uint8_t port_id, enum
rte_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
break;
}
- add_del_rules(op, port_id, rte_type, 0, IP_ID_RESERVE_BASE + id, MAIN_DPDK_DATA_Q, rule_id);
+ add_del_rules(op, port_id, rte_type, 0, IP_ID_RESERVE_BASE + id, proto, MAIN_DPDK_DATA_Q, rule_id);
return 0;
}
@@ -5652,17 +5657,15 @@ int CTRexExtendedDriverBase40G::configure_rx_filter_rules_statfull(CPhyEthIF * _
rte_eth_fdir_stats_reset(port_id, NULL, 0, 1);
for (i = 0; i < 2; i++) {
uint8_t ttl = TTL_RESERVE_DUPLICATE - i - hops;
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, ttl, 0, MAIN_DPDK_RX_Q, 0);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, ttl, 0, MAIN_DPDK_RX_Q, 0);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, ttl, 0, MAIN_DPDK_RX_Q, 0);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_TCP, ttl, 0, MAIN_DPDK_RX_Q, 0);
-
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, ttl, 0, 0, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, ttl, 0, 0, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_TCP, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, ttl, 0, RX_CHECK_V6_OPT_TYPE, MAIN_DPDK_RX_Q, 0);
+ /* Rules for latency measurement packets */
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, ttl, 0, IPPROTO_ICMP, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, ttl, 0, 0, MAIN_DPDK_RX_Q, 0);
}
-
- /* Configure rules for latency measurement packets */
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, TTL_RESERVE_DUPLICATE - hops, 0, MAIN_DPDK_RX_Q, 0);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_SCTP, TTL_RESERVE_DUPLICATE - hops, 0, MAIN_DPDK_RX_Q, 0);
-
return 0;
}
@@ -5672,10 +5675,20 @@ extern const uint32_t FLOW_STAT_PAYLOAD_IP_ID;
int CTRexExtendedDriverBase40G::configure_rx_filter_rules(CPhyEthIF * _if) {
if (get_is_stateless()) {
uint32_t port_id = _if->get_port_id();
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, 0, FLOW_STAT_PAYLOAD_IP_ID, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 0, FLOW_STAT_PAYLOAD_IP_ID, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
- add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 0, FLOW_STAT_PAYLOAD_IP_ID, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_UDP, 0
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_TCP, 0
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV4_OTHER, 0
+ , FLOW_STAT_PAYLOAD_IP_ID, IPPROTO_ICMP, MAIN_DPDK_RX_Q, FDIR_PAYLOAD_RULES_HW_ID);
rte_eth_fdir_stats_reset(_if->get_port_id(), NULL, FDIR_TEMP_HW_ID, 1);
+
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_UDP, 0
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_TCP, 0
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, 0);
+ add_del_rules(RTE_ETH_FILTER_ADD, port_id, RTE_ETH_FLOW_NONFRAG_IPV6_OTHER, 0
+ , FLOW_STAT_PAYLOAD_IP_ID, 0, MAIN_DPDK_RX_Q, 0);
return 0; // Other rules are configured dynamically in stateless
} else {
return configure_rx_filter_rules_statfull(_if);
@@ -5723,10 +5736,13 @@ int CTRexExtendedDriverBase40G::get_rx_stats(CPhyEthIF * _if, uint32_t *pkts, ui
uint32_t counter, temp_count;
uint32_t hw_id = start - min + i;
- add_del_rules( RTE_ETH_FILTER_ADD, port_id, fdir_hw_id_rule_type[hw_id], 0, IP_ID_RESERVE_BASE + i, MAIN_DPDK_DATA_Q, FDIR_TEMP_HW_ID);
+ add_del_rules( RTE_ETH_FILTER_ADD, port_id, fdir_hw_id_rule_params[hw_id].rule_type, 0
+ , IP_ID_RESERVE_BASE + i, fdir_hw_id_rule_params[hw_id].l4_proto, MAIN_DPDK_DATA_Q
+ , FDIR_TEMP_HW_ID);
delay(100);
rte_eth_fdir_stats_reset(port_id, &counter, hw_id, 1);
- add_del_rules( RTE_ETH_FILTER_ADD, port_id, fdir_hw_id_rule_type[hw_id], 0, IP_ID_RESERVE_BASE + i, MAIN_DPDK_DATA_Q, hw_id);
+ add_del_rules( RTE_ETH_FILTER_ADD, port_id, fdir_hw_id_rule_params[hw_id].rule_type, 0
+ , IP_ID_RESERVE_BASE + i, fdir_hw_id_rule_params[hw_id].l4_proto, MAIN_DPDK_DATA_Q, hw_id);
delay(100);
rte_eth_fdir_stats_reset(port_id, &temp_count, FDIR_TEMP_HW_ID, 1);
pkts[i] = counter + temp_count - prev_pkts[i];