Diffstat (limited to 'src/dpdk/drivers/net/mlx5')
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5.c            20
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5.h            31
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_autoconf.h     8
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_fdir.c        67
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_rxq.c          4
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_rxtx.h         4
-rw-r--r--  src/dpdk/drivers/net/mlx5/mlx5_stats.c      321
7 files changed, 369 insertions(+), 86 deletions(-)
diff --git a/src/dpdk/drivers/net/mlx5/mlx5.c b/src/dpdk/drivers/net/mlx5/mlx5.c
index d96a9aff..303b917b 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5.c
@@ -181,6 +181,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
}
if (priv->reta_idx != NULL)
rte_free(priv->reta_idx);
+
+ mlx5_stats_free(dev);
+
priv_unlock(priv);
memset(priv, 0, sizeof(*priv));
}
@@ -366,6 +369,13 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
unsigned int mps;
int idx;
int i;
+ static int ibv_was_init = 0;
+
+ if (ibv_was_init == 0) {
+ ibv_fork_init();
+ ibv_was_init = 1;
+ }
+
(void)pci_drv;
assert(pci_drv == &mlx5_driver.pci_drv);
@@ -511,7 +521,16 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
priv->mtu = ETHER_MTU;
priv->mps = mps; /* Enable MPW by default if supported. */
priv->cqe_comp = 1; /* Enable compression by default. */
+
+
err = mlx5_args(priv, pci_dev->devargs);
+
+ /* TREX PATCH */
+ /* override devargs with defaults tuned for maximum performance */
+ priv->txq_inline = 128;
+ priv->txqs_inline = 4;
+
+
if (err) {
ERROR("failed to process device arguments: %s",
strerror(err));
@@ -751,7 +770,6 @@ rte_mlx5_pmd_init(const char *name, const char *args)
* using this PMD, which is not supported in forked processes.
*/
setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
- ibv_fork_init();
rte_eal_pci_register(&mlx5_driver.pci_drv);
return 0;
}
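Note on the hunks above: ibv_fork_init() moves from rte_mlx5_pmd_init() into the PCI probe callback, guarded by a static flag so it runs only once per process (libibverbs requires it to be called before any verbs resources are created). A minimal stand-alone sketch of that once-only guard follows; it assumes probing is single-threaded, as the EAL guarantees, and the helper name is illustrative, not part of the patch:

/* sketch only: mirrors the ibv_was_init guard added in mlx5_pci_devinit() */
#include <infiniband/verbs.h>

static void trex_ibv_init_once(void)   /* hypothetical helper name */
{
        static int done;                /* zero-initialized, one flag per process */

        if (!done) {
                ibv_fork_init();        /* must precede any verbs allocations */
                done = 1;
        }
}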
diff --git a/src/dpdk/drivers/net/mlx5/mlx5.h b/src/dpdk/drivers/net/mlx5/mlx5.h
index 3a866098..68bad904 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5.h
@@ -84,6 +84,34 @@ enum {
PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF = 0x1016,
};
+struct mlx5_stats_priv {
+
+ struct rte_eth_stats m_shadow;
+ uint32_t n_stats; /* number of counters */
+
+ void *et_stats; /* points to the ethtool counters block (struct ethtool_stats) */
+
+ /* indices into the ethtool counter array */
+ uint16_t inx_rx_vport_unicast_bytes;
+ uint16_t inx_rx_vport_multicast_bytes;
+ uint16_t inx_rx_vport_broadcast_bytes;
+ uint16_t inx_rx_vport_unicast_packets;
+ uint16_t inx_rx_vport_multicast_packets;
+ uint16_t inx_rx_vport_broadcast_packets;
+ uint16_t inx_tx_vport_unicast_bytes;
+ uint16_t inx_tx_vport_multicast_bytes;
+ uint16_t inx_tx_vport_broadcast_bytes;
+ uint16_t inx_tx_vport_unicast_packets;
+ uint16_t inx_tx_vport_multicast_packets;
+ uint16_t inx_tx_vport_broadcast_packets;
+ uint16_t inx_rx_wqe_err;
+ uint16_t inx_rx_crc_errors_phy;
+ uint16_t inx_rx_in_range_len_errors_phy;
+ uint16_t inx_rx_symbol_err_phy;
+ uint16_t inx_tx_errors_phy;
+};
+
+
struct priv {
struct rte_eth_dev *dev; /* Ethernet device. */
struct ibv_context *ctx; /* Verbs context. */
@@ -135,6 +163,7 @@ struct priv {
unsigned int reta_idx_n; /* RETA index size. */
struct fdir_filter_list *fdir_filter_list; /* Flow director rules. */
rte_spinlock_t lock; /* Lock for control functions. */
+ struct mlx5_stats_priv m_stats;
};
/* Local storage for secondary process data. */
@@ -243,6 +272,8 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *);
void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
void mlx5_stats_reset(struct rte_eth_dev *);
+void mlx5_stats_free(struct rte_eth_dev *dev);
+
/* mlx5_vlan.c */
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
new file mode 100644
index 00000000..9fdfff84
--- /dev/null
+++ b/src/dpdk/drivers/net/mlx5/mlx5_autoconf.h
@@ -0,0 +1,8 @@
+#ifndef HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE
+#define HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE 1
+#endif /* HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE */
+
+#ifndef HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE
+#define HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE 1
+#endif /* HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE */
+
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
index 73eb00ec..84fb5d03 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
@@ -42,7 +42,7 @@
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-pedantic"
#endif
-#include <infiniband/verbs.h>
+#include <infiniband/verbs_exp.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-pedantic"
#endif
@@ -67,6 +67,10 @@ struct fdir_flow_desc {
uint16_t src_port;
uint32_t src_ip[4];
uint32_t dst_ip[4];
+ uint8_t tos;
+ uint8_t ip_id;
+ uint8_t proto;
+
uint8_t mac[6];
uint16_t vlan_tag;
enum hash_rxq_type type;
@@ -141,9 +145,13 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
case RTE_ETH_FLOW_NONFRAG_IPV4_TCP:
desc->src_port = fdir_filter->input.flow.udp4_flow.src_port;
desc->dst_port = fdir_filter->input.flow.udp4_flow.dst_port;
+ /* fall through */
case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
+ desc->tos = fdir_filter->input.flow.ip4_flow.ttl; /* TTL field is mapped onto TOS */
+ desc->ip_id = fdir_filter->input.flow.ip4_flow.ip_id;
+ desc->proto = fdir_filter->input.flow.ip4_flow.proto;
break;
case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
@@ -157,12 +165,17 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
rte_memcpy(desc->dst_ip,
fdir_filter->input.flow.ipv6_flow.dst_ip,
sizeof(desc->dst_ip));
+ desc->tos = (uint8_t)fdir_filter->input.flow.ipv6_flow.hop_limits; /* hop limit (TTL) is mapped onto TOS */
+ desc->ip_id = (uint8_t)fdir_filter->input.flow.ipv6_flow.flow_label;
+ desc->proto = fdir_filter->input.flow.ipv6_flow.proto;
+
break;
default:
break;
}
}
+
/**
* Check if two flow descriptors overlap according to configured mask.
*
@@ -197,6 +210,12 @@ priv_fdir_overlap(const struct priv *priv,
((desc1->dst_port & mask->dst_port_mask) !=
(desc2->dst_port & mask->dst_port_mask)))
return 0;
+
+ if ((desc1->tos != desc2->tos) ||
+     (desc1->ip_id != desc2->ip_id) ||
+     (desc1->proto != desc2->proto))
+ return 0;
+
switch (desc1->type) {
case HASH_RXQ_IPV4:
case HASH_RXQ_UDPV4:
@@ -204,8 +223,9 @@ priv_fdir_overlap(const struct priv *priv,
if (((desc1->src_ip[0] & mask->ipv4_mask.src_ip) !=
(desc2->src_ip[0] & mask->ipv4_mask.src_ip)) ||
((desc1->dst_ip[0] & mask->ipv4_mask.dst_ip) !=
- (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
+ (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip)))
return 0;
+
break;
case HASH_RXQ_IPV6:
case HASH_RXQ_UDPV6:
@@ -251,8 +271,8 @@ priv_fdir_flow_add(struct priv *priv,
struct ibv_exp_flow_attr *attr = &data->attr;
uintptr_t spec_offset = (uintptr_t)&data->spec;
struct ibv_exp_flow_spec_eth *spec_eth;
- struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
- struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
+ struct ibv_exp_flow_spec_ipv4_ext *spec_ipv4;
+ struct ibv_exp_flow_spec_ipv6_ext *spec_ipv6;
struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
struct mlx5_fdir_filter *iter_fdir_filter;
unsigned int i;
@@ -264,8 +284,10 @@ priv_fdir_flow_add(struct priv *priv,
(iter_fdir_filter->flow != NULL) &&
(priv_fdir_overlap(priv,
&mlx5_fdir_filter->desc,
- &iter_fdir_filter->desc)))
- return EEXIST;
+ &iter_fdir_filter->desc))) {
+ ERROR("overlapping flow director rules detected, please check your rules");
+ return EEXIST;
+ }
/*
* No padding must be inserted by the compiler between attr and spec.
@@ -305,10 +327,10 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_eth->size;
/* Set IP spec */
- spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
+ spec_ipv4 = (struct ibv_exp_flow_spec_ipv4_ext *)spec_offset;
/* The second specification must be IP. */
- assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
+ assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4_EXT);
assert(spec_ipv4->size == sizeof(*spec_ipv4));
spec_ipv4->val.src_ip =
@@ -318,6 +340,19 @@ priv_fdir_flow_add(struct priv *priv,
spec_ipv4->mask.src_ip = mask->ipv4_mask.src_ip;
spec_ipv4->mask.dst_ip = mask->ipv4_mask.dst_ip;
+ /* PROTO */
+ spec_ipv4->val.proto = desc->proto & mask->ipv4_mask.proto;
+ spec_ipv4->mask.proto = mask->ipv4_mask.proto;
+
+ /* TOS: ip_id == 1 requests matching on the least-significant TOS bit */
+ if (desc->ip_id == 1) {
+ spec_ipv4->mask.tos = 0x1;
+ } else {
+ spec_ipv4->mask.tos = 0x0;
+ }
+ spec_ipv4->val.tos = desc->tos & spec_ipv4->mask.tos;
+
/* Update priority */
attr->priority = 1;
@@ -332,10 +367,10 @@ priv_fdir_flow_add(struct priv *priv,
spec_offset += spec_eth->size;
/* Set IP spec */
- spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
+ spec_ipv6 = (struct ibv_exp_flow_spec_ipv6_ext *)spec_offset;
/* The second specification must be IP. */
- assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
+ assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6_EXT);
assert(spec_ipv6->size == sizeof(*spec_ipv6));
for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
@@ -351,6 +386,18 @@ priv_fdir_flow_add(struct priv *priv,
mask->ipv6_mask.dst_ip,
sizeof(spec_ipv6->mask.dst_ip));
+ spec_ipv6->val.next_hdr = desc->proto & mask->ipv6_mask.proto;
+ spec_ipv6->mask.next_hdr = mask->ipv6_mask.proto;
+
+ /* TOS: ip_id == 1 requests matching on the least-significant traffic-class bit */
+ if (desc->ip_id == 1) {
+ spec_ipv6->mask.traffic_class = 0x1;
+ } else {
+ spec_ipv6->mask.traffic_class = 0x0;
+ }
+ spec_ipv6->val.traffic_class = desc->tos & spec_ipv6->mask.traffic_class;
+
/* Update priority */
attr->priority = 1;
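For context on how the extended IPv4/IPv6 specs above are driven: the PMD copies proto, ttl (reused as TOS) and ip_id out of the rte_eth_fdir_filter, and treats ip_id == 1 as "also match the least-significant TOS bit". A hedged caller-side sketch follows; it only illustrates which filter fields the patched code reads. The concrete addresses, queue and port numbers are made up, and the ip_id field follows what the patch reads (it is not part of stock DPDK headers of that era):

/* sketch: an fdir rule the patched PMD would translate into an
 * ibv_exp_flow_spec_ipv4_ext matching src/dst IP, proto and TOS bit 0.
 * Note: the port's rte_fdir_conf.mask must also enable IP/proto matching. */
#include <string.h>
#include <stdint.h>
#include <netinet/in.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <rte_ip.h>

static int add_example_rule(uint8_t port_id)
{
        struct rte_eth_fdir_filter f;

        memset(&f, 0, sizeof(f));
        f.input.flow_type = RTE_ETH_FLOW_NONFRAG_IPV4_OTHER;
        f.input.flow.ip4_flow.src_ip = rte_cpu_to_be_32(IPv4(16, 0, 0, 1));
        f.input.flow.ip4_flow.dst_ip = rte_cpu_to_be_32(IPv4(48, 0, 0, 1));
        f.input.flow.ip4_flow.proto  = IPPROTO_UDP; /* -> spec_ipv4->val.proto */
        f.input.flow.ip4_flow.ttl    = 0x01;        /* -> desc->tos (TTL mapped to TOS) */
        f.input.flow.ip4_flow.ip_id  = 1;           /* enables the TOS LSB mask */
        f.action.rx_queue = 1;
        f.action.behavior = RTE_ETH_FDIR_ACCEPT;

        return rte_eth_dev_filter_ctrl(port_id, RTE_ETH_FILTER_FDIR,
                                       RTE_ETH_FILTER_ADD, &f);
}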
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
index 29c137cd..6be01d39 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
@@ -102,7 +102,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
ETH_RSS_FRAG_IPV4),
.flow_priority = 1,
.flow_spec.ipv4 = {
- .type = IBV_EXP_FLOW_SPEC_IPV4,
+ .type = IBV_EXP_FLOW_SPEC_IPV4_EXT,
.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
@@ -140,7 +140,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
ETH_RSS_FRAG_IPV6),
.flow_priority = 1,
.flow_spec.ipv6 = {
- .type = IBV_EXP_FLOW_SPEC_IPV6,
+ .type = IBV_EXP_FLOW_SPEC_IPV6_EXT,
.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
},
.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
index f6e2cbac..d87dd19b 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
@@ -173,8 +173,8 @@ struct hash_rxq_init {
uint16_t size;
} hdr;
struct ibv_exp_flow_spec_tcp_udp tcp_udp;
- struct ibv_exp_flow_spec_ipv4 ipv4;
- struct ibv_exp_flow_spec_ipv6 ipv6;
+ struct ibv_exp_flow_spec_ipv4_ext ipv4;
+ struct ibv_exp_flow_spec_ipv6_ext ipv6;
struct ibv_exp_flow_spec_eth eth;
} flow_spec; /* Flow specification template. */
const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_stats.c b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
index 2d3cb519..788ef939 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_stats.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
@@ -44,6 +44,10 @@
#include "mlx5_rxtx.h"
#include "mlx5_defs.h"
+
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
/**
* DPDK callback to get device statistics.
*
@@ -52,60 +56,241 @@
* @param[out] stats
* Stats structure output buffer.
*/
+
+
+static void
+mlx5_stats_read_hw(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats){
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ unsigned int i;
+
+ struct rte_eth_stats tmp = {0};
+ struct ethtool_stats *et_stats = (struct ethtool_stats *)lps->et_stats;
+ struct ifreq ifr;
+
+ et_stats->cmd = ETHTOOL_GSTATS;
+ et_stats->n_stats = lps->n_stats;
+
+ ifr.ifr_data = (caddr_t) et_stats;
+
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic values for mlnx5 ");
+ }
+
+ tmp.ibytes += et_stats->data[lps->inx_rx_vport_unicast_bytes] +
+ et_stats->data[lps->inx_rx_vport_multicast_bytes] +
+ et_stats->data[lps->inx_rx_vport_broadcast_bytes];
+
+ tmp.ipackets += et_stats->data[lps->inx_rx_vport_unicast_packets] +
+ et_stats->data[lps->inx_rx_vport_multicast_packets] +
+ et_stats->data[lps->inx_rx_vport_broadcast_packets];
+
+ tmp.ierrors += (et_stats->data[lps->inx_rx_wqe_err] +
+ et_stats->data[lps->inx_rx_crc_errors_phy] +
+ et_stats->data[lps->inx_rx_in_range_len_errors_phy] +
+ et_stats->data[lps->inx_rx_symbol_err_phy]);
+
+ tmp.obytes += et_stats->data[lps->inx_tx_vport_unicast_bytes] +
+ et_stats->data[lps->inx_tx_vport_multicast_bytes] +
+ et_stats->data[lps->inx_tx_vport_broadcast_bytes];
+
+ tmp.opackets += (et_stats->data[lps->inx_tx_vport_unicast_packets] +
+ et_stats->data[lps->inx_tx_vport_multicast_packets] +
+ et_stats->data[lps->inx_tx_vport_broadcast_packets]);
+
+ tmp.oerrors += et_stats->data[lps->inx_tx_errors_phy];
+
+ /* SW Rx */
+ for (i = 0; (i != priv->rxqs_n); ++i) {
+ struct rxq *rxq = (*priv->rxqs)[i];
+ if (rxq) {
+ tmp.imissed += rxq->stats.idropped;
+ tmp.rx_nombuf += rxq->stats.rx_nombuf;
+ }
+ }
+
+ /* SW Tx */
+ for (i = 0; (i != priv->txqs_n); ++i) {
+ struct txq *txq = (*priv->txqs)[i];
+ if (txq) {
+ tmp.oerrors += txq->stats.odropped;
+ }
+ }
+
+ *stats = tmp;
+}
+
+void
+mlx5_stats_free(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+
+ if (lps->et_stats) {
+ free(lps->et_stats);
+ lps->et_stats = NULL;
+ }
+}
+
+
+static void
+mlx5_stats_init(struct rte_eth_dev *dev)
+{
+ struct priv *priv = mlx5_get_priv(dev);
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ struct rte_eth_stats tmp = {0};
+
+ unsigned int i;
+ char ifname[IF_NAMESIZE];
+ struct ifreq ifr;
+
+ struct ethtool_stats *et_stats = NULL;
+ struct ethtool_drvinfo drvinfo;
+ struct ethtool_gstrings *strings = NULL;
+ unsigned int n_stats, sz_str, sz_stats;
+
+ if (priv_get_ifname(priv, &ifname)) {
+ WARN("unable to get interface name");
+ return;
+ }
+ /* How many statistics are available? */
+ drvinfo.cmd = ETHTOOL_GDRVINFO;
+ ifr.ifr_data = (caddr_t) &drvinfo;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get driver info for %s", ifname);
+ return;
+ }
+
+ n_stats = drvinfo.n_stats;
+ if (n_stats < 1) {
+ WARN("no statistics available for %s", ifname);
+ return;
+ }
+ lps->n_stats = n_stats;
+
+ /* Allocate memory to grab stat names and values */
+ sz_str = n_stats * ETH_GSTRING_LEN;
+ sz_stats = n_stats * sizeof(uint64_t);
+ strings = calloc(1, sz_str + sizeof(struct ethtool_gstrings));
+ if (!strings) {
+ WARN("unable to allocate memory for strings");
+ return;
+ }
+
+ et_stats = calloc(1, sz_stats + sizeof(struct ethtool_stats));
+ if (!et_stats) {
+ free(strings);
+ WARN("unable to allocate memory for stats");
+ return;
+ }
+
+ strings->cmd = ETHTOOL_GSTRINGS;
+ strings->string_set = ETH_SS_STATS;
+ strings->len = n_stats;
+ ifr.ifr_data = (caddr_t) strings;
+ if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+ WARN("unable to get statistic names for %s", ifname);
+ free(strings);
+ free(et_stats);
+ return;
+ }
+
+ for (i = 0; (i != n_stats); ++i) {
+
+ const char * curr_string = (const char*) &(strings->data[i * ETH_GSTRING_LEN]);
+
+ if (!strcmp("rx_vport_unicast_bytes", curr_string)) lps->inx_rx_vport_unicast_bytes = i;
+ if (!strcmp("rx_vport_multicast_bytes", curr_string)) lps->inx_rx_vport_multicast_bytes = i;
+ if (!strcmp("rx_vport_broadcast_bytes", curr_string)) lps->inx_rx_vport_broadcast_bytes = i;
+
+ if (!strcmp("rx_vport_unicast_packets", curr_string)) lps->inx_rx_vport_unicast_packets = i;
+ if (!strcmp("rx_vport_multicast_packets", curr_string)) lps->inx_rx_vport_multicast_packets = i;
+ if (!strcmp("rx_vport_broadcast_packets", curr_string)) lps->inx_rx_vport_broadcast_packets = i;
+
+ if (!strcmp("tx_vport_unicast_bytes", curr_string)) lps->inx_tx_vport_unicast_bytes = i;
+ if (!strcmp("tx_vport_multicast_bytes", curr_string)) lps->inx_tx_vport_multicast_bytes = i;
+ if (!strcmp("tx_vport_broadcast_bytes", curr_string)) lps->inx_tx_vport_broadcast_bytes = i;
+
+ if (!strcmp("tx_vport_unicast_packets", curr_string)) lps->inx_tx_vport_unicast_packets = i;
+ if (!strcmp("tx_vport_multicast_packets", curr_string)) lps->inx_tx_vport_multicast_packets = i;
+ if (!strcmp("tx_vport_broadcast_packets", curr_string)) lps->inx_tx_vport_broadcast_packets = i;
+
+ if (!strcmp("rx_wqe_err", curr_string)) lps->inx_rx_wqe_err = i;
+ if (!strcmp("rx_crc_errors_phy", curr_string)) lps->inx_rx_crc_errors_phy = i;
+ if (!strcmp("rx_in_range_len_errors_phy", curr_string)) lps->inx_rx_in_range_len_errors_phy = i;
+ if (!strcmp("rx_symbol_err_phy", curr_string)) lps->inx_rx_symbol_err_phy = i;
+
+ if (!strcmp("tx_errors_phy", curr_string)) lps->inx_tx_errors_phy = i;
+ }
+
+ lps->et_stats = (void *)et_stats;
+
+ if (!lps->inx_rx_vport_unicast_bytes ||
+ !lps->inx_rx_vport_multicast_bytes ||
+ !lps->inx_rx_vport_broadcast_bytes ||
+ !lps->inx_rx_vport_unicast_packets ||
+ !lps->inx_rx_vport_multicast_packets ||
+ !lps->inx_rx_vport_broadcast_packets ||
+ !lps->inx_tx_vport_unicast_bytes ||
+ !lps->inx_tx_vport_multicast_bytes ||
+ !lps->inx_tx_vport_broadcast_bytes ||
+ !lps->inx_tx_vport_unicast_packets ||
+ !lps->inx_tx_vport_multicast_packets ||
+ !lps->inx_tx_vport_broadcast_packets ||
+ !lps->inx_rx_wqe_err ||
+ !lps->inx_rx_crc_errors_phy ||
+ !lps->inx_rx_in_range_len_errors_phy) {
+ WARN("Counters are not recognized %s", ifname);
+ return;
+ }
+
+ mlx5_stats_read_hw(dev, &tmp);
+
+ /* copy to shadow on the first read */
+ lps->m_shadow = tmp;
+
+ free(strings);
+}
+
+
+static void
+mlx5_stats_diff(struct rte_eth_stats *a,
+ struct rte_eth_stats *b,
+ struct rte_eth_stats *c){
+ #define MLX5_DIFF(cnt) { a->cnt = (b->cnt - c->cnt); }
+
+ MLX5_DIFF(ipackets);
+ MLX5_DIFF(opackets);
+ MLX5_DIFF(ibytes);
+ MLX5_DIFF(obytes);
+ MLX5_DIFF(imissed);
+
+ MLX5_DIFF(ierrors);
+ MLX5_DIFF(oerrors);
+ MLX5_DIFF(rx_nombuf);
+}
+
+
void
mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
{
struct priv *priv = mlx5_get_priv(dev);
- struct rte_eth_stats tmp = {0};
- unsigned int i;
- unsigned int idx;
-
- priv_lock(priv);
- /* Add software counters. */
- for (i = 0; (i != priv->rxqs_n); ++i) {
- struct rxq *rxq = (*priv->rxqs)[i];
-
- if (rxq == NULL)
- continue;
- idx = rxq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.q_ipackets[idx] += rxq->stats.ipackets;
- tmp.q_ibytes[idx] += rxq->stats.ibytes;
-#endif
- tmp.q_errors[idx] += (rxq->stats.idropped +
- rxq->stats.rx_nombuf);
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.ipackets += rxq->stats.ipackets;
- tmp.ibytes += rxq->stats.ibytes;
-#endif
- tmp.ierrors += rxq->stats.idropped;
- tmp.rx_nombuf += rxq->stats.rx_nombuf;
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- struct txq *txq = (*priv->txqs)[i];
-
- if (txq == NULL)
- continue;
- idx = txq->stats.idx;
- if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.q_opackets[idx] += txq->stats.opackets;
- tmp.q_obytes[idx] += txq->stats.obytes;
-#endif
- tmp.q_errors[idx] += txq->stats.odropped;
- }
-#ifdef MLX5_PMD_SOFT_COUNTERS
- tmp.opackets += txq->stats.opackets;
- tmp.obytes += txq->stats.obytes;
-#endif
- tmp.oerrors += txq->stats.odropped;
- }
-#ifndef MLX5_PMD_SOFT_COUNTERS
- /* FIXME: retrieve and add hardware counters. */
-#endif
- *stats = tmp;
+
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+ priv_lock(priv);
+
+ if (lps->et_stats == NULL) {
+ mlx5_stats_init(dev);
+ }
+ struct rte_eth_stats tmp = {0};
+
+ mlx5_stats_read_hw(dev, &tmp);
+
+ mlx5_stats_diff(stats,
+ &tmp,
+ &lps->m_shadow);
+
priv_unlock(priv);
}
@@ -119,26 +304,20 @@ void
mlx5_stats_reset(struct rte_eth_dev *dev)
{
struct priv *priv = dev->data->dev_private;
- unsigned int i;
- unsigned int idx;
-
- priv_lock(priv);
- for (i = 0; (i != priv->rxqs_n); ++i) {
- if ((*priv->rxqs)[i] == NULL)
- continue;
- idx = (*priv->rxqs)[i]->stats.idx;
- (*priv->rxqs)[i]->stats =
- (struct mlx5_rxq_stats){ .idx = idx };
- }
- for (i = 0; (i != priv->txqs_n); ++i) {
- if ((*priv->txqs)[i] == NULL)
- continue;
- idx = (*priv->txqs)[i]->stats.idx;
- (*priv->txqs)[i]->stats =
- (struct mlx5_txq_stats){ .idx = idx };
- }
-#ifndef MLX5_PMD_SOFT_COUNTERS
- /* FIXME: reset hardware counters. */
-#endif
+ struct mlx5_stats_priv * lps = &priv->m_stats;
+
+ priv_lock(priv);
+
+ if (lps->et_stats == NULL) {
+ mlx5_stats_init(dev);
+ }
+ struct rte_eth_stats tmp = {0};
+
+ mlx5_stats_read_hw(dev, &tmp);
+
+ /* copy to shadow */
+ lps->m_shadow = tmp;
+
priv_unlock(priv);
}
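Taken together, the new stats path keeps a software baseline on top of the free-running ethtool counters: mlx5_stats_reset() snapshots the current hardware values into m_shadow, and mlx5_stats_get() reports the current values minus that snapshot via mlx5_stats_diff(). A minimal stand-alone model of the same scheme follows; only the idea matches the patch, all names and numbers below are illustrative:

/* shadow_counters.c: minimal model of the baseline-and-diff scheme */
#include <stdint.h>
#include <stdio.h>

struct counters { uint64_t ipackets, ibytes; };

static struct counters hw;      /* stands in for the free-running ethtool counters */
static struct counters shadow;  /* baseline captured at reset time */

static void stats_reset(void) { shadow = hw; }

static void stats_get(struct counters *out)
{
        out->ipackets = hw.ipackets - shadow.ipackets;
        out->ibytes   = hw.ibytes   - shadow.ibytes;
}

int main(void)
{
        struct counters s;

        hw = (struct counters){ .ipackets = 1000, .ibytes = 64000 };
        stats_reset();                          /* baseline at 1000 packets */
        hw.ipackets += 250;
        hw.ibytes   += 16000;
        stats_get(&s);
        printf("%llu packets, %llu bytes since reset\n",
               (unsigned long long)s.ipackets, (unsigned long long)s.ibytes);
        return 0;                               /* prints: 250 packets, 16000 bytes since reset */
}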