From 669374787cd8b6381d695306cd6dd9020e16fc90 Mon Sep 17 00:00:00 2001
From: Hanoh Haim
Date: Mon, 6 Feb 2017 15:28:36 +0200
Subject: Our patches to mlx5 in dpdk1702

Signed-off-by: Hanoh Haim
---
 src/dpdk/drivers/net/mlx5/mlx5.c       |  19 +-
 src/dpdk/drivers/net/mlx5/mlx5.h       |  31 ++++
 src/dpdk/drivers/net/mlx5/mlx5_fdir.c  |  93 ++++++++--
 src/dpdk/drivers/net/mlx5/mlx5_rxq.c   |   4 +-
 src/dpdk/drivers/net/mlx5/mlx5_rxtx.h  |   4 +-
 src/dpdk/drivers/net/mlx5/mlx5_stats.c | 323 +++++++++++++++++++++++++--------
 6 files changed, 381 insertions(+), 93 deletions(-)
(limited to 'src')

diff --git a/src/dpdk/drivers/net/mlx5/mlx5.c b/src/dpdk/drivers/net/mlx5/mlx5.c
index d4bd4696..34100a66 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5.c
@@ -181,6 +181,9 @@ mlx5_dev_close(struct rte_eth_dev *dev)
 	}
 	if (priv->reta_idx != NULL)
 		rte_free(priv->reta_idx);
+
+	mlx5_stats_free(dev);
+
 	priv_unlock(priv);
 	memset(priv, 0, sizeof(*priv));
 }
@@ -372,6 +375,15 @@ mlx5_pci_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
 	int idx;
 	int i;
 
+	/* TREX PATCH, lazy ibv_init */
+	static int ibv_was_init=0;
+
+	if (ibv_was_init==0) {
+		ibv_fork_init();
+		ibv_was_init=1;
+	}
+
+
 	(void)pci_drv;
 	assert(pci_drv == &mlx5_driver.pci_drv);
 	/* Get mlx5_dev[] index. */
@@ -533,6 +545,12 @@
 	priv->mps = mps; /* Enable MPW by default if supported. */
 	priv->cqe_comp = 1; /* Enable compression by default. */
 	err = mlx5_args(priv, pci_dev->device.devargs);
+
+	/* TREX PATCH */
+	/* set for maximum performance default */
+	priv->txq_inline =64;
+	priv->txqs_inline =4;
+
 	if (err) {
 		ERROR("failed to process device arguments: %s",
 		      strerror(err));
@@ -787,7 +805,6 @@ rte_mlx5_pmd_init(void)
 	 * using this PMD, which is not supported in forked processes.
 	 */
 	setenv("RDMAV_HUGEPAGES_SAFE", "1", 1);
-	ibv_fork_init();
 	rte_eal_pci_register(&mlx5_driver.pci_drv);
 }
diff --git a/src/dpdk/drivers/net/mlx5/mlx5.h b/src/dpdk/drivers/net/mlx5/mlx5.h
index 879da5ef..5970f31a 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5.h
@@ -89,6 +89,34 @@ enum {
 	PCI_DEVICE_ID_MELLANOX_CONNECTX5EXVF = 0x101a,
 };
 
+struct mlx5_stats_priv {
+
+    struct rte_eth_stats m_shadow;
+    uint32_t n_stats; /* number of counters */
+
+    void * et_stats ;/* point to ethtool counter struct ethtool_stats*/
+
+    /* index into ethtool */
+    uint16_t inx_rx_vport_unicast_bytes;
+    uint16_t inx_rx_vport_multicast_bytes;
+    uint16_t inx_rx_vport_broadcast_bytes;
+    uint16_t inx_rx_vport_unicast_packets;
+    uint16_t inx_rx_vport_multicast_packets;
+    uint16_t inx_rx_vport_broadcast_packets;
+    uint16_t inx_tx_vport_unicast_bytes;
+    uint16_t inx_tx_vport_multicast_bytes;
+    uint16_t inx_tx_vport_broadcast_bytes;
+    uint16_t inx_tx_vport_unicast_packets;
+    uint16_t inx_tx_vport_multicast_packets;
+    uint16_t inx_tx_vport_broadcast_packets;
+    uint16_t inx_rx_wqe_err;
+    uint16_t inx_rx_crc_errors_phy;
+    uint16_t inx_rx_in_range_len_errors_phy;
+    uint16_t inx_rx_symbol_err_phy;
+    uint16_t inx_tx_errors_phy;
+};
+
+
 struct mlx5_xstats_ctrl {
 	/* Number of device stats. */
 	uint16_t stats_n;
@@ -152,6 +180,7 @@ struct priv {
 	uint32_t link_speed_capa; /* Link speed capabilities. */
 	struct mlx5_xstats_ctrl xstats_ctrl; /* Extended stats control. */
 	rte_spinlock_t lock; /* Lock for control functions. */
+	struct mlx5_stats_priv m_stats;
 };
 
 /* Local storage for secondary process data. */
@@ -261,6 +290,8 @@ void mlx5_allmulticast_disable(struct rte_eth_dev *);
 void priv_xstats_init(struct priv *);
 void mlx5_stats_get(struct rte_eth_dev *, struct rte_eth_stats *);
 void mlx5_stats_reset(struct rte_eth_dev *);
+void mlx5_stats_free(struct rte_eth_dev *dev);
+
 int mlx5_xstats_get(struct rte_eth_dev *, struct rte_eth_xstat *, unsigned int);
 void mlx5_xstats_reset(struct rte_eth_dev *);
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
index f80c58b4..92a923d4 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_fdir.c
@@ -37,12 +37,14 @@
 #include
 #include
 
+#define TREX_PATCH
+
 /* Verbs header. */
 /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
 #ifdef PEDANTIC
 #pragma GCC diagnostic ignored "-Wpedantic"
 #endif
-#include
+#include
 #ifdef PEDANTIC
 #pragma GCC diagnostic error "-Wpedantic"
 #endif
@@ -69,6 +71,9 @@ struct fdir_flow_desc {
 	uint16_t src_port;
 	uint32_t src_ip[4];
 	uint32_t dst_ip[4];
+	uint8_t tos;
+	uint8_t ip_id;
+	uint8_t proto;
 	uint8_t mac[6];
 	uint16_t vlan_tag;
 	enum hash_rxq_type type;
@@ -104,6 +109,7 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
 	/* Set VLAN ID. */
 	desc->vlan_tag = fdir_filter->input.flow_ext.vlan_tci;
 
+#ifndef TREX_PATCH
 	/* Set MAC address. */
 	if (mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
 		rte_memcpy(desc->mac,
@@ -113,6 +119,14 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
 		desc->type = HASH_RXQ_ETH;
 		return;
 	}
+#else
+	if (fdir_filter->input.flow.ip4_flow.ip_id == 2) {
+		desc->type = HASH_RXQ_ETH;
+		desc->ip_id = fdir_filter->input.flow.ip4_flow.ip_id;
+		return;
+	}
+#endif
+
 
 	/* Set mode */
 	switch (fdir_filter->input.flow_type) {
@@ -147,6 +161,9 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
 	case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER:
 		desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip;
 		desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip;
+		desc->tos = fdir_filter->input.flow.ip4_flow.ttl; /* TTL is mapped to TOS TREX_PATCH */
+		desc->ip_id = fdir_filter->input.flow.ip4_flow.ip_id;
+		desc->proto = fdir_filter->input.flow.ip4_flow.proto;
 		break;
 	case RTE_ETH_FLOW_NONFRAG_IPV6_UDP:
 	case RTE_ETH_FLOW_NONFRAG_IPV6_TCP:
@@ -160,6 +177,9 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter,
 		rte_memcpy(desc->dst_ip,
 			   fdir_filter->input.flow.ipv6_flow.dst_ip,
 			   sizeof(desc->dst_ip));
+		desc->tos = (uint8_t)fdir_filter->input.flow.ipv6_flow.hop_limits; /* TTL is mapped to TOS - TREX_PATCH */
+		desc->ip_id = (uint8_t)fdir_filter->input.flow.ipv6_flow.flow_label;
+		desc->proto = fdir_filter->input.flow.ipv6_flow.proto;
 		break;
 	default:
 		break;
 	}
@@ -200,6 +220,11 @@ priv_fdir_overlap(const struct priv *priv,
 	    ((desc1->dst_port & mask->dst_port_mask) !=
 	     (desc2->dst_port & mask->dst_port_mask)))
 		return 0;
+	if ( (desc1->tos != desc2->tos) ||
+	     (desc1->ip_id != desc2->ip_id) ||
+	     (desc1->proto != desc2->proto) )
+		return 0;
+
 	switch (desc1->type) {
 	case HASH_RXQ_IPV4:
 	case HASH_RXQ_UDPV4:
@@ -254,8 +279,8 @@ priv_fdir_flow_add(struct priv *priv,
 	struct ibv_exp_flow_attr *attr = &data->attr;
 	uintptr_t spec_offset = (uintptr_t)&data->spec;
 	struct ibv_exp_flow_spec_eth *spec_eth;
-	struct ibv_exp_flow_spec_ipv4 *spec_ipv4;
-	struct ibv_exp_flow_spec_ipv6 *spec_ipv6;
+	struct ibv_exp_flow_spec_ipv4_ext *spec_ipv4;
+	struct ibv_exp_flow_spec_ipv6_ext *spec_ipv6;
 	struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp;
 	struct mlx5_fdir_filter *iter_fdir_filter;
 	unsigned int i;
@@ -267,8 +292,10 @@ priv_fdir_flow_add(struct priv *priv,
 		    (iter_fdir_filter->flow != NULL) &&
 		    (priv_fdir_overlap(priv,
 				       &mlx5_fdir_filter->desc,
-				       &iter_fdir_filter->desc)))
-			return EEXIST;
+				       &iter_fdir_filter->desc))){
+			ERROR("overlap rules, please check your rules");
+			return EEXIST;
+		}
 
 	/*
 	 * No padding must be inserted by the compiler between attr and spec.
@@ -291,6 +318,8 @@ priv_fdir_flow_add(struct priv *priv,
 	/* Update priority */
 	attr->priority = 2;
 
+#ifndef TREX_PATCH
+
 	if (fdir_mode == RTE_FDIR_MODE_PERFECT_MAC_VLAN) {
 		/* MAC Address */
 		for (i = 0; i != RTE_DIM(spec_eth->mask.dst_mac); ++i) {
@@ -300,6 +329,15 @@ priv_fdir_flow_add(struct priv *priv,
 		}
 		goto create_flow;
 	}
+#else
+	// empty mask means "match everything". This rule will match all packets, no matter what is the ether type
+	if (desc->ip_id == 2) {
+		spec_eth->val.ether_type = 0x0806;
+		spec_eth->mask.ether_type = 0x0000;
+		goto create_flow;
+	}
+#endif
+
 
 	switch (desc->type) {
 	case HASH_RXQ_IPV4:
@@ -308,10 +346,10 @@ priv_fdir_flow_add(struct priv *priv,
 		spec_offset += spec_eth->size;
 
 		/* Set IP spec */
-		spec_ipv4 = (struct ibv_exp_flow_spec_ipv4 *)spec_offset;
+		spec_ipv4 = (struct ibv_exp_flow_spec_ipv4_ext *)spec_offset;
 
 		/* The second specification must be IP. */
-		assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4);
+		assert(spec_ipv4->type == IBV_EXP_FLOW_SPEC_IPV4_EXT);
 		assert(spec_ipv4->size == sizeof(*spec_ipv4));
 
 		spec_ipv4->val.src_ip =
@@ -324,6 +362,21 @@ priv_fdir_flow_add(struct priv *priv,
 		/* Update priority */
 		attr->priority = 1;
 
+		spec_ipv4->val.proto = desc->proto & mask->ipv4_mask.proto;
+		spec_ipv4->mask.proto = mask->ipv4_mask.proto;
+
+#ifdef TREX_PATCH
+		/* TOS */
+		if (desc->ip_id == 1) {
+			spec_ipv4->mask.tos = 0x1;
+			spec_ipv4->val.tos = 0x1;
+		} else {
+			spec_ipv4->mask.tos = 0x0;
+			spec_ipv4->val.tos = 0x0;
+		}
+#endif
+
+
 		if (desc->type == HASH_RXQ_IPV4)
 			goto create_flow;
@@ -335,10 +388,10 @@ priv_fdir_flow_add(struct priv *priv,
 		spec_offset += spec_eth->size;
 
 		/* Set IP spec */
-		spec_ipv6 = (struct ibv_exp_flow_spec_ipv6 *)spec_offset;
+		spec_ipv6 = (struct ibv_exp_flow_spec_ipv6_ext *)spec_offset;
 
 		/* The second specification must be IP. */
-		assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6);
+		assert(spec_ipv6->type == IBV_EXP_FLOW_SPEC_IPV6_EXT);
 		assert(spec_ipv6->size == sizeof(*spec_ipv6));
 
 		for (i = 0; i != RTE_DIM(desc->src_ip); ++i) {
@@ -354,6 +407,20 @@ priv_fdir_flow_add(struct priv *priv,
 		rte_memcpy(spec_ipv6->mask.dst_ip,
 			   mask->ipv6_mask.dst_ip,
 			   sizeof(spec_ipv6->mask.dst_ip));
+		spec_ipv6->val.next_hdr = desc->proto & mask->ipv6_mask.proto;
+		spec_ipv6->mask.next_hdr = mask->ipv6_mask.proto;
+
+#ifdef TREX_PATCH
+		/* TOS */
+		if (desc->ip_id == 1) {
+			spec_ipv6->mask.traffic_class = 0x1;
+			spec_ipv6->val.traffic_class = 0x1;
+		} else {
+			spec_ipv6->mask.traffic_class = 0;
+			spec_ipv6->val.traffic_class = 0;
+		}
+#endif
+
 		/* Update priority */
 		attr->priority = 1;
@@ -831,8 +898,10 @@ priv_fdir_filter_add(struct priv *priv,
 	/* Duplicate filters are currently unsupported. */
 	mlx5_fdir_filter = priv_find_filter_in_list(priv, fdir_filter);
 	if (mlx5_fdir_filter != NULL) {
+#ifndef TREX_PATCH
 		ERROR("filter already exists");
-		return EINVAL;
+#endif
+		return EEXIST;
 	}
 
 	/* Create new flow director filter. */
@@ -957,9 +1026,11 @@ priv_fdir_filter_delete(struct priv *priv,
 		return 0;
 	}
 
+#ifndef TREX_PATCH
 	ERROR("%p: flow director delete failed, cannot find filter",
 	      (void *)priv);
-	return EINVAL;
+#endif
+	return ENOENT;
 }
 
 /**
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
index 28e93d3e..c5746fa0 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxq.c
@@ -102,7 +102,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
 				ETH_RSS_FRAG_IPV4),
 		.flow_priority = 1,
 		.flow_spec.ipv4 = {
-			.type = IBV_EXP_FLOW_SPEC_IPV4,
+			.type = IBV_EXP_FLOW_SPEC_IPV4_EXT,
 			.size = sizeof(hash_rxq_init[0].flow_spec.ipv4),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
@@ -140,7 +140,7 @@ const struct hash_rxq_init hash_rxq_init[] = {
 				ETH_RSS_FRAG_IPV6),
 		.flow_priority = 1,
 		.flow_spec.ipv6 = {
-			.type = IBV_EXP_FLOW_SPEC_IPV6,
+			.type = IBV_EXP_FLOW_SPEC_IPV6_EXT,
 			.size = sizeof(hash_rxq_init[0].flow_spec.ipv6),
 		},
 		.underlayer = &hash_rxq_init[HASH_RXQ_ETH],
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
index 41a34d7f..5cf6eaca 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
+++ b/src/dpdk/drivers/net/mlx5/mlx5_rxtx.h
@@ -179,8 +179,8 @@ struct hash_rxq_init {
 			uint16_t size;
 		} hdr;
 		struct ibv_exp_flow_spec_tcp_udp tcp_udp;
-		struct ibv_exp_flow_spec_ipv4 ipv4;
-		struct ibv_exp_flow_spec_ipv6 ipv6;
+		struct ibv_exp_flow_spec_ipv4_ext ipv4;
+		struct ibv_exp_flow_spec_ipv6_ext ipv6;
 		struct ibv_exp_flow_spec_eth eth;
 	} flow_spec; /* Flow specification template. */
 	const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */
diff --git a/src/dpdk/drivers/net/mlx5/mlx5_stats.c b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
index 20c957e8..99f2d2d2 100644
--- a/src/dpdk/drivers/net/mlx5/mlx5_stats.c
+++ b/src/dpdk/drivers/net/mlx5/mlx5_stats.c
@@ -246,6 +246,223 @@ free:
 	rte_free(strings);
 }
 
+
+
+static void
+mlx5_stats_read_hw(struct rte_eth_dev *dev,
+                   struct rte_eth_stats *stats){
+    struct priv *priv = mlx5_get_priv(dev);
+    struct mlx5_stats_priv * lps = &priv->m_stats;
+    unsigned int i;
+
+    struct rte_eth_stats tmp = {0};
+    struct ethtool_stats *et_stats = (struct ethtool_stats *)lps->et_stats;
+    struct ifreq ifr;
+
+    et_stats->cmd = ETHTOOL_GSTATS;
+    et_stats->n_stats = lps->n_stats;
+
+    ifr.ifr_data = (caddr_t) et_stats;
+
+    if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+        WARN("unable to get statistic values for mlnx5 ");
+    }
+
+    tmp.ibytes += et_stats->data[lps->inx_rx_vport_unicast_bytes] +
+                  et_stats->data[lps->inx_rx_vport_multicast_bytes] +
+                  et_stats->data[lps->inx_rx_vport_broadcast_bytes];
+
+    tmp.ipackets += et_stats->data[lps->inx_rx_vport_unicast_packets] +
+                    et_stats->data[lps->inx_rx_vport_multicast_packets] +
+                    et_stats->data[lps->inx_rx_vport_broadcast_packets];
+
+    tmp.ierrors += (et_stats->data[lps->inx_rx_wqe_err] +
+                    et_stats->data[lps->inx_rx_crc_errors_phy] +
+                    et_stats->data[lps->inx_rx_in_range_len_errors_phy] +
+                    et_stats->data[lps->inx_rx_symbol_err_phy]);
+
+    tmp.obytes += et_stats->data[lps->inx_tx_vport_unicast_bytes] +
+                  et_stats->data[lps->inx_tx_vport_multicast_bytes] +
+                  et_stats->data[lps->inx_tx_vport_broadcast_bytes];
+
+    tmp.opackets += (et_stats->data[lps->inx_tx_vport_unicast_packets] +
+                     et_stats->data[lps->inx_tx_vport_multicast_packets] +
+                     et_stats->data[lps->inx_tx_vport_broadcast_packets]);
+
+    tmp.oerrors += et_stats->data[lps->inx_tx_errors_phy];
+
+    /* SW Rx */
+    for (i = 0; (i != priv->rxqs_n); ++i) {
+        struct rxq *rxq = (*priv->rxqs)[i];
+        if (rxq) {
+            tmp.imissed += rxq->stats.idropped;
+            tmp.rx_nombuf += rxq->stats.rx_nombuf;
+        }
+    }
+
+    /*SW Tx */
+    for (i = 0; (i != priv->txqs_n); ++i) {
+        struct txq *txq = (*priv->txqs)[i];
+        if (txq) {
+            tmp.oerrors += txq->stats.odropped;
+        }
+    }
+
+    *stats =tmp;
+}
+
+void
+mlx5_stats_free(struct rte_eth_dev *dev)
+{
+    struct priv *priv = mlx5_get_priv(dev);
+    struct mlx5_stats_priv * lps = &priv->m_stats;
+
+    if ( lps->et_stats ){
+        free(lps->et_stats);
+        lps->et_stats=0;
+    }
+}
+
+
+static void
+mlx5_stats_init(struct rte_eth_dev *dev)
+{
+    struct priv *priv = mlx5_get_priv(dev);
+    struct mlx5_stats_priv * lps = &priv->m_stats;
+    struct rte_eth_stats tmp = {0};
+
+    unsigned int i;
+    unsigned int idx;
+    char ifname[IF_NAMESIZE];
+    struct ifreq ifr;
+
+    struct ethtool_stats *et_stats = NULL;
+    struct ethtool_drvinfo drvinfo;
+    struct ethtool_gstrings *strings = NULL;
+    unsigned int n_stats, sz_str, sz_stats;
+
+    if (priv_get_ifname(priv, &ifname)) {
+        WARN("unable to get interface name");
+        return;
+    }
+    /* How many statistics are available ? */
+    drvinfo.cmd = ETHTOOL_GDRVINFO;
+    ifr.ifr_data = (caddr_t) &drvinfo;
+    if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+        WARN("unable to get driver info for %s", ifname);
+        return;
+    }
+
+    n_stats = drvinfo.n_stats;
+    if (n_stats < 1) {
+        WARN("no statistics available for %s", ifname);
+        return;
+    }
+    lps->n_stats = n_stats;
+
+    /* Allocate memory to grab stat names and values */
+    sz_str = n_stats * ETH_GSTRING_LEN;
+    sz_stats = n_stats * sizeof(uint64_t);
+    strings = calloc(1, sz_str + sizeof(struct ethtool_gstrings));
+    if (!strings) {
+        WARN("unable to allocate memory for strings");
+        return;
+    }
+
+    et_stats = calloc(1, sz_stats + sizeof(struct ethtool_stats));
+    if (!et_stats) {
+        free(strings);
+        WARN("unable to allocate memory for stats");
+    }
+
+    strings->cmd = ETHTOOL_GSTRINGS;
+    strings->string_set = ETH_SS_STATS;
+    strings->len = n_stats;
+    ifr.ifr_data = (caddr_t) strings;
+    if (priv_ifreq(priv, SIOCETHTOOL, &ifr) != 0) {
+        WARN("unable to get statistic names for %s", ifname);
+        free(strings);
+        free(et_stats);
+        return;
+    }
+
+    for (i = 0; (i != n_stats); ++i) {
+
+        const char * curr_string = (const char*) &(strings->data[i * ETH_GSTRING_LEN]);
+
+        if (!strcmp("rx_vport_unicast_bytes", curr_string)) lps->inx_rx_vport_unicast_bytes = i;
+        if (!strcmp("rx_vport_multicast_bytes", curr_string)) lps->inx_rx_vport_multicast_bytes = i;
+        if (!strcmp("rx_vport_broadcast_bytes", curr_string)) lps->inx_rx_vport_broadcast_bytes = i;
+
+        if (!strcmp("rx_vport_unicast_packets", curr_string)) lps->inx_rx_vport_unicast_packets = i;
+        if (!strcmp("rx_vport_multicast_packets", curr_string)) lps->inx_rx_vport_multicast_packets = i;
+        if (!strcmp("rx_vport_broadcast_packets", curr_string)) lps->inx_rx_vport_broadcast_packets = i;
+
+        if (!strcmp("tx_vport_unicast_bytes", curr_string)) lps->inx_tx_vport_unicast_bytes = i;
+        if (!strcmp("tx_vport_multicast_bytes", curr_string)) lps->inx_tx_vport_multicast_bytes = i;
+        if (!strcmp("tx_vport_broadcast_bytes", curr_string)) lps->inx_tx_vport_broadcast_bytes = i;
+
+        if (!strcmp("tx_vport_unicast_packets", curr_string)) lps->inx_tx_vport_unicast_packets = i;
+        if (!strcmp("tx_vport_multicast_packets", curr_string)) lps->inx_tx_vport_multicast_packets = i;
+        if (!strcmp("tx_vport_broadcast_packets", curr_string)) lps->inx_tx_vport_broadcast_packets = i;
+
+        if (!strcmp("rx_wqe_err", curr_string)) lps->inx_rx_wqe_err = i;
+        if (!strcmp("rx_crc_errors_phy", curr_string)) lps->inx_rx_crc_errors_phy = i;
+        if (!strcmp("rx_in_range_len_errors_phy", curr_string)) lps->inx_rx_in_range_len_errors_phy = i;
+        if (!strcmp("rx_symbol_err_phy", curr_string)) lps->inx_rx_symbol_err_phy = i;
+
+        if (!strcmp("tx_errors_phy", curr_string)) lps->inx_tx_errors_phy = i;
+    }
+
+    lps->et_stats =(void *)et_stats;
+
+    if (!lps->inx_rx_vport_unicast_bytes ||
+        !lps->inx_rx_vport_multicast_bytes ||
+        !lps->inx_rx_vport_broadcast_bytes ||
+        !lps->inx_rx_vport_unicast_packets ||
+        !lps->inx_rx_vport_multicast_packets ||
+        !lps->inx_rx_vport_broadcast_packets ||
+        !lps->inx_tx_vport_unicast_bytes ||
+        !lps->inx_tx_vport_multicast_bytes ||
+        !lps->inx_tx_vport_broadcast_bytes ||
+        !lps->inx_tx_vport_unicast_packets ||
+        !lps->inx_tx_vport_multicast_packets ||
+        !lps->inx_tx_vport_broadcast_packets ||
+        !lps->inx_rx_wqe_err ||
+        !lps->inx_rx_crc_errors_phy ||
+        !lps->inx_rx_in_range_len_errors_phy) {
+        WARN("Counters are not recognized %s", ifname);
+        return;
+    }
+
+    mlx5_stats_read_hw(dev,&tmp);
+
+    /* copy yo shadow at first time */
+    lps->m_shadow = tmp;
+
+    free(strings);
+}
+
+
+static void
+mlx5_stats_diff(struct rte_eth_stats *a,
+                struct rte_eth_stats *b,
+                struct rte_eth_stats *c){
+    #define MLX5_DIFF(cnt) { a->cnt = (b->cnt - c->cnt); }
+
+    MLX5_DIFF(ipackets);
+    MLX5_DIFF(opackets);
+    MLX5_DIFF(ibytes);
+    MLX5_DIFF(obytes);
+    MLX5_DIFF(imissed);
+
+    MLX5_DIFF(ierrors);
+    MLX5_DIFF(oerrors);
+    MLX5_DIFF(rx_nombuf);
+}
+
+
 /**
  * Get device extended statistics.
  *
@@ -295,68 +512,25 @@ priv_xstats_reset(struct priv *priv)
 		xstats_ctrl->base[i] = counters[i];
 }
 
-/**
- * DPDK callback to get device statistics.
- *
- * @param dev
- *   Pointer to Ethernet device structure.
- * @param[out] stats
- *   Stats structure output buffer.
- */
 void
 mlx5_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
 {
 	struct priv *priv = mlx5_get_priv(dev);
-	struct rte_eth_stats tmp = {0};
-	unsigned int i;
-	unsigned int idx;
 
-	priv_lock(priv);
-	/* Add software counters. */
-	for (i = 0; (i != priv->rxqs_n); ++i) {
-		struct rxq *rxq = (*priv->rxqs)[i];
-
-		if (rxq == NULL)
-			continue;
-		idx = rxq->stats.idx;
-		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_ipackets[idx] += rxq->stats.ipackets;
-			tmp.q_ibytes[idx] += rxq->stats.ibytes;
-#endif
-			tmp.q_errors[idx] += (rxq->stats.idropped +
-					      rxq->stats.rx_nombuf);
-		}
-#ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.ipackets += rxq->stats.ipackets;
-		tmp.ibytes += rxq->stats.ibytes;
-#endif
-		tmp.ierrors += rxq->stats.idropped;
-		tmp.rx_nombuf += rxq->stats.rx_nombuf;
-	}
-	for (i = 0; (i != priv->txqs_n); ++i) {
-		struct txq *txq = (*priv->txqs)[i];
-
-		if (txq == NULL)
-			continue;
-		idx = txq->stats.idx;
-		if (idx < RTE_ETHDEV_QUEUE_STAT_CNTRS) {
-#ifdef MLX5_PMD_SOFT_COUNTERS
-			tmp.q_opackets[idx] += txq->stats.opackets;
-			tmp.q_obytes[idx] += txq->stats.obytes;
-#endif
-			tmp.q_errors[idx] += txq->stats.odropped;
-		}
-#ifdef MLX5_PMD_SOFT_COUNTERS
-		tmp.opackets += txq->stats.opackets;
-		tmp.obytes += txq->stats.obytes;
-#endif
-		tmp.oerrors += txq->stats.odropped;
-	}
-#ifndef MLX5_PMD_SOFT_COUNTERS
-	/* FIXME: retrieve and add hardware counters. */
-#endif
-	*stats = tmp;
+	struct mlx5_stats_priv * lps = &priv->m_stats;
+	priv_lock(priv);
+
+	if (lps->et_stats == NULL) {
+		mlx5_stats_init(dev);
+	}
+	struct rte_eth_stats tmp = {0};
+
+	mlx5_stats_read_hw(dev,&tmp);
+
+	mlx5_stats_diff(stats,
+			&tmp,
+			&lps->m_shadow);
+
 	priv_unlock(priv);
 }
 
@@ -370,30 +544,25 @@ void
 mlx5_stats_reset(struct rte_eth_dev *dev)
 {
 	struct priv *priv = dev->data->dev_private;
-	unsigned int i;
-	unsigned int idx;
+	struct mlx5_stats_priv * lps = &priv->m_stats;
+
+	priv_lock(priv);
+
+	if (lps->et_stats == NULL) {
+		mlx5_stats_init(dev);
+	}
+	struct rte_eth_stats tmp = {0};
+
+
+	mlx5_stats_read_hw(dev,&tmp);
+
+	/* copy to shadow */
+	lps->m_shadow = tmp;
 
-	priv_lock(priv);
-	for (i = 0; (i != priv->rxqs_n); ++i) {
-		if ((*priv->rxqs)[i] == NULL)
-			continue;
-		idx = (*priv->rxqs)[i]->stats.idx;
-		(*priv->rxqs)[i]->stats =
-			(struct mlx5_rxq_stats){ .idx = idx };
-	}
-	for (i = 0; (i != priv->txqs_n); ++i) {
-		if ((*priv->txqs)[i] == NULL)
-			continue;
-		idx = (*priv->txqs)[i]->stats.idx;
-		(*priv->txqs)[i]->stats =
-			(struct mlx5_txq_stats){ .idx = idx };
-	}
-#ifndef MLX5_PMD_SOFT_COUNTERS
-	/* FIXME: reset hardware counters. */
-#endif
 	priv_unlock(priv);
 }
 
+
 /**
  * DPDK callback to get extended device statistics.
  *
--
cgit 1.2.3-korg