Diffstat (limited to 'drivers/net/thunderx')
 drivers/net/thunderx/Makefile              |  4
 drivers/net/thunderx/base/nicvf_bsvf.c     | 12
 drivers/net/thunderx/base/nicvf_bsvf.h     |  2
 drivers/net/thunderx/base/nicvf_hw_defs.h  | 58
 drivers/net/thunderx/base/nicvf_mbox.c     |  9
 drivers/net/thunderx/base/nicvf_mbox.h     | 11
 drivers/net/thunderx/base/nicvf_plat.h     | 40
 drivers/net/thunderx/nicvf_ethdev.c        | 99
 drivers/net/thunderx/nicvf_rxtx.c          | 46
 drivers/net/thunderx/nicvf_rxtx.h          | 27
 drivers/net/thunderx/nicvf_struct.h        | 23
 11 files changed, 186 insertions(+), 145 deletions(-)
diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile
index bcab5f93..706250b8 100644
--- a/drivers/net/thunderx/Makefile
+++ b/drivers/net/thunderx/Makefile
@@ -65,8 +65,4 @@ CFLAGS_nicvf_rxtx.o += -fno-prefetch-loop-arrays
endif
CFLAGS_nicvf_rxtx.o += -Ofast
-# this lib depends upon:
-DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_eal lib/librte_ether
-DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_mempool lib/librte_mbuf
-
include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.c b/drivers/net/thunderx/base/nicvf_bsvf.c
index 9e028a3a..49a2646d 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.c
+++ b/drivers/net/thunderx/base/nicvf_bsvf.c
@@ -37,7 +37,7 @@
#include "nicvf_bsvf.h"
#include "nicvf_plat.h"
-static SIMPLEQ_HEAD(, svf_entry) head = SIMPLEQ_HEAD_INITIALIZER(head);
+static STAILQ_HEAD(, svf_entry) head = STAILQ_HEAD_INITIALIZER(head);
void
nicvf_bsvf_push(struct svf_entry *entry)
@@ -45,7 +45,7 @@ nicvf_bsvf_push(struct svf_entry *entry)
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_INSERT_TAIL(&head, entry, next);
+ STAILQ_INSERT_TAIL(&head, entry, next);
}
struct svf_entry *
@@ -53,14 +53,14 @@ nicvf_bsvf_pop(void)
{
struct svf_entry *entry;
- assert(!SIMPLEQ_EMPTY(&head));
+ assert(!STAILQ_EMPTY(&head));
- entry = SIMPLEQ_FIRST(&head);
+ entry = STAILQ_FIRST(&head);
assert(entry != NULL);
assert(entry->vf != NULL);
- SIMPLEQ_REMOVE_HEAD(&head, next);
+ STAILQ_REMOVE_HEAD(&head, next);
return entry;
}
@@ -68,5 +68,5 @@ nicvf_bsvf_pop(void)
int
nicvf_bsvf_empty(void)
{
- return SIMPLEQ_EMPTY(&head);
+ return STAILQ_EMPTY(&head);
}
diff --git a/drivers/net/thunderx/base/nicvf_bsvf.h b/drivers/net/thunderx/base/nicvf_bsvf.h
index 5d5a25e2..fb9b2484 100644
--- a/drivers/net/thunderx/base/nicvf_bsvf.h
+++ b/drivers/net/thunderx/base/nicvf_bsvf.h
@@ -41,7 +41,7 @@ struct nicvf;
* The base queue structure to hold secondary qsets.
*/
struct svf_entry {
- SIMPLEQ_ENTRY(svf_entry) next; /**< Next element's pointer */
+ STAILQ_ENTRY(svf_entry) next; /**< Next element's pointer */
struct nicvf *vf; /**< Holder of a secondary qset */
};
diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h
index 00dd2feb..79f83c8d 100644
--- a/drivers/net/thunderx/base/nicvf_hw_defs.h
+++ b/drivers/net/thunderx/base/nicvf_hw_defs.h
@@ -36,6 +36,8 @@
#include <stdint.h>
#include <stdbool.h>
+#include "nicvf_plat.h"
+
/* Virtual function register offsets */
#define NIC_VF_CFG (0x000020)
@@ -213,10 +215,6 @@
typedef uint64_t nicvf_phys_addr_t;
-#ifndef __BYTE_ORDER__
-#error __BYTE_ORDER__ not defined
-#endif
-
/* vNIC HW Enumerations */
enum nic_send_ld_type_e {
@@ -559,7 +557,7 @@ enum nic_stat_vnic_tx_e {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4;
uint64_t stdn_fault:1;
uint64_t rsvd0:1;
@@ -604,7 +602,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t pkt_len:16;
uint64_t l2_ptr:8;
uint64_t l3_ptr:8;
@@ -629,7 +627,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rss_tag:32;
uint64_t vlan_tci:16;
uint64_t vlan_ptr:8;
@@ -646,7 +644,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb3_sz;
uint16_t rb2_sz;
uint16_t rb1_sz;
@@ -663,7 +661,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb7_sz;
uint16_t rb6_sz;
uint16_t rb5_sz;
@@ -680,7 +678,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint16_t rb11_sz;
uint16_t rb10_sz;
uint16_t rb9_sz;
@@ -697,7 +695,7 @@ typedef union {
typedef union {
uint64_t u64;
struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t vlan_found:1;
uint64_t vlan_stripped:1;
uint64_t vlan2_found:1;
@@ -742,7 +740,7 @@ struct cqe_rx_t {
};
struct cqe_rx_tcp_err_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:60;
@@ -764,7 +762,7 @@ struct cqe_rx_tcp_err_t {
};
struct cqe_rx_tcp_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:52;
uint64_t cq_tcp_status:8;
@@ -786,7 +784,7 @@ struct cqe_rx_tcp_t {
};
struct cqe_send_t {
-#if defined(__BIG_ENDIAN_BITFIELD)
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4; /* W0 */
uint64_t rsvd0:4;
uint64_t sqe_ptr:16;
@@ -798,7 +796,7 @@ struct cqe_send_t {
uint64_t send_status:8;
uint64_t ptp_timestamp:64; /* W1 */
-#elif defined(__LITTLE_ENDIAN_BITFIELD)
+#elif NICVF_BYTE_ORDER == NICVF_LITTLE_ENDIAN
uint64_t send_status:8;
uint64_t rsvd3:8;
uint64_t sq_idx:3;
@@ -814,7 +812,7 @@ struct cqe_send_t {
};
struct cq_entry_type_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t cqe_type:4;
uint64_t __pad:60;
#else
@@ -835,7 +833,7 @@ union cq_entry_t {
NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512);
struct rbdr_entry_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
union {
struct {
uint64_t rsvd0:15;
@@ -860,7 +858,7 @@ NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t));
/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t tcp_pkt_cnt:12;
uint64_t rsvd1:4;
uint64_t align_hdr_bytes:4;
@@ -899,7 +897,7 @@ struct rx_hdr_t {
};
struct sq_crc_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rsvd1:32;
uint64_t crc_ival:32;
uint64_t subdesc_type:4;
@@ -921,7 +919,7 @@ struct sq_crc_subdesc {
};
struct sq_gather_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t ld_type:2;
uint64_t rsvd0:42;
@@ -942,7 +940,7 @@ struct sq_gather_subdesc {
/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t rsvd0:46;
uint64_t len:14;
@@ -958,7 +956,7 @@ struct sq_imm_subdesc {
};
struct sq_mem_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4; /* W0 */
uint64_t mem_alg:4;
uint64_t mem_dsz:2;
@@ -982,7 +980,7 @@ struct sq_mem_subdesc {
};
struct sq_hdr_subdesc {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t subdesc_type:4;
uint64_t tso:1;
uint64_t post_cqe:1; /* Post CQE on no error also */
@@ -1045,7 +1043,7 @@ NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16);
/* Queue config register formats */
struct rq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_2_63:62;
uint64_t ena:1;
uint64_t reserved_0:1;
@@ -1059,7 +1057,7 @@ struct rq_cfg { union { struct {
}; };
struct cq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_43_63:21;
uint64_t ena:1;
uint64_t reset:1;
@@ -1085,7 +1083,7 @@ struct cq_cfg { union { struct {
}; };
struct sq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_20_63:44;
uint64_t ena:1;
uint64_t reserved_18_18:1;
@@ -1111,7 +1109,7 @@ struct sq_cfg { union { struct {
}; };
struct rbdr_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_45_63:19;
uint64_t ena:1;
uint64_t reset:1;
@@ -1139,7 +1137,7 @@ struct rbdr_cfg { union { struct {
}; };
struct pf_qs_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved_32_63:32;
uint64_t ena:1;
uint64_t reserved_27_30:4;
@@ -1169,7 +1167,7 @@ struct pf_qs_cfg { union { struct {
}; };
struct pf_rq_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t reserved1:1;
uint64_t reserved0:34;
uint64_t strip_pre_l2:1;
@@ -1197,7 +1195,7 @@ struct pf_rq_cfg { union { struct {
}; };
struct pf_rq_drop_cfg { union { struct {
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
uint64_t rbdr_red:1;
uint64_t cq_red:1;
uint64_t reserved3:14;
diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c
index 3b7b8a51..a072f19d 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.c
+++ b/drivers/net/thunderx/base/nicvf_mbox.c
@@ -62,9 +62,6 @@ static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
[NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
[NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
- [NIC_MBOX_MSG_RES_BIT] = "NIC_MBOX_MSG_RES_BIT",
- [NIC_MBOX_MSG_RSS_SIZE_RES_BIT] = "NIC_MBOX_MSG_RSS_SIZE",
- [NIC_MBOX_MSG_ALLOC_SQS_RES_BIT] = "NIC_MBOX_MSG_ALLOC_SQS",
};
static inline const char * __attribute__((unused))
@@ -176,7 +173,7 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
case NIC_MBOX_MSG_NACK:
nic->pf_nacked = true;
break;
- case NIC_MBOX_MSG_RSS_SIZE_RES_BIT:
+ case NIC_MBOX_MSG_RSS_SIZE:
nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size;
nic->pf_acked = true;
break;
@@ -186,7 +183,7 @@ nicvf_handle_mbx_intr(struct nicvf *nic)
nic->speed = mbx.link_status.speed;
nic->pf_acked = true;
break;
- case NIC_MBOX_MSG_ALLOC_SQS_RES_BIT:
+ case NIC_MBOX_MSG_ALLOC_SQS:
assert_primary(nic);
if (mbx.sqs_alloc.qs_count != nic->sqs_count) {
nicvf_log_error("Received %" PRIu8 "/%" PRIu8
@@ -331,7 +328,7 @@ nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg)
{
struct nic_mbx mbx = { .msg = { 0 } };
-#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#if NICVF_BYTE_ORDER == NICVF_BIG_ENDIAN
qs_cfg->be = 1;
#endif
/* Send a mailbox msg to PF to config Qset */
diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h
index 084f3a76..8675fe8f 100644
--- a/drivers/net/thunderx/base/nicvf_mbox.h
+++ b/drivers/net/thunderx/base/nicvf_mbox.h
@@ -68,16 +68,10 @@
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */
-#define NIC_MBOX_MSG_CFG_DONE 0x7E /* VF configuration done */
-#define NIC_MBOX_MSG_SHUTDOWN 0x7F /* VF is being shutdown */
-#define NIC_MBOX_MSG_RES_BIT 0x80 /* Reset bit from PF */
+#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
+#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
#define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */
-#define NIC_MBOX_MSG_RSS_SIZE_RES_BIT \
- (NIC_MBOX_MSG_RSS_SIZE | NIC_MBOX_MSG_RES_BIT)
-#define NIC_MBOX_MSG_ALLOC_SQS_RES_BIT \
- (NIC_MBOX_MSG_ALLOC_SQS | NIC_MBOX_MSG_RES_BIT)
-
/* Get vNIC VF configuration */
struct nic_cfg_msg {
uint8_t msg;
@@ -157,6 +151,7 @@ struct rss_cfg_msg {
/* Physical interface link status */
struct bgx_link_status {
uint8_t msg;
+ uint8_t mac_type;
uint8_t link_up;
uint8_t duplex;
uint32_t speed;
diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h
index 83c1844d..36da1200 100644
--- a/drivers/net/thunderx/base/nicvf_plat.h
+++ b/drivers/net/thunderx/base/nicvf_plat.h
@@ -65,35 +65,23 @@
#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x)
#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x)
+#define NICVF_BYTE_ORDER RTE_BYTE_ORDER
+#define NICVF_BIG_ENDIAN RTE_BIG_ENDIAN
+#define NICVF_LITTLE_ENDIAN RTE_LITTLE_ENDIAN
+
/* Constants */
#include <rte_ether.h>
#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN
+#include <rte_io.h>
+#define nicvf_addr_write(addr, val) rte_write64_relaxed((val), (void *)(addr))
+#define nicvf_addr_read(addr) rte_read64_relaxed((void *)(addr))
+
/* ARM64 specific functions */
#if defined(RTE_ARCH_ARM64)
#define nicvf_prefetch_store_keep(_ptr) ({\
asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); })
-static inline void __attribute__((always_inline))
-nicvf_addr_write(uintptr_t addr, uint64_t val)
-{
- asm volatile(
- "str %x[val], [%x[addr]]"
- :
- : [val] "r" (val), [addr] "r" (addr));
-}
-
-static inline uint64_t __attribute__((always_inline))
-nicvf_addr_read(uintptr_t addr)
-{
- uint64_t val;
-
- asm volatile(
- "ldr %x[val], [%x[addr]]"
- : [val] "=r" (val)
- : [addr] "r" (addr));
- return val;
-}
#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \
asm volatile( \
@@ -106,18 +94,6 @@ nicvf_addr_read(uintptr_t addr)
#define nicvf_prefetch_store_keep(_ptr) do {} while (0)
-static inline void __attribute__((always_inline))
-nicvf_addr_write(uintptr_t addr, uint64_t val)
-{
- *(volatile uint64_t *)addr = val;
-}
-
-static inline uint64_t __attribute__((always_inline))
-nicvf_addr_read(uintptr_t addr)
-{
- return *(volatile uint64_t *)addr;
-}
-
#define NICVF_LOAD_PAIR(reg1, reg2, addr) \
do { \
reg1 = nicvf_addr_read((uintptr_t)addr); \
diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c
index 466e49ce..e4910c9b 100644
--- a/drivers/net/thunderx/nicvf_ethdev.c
+++ b/drivers/net/thunderx/nicvf_ethdev.c
@@ -41,7 +41,6 @@
#include <inttypes.h>
#include <netinet/in.h>
#include <sys/queue.h>
-#include <sys/timerfd.h>
#include <rte_alarm.h>
#include <rte_atomic.h>
@@ -54,6 +53,7 @@
#include <rte_eal.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
+#include <rte_ethdev_pci.h>
#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_memory.h>
@@ -145,16 +145,29 @@ nicvf_periodic_alarm_stop(void (fn)(void *), void *arg)
* Return 0 means link status changed, -1 means not changed
*/
static int
-nicvf_dev_link_update(struct rte_eth_dev *dev,
- int wait_to_complete __rte_unused)
+nicvf_dev_link_update(struct rte_eth_dev *dev, int wait_to_complete)
{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
struct rte_eth_link link;
struct nicvf *nic = nicvf_pmd_priv(dev);
+ int i;
PMD_INIT_FUNC_TRACE();
- memset(&link, 0, sizeof(link));
- nicvf_set_eth_link_status(nic, &link);
+ if (wait_to_complete) {
+ /* rte_eth_link_get() might need to wait up to 9 seconds */
+ for (i = 0; i < MAX_CHECK_TIME; i++) {
+ memset(&link, 0, sizeof(link));
+ nicvf_set_eth_link_status(nic, &link);
+ if (link.link_status)
+ break;
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+ } else {
+ memset(&link, 0, sizeof(link));
+ nicvf_set_eth_link_status(nic, &link);
+ }
return nicvf_atomic_write_link_status(dev, &link);
}
@@ -245,7 +258,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx);
@@ -258,7 +271,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx);
@@ -277,7 +290,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
/* Reading per RX ring stats */
for (qidx = rx_start; qidx <= rx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_rx_qstats(snic, &rx_qstats,
@@ -290,7 +303,7 @@ nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats)
nicvf_tx_range(dev, snic, &tx_start, &tx_end);
/* Reading per TX ring stats */
for (qidx = tx_start; qidx <= tx_end; qidx++) {
- if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS)
+ if (qidx >= RTE_ETHDEV_QUEUE_STAT_CNTRS)
break;
nicvf_hw_get_tx_qstats(snic, &tx_qstats,
@@ -1219,6 +1232,23 @@ nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx)
return nicvf_vf_stop_tx_queue(dev, nic, qidx);
}
+static inline void
+nicvf_rxq_mbuf_setup(struct nicvf_rxq *rxq)
+{
+ uintptr_t p;
+ struct rte_mbuf mb_def;
+
+ RTE_BUILD_BUG_ON(sizeof(union mbuf_initializer) != 8);
+ mb_def.nb_segs = 1;
+ mb_def.data_off = RTE_PKTMBUF_HEADROOM;
+ mb_def.port = rxq->port_id;
+ rte_mbuf_refcnt_set(&mb_def, 1);
+
+ /* Prevent compiler reordering: rearm_data covers previous fields */
+ rte_compiler_barrier();
+ p = (uintptr_t)&mb_def.rearm_data;
+ rxq->mbuf_initializer.value = *(uint64_t *)p;
+}
static int
nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
@@ -1311,6 +1341,7 @@ nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx,
else
rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD;
+ nicvf_rxq_mbuf_setup(rxq);
/* Alloc completion queue */
if (nicvf_qset_cq_alloc(dev, nic, rxq, rxq->queue_id, nb_desc)) {
@@ -1335,9 +1366,12 @@ static void
nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
{
struct nicvf *nic = nicvf_pmd_priv(dev);
+ struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
PMD_INIT_FUNC_TRACE();
+ dev_info->pci_dev = RTE_DEV_TO_PCI(dev->device);
+
dev_info->min_rx_bufsize = ETHER_MIN_MTU;
dev_info->max_rx_pktlen = NIC_HW_MAX_FRS;
dev_info->max_rx_queues =
@@ -1345,7 +1379,7 @@ nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
dev_info->max_tx_queues =
(uint16_t)MAX_SND_QUEUES_PER_QS * (MAX_SQS_PER_VF + 1);
dev_info->max_mac_addrs = 1;
- dev_info->max_vfs = dev->pci_dev->max_vfs;
+ dev_info->max_vfs = pci_dev->max_vfs;
dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP;
dev_info->tx_offload_capa =
@@ -1407,7 +1441,7 @@ static int
nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
{
int ret;
- uint16_t qidx;
+ uint16_t qidx, data_off;
uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs;
uint64_t mbuf_phys_off = 0;
struct nicvf_rxq *rxq;
@@ -1448,10 +1482,18 @@ nicvf_vf_start(struct rte_eth_dev *dev, struct nicvf *nic, uint32_t rbdrsz)
nic->vf_id, qidx, rxq->pool->name);
return -ENOMEM;
}
- rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf);
- rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM;
+ data_off = nicvf_mbuff_meta_length(mbuf);
+ data_off += RTE_PKTMBUF_HEADROOM;
rte_pktmbuf_free(mbuf);
+ if (data_off % RTE_CACHE_LINE_SIZE) {
+ PMD_INIT_LOG(ERR, "%s: unaligned data_off=%d delta=%d",
+ rxq->pool->name, data_off,
+ data_off % RTE_CACHE_LINE_SIZE);
+ return -EINVAL;
+ }
+ rxq->mbuf_phys_off -= data_off;
+
if (mbuf_phys_off == 0)
mbuf_phys_off = rxq->mbuf_phys_off;
if (mbuf_phys_off != rxq->mbuf_phys_off) {
@@ -1975,7 +2017,7 @@ nicvf_eth_dev_init(struct rte_eth_dev *eth_dev)
}
}
- pci_dev = eth_dev->pci_dev;
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
rte_eth_copy_pci_info(eth_dev, pci_dev);
nic->device_id = pci_dev->id.device_id;
@@ -2108,16 +2150,25 @@ static const struct rte_pci_id pci_id_nicvf_map[] = {
},
};
-static struct eth_driver rte_nicvf_pmd = {
- .pci_drv = {
- .id_table = pci_id_nicvf_map,
- .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
- .probe = rte_eth_dev_pci_probe,
- .remove = rte_eth_dev_pci_remove,
- },
- .eth_dev_init = nicvf_eth_dev_init,
- .dev_private_size = sizeof(struct nicvf),
+static int nicvf_eth_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct nicvf),
+ nicvf_eth_dev_init);
+}
+
+static int nicvf_eth_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
+}
+
+static struct rte_pci_driver rte_nicvf_pmd = {
+ .id_table = pci_id_nicvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC,
+ .probe = nicvf_eth_pci_probe,
+ .remove = nicvf_eth_pci_remove,
};
-RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd.pci_drv);
+RTE_PMD_REGISTER_PCI(net_thunderx, rte_nicvf_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_thunderx, pci_id_nicvf_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_thunderx, "* igb_uio | uio_pci_generic | vfio");
diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c
index fc43b747..6cae8341 100644
--- a/drivers/net/thunderx/nicvf_rxtx.c
+++ b/drivers/net/thunderx/nicvf_rxtx.c
@@ -430,9 +430,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
union cq_entry_t *desc = rxq->desc;
const uint64_t cqe_mask = rxq->qlen_mask;
uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
uint32_t cqe_head = rxq->head & cqe_mask;
int32_t available_space = rxq->available_space;
- uint8_t port_id = rxq->port_id;
const uint8_t rbptr_offset = rxq->rbptr_offset;
to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space);
@@ -448,17 +448,12 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset);
pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt
(rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off);
-
pkt->ol_flags = 0;
- pkt->port = port_id;
pkt->data_len = cqe_rx_w3.rb0_sz;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = 1;
pkt->pkt_len = cqe_rx_w3.rb0_sz;
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
-
+ nicvf_mbuff_init_update(pkt, mbuf_init, cqe_rx_w1.align_pad);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
- rte_mbuf_refcnt_set(pkt, 1);
rx_pkts[i] = pkt;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(pkt);
@@ -469,11 +464,10 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += to_process;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -= nicvf_fill_rbdr(rxq,
- rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
@@ -481,8 +475,9 @@ nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts)
static inline uint16_t __hot
nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
- uint64_t mbuf_phys_off, uint8_t port_id,
- struct rte_mbuf **rx_pkt, uint8_t rbptr_offset)
+ uint64_t mbuf_phys_off,
+ struct rte_mbuf **rx_pkt, uint8_t rbptr_offset,
+ uint64_t mbuf_init)
{
struct rte_mbuf *pkt, *seg, *prev;
cqe_rx_word0_t cqe_rx_w0;
@@ -501,12 +496,10 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
(rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off);
pkt->ol_flags = 0;
- pkt->port = port_id;
- pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad;
- pkt->nb_segs = nb_segs;
pkt->pkt_len = cqe_rx_w1.pkt_len;
pkt->data_len = rb_sz[nicvf_frag_num(0)];
- rte_mbuf_refcnt_set(pkt, 1);
+ nicvf_mbuff_init_mseg_update(
+ pkt, mbuf_init, cqe_rx_w1.align_pad, nb_segs);
pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0);
nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt);
@@ -518,9 +511,7 @@ nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx,
prev->next = seg;
seg->data_len = rb_sz[nicvf_frag_num(seg_idx)];
- seg->port = port_id;
- seg->data_off = RTE_PKTMBUF_HEADROOM;
- rte_mbuf_refcnt_set(seg, 1);
+ nicvf_mbuff_init_update(seg, mbuf_init, 0);
prev = seg;
}
@@ -541,7 +532,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
uint32_t i, to_process, cqe_head, buffers_consumed = 0;
int32_t available_space = rxq->available_space;
uint16_t nb_segs;
- const uint8_t port_id = rxq->port_id;
+ const uint64_t mbuf_init = rxq->mbuf_initializer.value;
const uint8_t rbptr_offset = rxq->rbptr_offset;
cqe_head = rxq->head & cqe_mask;
@@ -552,7 +543,7 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
cq_entry = &desc[cqe_head];
cqe_rx = (struct cqe_rx_t *)cq_entry;
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off,
- port_id, rx_pkts + i, rbptr_offset);
+ rx_pkts + i, rbptr_offset, mbuf_init);
buffers_consumed += nb_segs;
cqe_head = (cqe_head + 1) & cqe_mask;
nicvf_prefetch_store_keep(rx_pkts[i]);
@@ -563,11 +554,10 @@ nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts,
rxq->head = cqe_head;
nicvf_addr_write(rxq->cq_door, to_process);
rxq->recv_buffers += buffers_consumed;
- if (rxq->recv_buffers > rxq->rx_free_thresh) {
- rxq->recv_buffers -=
- nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
- NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
- }
+ }
+ if (rxq->recv_buffers > rxq->rx_free_thresh) {
+ rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rxq->rx_free_thresh);
+ NICVF_RX_ASSERT(rxq->recv_buffers >= 0);
}
return to_process;
diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h
index 9dad8a5a..3631ff22 100644
--- a/drivers/net/thunderx/nicvf_rxtx.h
+++ b/drivers/net/thunderx/nicvf_rxtx.h
@@ -84,6 +84,33 @@ fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt)
}
#endif
+static inline void
+nicvf_mbuff_init_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
+static inline void
+nicvf_mbuff_init_mseg_update(struct rte_mbuf *pkt, const uint64_t mbuf_init,
+ uint16_t apad, uint16_t nb_segs)
+{
+ union mbuf_initializer init = {.value = mbuf_init};
+#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN
+ init.fields.data_off += apad;
+#else
+ init.value += apad;
+#endif
+ init.fields.nb_segs = nb_segs;
+ *(uint64_t *)(&pkt->rearm_data) = init.value;
+}
+
uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx);
uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx);
diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h
index c900e121..34c41b79 100644
--- a/drivers/net/thunderx/nicvf_struct.h
+++ b/drivers/net/thunderx/nicvf_struct.h
@@ -43,8 +43,8 @@
#include <rte_memory.h>
struct nicvf_rbdr {
- uint64_t rbdr_status;
- uint64_t rbdr_door;
+ uintptr_t rbdr_status;
+ uintptr_t rbdr_door;
struct rbdr_entry_t *desc;
nicvf_phys_addr_t phys;
uint32_t buffsz;
@@ -58,8 +58,8 @@ struct nicvf_txq {
union sq_entry_t *desc;
nicvf_phys_addr_t phys;
struct rte_mbuf **txbuffs;
- uint64_t sq_head;
- uint64_t sq_door;
+ uintptr_t sq_head;
+ uintptr_t sq_door;
struct rte_mempool *pool;
struct nicvf *nic;
void (*pool_free)(struct nicvf_txq *sq);
@@ -72,10 +72,21 @@ struct nicvf_txq {
uint16_t tx_free_thresh;
} __rte_cache_aligned;
+union mbuf_initializer {
+ struct {
+ uint16_t data_off;
+ uint16_t refcnt;
+ uint16_t nb_segs;
+ uint16_t port;
+ } fields;
+ uint64_t value;
+};
+
struct nicvf_rxq {
uint64_t mbuf_phys_off;
- uint64_t cq_status;
- uint64_t cq_door;
+ uintptr_t cq_status;
+ uintptr_t cq_door;
+ union mbuf_initializer mbuf_initializer;
nicvf_phys_addr_t phys;
union cq_entry_t *desc;
struct nicvf_rbdr *shared_rbdr;