Diffstat (limited to 'drivers/net/axgbe')
 -rw-r--r--  drivers/net/axgbe/Makefile                    |   35
 -rw-r--r--  drivers/net/axgbe/axgbe_common.h              | 1710
 -rw-r--r--  drivers/net/axgbe/axgbe_dev.c                 | 1103
 -rw-r--r--  drivers/net/axgbe/axgbe_ethdev.c              |  770
 -rw-r--r--  drivers/net/axgbe/axgbe_ethdev.h              |  586
 -rw-r--r--  drivers/net/axgbe/axgbe_i2c.c                 |  331
 -rw-r--r--  drivers/net/axgbe/axgbe_logs.h                |   26
 -rw-r--r--  drivers/net/axgbe/axgbe_mdio.c                | 1066
 -rw-r--r--  drivers/net/axgbe/axgbe_phy.h                 |  192
 -rw-r--r--  drivers/net/axgbe/axgbe_phy_impl.c            | 2191
 -rw-r--r--  drivers/net/axgbe/axgbe_rxtx.c                |  674
 -rw-r--r--  drivers/net/axgbe/axgbe_rxtx.h                |  186
 -rw-r--r--  drivers/net/axgbe/axgbe_rxtx_vec_sse.c        |   93
 -rw-r--r--  drivers/net/axgbe/meson.build                 |   19
 -rw-r--r--  drivers/net/axgbe/rte_pmd_axgbe_version.map   |    4
 15 files changed, 8986 insertions(+), 0 deletions(-)
diff --git a/drivers/net/axgbe/Makefile b/drivers/net/axgbe/Makefile
new file mode 100644
index 00000000..72215aed
--- /dev/null
+++ b/drivers/net/axgbe/Makefile
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_pmd_axgbe.a
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+EXPORT_MAP := rte_pmd_axgbe_version.map
+
+LIBABIVER := 1
+
+LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool
+LDLIBS += -lrte_pci -lrte_bus_pci
+LDLIBS += -lrte_ethdev
+
+#
+# all sources are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_ethdev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_dev.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_mdio.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_phy_impl.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_i2c.c
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx.c
+ifeq ($(CONFIG_RTE_ARCH_X86),y)
+SRCS-$(CONFIG_RTE_LIBRTE_AXGBE_PMD) += axgbe_rxtx_vec_sse.c
+endif
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/net/axgbe/axgbe_common.h b/drivers/net/axgbe/axgbe_common.h
new file mode 100644
index 00000000..d25d54ca
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_common.h
@@ -0,0 +1,1710 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_COMMON_H__
+#define __AXGBE_COMMON_H__
+
+#include "axgbe_logs.h"
+
+#include <stdbool.h>
+#include <limits.h>
+#include <sys/queue.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <inttypes.h>
+#include <pthread.h>
+
+#include <rte_byteorder.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_hexdump.h>
+#include <rte_log.h>
+#include <rte_debug.h>
+#include <rte_branch_prediction.h>
+#include <rte_eal.h>
+#include <rte_memzone.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_dev.h>
+#include <rte_errno.h>
+#include <rte_ethdev_pci.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+
+#define BIT(nr) (1 << (nr))
+#ifndef ARRAY_SIZE
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
+#endif
+
+#define AXGBE_HZ 250
+
+/* DMA register offsets */
+#define DMA_MR 0x3000
+#define DMA_SBMR 0x3004
+#define DMA_ISR 0x3008
+#define DMA_AXIARCR 0x3010
+#define DMA_AXIAWCR 0x3018
+#define DMA_AXIAWRCR 0x301c
+#define DMA_DSR0 0x3020
+#define DMA_DSR1 0x3024
+#define EDMA_TX_CONTROL 0x3040
+#define EDMA_RX_CONTROL 0x3044
+
+/* DMA register entry bit positions and sizes */
+#define DMA_AXIARCR_DRC_INDEX 0
+#define DMA_AXIARCR_DRC_WIDTH 4
+#define DMA_AXIARCR_DRD_INDEX 4
+#define DMA_AXIARCR_DRD_WIDTH 2
+#define DMA_AXIARCR_TEC_INDEX 8
+#define DMA_AXIARCR_TEC_WIDTH 4
+#define DMA_AXIARCR_TED_INDEX 12
+#define DMA_AXIARCR_TED_WIDTH 2
+#define DMA_AXIARCR_THC_INDEX 16
+#define DMA_AXIARCR_THC_WIDTH 4
+#define DMA_AXIARCR_THD_INDEX 20
+#define DMA_AXIARCR_THD_WIDTH 2
+#define DMA_AXIAWCR_DWC_INDEX 0
+#define DMA_AXIAWCR_DWC_WIDTH 4
+#define DMA_AXIAWCR_DWD_INDEX 4
+#define DMA_AXIAWCR_DWD_WIDTH 2
+#define DMA_AXIAWCR_RPC_INDEX 8
+#define DMA_AXIAWCR_RPC_WIDTH 4
+#define DMA_AXIAWCR_RPD_INDEX 12
+#define DMA_AXIAWCR_RPD_WIDTH 2
+#define DMA_AXIAWCR_RHC_INDEX 16
+#define DMA_AXIAWCR_RHC_WIDTH 4
+#define DMA_AXIAWCR_RHD_INDEX 20
+#define DMA_AXIAWCR_RHD_WIDTH 2
+#define DMA_AXIAWCR_RDC_INDEX 24
+#define DMA_AXIAWCR_RDC_WIDTH 4
+#define DMA_AXIAWCR_RDD_INDEX 28
+#define DMA_AXIAWCR_RDD_WIDTH 2
+#define DMA_AXIAWRCR_TDWC_INDEX 0
+#define DMA_AXIAWRCR_TDWC_WIDTH 4
+#define DMA_AXIAWRCR_TDWD_INDEX 4
+#define DMA_AXIAWRCR_TDWD_WIDTH 4
+#define DMA_AXIAWRCR_RDRC_INDEX 8
+#define DMA_AXIAWRCR_RDRC_WIDTH 4
+#define DMA_ISR_MACIS_INDEX 17
+#define DMA_ISR_MACIS_WIDTH 1
+#define DMA_ISR_MTLIS_INDEX 16
+#define DMA_ISR_MTLIS_WIDTH 1
+#define DMA_MR_INTM_INDEX 12
+#define DMA_MR_INTM_WIDTH 2
+#define DMA_MR_SWR_INDEX 0
+#define DMA_MR_SWR_WIDTH 1
+#define DMA_SBMR_WR_OSR_INDEX 24
+#define DMA_SBMR_WR_OSR_WIDTH 6
+#define DMA_SBMR_RD_OSR_INDEX 16
+#define DMA_SBMR_RD_OSR_WIDTH 6
+#define DMA_SBMR_AAL_INDEX 12
+#define DMA_SBMR_AAL_WIDTH 1
+#define DMA_SBMR_EAME_INDEX 11
+#define DMA_SBMR_EAME_WIDTH 1
+#define DMA_SBMR_BLEN_256_INDEX 7
+#define DMA_SBMR_BLEN_256_WIDTH 1
+#define DMA_SBMR_BLEN_32_INDEX 4
+#define DMA_SBMR_BLEN_32_WIDTH 1
+#define DMA_SBMR_UNDEF_INDEX 0
+#define DMA_SBMR_UNDEF_WIDTH 1
+
+/* DMA register values */
+#define DMA_DSR_RPS_WIDTH 4
+#define DMA_DSR_TPS_WIDTH 4
+#define DMA_DSR_Q_WIDTH (DMA_DSR_RPS_WIDTH + DMA_DSR_TPS_WIDTH)
+#define DMA_DSR0_RPS_START 8
+#define DMA_DSR0_TPS_START 12
+#define DMA_DSRX_FIRST_QUEUE 3
+#define DMA_DSRX_INC 4
+#define DMA_DSRX_QPR 4
+#define DMA_DSRX_RPS_START 0
+#define DMA_DSRX_TPS_START 4
+#define DMA_TPS_STOPPED 0x00
+#define DMA_TPS_SUSPENDED 0x06
+
+/* DMA channel register offsets
+ * Multiple channels can be active. The first channel has registers
+ * that begin at 0x3100. Each subsequent channel has registers that
+ * are accessed using an offset of 0x80 from the previous channel.
+ */
+#define DMA_CH_BASE 0x3100
+#define DMA_CH_INC 0x80
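+
+/* Illustrative helper (hypothetical, shown only to demonstrate the layout
+ * described above): the offset of a per-channel register is
+ * DMA_CH_BASE + channel * DMA_CH_INC + reg, e.g. channel 2's DMA_CH_SR
+ * is 0x3100 + 2 * 0x80 + 0x60 = 0x3260.
+ */
+static inline unsigned int axgbe_dma_ch_reg_offset(unsigned int channel,
+ unsigned int reg)
+{
+ return DMA_CH_BASE + (channel * DMA_CH_INC) + reg;
+}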
+
+#define DMA_CH_CR 0x00
+#define DMA_CH_TCR 0x04
+#define DMA_CH_RCR 0x08
+#define DMA_CH_TDLR_HI 0x10
+#define DMA_CH_TDLR_LO 0x14
+#define DMA_CH_RDLR_HI 0x18
+#define DMA_CH_RDLR_LO 0x1c
+#define DMA_CH_TDTR_LO 0x24
+#define DMA_CH_RDTR_LO 0x2c
+#define DMA_CH_TDRLR 0x30
+#define DMA_CH_RDRLR 0x34
+#define DMA_CH_IER 0x38
+#define DMA_CH_RIWT 0x3c
+#define DMA_CH_CATDR_LO 0x44
+#define DMA_CH_CARDR_LO 0x4c
+#define DMA_CH_CATBR_HI 0x50
+#define DMA_CH_CATBR_LO 0x54
+#define DMA_CH_CARBR_HI 0x58
+#define DMA_CH_CARBR_LO 0x5c
+#define DMA_CH_SR 0x60
+
+/* DMA channel register entry bit positions and sizes */
+#define DMA_CH_CR_PBLX8_INDEX 16
+#define DMA_CH_CR_PBLX8_WIDTH 1
+#define DMA_CH_CR_SPH_INDEX 24
+#define DMA_CH_CR_SPH_WIDTH 1
+#define DMA_CH_IER_AIE_INDEX 14
+#define DMA_CH_IER_AIE_WIDTH 1
+#define DMA_CH_IER_FBEE_INDEX 12
+#define DMA_CH_IER_FBEE_WIDTH 1
+#define DMA_CH_IER_NIE_INDEX 15
+#define DMA_CH_IER_NIE_WIDTH 1
+#define DMA_CH_IER_RBUE_INDEX 7
+#define DMA_CH_IER_RBUE_WIDTH 1
+#define DMA_CH_IER_RIE_INDEX 6
+#define DMA_CH_IER_RIE_WIDTH 1
+#define DMA_CH_IER_RSE_INDEX 8
+#define DMA_CH_IER_RSE_WIDTH 1
+#define DMA_CH_IER_TBUE_INDEX 2
+#define DMA_CH_IER_TBUE_WIDTH 1
+#define DMA_CH_IER_TIE_INDEX 0
+#define DMA_CH_IER_TIE_WIDTH 1
+#define DMA_CH_IER_TXSE_INDEX 1
+#define DMA_CH_IER_TXSE_WIDTH 1
+#define DMA_CH_RCR_PBL_INDEX 16
+#define DMA_CH_RCR_PBL_WIDTH 6
+#define DMA_CH_RCR_RBSZ_INDEX 1
+#define DMA_CH_RCR_RBSZ_WIDTH 14
+#define DMA_CH_RCR_SR_INDEX 0
+#define DMA_CH_RCR_SR_WIDTH 1
+#define DMA_CH_RIWT_RWT_INDEX 0
+#define DMA_CH_RIWT_RWT_WIDTH 8
+#define DMA_CH_SR_FBE_INDEX 12
+#define DMA_CH_SR_FBE_WIDTH 1
+#define DMA_CH_SR_RBU_INDEX 7
+#define DMA_CH_SR_RBU_WIDTH 1
+#define DMA_CH_SR_RI_INDEX 6
+#define DMA_CH_SR_RI_WIDTH 1
+#define DMA_CH_SR_RPS_INDEX 8
+#define DMA_CH_SR_RPS_WIDTH 1
+#define DMA_CH_SR_TBU_INDEX 2
+#define DMA_CH_SR_TBU_WIDTH 1
+#define DMA_CH_SR_TI_INDEX 0
+#define DMA_CH_SR_TI_WIDTH 1
+#define DMA_CH_SR_TPS_INDEX 1
+#define DMA_CH_SR_TPS_WIDTH 1
+#define DMA_CH_TCR_OSP_INDEX 4
+#define DMA_CH_TCR_OSP_WIDTH 1
+#define DMA_CH_TCR_PBL_INDEX 16
+#define DMA_CH_TCR_PBL_WIDTH 6
+#define DMA_CH_TCR_ST_INDEX 0
+#define DMA_CH_TCR_ST_WIDTH 1
+#define DMA_CH_TCR_TSE_INDEX 12
+#define DMA_CH_TCR_TSE_WIDTH 1
+
+/* DMA channel register values */
+#define DMA_OSP_DISABLE 0x00
+#define DMA_OSP_ENABLE 0x01
+#define DMA_PBL_1 1
+#define DMA_PBL_2 2
+#define DMA_PBL_4 4
+#define DMA_PBL_8 8
+#define DMA_PBL_16 16
+#define DMA_PBL_32 32
+#define DMA_PBL_64 64 /* 8 x 8 */
+#define DMA_PBL_128 128 /* 8 x 16 */
+#define DMA_PBL_256 256 /* 8 x 32 */
+#define DMA_PBL_X8_DISABLE 0x00
+#define DMA_PBL_X8_ENABLE 0x01
+
+/* MAC register offsets */
+#define MAC_TCR 0x0000
+#define MAC_RCR 0x0004
+#define MAC_PFR 0x0008
+#define MAC_WTR 0x000c
+#define MAC_HTR0 0x0010
+#define MAC_VLANTR 0x0050
+#define MAC_VLANHTR 0x0058
+#define MAC_VLANIR 0x0060
+#define MAC_IVLANIR 0x0064
+#define MAC_RETMR 0x006c
+#define MAC_Q0TFCR 0x0070
+#define MAC_RFCR 0x0090
+#define MAC_RQC0R 0x00a0
+#define MAC_RQC1R 0x00a4
+#define MAC_RQC2R 0x00a8
+#define MAC_RQC3R 0x00ac
+#define MAC_ISR 0x00b0
+#define MAC_IER 0x00b4
+#define MAC_RTSR 0x00b8
+#define MAC_PMTCSR 0x00c0
+#define MAC_RWKPFR 0x00c4
+#define MAC_LPICSR 0x00d0
+#define MAC_LPITCR 0x00d4
+#define MAC_VR 0x0110
+#define MAC_DR 0x0114
+#define MAC_HWF0R 0x011c
+#define MAC_HWF1R 0x0120
+#define MAC_HWF2R 0x0124
+#define MAC_MDIOSCAR 0x0200
+#define MAC_MDIOSCCDR 0x0204
+#define MAC_MDIOISR 0x0214
+#define MAC_MDIOIER 0x0218
+#define MAC_MDIOCL22R 0x0220
+#define MAC_GPIOCR 0x0278
+#define MAC_GPIOSR 0x027c
+#define MAC_MACA0HR 0x0300
+#define MAC_MACA0LR 0x0304
+#define MAC_MACA1HR 0x0308
+#define MAC_MACA1LR 0x030c
+#define MAC_RSSCR 0x0c80
+#define MAC_RSSAR 0x0c88
+#define MAC_RSSDR 0x0c8c
+#define MAC_TSCR 0x0d00
+#define MAC_SSIR 0x0d04
+#define MAC_STSR 0x0d08
+#define MAC_STNR 0x0d0c
+#define MAC_STSUR 0x0d10
+#define MAC_STNUR 0x0d14
+#define MAC_TSAR 0x0d18
+#define MAC_TSSR 0x0d20
+#define MAC_TXSNR 0x0d30
+#define MAC_TXSSR 0x0d34
+
+#define MAC_QTFCR_INC 4
+#define MAC_MACA_INC 4
+#define MAC_HTR_INC 4
+
+#define MAC_RQC2_INC 4
+#define MAC_RQC2_Q_PER_REG 4
+
+/* MAC register entry bit positions and sizes */
+#define MAC_HWF0R_ADDMACADRSEL_INDEX 18
+#define MAC_HWF0R_ADDMACADRSEL_WIDTH 5
+#define MAC_HWF0R_ARPOFFSEL_INDEX 9
+#define MAC_HWF0R_ARPOFFSEL_WIDTH 1
+#define MAC_HWF0R_EEESEL_INDEX 13
+#define MAC_HWF0R_EEESEL_WIDTH 1
+#define MAC_HWF0R_GMIISEL_INDEX 1
+#define MAC_HWF0R_GMIISEL_WIDTH 1
+#define MAC_HWF0R_MGKSEL_INDEX 7
+#define MAC_HWF0R_MGKSEL_WIDTH 1
+#define MAC_HWF0R_MMCSEL_INDEX 8
+#define MAC_HWF0R_MMCSEL_WIDTH 1
+#define MAC_HWF0R_RWKSEL_INDEX 6
+#define MAC_HWF0R_RWKSEL_WIDTH 1
+#define MAC_HWF0R_RXCOESEL_INDEX 16
+#define MAC_HWF0R_RXCOESEL_WIDTH 1
+#define MAC_HWF0R_SAVLANINS_INDEX 27
+#define MAC_HWF0R_SAVLANINS_WIDTH 1
+#define MAC_HWF0R_SMASEL_INDEX 5
+#define MAC_HWF0R_SMASEL_WIDTH 1
+#define MAC_HWF0R_TSSEL_INDEX 12
+#define MAC_HWF0R_TSSEL_WIDTH 1
+#define MAC_HWF0R_TSSTSSEL_INDEX 25
+#define MAC_HWF0R_TSSTSSEL_WIDTH 2
+#define MAC_HWF0R_TXCOESEL_INDEX 14
+#define MAC_HWF0R_TXCOESEL_WIDTH 1
+#define MAC_HWF0R_VLHASH_INDEX 4
+#define MAC_HWF0R_VLHASH_WIDTH 1
+#define MAC_HWF1R_ADDR64_INDEX 14
+#define MAC_HWF1R_ADDR64_WIDTH 2
+#define MAC_HWF1R_ADVTHWORD_INDEX 13
+#define MAC_HWF1R_ADVTHWORD_WIDTH 1
+#define MAC_HWF1R_DBGMEMA_INDEX 19
+#define MAC_HWF1R_DBGMEMA_WIDTH 1
+#define MAC_HWF1R_DCBEN_INDEX 16
+#define MAC_HWF1R_DCBEN_WIDTH 1
+#define MAC_HWF1R_HASHTBLSZ_INDEX 24
+#define MAC_HWF1R_HASHTBLSZ_WIDTH 3
+#define MAC_HWF1R_L3L4FNUM_INDEX 27
+#define MAC_HWF1R_L3L4FNUM_WIDTH 4
+#define MAC_HWF1R_NUMTC_INDEX 21
+#define MAC_HWF1R_NUMTC_WIDTH 3
+#define MAC_HWF1R_RSSEN_INDEX 20
+#define MAC_HWF1R_RSSEN_WIDTH 1
+#define MAC_HWF1R_RXFIFOSIZE_INDEX 0
+#define MAC_HWF1R_RXFIFOSIZE_WIDTH 5
+#define MAC_HWF1R_SPHEN_INDEX 17
+#define MAC_HWF1R_SPHEN_WIDTH 1
+#define MAC_HWF1R_TSOEN_INDEX 18
+#define MAC_HWF1R_TSOEN_WIDTH 1
+#define MAC_HWF1R_TXFIFOSIZE_INDEX 6
+#define MAC_HWF1R_TXFIFOSIZE_WIDTH 5
+#define MAC_HWF2R_AUXSNAPNUM_INDEX 28
+#define MAC_HWF2R_AUXSNAPNUM_WIDTH 3
+#define MAC_HWF2R_PPSOUTNUM_INDEX 24
+#define MAC_HWF2R_PPSOUTNUM_WIDTH 3
+#define MAC_HWF2R_RXCHCNT_INDEX 12
+#define MAC_HWF2R_RXCHCNT_WIDTH 4
+#define MAC_HWF2R_RXQCNT_INDEX 0
+#define MAC_HWF2R_RXQCNT_WIDTH 4
+#define MAC_HWF2R_TXCHCNT_INDEX 18
+#define MAC_HWF2R_TXCHCNT_WIDTH 4
+#define MAC_HWF2R_TXQCNT_INDEX 6
+#define MAC_HWF2R_TXQCNT_WIDTH 4
+#define MAC_IER_TSIE_INDEX 12
+#define MAC_IER_TSIE_WIDTH 1
+#define MAC_ISR_MMCRXIS_INDEX 9
+#define MAC_ISR_MMCRXIS_WIDTH 1
+#define MAC_ISR_MMCTXIS_INDEX 10
+#define MAC_ISR_MMCTXIS_WIDTH 1
+#define MAC_ISR_PMTIS_INDEX 4
+#define MAC_ISR_PMTIS_WIDTH 1
+#define MAC_ISR_SMI_INDEX 1
+#define MAC_ISR_SMI_WIDTH 1
+#define MAC_ISR_LSI_INDEX 0
+#define MAC_ISR_LSI_WIDTH 1
+#define MAC_ISR_LS_INDEX 24
+#define MAC_ISR_LS_WIDTH 2
+#define MAC_ISR_TSIS_INDEX 12
+#define MAC_ISR_TSIS_WIDTH 1
+#define MAC_MACA1HR_AE_INDEX 31
+#define MAC_MACA1HR_AE_WIDTH 1
+#define MAC_MDIOIER_SNGLCOMPIE_INDEX 12
+#define MAC_MDIOIER_SNGLCOMPIE_WIDTH 1
+#define MAC_MDIOISR_SNGLCOMPINT_INDEX 12
+#define MAC_MDIOISR_SNGLCOMPINT_WIDTH 1
+#define MAC_MDIOSCAR_DA_INDEX 21
+#define MAC_MDIOSCAR_DA_WIDTH 5
+#define MAC_MDIOSCAR_PA_INDEX 16
+#define MAC_MDIOSCAR_PA_WIDTH 5
+#define MAC_MDIOSCAR_RA_INDEX 0
+#define MAC_MDIOSCAR_RA_WIDTH 16
+#define MAC_MDIOSCAR_REG_INDEX 0
+#define MAC_MDIOSCAR_REG_WIDTH 21
+#define MAC_MDIOSCCDR_BUSY_INDEX 22
+#define MAC_MDIOSCCDR_BUSY_WIDTH 1
+#define MAC_MDIOSCCDR_CMD_INDEX 16
+#define MAC_MDIOSCCDR_CMD_WIDTH 2
+#define MAC_MDIOSCCDR_CR_INDEX 19
+#define MAC_MDIOSCCDR_CR_WIDTH 3
+#define MAC_MDIOSCCDR_DATA_INDEX 0
+#define MAC_MDIOSCCDR_DATA_WIDTH 16
+#define MAC_MDIOSCCDR_SADDR_INDEX 18
+#define MAC_MDIOSCCDR_SADDR_WIDTH 1
+#define MAC_PFR_HMC_INDEX 2
+#define MAC_PFR_HMC_WIDTH 1
+#define MAC_PFR_HPF_INDEX 10
+#define MAC_PFR_HPF_WIDTH 1
+#define MAC_PFR_HUC_INDEX 1
+#define MAC_PFR_HUC_WIDTH 1
+#define MAC_PFR_PM_INDEX 4
+#define MAC_PFR_PM_WIDTH 1
+#define MAC_PFR_PR_INDEX 0
+#define MAC_PFR_PR_WIDTH 1
+#define MAC_PFR_VTFE_INDEX 16
+#define MAC_PFR_VTFE_WIDTH 1
+#define MAC_PMTCSR_MGKPKTEN_INDEX 1
+#define MAC_PMTCSR_MGKPKTEN_WIDTH 1
+#define MAC_PMTCSR_PWRDWN_INDEX 0
+#define MAC_PMTCSR_PWRDWN_WIDTH 1
+#define MAC_PMTCSR_RWKFILTRST_INDEX 31
+#define MAC_PMTCSR_RWKFILTRST_WIDTH 1
+#define MAC_PMTCSR_RWKPKTEN_INDEX 2
+#define MAC_PMTCSR_RWKPKTEN_WIDTH 1
+#define MAC_Q0TFCR_PT_INDEX 16
+#define MAC_Q0TFCR_PT_WIDTH 16
+#define MAC_Q0TFCR_TFE_INDEX 1
+#define MAC_Q0TFCR_TFE_WIDTH 1
+#define MAC_RCR_ACS_INDEX 1
+#define MAC_RCR_ACS_WIDTH 1
+#define MAC_RCR_CST_INDEX 2
+#define MAC_RCR_CST_WIDTH 1
+#define MAC_RCR_DCRCC_INDEX 3
+#define MAC_RCR_DCRCC_WIDTH 1
+#define MAC_RCR_HDSMS_INDEX 12
+#define MAC_RCR_HDSMS_WIDTH 3
+#define MAC_RCR_IPC_INDEX 9
+#define MAC_RCR_IPC_WIDTH 1
+#define MAC_RCR_JE_INDEX 8
+#define MAC_RCR_JE_WIDTH 1
+#define MAC_RCR_LM_INDEX 10
+#define MAC_RCR_LM_WIDTH 1
+#define MAC_RCR_RE_INDEX 0
+#define MAC_RCR_RE_WIDTH 1
+#define MAC_RFCR_PFCE_INDEX 8
+#define MAC_RFCR_PFCE_WIDTH 1
+#define MAC_RFCR_RFE_INDEX 0
+#define MAC_RFCR_RFE_WIDTH 1
+#define MAC_RFCR_UP_INDEX 1
+#define MAC_RFCR_UP_WIDTH 1
+#define MAC_RQC0R_RXQ0EN_INDEX 0
+#define MAC_RQC0R_RXQ0EN_WIDTH 2
+#define MAC_RSSAR_ADDRT_INDEX 2
+#define MAC_RSSAR_ADDRT_WIDTH 1
+#define MAC_RSSAR_CT_INDEX 1
+#define MAC_RSSAR_CT_WIDTH 1
+#define MAC_RSSAR_OB_INDEX 0
+#define MAC_RSSAR_OB_WIDTH 1
+#define MAC_RSSAR_RSSIA_INDEX 8
+#define MAC_RSSAR_RSSIA_WIDTH 8
+#define MAC_RSSCR_IP2TE_INDEX 1
+#define MAC_RSSCR_IP2TE_WIDTH 1
+#define MAC_RSSCR_RSSE_INDEX 0
+#define MAC_RSSCR_RSSE_WIDTH 1
+#define MAC_RSSCR_TCP4TE_INDEX 2
+#define MAC_RSSCR_TCP4TE_WIDTH 1
+#define MAC_RSSCR_UDP4TE_INDEX 3
+#define MAC_RSSCR_UDP4TE_WIDTH 1
+#define MAC_RSSDR_DMCH_INDEX 0
+#define MAC_RSSDR_DMCH_WIDTH 4
+#define MAC_SSIR_SNSINC_INDEX 8
+#define MAC_SSIR_SNSINC_WIDTH 8
+#define MAC_SSIR_SSINC_INDEX 16
+#define MAC_SSIR_SSINC_WIDTH 8
+#define MAC_TCR_SS_INDEX 29
+#define MAC_TCR_SS_WIDTH 2
+#define MAC_TCR_TE_INDEX 0
+#define MAC_TCR_TE_WIDTH 1
+#define MAC_TSCR_AV8021ASMEN_INDEX 28
+#define MAC_TSCR_AV8021ASMEN_WIDTH 1
+#define MAC_TSCR_SNAPTYPSEL_INDEX 16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH 2
+#define MAC_TSCR_TSADDREG_INDEX 5
+#define MAC_TSCR_TSADDREG_WIDTH 1
+#define MAC_TSCR_TSCFUPDT_INDEX 1
+#define MAC_TSCR_TSCFUPDT_WIDTH 1
+#define MAC_TSCR_TSCTRLSSR_INDEX 9
+#define MAC_TSCR_TSCTRLSSR_WIDTH 1
+#define MAC_TSCR_TSENA_INDEX 0
+#define MAC_TSCR_TSENA_WIDTH 1
+#define MAC_TSCR_TSENALL_INDEX 8
+#define MAC_TSCR_TSENALL_WIDTH 1
+#define MAC_TSCR_TSEVNTENA_INDEX 14
+#define MAC_TSCR_TSEVNTENA_WIDTH 1
+#define MAC_TSCR_TSINIT_INDEX 2
+#define MAC_TSCR_TSINIT_WIDTH 1
+#define MAC_TSCR_TSIPENA_INDEX 11
+#define MAC_TSCR_TSIPENA_WIDTH 1
+#define MAC_TSCR_TSIPV4ENA_INDEX 13
+#define MAC_TSCR_TSIPV4ENA_WIDTH 1
+#define MAC_TSCR_TSIPV6ENA_INDEX 12
+#define MAC_TSCR_TSIPV6ENA_WIDTH 1
+#define MAC_TSCR_TSMSTRENA_INDEX 15
+#define MAC_TSCR_TSMSTRENA_WIDTH 1
+#define MAC_TSCR_TSVER2ENA_INDEX 10
+#define MAC_TSCR_TSVER2ENA_WIDTH 1
+#define MAC_TSCR_TXTSSTSM_INDEX 24
+#define MAC_TSCR_TXTSSTSM_WIDTH 1
+#define MAC_TSSR_TXTSC_INDEX 15
+#define MAC_TSSR_TXTSC_WIDTH 1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX 31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH 1
+#define MAC_VLANHTR_VLHT_INDEX 0
+#define MAC_VLANHTR_VLHT_WIDTH 16
+#define MAC_VLANIR_VLTI_INDEX 20
+#define MAC_VLANIR_VLTI_WIDTH 1
+#define MAC_VLANIR_CSVL_INDEX 19
+#define MAC_VLANIR_CSVL_WIDTH 1
+#define MAC_VLANTR_DOVLTC_INDEX 20
+#define MAC_VLANTR_DOVLTC_WIDTH 1
+#define MAC_VLANTR_ERSVLM_INDEX 19
+#define MAC_VLANTR_ERSVLM_WIDTH 1
+#define MAC_VLANTR_ESVL_INDEX 18
+#define MAC_VLANTR_ESVL_WIDTH 1
+#define MAC_VLANTR_ETV_INDEX 16
+#define MAC_VLANTR_ETV_WIDTH 1
+#define MAC_VLANTR_EVLS_INDEX 21
+#define MAC_VLANTR_EVLS_WIDTH 2
+#define MAC_VLANTR_EVLRXS_INDEX 24
+#define MAC_VLANTR_EVLRXS_WIDTH 1
+#define MAC_VLANTR_VL_INDEX 0
+#define MAC_VLANTR_VL_WIDTH 16
+#define MAC_VLANTR_VTHM_INDEX 25
+#define MAC_VLANTR_VTHM_WIDTH 1
+#define MAC_VLANTR_VTIM_INDEX 17
+#define MAC_VLANTR_VTIM_WIDTH 1
+#define MAC_VR_DEVID_INDEX 8
+#define MAC_VR_DEVID_WIDTH 8
+#define MAC_VR_SNPSVER_INDEX 0
+#define MAC_VR_SNPSVER_WIDTH 8
+#define MAC_VR_USERVER_INDEX 16
+#define MAC_VR_USERVER_WIDTH 8
+
+/* MMC register offsets */
+#define MMC_CR 0x0800
+#define MMC_RISR 0x0804
+#define MMC_TISR 0x0808
+#define MMC_RIER 0x080c
+#define MMC_TIER 0x0810
+#define MMC_TXOCTETCOUNT_GB_LO 0x0814
+#define MMC_TXOCTETCOUNT_GB_HI 0x0818
+#define MMC_TXFRAMECOUNT_GB_LO 0x081c
+#define MMC_TXFRAMECOUNT_GB_HI 0x0820
+#define MMC_TXBROADCASTFRAMES_G_LO 0x0824
+#define MMC_TXBROADCASTFRAMES_G_HI 0x0828
+#define MMC_TXMULTICASTFRAMES_G_LO 0x082c
+#define MMC_TXMULTICASTFRAMES_G_HI 0x0830
+#define MMC_TX64OCTETS_GB_LO 0x0834
+#define MMC_TX64OCTETS_GB_HI 0x0838
+#define MMC_TX65TO127OCTETS_GB_LO 0x083c
+#define MMC_TX65TO127OCTETS_GB_HI 0x0840
+#define MMC_TX128TO255OCTETS_GB_LO 0x0844
+#define MMC_TX128TO255OCTETS_GB_HI 0x0848
+#define MMC_TX256TO511OCTETS_GB_LO 0x084c
+#define MMC_TX256TO511OCTETS_GB_HI 0x0850
+#define MMC_TX512TO1023OCTETS_GB_LO 0x0854
+#define MMC_TX512TO1023OCTETS_GB_HI 0x0858
+#define MMC_TX1024TOMAXOCTETS_GB_LO 0x085c
+#define MMC_TX1024TOMAXOCTETS_GB_HI 0x0860
+#define MMC_TXUNICASTFRAMES_GB_LO 0x0864
+#define MMC_TXUNICASTFRAMES_GB_HI 0x0868
+#define MMC_TXMULTICASTFRAMES_GB_LO 0x086c
+#define MMC_TXMULTICASTFRAMES_GB_HI 0x0870
+#define MMC_TXBROADCASTFRAMES_GB_LO 0x0874
+#define MMC_TXBROADCASTFRAMES_GB_HI 0x0878
+#define MMC_TXUNDERFLOWERROR_LO 0x087c
+#define MMC_TXUNDERFLOWERROR_HI 0x0880
+#define MMC_TXOCTETCOUNT_G_LO 0x0884
+#define MMC_TXOCTETCOUNT_G_HI 0x0888
+#define MMC_TXFRAMECOUNT_G_LO 0x088c
+#define MMC_TXFRAMECOUNT_G_HI 0x0890
+#define MMC_TXPAUSEFRAMES_LO 0x0894
+#define MMC_TXPAUSEFRAMES_HI 0x0898
+#define MMC_TXVLANFRAMES_G_LO 0x089c
+#define MMC_TXVLANFRAMES_G_HI 0x08a0
+#define MMC_RXFRAMECOUNT_GB_LO 0x0900
+#define MMC_RXFRAMECOUNT_GB_HI 0x0904
+#define MMC_RXOCTETCOUNT_GB_LO 0x0908
+#define MMC_RXOCTETCOUNT_GB_HI 0x090c
+#define MMC_RXOCTETCOUNT_G_LO 0x0910
+#define MMC_RXOCTETCOUNT_G_HI 0x0914
+#define MMC_RXBROADCASTFRAMES_G_LO 0x0918
+#define MMC_RXBROADCASTFRAMES_G_HI 0x091c
+#define MMC_RXMULTICASTFRAMES_G_LO 0x0920
+#define MMC_RXMULTICASTFRAMES_G_HI 0x0924
+#define MMC_RXCRCERROR_LO 0x0928
+#define MMC_RXCRCERROR_HI 0x092c
+#define MMC_RXRUNTERROR 0x0930
+#define MMC_RXJABBERERROR 0x0934
+#define MMC_RXUNDERSIZE_G 0x0938
+#define MMC_RXOVERSIZE_G 0x093c
+#define MMC_RX64OCTETS_GB_LO 0x0940
+#define MMC_RX64OCTETS_GB_HI 0x0944
+#define MMC_RX65TO127OCTETS_GB_LO 0x0948
+#define MMC_RX65TO127OCTETS_GB_HI 0x094c
+#define MMC_RX128TO255OCTETS_GB_LO 0x0950
+#define MMC_RX128TO255OCTETS_GB_HI 0x0954
+#define MMC_RX256TO511OCTETS_GB_LO 0x0958
+#define MMC_RX256TO511OCTETS_GB_HI 0x095c
+#define MMC_RX512TO1023OCTETS_GB_LO 0x0960
+#define MMC_RX512TO1023OCTETS_GB_HI 0x0964
+#define MMC_RX1024TOMAXOCTETS_GB_LO 0x0968
+#define MMC_RX1024TOMAXOCTETS_GB_HI 0x096c
+#define MMC_RXUNICASTFRAMES_G_LO 0x0970
+#define MMC_RXUNICASTFRAMES_G_HI 0x0974
+#define MMC_RXLENGTHERROR_LO 0x0978
+#define MMC_RXLENGTHERROR_HI 0x097c
+#define MMC_RXOUTOFRANGETYPE_LO 0x0980
+#define MMC_RXOUTOFRANGETYPE_HI 0x0984
+#define MMC_RXPAUSEFRAMES_LO 0x0988
+#define MMC_RXPAUSEFRAMES_HI 0x098c
+#define MMC_RXFIFOOVERFLOW_LO 0x0990
+#define MMC_RXFIFOOVERFLOW_HI 0x0994
+#define MMC_RXVLANFRAMES_GB_LO 0x0998
+#define MMC_RXVLANFRAMES_GB_HI 0x099c
+#define MMC_RXWATCHDOGERROR 0x09a0
+
+/* MMC register entry bit positions and sizes */
+#define MMC_CR_CR_INDEX 0
+#define MMC_CR_CR_WIDTH 1
+#define MMC_CR_CSR_INDEX 1
+#define MMC_CR_CSR_WIDTH 1
+#define MMC_CR_ROR_INDEX 2
+#define MMC_CR_ROR_WIDTH 1
+#define MMC_CR_MCF_INDEX 3
+#define MMC_CR_MCF_WIDTH 1
+#define MMC_CR_MCT_INDEX 4
+#define MMC_CR_MCT_WIDTH 2
+#define MMC_RIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_RIER_ALL_INTERRUPTS_WIDTH 23
+#define MMC_RISR_RXFRAMECOUNT_GB_INDEX 0
+#define MMC_RISR_RXFRAMECOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_GB_INDEX 1
+#define MMC_RISR_RXOCTETCOUNT_GB_WIDTH 1
+#define MMC_RISR_RXOCTETCOUNT_G_INDEX 2
+#define MMC_RISR_RXOCTETCOUNT_G_WIDTH 1
+#define MMC_RISR_RXBROADCASTFRAMES_G_INDEX 3
+#define MMC_RISR_RXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXMULTICASTFRAMES_G_INDEX 4
+#define MMC_RISR_RXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXCRCERROR_INDEX 5
+#define MMC_RISR_RXCRCERROR_WIDTH 1
+#define MMC_RISR_RXRUNTERROR_INDEX 6
+#define MMC_RISR_RXRUNTERROR_WIDTH 1
+#define MMC_RISR_RXJABBERERROR_INDEX 7
+#define MMC_RISR_RXJABBERERROR_WIDTH 1
+#define MMC_RISR_RXUNDERSIZE_G_INDEX 8
+#define MMC_RISR_RXUNDERSIZE_G_WIDTH 1
+#define MMC_RISR_RXOVERSIZE_G_INDEX 9
+#define MMC_RISR_RXOVERSIZE_G_WIDTH 1
+#define MMC_RISR_RX64OCTETS_GB_INDEX 10
+#define MMC_RISR_RX64OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX65TO127OCTETS_GB_INDEX 11
+#define MMC_RISR_RX65TO127OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX128TO255OCTETS_GB_INDEX 12
+#define MMC_RISR_RX128TO255OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX256TO511OCTETS_GB_INDEX 13
+#define MMC_RISR_RX256TO511OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX512TO1023OCTETS_GB_INDEX 14
+#define MMC_RISR_RX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_INDEX 15
+#define MMC_RISR_RX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_RISR_RXUNICASTFRAMES_G_INDEX 16
+#define MMC_RISR_RXUNICASTFRAMES_G_WIDTH 1
+#define MMC_RISR_RXLENGTHERROR_INDEX 17
+#define MMC_RISR_RXLENGTHERROR_WIDTH 1
+#define MMC_RISR_RXOUTOFRANGETYPE_INDEX 18
+#define MMC_RISR_RXOUTOFRANGETYPE_WIDTH 1
+#define MMC_RISR_RXPAUSEFRAMES_INDEX 19
+#define MMC_RISR_RXPAUSEFRAMES_WIDTH 1
+#define MMC_RISR_RXFIFOOVERFLOW_INDEX 20
+#define MMC_RISR_RXFIFOOVERFLOW_WIDTH 1
+#define MMC_RISR_RXVLANFRAMES_GB_INDEX 21
+#define MMC_RISR_RXVLANFRAMES_GB_WIDTH 1
+#define MMC_RISR_RXWATCHDOGERROR_INDEX 22
+#define MMC_RISR_RXWATCHDOGERROR_WIDTH 1
+#define MMC_TIER_ALL_INTERRUPTS_INDEX 0
+#define MMC_TIER_ALL_INTERRUPTS_WIDTH 18
+#define MMC_TISR_TXOCTETCOUNT_GB_INDEX 0
+#define MMC_TISR_TXOCTETCOUNT_GB_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_GB_INDEX 1
+#define MMC_TISR_TXFRAMECOUNT_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_G_INDEX 2
+#define MMC_TISR_TXBROADCASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_G_INDEX 3
+#define MMC_TISR_TXMULTICASTFRAMES_G_WIDTH 1
+#define MMC_TISR_TX64OCTETS_GB_INDEX 4
+#define MMC_TISR_TX64OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX65TO127OCTETS_GB_INDEX 5
+#define MMC_TISR_TX65TO127OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX128TO255OCTETS_GB_INDEX 6
+#define MMC_TISR_TX128TO255OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX256TO511OCTETS_GB_INDEX 7
+#define MMC_TISR_TX256TO511OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX512TO1023OCTETS_GB_INDEX 8
+#define MMC_TISR_TX512TO1023OCTETS_GB_WIDTH 1
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_INDEX 9
+#define MMC_TISR_TX1024TOMAXOCTETS_GB_WIDTH 1
+#define MMC_TISR_TXUNICASTFRAMES_GB_INDEX 10
+#define MMC_TISR_TXUNICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXMULTICASTFRAMES_GB_INDEX 11
+#define MMC_TISR_TXMULTICASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXBROADCASTFRAMES_GB_INDEX 12
+#define MMC_TISR_TXBROADCASTFRAMES_GB_WIDTH 1
+#define MMC_TISR_TXUNDERFLOWERROR_INDEX 13
+#define MMC_TISR_TXUNDERFLOWERROR_WIDTH 1
+#define MMC_TISR_TXOCTETCOUNT_G_INDEX 14
+#define MMC_TISR_TXOCTETCOUNT_G_WIDTH 1
+#define MMC_TISR_TXFRAMECOUNT_G_INDEX 15
+#define MMC_TISR_TXFRAMECOUNT_G_WIDTH 1
+#define MMC_TISR_TXPAUSEFRAMES_INDEX 16
+#define MMC_TISR_TXPAUSEFRAMES_WIDTH 1
+#define MMC_TISR_TXVLANFRAMES_G_INDEX 17
+#define MMC_TISR_TXVLANFRAMES_G_WIDTH 1
+
+/* MTL register offsets */
+#define MTL_OMR 0x1000
+#define MTL_FDCR 0x1008
+#define MTL_FDSR 0x100c
+#define MTL_FDDR 0x1010
+#define MTL_ISR 0x1020
+#define MTL_RQDCM0R 0x1030
+#define MTL_TCPM0R 0x1040
+#define MTL_TCPM1R 0x1044
+
+#define MTL_RQDCM_INC 4
+#define MTL_RQDCM_Q_PER_REG 4
+#define MTL_TCPM_INC 4
+#define MTL_TCPM_TC_PER_REG 4
+
+/* MTL register entry bit positions and sizes */
+#define MTL_OMR_ETSALG_INDEX 5
+#define MTL_OMR_ETSALG_WIDTH 2
+#define MTL_OMR_RAA_INDEX 2
+#define MTL_OMR_RAA_WIDTH 1
+
+/* MTL queue register offsets
+ * Multiple queues can be active. The first queue has registers
+ * that begin at 0x1100. Each subsequent queue has registers that
+ * are accessed using an offset of 0x80 from the previous queue.
+ */
+#define MTL_Q_BASE 0x1100
+#define MTL_Q_INC 0x80
+
+#define MTL_Q_TQOMR 0x00
+#define MTL_Q_TQUR 0x04
+#define MTL_Q_TQDR 0x08
+#define MTL_Q_RQOMR 0x40
+#define MTL_Q_RQMPOCR 0x44
+#define MTL_Q_RQDR 0x48
+#define MTL_Q_RQFCR 0x50
+#define MTL_Q_IER 0x70
+#define MTL_Q_ISR 0x74
+
+/* MTL queue register entry bit positions and sizes */
+#define MTL_Q_RQDR_PRXQ_INDEX 16
+#define MTL_Q_RQDR_PRXQ_WIDTH 14
+#define MTL_Q_RQDR_RXQSTS_INDEX 4
+#define MTL_Q_RQDR_RXQSTS_WIDTH 2
+#define MTL_Q_RQFCR_RFA_INDEX 1
+#define MTL_Q_RQFCR_RFA_WIDTH 6
+#define MTL_Q_RQFCR_RFD_INDEX 17
+#define MTL_Q_RQFCR_RFD_WIDTH 6
+#define MTL_Q_RQOMR_EHFC_INDEX 7
+#define MTL_Q_RQOMR_EHFC_WIDTH 1
+#define MTL_Q_RQOMR_RQS_INDEX 16
+#define MTL_Q_RQOMR_RQS_WIDTH 9
+#define MTL_Q_RQOMR_RSF_INDEX 5
+#define MTL_Q_RQOMR_RSF_WIDTH 1
+#define MTL_Q_RQOMR_RTC_INDEX 0
+#define MTL_Q_RQOMR_RTC_WIDTH 2
+#define MTL_Q_TQDR_TRCSTS_INDEX 1
+#define MTL_Q_TQDR_TRCSTS_WIDTH 2
+#define MTL_Q_TQDR_TXQSTS_INDEX 4
+#define MTL_Q_TQDR_TXQSTS_WIDTH 1
+#define MTL_Q_TQOMR_FTQ_INDEX 0
+#define MTL_Q_TQOMR_FTQ_WIDTH 1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX 8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH 3
+#define MTL_Q_TQOMR_TQS_INDEX 16
+#define MTL_Q_TQOMR_TQS_WIDTH 10
+#define MTL_Q_TQOMR_TSF_INDEX 1
+#define MTL_Q_TQOMR_TSF_WIDTH 1
+#define MTL_Q_TQOMR_TTC_INDEX 4
+#define MTL_Q_TQOMR_TTC_WIDTH 3
+#define MTL_Q_TQOMR_TXQEN_INDEX 2
+#define MTL_Q_TQOMR_TXQEN_WIDTH 2
+
+/* MTL queue register value */
+#define MTL_RSF_DISABLE 0x00
+#define MTL_RSF_ENABLE 0x01
+#define MTL_TSF_DISABLE 0x00
+#define MTL_TSF_ENABLE 0x01
+
+#define MTL_RX_THRESHOLD_64 0x00
+#define MTL_RX_THRESHOLD_96 0x02
+#define MTL_RX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_32 0x01
+#define MTL_TX_THRESHOLD_64 0x00
+#define MTL_TX_THRESHOLD_96 0x02
+#define MTL_TX_THRESHOLD_128 0x03
+#define MTL_TX_THRESHOLD_192 0x04
+#define MTL_TX_THRESHOLD_256 0x05
+#define MTL_TX_THRESHOLD_384 0x06
+#define MTL_TX_THRESHOLD_512 0x07
+
+#define MTL_ETSALG_WRR 0x00
+#define MTL_ETSALG_WFQ 0x01
+#define MTL_ETSALG_DWRR 0x02
+#define MTL_RAA_SP 0x00
+#define MTL_RAA_WSP 0x01
+
+#define MTL_Q_DISABLED 0x00
+#define MTL_Q_ENABLED 0x02
+
+/* MTL traffic class register offsets
+ * Multiple traffic classes can be active. The first class has registers
+ * that begin at 0x1100. Each subsequent traffic class has registers that
+ * are accessed using an offset of 0x80 from the previous one.
+ */
+#define MTL_TC_BASE MTL_Q_BASE
+#define MTL_TC_INC MTL_Q_INC
+
+#define MTL_TC_ETSCR 0x10
+#define MTL_TC_ETSSR 0x14
+#define MTL_TC_QWR 0x18
+
+/* MTL traffic class register entry bit positions and sizes */
+#define MTL_TC_ETSCR_TSA_INDEX 0
+#define MTL_TC_ETSCR_TSA_WIDTH 2
+#define MTL_TC_QWR_QW_INDEX 0
+#define MTL_TC_QWR_QW_WIDTH 21
+
+/* MTL traffic class register value */
+#define MTL_TSA_SP 0x00
+#define MTL_TSA_ETS 0x02
+
+/* PCS register offsets */
+#define PCS_V1_WINDOW_SELECT 0x03fc
+#define PCS_V2_WINDOW_DEF 0x9060
+#define PCS_V2_WINDOW_SELECT 0x9064
+
+/* PCS register entry bit positions and sizes */
+#define PCS_V2_WINDOW_DEF_OFFSET_INDEX 6
+#define PCS_V2_WINDOW_DEF_OFFSET_WIDTH 14
+#define PCS_V2_WINDOW_DEF_SIZE_INDEX 2
+#define PCS_V2_WINDOW_DEF_SIZE_WIDTH 4
+
+/* SerDes integration register offsets */
+#define SIR0_KR_RT_1 0x002c
+#define SIR0_STATUS 0x0040
+#define SIR1_SPEED 0x0000
+
+/* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX 11
+#define SIR0_KR_RT_1_RESET_WIDTH 1
+#define SIR0_STATUS_RX_READY_INDEX 0
+#define SIR0_STATUS_RX_READY_WIDTH 1
+#define SIR0_STATUS_TX_READY_INDEX 8
+#define SIR0_STATUS_TX_READY_WIDTH 1
+#define SIR1_SPEED_CDR_RATE_INDEX 12
+#define SIR1_SPEED_CDR_RATE_WIDTH 4
+#define SIR1_SPEED_DATARATE_INDEX 4
+#define SIR1_SPEED_DATARATE_WIDTH 2
+#define SIR1_SPEED_PLLSEL_INDEX 3
+#define SIR1_SPEED_PLLSEL_WIDTH 1
+#define SIR1_SPEED_RATECHANGE_INDEX 6
+#define SIR1_SPEED_RATECHANGE_WIDTH 1
+#define SIR1_SPEED_TXAMP_INDEX 8
+#define SIR1_SPEED_TXAMP_WIDTH 4
+#define SIR1_SPEED_WORDMODE_INDEX 0
+#define SIR1_SPEED_WORDMODE_WIDTH 3
+
+/* SerDes RxTx register offsets */
+#define RXTX_REG6 0x0018
+#define RXTX_REG20 0x0050
+#define RXTX_REG22 0x0058
+#define RXTX_REG114 0x01c8
+#define RXTX_REG129 0x0204
+
+/* SerDes RxTx register entry bit positions and sizes */
+#define RXTX_REG6_RESETB_RXD_INDEX 8
+#define RXTX_REG6_RESETB_RXD_WIDTH 1
+#define RXTX_REG20_BLWC_ENA_INDEX 2
+#define RXTX_REG20_BLWC_ENA_WIDTH 1
+#define RXTX_REG114_PQ_REG_INDEX 9
+#define RXTX_REG114_PQ_REG_WIDTH 7
+#define RXTX_REG129_RXDFE_CONFIG_INDEX 14
+#define RXTX_REG129_RXDFE_CONFIG_WIDTH 2
+
+/* MAC Control register offsets */
+#define XP_PROP_0 0x0000
+#define XP_PROP_1 0x0004
+#define XP_PROP_2 0x0008
+#define XP_PROP_3 0x000c
+#define XP_PROP_4 0x0010
+#define XP_PROP_5 0x0014
+#define XP_MAC_ADDR_LO 0x0020
+#define XP_MAC_ADDR_HI 0x0024
+#define XP_ECC_ISR 0x0030
+#define XP_ECC_IER 0x0034
+#define XP_ECC_CNT0 0x003c
+#define XP_ECC_CNT1 0x0040
+#define XP_DRIVER_INT_REQ 0x0060
+#define XP_DRIVER_INT_RO 0x0064
+#define XP_DRIVER_SCRATCH_0 0x0068
+#define XP_DRIVER_SCRATCH_1 0x006c
+#define XP_INT_EN 0x0078
+#define XP_I2C_MUTEX 0x0080
+#define XP_MDIO_MUTEX 0x0084
+
+/* MAC Control register entry bit positions and sizes */
+#define XP_DRIVER_INT_REQ_REQUEST_INDEX 0
+#define XP_DRIVER_INT_REQ_REQUEST_WIDTH 1
+#define XP_DRIVER_INT_RO_STATUS_INDEX 0
+#define XP_DRIVER_INT_RO_STATUS_WIDTH 1
+#define XP_DRIVER_SCRATCH_0_COMMAND_INDEX 0
+#define XP_DRIVER_SCRATCH_0_COMMAND_WIDTH 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_INDEX 8
+#define XP_DRIVER_SCRATCH_0_SUB_COMMAND_WIDTH 8
+#define XP_ECC_CNT0_RX_DED_INDEX 24
+#define XP_ECC_CNT0_RX_DED_WIDTH 8
+#define XP_ECC_CNT0_RX_SEC_INDEX 16
+#define XP_ECC_CNT0_RX_SEC_WIDTH 8
+#define XP_ECC_CNT0_TX_DED_INDEX 8
+#define XP_ECC_CNT0_TX_DED_WIDTH 8
+#define XP_ECC_CNT0_TX_SEC_INDEX 0
+#define XP_ECC_CNT0_TX_SEC_WIDTH 8
+#define XP_ECC_CNT1_DESC_DED_INDEX 8
+#define XP_ECC_CNT1_DESC_DED_WIDTH 8
+#define XP_ECC_CNT1_DESC_SEC_INDEX 0
+#define XP_ECC_CNT1_DESC_SEC_WIDTH 8
+#define XP_ECC_IER_DESC_DED_INDEX 0
+#define XP_ECC_IER_DESC_DED_WIDTH 1
+#define XP_ECC_IER_DESC_SEC_INDEX 1
+#define XP_ECC_IER_DESC_SEC_WIDTH 1
+#define XP_ECC_IER_RX_DED_INDEX 2
+#define XP_ECC_IER_RX_DED_WIDTH 1
+#define XP_ECC_IER_RX_SEC_INDEX 3
+#define XP_ECC_IER_RX_SEC_WIDTH 1
+#define XP_ECC_IER_TX_DED_INDEX 4
+#define XP_ECC_IER_TX_DED_WIDTH 1
+#define XP_ECC_IER_TX_SEC_INDEX 5
+#define XP_ECC_IER_TX_SEC_WIDTH 1
+#define XP_ECC_ISR_DESC_DED_INDEX 0
+#define XP_ECC_ISR_DESC_DED_WIDTH 1
+#define XP_ECC_ISR_DESC_SEC_INDEX 1
+#define XP_ECC_ISR_DESC_SEC_WIDTH 1
+#define XP_ECC_ISR_RX_DED_INDEX 2
+#define XP_ECC_ISR_RX_DED_WIDTH 1
+#define XP_ECC_ISR_RX_SEC_INDEX 3
+#define XP_ECC_ISR_RX_SEC_WIDTH 1
+#define XP_ECC_ISR_TX_DED_INDEX 4
+#define XP_ECC_ISR_TX_DED_WIDTH 1
+#define XP_ECC_ISR_TX_SEC_INDEX 5
+#define XP_ECC_ISR_TX_SEC_WIDTH 1
+#define XP_I2C_MUTEX_BUSY_INDEX 31
+#define XP_I2C_MUTEX_BUSY_WIDTH 1
+#define XP_I2C_MUTEX_ID_INDEX 29
+#define XP_I2C_MUTEX_ID_WIDTH 2
+#define XP_I2C_MUTEX_ACTIVE_INDEX 0
+#define XP_I2C_MUTEX_ACTIVE_WIDTH 1
+#define XP_MAC_ADDR_HI_VALID_INDEX 31
+#define XP_MAC_ADDR_HI_VALID_WIDTH 1
+#define XP_PROP_0_CONN_TYPE_INDEX 28
+#define XP_PROP_0_CONN_TYPE_WIDTH 3
+#define XP_PROP_0_MDIO_ADDR_INDEX 16
+#define XP_PROP_0_MDIO_ADDR_WIDTH 5
+#define XP_PROP_0_PORT_ID_INDEX 0
+#define XP_PROP_0_PORT_ID_WIDTH 8
+#define XP_PROP_0_PORT_MODE_INDEX 8
+#define XP_PROP_0_PORT_MODE_WIDTH 4
+#define XP_PROP_0_PORT_SPEEDS_INDEX 23
+#define XP_PROP_0_PORT_SPEEDS_WIDTH 4
+#define XP_PROP_1_MAX_RX_DMA_INDEX 24
+#define XP_PROP_1_MAX_RX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_RX_QUEUES_INDEX 8
+#define XP_PROP_1_MAX_RX_QUEUES_WIDTH 5
+#define XP_PROP_1_MAX_TX_DMA_INDEX 16
+#define XP_PROP_1_MAX_TX_DMA_WIDTH 5
+#define XP_PROP_1_MAX_TX_QUEUES_INDEX 0
+#define XP_PROP_1_MAX_TX_QUEUES_WIDTH 5
+#define XP_PROP_2_RX_FIFO_SIZE_INDEX 16
+#define XP_PROP_2_RX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_2_TX_FIFO_SIZE_INDEX 0
+#define XP_PROP_2_TX_FIFO_SIZE_WIDTH 16
+#define XP_PROP_3_GPIO_MASK_INDEX 28
+#define XP_PROP_3_GPIO_MASK_WIDTH 4
+#define XP_PROP_3_GPIO_MOD_ABS_INDEX 20
+#define XP_PROP_3_GPIO_MOD_ABS_WIDTH 4
+#define XP_PROP_3_GPIO_RATE_SELECT_INDEX 16
+#define XP_PROP_3_GPIO_RATE_SELECT_WIDTH 4
+#define XP_PROP_3_GPIO_RX_LOS_INDEX 24
+#define XP_PROP_3_GPIO_RX_LOS_WIDTH 4
+#define XP_PROP_3_GPIO_TX_FAULT_INDEX 12
+#define XP_PROP_3_GPIO_TX_FAULT_WIDTH 4
+#define XP_PROP_3_GPIO_ADDR_INDEX 8
+#define XP_PROP_3_GPIO_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_INDEX 0
+#define XP_PROP_3_MDIO_RESET_WIDTH 2
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_INDEX 8
+#define XP_PROP_3_MDIO_RESET_I2C_ADDR_WIDTH 3
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_INDEX 12
+#define XP_PROP_3_MDIO_RESET_I2C_GPIO_WIDTH 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_INDEX 4
+#define XP_PROP_3_MDIO_RESET_INT_GPIO_WIDTH 2
+#define XP_PROP_4_MUX_ADDR_HI_INDEX 8
+#define XP_PROP_4_MUX_ADDR_HI_WIDTH 5
+#define XP_PROP_4_MUX_ADDR_LO_INDEX 0
+#define XP_PROP_4_MUX_ADDR_LO_WIDTH 3
+#define XP_PROP_4_MUX_CHAN_INDEX 4
+#define XP_PROP_4_MUX_CHAN_WIDTH 3
+#define XP_PROP_4_REDRV_ADDR_INDEX 16
+#define XP_PROP_4_REDRV_ADDR_WIDTH 7
+#define XP_PROP_4_REDRV_IF_INDEX 23
+#define XP_PROP_4_REDRV_IF_WIDTH 1
+#define XP_PROP_4_REDRV_LANE_INDEX 24
+#define XP_PROP_4_REDRV_LANE_WIDTH 3
+#define XP_PROP_4_REDRV_MODEL_INDEX 28
+#define XP_PROP_4_REDRV_MODEL_WIDTH 3
+#define XP_PROP_4_REDRV_PRESENT_INDEX 31
+#define XP_PROP_4_REDRV_PRESENT_WIDTH 1
+
+/* I2C Control register offsets */
+#define IC_CON 0x0000
+#define IC_TAR 0x0004
+#define IC_DATA_CMD 0x0010
+#define IC_INTR_STAT 0x002c
+#define IC_INTR_MASK 0x0030
+#define IC_RAW_INTR_STAT 0x0034
+#define IC_CLR_INTR 0x0040
+#define IC_CLR_TX_ABRT 0x0054
+#define IC_CLR_STOP_DET 0x0060
+#define IC_ENABLE 0x006c
+#define IC_TXFLR 0x0074
+#define IC_RXFLR 0x0078
+#define IC_TX_ABRT_SOURCE 0x0080
+#define IC_ENABLE_STATUS 0x009c
+#define IC_COMP_PARAM_1 0x00f4
+
+/* I2C Control register entry bit positions and sizes */
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_INDEX 2
+#define IC_COMP_PARAM_1_MAX_SPEED_MODE_WIDTH 2
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_INDEX 8
+#define IC_COMP_PARAM_1_RX_BUFFER_DEPTH_WIDTH 8
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_INDEX 16
+#define IC_COMP_PARAM_1_TX_BUFFER_DEPTH_WIDTH 8
+#define IC_CON_MASTER_MODE_INDEX 0
+#define IC_CON_MASTER_MODE_WIDTH 1
+#define IC_CON_RESTART_EN_INDEX 5
+#define IC_CON_RESTART_EN_WIDTH 1
+#define IC_CON_RX_FIFO_FULL_HOLD_INDEX 9
+#define IC_CON_RX_FIFO_FULL_HOLD_WIDTH 1
+#define IC_CON_SLAVE_DISABLE_INDEX 6
+#define IC_CON_SLAVE_DISABLE_WIDTH 1
+#define IC_CON_SPEED_INDEX 1
+#define IC_CON_SPEED_WIDTH 2
+#define IC_DATA_CMD_CMD_INDEX 8
+#define IC_DATA_CMD_CMD_WIDTH 1
+#define IC_DATA_CMD_STOP_INDEX 9
+#define IC_DATA_CMD_STOP_WIDTH 1
+#define IC_ENABLE_ABORT_INDEX 1
+#define IC_ENABLE_ABORT_WIDTH 1
+#define IC_ENABLE_EN_INDEX 0
+#define IC_ENABLE_EN_WIDTH 1
+#define IC_ENABLE_STATUS_EN_INDEX 0
+#define IC_ENABLE_STATUS_EN_WIDTH 1
+#define IC_INTR_MASK_TX_EMPTY_INDEX 4
+#define IC_INTR_MASK_TX_EMPTY_WIDTH 1
+#define IC_RAW_INTR_STAT_RX_FULL_INDEX 2
+#define IC_RAW_INTR_STAT_RX_FULL_WIDTH 1
+#define IC_RAW_INTR_STAT_STOP_DET_INDEX 9
+#define IC_RAW_INTR_STAT_STOP_DET_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_ABRT_INDEX 6
+#define IC_RAW_INTR_STAT_TX_ABRT_WIDTH 1
+#define IC_RAW_INTR_STAT_TX_EMPTY_INDEX 4
+#define IC_RAW_INTR_STAT_TX_EMPTY_WIDTH 1
+
+/* I2C Control register value */
+#define IC_TX_ABRT_7B_ADDR_NOACK 0x0001
+#define IC_TX_ABRT_ARB_LOST 0x1000
+
+/* Descriptor/Packet entry bit positions and sizes */
+#define RX_PACKET_ERRORS_CRC_INDEX 2
+#define RX_PACKET_ERRORS_CRC_WIDTH 1
+#define RX_PACKET_ERRORS_FRAME_INDEX 3
+#define RX_PACKET_ERRORS_FRAME_WIDTH 1
+#define RX_PACKET_ERRORS_LENGTH_INDEX 0
+#define RX_PACKET_ERRORS_LENGTH_WIDTH 1
+#define RX_PACKET_ERRORS_OVERRUN_INDEX 1
+#define RX_PACKET_ERRORS_OVERRUN_WIDTH 1
+
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_INDEX 0
+#define RX_PACKET_ATTRIBUTES_CSUM_DONE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 1
+#define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX 2
+#define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX 3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX 4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX 5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH 1
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_INDEX 6
+#define RX_PACKET_ATTRIBUTES_RSS_HASH_WIDTH 1
+
+#define RX_NORMAL_DESC0_OVT_INDEX 0
+#define RX_NORMAL_DESC0_OVT_WIDTH 16
+#define RX_NORMAL_DESC2_HL_INDEX 0
+#define RX_NORMAL_DESC2_HL_WIDTH 10
+#define RX_NORMAL_DESC3_CDA_INDEX 27
+#define RX_NORMAL_DESC3_CDA_WIDTH 1
+#define RX_NORMAL_DESC3_CTXT_INDEX 30
+#define RX_NORMAL_DESC3_CTXT_WIDTH 1
+#define RX_NORMAL_DESC3_ES_INDEX 15
+#define RX_NORMAL_DESC3_ES_WIDTH 1
+#define RX_NORMAL_DESC3_ETLT_INDEX 16
+#define RX_NORMAL_DESC3_ETLT_WIDTH 4
+#define RX_NORMAL_DESC3_FD_INDEX 29
+#define RX_NORMAL_DESC3_FD_WIDTH 1
+#define RX_NORMAL_DESC3_INTE_INDEX 30
+#define RX_NORMAL_DESC3_INTE_WIDTH 1
+#define RX_NORMAL_DESC3_L34T_INDEX 20
+#define RX_NORMAL_DESC3_L34T_WIDTH 4
+#define RX_NORMAL_DESC3_LD_INDEX 28
+#define RX_NORMAL_DESC3_LD_WIDTH 1
+#define RX_NORMAL_DESC3_OWN_INDEX 31
+#define RX_NORMAL_DESC3_OWN_WIDTH 1
+#define RX_NORMAL_DESC3_PL_INDEX 0
+#define RX_NORMAL_DESC3_PL_WIDTH 14
+#define RX_NORMAL_DESC3_RSV_INDEX 26
+#define RX_NORMAL_DESC3_RSV_WIDTH 1
+
+#define RX_DESC3_L34T_IPV4_TCP 1
+#define RX_DESC3_L34T_IPV4_UDP 2
+#define RX_DESC3_L34T_IPV4_ICMP 3
+#define RX_DESC3_L34T_IPV6_TCP 9
+#define RX_DESC3_L34T_IPV6_UDP 10
+#define RX_DESC3_L34T_IPV6_ICMP 11
+
+#define RX_CONTEXT_DESC3_TSA_INDEX 4
+#define RX_CONTEXT_DESC3_TSA_WIDTH 1
+#define RX_CONTEXT_DESC3_TSD_INDEX 6
+#define RX_CONTEXT_DESC3_TSD_WIDTH 1
+
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
+#define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX 1
+#define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX 2
+#define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH 1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX 3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH 1
+
+#define TX_CONTEXT_DESC2_MSS_INDEX 0
+#define TX_CONTEXT_DESC2_MSS_WIDTH 15
+#define TX_CONTEXT_DESC3_CTXT_INDEX 30
+#define TX_CONTEXT_DESC3_CTXT_WIDTH 1
+#define TX_CONTEXT_DESC3_TCMSSV_INDEX 26
+#define TX_CONTEXT_DESC3_TCMSSV_WIDTH 1
+#define TX_CONTEXT_DESC3_VLTV_INDEX 16
+#define TX_CONTEXT_DESC3_VLTV_WIDTH 1
+#define TX_CONTEXT_DESC3_VT_INDEX 0
+#define TX_CONTEXT_DESC3_VT_WIDTH 16
+
+#define TX_NORMAL_DESC2_HL_B1L_INDEX 0
+#define TX_NORMAL_DESC2_HL_B1L_WIDTH 14
+#define TX_NORMAL_DESC2_IC_INDEX 31
+#define TX_NORMAL_DESC2_IC_WIDTH 1
+#define TX_NORMAL_DESC2_TTSE_INDEX 30
+#define TX_NORMAL_DESC2_TTSE_WIDTH 1
+#define TX_NORMAL_DESC2_VTIR_INDEX 14
+#define TX_NORMAL_DESC2_VTIR_WIDTH 2
+#define TX_NORMAL_DESC3_CIC_INDEX 16
+#define TX_NORMAL_DESC3_CIC_WIDTH 2
+#define TX_NORMAL_DESC3_CPC_INDEX 26
+#define TX_NORMAL_DESC3_CPC_WIDTH 2
+#define TX_NORMAL_DESC3_CTXT_INDEX 30
+#define TX_NORMAL_DESC3_CTXT_WIDTH 1
+#define TX_NORMAL_DESC3_FD_INDEX 29
+#define TX_NORMAL_DESC3_FD_WIDTH 1
+#define TX_NORMAL_DESC3_FL_INDEX 0
+#define TX_NORMAL_DESC3_FL_WIDTH 15
+#define TX_NORMAL_DESC3_LD_INDEX 28
+#define TX_NORMAL_DESC3_LD_WIDTH 1
+#define TX_NORMAL_DESC3_OWN_INDEX 31
+#define TX_NORMAL_DESC3_OWN_WIDTH 1
+#define TX_NORMAL_DESC3_TCPHDRLEN_INDEX 19
+#define TX_NORMAL_DESC3_TCPHDRLEN_WIDTH 4
+#define TX_NORMAL_DESC3_TCPPL_INDEX 0
+#define TX_NORMAL_DESC3_TCPPL_WIDTH 18
+#define TX_NORMAL_DESC3_TSE_INDEX 18
+#define TX_NORMAL_DESC3_TSE_WIDTH 1
+
+#define TX_NORMAL_DESC2_VLAN_INSERT 0x2
+
+/* MDIO undefined or vendor specific registers */
+#ifndef MDIO_PMA_10GBR_PMD_CTRL
+#define MDIO_PMA_10GBR_PMD_CTRL 0x0096
+#endif
+
+#ifndef MDIO_PMA_10GBR_FECCTRL
+#define MDIO_PMA_10GBR_FECCTRL 0x00ab
+#endif
+
+#ifndef MDIO_PCS_DIG_CTRL
+#define MDIO_PCS_DIG_CTRL 0x8000
+#endif
+
+#ifndef MDIO_AN_XNP
+#define MDIO_AN_XNP 0x0016
+#endif
+
+#ifndef MDIO_AN_LPX
+#define MDIO_AN_LPX 0x0019
+#endif
+
+#ifndef MDIO_AN_COMP_STAT
+#define MDIO_AN_COMP_STAT 0x0030
+#endif
+
+#ifndef MDIO_AN_INTMASK
+#define MDIO_AN_INTMASK 0x8001
+#endif
+
+#ifndef MDIO_AN_INT
+#define MDIO_AN_INT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_AN_ADVERTISE
+#define MDIO_VEND2_AN_ADVERTISE 0x0004
+#endif
+
+#ifndef MDIO_VEND2_AN_LP_ABILITY
+#define MDIO_VEND2_AN_LP_ABILITY 0x0005
+#endif
+
+#ifndef MDIO_VEND2_AN_CTRL
+#define MDIO_VEND2_AN_CTRL 0x8001
+#endif
+
+#ifndef MDIO_VEND2_AN_STAT
+#define MDIO_VEND2_AN_STAT 0x8002
+#endif
+
+#ifndef MDIO_VEND2_PMA_CDR_CONTROL
+#define MDIO_VEND2_PMA_CDR_CONTROL 0x8056
+#endif
+
+#ifndef MDIO_CTRL1_SPEED1G
+#define MDIO_CTRL1_SPEED1G (MDIO_CTRL1_SPEED10G & ~BMCR_SPEED100)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_ENABLE
+#define MDIO_VEND2_CTRL1_AN_ENABLE BIT(12)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_AN_RESTART
+#define MDIO_VEND2_CTRL1_AN_RESTART BIT(9)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS6
+#define MDIO_VEND2_CTRL1_SS6 BIT(6)
+#endif
+
+#ifndef MDIO_VEND2_CTRL1_SS13
+#define MDIO_VEND2_CTRL1_SS13 BIT(13)
+#endif
+
+/* MDIO mask values */
+#define AXGBE_AN_CL73_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL73_INC_LINK BIT(1)
+#define AXGBE_AN_CL73_PG_RCV BIT(2)
+#define AXGBE_AN_CL73_INT_MASK 0x07
+
+#define AXGBE_XNP_MCF_NULL_MESSAGE 0x001
+#define AXGBE_XNP_ACK_PROCESSED BIT(12)
+#define AXGBE_XNP_MP_FORMATTED BIT(13)
+#define AXGBE_XNP_NP_EXCHANGE BIT(15)
+
+#define AXGBE_KR_TRAINING_START BIT(0)
+#define AXGBE_KR_TRAINING_ENABLE BIT(1)
+
+#define AXGBE_PCS_CL37_BP BIT(12)
+
+#define AXGBE_AN_CL37_INT_CMPLT BIT(0)
+#define AXGBE_AN_CL37_INT_MASK 0x01
+
+#define AXGBE_AN_CL37_HD_MASK 0x40
+#define AXGBE_AN_CL37_FD_MASK 0x20
+
+#define AXGBE_AN_CL37_PCS_MODE_MASK 0x06
+#define AXGBE_AN_CL37_PCS_MODE_BASEX 0x00
+#define AXGBE_AN_CL37_PCS_MODE_SGMII 0x04
+#define AXGBE_AN_CL37_TX_CONFIG_MASK 0x08
+
+#define AXGBE_PMA_CDR_TRACK_EN_MASK 0x01
+#define AXGBE_PMA_CDR_TRACK_EN_OFF 0x00
+#define AXGBE_PMA_CDR_TRACK_EN_ON 0x01
+
+/* generic OS/Linux compatibility definitions */
+#define __iomem
+
+#define rmb() rte_rmb() /* dpdk rte provided rmb */
+#define wmb() rte_wmb() /* dpdk rte provided wmb */
+
+#define __le16 u16
+#define __le32 u32
+#define __le64 u64
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef unsigned long long dma_addr_t;
+
+static inline uint32_t low32_value(uint64_t addr)
+{
+ return (addr) & 0x0ffffffff;
+}
+
+static inline uint32_t high32_value(uint64_t addr)
+{
+ return (addr >> 32) & 0x0ffffffff;
+}
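+
+/* Example: 64-bit descriptor base addresses are programmed as two 32-bit
+ * register halves, e.g. high32_value(addr) into DMA_CH_TDLR_HI and
+ * low32_value(addr) into DMA_CH_TDLR_LO.
+ */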
+
+/*END*/
+
+/* Bit setting and getting macros
+ * The get macro will extract the current bit field value from within
+ * the variable
+ *
+ * The set macro will clear the current bit field value within the
+ * variable and then set the bit field of the variable to the
+ * specified value
+ */
+#define GET_BITS(_var, _index, _width) \
+ (((_var) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS(_var, _index, _width, _val) \
+do { \
+ (_var) &= ~(((0x1 << (_width)) - 1) << (_index)); \
+ (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index)); \
+} while (0)
+
+#define GET_BITS_LE(_var, _index, _width) \
+ ((rte_le_to_cpu_32((_var)) >> (_index)) & ((0x1 << (_width)) - 1))
+
+#define SET_BITS_LE(_var, _index, _width, _val) \
+do { \
+ (_var) &= rte_cpu_to_le_32(~(((0x1 << (_width)) - 1) << (_index)));\
+ (_var) |= rte_cpu_to_le_32((((_val) & \
+ ((0x1 << (_width)) - 1)) << (_index))); \
+} while (0)
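+
+/* Worked example (illustrative values): writing then reading a 6-bit
+ * field at bit index 16, as done for e.g. DMA_CH_TCR_PBL:
+ *
+ *	u32 var = 0;
+ *	SET_BITS(var, 16, 6, 32);	(var becomes 32 << 16)
+ *	pbl = GET_BITS(var, 16, 6);	(pbl == 32)
+ */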
+
+/* Bit setting and getting macros based on register fields
+ * The get macro uses the bit field definitions formed using the input
+ * names to extract the current bit field value from within the
+ * variable
+ *
+ * The set macro uses the bit field definitions formed using the input
+ * names to set the bit field of the variable to the specified value
+ */
+#define AXGMAC_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define AXGMAC_GET_BITS_LE(_var, _prefix, _field) \
+ GET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define AXGMAC_SET_BITS_LE(_var, _prefix, _field, _val) \
+ SET_BITS_LE((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
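+
+/* Example: AXGMAC_SET_BITS(reg_val, MAC_RCR, JE, 1) pastes the prefix and
+ * field name into SET_BITS(reg_val, MAC_RCR_JE_INDEX, MAC_RCR_JE_WIDTH, 1),
+ * i.e. it sets the jumbo-enable bit (bit 8) of a MAC_RCR value.
+ */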
+
+/* Macros for reading or writing registers
+ * The ioread macros will get bit fields or full values using the
+ * register definitions formed using the input names
+ *
+ * The iowrite macros will set bit fields or full values using the
+ * register definitions formed using the input names
+ */
+#define AXGMAC_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(AXGMAC_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xgmac_regs) + (_reg))
+
+#define AXGMAC_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
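+
+/* Usage sketch: AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1) performs a
+ * read-modify-write of MAC_RCR, changing only the receive-enable bit.
+ */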
+
+/* Macros for reading or writing MTL queue or traffic class registers
+ * Similar to the standard read and write macros except that the
+ * base register value is calculated by the queue or traffic class number
+ */
+#define AXGMAC_MTL_IOREAD(_pdata, _n, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xgmac_regs) + \
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOREAD_BITS(_pdata, _n, _reg, _field) \
+ GET_BITS(AXGMAC_MTL_IOREAD((_pdata), (_n), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_MTL_IOWRITE(_pdata, _n, _reg, _val) \
+ rte_write32((_val), (uint8_t *)((_pdata)->xgmac_regs) +\
+ MTL_Q_BASE + ((_n) * MTL_Q_INC) + (_reg))
+
+#define AXGMAC_MTL_IOWRITE_BITS(_pdata, _n, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_MTL_IOREAD((_pdata), (_n), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_MTL_IOWRITE((_pdata), (_n), _reg, reg_val); \
+} while (0)
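+
+/* Example: AXGMAC_MTL_IOREAD_BITS(pdata, 3, MTL_Q_TQDR, TXQSTS) reads the
+ * Tx queue status bit of queue 3, from offset
+ * MTL_Q_BASE + 3 * MTL_Q_INC + MTL_Q_TQDR (0x1100 + 0x180 + 0x08).
+ */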
+
+/* Macros for reading or writing DMA channel registers
+ * Similar to the standard read and write macros except that the
+ * base register value is obtained from the ring
+ */
+#define AXGMAC_DMA_IOREAD(_channel, _reg) \
+ rte_read32((uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOREAD_BITS(_channel, _reg, _field) \
+ GET_BITS(AXGMAC_DMA_IOREAD((_channel), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define AXGMAC_DMA_IOWRITE(_channel, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_channel)->dma_regs) + (_reg))
+
+#define AXGMAC_DMA_IOWRITE_BITS(_channel, _reg, _field, _val) \
+do { \
+ u32 reg_val = AXGMAC_DMA_IOREAD((_channel), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ AXGMAC_DMA_IOWRITE((_channel), _reg, reg_val); \
+} while (0)
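+
+/* Example: AXGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1) starts the
+ * channel's Rx DMA by setting the SR bit via read-modify-write.
+ */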
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of XPCS registers.
+ */
+#define XPCS_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XPCS_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XPCS32_IOWRITE(_pdata, _off, _val) \
+ rte_write32(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS32_IOREAD(_pdata, _off) \
+ rte_read32((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOWRITE(_pdata, _off, _val) \
+ rte_write16(_val, \
+ (uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+#define XPCS16_IOREAD(_pdata, _off) \
+ rte_read16((uint8_t *)((_pdata)->xpcs_regs) + (_off))
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes integration registers.
+ */
+#define XSIR_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XSIR0_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR0_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR0_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir0_regs) + (_reg))
+
+#define XSIR0_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR0_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR0_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+#define XSIR1_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->sir1_regs) + _reg)
+
+#define XSIR1_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XSIR1_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XSIR1_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->sir1_regs) + (_reg))
+
+#define XSIR1_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XSIR1_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XSIR1_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of SerDes RxTx registers.
+ */
+#define XRXTX_IOREAD(_pdata, _reg) \
+ rte_read16((uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XRXTX_IOREAD((_pdata), _reg), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XRXTX_IOWRITE(_pdata, _reg, _val) \
+ rte_write16((_val), \
+ (uint8_t *)((_pdata)->rxtx_regs) + (_reg))
+
+#define XRXTX_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u16 reg_val = XRXTX_IOREAD((_pdata), _reg); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XRXTX_IOWRITE((_pdata), _reg, reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of MAC Control registers.
+ */
+#define XP_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XP_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XP_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XP_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XP_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xprop_regs) + (_reg))
+
+#define XP_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XP_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XP_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * within the register values of I2C Control registers.
+ */
+#define XI2C_GET_BITS(_var, _prefix, _field) \
+ GET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH)
+
+#define XI2C_SET_BITS(_var, _prefix, _field, _val) \
+ SET_BITS((_var), \
+ _prefix##_##_field##_INDEX, \
+ _prefix##_##_field##_WIDTH, (_val))
+
+#define XI2C_IOREAD(_pdata, _reg) \
+ rte_read32((uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOREAD_BITS(_pdata, _reg, _field) \
+ GET_BITS(XI2C_IOREAD((_pdata), (_reg)), \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH)
+
+#define XI2C_IOWRITE(_pdata, _reg, _val) \
+ rte_write32((_val), \
+ (uint8_t *)((_pdata)->xi2c_regs) + (_reg))
+
+#define XI2C_IOWRITE_BITS(_pdata, _reg, _field, _val) \
+do { \
+ u32 reg_val = XI2C_IOREAD((_pdata), (_reg)); \
+ SET_BITS(reg_val, \
+ _reg##_##_field##_INDEX, \
+ _reg##_##_field##_WIDTH, (_val)); \
+ XI2C_IOWRITE((_pdata), (_reg), reg_val); \
+} while (0)
+
+/* Macros for building, reading or writing register values or bits
+ * using MDIO. Different from above because of the use of standardized
+ * Linux include values. No shifting is performed with the bit
+ * operations, everything works on mask values.
+ */
+#define XMDIO_READ(_pdata, _mmd, _reg) \
+ ((_pdata)->hw_if.read_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff)))
+
+#define XMDIO_READ_BITS(_pdata, _mmd, _reg, _mask) \
+ (XMDIO_READ((_pdata), _mmd, _reg) & _mask)
+
+#define XMDIO_WRITE(_pdata, _mmd, _reg, _val) \
+ ((_pdata)->hw_if.write_mmd_regs((_pdata), 0, \
+ MII_ADDR_C45 | ((_mmd) << 16) | ((_reg) & 0xffff), (_val)))
+
+#define XMDIO_WRITE_BITS(_pdata, _mmd, _reg, _mask, _val) \
+do { \
+ u32 mmd_val = XMDIO_READ((_pdata), (_mmd), (_reg)); \
+ mmd_val &= ~(_mask); \
+ mmd_val |= (_val); \
+ XMDIO_WRITE((_pdata), (_mmd), (_reg), (mmd_val)); \
+} while (0)
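+
+/* Usage sketch (assumes the Linux-style MDIO_MMD_AN, MDIO_CTRL1 and
+ * MDIO_AN_CTRL1_RESTART constants are in scope): restart auto-negotiation
+ * by masking in the control bit, no field shifting involved:
+ *
+ *	XMDIO_WRITE_BITS(pdata, MDIO_MMD_AN, MDIO_CTRL1,
+ *			 MDIO_AN_CTRL1_RESTART, MDIO_AN_CTRL1_RESTART);
+ */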
+
+/*
+ * time_after(a,b) returns true if the time a is after time b.
+ *
+ * Do this with "<0" and ">=0" to only test the sign of the result. A
+ * good compiler would generate better code (and a really good compiler
+ * wouldn't care). Gcc is currently neither.
+ */
+#define time_after(a, b) ((long)((b) - (a)) < 0)
+#define time_before(a, b) time_after(b, a)
+
+#define time_after_eq(a, b) ((long)((a) - (b)) >= 0)
+#define time_before_eq(a, b) time_after_eq(b, a)
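+
+/* Worked example: with a = 5 (a counter that has just wrapped) and
+ * b = ULONG_MAX - 10, (long)((b) - (a)) is -16, i.e. negative, so
+ * time_after(a, b) correctly reports a as the later time even though
+ * a < b numerically.
+ */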
+
+/*---bitmap support apis---*/
+static inline int axgbe_test_bit(int nr, volatile unsigned long *addr)
+{
+ int res;
+
+ rte_mb();
+ res = ((*addr) & (1UL << nr)) != 0;
+ rte_mb();
+ return res;
+}
+
+static inline void axgbe_set_bit(unsigned int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_or(addr, (1UL << nr));
+}
+
+static inline void axgbe_clear_bit(int nr, volatile unsigned long *addr)
+{
+ __sync_fetch_and_and(addr, ~(1UL << nr));
+}
+
+static inline int axgbe_test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ unsigned long mask = (1UL << nr);
+
+ return __sync_fetch_and_and(addr, ~mask) & mask;
+}
+
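+/* Convert milliseconds to timer (TSC) cycles; e.g. with a 2.5 GHz
+ * timer, 500 ms corresponds to 1250000000 cycles. Multiplying before
+ * dividing keeps sub-second values from truncating to zero.
+ */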
+static inline unsigned long msecs_to_timer_cycles(unsigned int m)
+{
+ return (rte_get_timer_hz() * m) / 1000;
+}
+
+#endif /* __AXGBE_COMMON_H__ */
diff --git a/drivers/net/axgbe/axgbe_dev.c b/drivers/net/axgbe/axgbe_dev.c
new file mode 100644
index 00000000..707f1ee9
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_dev.c
@@ -0,0 +1,1103 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+#include "axgbe_rxtx.h"
+
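+/* Maximum frame = MTU + Ethernet header (14) + FCS (4) + VLAN tag (4);
+ * e.g. the standard 1500 byte MTU yields 1522 bytes.
+ */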
+static inline unsigned int axgbe_get_max_frame(struct axgbe_port *pdata)
+{
+ return pdata->eth_dev->data->mtu + ETHER_HDR_LEN +
+ ETHER_CRC_LEN + VLAN_HLEN;
+}
+
+/* query busy bit */
+static int mdio_complete(struct axgbe_port *pdata)
+{
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, BUSY))
+ return 1;
+
+ return 0;
+}
+
+static int axgbe_write_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg, u16 val)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ return 0;
+ }
+
+ PMD_DRV_LOG(ERR, "Mdio write operation timed out\n");
+ return -ETIMEDOUT;
+}
+
+static int axgbe_read_ext_mii_regs(struct axgbe_port *pdata, int addr,
+ int reg)
+{
+ unsigned int mdio_sca, mdio_sccd;
+ uint64_t timeout;
+
+ mdio_sca = 0;
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, REG, reg);
+ AXGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, addr);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);
+
+ mdio_sccd = 0;
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
+ AXGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
+ AXGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);
+
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (mdio_complete(pdata))
+ goto success;
+ }
+
+ PMD_DRV_LOG(ERR, "Mdio read operation timed out\n");
+ return -ETIMEDOUT;
+
+success:
+ return AXGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
+}
+
+static int axgbe_set_ext_mii_mode(struct axgbe_port *pdata, unsigned int port,
+ enum axgbe_mdio_mode mode)
+{
+ unsigned int reg_val = 0;
+
+ switch (mode) {
+ case AXGBE_MDIO_MODE_CL22:
+ if (port > AXGMAC_MAX_C22_PORT)
+ return -EINVAL;
+ reg_val |= (1 << port);
+ break;
+ case AXGBE_MDIO_MODE_CL45:
+ break;
+ default:
+ return -EINVAL;
+ }
+ AXGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);
+
+ return 0;
+}
+
+static int axgbe_read_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused, int mmd_reg)
+{
+ unsigned int mmd_address, index, offset;
+ int mmd_data;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing the PCS registers in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and reading 16 bits of data.
+ */
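+ /* Illustrative example, assuming a 4 KB window (mask 0xfff):
+ * mmd_address 0x1f8010 becomes 0x3f0020 after the shift, so
+ * index 0x3f0000 is written to the window select register and
+ * the 16-bit data access uses offset xpcs_window + 0x020.
+ */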
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ mmd_data = XPCS16_IOREAD(pdata, offset);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+
+ return mmd_data;
+}
+
+static void axgbe_write_mmd_regs_v2(struct axgbe_port *pdata,
+ int prtad __rte_unused,
+ int mmd_reg, int mmd_data)
+{
+ unsigned int mmd_address, index, offset;
+
+ if (mmd_reg & MII_ADDR_C45)
+ mmd_address = mmd_reg & ~MII_ADDR_C45;
+ else
+ mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
+
+ /* The PCS registers are accessed using mmio. The underlying
+ * management interface uses indirect addressing to access the MMD
+ * register sets. This requires accessing the PCS registers in two
+ * phases, an address phase and a data phase.
+ *
+ * The mmio interface is based on 16-bit offsets and values. All
+ * register offsets must therefore be adjusted by left shifting the
+ * offset 1 bit and writing 16 bits of data.
+ */
+ mmd_address <<= 1;
+ index = mmd_address & ~pdata->xpcs_window_mask;
+ offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
+
+ pthread_mutex_lock(&pdata->xpcs_mutex);
+
+ XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
+ XPCS16_IOWRITE(pdata, offset, mmd_data);
+
+ pthread_mutex_unlock(&pdata->xpcs_mutex);
+}
+
+static int axgbe_read_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return -1;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);
+ }
+}
+
+static void axgbe_write_mmd_regs(struct axgbe_port *pdata, int prtad,
+ int mmd_reg, int mmd_data)
+{
+ switch (pdata->vdata->xpcs_access) {
+ case AXGBE_XPCS_ACCESS_V1:
+ PMD_DRV_LOG(ERR, "PHY_Version 1 is not supported\n");
+ return;
+ case AXGBE_XPCS_ACCESS_V2:
+ default:
+ return axgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
+ }
+}
+
+static int axgbe_set_speed(struct axgbe_port *pdata, int speed)
+{
+ unsigned int ss;
+
+ switch (speed) {
+ case SPEED_1000:
+ ss = 0x03;
+ break;
+ case SPEED_2500:
+ ss = 0x02;
+ break;
+ case SPEED_10000:
+ ss = 0x00;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);
+
+ return 0;
+}
+
+static int axgbe_disable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Clear MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
+
+ /* Clear MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_tx_flow_control(struct axgbe_port *pdata)
+{
+ unsigned int max_q_count, q_count;
+ unsigned int reg, reg_val;
+ unsigned int i;
+
+ /* Set MTL flow control */
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ unsigned int ehfc = 0;
+
+ /* Flow control thresholds are established */
+ if (pdata->rx_rfd[i])
+ ehfc = 1;
+
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);
+ }
+
+ /* Set MAC flow control */
+ max_q_count = AXGMAC_MAX_FLOW_CONTROL_QUEUES;
+ q_count = RTE_MIN(pdata->tx_q_count,
+ max_q_count);
+ reg = MAC_Q0TFCR;
+ for (i = 0; i < q_count; i++) {
+ reg_val = AXGMAC_IOREAD(pdata, reg);
+
+ /* Enable transmit flow control */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
+ /* Set pause time */
+ AXGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MAC_QTFCR_INC;
+ }
+
+ return 0;
+}
+
+static int axgbe_disable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);
+
+ return 0;
+}
+
+static int axgbe_enable_rx_flow_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);
+
+ return 0;
+}
+
+static int axgbe_config_tx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->tx_pause)
+ axgbe_enable_tx_flow_control(pdata);
+ else
+ axgbe_disable_tx_flow_control(pdata);
+
+ return 0;
+}
+
+static int axgbe_config_rx_flow_control(struct axgbe_port *pdata)
+{
+ if (pdata->rx_pause)
+ axgbe_enable_rx_flow_control(pdata);
+ else
+ axgbe_disable_rx_flow_control(pdata);
+
+ return 0;
+}
+
+static void axgbe_config_flow_control(struct axgbe_port *pdata)
+{
+ axgbe_config_tx_flow_control(pdata);
+ axgbe_config_rx_flow_control(pdata);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);
+}
+
+static void axgbe_queue_flow_control_threshold(struct axgbe_port *pdata,
+ unsigned int queue,
+ unsigned int q_fifo_size)
+{
+ unsigned int frame_fifo_size;
+ unsigned int rfa, rfd;
+
+ frame_fifo_size = AXGMAC_FLOW_CONTROL_ALIGN(axgbe_get_max_frame(pdata));
+
+ /* This path deals with just maximum frame sizes which are
+ * limited to a jumbo frame of 9,000 (plus headers, etc.)
+ * so we can never exceed the maximum allowable RFA/RFD
+ * values.
+ */
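+ /* Worked example: a 9000 byte jumbo MTU gives a max frame of
+ * 9022 bytes, aligned up to frame_fifo_size = 9216. A 16 KB
+ * queue fifo then falls in the max-frame to 3 max-frames case
+ * below: rfa = 16384 - 9216 = 7168 and rfd = 7168 + 4608 =
+ * 11776 bytes, encoded as 12 and 21 in 512-byte units.
+ */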
+ if (q_fifo_size <= 2048) {
+ /* Set rx_rfd to zero to signal no flow control */
+ pdata->rx_rfa[queue] = 0;
+ pdata->rx_rfd[queue] = 0;
+ return;
+ }
+
+ if (q_fifo_size <= 4096) {
+ /* Between 2048 and 4096 */
+ pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
+ pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= frame_fifo_size) {
+ /* Between 4096 and max-frame */
+ pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
+ pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
+ return;
+ }
+
+ if (q_fifo_size <= (frame_fifo_size * 3)) {
+ /* Between max-frame and 3 max-frames,
+ * trigger if we get just over a frame of data and
+ * resume when we have just under half a frame left.
+ */
+ rfa = q_fifo_size - frame_fifo_size;
+ rfd = rfa + (frame_fifo_size / 2);
+ } else {
+ /* Above 3 max-frames - trigger when just over
+ * 2 frames of space available
+ */
+ rfa = frame_fifo_size * 2;
+ rfa += AXGMAC_FLOW_CONTROL_UNIT;
+ rfd = rfa + frame_fifo_size;
+ }
+
+ pdata->rx_rfa[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfa);
+ pdata->rx_rfd[queue] = AXGMAC_FLOW_CONTROL_VALUE(rfd);
+}
+
+static void axgbe_calculate_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int q_fifo_size;
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ q_fifo_size = (pdata->fifo + 1) * AXGMAC_FIFO_UNIT;
+
+ axgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
+ }
+}
+
+static void axgbe_config_flow_control_threshold(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
+ pdata->rx_rfa[i]);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
+ pdata->rx_rfd[i]);
+ }
+}
+
+static int __axgbe_exit(struct axgbe_port *pdata)
+{
+ unsigned int count = 2000;
+
+ /* Issue a software reset */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
+ rte_delay_us(10);
+
+ /* Poll until the software reset completes */
+ while (--count && AXGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+
+ return 0;
+}
+
+static int axgbe_exit(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* To guard against possible incorrectly generated interrupts,
+ * issue the software reset twice.
+ */
+ ret = __axgbe_exit(pdata);
+ if (ret)
+ return ret;
+
+ return __axgbe_exit(pdata);
+}
+
+static int axgbe_flush_tx_queues(struct axgbe_port *pdata)
+{
+ unsigned int i, count;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
+ return 0;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
+
+ /* Poll until the queue flush completes */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ count = 2000;
+ while (--count && AXGMAC_MTL_IOREAD_BITS(pdata, i,
+ MTL_Q_TQOMR, FTQ))
+ rte_delay_us(500);
+
+ if (!count)
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void axgbe_config_dma_bus(struct axgbe_port *pdata)
+{
+ /* Set enhanced addressing mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, EAME, 1);
+
+ /* Outstanding read/write requests */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, RD_OSR, 0x3f);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, WR_OSR, 0x3f);
+
+ /* Set the System Bus mode */
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_32, 1);
+ AXGMAC_IOWRITE_BITS(pdata, DMA_SBMR, AAL, 1);
+}
+
+static void axgbe_config_dma_cache(struct axgbe_port *pdata)
+{
+ unsigned int arcache, awcache, arwcache;
+
+ arcache = 0;
+ AXGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
+
+ awcache = 0;
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, 0x1);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDC, 0x3);
+ AXGMAC_SET_BITS(awcache, DMA_AXIAWCR, RDD, 0x1);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
+
+ arwcache = 0;
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWD, 0x1);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, TDWC, 0x3);
+ AXGMAC_SET_BITS(arwcache, DMA_AXIAWRCR, RDRC, 0x3);
+ AXGMAC_IOWRITE(pdata, DMA_AXIAWRCR, arwcache);
+}
+
+static void axgbe_config_edma_control(struct axgbe_port *pdata)
+{
+ AXGMAC_IOWRITE(pdata, EDMA_TX_CONTROL, 0x5);
+ AXGMAC_IOWRITE(pdata, EDMA_RX_CONTROL, 0x5);
+}
+
+static int axgbe_config_osp_mode(struct axgbe_port *pdata)
+{
+ /* Force DMA to operate on second packet before closing descriptors
+ * of first packet
+ */
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, OSP,
+ pdata->tx_osp_mode);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_pblx8(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_CR, PBLX8,
+ pdata->pblx8);
+ }
+ return 0;
+}
+
+static int axgbe_config_tx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, PBL,
+ pdata->tx_pbl);
+ }
+
+ return 0;
+}
+
+static int axgbe_config_rx_pbl_val(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, PBL,
+ pdata->rx_pbl);
+ }
+
+ return 0;
+}
+
+static void axgbe_config_rx_buffer_size(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ rxq->buf_size = rte_pktmbuf_data_room_size(rxq->mb_pool) -
+ RTE_PKTMBUF_HEADROOM;
+ rxq->buf_size = (rxq->buf_size + AXGBE_RX_BUF_ALIGN - 1) &
+ ~(AXGBE_RX_BUF_ALIGN - 1);
+
+ if (rxq->buf_size > pdata->rx_buf_size)
+ pdata->rx_buf_size = rxq->buf_size;
+
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, RBSZ,
+ rxq->buf_size);
+ }
+}
+
+static int axgbe_write_rss_reg(struct axgbe_port *pdata, unsigned int type,
+ unsigned int index, unsigned int val)
+{
+ unsigned int wait;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return -EBUSY;
+
+ AXGMAC_IOWRITE(pdata, MAC_RSSDR, val);
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);
+
+ wait = 1000;
+ while (wait--) {
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
+ return 0;
+
+ rte_delay_us(1500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_write_rss_hash_key(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
+ unsigned int *key;
+ int ret;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+
+ if (!rss_conf->rss_key)
+ key = (unsigned int *)&pdata->rss_key;
+ else
+ key = (unsigned int *)&rss_conf->rss_key;
+
+ while (key_regs--) {
+ ret = axgbe_write_rss_reg(pdata, AXGBE_RSS_HASH_KEY_TYPE,
+ key_regs, *key++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_write_rss_lookup_table(struct axgbe_port *pdata)
+{
+ unsigned int i;
+ int ret;
+
+ for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
+ ret = axgbe_write_rss_reg(pdata,
+ AXGBE_RSS_LOOKUP_TABLE_TYPE, i,
+ pdata->rss_table[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int axgbe_enable_rss(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Program the hash key */
+ ret = axgbe_write_rss_hash_key(pdata);
+ if (ret)
+ return ret;
+
+ /* Program the lookup table */
+ ret = axgbe_write_rss_lookup_table(pdata);
+ if (ret)
+ return ret;
+
+ /* Set the RSS options */
+ AXGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);
+
+ /* Enable RSS */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);
+
+ return 0;
+}
+
+static void axgbe_rss_options(struct axgbe_port *pdata)
+{
+ struct rte_eth_rss_conf *rss_conf;
+ uint64_t rss_hf;
+
+ rss_conf = &pdata->eth_dev->data->dev_conf.rx_adv_conf.rss_conf;
+ rss_hf = rss_conf->rss_hf;
+
+ if (rss_hf & (ETH_RSS_IPV4 | ETH_RSS_IPV6))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV6_TCP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
+ if (rss_hf & (ETH_RSS_NONFRAG_IPV4_UDP | ETH_RSS_NONFRAG_IPV6_UDP))
+ AXGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);
+}
+
+static int axgbe_config_rss(struct axgbe_port *pdata)
+{
+ uint32_t i;
+
+ if (pdata->rss_enable) {
+ /* Initialize RSS hash key and lookup table */
+ uint32_t *key = (uint32_t *)pdata->rss_key;
+
+ for (i = 0; i < sizeof(pdata->rss_key) / 4; i++)
+ *key++ = (uint32_t)rte_rand();
+ for (i = 0; i < AXGBE_RSS_MAX_TABLE_SIZE; i++)
+ AXGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
+ i % pdata->eth_dev->data->nb_rx_queues);
+ axgbe_rss_options(pdata);
+ if (axgbe_enable_rss(pdata)) {
+ PMD_DRV_LOG(ERR, "Error in enabling RSS support\n");
+ return -1;
+ }
+ } else {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);
+ }
+
+ return 0;
+}
+
+static void axgbe_enable_dma_interrupts(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int dma_ch_isr, dma_ch_ier;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+
+ /* Clear all the interrupts which are set */
+ dma_ch_isr = AXGMAC_DMA_IOREAD(txq, DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_SR, dma_ch_isr);
+
+ /* Clear all interrupt enable bits */
+ dma_ch_ier = 0;
+
+ /* Configure the summary interrupts
+ * NIE - Normal Interrupt Summary Enable (left disabled:
+ * Rx/Tx completion is handled by polling)
+ * AIE - Abnormal Interrupt Summary Enable
+ * FBEE - Fatal Bus Error Enable
+ */
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, NIE, 0);
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, AIE, 1);
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
+
+ /* Rx interrupts
+ * RBUE - Receive Buffer Unavailable Enable (left disabled:
+ * buffer refill is handled in the poll-mode Rx path)
+ */
+ AXGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
+
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_IER, dma_ch_ier);
+ }
+}
+
+static void wrapper_tx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_tx_queues; i++) {
+ txq = pdata->eth_dev->data->tx_queues[i];
+ txq->cur = 0;
+ txq->dirty = 0;
+ /* Update the total number of Tx descriptors */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDRLR, txq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_HI,
+ high32_value(txq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDLR_LO,
+ low32_value(txq->ring_phys_addr));
+ }
+}
+
+static int wrapper_rx_desc_init(struct axgbe_port *pdata)
+{
+ struct axgbe_rx_queue *rxq;
+ struct rte_mbuf *mbuf;
+ volatile union axgbe_rx_desc *desc;
+ unsigned int i, j;
+
+ for (i = 0; i < pdata->eth_dev->data->nb_rx_queues; i++) {
+ rxq = pdata->eth_dev->data->rx_queues[i];
+
+ /* Initialize software ring entries */
+ rxq->mbuf_alloc = 0;
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ desc = AXGBE_GET_DESC_PT(rxq, 0);
+
+ for (j = 0; j < rxq->nb_desc; j++) {
+ mbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (mbuf == NULL) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed queue_id = %u, idx = %d\n",
+ (unsigned int)rxq->queue_id, j);
+ axgbe_dev_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->sw_ring[j] = mbuf;
+ /* Mbuf populate */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ desc->read.baddr =
+ rte_cpu_to_le_64(
+ rte_mbuf_data_iova_default(mbuf));
+ rte_wmb();
+ AXGMAC_SET_BITS_LE(desc->read.desc3,
+ RX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+ rxq->mbuf_alloc++;
+ desc++;
+ }
+ /* Update the total number of Rx descriptors */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDRLR,
+ rxq->nb_desc - 1);
+ /* Update the starting address of descriptor ring */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_HI,
+ high32_value(rxq->ring_phys_addr));
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDLR_LO,
+ low32_value(rxq->ring_phys_addr));
+ /* Update the Rx Descriptor Tail Pointer */
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (rxq->nb_desc - 1) *
+ sizeof(union axgbe_rx_desc)));
+ }
+ return 0;
+}
+
+static void axgbe_config_mtl_mode(struct axgbe_port *pdata)
+{
+ unsigned int i;
+
+ /* Set Tx to weighted round robin scheduling algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
+
+ /* Set Tx traffic classes to use WRR algorithm with equal weights */
+ for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+ MTL_TSA_ETS);
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+ }
+
+ /* Set Rx to strict priority algorithm */
+ AXGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
+}
+
+static int axgbe_config_tsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_rsf_mode(struct axgbe_port *pdata, unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
+
+ return 0;
+}
+
+static int axgbe_config_tx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
+
+ return 0;
+}
+
+static int axgbe_config_rx_threshold(struct axgbe_port *pdata,
+ unsigned int val)
+{
+ unsigned int i;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
+
+ return 0;
+}
+
+/* Distributing the fifo size */
+static void axgbe_config_rx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->hw_feat.rx_fifo_size);
+ q_fifo_size = fifo_size / pdata->rx_q_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result
+ * by 1).
+ */
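+ /* For example, with the v2a maximum of 229376 bytes shared by,
+ * say, 8 Rx queues: q_fifo_size = 28672 and p_fifo =
+ * 28672 / 256 - 1 = 111, the zero-based value programmed
+ * into RQS below.
+ */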
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ for (i = 0; i < pdata->rx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, p_fifo);
+ pdata->fifo = p_fifo;
+
+ /* Calculate and configure the flow control thresholds */
+ axgbe_calculate_flow_control_threshold(pdata);
+ axgbe_config_flow_control_threshold(pdata);
+}
+
+static void axgbe_config_tx_fifo_size(struct axgbe_port *pdata)
+{
+ unsigned int fifo_size;
+ unsigned int q_fifo_size;
+ unsigned int p_fifo, i;
+
+ fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->hw_feat.tx_fifo_size);
+ q_fifo_size = fifo_size / pdata->tx_q_count;
+
+ /* Calculate the fifo setting by dividing the queue's fifo size
+ * by the fifo allocation increment (with 0 representing the
+ * base allocation increment so decrement the result
+ * by 1).
+ */
+ p_fifo = q_fifo_size / AXGMAC_FIFO_UNIT;
+ if (p_fifo)
+ p_fifo--;
+
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, p_fifo);
+}
+
+static void axgbe_config_queue_mapping(struct axgbe_port *pdata)
+{
+ unsigned int qptc, qptc_extra, queue;
+ unsigned int i, j, reg, reg_val;
+
+ /* Map the MTL Tx Queues to Traffic Classes
+ * Note: Tx Queues >= Traffic Classes
+ */
+ qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+ qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
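+
+ /* Example: 10 Tx queues over 4 traffic classes gives qptc = 2
+ * and qptc_extra = 2, so TCs 0-1 are assigned three queues each
+ * and TCs 2-3 two queues each.
+ */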
+
+ for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+ for (j = 0; j < qptc; j++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ if (i < qptc_extra)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, queue++, MTL_Q_TQOMR,
+ Q2TCMAP, i);
+ }
+
+ if (pdata->rss_enable) {
+ /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
+ reg = MTL_RQDCM0R;
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count;) {
+ reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
+
+ if ((i % MTL_RQDCM_Q_PER_REG) &&
+ (i != pdata->rx_q_count))
+ continue;
+
+ AXGMAC_IOWRITE(pdata, reg, reg_val);
+
+ reg += MTL_RQDCM_INC;
+ reg_val = 0;
+ }
+ }
+}
+
+static void axgbe_enable_mtl_interrupts(struct axgbe_port *pdata)
+{
+ unsigned int mtl_q_isr;
+ unsigned int q_count, i;
+
+ q_count = RTE_MAX(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
+ for (i = 0; i < q_count; i++) {
+ /* Clear all the interrupts which are set */
+ mtl_q_isr = AXGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
+
+ /* No MTL interrupts to be enabled */
+ AXGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
+ }
+}
+
+static int axgbe_set_mac_address(struct axgbe_port *pdata, u8 *addr)
+{
+ unsigned int mac_addr_hi, mac_addr_lo;
+
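+ /* The address registers take the MAC in little-endian order:
+ * for 02:aa:bb:cc:dd:ee, mac_addr_lo = 0xccbbaa02 and the low
+ * 16 bits of mac_addr_hi = 0xeedd.
+ */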
+ mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
+ mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
+ (addr[1] << 8) | (addr[0] << 0);
+
+ AXGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
+ AXGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);
+
+ return 0;
+}
+
+static void axgbe_config_mac_address(struct axgbe_port *pdata)
+{
+ axgbe_set_mac_address(pdata, pdata->mac_addr.addr_bytes);
+}
+
+static void axgbe_config_jumbo_enable(struct axgbe_port *pdata)
+{
+ unsigned int val;
+
+ val = (pdata->rx_buf_size > AXGMAC_STD_PACKET_MTU) ? 1 : 0;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
+}
+
+static void axgbe_config_mac_speed(struct axgbe_port *pdata)
+{
+ axgbe_set_speed(pdata, pdata->phy_speed);
+}
+
+static void axgbe_config_checksum_offload(struct axgbe_port *pdata)
+{
+ if (pdata->rx_csum_enable)
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);
+ else
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);
+}
+
+static int axgbe_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ /* Flush Tx queues */
+ ret = axgbe_flush_tx_queues(pdata);
+ if (ret)
+ return ret;
+ /* Initialize DMA related features */
+ axgbe_config_dma_bus(pdata);
+ axgbe_config_dma_cache(pdata);
+ axgbe_config_edma_control(pdata);
+ axgbe_config_osp_mode(pdata);
+ axgbe_config_pblx8(pdata);
+ axgbe_config_tx_pbl_val(pdata);
+ axgbe_config_rx_pbl_val(pdata);
+ axgbe_config_rx_buffer_size(pdata);
+ axgbe_config_rss(pdata);
+ wrapper_tx_desc_init(pdata);
+ ret = wrapper_rx_desc_init(pdata);
+ if (ret)
+ return ret;
+ axgbe_enable_dma_interrupts(pdata);
+
+ /* Initialize MTL related features */
+ axgbe_config_mtl_mode(pdata);
+ axgbe_config_queue_mapping(pdata);
+ axgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
+ axgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
+ axgbe_config_tx_threshold(pdata, pdata->tx_threshold);
+ axgbe_config_rx_threshold(pdata, pdata->rx_threshold);
+ axgbe_config_tx_fifo_size(pdata);
+ axgbe_config_rx_fifo_size(pdata);
+
+ axgbe_enable_mtl_interrupts(pdata);
+
+ /* Initialize MAC related features */
+ axgbe_config_mac_address(pdata);
+ axgbe_config_jumbo_enable(pdata);
+ axgbe_config_flow_control(pdata);
+ axgbe_config_mac_speed(pdata);
+ axgbe_config_checksum_offload(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if)
+{
+ hw_if->exit = axgbe_exit;
+ hw_if->config_flow_control = axgbe_config_flow_control;
+
+ hw_if->init = axgbe_init;
+
+ hw_if->read_mmd_regs = axgbe_read_mmd_regs;
+ hw_if->write_mmd_regs = axgbe_write_mmd_regs;
+
+ hw_if->set_speed = axgbe_set_speed;
+
+ hw_if->set_ext_mii_mode = axgbe_set_ext_mii_mode;
+ hw_if->read_ext_mii_regs = axgbe_read_ext_mii_regs;
+ hw_if->write_ext_mii_regs = axgbe_write_ext_mii_regs;
+ /* For FLOW ctrl */
+ hw_if->config_tx_flow_control = axgbe_config_tx_flow_control;
+ hw_if->config_rx_flow_control = axgbe_config_rx_flow_control;
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.c b/drivers/net/axgbe/axgbe_ethdev.c
new file mode 100644
index 00000000..9ae9f063
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.c
@@ -0,0 +1,770 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_rxtx.h"
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static int eth_axgbe_dev_init(struct rte_eth_dev *eth_dev);
+static int eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev);
+static int axgbe_dev_configure(struct rte_eth_dev *dev);
+static int axgbe_dev_start(struct rte_eth_dev *dev);
+static void axgbe_dev_stop(struct rte_eth_dev *dev);
+static void axgbe_dev_interrupt_handler(void *param);
+static void axgbe_dev_close(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev);
+static void axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev);
+static int axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete);
+static int axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats);
+static void axgbe_dev_stats_reset(struct rte_eth_dev *dev);
+static void axgbe_dev_info_get(struct rte_eth_dev *dev,
+ struct rte_eth_dev_info *dev_info);
+
+/* The set of PCI devices this driver supports */
+#define AMD_PCI_VENDOR_ID 0x1022
+#define AMD_PCI_AXGBE_DEVICE_V2A 0x1458
+#define AMD_PCI_AXGBE_DEVICE_V2B 0x1459
+
+int axgbe_logtype_init;
+int axgbe_logtype_driver;
+
+static const struct rte_pci_id pci_id_axgbe_map[] = {
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2A)},
+ {RTE_PCI_DEVICE(AMD_PCI_VENDOR_ID, AMD_PCI_AXGBE_DEVICE_V2B)},
+ { .vendor_id = 0, },
+};
+
+static struct axgbe_version_data axgbe_v2a = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 229376,
+ .rx_max_fifo_size = 229376,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static struct axgbe_version_data axgbe_v2b = {
+ .init_function_ptrs_phy_impl = axgbe_init_function_ptrs_phy_v2,
+ .xpcs_access = AXGBE_XPCS_ACCESS_V2,
+ .mmc_64bit = 1,
+ .tx_max_fifo_size = 65536,
+ .rx_max_fifo_size = 65536,
+ .tx_tstamp_workaround = 1,
+ .ecc_support = 1,
+ .i2c_support = 1,
+ .an_cdr_workaround = 1,
+};
+
+static const struct rte_eth_desc_lim rx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct rte_eth_desc_lim tx_desc_lim = {
+ .nb_max = AXGBE_MAX_RING_DESC,
+ .nb_min = AXGBE_MIN_RING_DESC,
+ .nb_align = 8,
+};
+
+static const struct eth_dev_ops axgbe_eth_dev_ops = {
+ .dev_configure = axgbe_dev_configure,
+ .dev_start = axgbe_dev_start,
+ .dev_stop = axgbe_dev_stop,
+ .dev_close = axgbe_dev_close,
+ .promiscuous_enable = axgbe_dev_promiscuous_enable,
+ .promiscuous_disable = axgbe_dev_promiscuous_disable,
+ .allmulticast_enable = axgbe_dev_allmulticast_enable,
+ .allmulticast_disable = axgbe_dev_allmulticast_disable,
+ .link_update = axgbe_dev_link_update,
+ .stats_get = axgbe_dev_stats_get,
+ .stats_reset = axgbe_dev_stats_reset,
+ .dev_infos_get = axgbe_dev_info_get,
+ .rx_queue_setup = axgbe_dev_rx_queue_setup,
+ .rx_queue_release = axgbe_dev_rx_queue_release,
+ .tx_queue_setup = axgbe_dev_tx_queue_setup,
+ .tx_queue_release = axgbe_dev_tx_queue_release,
+};
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ pdata->phy_link = -1;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ return pdata->phy_if.phy_reset(pdata);
+}
+
+/*
+ * Interrupt handler triggered by the NIC to service
+ * device interrupts.
+ *
+ * @param handle
+ * Pointer to interrupt handle.
+ * @param param
+ * The address of parameter (struct rte_eth_dev *) registered before.
+ *
+ * @return
+ * void
+ */
+static void
+axgbe_dev_interrupt_handler(void *param)
+{
+ struct rte_eth_dev *dev = (struct rte_eth_dev *)param;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int dma_isr, dma_ch_isr;
+
+ pdata->phy_if.an_isr(pdata);
+ /* DMA related interrupts */
+ dma_isr = AXGMAC_IOREAD(pdata, DMA_ISR);
+ if (dma_isr) {
+ if (dma_isr & 1) {
+ dma_ch_isr =
+ AXGMAC_DMA_IOREAD((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR);
+ AXGMAC_DMA_IOWRITE((struct axgbe_rx_queue *)
+ pdata->rx_queues[0],
+ DMA_CH_SR, dma_ch_isr);
+ }
+ }
+ /* Re-enable interrupts since they are disabled after generation */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+}
+
+/*
+ * Configure the device.
+ * It returns 0 on success.
+ */
+static int
+axgbe_dev_configure(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ /* Checksum offload to hardware */
+ pdata->rx_csum_enable = dev->data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_CHECKSUM;
+ return 0;
+}
+
+static int
+axgbe_dev_rx_mq_config(struct rte_eth_dev *dev)
+{
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+
+ if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS)
+ pdata->rss_enable = 1;
+ else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE)
+ pdata->rss_enable = 0;
+ else
+ return -1;
+ return 0;
+}
+
+static int
+axgbe_dev_start(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = (struct axgbe_port *)dev->data->dev_private;
+ int ret;
+
+ /* Multiqueue RSS */
+ ret = axgbe_dev_rx_mq_config(dev);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "Unable to config RX MQ\n");
+ return ret;
+ }
+ ret = axgbe_phy_reset(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "phy reset failed\n");
+ return ret;
+ }
+ ret = pdata->hw_if.init(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "dev_init failed\n");
+ return ret;
+ }
+
+ /* enable uio/vfio intr/eventfd mapping */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+ /* PHY start */
+ pdata->phy_if.phy_start(pdata);
+ axgbe_dev_enable_tx(dev);
+ axgbe_dev_enable_rx(dev);
+
+ axgbe_clear_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_clear_bit(AXGBE_DOWN, &pdata->dev_state);
+ return 0;
+}
+
+/* Stop device: disable rx and tx functions to allow for reconfiguring. */
+static void
+axgbe_dev_stop(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ if (axgbe_test_bit(AXGBE_STOPPED, &pdata->dev_state))
+ return;
+
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ axgbe_dev_disable_tx(dev);
+ axgbe_dev_disable_rx(dev);
+
+ pdata->phy_if.phy_stop(pdata);
+ pdata->hw_if.exit(pdata);
+ memset(&dev->data->dev_link, 0, sizeof(struct rte_eth_link));
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+}
+
+/* Clear all resources like TX/RX queues. */
+static void
+axgbe_dev_close(struct rte_eth_dev *dev)
+{
+ axgbe_dev_clear_queues(dev);
+}
+
+static void
+axgbe_dev_promiscuous_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 1);
+}
+
+static void
+axgbe_dev_promiscuous_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, 0);
+}
+
+static void
+axgbe_dev_allmulticast_enable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 1);
+}
+
+static void
+axgbe_dev_allmulticast_disable(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ if (!AXGMAC_IOREAD_BITS(pdata, MAC_PFR, PM))
+ return;
+ AXGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, 0);
+}
+
+/* return 0 means link status changed, -1 means not changed */
+static int
+axgbe_dev_link_update(struct rte_eth_dev *dev,
+ int wait_to_complete __rte_unused)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+ struct rte_eth_link link;
+ int ret = 0;
+
+ PMD_INIT_FUNC_TRACE();
+ rte_delay_ms(800);
+
+ pdata->phy_if.phy_status(pdata);
+
+ memset(&link, 0, sizeof(struct rte_eth_link));
+ link.link_duplex = pdata->phy.duplex;
+ link.link_status = pdata->phy_link;
+ link.link_speed = pdata->phy_speed;
+ link.link_autoneg = !(dev->data->dev_conf.link_speeds &
+ ETH_LINK_SPEED_FIXED);
+ ret = rte_eth_linkstatus_set(dev, &link);
+ if (ret == -1)
+ PMD_DRV_LOG(ERR, "No change in link status\n");
+
+ return ret;
+}
+
+static int
+axgbe_dev_stats_get(struct rte_eth_dev *dev,
+ struct rte_eth_stats *stats)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ stats->q_ipackets[i] = rxq->pkts;
+ stats->ipackets += rxq->pkts;
+ stats->q_ibytes[i] = rxq->bytes;
+ stats->ibytes += rxq->bytes;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ stats->q_opackets[i] = txq->pkts;
+ stats->opackets += txq->pkts;
+ stats->q_obytes[i] = txq->bytes;
+ stats->obytes += txq->bytes;
+ }
+
+ return 0;
+}
+
+static void
+axgbe_dev_stats_reset(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ rxq->pkts = 0;
+ rxq->bytes = 0;
+ rxq->errors = 0;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ txq->pkts = 0;
+ txq->bytes = 0;
+ txq->errors = 0;
+ }
+}
+
+static void
+axgbe_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info)
+{
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+ dev_info->max_rx_queues = pdata->rx_ring_count;
+ dev_info->max_tx_queues = pdata->tx_ring_count;
+ dev_info->min_rx_bufsize = AXGBE_RX_MIN_BUF_SIZE;
+ dev_info->max_rx_pktlen = AXGBE_RX_MAX_BUF_SIZE;
+ dev_info->max_mac_addrs = AXGBE_MAX_MAC_ADDRS;
+ dev_info->speed_capa = ETH_LINK_SPEED_10G;
+
+ dev_info->rx_offload_capa =
+ DEV_RX_OFFLOAD_IPV4_CKSUM |
+ DEV_RX_OFFLOAD_UDP_CKSUM |
+ DEV_RX_OFFLOAD_TCP_CKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP |
+ DEV_RX_OFFLOAD_KEEP_CRC;
+
+ dev_info->tx_offload_capa =
+ DEV_TX_OFFLOAD_IPV4_CKSUM |
+ DEV_TX_OFFLOAD_UDP_CKSUM |
+ DEV_TX_OFFLOAD_TCP_CKSUM;
+
+ if (pdata->hw_feat.rss) {
+ dev_info->flow_type_rss_offloads = AXGBE_RSS_OFFLOAD;
+ dev_info->reta_size = pdata->hw_feat.hash_table_size;
+ dev_info->hash_key_size = AXGBE_RSS_HASH_KEY_SIZE;
+ }
+
+ dev_info->rx_desc_lim = rx_desc_lim;
+ dev_info->tx_desc_lim = tx_desc_lim;
+
+ dev_info->default_rxconf = (struct rte_eth_rxconf) {
+ .rx_free_thresh = AXGBE_RX_FREE_THRESH,
+ };
+
+ dev_info->default_txconf = (struct rte_eth_txconf) {
+ .tx_free_thresh = AXGBE_TX_FREE_THRESH,
+ };
+}
+
+static void axgbe_get_all_hw_features(struct axgbe_port *pdata)
+{
+ unsigned int mac_hfr0, mac_hfr1, mac_hfr2;
+ struct axgbe_hw_features *hw_feat = &pdata->hw_feat;
+
+ mac_hfr0 = AXGMAC_IOREAD(pdata, MAC_HWF0R);
+ mac_hfr1 = AXGMAC_IOREAD(pdata, MAC_HWF1R);
+ mac_hfr2 = AXGMAC_IOREAD(pdata, MAC_HWF2R);
+
+ memset(hw_feat, 0, sizeof(*hw_feat));
+
+ hw_feat->version = AXGMAC_IOREAD(pdata, MAC_VR);
+
+ /* Hardware feature register 0 */
+ hw_feat->gmii = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, GMIISEL);
+ hw_feat->vlhash = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, VLHASH);
+ hw_feat->sma = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SMASEL);
+ hw_feat->rwk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RWKSEL);
+ hw_feat->mgk = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MGKSEL);
+ hw_feat->mmc = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, MMCSEL);
+ hw_feat->aoe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, ARPOFFSEL);
+ hw_feat->ts = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSEL);
+ hw_feat->eee = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, EEESEL);
+ hw_feat->tx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TXCOESEL);
+ hw_feat->rx_coe = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, RXCOESEL);
+ hw_feat->addn_mac = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R,
+ ADDMACADRSEL);
+ hw_feat->ts_src = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, TSSTSSEL);
+ hw_feat->sa_vlan_ins = AXGMAC_GET_BITS(mac_hfr0, MAC_HWF0R, SAVLANINS);
+
+ /* Hardware feature register 1 */
+ hw_feat->rx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ RXFIFOSIZE);
+ hw_feat->tx_fifo_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ TXFIFOSIZE);
+ hw_feat->adv_ts_hi = AXGMAC_GET_BITS(mac_hfr1,
+ MAC_HWF1R, ADVTHWORD);
+ hw_feat->dma_width = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, ADDR64);
+ hw_feat->dcb = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DCBEN);
+ hw_feat->sph = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
+ hw_feat->tso = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
+ hw_feat->dma_debug = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+ hw_feat->rss = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, RSSEN);
+ hw_feat->tc_cnt = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
+ hw_feat->hash_table_size = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ HASHTBLSZ);
+ hw_feat->l3l4_filter_num = AXGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
+ L3L4FNUM);
+
+ /* Hardware feature register 2 */
+ hw_feat->rx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXQCNT);
+ hw_feat->tx_q_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXQCNT);
+ hw_feat->rx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, RXCHCNT);
+ hw_feat->tx_ch_cnt = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, TXCHCNT);
+ hw_feat->pps_out_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
+ hw_feat->aux_snap_num = AXGMAC_GET_BITS(mac_hfr2, MAC_HWF2R,
+ AUXSNAPNUM);
+
+ /* Translate the Hash Table size into actual number */
+ switch (hw_feat->hash_table_size) {
+ case 0:
+ break;
+ case 1:
+ hw_feat->hash_table_size = 64;
+ break;
+ case 2:
+ hw_feat->hash_table_size = 128;
+ break;
+ case 3:
+ hw_feat->hash_table_size = 256;
+ break;
+ }
+
+ /* Translate the address width setting into actual number */
+ switch (hw_feat->dma_width) {
+ case 0:
+ hw_feat->dma_width = 32;
+ break;
+ case 1:
+ hw_feat->dma_width = 40;
+ break;
+ case 2:
+ hw_feat->dma_width = 48;
+ break;
+ default:
+ hw_feat->dma_width = 32;
+ }
+
+ /* The Queue, Channel and TC counts are zero based so increment them
+ * to get the actual number
+ */
+ hw_feat->rx_q_cnt++;
+ hw_feat->tx_q_cnt++;
+ hw_feat->rx_ch_cnt++;
+ hw_feat->tx_ch_cnt++;
+ hw_feat->tc_cnt++;
+
+ /* Translate the fifo sizes into actual numbers */
+ hw_feat->rx_fifo_size = 1 << (hw_feat->rx_fifo_size + 7);
+ hw_feat->tx_fifo_size = 1 << (hw_feat->tx_fifo_size + 7);
+}
+
+static void axgbe_init_all_fptrs(struct axgbe_port *pdata)
+{
+ axgbe_init_function_ptrs_dev(&pdata->hw_if);
+ axgbe_init_function_ptrs_phy(&pdata->phy_if);
+ axgbe_init_function_ptrs_i2c(&pdata->i2c_if);
+ pdata->vdata->init_function_ptrs_phy_impl(&pdata->phy_if);
+}
+
+static void axgbe_set_counts(struct axgbe_port *pdata)
+{
+ /* Set all the function pointers */
+ axgbe_init_all_fptrs(pdata);
+
+ /* Populate the hardware features */
+ axgbe_get_all_hw_features(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_channel_count)
+ pdata->tx_max_channel_count = pdata->hw_feat.tx_ch_cnt;
+ if (!pdata->rx_max_channel_count)
+ pdata->rx_max_channel_count = pdata->hw_feat.rx_ch_cnt;
+
+ if (!pdata->tx_max_q_count)
+ pdata->tx_max_q_count = pdata->hw_feat.tx_q_cnt;
+ if (!pdata->rx_max_q_count)
+ pdata->rx_max_q_count = pdata->hw_feat.rx_q_cnt;
+
+ /* Calculate the number of Tx and Rx rings to be created
+ * -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+ * the number of Tx queues to the number of Tx channels
+ * enabled
+ * -Rx (DMA) Channels do not map 1-to-1 so use the actual
+ * number of Rx queues or maximum allowed
+ */
+ pdata->tx_ring_count = RTE_MIN(pdata->hw_feat.tx_ch_cnt,
+ pdata->tx_max_channel_count);
+ pdata->tx_ring_count = RTE_MIN(pdata->tx_ring_count,
+ pdata->tx_max_q_count);
+
+ pdata->tx_q_count = pdata->tx_ring_count;
+
+ pdata->rx_ring_count = RTE_MIN(pdata->hw_feat.rx_ch_cnt,
+ pdata->rx_max_channel_count);
+
+ pdata->rx_q_count = RTE_MIN(pdata->hw_feat.rx_q_cnt,
+ pdata->rx_max_q_count);
+}
+
+static void axgbe_default_config(struct axgbe_port *pdata)
+{
+ pdata->pblx8 = DMA_PBL_X8_ENABLE;
+ pdata->tx_sf_mode = MTL_TSF_ENABLE;
+ pdata->tx_threshold = MTL_TX_THRESHOLD_64;
+ pdata->tx_pbl = DMA_PBL_32;
+ pdata->tx_osp_mode = DMA_OSP_ENABLE;
+ pdata->rx_sf_mode = MTL_RSF_ENABLE;
+ pdata->rx_threshold = MTL_RX_THRESHOLD_64;
+ pdata->rx_pbl = DMA_PBL_32;
+ pdata->pause_autoneg = 1;
+ pdata->tx_pause = 0;
+ pdata->rx_pause = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ pdata->power_down = 0;
+}
+
+/*
+ * Ethernet device initialization; returns 0 on success.
+ */
+static int
+eth_axgbe_dev_init(struct rte_eth_dev *eth_dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ struct axgbe_port *pdata;
+ struct rte_pci_device *pci_dev;
+ uint32_t reg, mac_lo, mac_hi;
+ int ret;
+
+ eth_dev->dev_ops = &axgbe_eth_dev_ops;
+ eth_dev->rx_pkt_burst = &axgbe_recv_pkts;
+
+ /*
+ * For secondary processes, we don't initialise any further as primary
+ * has already done this work.
+ */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pdata = (struct axgbe_port *)eth_dev->data->dev_private;
+ /* initial state */
+ axgbe_set_bit(AXGBE_DOWN, &pdata->dev_state);
+ axgbe_set_bit(AXGBE_STOPPED, &pdata->dev_state);
+ pdata->eth_dev = eth_dev;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ pdata->pci_dev = pci_dev;
+
+ pdata->xgmac_regs =
+ (void *)pci_dev->mem_resource[AXGBE_AXGMAC_BAR].addr;
+ pdata->xprop_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_MAC_PROP_OFFSET);
+ pdata->xi2c_regs = (void *)((uint8_t *)pdata->xgmac_regs
+ + AXGBE_I2C_CTRL_OFFSET);
+ pdata->xpcs_regs = (void *)pci_dev->mem_resource[AXGBE_XPCS_BAR].addr;
+
+ /* Version specific driver data */
+ if (pci_dev->id.device_id == AMD_PCI_AXGBE_DEVICE_V2A)
+ pdata->vdata = &axgbe_v2a;
+ else
+ pdata->vdata = &axgbe_v2b;
+
+ /* Configure the PCS indirect addressing support */
+ reg = XPCS32_IOREAD(pdata, PCS_V2_WINDOW_DEF);
+ pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
+ pdata->xpcs_window <<= 6;
+ pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
+ pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
+ pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
+ pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
+ pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
+ PMD_INIT_LOG(DEBUG,
+ "xpcs window :%x, size :%x, mask :%x ", pdata->xpcs_window,
+ pdata->xpcs_window_size, pdata->xpcs_window_mask);
+ XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);
+
+ /* Retrieve the MAC address */
+ mac_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
+ mac_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
+ pdata->mac_addr.addr_bytes[0] = mac_lo & 0xff;
+ pdata->mac_addr.addr_bytes[1] = (mac_lo >> 8) & 0xff;
+ pdata->mac_addr.addr_bytes[2] = (mac_lo >> 16) & 0xff;
+ pdata->mac_addr.addr_bytes[3] = (mac_lo >> 24) & 0xff;
+ pdata->mac_addr.addr_bytes[4] = mac_hi & 0xff;
+ pdata->mac_addr.addr_bytes[5] = (mac_hi >> 8) & 0xff;
+
+ eth_dev->data->mac_addrs = rte_zmalloc("axgbe_mac_addr",
+ ETHER_ADDR_LEN, 0);
+ if (!eth_dev->data->mac_addrs) {
+ PMD_INIT_LOG(ERR,
+ "Failed to alloc %u bytes needed to store MAC addr tbl",
+ ETHER_ADDR_LEN);
+ return -ENOMEM;
+ }
+
+ if (!is_valid_assigned_ether_addr(&pdata->mac_addr))
+ eth_random_addr(pdata->mac_addr.addr_bytes);
+
+ /* Copy the permanent MAC address */
+ ether_addr_copy(&pdata->mac_addr, &eth_dev->data->mac_addrs[0]);
+
+ /* Clock settings */
+ pdata->sysclk_rate = AXGBE_V2_DMA_CLOCK_FREQ;
+ pdata->ptpclk_rate = AXGBE_V2_PTP_CLOCK_FREQ;
+
+ /* Set the DMA coherency values */
+ pdata->coherent = 1;
+ pdata->axdomain = AXGBE_DMA_OS_AXDOMAIN;
+ pdata->arcache = AXGBE_DMA_OS_ARCACHE;
+ pdata->awcache = AXGBE_DMA_OS_AWCACHE;
+
+ /* Set the maximum channels and queues */
+ reg = XP_IOREAD(pdata, XP_PROP_1);
+ pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
+ pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
+ pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
+ pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
+
+ /* Set the hardware channel and queue counts */
+ axgbe_set_counts(pdata);
+
+ /* Set the maximum fifo amounts */
+ reg = XP_IOREAD(pdata, XP_PROP_2);
+ pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
+ pdata->tx_max_fifo_size *= 16384;
+ pdata->tx_max_fifo_size = RTE_MIN(pdata->tx_max_fifo_size,
+ pdata->vdata->tx_max_fifo_size);
+ pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
+ pdata->rx_max_fifo_size *= 16384;
+ pdata->rx_max_fifo_size = RTE_MIN(pdata->rx_max_fifo_size,
+ pdata->vdata->rx_max_fifo_size);
+ /* Issue software reset to DMA */
+ ret = pdata->hw_if.exit(pdata);
+ if (ret)
+ PMD_DRV_LOG(ERR, "hw_if->exit EBUSY error\n");
+
+ /* Set default configuration data */
+ axgbe_default_config(pdata);
+
+ /* Set default max values if not provided */
+ if (!pdata->tx_max_fifo_size)
+ pdata->tx_max_fifo_size = pdata->hw_feat.tx_fifo_size;
+ if (!pdata->rx_max_fifo_size)
+ pdata->rx_max_fifo_size = pdata->hw_feat.rx_fifo_size;
+
+ pdata->tx_desc_count = AXGBE_MAX_RING_DESC;
+ pdata->rx_desc_count = AXGBE_MAX_RING_DESC;
+ pthread_mutex_init(&pdata->xpcs_mutex, NULL);
+ pthread_mutex_init(&pdata->i2c_mutex, NULL);
+ pthread_mutex_init(&pdata->an_mutex, NULL);
+ pthread_mutex_init(&pdata->phy_mutex, NULL);
+
+ ret = pdata->phy_if.phy_init(pdata);
+ if (ret) {
+ rte_free(eth_dev->data->mac_addrs);
+ return ret;
+ }
+
+ rte_intr_callback_register(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+ PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x",
+ eth_dev->data->port_id, pci_dev->id.vendor_id,
+ pci_dev->id.device_id);
+
+ return 0;
+}
+
+static int
+eth_axgbe_dev_uninit(struct rte_eth_dev *eth_dev)
+{
+ struct rte_pci_device *pci_dev;
+
+ PMD_INIT_FUNC_TRACE();
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ pci_dev = RTE_DEV_TO_PCI(eth_dev->device);
+ /* Free the MAC address storage */
+ rte_free(eth_dev->data->mac_addrs);
+ eth_dev->data->mac_addrs = NULL;
+ eth_dev->dev_ops = NULL;
+ eth_dev->rx_pkt_burst = NULL;
+ eth_dev->tx_pkt_burst = NULL;
+ axgbe_dev_clear_queues(eth_dev);
+
+ /* disable uio intr before callback unregister */
+ rte_intr_disable(&pci_dev->intr_handle);
+ rte_intr_callback_unregister(&pci_dev->intr_handle,
+ axgbe_dev_interrupt_handler,
+ (void *)eth_dev);
+
+ return 0;
+}
+
+static int eth_axgbe_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
+ struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_probe(pci_dev,
+ sizeof(struct axgbe_port), eth_axgbe_dev_init);
+}
+
+static int eth_axgbe_pci_remove(struct rte_pci_device *pci_dev)
+{
+ return rte_eth_dev_pci_generic_remove(pci_dev, eth_axgbe_dev_uninit);
+}
+
+static struct rte_pci_driver rte_axgbe_pmd = {
+ .id_table = pci_id_axgbe_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = eth_axgbe_pci_probe,
+ .remove = eth_axgbe_pci_remove,
+};
+
+RTE_PMD_REGISTER_PCI(net_axgbe, rte_axgbe_pmd);
+RTE_PMD_REGISTER_PCI_TABLE(net_axgbe, pci_id_axgbe_map);
+RTE_PMD_REGISTER_KMOD_DEP(net_axgbe, "* igb_uio | uio_pci_generic | vfio-pci");
+
+RTE_INIT(axgbe_init_log)
+{
+ axgbe_logtype_init = rte_log_register("pmd.net.axgbe.init");
+ if (axgbe_logtype_init >= 0)
+ rte_log_set_level(axgbe_logtype_init, RTE_LOG_NOTICE);
+ axgbe_logtype_driver = rte_log_register("pmd.net.axgbe.driver");
+ if (axgbe_logtype_driver >= 0)
+ rte_log_set_level(axgbe_logtype_driver, RTE_LOG_NOTICE);
+}
diff --git a/drivers/net/axgbe/axgbe_ethdev.h b/drivers/net/axgbe/axgbe_ethdev.h
new file mode 100644
index 00000000..b1cd2980
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_ethdev.h
@@ -0,0 +1,586 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef RTE_ETH_AXGBE_H_
+#define RTE_ETH_AXGBE_H_
+
+#include <rte_mempool.h>
+#include <rte_lcore.h>
+#include "axgbe_common.h"
+
+#define IRQ 0xff
+#define VLAN_HLEN 4
+
+#define AXGBE_TX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MAX_BUF_SIZE (0x3fff & ~(64 - 1))
+#define AXGBE_RX_MIN_BUF_SIZE (ETHER_MAX_LEN + VLAN_HLEN)
+#define AXGBE_MAX_MAC_ADDRS 1
+
+#define AXGBE_RX_BUF_ALIGN 64
+
+#define AXGBE_MAX_DMA_CHANNELS 16
+#define AXGBE_MAX_QUEUES 16
+#define AXGBE_PRIORITY_QUEUES 8
+#define AXGBE_DMA_STOP_TIMEOUT 1
+
+/* DMA cache settings - Outer sharable, write-back, write-allocate */
+#define AXGBE_DMA_OS_AXDOMAIN 0x2
+#define AXGBE_DMA_OS_ARCACHE 0xb
+#define AXGBE_DMA_OS_AWCACHE 0xf
+
+/* DMA cache settings - System, no caches used */
+#define AXGBE_DMA_SYS_AXDOMAIN 0x3
+#define AXGBE_DMA_SYS_ARCACHE 0x0
+#define AXGBE_DMA_SYS_AWCACHE 0x0
+
+/* DMA channel interrupt modes */
+#define AXGBE_IRQ_MODE_EDGE 0
+#define AXGBE_IRQ_MODE_LEVEL 1
+
+#define AXGBE_DMA_INTERRUPT_MASK 0x31c7
+
+#define AXGMAC_MIN_PACKET 60
+#define AXGMAC_STD_PACKET_MTU 1500
+#define AXGMAC_MAX_STD_PACKET 1518
+#define AXGMAC_JUMBO_PACKET_MTU 9000
+#define AXGMAC_MAX_JUMBO_PACKET 9018
+/* Inter-frame gap + preamble */
+#define AXGMAC_ETH_PREAMBLE (12 + 8)
+
+#define AXGMAC_PFC_DATA_LEN 46
+#define AXGMAC_PFC_DELAYS 14000
+
+/* PCI BAR mapping */
+#define AXGBE_AXGMAC_BAR 0
+#define AXGBE_XPCS_BAR 1
+#define AXGBE_MAC_PROP_OFFSET 0x1d000
+#define AXGBE_I2C_CTRL_OFFSET 0x1e000
+
+/* PCI clock frequencies */
+#define AXGBE_V2_DMA_CLOCK_FREQ 500000000
+#define AXGBE_V2_PTP_CLOCK_FREQ 125000000
+
+#define AXGMAC_FIFO_MIN_ALLOC 2048
+#define AXGMAC_FIFO_UNIT 256
+#define AXGMAC_FIFO_ALIGN(_x) \
+ (((_x) + AXGMAC_FIFO_UNIT - 1) & ~(AXGMAC_FIFO_UNIT - 1))
+#define AXGMAC_FIFO_FC_OFF 2048
+#define AXGMAC_FIFO_FC_MIN 4096
+
+#define AXGBE_TC_MIN_QUANTUM 10
+
+/* Flow control queue count */
+#define AXGMAC_MAX_FLOW_CONTROL_QUEUES 8
+
+/* Flow control threshold units */
+#define AXGMAC_FLOW_CONTROL_UNIT 512
+#define AXGMAC_FLOW_CONTROL_ALIGN(_x) \
+ (((_x) + AXGMAC_FLOW_CONTROL_UNIT - 1) & \
+ ~(AXGMAC_FLOW_CONTROL_UNIT - 1))
+#define AXGMAC_FLOW_CONTROL_VALUE(_x) \
+ (((_x) < 1024) ? 0 : ((_x) / AXGMAC_FLOW_CONTROL_UNIT) - 2)
+#define AXGMAC_FLOW_CONTROL_MAX 33280
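+
+/* Example: AXGMAC_FLOW_CONTROL_VALUE(4096) = 4096 / 512 - 2 = 6, the
+ * zero-based encoding for a 4 KB threshold; thresholds below 1024
+ * bytes encode as 0.
+ */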
+
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define AXGBE_MAC_HASH_TABLE_SIZE 8
+
+/* Receive Side Scaling */
+#define AXGBE_RSS_OFFLOAD ( \
+ ETH_RSS_IPV4 | \
+ ETH_RSS_NONFRAG_IPV4_TCP | \
+ ETH_RSS_NONFRAG_IPV4_UDP | \
+ ETH_RSS_IPV6 | \
+ ETH_RSS_NONFRAG_IPV6_TCP | \
+ ETH_RSS_NONFRAG_IPV6_UDP)
+
+#define AXGBE_RSS_HASH_KEY_SIZE 40
+#define AXGBE_RSS_MAX_TABLE_SIZE 256
+#define AXGBE_RSS_LOOKUP_TABLE_TYPE 0
+#define AXGBE_RSS_HASH_KEY_TYPE 1
+
+/* Auto-negotiation */
+#define AXGBE_AN_MS_TIMEOUT 500
+#define AXGBE_LINK_TIMEOUT 5
+
+#define AXGBE_SGMII_AN_LINK_STATUS BIT(1)
+#define AXGBE_SGMII_AN_LINK_SPEED (BIT(2) | BIT(3))
+#define AXGBE_SGMII_AN_LINK_SPEED_100 0x04
+#define AXGBE_SGMII_AN_LINK_SPEED_1000 0x08
+#define AXGBE_SGMII_AN_LINK_DUPLEX BIT(4)
+
+/* ECC correctable error notification window (seconds) */
+#define AXGBE_ECC_LIMIT 60
+
+/* MDIO port types */
+#define AXGMAC_MAX_C22_PORT 3
+
+/* Helper macro for descriptor handling
+ * Always use AXGBE_GET_DESC_DATA to access the descriptor data
+ * since the index is free-running and needs to be and-ed
+ * with the descriptor count value of the ring to index to
+ * the proper descriptor data.
+ */
+#define AXGBE_GET_DESC_DATA(_ring, _idx) \
+ ((_ring)->rdata + \
+ ((_idx) & ((_ring)->rdesc_count - 1)))
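+/* Example: with rdesc_count = 256 (a power of two), a free-running
+ * index of 260 resolves to rdata[4], so callers never have to wrap
+ * the index themselves.
+ */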
+
+struct axgbe_port;
+
+enum axgbe_state {
+ AXGBE_DOWN,
+ AXGBE_LINK_INIT,
+ AXGBE_LINK_ERR,
+ AXGBE_STOPPED,
+};
+
+enum axgbe_int {
+ AXGMAC_INT_DMA_CH_SR_TI,
+ AXGMAC_INT_DMA_CH_SR_TPS,
+ AXGMAC_INT_DMA_CH_SR_TBU,
+ AXGMAC_INT_DMA_CH_SR_RI,
+ AXGMAC_INT_DMA_CH_SR_RBU,
+ AXGMAC_INT_DMA_CH_SR_RPS,
+ AXGMAC_INT_DMA_CH_SR_TI_RI,
+ AXGMAC_INT_DMA_CH_SR_FBE,
+ AXGMAC_INT_DMA_ALL,
+};
+
+enum axgbe_int_state {
+ AXGMAC_INT_STATE_SAVE,
+ AXGMAC_INT_STATE_RESTORE,
+};
+
+enum axgbe_ecc_sec {
+ AXGBE_ECC_SEC_TX,
+ AXGBE_ECC_SEC_RX,
+ AXGBE_ECC_SEC_DESC,
+};
+
+enum axgbe_speed {
+ AXGBE_SPEED_1000 = 0,
+ AXGBE_SPEED_2500,
+ AXGBE_SPEED_10000,
+ AXGBE_SPEEDS,
+};
+
+enum axgbe_xpcs_access {
+ AXGBE_XPCS_ACCESS_V1 = 0,
+ AXGBE_XPCS_ACCESS_V2,
+};
+
+enum axgbe_an_mode {
+ AXGBE_AN_MODE_CL73 = 0,
+ AXGBE_AN_MODE_CL73_REDRV,
+ AXGBE_AN_MODE_CL37,
+ AXGBE_AN_MODE_CL37_SGMII,
+ AXGBE_AN_MODE_NONE,
+};
+
+enum axgbe_an {
+ AXGBE_AN_READY = 0,
+ AXGBE_AN_PAGE_RECEIVED,
+ AXGBE_AN_INCOMPAT_LINK,
+ AXGBE_AN_COMPLETE,
+ AXGBE_AN_NO_LINK,
+ AXGBE_AN_ERROR,
+};
+
+enum axgbe_rx {
+ AXGBE_RX_BPA = 0,
+ AXGBE_RX_XNP,
+ AXGBE_RX_COMPLETE,
+ AXGBE_RX_ERROR,
+};
+
+enum axgbe_mode {
+ AXGBE_MODE_KX_1000 = 0,
+ AXGBE_MODE_KX_2500,
+ AXGBE_MODE_KR,
+ AXGBE_MODE_X,
+ AXGBE_MODE_SGMII_100,
+ AXGBE_MODE_SGMII_1000,
+ AXGBE_MODE_SFI,
+ AXGBE_MODE_UNKNOWN,
+};
+
+enum axgbe_speedset {
+ AXGBE_SPEEDSET_1000_10000 = 0,
+ AXGBE_SPEEDSET_2500_10000,
+};
+
+enum axgbe_mdio_mode {
+ AXGBE_MDIO_MODE_NONE = 0,
+ AXGBE_MDIO_MODE_CL22,
+ AXGBE_MDIO_MODE_CL45,
+};
+
+struct axgbe_phy {
+ uint32_t supported;
+ uint32_t advertising;
+ uint32_t lp_advertising;
+
+ int address;
+
+ int autoneg;
+ int speed;
+ int duplex;
+
+ int link;
+
+ int pause_autoneg;
+ int tx_pause;
+ int rx_pause;
+};
+
+enum axgbe_i2c_cmd {
+ AXGBE_I2C_CMD_READ = 0,
+ AXGBE_I2C_CMD_WRITE,
+};
+
+struct axgbe_i2c_op {
+ enum axgbe_i2c_cmd cmd;
+
+ unsigned int target;
+
+ uint8_t *buf;
+ unsigned int len;
+};
+
+struct axgbe_i2c_op_state {
+ struct axgbe_i2c_op *op;
+
+ unsigned int tx_len;
+ unsigned char *tx_buf;
+
+ unsigned int rx_len;
+ unsigned char *rx_buf;
+
+ unsigned int tx_abort_source;
+
+ int ret;
+};
+
+struct axgbe_i2c {
+ unsigned int started;
+ unsigned int max_speed_mode;
+ unsigned int rx_fifo_size;
+ unsigned int tx_fifo_size;
+
+ struct axgbe_i2c_op_state op_state;
+};
+
+struct axgbe_hw_if {
+ void (*config_flow_control)(struct axgbe_port *);
+ int (*config_rx_mode)(struct axgbe_port *);
+
+ int (*init)(struct axgbe_port *);
+
+ int (*read_mmd_regs)(struct axgbe_port *, int, int);
+ void (*write_mmd_regs)(struct axgbe_port *, int, int, int);
+ int (*set_speed)(struct axgbe_port *, int);
+
+ int (*set_ext_mii_mode)(struct axgbe_port *, unsigned int,
+ enum axgbe_mdio_mode);
+ int (*read_ext_mii_regs)(struct axgbe_port *, int, int);
+ int (*write_ext_mii_regs)(struct axgbe_port *, int, int, uint16_t);
+
+ /* For FLOW ctrl */
+ int (*config_tx_flow_control)(struct axgbe_port *);
+ int (*config_rx_flow_control)(struct axgbe_port *);
+
+ int (*exit)(struct axgbe_port *);
+};
+
+/* This structure represents implementation specific routines for an
+ * implementation of a PHY. All routines are required unless noted below.
+ * Optional routines:
+ *   an_pre, an_post
+ *   kr_training_pre, kr_training_post
+ */
+struct axgbe_phy_impl_if {
+ /* Perform Setup/teardown actions */
+ int (*init)(struct axgbe_port *);
+ void (*exit)(struct axgbe_port *);
+
+ /* Perform start/stop specific actions */
+ int (*reset)(struct axgbe_port *);
+ int (*start)(struct axgbe_port *);
+ void (*stop)(struct axgbe_port *);
+
+ /* Return the link status */
+ int (*link_status)(struct axgbe_port *, int *);
+
+ /* Indicate if a particular speed is valid */
+ int (*valid_speed)(struct axgbe_port *, int);
+
+ /* Check if the specified mode can/should be used */
+ bool (*use_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Switch the PHY into various modes */
+ void (*set_mode)(struct axgbe_port *, enum axgbe_mode);
+ /* Retrieve mode needed for a specific speed */
+ enum axgbe_mode (*get_mode)(struct axgbe_port *, int);
+ /* Retrieve new/next mode when trying to auto-negotiate */
+ enum axgbe_mode (*switch_mode)(struct axgbe_port *);
+ /* Retrieve current mode */
+ enum axgbe_mode (*cur_mode)(struct axgbe_port *);
+
+ /* Retrieve current auto-negotiation mode */
+ enum axgbe_an_mode (*an_mode)(struct axgbe_port *);
+
+ /* Configure auto-negotiation settings */
+ int (*an_config)(struct axgbe_port *);
+
+ /* Set/override auto-negotiation advertisement settings */
+ unsigned int (*an_advertising)(struct axgbe_port *port);
+
+ /* Process results of auto-negotiation */
+ enum axgbe_mode (*an_outcome)(struct axgbe_port *);
+
+ /* Pre/Post auto-negotiation support */
+ void (*an_pre)(struct axgbe_port *port);
+ void (*an_post)(struct axgbe_port *port);
+
+ /* Pre/Post KR training enablement support */
+ void (*kr_training_pre)(struct axgbe_port *);
+ void (*kr_training_post)(struct axgbe_port *);
+};
+
+struct axgbe_phy_if {
+ /* For PHY setup/teardown */
+ int (*phy_init)(struct axgbe_port *);
+ void (*phy_exit)(struct axgbe_port *);
+
+ /* For PHY support when setting device up/down */
+ int (*phy_reset)(struct axgbe_port *);
+ int (*phy_start)(struct axgbe_port *);
+ void (*phy_stop)(struct axgbe_port *);
+
+ /* For PHY support while device is up */
+ void (*phy_status)(struct axgbe_port *);
+ int (*phy_config_aneg)(struct axgbe_port *);
+
+ /* For PHY settings validation */
+ int (*phy_valid_speed)(struct axgbe_port *, int);
+ /* For single interrupt support */
+ void (*an_isr)(struct axgbe_port *);
+ /* PHY implementation specific services */
+ struct axgbe_phy_impl_if phy_impl;
+};
+
+struct axgbe_i2c_if {
+ /* For initial I2C setup */
+ int (*i2c_init)(struct axgbe_port *);
+
+ /* For I2C support when setting device up/down */
+ int (*i2c_start)(struct axgbe_port *);
+ void (*i2c_stop)(struct axgbe_port *);
+
+ /* For performing I2C operations */
+ int (*i2c_xfer)(struct axgbe_port *, struct axgbe_i2c_op *);
+};
+
+/* This structure contains flags that indicate what hardware features
+ * or configurations are present in the device.
+ */
+struct axgbe_hw_features {
+ /* HW Version */
+ unsigned int version;
+
+ /* HW Feature Register0 */
+ unsigned int gmii; /* 1000 Mbps support */
+ unsigned int vlhash; /* VLAN Hash Filter */
+ unsigned int sma; /* SMA(MDIO) Interface */
+ unsigned int rwk; /* PMT remote wake-up packet */
+ unsigned int mgk; /* PMT magic packet */
+ unsigned int mmc; /* RMON module */
+ unsigned int aoe; /* ARP Offload */
+ unsigned int ts; /* IEEE 1588-2008 Advanced Timestamp */
+ unsigned int eee; /* Energy Efficient Ethernet */
+ unsigned int tx_coe; /* Tx Checksum Offload */
+ unsigned int rx_coe; /* Rx Checksum Offload */
+ unsigned int addn_mac; /* Additional MAC Addresses */
+ unsigned int ts_src; /* Timestamp Source */
+ unsigned int sa_vlan_ins; /* Source Address or VLAN Insertion */
+
+ /* HW Feature Register1 */
+ unsigned int rx_fifo_size; /* MTL Receive FIFO Size */
+ unsigned int tx_fifo_size; /* MTL Transmit FIFO Size */
+	unsigned int adv_ts_hi;	/* Advanced Timestamping High Word */
+ unsigned int dma_width; /* DMA width */
+ unsigned int dcb; /* DCB Feature */
+ unsigned int sph; /* Split Header Feature */
+ unsigned int tso; /* TCP Segmentation Offload */
+ unsigned int dma_debug; /* DMA Debug Registers */
+ unsigned int rss; /* Receive Side Scaling */
+ unsigned int tc_cnt; /* Number of Traffic Classes */
+ unsigned int hash_table_size; /* Hash Table Size */
+ unsigned int l3l4_filter_num; /* Number of L3-L4 Filters */
+
+ /* HW Feature Register2 */
+ unsigned int rx_q_cnt; /* Number of MTL Receive Queues */
+ unsigned int tx_q_cnt; /* Number of MTL Transmit Queues */
+ unsigned int rx_ch_cnt; /* Number of DMA Receive Channels */
+ unsigned int tx_ch_cnt; /* Number of DMA Transmit Channels */
+ unsigned int pps_out_num; /* Number of PPS outputs */
+ unsigned int aux_snap_num; /* Number of Aux snapshot inputs */
+};
+
+struct axgbe_version_data {
+ void (*init_function_ptrs_phy_impl)(struct axgbe_phy_if *);
+ enum axgbe_xpcs_access xpcs_access;
+ unsigned int mmc_64bit;
+ unsigned int tx_max_fifo_size;
+ unsigned int rx_max_fifo_size;
+ unsigned int tx_tstamp_workaround;
+ unsigned int ecc_support;
+ unsigned int i2c_support;
+ unsigned int an_cdr_workaround;
+};
+
+/*
+ * Structure to store private data for each port.
+ */
+struct axgbe_port {
+	/* Ethdev this port belongs to */
+ struct rte_eth_dev *eth_dev;
+ /* Pci dev info */
+ const struct rte_pci_device *pci_dev;
+ /* Version related data */
+ struct axgbe_version_data *vdata;
+
+ /* AXGMAC/XPCS related mmio registers */
+ void *xgmac_regs; /* AXGMAC CSRs */
+ void *xpcs_regs; /* XPCS MMD registers */
+ void *xprop_regs; /* AXGBE property registers */
+ void *xi2c_regs; /* AXGBE I2C CSRs */
+
+ bool cdr_track_early;
+ /* XPCS indirect addressing lock */
+ unsigned int xpcs_window_def_reg;
+ unsigned int xpcs_window_sel_reg;
+ unsigned int xpcs_window;
+ unsigned int xpcs_window_size;
+ unsigned int xpcs_window_mask;
+
+ /* Flags representing axgbe_state */
+ unsigned long dev_state;
+
+ struct axgbe_hw_if hw_if;
+ struct axgbe_phy_if phy_if;
+ struct axgbe_i2c_if i2c_if;
+
+ /* AXI DMA settings */
+ unsigned int coherent;
+ unsigned int axdomain;
+ unsigned int arcache;
+ unsigned int awcache;
+
+ unsigned int tx_max_channel_count;
+ unsigned int rx_max_channel_count;
+ unsigned int channel_count;
+ unsigned int tx_ring_count;
+ unsigned int tx_desc_count;
+ unsigned int rx_ring_count;
+ unsigned int rx_desc_count;
+
+ unsigned int tx_max_q_count;
+ unsigned int rx_max_q_count;
+ unsigned int tx_q_count;
+ unsigned int rx_q_count;
+
+ /* Tx/Rx common settings */
+ unsigned int pblx8;
+
+ /* Tx settings */
+ unsigned int tx_sf_mode;
+ unsigned int tx_threshold;
+ unsigned int tx_pbl;
+ unsigned int tx_osp_mode;
+ unsigned int tx_max_fifo_size;
+
+ /* Rx settings */
+ unsigned int rx_sf_mode;
+ unsigned int rx_threshold;
+ unsigned int rx_pbl;
+ unsigned int rx_max_fifo_size;
+ unsigned int rx_buf_size;
+
+ /* Device clocks */
+ unsigned long sysclk_rate;
+ unsigned long ptpclk_rate;
+
+ /* Keeps track of power mode */
+ unsigned int power_down;
+
+ /* Current PHY settings */
+ int phy_link;
+ int phy_speed;
+
+ pthread_mutex_t xpcs_mutex;
+ pthread_mutex_t i2c_mutex;
+ pthread_mutex_t an_mutex;
+ pthread_mutex_t phy_mutex;
+
+ /* Flow control settings */
+ unsigned int pause_autoneg;
+ unsigned int tx_pause;
+ unsigned int rx_pause;
+ unsigned int rx_rfa[AXGBE_MAX_QUEUES];
+ unsigned int rx_rfd[AXGBE_MAX_QUEUES];
+ unsigned int fifo;
+
+ /* Receive Side Scaling settings */
+ u8 rss_key[AXGBE_RSS_HASH_KEY_SIZE];
+ uint32_t rss_table[AXGBE_RSS_MAX_TABLE_SIZE];
+ uint32_t rss_options;
+ int rss_enable;
+
+ /* Hardware features of the device */
+ struct axgbe_hw_features hw_feat;
+
+ struct ether_addr mac_addr;
+
+	/* Software Tx/Rx structure pointers */
+ void **rx_queues;
+ void **tx_queues;
+
+ /* MDIO/PHY related settings */
+ unsigned int phy_started;
+ void *phy_data;
+ struct axgbe_phy phy;
+ int mdio_mmd;
+ unsigned long link_check;
+ volatile int mdio_completion;
+
+ unsigned int kr_redrv;
+
+	/* Auto-negotiation state machine support */
+ unsigned int an_int;
+ unsigned int an_status;
+ enum axgbe_an an_result;
+ enum axgbe_an an_state;
+ enum axgbe_rx kr_state;
+ enum axgbe_rx kx_state;
+ unsigned int an_supported;
+ unsigned int parallel_detect;
+ unsigned int fec_ability;
+ unsigned long an_start;
+ enum axgbe_an_mode an_mode;
+
+ /* I2C support */
+ struct axgbe_i2c i2c;
+ volatile int i2c_complete;
+
+	/* CRC stripping by H/w for Rx packets */
+	int crc_strip_enable;
+	/* Rx checksum offload enabled in hardware */
+	uint32_t rx_csum_enable;
+};
+
+void axgbe_init_function_ptrs_dev(struct axgbe_hw_if *hw_if);
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if);
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if);
+
+#endif /* RTE_ETH_AXGBE_H_ */
diff --git a/drivers/net/axgbe/axgbe_i2c.c b/drivers/net/axgbe/axgbe_i2c.c
new file mode 100644
index 00000000..204ec367
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_i2c.c
@@ -0,0 +1,331 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+
+#define AXGBE_ABORT_COUNT 500
+#define AXGBE_DISABLE_COUNT 1000
+
+#define AXGBE_STD_SPEED 1
+
+#define AXGBE_INTR_RX_FULL BIT(IC_RAW_INTR_STAT_RX_FULL_INDEX)
+#define AXGBE_INTR_TX_EMPTY BIT(IC_RAW_INTR_STAT_TX_EMPTY_INDEX)
+#define AXGBE_INTR_TX_ABRT BIT(IC_RAW_INTR_STAT_TX_ABRT_INDEX)
+#define AXGBE_INTR_STOP_DET BIT(IC_RAW_INTR_STAT_STOP_DET_INDEX)
+#define AXGBE_DEFAULT_INT_MASK (AXGBE_INTR_RX_FULL | \
+ AXGBE_INTR_TX_EMPTY | \
+ AXGBE_INTR_TX_ABRT | \
+ AXGBE_INTR_STOP_DET)
+
+#define AXGBE_I2C_READ BIT(8)
+#define AXGBE_I2C_STOP BIT(9)
+
+static int axgbe_i2c_abort(struct axgbe_port *pdata)
+{
+ unsigned int wait = AXGBE_ABORT_COUNT;
+
+ /* Must be enabled to recognize the abort request */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, 1);
+
+ /* Issue the abort */
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, ABORT, 1);
+
+ while (wait--) {
+ if (!XI2C_IOREAD_BITS(pdata, IC_ENABLE, ABORT))
+ return 0;
+ rte_delay_us(500);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_set_enable(struct axgbe_port *pdata, bool enable)
+{
+ unsigned int wait = AXGBE_DISABLE_COUNT;
+ unsigned int mode = enable ? 1 : 0;
+
+ while (wait--) {
+ XI2C_IOWRITE_BITS(pdata, IC_ENABLE, EN, mode);
+ if (XI2C_IOREAD_BITS(pdata, IC_ENABLE_STATUS, EN) == mode)
+ return 0;
+
+ rte_delay_us(100);
+ }
+
+ return -EBUSY;
+}
+
+static int axgbe_i2c_disable(struct axgbe_port *pdata)
+{
+	int ret;
+
+ ret = axgbe_i2c_set_enable(pdata, false);
+ if (ret) {
+ /* Disable failed, try an abort */
+ ret = axgbe_i2c_abort(pdata);
+ if (ret)
+ return ret;
+
+ /* Abort succeeded, try to disable again */
+ ret = axgbe_i2c_set_enable(pdata, false);
+ }
+
+ return ret;
+}
+
+static int axgbe_i2c_enable(struct axgbe_port *pdata)
+{
+ return axgbe_i2c_set_enable(pdata, true);
+}
+
+static void axgbe_i2c_clear_all_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOREAD(pdata, IC_CLR_INTR);
+}
+
+static void axgbe_i2c_disable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, 0);
+}
+
+static void axgbe_i2c_enable_interrupts(struct axgbe_port *pdata)
+{
+ XI2C_IOWRITE(pdata, IC_INTR_MASK, AXGBE_DEFAULT_INT_MASK);
+}
+
+static void axgbe_i2c_write(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int tx_slots;
+ unsigned int cmd;
+
+ /* Configured to never receive Rx overflows, so fill up Tx fifo */
+ tx_slots = pdata->i2c.tx_fifo_size - XI2C_IOREAD(pdata, IC_TXFLR);
+ while (tx_slots && state->tx_len) {
+ if (state->op->cmd == AXGBE_I2C_CMD_READ)
+ cmd = AXGBE_I2C_READ;
+ else
+ cmd = *state->tx_buf++;
+
+ if (state->tx_len == 1)
+ XI2C_SET_BITS(cmd, IC_DATA_CMD, STOP, 1);
+
+ XI2C_IOWRITE(pdata, IC_DATA_CMD, cmd);
+
+ tx_slots--;
+ state->tx_len--;
+ }
+
+ /* No more Tx operations, so ignore TX_EMPTY and return */
+ if (!state->tx_len)
+ XI2C_IOWRITE_BITS(pdata, IC_INTR_MASK, TX_EMPTY, 0);
+}
+
+static void axgbe_i2c_read(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int rx_slots;
+
+ /* Anything to be read? */
+ if (state->op->cmd != AXGBE_I2C_CMD_READ)
+ return;
+
+ rx_slots = XI2C_IOREAD(pdata, IC_RXFLR);
+ while (rx_slots && state->rx_len) {
+ *state->rx_buf++ = XI2C_IOREAD(pdata, IC_DATA_CMD);
+ state->rx_len--;
+ rx_slots--;
+ }
+}
+
+static void axgbe_i2c_clear_isr_interrupts(struct axgbe_port *pdata,
+ unsigned int isr)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ state->tx_abort_source = XI2C_IOREAD(pdata, IC_TX_ABRT_SOURCE);
+ XI2C_IOREAD(pdata, IC_CLR_TX_ABRT);
+ }
+
+ if (isr & AXGBE_INTR_STOP_DET)
+ XI2C_IOREAD(pdata, IC_CLR_STOP_DET);
+}
+
+static int axgbe_i2c_isr(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ unsigned int isr;
+
+ isr = XI2C_IOREAD(pdata, IC_RAW_INTR_STAT);
+
+ axgbe_i2c_clear_isr_interrupts(pdata, isr);
+
+ if (isr & AXGBE_INTR_TX_ABRT) {
+ axgbe_i2c_disable_interrupts(pdata);
+
+ state->ret = -EIO;
+ goto out;
+ }
+
+ /* Check for data in the Rx fifo */
+ axgbe_i2c_read(pdata);
+
+ /* Fill up the Tx fifo next */
+ axgbe_i2c_write(pdata);
+
+out:
+ /* Complete on an error or STOP condition */
+ if (state->ret || XI2C_GET_BITS(isr, IC_RAW_INTR_STAT, STOP_DET))
+ return 1;
+
+ return 0;
+}
+
+static void axgbe_i2c_set_mode(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_CON);
+ XI2C_SET_BITS(reg, IC_CON, MASTER_MODE, 1);
+ XI2C_SET_BITS(reg, IC_CON, SLAVE_DISABLE, 1);
+ XI2C_SET_BITS(reg, IC_CON, RESTART_EN, 1);
+ XI2C_SET_BITS(reg, IC_CON, SPEED, AXGBE_STD_SPEED);
+ XI2C_SET_BITS(reg, IC_CON, RX_FIFO_FULL_HOLD, 1);
+ XI2C_IOWRITE(pdata, IC_CON, reg);
+}
+
+static void axgbe_i2c_get_features(struct axgbe_port *pdata)
+{
+ struct axgbe_i2c *i2c = &pdata->i2c;
+ unsigned int reg;
+
+ reg = XI2C_IOREAD(pdata, IC_COMP_PARAM_1);
+ i2c->max_speed_mode = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ MAX_SPEED_MODE);
+ i2c->rx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ RX_BUFFER_DEPTH);
+ i2c->tx_fifo_size = XI2C_GET_BITS(reg, IC_COMP_PARAM_1,
+ TX_BUFFER_DEPTH);
+}
+
+static void axgbe_i2c_set_target(struct axgbe_port *pdata, unsigned int addr)
+{
+ XI2C_IOWRITE(pdata, IC_TAR, addr);
+}
+
+static int axgbe_i2c_xfer(struct axgbe_port *pdata, struct axgbe_i2c_op *op)
+{
+ struct axgbe_i2c_op_state *state = &pdata->i2c.op_state;
+ int ret;
+ uint64_t timeout;
+
+ pthread_mutex_lock(&pdata->i2c_mutex);
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+		goto unlock;
+ }
+
+ axgbe_i2c_set_target(pdata, op->target);
+
+ memset(state, 0, sizeof(*state));
+ state->op = op;
+ state->tx_len = op->len;
+ state->tx_buf = (unsigned char *)op->buf;
+ state->rx_len = op->len;
+ state->rx_buf = (unsigned char *)op->buf;
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+ ret = axgbe_i2c_enable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to enable i2c master\n");
+		goto unlock;
+ }
+
+ /* Enabling the interrupts will cause the TX FIFO empty interrupt to
+ * fire and begin to process the command via the ISR.
+ */
+ axgbe_i2c_enable_interrupts(pdata);
+ timeout = rte_get_timer_cycles() + rte_get_timer_hz();
+
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ rte_delay_us(100);
+ if (XI2C_IOREAD(pdata, IC_RAW_INTR_STAT)) {
+ if (axgbe_i2c_isr(pdata))
+ goto success;
+ }
+ }
+
+ PMD_DRV_LOG(ERR, "i2c operation timed out\n");
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ ret = -ETIMEDOUT;
+ goto unlock;
+
+success:
+ ret = state->ret;
+ if (ret) {
+ if (state->tx_abort_source & IC_TX_ABRT_7B_ADDR_NOACK)
+ ret = -ENOTCONN;
+ else if (state->tx_abort_source & IC_TX_ABRT_ARB_LOST)
+ ret = -EAGAIN;
+ }
+
+unlock:
+ pthread_mutex_unlock(&pdata->i2c_mutex);
+ return ret;
+}
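+
+/* Usage sketch (illustrative only, not part of the driver): reading an
+ * SFP module's serial ID page through this routine might look like the
+ * following, with eeprom_buf being a caller-provided byte array:
+ *
+ *	struct axgbe_i2c_op op = {
+ *		.cmd = AXGBE_I2C_CMD_READ,
+ *		.target = 0x50,         (SFP serial ID I2C address)
+ *		.buf = eeprom_buf,
+ *		.len = sizeof(eeprom_buf),
+ *	};
+ *	ret = pdata->i2c_if.i2c_xfer(pdata, &op);
+ */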
+
+static void axgbe_i2c_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->i2c.started)
+ return;
+
+ pdata->i2c.started = 0;
+ axgbe_i2c_disable_interrupts(pdata);
+ axgbe_i2c_disable(pdata);
+ axgbe_i2c_clear_all_interrupts(pdata);
+}
+
+static int axgbe_i2c_start(struct axgbe_port *pdata)
+{
+ if (pdata->i2c.started)
+ return 0;
+
+ pdata->i2c.started = 1;
+
+ return 0;
+}
+
+static int axgbe_i2c_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_i2c_disable_interrupts(pdata);
+
+ ret = axgbe_i2c_disable(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "failed to disable i2c master\n");
+ return ret;
+ }
+
+ axgbe_i2c_get_features(pdata);
+
+ axgbe_i2c_set_mode(pdata);
+
+ axgbe_i2c_clear_all_interrupts(pdata);
+
+ return 0;
+}
+
+void axgbe_init_function_ptrs_i2c(struct axgbe_i2c_if *i2c_if)
+{
+ i2c_if->i2c_init = axgbe_i2c_init;
+ i2c_if->i2c_start = axgbe_i2c_start;
+ i2c_if->i2c_stop = axgbe_i2c_stop;
+ i2c_if->i2c_xfer = axgbe_i2c_xfer;
+}
diff --git a/drivers/net/axgbe/axgbe_logs.h b/drivers/net/axgbe/axgbe_logs.h
new file mode 100644
index 00000000..d1487017
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_logs.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_LOGS_H_
+#define _AXGBE_LOGS_H_
+
+#include <stdio.h>
+
+extern int axgbe_logtype_init;
+#define PMD_INIT_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_init, "%s(): " fmt "\n", \
+ __func__, ##args)
+
+#ifdef RTE_LIBRTE_AXGBE_PMD_DEBUG
+#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, " >>")
+#else
+#define PMD_INIT_FUNC_TRACE() do { } while (0)
+#endif
+
+extern int axgbe_logtype_driver;
+#define PMD_DRV_LOG(level, fmt, args...) \
+ rte_log(RTE_LOG_ ## level, axgbe_logtype_driver, "%s(): " fmt, \
+ __func__, ## args)
+
+#endif /* _AXGBE_LOGS_H_ */
diff --git a/drivers/net/axgbe/axgbe_mdio.c b/drivers/net/axgbe/axgbe_mdio.c
new file mode 100644
index 00000000..2721e5cc
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_mdio.c
@@ -0,0 +1,1066 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+static void axgbe_an37_clear_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_STAT, reg);
+}
+
+static void axgbe_an37_disable_interrupts(struct axgbe_port *pdata)
+{
+ int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL);
+ reg &= ~AXGBE_AN_CL37_INT_MASK;
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_VEND2_AN_CTRL, reg);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL);
+ reg &= ~AXGBE_PCS_CL37_BP;
+ XMDIO_WRITE(pdata, MDIO_MMD_PCS, MDIO_PCS_DIG_CTRL, reg);
+}
+
+static void axgbe_an73_clear_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, 0);
+}
+
+static void axgbe_an73_disable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK, 0);
+}
+
+static void axgbe_an73_enable_interrupts(struct axgbe_port *pdata)
+{
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INTMASK,
+ AXGBE_AN_CL73_INT_MASK);
+}
+
+static void axgbe_an_enable_interrupts(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_enable_interrupts(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+		PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_clear_interrupts_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_clear_interrupts(pdata);
+ axgbe_an37_clear_interrupts(pdata);
+}
+
+static void axgbe_an73_enable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg |= AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_an73_disable_kr_training(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+
+ reg &= ~AXGBE_KR_TRAINING_ENABLE;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, reg);
+}
+
+static void axgbe_kr_mode(struct axgbe_port *pdata)
+{
+ /* Enable KR training */
+ axgbe_an73_enable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KR);
+}
+
+static void axgbe_kx_2500_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 2.5G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_2500);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_2500);
+}
+
+static void axgbe_kx_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_KX_1000);
+}
+
+static void axgbe_sfi_mode(struct axgbe_port *pdata)
+{
+	/* If a KR re-driver is present, change to KR mode instead */
+	if (pdata->kr_redrv) {
+		axgbe_kr_mode(pdata);
+		return;
+	}
+
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 10G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_10000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SFI);
+}
+
+static void axgbe_x_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_X);
+}
+
+static void axgbe_sgmii_1000_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_1000);
+}
+
+static void axgbe_sgmii_100_mode(struct axgbe_port *pdata)
+{
+ /* Disable KR training */
+ axgbe_an73_disable_kr_training(pdata);
+
+ /* Set MAC to 1G speed */
+ pdata->hw_if.set_speed(pdata, SPEED_1000);
+
+ /* Call PHY implementation support to complete rate change */
+ pdata->phy_if.phy_impl.set_mode(pdata, AXGBE_MODE_SGMII_100);
+}
+
+static enum axgbe_mode axgbe_cur_mode(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.cur_mode(pdata);
+}
+
+static bool axgbe_in_kr_mode(struct axgbe_port *pdata)
+{
+ return axgbe_cur_mode(pdata) == AXGBE_MODE_KR;
+}
+
+static void axgbe_change_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ axgbe_kx_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_KX_2500:
+ axgbe_kx_2500_mode(pdata);
+ break;
+ case AXGBE_MODE_KR:
+ axgbe_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_100:
+ axgbe_sgmii_100_mode(pdata);
+ break;
+ case AXGBE_MODE_SGMII_1000:
+ axgbe_sgmii_1000_mode(pdata);
+ break;
+ case AXGBE_MODE_X:
+ axgbe_x_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_sfi_mode(pdata);
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "invalid operation mode requested (%u)\n", mode);
+ }
+}
+
+static void axgbe_switch_mode(struct axgbe_port *pdata)
+{
+ axgbe_change_mode(pdata, pdata->phy_if.phy_impl.switch_mode(pdata));
+}
+
+static void axgbe_set_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ if (mode == axgbe_cur_mode(pdata))
+ return;
+
+ axgbe_change_mode(pdata, mode);
+}
+
+static bool axgbe_use_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ return pdata->phy_if.phy_impl.use_mode(pdata, mode);
+}
+
+static void axgbe_an37_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_VEND2, MDIO_CTRL1);
+ reg &= ~MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (enable)
+ reg |= MDIO_VEND2_CTRL1_AN_ENABLE;
+
+ if (restart)
+ reg |= MDIO_VEND2_CTRL1_AN_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_VEND2, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an37_disable(struct axgbe_port *pdata)
+{
+ axgbe_an37_set(pdata, false, false);
+ axgbe_an37_disable_interrupts(pdata);
+}
+
+static void axgbe_an73_set(struct axgbe_port *pdata, bool enable,
+ bool restart)
+{
+ unsigned int reg;
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_CTRL1);
+ reg &= ~MDIO_AN_CTRL1_ENABLE;
+
+ if (enable)
+ reg |= MDIO_AN_CTRL1_ENABLE;
+
+ if (restart)
+ reg |= MDIO_AN_CTRL1_RESTART;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_CTRL1, reg);
+}
+
+static void axgbe_an73_restart(struct axgbe_port *pdata)
+{
+ axgbe_an73_enable_interrupts(pdata);
+ axgbe_an73_set(pdata, true, true);
+}
+
+static void axgbe_an73_disable(struct axgbe_port *pdata)
+{
+ axgbe_an73_set(pdata, false, false);
+ axgbe_an73_disable_interrupts(pdata);
+ pdata->an_start = 0;
+}
+
+static void axgbe_an_restart(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_pre)
+ pdata->phy_if.phy_impl.an_pre(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_restart(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable(struct axgbe_port *pdata)
+{
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_disable(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_disable_all(struct axgbe_port *pdata)
+{
+ axgbe_an73_disable(pdata);
+ axgbe_an37_disable(pdata);
+}
+
+static enum axgbe_an axgbe_an73_tx_training(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg, reg;
+
+ *state = AXGBE_RX_COMPLETE;
+
+ /* If we're not in KR mode then we're done */
+ if (!axgbe_in_kr_mode(pdata))
+ return AXGBE_AN_PAGE_RECEIVED;
+
+	/* Enable FEC only when both sides advertise the ability (bits
+	 * 15:14 of advertisement/LP register 3), otherwise disable it.
+	 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL);
+ reg &= ~(MDIO_PMA_10GBR_FECABLE_ABLE | MDIO_PMA_10GBR_FECABLE_ERRABLE);
+ if ((ad_reg & 0xc000) && (lp_reg & 0xc000))
+ reg |= pdata->fec_ability;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FECCTRL, reg);
+
+ /* Start KR training */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL);
+ if (reg & AXGBE_KR_TRAINING_ENABLE) {
+ if (pdata->phy_if.phy_impl.kr_training_pre)
+ pdata->phy_if.phy_impl.kr_training_pre(pdata);
+
+ reg |= AXGBE_KR_TRAINING_START;
+ XMDIO_WRITE(pdata, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL,
+ reg);
+
+ if (pdata->phy_if.phy_impl.kr_training_post)
+ pdata->phy_if.phy_impl.kr_training_post(pdata);
+ }
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_tx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ u16 msg;
+
+ *state = AXGBE_RX_XNP;
+
+ msg = AXGBE_XNP_MCF_NULL_MESSAGE;
+ msg |= AXGBE_XNP_MP_FORMATTED;
+
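+	/* The base XNP register is written last; as with the advertisement
+	 * registers in axgbe_an73_init(), writing it is presumed to hand
+	 * the completed page to the hardware.
+	 */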
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 2, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP + 1, 0);
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_XNP, msg);
+
+ return AXGBE_AN_PAGE_RECEIVED;
+}
+
+static enum axgbe_an axgbe_an73_rx_bpa(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int link_support;
+ unsigned int reg, ad_reg, lp_reg;
+
+ /* Read Base Ability register 2 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+
+	/* Check for a supported mode, otherwise restart in a different one:
+	 * bit 7 of LP base page register 2 is 10GBASE-KR ability, bit 5 is
+	 * 1000BASE-KX ability.
+	 */
+ link_support = axgbe_in_kr_mode(pdata) ? 0x80 : 0x20;
+ if (!(reg & link_support))
+ return AXGBE_AN_INCOMPAT_LINK;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_rx_xnp(struct axgbe_port *pdata,
+ enum axgbe_rx *state)
+{
+ unsigned int ad_reg, lp_reg;
+
+ /* Check Extended Next Page support */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_XNP);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPX);
+
+ return ((ad_reg & AXGBE_XNP_NP_EXCHANGE) ||
+ (lp_reg & AXGBE_XNP_NP_EXCHANGE))
+ ? axgbe_an73_tx_xnp(pdata, state)
+ : axgbe_an73_tx_training(pdata, state);
+}
+
+static enum axgbe_an axgbe_an73_page_received(struct axgbe_port *pdata)
+{
+ enum axgbe_rx *state;
+ unsigned long an_timeout;
+ enum axgbe_an ret;
+ unsigned long ticks;
+
+ if (!pdata->an_start) {
+ pdata->an_start = rte_get_timer_cycles();
+ } else {
+ an_timeout = pdata->an_start +
+ msecs_to_timer_cycles(AXGBE_AN_MS_TIMEOUT);
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, an_timeout)) {
+ /* Auto-negotiation timed out, reset state */
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ pdata->an_start = rte_get_timer_cycles();
+ }
+ }
+
+ state = axgbe_in_kr_mode(pdata) ? &pdata->kr_state
+ : &pdata->kx_state;
+
+ switch (*state) {
+ case AXGBE_RX_BPA:
+ ret = axgbe_an73_rx_bpa(pdata, state);
+ break;
+ case AXGBE_RX_XNP:
+ ret = axgbe_an73_rx_xnp(pdata, state);
+ break;
+ default:
+ ret = AXGBE_AN_ERROR;
+ }
+
+ return ret;
+}
+
+static enum axgbe_an axgbe_an73_incompat_link(struct axgbe_port *pdata)
+{
+ /* Be sure we aren't looping trying to negotiate */
+ if (axgbe_in_kr_mode(pdata)) {
+ pdata->kr_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_1000baseKX_Full) &&
+ !(pdata->phy.advertising & ADVERTISED_2500baseX_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kx_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ } else {
+ pdata->kx_state = AXGBE_RX_ERROR;
+
+ if (!(pdata->phy.advertising & ADVERTISED_10000baseKR_Full))
+ return AXGBE_AN_NO_LINK;
+
+ if (pdata->kr_state != AXGBE_RX_BPA)
+ return AXGBE_AN_NO_LINK;
+ }
+
+ axgbe_an_disable(pdata);
+ axgbe_switch_mode(pdata);
+ axgbe_an_restart(pdata);
+
+ return AXGBE_AN_INCOMPAT_LINK;
+}
+
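+/* Clause 73 AN state machine (sketch): each latched interrupt selects a
+ * state - page received, incompatible link, or complete.  A received
+ * page either starts KR training or exchanges extended next pages; an
+ * incompatible link switches to the next candidate mode and restarts
+ * AN; completion or no-link resets the machine to AXGBE_AN_READY for
+ * the next round.
+ */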
+static void axgbe_an73_state_machine(struct axgbe_port *pdata)
+{
+ enum axgbe_an cur_state = pdata->an_state;
+
+ if (!pdata->an_int)
+ return;
+
+next_int:
+ if (pdata->an_int & AXGBE_AN_CL73_PG_RCV) {
+ pdata->an_state = AXGBE_AN_PAGE_RECEIVED;
+ pdata->an_int &= ~AXGBE_AN_CL73_PG_RCV;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INC_LINK) {
+ pdata->an_state = AXGBE_AN_INCOMPAT_LINK;
+ pdata->an_int &= ~AXGBE_AN_CL73_INC_LINK;
+ } else if (pdata->an_int & AXGBE_AN_CL73_INT_CMPLT) {
+ pdata->an_state = AXGBE_AN_COMPLETE;
+ pdata->an_int &= ~AXGBE_AN_CL73_INT_CMPLT;
+ } else {
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+again:
+ cur_state = pdata->an_state;
+
+ switch (pdata->an_state) {
+ case AXGBE_AN_READY:
+ pdata->an_supported = 0;
+ break;
+ case AXGBE_AN_PAGE_RECEIVED:
+ pdata->an_state = axgbe_an73_page_received(pdata);
+ pdata->an_supported++;
+ break;
+ case AXGBE_AN_INCOMPAT_LINK:
+ pdata->an_supported = 0;
+ pdata->parallel_detect = 0;
+ pdata->an_state = axgbe_an73_incompat_link(pdata);
+ break;
+ case AXGBE_AN_COMPLETE:
+ pdata->parallel_detect = pdata->an_supported ? 0 : 1;
+ break;
+ case AXGBE_AN_NO_LINK:
+ break;
+ default:
+ pdata->an_state = AXGBE_AN_ERROR;
+ }
+
+ if (pdata->an_state == AXGBE_AN_NO_LINK) {
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ pdata->eth_dev->data->dev_link.link_status =
+ ETH_LINK_DOWN;
+ } else if (pdata->an_state == AXGBE_AN_ERROR) {
+ PMD_DRV_LOG(ERR, "error during auto-negotiation, state=%u\n",
+ cur_state);
+ pdata->an_int = 0;
+ axgbe_an73_clear_interrupts(pdata);
+ }
+
+ if (pdata->an_state >= AXGBE_AN_COMPLETE) {
+ pdata->an_result = pdata->an_state;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+ pdata->an_start = 0;
+ if (pdata->phy_if.phy_impl.an_post)
+ pdata->phy_if.phy_impl.an_post(pdata);
+ }
+
+ if (cur_state != pdata->an_state)
+ goto again;
+
+ if (pdata->an_int)
+ goto next_int;
+
+ axgbe_an73_enable_interrupts(pdata);
+}
+
+static void axgbe_an73_isr(struct axgbe_port *pdata)
+{
+ /* Disable AN interrupts */
+ axgbe_an73_disable_interrupts(pdata);
+
+ /* Save the interrupt(s) that fired */
+ pdata->an_int = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_INT);
+
+ if (pdata->an_int) {
+ /* Clear the interrupt(s) that fired and process them */
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_INT, ~pdata->an_int);
+ pthread_mutex_lock(&pdata->an_mutex);
+ axgbe_an73_state_machine(pdata);
+ pthread_mutex_unlock(&pdata->an_mutex);
+ } else {
+ /* Enable AN interrupts */
+ axgbe_an73_enable_interrupts(pdata);
+ }
+}
+
+static void axgbe_an_isr(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_isr(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+		PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_an_combined_isr(struct axgbe_port *pdata)
+{
+ axgbe_an_isr(pdata);
+}
+
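+/* Clause 73 advertisement layout as used below: register 3 carries the
+ * FEC ability/request bits (0xc000), register 2 the technology ability
+ * bits (0x80 = 10GBASE-KR, 0x20 = 1000BASE-KX/2500BASE-KX) and
+ * register 1 the pause bits (0x400 = Pause, 0x800 = Asym_Pause).
+ */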
+static void axgbe_an73_init(struct axgbe_port *pdata)
+{
+ unsigned int advertising, reg;
+
+ advertising = pdata->phy_if.phy_impl.an_advertising(pdata);
+
+ /* Set up Advertisement register 3 first */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ if (advertising & ADVERTISED_10000baseR_FEC)
+ reg |= 0xc000;
+ else
+ reg &= ~0xc000;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2, reg);
+
+ /* Set up Advertisement register 2 next */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ if (advertising & ADVERTISED_10000baseKR_Full)
+ reg |= 0x80;
+ else
+ reg &= ~0x80;
+
+ if ((advertising & ADVERTISED_1000baseKX_Full) ||
+ (advertising & ADVERTISED_2500baseX_Full))
+ reg |= 0x20;
+ else
+ reg &= ~0x20;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1, reg);
+
+ /* Set up Advertisement register 1 last */
+ reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ if (advertising & ADVERTISED_Pause)
+ reg |= 0x400;
+ else
+ reg &= ~0x400;
+
+ if (advertising & ADVERTISED_Asym_Pause)
+ reg |= 0x800;
+ else
+ reg &= ~0x800;
+
+ /* We don't intend to perform XNP */
+ reg &= ~AXGBE_XNP_NP_EXCHANGE;
+
+ XMDIO_WRITE(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
+}
+
+static void axgbe_an_init(struct axgbe_port *pdata)
+{
+ /* Set up advertisement registers based on current settings */
+ pdata->an_mode = pdata->phy_if.phy_impl.an_mode(pdata);
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ axgbe_an73_init(pdata);
+ break;
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+		PMD_DRV_LOG(ERR, "Unsupported AN_MODE_CL37\n");
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_adjust_link(struct axgbe_port *pdata)
+{
+ if (pdata->phy.link) {
+ /* Flow control support */
+ pdata->pause_autoneg = pdata->phy.pause_autoneg;
+
+ if (pdata->tx_pause != (unsigned int)pdata->phy.tx_pause) {
+ pdata->hw_if.config_tx_flow_control(pdata);
+ pdata->tx_pause = pdata->phy.tx_pause;
+ }
+
+ if (pdata->rx_pause != (unsigned int)pdata->phy.rx_pause) {
+ pdata->hw_if.config_rx_flow_control(pdata);
+ pdata->rx_pause = pdata->phy.rx_pause;
+ }
+
+ /* Speed support */
+ if (pdata->phy_speed != pdata->phy.speed)
+ pdata->phy_speed = pdata->phy.speed;
+ if (pdata->phy_link != pdata->phy.link)
+ pdata->phy_link = pdata->phy.link;
+ } else if (pdata->phy_link) {
+ pdata->phy_link = 0;
+ pdata->phy_speed = SPEED_UNKNOWN;
+ }
+}
+
+static int axgbe_phy_config_fixed(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ /* Disable auto-negotiation */
+ axgbe_an_disable(pdata);
+
+ /* Set specified mode for specified speed */
+ mode = pdata->phy_if.phy_impl.get_mode(pdata, pdata->phy.speed);
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_KX_2500:
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_SFI:
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ return -EINVAL;
+ }
+
+ /* Validate duplex mode */
+ if (pdata->phy.duplex != DUPLEX_FULL)
+ return -EINVAL;
+
+ axgbe_set_mode(pdata, mode);
+
+ return 0;
+}
+
+static int __axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ axgbe_set_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ pdata->link_check = rte_get_timer_cycles();
+
+ ret = pdata->phy_if.phy_impl.an_config(pdata);
+ if (ret)
+ return ret;
+
+ if (pdata->phy.autoneg != AUTONEG_ENABLE) {
+ ret = axgbe_phy_config_fixed(pdata);
+ if (ret || !pdata->kr_redrv)
+ return ret;
+ }
+
+ /* Disable auto-negotiation interrupt */
+ rte_intr_disable(&pdata->pci_dev->intr_handle);
+
+ /* Start auto-negotiation in a supported mode */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KR);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_2500);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_KX_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SFI);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_X);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_1000);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_set_mode(pdata, AXGBE_MODE_SGMII_100);
+ } else {
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+ return -EINVAL;
+ }
+
+ /* Disable and stop any in progress auto-negotiation */
+ axgbe_an_disable_all(pdata);
+
+	/* Clear any auto-negotiation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ pdata->an_result = AXGBE_AN_READY;
+ pdata->an_state = AXGBE_AN_READY;
+ pdata->kr_state = AXGBE_RX_BPA;
+ pdata->kx_state = AXGBE_RX_BPA;
+
+ /* Re-enable auto-negotiation interrupt */
+ rte_intr_enable(&pdata->pci_dev->intr_handle);
+
+ axgbe_an_init(pdata);
+ axgbe_an_restart(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_config_aneg(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pthread_mutex_lock(&pdata->an_mutex);
+
+ ret = __axgbe_phy_config_aneg(pdata);
+ if (ret)
+ axgbe_set_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+ else
+ axgbe_clear_bit(AXGBE_LINK_ERR, &pdata->dev_state);
+
+ pthread_mutex_unlock(&pdata->an_mutex);
+
+ return ret;
+}
+
+static bool axgbe_phy_aneg_done(struct axgbe_port *pdata)
+{
+ return pdata->an_result == AXGBE_AN_COMPLETE;
+}
+
+static void axgbe_check_link_timeout(struct axgbe_port *pdata)
+{
+ unsigned long link_timeout;
+ unsigned long ticks;
+
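+	/* Allow roughly twice AXGBE_LINK_TIMEOUT (in seconds, converted
+	 * to timer cycles) before forcing a new auto-negotiation attempt.
+	 */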
+ link_timeout = pdata->link_check + (AXGBE_LINK_TIMEOUT *
+ 2 * rte_get_timer_hz());
+ ticks = rte_get_timer_cycles();
+ if (time_after(ticks, link_timeout))
+ axgbe_phy_config_aneg(pdata);
+}
+
+static enum axgbe_mode axgbe_phy_status_aneg(struct axgbe_port *pdata)
+{
+ return pdata->phy_if.phy_impl.an_outcome(pdata);
+}
+
+static void axgbe_phy_status_result(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+
+ pdata->phy.lp_advertising = 0;
+
+ if ((pdata->phy.autoneg != AUTONEG_ENABLE) || pdata->parallel_detect)
+ mode = axgbe_cur_mode(pdata);
+ else
+ mode = axgbe_phy_status_aneg(pdata);
+
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ pdata->phy.speed = SPEED_100;
+ break;
+ case AXGBE_MODE_X:
+ case AXGBE_MODE_KX_1000:
+ case AXGBE_MODE_SGMII_1000:
+ pdata->phy.speed = SPEED_1000;
+ break;
+ case AXGBE_MODE_KX_2500:
+ pdata->phy.speed = SPEED_2500;
+ break;
+ case AXGBE_MODE_KR:
+ case AXGBE_MODE_SFI:
+ pdata->phy.speed = SPEED_10000;
+ break;
+ case AXGBE_MODE_UNKNOWN:
+ default:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ }
+
+ pdata->phy.duplex = DUPLEX_FULL;
+
+ axgbe_set_mode(pdata, mode);
+}
+
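+/* Intended to be polled from the ethdev link update path: refresh the
+ * PHY link state, drive any in-progress auto-negotiation (including
+ * its timeout), then propagate the result via axgbe_phy_adjust_link().
+ */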
+static void axgbe_phy_status(struct axgbe_port *pdata)
+{
+ unsigned int link_aneg;
+ int an_restart;
+
+ if (axgbe_test_bit(AXGBE_LINK_ERR, &pdata->dev_state)) {
+ pdata->phy.link = 0;
+ goto adjust_link;
+ }
+
+ link_aneg = (pdata->phy.autoneg == AUTONEG_ENABLE);
+
+ pdata->phy.link = pdata->phy_if.phy_impl.link_status(pdata,
+ &an_restart);
+ if (an_restart) {
+ axgbe_phy_config_aneg(pdata);
+ return;
+ }
+
+ if (pdata->phy.link) {
+ if (link_aneg && !axgbe_phy_aneg_done(pdata)) {
+ axgbe_check_link_timeout(pdata);
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state))
+ axgbe_clear_bit(AXGBE_LINK_INIT, &pdata->dev_state);
+ } else {
+ if (axgbe_test_bit(AXGBE_LINK_INIT, &pdata->dev_state)) {
+ axgbe_check_link_timeout(pdata);
+
+ if (link_aneg)
+ return;
+ }
+ axgbe_phy_status_result(pdata);
+ }
+
+adjust_link:
+ axgbe_phy_adjust_link(pdata);
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ if (!pdata->phy_started)
+ return;
+ /* Indicate the PHY is down */
+ pdata->phy_started = 0;
+ /* Disable auto-negotiation */
+ axgbe_an_disable_all(pdata);
+ pdata->phy_if.phy_impl.stop(pdata);
+ pdata->phy.link = 0;
+ axgbe_phy_adjust_link(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.start(pdata);
+ if (ret)
+ return ret;
+	/* Set initial mode - call the mode setting routines
+	 * directly to ensure we are properly configured
+	 */
+ if (axgbe_use_mode(pdata, AXGBE_MODE_KR)) {
+ axgbe_kr_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_2500)) {
+ axgbe_kx_2500_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_KX_1000)) {
+ axgbe_kx_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SFI)) {
+ axgbe_sfi_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_X)) {
+ axgbe_x_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_1000)) {
+ axgbe_sgmii_1000_mode(pdata);
+ } else if (axgbe_use_mode(pdata, AXGBE_MODE_SGMII_100)) {
+ axgbe_sgmii_100_mode(pdata);
+ } else {
+ ret = -EINVAL;
+ goto err_stop;
+ }
+ /* Indicate the PHY is up and running */
+ pdata->phy_started = 1;
+ axgbe_an_init(pdata);
+ axgbe_an_enable_interrupts(pdata);
+ return axgbe_phy_config_aneg(pdata);
+
+err_stop:
+ pdata->phy_if.phy_impl.stop(pdata);
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ int ret;
+
+ ret = pdata->phy_if.phy_impl.reset(pdata);
+ if (ret)
+ return ret;
+
+ /* Disable auto-negotiation for now */
+ axgbe_an_disable_all(pdata);
+
+ /* Clear auto-negotiation interrupts */
+ axgbe_an_clear_interrupts_all(pdata);
+
+ return 0;
+}
+
+static int axgbe_phy_best_advertised_speed(struct axgbe_port *pdata)
+{
+ if (pdata->phy.advertising & ADVERTISED_10000baseKR_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_10000baseT_Full)
+ return SPEED_10000;
+ else if (pdata->phy.advertising & ADVERTISED_2500baseX_Full)
+ return SPEED_2500;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseKX_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_1000baseT_Full)
+ return SPEED_1000;
+ else if (pdata->phy.advertising & ADVERTISED_100baseT_Full)
+ return SPEED_100;
+
+ return SPEED_UNKNOWN;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ int ret;
+
+ pdata->mdio_mmd = MDIO_MMD_PCS;
+
+ /* Check for FEC support */
+ pdata->fec_ability = XMDIO_READ(pdata, MDIO_MMD_PMAPMD,
+ MDIO_PMA_10GBR_FECABLE);
+ pdata->fec_ability &= (MDIO_PMA_10GBR_FECABLE_ABLE |
+ MDIO_PMA_10GBR_FECABLE_ERRABLE);
+
+ /* Setup the phy (including supported features) */
+ ret = pdata->phy_if.phy_impl.init(pdata);
+ if (ret)
+ return ret;
+ pdata->phy.advertising = pdata->phy.supported;
+
+ pdata->phy.address = 0;
+
+ if (pdata->phy.advertising & ADVERTISED_Autoneg) {
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ } else {
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ pdata->phy.speed = axgbe_phy_best_advertised_speed(pdata);
+ pdata->phy.duplex = DUPLEX_FULL;
+ }
+
+ pdata->phy.link = 0;
+
+ pdata->phy.pause_autoneg = pdata->pause_autoneg;
+ pdata->phy.tx_pause = pdata->tx_pause;
+ pdata->phy.rx_pause = pdata->rx_pause;
+
+ /* Fix up Flow Control advertising */
+ pdata->phy.advertising &= ~ADVERTISED_Pause;
+ pdata->phy.advertising &= ~ADVERTISED_Asym_Pause;
+
+ if (pdata->rx_pause) {
+ pdata->phy.advertising |= ADVERTISED_Pause;
+ pdata->phy.advertising |= ADVERTISED_Asym_Pause;
+ }
+
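+	/* Toggling Asym_Pause gives the standard 802.3 pairing: Rx+Tx
+	 * pause advertises Pause only, Tx-only advertises Asym_Pause only,
+	 * and Rx-only advertises both.
+	 */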
+ if (pdata->tx_pause)
+ pdata->phy.advertising ^= ADVERTISED_Asym_Pause;
+ return 0;
+}
+
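+/* Expected call flow (illustrative): phy_init() once at setup,
+ * phy_reset()/phy_start() when the port is brought up, phy_status()
+ * polled while the port is running, an_isr() from the device interrupt
+ * handler, and phy_stop() on teardown.
+ */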
+void axgbe_init_function_ptrs_phy(struct axgbe_phy_if *phy_if)
+{
+ phy_if->phy_init = axgbe_phy_init;
+ phy_if->phy_reset = axgbe_phy_reset;
+ phy_if->phy_start = axgbe_phy_start;
+ phy_if->phy_stop = axgbe_phy_stop;
+ phy_if->phy_status = axgbe_phy_status;
+ phy_if->phy_config_aneg = axgbe_phy_config_aneg;
+ phy_if->an_isr = axgbe_an_combined_isr;
+}
diff --git a/drivers/net/axgbe/axgbe_phy.h b/drivers/net/axgbe/axgbe_phy.h
new file mode 100644
index 00000000..77ee20a3
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_phy.h
@@ -0,0 +1,192 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef __AXGBE_PHY_H__
+#define __AXGBE_PHY_H__
+
+#define SPEED_10 10
+#define SPEED_100 100
+#define SPEED_1000 1000
+#define SPEED_2500 2500
+#define SPEED_10000 10000
+
+
+/* Or MII_ADDR_C45 into regnum for read/write on mii_bus to enable the 21 bit
+ * IEEE 802.3ae clause 45 addressing mode used by 10GIGE phy chips.
+ */
+#define MII_ADDR_C45 (1 << 30)
+
+/* Basic mode status register. */
+#define BMSR_LSTATUS 0x0004 /* Link status */
+
+/* Status register 1. */
+#define MDIO_STAT1_LSTATUS BMSR_LSTATUS
+
+/* Generic MII registers. */
+#define MII_BMCR 0x00 /* Basic mode control register */
+#define MII_BMSR 0x01 /* Basic mode status register */
+#define MII_PHYSID1 0x02 /* PHYS ID 1 */
+#define MII_PHYSID2 0x03 /* PHYS ID 2 */
+#define MII_ADVERTISE 0x04 /* Advertisement control reg */
+#define MII_LPA 0x05 /* Link partner ability reg */
+#define MII_EXPANSION 0x06 /* Expansion register */
+#define MII_CTRL1000 0x09 /* 1000BASE-T control */
+#define MII_STAT1000 0x0a /* 1000BASE-T status */
+#define MII_MMD_CTRL 0x0d /* MMD Access Control Register */
+#define MII_MMD_DATA 0x0e /* MMD Access Data Register */
+#define MII_ESTATUS 0x0f /* Extended Status */
+#define MII_DCOUNTER 0x12 /* Disconnect counter */
+#define MII_FCSCOUNTER 0x13 /* False carrier counter */
+#define MII_NWAYTEST 0x14 /* N-way auto-neg test reg */
+#define MII_RERRCOUNTER 0x15 /* Receive error counter */
+#define MII_SREVISION 0x16 /* Silicon revision */
+#define MII_RESV1 0x17 /* Reserved... */
+#define MII_LBRERROR 0x18 /* Lpback, rx, bypass error */
+#define MII_PHYADDR 0x19 /* PHY address */
+#define MII_RESV2 0x1a /* Reserved... */
+#define MII_TPISTATUS 0x1b /* TPI status for 10mbps */
+#define MII_NCONFIG 0x1c /* Network interface config */
+
+/* Basic mode control register. */
+#define BMCR_RESV 0x003f /* Unused... */
+#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */
+#define BMCR_CTST 0x0080 /* Collision test */
+#define BMCR_FULLDPLX 0x0100 /* Full duplex */
+#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */
+#define BMCR_ISOLATE 0x0400 /* Isolate data paths from MII */
+#define BMCR_PDOWN 0x0800 /* Enable low power state */
+#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */
+#define BMCR_SPEED100 0x2000 /* Select 100Mbps */
+#define BMCR_LOOPBACK 0x4000 /* TXD loopback bits */
+#define BMCR_RESET 0x8000 /* Reset to default state */
+#define BMCR_SPEED10 0x0000 /* Select 10Mbps */
+
+
+/* MDIO Manageable Devices (MMDs). */
+#define MDIO_MMD_PMAPMD 1 /* Physical Medium Attachment
+ * Physical Medium Dependent
+ */
+#define MDIO_MMD_WIS 2 /* WAN Interface Sublayer */
+#define MDIO_MMD_PCS 3 /* Physical Coding Sublayer */
+#define MDIO_MMD_PHYXS 4 /* PHY Extender Sublayer */
+#define MDIO_MMD_DTEXS 5 /* DTE Extender Sublayer */
+#define MDIO_MMD_TC 6 /* Transmission Convergence */
+#define MDIO_MMD_AN 7 /* Auto-Negotiation */
+#define MDIO_MMD_C22EXT 29 /* Clause 22 extension */
+#define MDIO_MMD_VEND1 30 /* Vendor specific 1 */
+#define MDIO_MMD_VEND2 31 /* Vendor specific 2 */
+
+/* Generic MDIO registers. */
+#define MDIO_CTRL1 MII_BMCR
+#define MDIO_STAT1 MII_BMSR
+#define MDIO_DEVID1 MII_PHYSID1
+#define MDIO_DEVID2 MII_PHYSID2
+#define MDIO_SPEED 4 /* Speed ability */
+#define MDIO_DEVS1 5 /* Devices in package */
+#define MDIO_DEVS2 6
+#define MDIO_CTRL2 7 /* 10G control 2 */
+#define MDIO_STAT2 8 /* 10G status 2 */
+#define MDIO_PMA_TXDIS 9 /* 10G PMA/PMD transmit disable */
+#define MDIO_PMA_RXDET 10 /* 10G PMA/PMD receive signal detect */
+#define MDIO_PMA_EXTABLE 11 /* 10G PMA/PMD extended ability */
+#define MDIO_PKGID1 14 /* Package identifier */
+#define MDIO_PKGID2 15
+#define MDIO_AN_ADVERTISE 16 /* AN advertising (base page) */
+#define MDIO_AN_LPA 19 /* AN LP abilities (base page) */
+#define MDIO_PCS_EEE_ABLE 20 /* EEE Capability register */
+#define MDIO_PCS_EEE_WK_ERR 22 /* EEE wake error counter */
+#define MDIO_PHYXS_LNSTAT 24 /* PHY XGXS lane state */
+#define MDIO_AN_EEE_ADV 60 /* EEE advertisement */
+#define MDIO_AN_EEE_LPABLE 61 /* EEE link partner ability */
+
+/* Media-dependent registers. */
+#define MDIO_PMA_10GBT_SWAPPOL 130 /* 10GBASE-T pair swap & polarity */
+#define MDIO_PMA_10GBT_TXPWR 131 /* 10GBASE-T TX power control */
+#define MDIO_PMA_10GBT_SNR 133 /* 10GBASE-T SNR margin, lane A.
+ * Lanes B-D are numbered 134-136.
+ */
+#define MDIO_PMA_10GBR_FECABLE 170 /* 10GBASE-R FEC ability */
+#define MDIO_PCS_10GBX_STAT1 24 /* 10GBASE-X PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT1 32 /* 10GBASE-R/-T PCS status 1 */
+#define MDIO_PCS_10GBRT_STAT2 33 /* 10GBASE-R/-T PCS status 2 */
+#define MDIO_AN_10GBT_CTRL 32 /* 10GBASE-T auto-negotiation control */
+#define MDIO_AN_10GBT_STAT 33 /* 10GBASE-T auto-negotiation status */
+
+/* Control register 1. */
+/* Enable extended speed selection */
+#define MDIO_CTRL1_SPEEDSELEXT (BMCR_SPEED1000 | BMCR_SPEED100)
+/* All speed selection bits */
+#define MDIO_CTRL1_SPEEDSEL (MDIO_CTRL1_SPEEDSELEXT | 0x003c)
+#define MDIO_CTRL1_FULLDPLX BMCR_FULLDPLX
+#define MDIO_CTRL1_LPOWER BMCR_PDOWN
+#define MDIO_CTRL1_RESET BMCR_RESET
+#define MDIO_PMA_CTRL1_LOOPBACK 0x0001
+#define MDIO_PMA_CTRL1_SPEED1000 BMCR_SPEED1000
+#define MDIO_PMA_CTRL1_SPEED100 BMCR_SPEED100
+#define MDIO_PCS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_PHYXS_CTRL1_LOOPBACK BMCR_LOOPBACK
+#define MDIO_AN_CTRL1_RESTART BMCR_ANRESTART
+#define MDIO_AN_CTRL1_ENABLE BMCR_ANENABLE
+#define MDIO_AN_CTRL1_XNP 0x2000 /* Enable extended next page */
+#define MDIO_PCS_CTRL1_CLKSTOP_EN 0x400 /* Stop the clock during LPI */
+
+
+/* PMA 10GBASE-R FEC ability register. */
+#define MDIO_PMA_10GBR_FECABLE_ABLE 0x0001 /* FEC ability */
+#define MDIO_PMA_10GBR_FECABLE_ERRABLE 0x0002 /* FEC error indic. ability */
+
+
+/* Autoneg related */
+#define ADVERTISED_Autoneg (1 << 6)
+#define SUPPORTED_Autoneg (1 << 6)
+#define AUTONEG_DISABLE 0x00
+#define AUTONEG_ENABLE 0x01
+
+#define ADVERTISED_Pause (1 << 13)
+#define ADVERTISED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Pause (1 << 13)
+#define SUPPORTED_Asym_Pause (1 << 14)
+
+#define SUPPORTED_Backplane (1 << 16)
+#define SUPPORTED_TP (1 << 7)
+
+#define ADVERTISED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_10000baseR_FEC (1 << 20)
+
+#define SUPPORTED_FIBRE (1 << 10)
+
+#define ADVERTISED_10000baseKR_Full (1 << 19)
+#define ADVERTISED_10000baseT_Full (1 << 12)
+#define ADVERTISED_2500baseX_Full (1 << 15)
+#define ADVERTISED_1000baseKX_Full (1 << 17)
+#define ADVERTISED_1000baseT_Full (1 << 5)
+#define ADVERTISED_100baseT_Full (1 << 3)
+#define ADVERTISED_TP (1 << 7)
+#define ADVERTISED_FIBRE (1 << 10)
+#define ADVERTISED_Backplane (1 << 16)
+
+#define SUPPORTED_1000baseKX_Full (1 << 17)
+#define SUPPORTED_10000baseKR_Full (1 << 19)
+#define SUPPORTED_2500baseX_Full (1 << 15)
+#define SUPPORTED_100baseT_Full (1 << 3)
+#define SUPPORTED_1000baseT_Full (1 << 5)
+#define SUPPORTED_10000baseT_Full (1 << 12)
+
+
+#define SPEED_UNKNOWN -1
+
+/* Duplex, half or full. */
+#define DUPLEX_HALF 0x00
+#define DUPLEX_FULL 0x01
+#define DUPLEX_UNKNOWN 0xff
+
+#endif /* __AXGBE_PHY_H__ */
diff --git a/drivers/net/axgbe/axgbe_phy_impl.c b/drivers/net/axgbe/axgbe_phy_impl.c
new file mode 100644
index 00000000..973177f6
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_phy_impl.c
@@ -0,0 +1,2191 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_common.h"
+#include "axgbe_phy.h"
+
+#define AXGBE_PHY_PORT_SPEED_100 BIT(0)
+#define AXGBE_PHY_PORT_SPEED_1000 BIT(1)
+#define AXGBE_PHY_PORT_SPEED_2500 BIT(2)
+#define AXGBE_PHY_PORT_SPEED_10000 BIT(3)
+
+#define AXGBE_MUTEX_RELEASE 0x80000000
+
+#define AXGBE_SFP_DIRECT 7
+
+/* I2C target addresses */
+#define AXGBE_SFP_SERIAL_ID_ADDRESS 0x50
+#define AXGBE_SFP_DIAG_INFO_ADDRESS 0x51
+#define AXGBE_SFP_PHY_ADDRESS 0x56
+#define AXGBE_GPIO_ADDRESS_PCA9555 0x20
+
+/* SFP sideband signal indicators */
+#define AXGBE_GPIO_NO_TX_FAULT BIT(0)
+#define AXGBE_GPIO_NO_RATE_SELECT BIT(1)
+#define AXGBE_GPIO_NO_MOD_ABSENT BIT(2)
+#define AXGBE_GPIO_NO_RX_LOS BIT(3)
+
+/* Rate-change complete wait/retry count */
+#define AXGBE_RATECHANGE_COUNT 500
+
+/* CDR delay values for KR support (in usec) */
+#define AXGBE_CDR_DELAY_INIT 10000
+#define AXGBE_CDR_DELAY_INC 10000
+#define AXGBE_CDR_DELAY_MAX 100000
+
+enum axgbe_port_mode {
+ AXGBE_PORT_MODE_RSVD = 0,
+ AXGBE_PORT_MODE_BACKPLANE,
+ AXGBE_PORT_MODE_BACKPLANE_2500,
+ AXGBE_PORT_MODE_1000BASE_T,
+ AXGBE_PORT_MODE_1000BASE_X,
+ AXGBE_PORT_MODE_NBASE_T,
+ AXGBE_PORT_MODE_10GBASE_T,
+ AXGBE_PORT_MODE_10GBASE_R,
+ AXGBE_PORT_MODE_SFP,
+ AXGBE_PORT_MODE_MAX,
+};
+
+enum axgbe_conn_type {
+ AXGBE_CONN_TYPE_NONE = 0,
+ AXGBE_CONN_TYPE_SFP,
+ AXGBE_CONN_TYPE_MDIO,
+ AXGBE_CONN_TYPE_RSVD1,
+ AXGBE_CONN_TYPE_BACKPLANE,
+ AXGBE_CONN_TYPE_MAX,
+};
+
+/* SFP/SFP+ related definitions */
+enum axgbe_sfp_comm {
+ AXGBE_SFP_COMM_DIRECT = 0,
+ AXGBE_SFP_COMM_PCA9545,
+};
+
+enum axgbe_sfp_cable {
+ AXGBE_SFP_CABLE_UNKNOWN = 0,
+ AXGBE_SFP_CABLE_ACTIVE,
+ AXGBE_SFP_CABLE_PASSIVE,
+};
+
+enum axgbe_sfp_base {
+ AXGBE_SFP_BASE_UNKNOWN = 0,
+ AXGBE_SFP_BASE_1000_T,
+ AXGBE_SFP_BASE_1000_SX,
+ AXGBE_SFP_BASE_1000_LX,
+ AXGBE_SFP_BASE_1000_CX,
+ AXGBE_SFP_BASE_10000_SR,
+ AXGBE_SFP_BASE_10000_LR,
+ AXGBE_SFP_BASE_10000_LRM,
+ AXGBE_SFP_BASE_10000_ER,
+ AXGBE_SFP_BASE_10000_CR,
+};
+
+enum axgbe_sfp_speed {
+ AXGBE_SFP_SPEED_UNKNOWN = 0,
+ AXGBE_SFP_SPEED_100_1000,
+ AXGBE_SFP_SPEED_1000,
+ AXGBE_SFP_SPEED_10000,
+};
+
+/* SFP Serial ID Base ID values relative to an offset of 0 */
+#define AXGBE_SFP_BASE_ID 0
+#define AXGBE_SFP_ID_SFP 0x03
+
+#define AXGBE_SFP_BASE_EXT_ID 1
+#define AXGBE_SFP_EXT_ID_SFP 0x04
+
+#define AXGBE_SFP_BASE_10GBE_CC 3
+#define AXGBE_SFP_BASE_10GBE_CC_SR BIT(4)
+#define AXGBE_SFP_BASE_10GBE_CC_LR BIT(5)
+#define AXGBE_SFP_BASE_10GBE_CC_LRM BIT(6)
+#define AXGBE_SFP_BASE_10GBE_CC_ER BIT(7)
+
+#define AXGBE_SFP_BASE_1GBE_CC 6
+#define AXGBE_SFP_BASE_1GBE_CC_SX BIT(0)
+#define AXGBE_SFP_BASE_1GBE_CC_LX BIT(1)
+#define AXGBE_SFP_BASE_1GBE_CC_CX BIT(2)
+#define AXGBE_SFP_BASE_1GBE_CC_T BIT(3)
+
+#define AXGBE_SFP_BASE_CABLE 8
+#define AXGBE_SFP_BASE_CABLE_PASSIVE BIT(2)
+#define AXGBE_SFP_BASE_CABLE_ACTIVE BIT(3)
+
+#define AXGBE_SFP_BASE_BR 12
+#define AXGBE_SFP_BASE_BR_1GBE_MIN 0x0a
+#define AXGBE_SFP_BASE_BR_1GBE_MAX 0x0d
+#define AXGBE_SFP_BASE_BR_10GBE_MIN 0x64
+#define AXGBE_SFP_BASE_BR_10GBE_MAX 0x68
+
+#define AXGBE_SFP_BASE_CU_CABLE_LEN 18
+
+#define AXGBE_SFP_BASE_VENDOR_NAME 20
+#define AXGBE_SFP_BASE_VENDOR_NAME_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_PN 40
+#define AXGBE_SFP_BASE_VENDOR_PN_LEN 16
+#define AXGBE_SFP_BASE_VENDOR_REV 56
+#define AXGBE_SFP_BASE_VENDOR_REV_LEN 4
+
+#define AXGBE_SFP_BASE_CC 63
+
+/* SFP Serial ID Extended ID values relative to an offset of 64 */
+#define AXGBE_SFP_BASE_VENDOR_SN 4
+#define AXGBE_SFP_BASE_VENDOR_SN_LEN 16
+
+#define AXGBE_SFP_EXTD_DIAG 28
+#define AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE BIT(2)
+
+#define AXGBE_SFP_EXTD_SFF_8472 30
+
+#define AXGBE_SFP_EXTD_CC 31
+
+struct axgbe_sfp_eeprom {
+ u8 base[64];
+ u8 extd[32];
+ u8 vendor[32];
+};
+
+#define AXGBE_BEL_FUSE_VENDOR "BEL-FUSE"
+#define AXGBE_BEL_FUSE_PARTNO "1GBT-SFP06"
+
+struct axgbe_sfp_ascii {
+ union {
+ char vendor[AXGBE_SFP_BASE_VENDOR_NAME_LEN + 1];
+ char partno[AXGBE_SFP_BASE_VENDOR_PN_LEN + 1];
+ char rev[AXGBE_SFP_BASE_VENDOR_REV_LEN + 1];
+ char serno[AXGBE_SFP_BASE_VENDOR_SN_LEN + 1];
+ } u;
+};
+
+/* MDIO PHY reset types */
+enum axgbe_mdio_reset {
+ AXGBE_MDIO_RESET_NONE = 0,
+ AXGBE_MDIO_RESET_I2C_GPIO,
+ AXGBE_MDIO_RESET_INT_GPIO,
+ AXGBE_MDIO_RESET_MAX,
+};
+
+/* Re-driver related definitions */
+enum axgbe_phy_redrv_if {
+ AXGBE_PHY_REDRV_IF_MDIO = 0,
+ AXGBE_PHY_REDRV_IF_I2C,
+ AXGBE_PHY_REDRV_IF_MAX,
+};
+
+enum axgbe_phy_redrv_model {
+ AXGBE_PHY_REDRV_MODEL_4223 = 0,
+ AXGBE_PHY_REDRV_MODEL_4227,
+ AXGBE_PHY_REDRV_MODEL_MAX,
+};
+
+enum axgbe_phy_redrv_mode {
+ AXGBE_PHY_REDRV_MODE_CX = 5,
+ AXGBE_PHY_REDRV_MODE_SR = 9,
+};
+
+#define AXGBE_PHY_REDRV_MODE_REG 0x12b0
+
+/* PHY related configuration information */
+struct axgbe_phy_data {
+ enum axgbe_port_mode port_mode;
+
+ unsigned int port_id;
+
+ unsigned int port_speeds;
+
+ enum axgbe_conn_type conn_type;
+
+ enum axgbe_mode cur_mode;
+ enum axgbe_mode start_mode;
+
+ unsigned int rrc_count;
+
+ unsigned int mdio_addr;
+
+ unsigned int comm_owned;
+
+ /* SFP Support */
+ enum axgbe_sfp_comm sfp_comm;
+ unsigned int sfp_mux_address;
+ unsigned int sfp_mux_channel;
+
+ unsigned int sfp_gpio_address;
+ unsigned int sfp_gpio_mask;
+ unsigned int sfp_gpio_rx_los;
+ unsigned int sfp_gpio_tx_fault;
+ unsigned int sfp_gpio_mod_absent;
+ unsigned int sfp_gpio_rate_select;
+
+ unsigned int sfp_rx_los;
+ unsigned int sfp_tx_fault;
+ unsigned int sfp_mod_absent;
+ unsigned int sfp_diags;
+ unsigned int sfp_changed;
+ unsigned int sfp_phy_avail;
+ unsigned int sfp_cable_len;
+ enum axgbe_sfp_base sfp_base;
+ enum axgbe_sfp_cable sfp_cable;
+ enum axgbe_sfp_speed sfp_speed;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+
+ /* External PHY support */
+ enum axgbe_mdio_mode phydev_mode;
+ enum axgbe_mdio_reset mdio_reset;
+ unsigned int mdio_reset_addr;
+ unsigned int mdio_reset_gpio;
+
+ /* Re-driver support */
+ unsigned int redrv;
+ unsigned int redrv_if;
+ unsigned int redrv_addr;
+ unsigned int redrv_lane;
+ unsigned int redrv_model;
+
+ /* KR AN support */
+ unsigned int phy_cdr_notrack;
+ unsigned int phy_cdr_delay;
+};
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata);
+
+static int axgbe_phy_i2c_xfer(struct axgbe_port *pdata,
+ struct axgbe_i2c_op *i2c_op)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Be sure we own the bus */
+ if (!phy_data->comm_owned)
+ return -EIO;
+
+ return pdata->i2c_if.i2c_xfer(pdata, i2c_op);
+}
+
+static int axgbe_phy_redrv_write(struct axgbe_port *pdata, unsigned int reg,
+ unsigned int val)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint16_t *redrv_val;
+ u8 redrv_data[5], csum;
+ unsigned int i, retry;
+ int ret;
+
+	/* The register high byte is shifted up one bit so that bit 0 can
+	 * carry the read/write indicator (0 = write)
+	 */
+ redrv_data[0] = ((reg >> 8) & 0xff) << 1;
+ redrv_data[1] = reg & 0xff;
+ redrv_val = (uint16_t *)&redrv_data[2];
+ *redrv_val = rte_cpu_to_be_16(val);
+
+ /* Calculate 1 byte checksum */
+ csum = 0;
+ for (i = 0; i < 4; i++) {
+ csum += redrv_data[i];
+ if (redrv_data[i] > csum)
+ csum++;
+ }
+ redrv_data[4] = ~csum;
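+	/* Example: writing mode 9 (SR) to register 0x12b0 sends the bytes
+	 * 24 b0 00 09; their end-around-carry sum is 0xdd, so the checksum
+	 * byte is ~0xdd = 0x22
+	 */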
+
+ retry = 1;
+again1:
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = sizeof(redrv_data);
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = phy_data->redrv_addr;
+ i2c_op.len = 1;
+ i2c_op.buf = redrv_data;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+ }
+
+ if (redrv_data[0] != 0xff) {
+ PMD_DRV_LOG(ERR, "Redriver write checksum error\n");
+ ret = -EIO;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_i2c_read(struct axgbe_port *pdata, unsigned int target,
+ void *reg, unsigned int reg_len,
+ void *val, unsigned int val_len)
+{
+ struct axgbe_i2c_op i2c_op;
+ int retry, ret;
+
+ retry = 1;
+again1:
+ /* Set the specified register to read */
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = target;
+ i2c_op.len = reg_len;
+ i2c_op.buf = reg;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if (ret) {
+ if ((ret == -EAGAIN) && retry--)
+ goto again1;
+
+ return ret;
+ }
+
+ retry = 1;
+again2:
+	/* Read the specified register */
+ i2c_op.cmd = AXGBE_I2C_CMD_READ;
+ i2c_op.target = target;
+ i2c_op.len = val_len;
+ i2c_op.buf = val;
+ ret = axgbe_phy_i2c_xfer(pdata, &i2c_op);
+ if ((ret == -EAGAIN) && retry--)
+ goto again2;
+
+ return ret;
+}
+
+static int axgbe_phy_sfp_put_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ uint8_t mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select no mux channels */
+ mux_channel = 0;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static int axgbe_phy_sfp_get_mux(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_i2c_op i2c_op;
+ u8 mux_channel;
+
+ if (phy_data->sfp_comm == AXGBE_SFP_COMM_DIRECT)
+ return 0;
+
+ /* Select desired mux channel */
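+	/* (the PCA9545 control register takes one enable bit per channel,
+	 * so the selector value is one-hot)
+	 */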
+ mux_channel = 1 << phy_data->sfp_mux_channel;
+ i2c_op.cmd = AXGBE_I2C_CMD_WRITE;
+ i2c_op.target = phy_data->sfp_mux_address;
+ i2c_op.len = sizeof(mux_channel);
+ i2c_op.buf = &mux_channel;
+
+ return axgbe_phy_i2c_xfer(pdata, &i2c_op);
+}
+
+static void axgbe_phy_put_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->comm_owned = 0;
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+}
+
+static int axgbe_phy_get_comm_ownership(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ uint64_t timeout;
+ unsigned int mutex_id;
+
+ if (phy_data->comm_owned)
+ return 0;
+
+	/* The I2C and MDIO/GPIO buses are multiplexed between multiple
+	 * devices, so the driver needs to take the software mutex and then
+	 * the hardware mutexes before it can use the buses.
+	 */
+ pthread_mutex_lock(&pdata->phy_mutex);
+
+ /* Clear the mutexes */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, AXGBE_MUTEX_RELEASE);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, AXGBE_MUTEX_RELEASE);
+
+ /* Mutex formats are the same for I2C and MDIO/GPIO */
+ mutex_id = 0;
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ID, phy_data->port_id);
+ XP_SET_BITS(mutex_id, XP_I2C_MUTEX, ACTIVE, 1);
+
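+	/* Poll for up to five seconds for both hardware mutexes to be free */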
+ timeout = rte_get_timer_cycles() + (rte_get_timer_hz() * 5);
+ while (time_before(rte_get_timer_cycles(), timeout)) {
+ /* Must be all zeroes in order to obtain the mutex */
+ if (XP_IOREAD(pdata, XP_I2C_MUTEX) ||
+ XP_IOREAD(pdata, XP_MDIO_MUTEX)) {
+ rte_delay_us(100);
+ continue;
+ }
+
+ /* Obtain the mutex */
+ XP_IOWRITE(pdata, XP_I2C_MUTEX, mutex_id);
+ XP_IOWRITE(pdata, XP_MDIO_MUTEX, mutex_id);
+
+ phy_data->comm_owned = 1;
+ return 0;
+ }
+
+ pthread_mutex_unlock(&pdata->phy_mutex);
+
+ PMD_DRV_LOG(ERR, "unable to obtain hardware mutexes\n");
+
+ return -ETIMEDOUT;
+}
+
+static void axgbe_phy_sfp_phy_settings(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (phy_data->sfp_mod_absent) {
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising = pdata->phy.supported;
+		return;
+	}
+
+ pdata->phy.advertising &= ~ADVERTISED_Autoneg;
+ pdata->phy.advertising &= ~ADVERTISED_TP;
+ pdata->phy.advertising &= ~ADVERTISED_FIBRE;
+ pdata->phy.advertising &= ~ADVERTISED_100baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_1000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseT_Full;
+ pdata->phy.advertising &= ~ADVERTISED_10000baseR_FEC;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ pdata->phy.speed = SPEED_UNKNOWN;
+ pdata->phy.duplex = DUPLEX_UNKNOWN;
+ pdata->phy.autoneg = AUTONEG_ENABLE;
+ pdata->phy.advertising |= ADVERTISED_Autoneg;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ default:
+ pdata->phy.speed = SPEED_10000;
+ pdata->phy.duplex = DUPLEX_FULL;
+ pdata->phy.autoneg = AUTONEG_DISABLE;
+ break;
+ }
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_CX:
+ case AXGBE_SFP_BASE_10000_CR:
+ pdata->phy.advertising |= ADVERTISED_TP;
+ break;
+ default:
+ pdata->phy.advertising |= ADVERTISED_FIBRE;
+ }
+
+ switch (phy_data->sfp_speed) {
+ case AXGBE_SFP_SPEED_100_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_1000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ break;
+ default:
+ /* Choose the fastest supported speed */
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ pdata->phy.advertising |= ADVERTISED_10000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ pdata->phy.advertising |= ADVERTISED_1000baseT_Full;
+ else if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100)
+ pdata->phy.advertising |= ADVERTISED_100baseT_Full;
+ }
+}
+
+static bool axgbe_phy_sfp_bit_rate(struct axgbe_sfp_eeprom *sfp_eeprom,
+ enum axgbe_sfp_speed sfp_speed)
+{
+ u8 *sfp_base, min, max;
+
+ sfp_base = sfp_eeprom->base;
+
+ switch (sfp_speed) {
+ case AXGBE_SFP_SPEED_1000:
+ min = AXGBE_SFP_BASE_BR_1GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_1GBE_MAX;
+ break;
+ case AXGBE_SFP_SPEED_10000:
+ min = AXGBE_SFP_BASE_BR_10GBE_MIN;
+ max = AXGBE_SFP_BASE_BR_10GBE_MAX;
+ break;
+ default:
+ return false;
+ }
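+	/* Serial ID byte 12 is the nominal bit rate in units of 100 Mb/s:
+	 * 0x0a-0x0d covers 1.0-1.3 Gb/s and 0x64-0x68 covers 10.0-10.4 Gb/s
+	 */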
+
+ return ((sfp_base[AXGBE_SFP_BASE_BR] >= min) &&
+ (sfp_base[AXGBE_SFP_BASE_BR] <= max));
+}
+
+static void axgbe_phy_sfp_external_phy(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!phy_data->sfp_changed)
+ return;
+
+ phy_data->sfp_phy_avail = 0;
+
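+	/* Only a 1000BASE-T module carries an accessible PHY, and probing it
+	 * over I2C is not supported here, so sfp_phy_avail stays zero
+	 */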
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return;
+}
+
+static bool axgbe_phy_belfuse_parse_quirks(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+
+ if (memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_NAME],
+ AXGBE_BEL_FUSE_VENDOR, strlen(AXGBE_BEL_FUSE_VENDOR)))
+ return false;
+
+ if (!memcmp(&sfp_eeprom->base[AXGBE_SFP_BASE_VENDOR_PN],
+ AXGBE_BEL_FUSE_PARTNO, strlen(AXGBE_BEL_FUSE_PARTNO))) {
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ return true;
+ }
+
+ return false;
+}
+
+static bool axgbe_phy_sfp_parse_quirks(struct axgbe_port *pdata)
+{
+ if (axgbe_phy_belfuse_parse_quirks(pdata))
+ return true;
+
+ return false;
+}
+
+static void axgbe_phy_sfp_parse_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom *sfp_eeprom = &phy_data->sfp_eeprom;
+ uint8_t *sfp_base;
+
+ sfp_base = sfp_eeprom->base;
+
+ if (sfp_base[AXGBE_SFP_BASE_ID] != AXGBE_SFP_ID_SFP)
+ return;
+
+ if (sfp_base[AXGBE_SFP_BASE_EXT_ID] != AXGBE_SFP_EXT_ID_SFP)
+ return;
+
+ if (axgbe_phy_sfp_parse_quirks(pdata))
+ return;
+
+ /* Assume ACTIVE cable unless told it is PASSIVE */
+ if (sfp_base[AXGBE_SFP_BASE_CABLE] & AXGBE_SFP_BASE_CABLE_PASSIVE) {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_PASSIVE;
+ phy_data->sfp_cable_len = sfp_base[AXGBE_SFP_BASE_CU_CABLE_LEN];
+ } else {
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_ACTIVE;
+ }
+
+ /* Determine the type of SFP */
+ if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_SR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_SR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_LR)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LR;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] &
+ AXGBE_SFP_BASE_10GBE_CC_LRM)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_LRM;
+ else if (sfp_base[AXGBE_SFP_BASE_10GBE_CC] & AXGBE_SFP_BASE_10GBE_CC_ER)
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_ER;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_SX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_SX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_LX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_LX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_CX)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_CX;
+ else if (sfp_base[AXGBE_SFP_BASE_1GBE_CC] & AXGBE_SFP_BASE_1GBE_CC_T)
+ phy_data->sfp_base = AXGBE_SFP_BASE_1000_T;
+ else if ((phy_data->sfp_cable == AXGBE_SFP_CABLE_PASSIVE) &&
+ axgbe_phy_sfp_bit_rate(sfp_eeprom, AXGBE_SFP_SPEED_10000))
+ phy_data->sfp_base = AXGBE_SFP_BASE_10000_CR;
+
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_100_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_1000;
+ break;
+ case AXGBE_SFP_BASE_10000_SR:
+ case AXGBE_SFP_BASE_10000_LR:
+ case AXGBE_SFP_BASE_10000_LRM:
+ case AXGBE_SFP_BASE_10000_ER:
+ case AXGBE_SFP_BASE_10000_CR:
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_10000;
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_sfp_verify_eeprom(uint8_t cc_in, uint8_t *buf,
+ unsigned int len)
+{
+ uint8_t cc;
+
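+	/* SFF-8472 check codes are the low 8 bits of the sum of all preceding
+	 * bytes in the block (CC_BASE covers bytes 0-62, CC_EXT bytes 64-94)
+	 */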
+ for (cc = 0; len; buf++, len--)
+ cc += *buf;
+
+	return cc == cc_in;
+}
+
+static int axgbe_phy_sfp_read_eeprom(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ struct axgbe_sfp_eeprom sfp_eeprom;
+ uint8_t eeprom_addr;
+ int ret;
+
+ ret = axgbe_phy_sfp_get_mux(pdata);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error setting SFP MUX\n");
+ return ret;
+ }
+
+ /* Read the SFP serial ID eeprom */
+ eeprom_addr = 0;
+ ret = axgbe_phy_i2c_read(pdata, AXGBE_SFP_SERIAL_ID_ADDRESS,
+ &eeprom_addr, sizeof(eeprom_addr),
+ &sfp_eeprom, sizeof(sfp_eeprom));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP EEPROM\n");
+ goto put;
+ }
+
+ /* Validate the contents read */
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.base[AXGBE_SFP_BASE_CC],
+ sfp_eeprom.base,
+ sizeof(sfp_eeprom.base) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ if (!axgbe_phy_sfp_verify_eeprom(sfp_eeprom.extd[AXGBE_SFP_EXTD_CC],
+ sfp_eeprom.extd,
+ sizeof(sfp_eeprom.extd) - 1)) {
+ ret = -EINVAL;
+ goto put;
+ }
+
+ /* Check for an added or changed SFP */
+ if (memcmp(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom))) {
+ phy_data->sfp_changed = 1;
+ memcpy(&phy_data->sfp_eeprom, &sfp_eeprom, sizeof(sfp_eeprom));
+
+ if (sfp_eeprom.extd[AXGBE_SFP_EXTD_SFF_8472]) {
+ uint8_t diag_type;
+ diag_type = sfp_eeprom.extd[AXGBE_SFP_EXTD_DIAG];
+
+ if (!(diag_type & AXGBE_SFP_EXTD_DIAG_ADDR_CHANGE))
+ phy_data->sfp_diags = 1;
+ }
+ } else {
+ phy_data->sfp_changed = 0;
+ }
+
+put:
+ axgbe_phy_sfp_put_mux(pdata);
+
+ return ret;
+}
+
+static void axgbe_phy_sfp_signals(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int gpio_input;
+ u8 gpio_reg, gpio_ports[2];
+ int ret;
+
+ /* Read the input port registers */
+ gpio_reg = 0;
+ ret = axgbe_phy_i2c_read(pdata, phy_data->sfp_gpio_address,
+ &gpio_reg, sizeof(gpio_reg),
+ gpio_ports, sizeof(gpio_ports));
+ if (ret) {
+ PMD_DRV_LOG(ERR, "I2C error reading SFP GPIOs\n");
+ return;
+ }
+
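+	/* The PCA9555 returns two 8-bit input ports; combine them so each
+	 * sfp_gpio_* value indexes a bit of one 16-bit word
+	 */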
+ gpio_input = (gpio_ports[1] << 8) | gpio_ports[0];
+
+ if (phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_MOD_ABSENT) {
+ /* No GPIO, just assume the module is present for now */
+ phy_data->sfp_mod_absent = 0;
+ } else {
+ if (!(gpio_input & (1 << phy_data->sfp_gpio_mod_absent)))
+ phy_data->sfp_mod_absent = 0;
+ }
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_RX_LOS) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_rx_los)))
+ phy_data->sfp_rx_los = 1;
+
+ if (!(phy_data->sfp_gpio_mask & AXGBE_GPIO_NO_TX_FAULT) &&
+ (gpio_input & (1 << phy_data->sfp_gpio_tx_fault)))
+ phy_data->sfp_tx_fault = 1;
+}
+
+static void axgbe_phy_sfp_mod_absent(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_phy_avail = 0;
+ memset(&phy_data->sfp_eeprom, 0, sizeof(phy_data->sfp_eeprom));
+}
+
+static void axgbe_phy_sfp_reset(struct axgbe_phy_data *phy_data)
+{
+ phy_data->sfp_rx_los = 0;
+ phy_data->sfp_tx_fault = 0;
+ phy_data->sfp_mod_absent = 1;
+ phy_data->sfp_diags = 0;
+ phy_data->sfp_base = AXGBE_SFP_BASE_UNKNOWN;
+ phy_data->sfp_cable = AXGBE_SFP_CABLE_UNKNOWN;
+ phy_data->sfp_speed = AXGBE_SFP_SPEED_UNKNOWN;
+}
+
+static void axgbe_phy_sfp_detect(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Reset the SFP signals and info */
+ axgbe_phy_sfp_reset(phy_data);
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ /* Read the SFP signals and check for module presence */
+ axgbe_phy_sfp_signals(pdata);
+ if (phy_data->sfp_mod_absent) {
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ ret = axgbe_phy_sfp_read_eeprom(pdata);
+ if (ret) {
+ /* Treat any error as if there isn't an SFP plugged in */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+ goto put;
+ }
+
+ axgbe_phy_sfp_parse_eeprom(pdata);
+ axgbe_phy_sfp_external_phy(pdata);
+
+put:
+ axgbe_phy_sfp_phy_settings(pdata);
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_phydev_flowctrl(struct axgbe_port *pdata)
+{
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+}
+
+static enum axgbe_mode axgbe_phy_an73_redrv_outcome(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Use external PHY to determine flow control */
+ if (pdata->phy.pause_autoneg)
+ axgbe_phy_phydev_flowctrl(pdata);
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
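+	/* In the clause 73 technology ability field, bit 7 (0x80) is
+	 * 10GBASE-KR and bit 5 (0x20) is 1000BASE-KX
+	 */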
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KR;
+ break;
+ default:
+ mode = AXGBE_MODE_SFI;
+ break;
+ }
+ } else if (ad_reg & 0x20) {
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ mode = AXGBE_MODE_KX_1000;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ mode = AXGBE_MODE_X;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ default:
+ mode = AXGBE_MODE_X;
+ break;
+ }
+ break;
+ default:
+ mode = AXGBE_MODE_SGMII_1000;
+ break;
+ }
+ } else {
+ mode = AXGBE_MODE_UNKNOWN;
+ }
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
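+	/* Bits 14-15 (0xc000) carry the clause 73 FEC ability/requested bits */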
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an73_outcome(struct axgbe_port *pdata)
+{
+ enum axgbe_mode mode;
+ unsigned int ad_reg, lp_reg;
+
+ pdata->phy.lp_advertising |= ADVERTISED_Autoneg;
+ pdata->phy.lp_advertising |= ADVERTISED_Backplane;
+
+ /* Compare Advertisement and Link Partner register 1 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA);
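+	/* Base page bit 10 (0x400) is symmetric PAUSE and bit 11 (0x800) is
+	 * asymmetric PAUSE
+	 */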
+ if (lp_reg & 0x400)
+ pdata->phy.lp_advertising |= ADVERTISED_Pause;
+ if (lp_reg & 0x800)
+ pdata->phy.lp_advertising |= ADVERTISED_Asym_Pause;
+
+ if (pdata->phy.pause_autoneg) {
+ /* Set flow control based on auto-negotiation result */
+ pdata->phy.tx_pause = 0;
+ pdata->phy.rx_pause = 0;
+
+ if (ad_reg & lp_reg & 0x400) {
+ pdata->phy.tx_pause = 1;
+ pdata->phy.rx_pause = 1;
+ } else if (ad_reg & lp_reg & 0x800) {
+ if (ad_reg & 0x400)
+ pdata->phy.rx_pause = 1;
+ else if (lp_reg & 0x400)
+ pdata->phy.tx_pause = 1;
+ }
+ }
+
+ /* Compare Advertisement and Link Partner register 2 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 1);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 1);
+ if (lp_reg & 0x80)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseKR_Full;
+ if (lp_reg & 0x20)
+ pdata->phy.lp_advertising |= ADVERTISED_1000baseKX_Full;
+
+ ad_reg &= lp_reg;
+ if (ad_reg & 0x80)
+ mode = AXGBE_MODE_KR;
+ else if (ad_reg & 0x20)
+ mode = AXGBE_MODE_KX_1000;
+ else
+ mode = AXGBE_MODE_UNKNOWN;
+
+ /* Compare Advertisement and Link Partner register 3 */
+ ad_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_ADVERTISE + 2);
+ lp_reg = XMDIO_READ(pdata, MDIO_MMD_AN, MDIO_AN_LPA + 2);
+ if (lp_reg & 0xc000)
+ pdata->phy.lp_advertising |= ADVERTISED_10000baseR_FEC;
+
+ return mode;
+}
+
+static enum axgbe_mode axgbe_phy_an_outcome(struct axgbe_port *pdata)
+{
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ return axgbe_phy_an73_outcome(pdata);
+ case AXGBE_AN_MODE_CL73_REDRV:
+ return axgbe_phy_an73_redrv_outcome(pdata);
+ case AXGBE_AN_MODE_CL37:
+ case AXGBE_AN_MODE_CL37_SGMII:
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static unsigned int axgbe_phy_an_advertising(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int advertising;
+
+ /* Without a re-driver, just return current advertising */
+ if (!phy_data->redrv)
+ return pdata->phy.advertising;
+
+ /* With the KR re-driver we need to advertise a single speed */
+ advertising = pdata->phy.advertising;
+ advertising &= ~ADVERTISED_1000baseKX_Full;
+ advertising &= ~ADVERTISED_10000baseKR_Full;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ PMD_DRV_LOG(ERR, "10GBASE_T mode is not supported\n");
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ advertising |= ADVERTISED_1000baseKX_Full;
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+ break;
+ default:
+ advertising |= ADVERTISED_10000baseKR_Full;
+ break;
+ }
+
+ return advertising;
+}
+
+static int axgbe_phy_an_config(struct axgbe_port *pdata __rte_unused)
+{
+	/* Stub implementation: there is no case to support external PHY
+	 * devices registered through kernel APIs
+	 */
+	return 0;
+}
+
+static enum axgbe_an_mode axgbe_phy_an_sfp_mode(struct axgbe_phy_data *phy_data)
+{
+ switch (phy_data->sfp_base) {
+ case AXGBE_SFP_BASE_1000_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_SFP_BASE_1000_SX:
+ case AXGBE_SFP_BASE_1000_LX:
+ case AXGBE_SFP_BASE_1000_CX:
+ return AXGBE_AN_MODE_CL37;
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static enum axgbe_an_mode axgbe_phy_an_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* A KR re-driver will always require CL73 AN */
+ if (phy_data->redrv)
+ return AXGBE_AN_MODE_CL73_REDRV;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ return AXGBE_AN_MODE_CL37;
+ case AXGBE_PORT_MODE_NBASE_T:
+ return AXGBE_AN_MODE_CL37_SGMII;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return AXGBE_AN_MODE_CL73;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return AXGBE_AN_MODE_NONE;
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_an_sfp_mode(phy_data);
+ default:
+ return AXGBE_AN_MODE_NONE;
+ }
+}
+
+static int axgbe_phy_set_redrv_mode_mdio(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ u16 redrv_reg, redrv_val;
+
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+ redrv_val = (u16)mode;
+
+ return pdata->hw_if.write_ext_mii_regs(pdata, phy_data->redrv_addr,
+ redrv_reg, redrv_val);
+}
+
+static int axgbe_phy_set_redrv_mode_i2c(struct axgbe_port *pdata,
+ enum axgbe_phy_redrv_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int redrv_reg;
+ int ret;
+
+ /* Calculate the register to write */
+ redrv_reg = AXGBE_PHY_REDRV_MODE_REG + (phy_data->redrv_lane * 0x1000);
+
+ ret = axgbe_phy_redrv_write(pdata, redrv_reg, mode);
+
+ return ret;
+}
+
+static void axgbe_phy_set_redrv_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_phy_redrv_mode mode;
+ int ret;
+
+ if (!phy_data->redrv)
+ return;
+
+ mode = AXGBE_PHY_REDRV_MODE_CX;
+ if ((phy_data->port_mode == AXGBE_PORT_MODE_SFP) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_1000_CX) &&
+ (phy_data->sfp_base != AXGBE_SFP_BASE_10000_CR))
+ mode = AXGBE_PHY_REDRV_MODE_SR;
+
+ ret = axgbe_phy_get_comm_ownership(pdata);
+ if (ret)
+ return;
+
+ if (phy_data->redrv_if)
+ axgbe_phy_set_redrv_mode_i2c(pdata, mode);
+ else
+ axgbe_phy_set_redrv_mode_mdio(pdata, mode);
+
+ axgbe_phy_put_comm_ownership(pdata);
+}
+
+static void axgbe_phy_start_ratechange(struct axgbe_port *pdata)
+{
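+	/* Return early when no firmware request from a previous rate change
+	 * is still pending
+	 */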
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+}
+
+static void axgbe_phy_complete_ratechange(struct axgbe_port *pdata)
+{
+ unsigned int wait;
+
+ /* Wait for command to complete */
+ wait = AXGBE_RATECHANGE_COUNT;
+ while (wait--) {
+ if (!XP_IOREAD_BITS(pdata, XP_DRIVER_INT_RO, STATUS))
+ return;
+
+ rte_delay_us(1500);
+ }
+}
+
+static void axgbe_phy_rrc(struct axgbe_port *pdata)
+{
+ unsigned int s0;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Receiver Reset Cycle */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 5);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+
+ axgbe_phy_complete_ratechange(pdata);
+}
+
+static void axgbe_phy_power_off(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, 0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+}
+
+static void axgbe_phy_sfi_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/SFI */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 3);
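+	/* Passive copper modules report their cable length (serial ID byte
+	 * 18, in meters), which selects the firmware sub-command; active
+	 * cables and optics use sub-command 0
+	 */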
+ if (phy_data->sfp_cable != AXGBE_SFP_CABLE_PASSIVE) {
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+ } else {
+ if (phy_data->sfp_cable_len <= 1)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 1);
+ else if (phy_data->sfp_cable_len <= 3)
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 2);
+ else
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 3);
+ }
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_SFI;
+}
+
+static void axgbe_phy_kr_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int s0;
+
+ axgbe_phy_set_redrv_mode(pdata);
+
+ axgbe_phy_start_ratechange(pdata);
+
+ /* 10G/KR */
+ s0 = 0;
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, COMMAND, 4);
+ XP_SET_BITS(s0, XP_DRIVER_SCRATCH_0, SUB_COMMAND, 0);
+
+ /* Call FW to make the change */
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_0, s0);
+ XP_IOWRITE(pdata, XP_DRIVER_SCRATCH_1, 0);
+ XP_IOWRITE_BITS(pdata, XP_DRIVER_INT_REQ, REQUEST, 1);
+ axgbe_phy_complete_ratechange(pdata);
+ phy_data->cur_mode = AXGBE_MODE_KR;
+}
+
+static enum axgbe_mode axgbe_phy_cur_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ return phy_data->cur_mode;
+}
+
+static enum axgbe_mode axgbe_phy_switch_baset_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* No switching if not 10GBase-T */
+ if (phy_data->port_mode != AXGBE_PORT_MODE_10GBASE_T)
+ return axgbe_phy_cur_mode(pdata);
+
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_SGMII_100:
+ case AXGBE_MODE_SGMII_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_SGMII_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_2500_mode(struct axgbe_port *pdata
+ __rte_unused)
+{
+ return AXGBE_MODE_KX_2500;
+}
+
+static enum axgbe_mode axgbe_phy_switch_bp_mode(struct axgbe_port *pdata)
+{
+	/* If we are in KR, switch to KX, and vice versa */
+ switch (axgbe_phy_cur_mode(pdata)) {
+ case AXGBE_MODE_KX_1000:
+ return AXGBE_MODE_KR;
+ case AXGBE_MODE_KR:
+ default:
+ return AXGBE_MODE_KX_1000;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_switch_mode(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_switch_bp_mode(pdata);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_switch_bp_2500_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_switch_baset_mode(pdata);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ case AXGBE_PORT_MODE_SFP:
+ /* No switching, so just return current mode */
+ return axgbe_phy_cur_mode(pdata);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_basex_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_baset_mode(struct axgbe_phy_data *phy_data
+ __rte_unused,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ return AXGBE_MODE_SGMII_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_sfp_mode(struct axgbe_phy_data *phy_data,
+ int speed)
+{
+ switch (speed) {
+ case SPEED_100:
+ return AXGBE_MODE_SGMII_100;
+ case SPEED_1000:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return AXGBE_MODE_SGMII_1000;
+ else
+ return AXGBE_MODE_X;
+ case SPEED_10000:
+ case SPEED_UNKNOWN:
+ return AXGBE_MODE_SFI;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_2500_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_2500:
+ return AXGBE_MODE_KX_2500;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_bp_mode(int speed)
+{
+ switch (speed) {
+ case SPEED_1000:
+ return AXGBE_MODE_KX_1000;
+ case SPEED_10000:
+ return AXGBE_MODE_KR;
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static enum axgbe_mode axgbe_phy_get_mode(struct axgbe_port *pdata,
+ int speed)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_get_bp_mode(speed);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_get_bp_2500_mode(speed);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_get_baset_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_get_basex_mode(phy_data, speed);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_get_sfp_mode(phy_data, speed);
+ default:
+ return AXGBE_MODE_UNKNOWN;
+ }
+}
+
+static void axgbe_phy_set_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KR:
+ axgbe_phy_kr_mode(pdata);
+ break;
+ case AXGBE_MODE_SFI:
+ axgbe_phy_sfi_mode(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static bool axgbe_phy_check_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode, u32 advert)
+{
+ if (pdata->phy.autoneg == AUTONEG_ENABLE) {
+ if (pdata->phy.advertising & advert)
+ return true;
+ } else {
+ enum axgbe_mode cur_mode;
+
+ cur_mode = axgbe_phy_get_mode(pdata, pdata->phy.speed);
+ if (cur_mode == mode)
+ return true;
+ }
+
+ return false;
+}
+
+static bool axgbe_phy_use_basex_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_X:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_baset_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_SGMII_100:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_sfp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (mode) {
+ case AXGBE_MODE_X:
+ if (phy_data->sfp_base == AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SGMII_100:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_100baseT_Full);
+ case AXGBE_MODE_SGMII_1000:
+ if (phy_data->sfp_base != AXGBE_SFP_BASE_1000_T)
+ return false;
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseT_Full);
+ case AXGBE_MODE_SFI:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseT_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_2500_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_2500:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_2500baseX_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_bp_mode(struct axgbe_port *pdata,
+ enum axgbe_mode mode)
+{
+ switch (mode) {
+ case AXGBE_MODE_KX_1000:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_1000baseKX_Full);
+ case AXGBE_MODE_KR:
+ return axgbe_phy_check_mode(pdata, mode,
+ ADVERTISED_10000baseKR_Full);
+ default:
+ return false;
+ }
+}
+
+static bool axgbe_phy_use_mode(struct axgbe_port *pdata, enum axgbe_mode mode)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ return axgbe_phy_use_bp_mode(pdata, mode);
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ return axgbe_phy_use_bp_2500_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ return axgbe_phy_use_baset_mode(pdata, mode);
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ return axgbe_phy_use_basex_mode(pdata, mode);
+ case AXGBE_PORT_MODE_SFP:
+ return axgbe_phy_use_sfp_mode(pdata, mode);
+ default:
+ return false;
+ }
+}
+
+static int axgbe_phy_link_status(struct axgbe_port *pdata, int *an_restart)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ *an_restart = 0;
+
+ if (phy_data->port_mode == AXGBE_PORT_MODE_SFP) {
+ /* Check SFP signals */
+ axgbe_phy_sfp_detect(pdata);
+
+ if (phy_data->sfp_changed) {
+ *an_restart = 1;
+ return 0;
+ }
+
+ if (phy_data->sfp_mod_absent || phy_data->sfp_rx_los)
+ return 0;
+ }
+
+ /* Link status is latched low, so read once to clear
+ * and then read again to get current state
+ */
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ reg = XMDIO_READ(pdata, MDIO_MMD_PCS, MDIO_STAT1);
+ if (reg & MDIO_STAT1_LSTATUS)
+ return 1;
+
+ /* No link, attempt a receiver reset cycle */
+ if (phy_data->rrc_count++) {
+ phy_data->rrc_count = 0;
+ axgbe_phy_rrc(pdata);
+ }
+
+ return 0;
+}
+
+static void axgbe_phy_sfp_gpio_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+
+ phy_data->sfp_gpio_address = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3, GPIO_ADDR);
+
+ phy_data->sfp_gpio_mask = XP_GET_BITS(reg, XP_PROP_3, GPIO_MASK);
+
+ phy_data->sfp_gpio_rx_los = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RX_LOS);
+ phy_data->sfp_gpio_tx_fault = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_TX_FAULT);
+ phy_data->sfp_gpio_mod_absent = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_MOD_ABS);
+ phy_data->sfp_gpio_rate_select = XP_GET_BITS(reg, XP_PROP_3,
+ GPIO_RATE_SELECT);
+}
+
+static void axgbe_phy_sfp_comm_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg, mux_addr_hi, mux_addr_lo;
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+
+ mux_addr_hi = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_HI);
+ mux_addr_lo = XP_GET_BITS(reg, XP_PROP_4, MUX_ADDR_LO);
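+	/* A LO value of 7 (AXGBE_SFP_DIRECT) means the SFP is wired straight
+	 * to the I2C bus; any other value selects a PCA9545 mux whose address
+	 * is assembled from the HI and LO fields below
+	 */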
+ if (mux_addr_lo == AXGBE_SFP_DIRECT)
+ return;
+
+ phy_data->sfp_comm = AXGBE_SFP_COMM_PCA9545;
+ phy_data->sfp_mux_address = (mux_addr_hi << 2) + mux_addr_lo;
+ phy_data->sfp_mux_channel = XP_GET_BITS(reg, XP_PROP_4, MUX_CHAN);
+}
+
+static void axgbe_phy_sfp_setup(struct axgbe_port *pdata)
+{
+ axgbe_phy_sfp_comm_setup(pdata);
+ axgbe_phy_sfp_gpio_setup(pdata);
+}
+
+static bool axgbe_phy_redrv_error(struct axgbe_phy_data *phy_data)
+{
+ if (!phy_data->redrv)
+ return false;
+
+ if (phy_data->redrv_if >= AXGBE_PHY_REDRV_IF_MAX)
+ return true;
+
+ switch (phy_data->redrv_model) {
+ case AXGBE_PHY_REDRV_MODEL_4223:
+ if (phy_data->redrv_lane > 3)
+ return true;
+ break;
+ case AXGBE_PHY_REDRV_MODEL_4227:
+ if (phy_data->redrv_lane > 1)
+ return true;
+ break;
+ default:
+ return true;
+ }
+
+ return false;
+}
+
+static int axgbe_phy_mdio_reset_setup(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ unsigned int reg;
+
+ if (phy_data->conn_type != AXGBE_CONN_TYPE_MDIO)
+ return 0;
+ reg = XP_IOREAD(pdata, XP_PROP_3);
+ phy_data->mdio_reset = XP_GET_BITS(reg, XP_PROP_3, MDIO_RESET);
+ switch (phy_data->mdio_reset) {
+ case AXGBE_MDIO_RESET_NONE:
+ case AXGBE_MDIO_RESET_I2C_GPIO:
+ case AXGBE_MDIO_RESET_INT_GPIO:
+ break;
+ default:
+ PMD_DRV_LOG(ERR, "unsupported MDIO reset (%#x)\n",
+ phy_data->mdio_reset);
+ return -EINVAL;
+ }
+ if (phy_data->mdio_reset == AXGBE_MDIO_RESET_I2C_GPIO) {
+ phy_data->mdio_reset_addr = AXGBE_GPIO_ADDRESS_PCA9555 +
+ XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_ADDR);
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_I2C_GPIO);
+ } else if (phy_data->mdio_reset == AXGBE_MDIO_RESET_INT_GPIO) {
+ phy_data->mdio_reset_gpio = XP_GET_BITS(reg, XP_PROP_3,
+ MDIO_RESET_INT_GPIO);
+ }
+
+ return 0;
+}
+
+static bool axgbe_phy_port_mode_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_X:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_NBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_T:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if ((phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) ||
+ (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000))
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_conn_type_mismatch(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_BACKPLANE:
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_BACKPLANE)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_1000BASE_T:
+ case AXGBE_PORT_MODE_1000BASE_X:
+ case AXGBE_PORT_MODE_NBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_T:
+ case AXGBE_PORT_MODE_10GBASE_R:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_MDIO)
+ return false;
+ break;
+ case AXGBE_PORT_MODE_SFP:
+ if (phy_data->conn_type == AXGBE_CONN_TYPE_SFP)
+ return false;
+ break;
+ default:
+ break;
+ }
+
+ return true;
+}
+
+static bool axgbe_phy_port_enabled(struct axgbe_port *pdata)
+{
+ unsigned int reg;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ if (!XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS))
+ return false;
+ if (!XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE))
+ return false;
+
+ return true;
+}
+
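+/* KR CDR work-around: clock-and-data-recovery tracking is switched off
+ * while clause 73 auto-negotiation runs and re-enabled once it completes,
+ * with a settle delay that grows after each failed attempt
+ */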
+static void axgbe_phy_cdr_track(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (!phy_data->phy_cdr_notrack)
+ return;
+
+ rte_delay_us(phy_data->phy_cdr_delay + 400);
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_ON);
+
+ phy_data->phy_cdr_notrack = 0;
+}
+
+static void axgbe_phy_cdr_notrack(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ if (!pdata->vdata->an_cdr_workaround)
+ return;
+
+ if (phy_data->phy_cdr_notrack)
+ return;
+
+ XMDIO_WRITE_BITS(pdata, MDIO_MMD_PMAPMD, MDIO_VEND2_PMA_CDR_CONTROL,
+ AXGBE_PMA_CDR_TRACK_EN_MASK,
+ AXGBE_PMA_CDR_TRACK_EN_OFF);
+
+ axgbe_phy_rrc(pdata);
+
+ phy_data->phy_cdr_notrack = 1;
+}
+
+static void axgbe_phy_kr_training_post(struct axgbe_port *pdata)
+{
+ if (!pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_kr_training_pre(struct axgbe_port *pdata)
+{
+ if (pdata->cdr_track_early)
+ axgbe_phy_cdr_track(pdata);
+}
+
+static void axgbe_phy_an_post(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_track(pdata);
+
+ switch (pdata->an_result) {
+ case AXGBE_AN_READY:
+ case AXGBE_AN_COMPLETE:
+ break;
+ default:
+ if (phy_data->phy_cdr_delay < AXGBE_CDR_DELAY_MAX)
+ phy_data->phy_cdr_delay += AXGBE_CDR_DELAY_INC;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_an_pre(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ switch (pdata->an_mode) {
+ case AXGBE_AN_MODE_CL73:
+ case AXGBE_AN_MODE_CL73_REDRV:
+ if (phy_data->cur_mode != AXGBE_MODE_KR)
+ break;
+
+ axgbe_phy_cdr_notrack(pdata);
+ break;
+ default:
+ break;
+ }
+}
+
+static void axgbe_phy_stop(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+
+ /* Reset SFP data */
+ axgbe_phy_sfp_reset(phy_data);
+ axgbe_phy_sfp_mod_absent(pdata);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* Power off the PHY */
+ axgbe_phy_power_off(pdata);
+
+ /* Stop the I2C controller */
+ pdata->i2c_if.i2c_stop(pdata);
+}
+
+static int axgbe_phy_start(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ int ret;
+
+ /* Start the I2C controller */
+ ret = pdata->i2c_if.i2c_start(pdata);
+ if (ret)
+ return ret;
+
+ /* Start in highest supported mode */
+ axgbe_phy_set_mode(pdata, phy_data->start_mode);
+
+ /* Reset CDR support */
+ axgbe_phy_cdr_track(pdata);
+
+ /* After starting the I2C controller, we can check for an SFP */
+ switch (phy_data->port_mode) {
+ case AXGBE_PORT_MODE_SFP:
+ axgbe_phy_sfp_detect(pdata);
+ break;
+ default:
+ break;
+ }
+
+ return ret;
+}
+
+static int axgbe_phy_reset(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data = pdata->phy_data;
+ enum axgbe_mode cur_mode;
+
+ /* Reset by power cycling the PHY */
+ cur_mode = phy_data->cur_mode;
+ axgbe_phy_power_off(pdata);
+	/* The first reset is invoked while cur_mode is still AXGBE_MODE_UNKNOWN */
+ axgbe_phy_set_mode(pdata, cur_mode);
+ return 0;
+}
+
+static int axgbe_phy_init(struct axgbe_port *pdata)
+{
+ struct axgbe_phy_data *phy_data;
+ unsigned int reg;
+ int ret;
+
+ /* Check if enabled */
+ if (!axgbe_phy_port_enabled(pdata)) {
+ PMD_DRV_LOG(ERR, "device is not enabled\n");
+ return -ENODEV;
+ }
+
+ /* Initialize the I2C controller */
+ ret = pdata->i2c_if.i2c_init(pdata);
+ if (ret)
+ return ret;
+
+ phy_data = rte_zmalloc("phy_data memory", sizeof(*phy_data), 0);
+ if (!phy_data) {
+ PMD_DRV_LOG(ERR, "phy_data allocation failed\n");
+ return -ENOMEM;
+ }
+ pdata->phy_data = phy_data;
+
+ reg = XP_IOREAD(pdata, XP_PROP_0);
+ phy_data->port_mode = XP_GET_BITS(reg, XP_PROP_0, PORT_MODE);
+ phy_data->port_id = XP_GET_BITS(reg, XP_PROP_0, PORT_ID);
+ phy_data->port_speeds = XP_GET_BITS(reg, XP_PROP_0, PORT_SPEEDS);
+ phy_data->conn_type = XP_GET_BITS(reg, XP_PROP_0, CONN_TYPE);
+ phy_data->mdio_addr = XP_GET_BITS(reg, XP_PROP_0, MDIO_ADDR);
+
+ reg = XP_IOREAD(pdata, XP_PROP_4);
+ phy_data->redrv = XP_GET_BITS(reg, XP_PROP_4, REDRV_PRESENT);
+ phy_data->redrv_if = XP_GET_BITS(reg, XP_PROP_4, REDRV_IF);
+ phy_data->redrv_addr = XP_GET_BITS(reg, XP_PROP_4, REDRV_ADDR);
+ phy_data->redrv_lane = XP_GET_BITS(reg, XP_PROP_4, REDRV_LANE);
+ phy_data->redrv_model = XP_GET_BITS(reg, XP_PROP_4, REDRV_MODEL);
+
+ /* Validate the connection requested */
+ if (axgbe_phy_conn_type_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/connection mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->conn_type);
+ return -EINVAL;
+ }
+
+ /* Validate the mode requested */
+ if (axgbe_phy_port_mode_mismatch(pdata)) {
+ PMD_DRV_LOG(ERR, "phy mode/speed mismatch (%#x/%#x)\n",
+ phy_data->port_mode, phy_data->port_speeds);
+ return -EINVAL;
+ }
+
+ /* Check for and validate MDIO reset support */
+ ret = axgbe_phy_mdio_reset_setup(pdata);
+ if (ret)
+ return ret;
+
+ /* Validate the re-driver information */
+ if (axgbe_phy_redrv_error(phy_data)) {
+ PMD_DRV_LOG(ERR, "phy re-driver settings error\n");
+ return -EINVAL;
+ }
+ pdata->kr_redrv = phy_data->redrv;
+
+ /* Indicate current mode is unknown */
+ phy_data->cur_mode = AXGBE_MODE_UNKNOWN;
+
+ /* Initialize supported features */
+ pdata->phy.supported = 0;
+
+ switch (phy_data->port_mode) {
+ /* Backplane support */
+ case AXGBE_PORT_MODE_BACKPLANE:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseKX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseKR_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+ case AXGBE_PORT_MODE_BACKPLANE_2500:
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_Backplane;
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* MDIO 1GBase-T support */
+ case AXGBE_PORT_MODE_1000BASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO Base-X support */
+ case AXGBE_PORT_MODE_1000BASE_X:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_X;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+ break;
+
+ /* MDIO NBase-T support */
+ case AXGBE_PORT_MODE_NBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_2500) {
+ pdata->phy.supported |= SUPPORTED_2500baseX_Full;
+ phy_data->start_mode = AXGBE_MODE_KX_2500;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL45;
+ break;
+
+ /* 10GBase-T support */
+ case AXGBE_PORT_MODE_10GBASE_T:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_KR;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* 10GBase-R support */
+ case AXGBE_PORT_MODE_10GBASE_R:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |= SUPPORTED_10000baseR_FEC;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_NONE;
+ break;
+
+ /* SFP support */
+ case AXGBE_PORT_MODE_SFP:
+ pdata->phy.supported |= SUPPORTED_Autoneg;
+ pdata->phy.supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
+ pdata->phy.supported |= SUPPORTED_TP;
+ pdata->phy.supported |= SUPPORTED_FIBRE;
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_100) {
+ pdata->phy.supported |= SUPPORTED_100baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_100;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_1000) {
+ pdata->phy.supported |= SUPPORTED_1000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SGMII_1000;
+ }
+ if (phy_data->port_speeds & AXGBE_PHY_PORT_SPEED_10000) {
+ pdata->phy.supported |= SUPPORTED_10000baseT_Full;
+ phy_data->start_mode = AXGBE_MODE_SFI;
+ if (pdata->fec_ability & MDIO_PMA_10GBR_FECABLE_ABLE)
+ pdata->phy.supported |=
+ SUPPORTED_10000baseR_FEC;
+ }
+
+ phy_data->phydev_mode = AXGBE_MDIO_MODE_CL22;
+
+ axgbe_phy_sfp_setup(pdata);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if ((phy_data->conn_type & AXGBE_CONN_TYPE_MDIO) &&
+ (phy_data->phydev_mode != AXGBE_MDIO_MODE_NONE)) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->mdio_addr,
+ phy_data->phydev_mode);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "mdio port/clause not compatible (%d/%u)\n",
+ phy_data->mdio_addr, phy_data->phydev_mode);
+ return -EINVAL;
+ }
+ }
+
+ if (phy_data->redrv && !phy_data->redrv_if) {
+ ret = pdata->hw_if.set_ext_mii_mode(pdata, phy_data->redrv_addr,
+ AXGBE_MDIO_MODE_CL22);
+ if (ret) {
+ PMD_DRV_LOG(ERR, "redriver mdio port not compatible (%u)\n",
+ phy_data->redrv_addr);
+ return -EINVAL;
+ }
+ }
+
+ phy_data->phy_cdr_delay = AXGBE_CDR_DELAY_INIT;
+ return 0;
+}
+
+void axgbe_init_function_ptrs_phy_v2(struct axgbe_phy_if *phy_if)
+{
+ struct axgbe_phy_impl_if *phy_impl = &phy_if->phy_impl;
+
+ phy_impl->init = axgbe_phy_init;
+ phy_impl->reset = axgbe_phy_reset;
+ phy_impl->start = axgbe_phy_start;
+ phy_impl->stop = axgbe_phy_stop;
+ phy_impl->link_status = axgbe_phy_link_status;
+ phy_impl->use_mode = axgbe_phy_use_mode;
+ phy_impl->set_mode = axgbe_phy_set_mode;
+ phy_impl->get_mode = axgbe_phy_get_mode;
+ phy_impl->switch_mode = axgbe_phy_switch_mode;
+ phy_impl->cur_mode = axgbe_phy_cur_mode;
+ phy_impl->an_mode = axgbe_phy_an_mode;
+ phy_impl->an_config = axgbe_phy_an_config;
+ phy_impl->an_advertising = axgbe_phy_an_advertising;
+ phy_impl->an_outcome = axgbe_phy_an_outcome;
+
+ phy_impl->an_pre = axgbe_phy_an_pre;
+ phy_impl->an_post = axgbe_phy_an_post;
+
+ phy_impl->kr_training_pre = axgbe_phy_kr_training_pre;
+ phy_impl->kr_training_post = axgbe_phy_kr_training_post;
+}
diff --git a/drivers/net/axgbe/axgbe_rxtx.c b/drivers/net/axgbe/axgbe_rxtx.c
new file mode 100644
index 00000000..c5fd5f41
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx.c
@@ -0,0 +1,674 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+static void
+axgbe_rx_queue_release(struct axgbe_rx_queue *rx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (rx_queue) {
+ sw_ring = rx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < rx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(rx_queue);
+ }
+}
+
+void axgbe_dev_rx_queue_release(void *rxq)
+{
+ axgbe_rx_queue_release(rxq);
+}
+
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mp)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t size;
+ const struct rte_memzone *dma;
+ struct axgbe_rx_queue *rxq;
+ uint32_t rx_desc = nb_desc;
+ struct axgbe_port *pdata = dev->data->dev_private;
+
+	/*
+	 * Validate the Rx descriptor count: it must be a power of 2
+	 * and must not exceed the h/w supported maximum
+	 */
+ if ((!rte_is_power_of_2(rx_desc)) ||
+ rx_desc > pdata->rx_desc_count)
+ return -EINVAL;
+ /* First allocate the rx queue data structure */
+ rxq = rte_zmalloc_socket("ethdev RX queue",
+ sizeof(struct axgbe_rx_queue),
+ RTE_CACHE_LINE_SIZE, socket_id);
+ if (!rxq) {
+ PMD_INIT_LOG(ERR, "rte_zmalloc for rxq failed!");
+ return -ENOMEM;
+ }
+
+ rxq->cur = 0;
+ rxq->dirty = 0;
+ rxq->pdata = pdata;
+ rxq->mb_pool = mp;
+ rxq->queue_id = queue_idx;
+ rxq->port_id = dev->data->port_id;
+ rxq->nb_desc = rx_desc;
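+	/* Each DMA channel owns a register block at DMA_CH_BASE +
+	 * queue_id * DMA_CH_INC; cache the block base and the Rx tail
+	 * pointer register inside it
+	 */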
+ rxq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * rxq->queue_id));
+ rxq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)rxq->dma_regs +
+ DMA_CH_RDTR_LO);
+ if (rte_eth_dev_must_keep_crc(dev->data->dev_conf.rxmode.offloads))
+ rxq->crc_len = ETHER_CRC_LEN;
+ else
+ rxq->crc_len = 0;
+
+	/* CRC stripping in AXGBE is configured per port, not per queue */
+ pdata->crc_strip_enable = (rxq->crc_len == 0) ? 1 : 0;
+ rxq->free_thresh = rx_conf->rx_free_thresh ?
+ rx_conf->rx_free_thresh : AXGBE_RX_FREE_THRESH;
+ if (rxq->free_thresh > rxq->nb_desc)
+ rxq->free_thresh = rxq->nb_desc >> 3;
+
+ /* Allocate RX ring hardware descriptors */
+ size = rxq->nb_desc * sizeof(union axgbe_rx_desc);
+ dma = rte_eth_dma_zone_reserve(dev, "rx_ring", queue_idx, size, 128,
+ socket_id);
+ if (!dma) {
+ PMD_DRV_LOG(ERR, "ring_dma_zone_reserve for rx_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ rxq->ring_phys_addr = (uint64_t)dma->phys_addr;
+ rxq->desc = (volatile union axgbe_rx_desc *)dma->addr;
+ memset((void *)rxq->desc, 0, size);
+ /* Allocate software ring */
+ size = rxq->nb_desc * sizeof(struct rte_mbuf *);
+ rxq->sw_ring = rte_zmalloc_socket("sw_ring", size,
+ RTE_CACHE_LINE_SIZE,
+ socket_id);
+ if (!rxq->sw_ring) {
+ PMD_DRV_LOG(ERR, "rte_zmalloc for sw_ring failed\n");
+ axgbe_rx_queue_release(rxq);
+ return -ENOMEM;
+ }
+ dev->data->rx_queues[queue_idx] = rxq;
+ if (!pdata->rx_queues)
+ pdata->rx_queues = dev->data->rx_queues;
+
+ return 0;
+}
+
+static void axgbe_prepare_rx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int rx_status;
+ unsigned long rx_timeout;
+
+ /* The Rx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Rx queue to empty the Rx fifo. Don't
+ * wait forever though...
+ */
+ rx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+
+ while (time_before(rte_get_timer_cycles(), rx_timeout)) {
+ rx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
+ if ((AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
+ (AXGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), rx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Rx queue %u to empty\n",
+ queue);
+}
+
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Disable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
+
+ /* Prepare for Rx DMA channel stop */
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ axgbe_prepare_rx_stop(pdata, i);
+ }
+ /* Disable each Rx queue */
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Disable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 0);
+ }
+}
+
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev)
+{
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+ unsigned int reg_val = 0;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ /* Enable Rx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(rxq, DMA_CH_RCR, SR, 1);
+ }
+
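+	/* MAC_RQC0R holds a 2-bit enable field per Rx queue; 0x2 enables
+	 * the queue for generic (DCB) traffic, per the RQC0R field encoding
+	 */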
+ reg_val = 0;
+ for (i = 0; i < pdata->rx_q_count; i++)
+ reg_val |= (0x02 << (i << 1));
+ AXGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
+
+ /* Enable MAC Rx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
+	/* Frames are forwarded to the application after CRC stripping */
+ if (pdata->crc_strip_enable) {
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
+ }
+ AXGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
+}
+
+/* Rx burst function with one-to-one buffer refresh: each received mbuf is
+ * immediately replaced by a freshly allocated one
+ */
+uint16_t
+axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint16_t nb_rx = 0;
+ struct axgbe_rx_queue *rxq = rx_queue;
+ volatile union axgbe_rx_desc *desc;
+ uint64_t old_dirty = rxq->dirty;
+ struct rte_mbuf *mbuf, *tmbuf;
+ unsigned int err;
+ uint32_t error_status;
+ uint16_t idx, pidx, pkt_len;
+
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->cur);
+ while (nb_rx < nb_pkts) {
+ if (unlikely(idx == rxq->nb_desc))
+ idx = 0;
+
+ desc = &rxq->desc[idx];
+
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, OWN))
+ break;
+ tmbuf = rte_mbuf_raw_alloc(rxq->mb_pool);
+ if (unlikely(!tmbuf)) {
+ PMD_DRV_LOG(ERR, "RX mbuf alloc failed port_id = %u"
+ " queue_id = %u\n",
+ (unsigned int)rxq->port_id,
+ (unsigned int)rxq->queue_id);
+ rte_eth_devices[
+ rxq->port_id].data->rx_mbuf_alloc_failed++;
+ break;
+ }
+ pidx = idx + 1;
+ if (unlikely(pidx == rxq->nb_desc))
+ pidx = 0;
+
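+		/* Prefetch the next s/w ring entry now; every fourth entry,
+		 * also pull in the next descriptor and s/w ring cache lines
+		 */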
+ rte_prefetch0(rxq->sw_ring[pidx]);
+ if ((pidx & 0x3) == 0) {
+ rte_prefetch0(&rxq->desc[pidx]);
+ rte_prefetch0(&rxq->sw_ring[pidx]);
+ }
+
+ mbuf = rxq->sw_ring[idx];
+		/* Check for errors; non-checksum errors drop the mbuf */
+ err = AXGMAC_GET_BITS_LE(desc->write.desc3,
+ RX_NORMAL_DESC3, ES);
+ error_status = 0;
+ if (unlikely(err)) {
+ error_status = desc->write.desc3 & AXGBE_ERR_STATUS;
+ if ((error_status != AXGBE_L3_CSUM_ERR) &&
+ (error_status != AXGBE_L4_CSUM_ERR)) {
+ rxq->errors++;
+ rte_pktmbuf_free(mbuf);
+ goto err_set;
+ }
+ }
+ if (rxq->pdata->rx_csum_enable) {
+ mbuf->ol_flags = 0;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_GOOD;
+ if (unlikely(error_status == AXGBE_L3_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_IP_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_IP_CKSUM_BAD;
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_UNKNOWN;
+ } else if (
+ unlikely(error_status == AXGBE_L4_CSUM_ERR)) {
+ mbuf->ol_flags &= ~PKT_RX_L4_CKSUM_GOOD;
+ mbuf->ol_flags |= PKT_RX_L4_CKSUM_BAD;
+ }
+ }
+ rte_prefetch1(rte_pktmbuf_mtod(mbuf, void *));
+ /* Get the RSS hash */
+ if (AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3, RSV))
+ mbuf->hash.rss = rte_le_to_cpu_32(desc->write.desc1);
+ pkt_len = AXGMAC_GET_BITS_LE(desc->write.desc3, RX_NORMAL_DESC3,
+ PL) - rxq->crc_len;
+ /* Mbuf populate */
+ mbuf->next = NULL;
+ mbuf->data_off = RTE_PKTMBUF_HEADROOM;
+ mbuf->nb_segs = 1;
+ mbuf->port = rxq->port_id;
+ mbuf->pkt_len = pkt_len;
+ mbuf->data_len = pkt_len;
+ rxq->bytes += pkt_len;
+ rx_pkts[nb_rx++] = mbuf;
+err_set:
+ rxq->cur++;
+ rxq->sw_ring[idx++] = tmbuf;
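+		/* Re-arm the descriptor with the fresh mbuf's DMA address
+		 * and hand it back to hardware, setting OWN last
+		 */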
+ desc->read.baddr =
+ rte_cpu_to_le_64(rte_mbuf_data_iova_default(tmbuf));
+ memset((void *)(&desc->read.desc2), 0, 8);
+ AXGMAC_SET_BITS_LE(desc->read.desc3, RX_NORMAL_DESC3, OWN, 1);
+ rxq->dirty++;
+ }
+ rxq->pkts += nb_rx;
+ if (rxq->dirty != old_dirty) {
+ rte_wmb();
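+		/* Advance the channel tail pointer to the last refilled
+		 * descriptor so hardware can reuse the returned entries
+		 */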
+ idx = AXGBE_GET_DESC_IDX(rxq, rxq->dirty - 1);
+ AXGMAC_DMA_IOWRITE(rxq, DMA_CH_RDTR_LO,
+ low32_value(rxq->ring_phys_addr +
+ (idx * sizeof(union axgbe_rx_desc))));
+ }
+
+ return nb_rx;
+}
+
+/* Tx APIs */
+static void axgbe_tx_queue_release(struct axgbe_tx_queue *tx_queue)
+{
+ uint16_t i;
+ struct rte_mbuf **sw_ring;
+
+ if (tx_queue) {
+ sw_ring = tx_queue->sw_ring;
+ if (sw_ring) {
+ for (i = 0; i < tx_queue->nb_desc; i++) {
+ if (sw_ring[i])
+ rte_pktmbuf_free(sw_ring[i]);
+ }
+ rte_free(sw_ring);
+ }
+ rte_free(tx_queue);
+ }
+}
+
+void axgbe_dev_tx_queue_release(void *txq)
+{
+ axgbe_tx_queue_release(txq);
+}
+
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
+ uint16_t nb_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint32_t tx_desc;
+ struct axgbe_port *pdata;
+ struct axgbe_tx_queue *txq;
+ unsigned int tsize;
+ const struct rte_memzone *tz;
+
+ tx_desc = nb_desc;
+ pdata = (struct axgbe_port *)dev->data->dev_private;
+
+	/*
+	 * Validate the Tx descriptor count: it must be a power of 2,
+	 * at least AXGBE_MIN_RING_DESC, and must not exceed the h/w
+	 * supported maximum
+	 */
+ if ((!rte_is_power_of_2(tx_desc)) ||
+ tx_desc > pdata->tx_desc_count ||
+ tx_desc < AXGBE_MIN_RING_DESC)
+ return -EINVAL;
+
+ /* First allocate the tx queue data structure */
+ txq = rte_zmalloc("ethdev TX queue", sizeof(struct axgbe_tx_queue),
+ RTE_CACHE_LINE_SIZE);
+ if (!txq)
+ return -ENOMEM;
+ txq->pdata = pdata;
+
+ txq->nb_desc = tx_desc;
+ txq->free_thresh = tx_conf->tx_free_thresh ?
+ tx_conf->tx_free_thresh : AXGBE_TX_FREE_THRESH;
+ if (txq->free_thresh > txq->nb_desc)
+ txq->free_thresh = (txq->nb_desc >> 1);
+ txq->free_batch_cnt = txq->free_thresh;
+
+	/* The vector Tx path requires the queue size to be a multiple of
+	 * the free threshold
+	 */
+ if (txq->nb_desc % txq->free_thresh != 0)
+ txq->vector_disable = 1;
+
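+	/* The vector Tx path presets the descriptor control flags and does
+	 * not build checksum-offload fields, so fall back to the scalar
+	 * path when any Tx offload is requested
+	 */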
+ if (tx_conf->offloads != 0)
+ txq->vector_disable = 1;
+
+ /* Allocate TX ring hardware descriptors */
+ tsize = txq->nb_desc * sizeof(struct axgbe_tx_desc);
+ tz = rte_eth_dma_zone_reserve(dev, "tx_ring", queue_idx,
+ tsize, AXGBE_DESC_ALIGN, socket_id);
+ if (!tz) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ memset(tz->addr, 0, tsize);
+ txq->ring_phys_addr = (uint64_t)tz->phys_addr;
+ txq->desc = tz->addr;
+ txq->queue_id = queue_idx;
+ txq->port_id = dev->data->port_id;
+ txq->dma_regs = (void *)((uint8_t *)pdata->xgmac_regs + DMA_CH_BASE +
+ (DMA_CH_INC * txq->queue_id));
+ txq->dma_tail_reg = (volatile uint32_t *)((uint8_t *)txq->dma_regs +
+ DMA_CH_TDTR_LO);
+ txq->cur = 0;
+ txq->dirty = 0;
+ txq->nb_desc_free = txq->nb_desc;
+ /* Allocate software ring */
+ tsize = txq->nb_desc * sizeof(struct rte_mbuf *);
+ txq->sw_ring = rte_zmalloc("tx_sw_ring", tsize,
+ RTE_CACHE_LINE_SIZE);
+ if (!txq->sw_ring) {
+ axgbe_tx_queue_release(txq);
+ return -ENOMEM;
+ }
+ dev->data->tx_queues[queue_idx] = txq;
+ if (!pdata->tx_queues)
+ pdata->tx_queues = dev->data->tx_queues;
+
+ if (txq->vector_disable)
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+ else
+#ifdef RTE_ARCH_X86
+ dev->tx_pkt_burst = &axgbe_xmit_pkts_vec;
+#else
+ dev->tx_pkt_burst = &axgbe_xmit_pkts;
+#endif
+
+ return 0;
+}
+
+static void axgbe_txq_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * packets. Wait for the Tx queue to empty the Tx fifo. Don't
+ * wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
+ if ((AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
+ (AXGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx queue %u to empty\n",
+ queue);
+}
+
+static void axgbe_prepare_tx_stop(struct axgbe_port *pdata,
+ unsigned int queue)
+{
+ unsigned int tx_dsr, tx_pos, tx_qidx;
+ unsigned int tx_status;
+ unsigned long tx_timeout;
+
+ if (AXGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
+ return axgbe_txq_prepare_tx_stop(pdata, queue);
+
+ /* Calculate the status register to read and the position within */
+ if (queue < DMA_DSRX_FIRST_QUEUE) {
+ tx_dsr = DMA_DSR0;
+ tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
+ } else {
+ tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
+
+ tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
+ tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
+ DMA_DSRX_TPS_START;
+ }
+
+ /* The Tx engine cannot be stopped if it is actively processing
+ * descriptors. Wait for the Tx engine to enter the stopped or
+ * suspended state. Don't wait forever though...
+ */
+ tx_timeout = rte_get_timer_cycles() + (AXGBE_DMA_STOP_TIMEOUT *
+ rte_get_timer_hz());
+ while (time_before(rte_get_timer_cycles(), tx_timeout)) {
+ tx_status = AXGMAC_IOREAD(pdata, tx_dsr);
+ tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
+ if ((tx_status == DMA_TPS_STOPPED) ||
+ (tx_status == DMA_TPS_SUSPENDED))
+ break;
+
+ rte_delay_us(900);
+ }
+
+ if (!time_before(rte_get_timer_cycles(), tx_timeout))
+ PMD_DRV_LOG(ERR,
+ "timed out waiting for Tx DMA channel %u to stop\n",
+ queue);
+}
+
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ /* Prepare for stopping DMA channel */
+ for (i = 0; i < pdata->tx_q_count; i++) {
+ txq = dev->data->tx_queues[i];
+ axgbe_prepare_tx_stop(pdata, i);
+ }
+ /* Disable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
+	/* Disable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ 0);
+ /* Disable each Tx DMA channel */
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 0);
+ }
+}
+
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev)
+{
+ struct axgbe_tx_queue *txq;
+ struct axgbe_port *pdata = dev->data->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ /* Enable Tx DMA channel */
+ AXGMAC_DMA_IOWRITE_BITS(txq, DMA_CH_TCR, ST, 1);
+ }
+	/* Enable each Tx queue */
+ for (i = 0; i < pdata->tx_q_count; i++)
+ AXGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
+ MTL_Q_ENABLED);
+ /* Enable MAC Tx */
+ AXGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
+}
+
+/* Free mbufs whose transmission has been confirmed by hardware */
+static void axgbe_xmit_cleanup(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty);
+ while (txq->cur != txq->dirty) {
+ if (unlikely(idx == txq->nb_desc))
+ idx = 0;
+ desc = &txq->desc[idx];
+ /* Check for ownership */
+ if (AXGMAC_GET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN))
+ return;
+ memset((void *)&desc->desc2, 0, 8);
+ /* Free mbuf */
+ rte_pktmbuf_free(txq->sw_ring[idx]);
+ txq->sw_ring[idx++] = NULL;
+ txq->dirty++;
+ }
+}
+
+/* Tx descriptor formation: assumes each mbuf is linear (single segment)
+ * and consumes exactly one descriptor
+ */
+static int axgbe_xmit_hw(struct axgbe_tx_queue *txq,
+ struct rte_mbuf *mbuf)
+{
+ volatile struct axgbe_tx_desc *desc;
+ uint16_t idx;
+ uint64_t mask;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ desc = &txq->desc[idx];
+
+ /* Update buffer address and length */
+ desc->baddr = rte_mbuf_data_iova(mbuf);
+ AXGMAC_SET_BITS_LE(desc->desc2, TX_NORMAL_DESC2, HL_B1L,
+ mbuf->pkt_len);
+ /* Total msg length to transmit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FL,
+ mbuf->pkt_len);
+ /* Mark it as First and Last Descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, FD, 1);
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, LD, 1);
+ /* Mark it as a NORMAL descriptor */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CTXT, 0);
+	/* Configure h/w checksum offload */
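+	/* CIC = 0x3 requests IP header and L4 payload checksum insertion
+	 * (including the pseudo-header); CIC = 0x1 requests the IP header
+	 * checksum only, per the TX_NORMAL_DESC3 CIC encoding
+	 */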
+ mask = mbuf->ol_flags & PKT_TX_L4_MASK;
+ if ((mask == PKT_TX_TCP_CKSUM) || (mask == PKT_TX_UDP_CKSUM))
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x3);
+ else if (mbuf->ol_flags & PKT_TX_IP_CKSUM)
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, CIC, 0x1);
+ rte_wmb();
+
+ /* Set OWN bit */
+ AXGMAC_SET_BITS_LE(desc->desc3, TX_NORMAL_DESC3, OWN, 1);
+ rte_wmb();
+
+ /* Save mbuf */
+ txq->sw_ring[idx] = mbuf;
+	/* Update current index */
+ txq->cur++;
+ /* Update stats */
+ txq->bytes += mbuf->pkt_len;
+
+ return 0;
+}
+
+/* Tx burst entry point registered with the ethdev layer */
+uint16_t
+axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ if (unlikely(nb_pkts == 0))
+ return nb_pkts;
+
+ struct axgbe_tx_queue *txq;
+ uint16_t nb_desc_free;
+ uint16_t nb_pkt_sent = 0;
+ uint16_t idx;
+ uint32_t tail_addr;
+ struct rte_mbuf *mbuf;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
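+	/* cur and dirty are free-running 64-bit counters, so their
+	 * difference is the number of in-flight descriptors even across
+	 * wrap-around
+	 */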
+
+ if (unlikely(nb_desc_free <= txq->free_thresh)) {
+ axgbe_xmit_cleanup(txq);
+ nb_desc_free = txq->nb_desc - (txq->cur - txq->dirty);
+ if (unlikely(nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(nb_desc_free, nb_pkts);
+ while (nb_pkts--) {
+ mbuf = *tx_pkts++;
+ if (axgbe_xmit_hw(txq, mbuf))
+ goto out;
+ nb_pkt_sent++;
+ }
+out:
+ /* Sync read and write */
+ rte_mb();
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ tail_addr = low32_value(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+	/* Update tail reg with next immediate address to kick Tx DMA channel */
+ AXGMAC_DMA_IOWRITE(txq, DMA_CH_TDTR_LO, tail_addr);
+ txq->pkts += nb_pkt_sent;
+ return nb_pkt_sent;
+}
+
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev)
+{
+ PMD_INIT_FUNC_TRACE();
+ uint8_t i;
+ struct axgbe_rx_queue *rxq;
+ struct axgbe_tx_queue *txq;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq) {
+ axgbe_rx_queue_release(rxq);
+ dev->data->rx_queues[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ if (txq) {
+ axgbe_tx_queue_release(txq);
+ dev->data->tx_queues[i] = NULL;
+ }
+ }
+}
diff --git a/drivers/net/axgbe/axgbe_rxtx.h b/drivers/net/axgbe/axgbe_rxtx.h
new file mode 100644
index 00000000..917da58c
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx.h
@@ -0,0 +1,186 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#ifndef _AXGBE_RXTX_H_
+#define _AXGBE_RXTX_H_
+
+/* To suppress compiler warnings related to descriptor casting */
+#ifdef RTE_TOOLCHAIN_GCC
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+#ifdef RTE_TOOLCHAIN_CLANG
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+
+/* Descriptor related defines */
+#define AXGBE_MAX_RING_DESC 4096 /* must be a power of 2 */
+#define AXGBE_TX_DESC_MIN_FREE (AXGBE_MAX_RING_DESC >> 3)
+#define AXGBE_TX_DESC_MAX_PROC (AXGBE_MAX_RING_DESC >> 1)
+#define AXGBE_MIN_RING_DESC 32
+#define RTE_AXGBE_DESCS_PER_LOOP 4
+#define RTE_AXGBE_MAX_RX_BURST 32
+
+#define AXGBE_RX_FREE_THRESH 32
+#define AXGBE_TX_FREE_THRESH 32
+
+#define AXGBE_DESC_ALIGN 128
+#define AXGBE_DESC_OWN 0x80000000
+#define AXGBE_ERR_STATUS 0x000f0000
+#define AXGBE_L3_CSUM_ERR 0x00050000
+#define AXGBE_L4_CSUM_ERR 0x00060000
+
+#include "axgbe_common.h"
+
+#define AXGBE_GET_DESC_PT(_queue, _idx) \
+ (((_queue)->desc) + \
+ ((_idx) & ((_queue)->nb_desc - 1)))
+
+#define AXGBE_GET_DESC_IDX(_queue, _idx) \
+	((_idx) & ((_queue)->nb_desc - 1))
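+
+/* Illustrative example: with nb_desc = 256, an index of 260 masks down
+ * to 4; the free-running 64-bit cur/dirty counters wrap modulo the ring
+ * size, which is why nb_desc must be a power of two
+ */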
+
+/* Rx desc format */
+union axgbe_rx_desc {
+ struct {
+ uint64_t baddr;
+ uint32_t desc2;
+ uint32_t desc3;
+ } read;
+ struct {
+ uint32_t desc0;
+ uint32_t desc1;
+ uint32_t desc2;
+ uint32_t desc3;
+ } write;
+};
+
+struct axgbe_rx_queue {
+	/* mbuf pool for Rx buffers */
+	struct rte_mempool *mb_pool;
+	/* H/w Rx buffer size configured in DMA */
+	unsigned int buf_size;
+	/* Length of CRC stripped by h/w, if any */
+	uint16_t crc_len;
+	/* S/w ring holding the Rx mbuf addresses */
+	struct rte_mbuf **sw_ring;
+	/* Port private data */
+	struct axgbe_port *pdata;
+	/* Number of Rx descriptors in queue */
+	uint16_t nb_desc;
+	/* Max number of free Rx descriptors to hold before refill */
+	uint16_t free_thresh;
+	/* Index of descriptor to check for packet availability */
+	uint64_t cur;
+	/* Index of descriptor to check for buffer reallocation */
+	uint64_t dirty;
+	/* Rx h/w descriptor ring */
+	volatile union axgbe_rx_desc *desc;
+	/* Ring physical address */
+	uint64_t ring_phys_addr;
+	/* DMA channel register address */
+	void *dma_regs;
+	/* DMA channel tail register address */
+	volatile uint32_t *dma_tail_reg;
+	/* DPDK queue index */
+	uint16_t queue_id;
+	/* DPDK port id */
+	uint16_t port_id;
+	/* Queue stats */
+	uint64_t pkts;
+	uint64_t bytes;
+	uint64_t errors;
+	/* Number of mbufs allocated from pool */
+	uint64_t mbuf_alloc;
+
+} __rte_cache_aligned;
+
+/* Tx descriptor format */
+struct axgbe_tx_desc {
+	phys_addr_t baddr;
+	uint32_t desc2;
+	uint32_t desc3;
+};
+
+struct axgbe_tx_queue {
+	/* Port private data reference */
+	struct axgbe_port *pdata;
+	/* Number of Tx descriptors in queue */
+	uint16_t nb_desc;
+	/* Start freeing Tx buffers if there are fewer free descriptors than
+	 * this value
+	 */
+	uint16_t free_thresh;
+	/* Available descriptors for Tx processing */
+	uint16_t nb_desc_free;
+	/* Batch of mbufs/descs to release */
+	uint16_t free_batch_cnt;
+	/* Flag for vector support */
+	uint16_t vector_disable;
+	/* Index of descriptor to be used for current transfer */
+	uint64_t cur;
+	/* Index of descriptor to check for transfer complete */
+	uint64_t dirty;
+	/* Virtual address of ring */
+	volatile struct axgbe_tx_desc *desc;
+	/* Physical address of ring */
+	uint64_t ring_phys_addr;
+	/* DMA channel register space */
+	void *dma_regs;
+	/* DMA tail register address of ring */
+	volatile uint32_t *dma_tail_reg;
+	/* Tx queue index/id */
+	uint16_t queue_id;
+	/* Reference to hold Tx mbufs mapped to Tx descriptors, freed
+	 * after transmission confirmation
+	 */
+	struct rte_mbuf **sw_ring;
+	/* DPDK port id */
+	uint16_t port_id;
+	/* Queue stats */
+	uint64_t pkts;
+	uint64_t bytes;
+	uint64_t errors;
+
+} __rte_cache_aligned;
+
+/* Queue related APIs */
+
+/*
+ * Rx/Tx function prototypes
+ */
+
+void axgbe_dev_tx_queue_release(void *txq);
+int axgbe_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t tx_queue_id,
+ uint16_t nb_tx_desc, unsigned int socket_id,
+ const struct rte_eth_txconf *tx_conf);
+void axgbe_dev_enable_tx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_tx(struct rte_eth_dev *dev);
+int axgbe_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+int axgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id);
+
+uint16_t axgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
+void axgbe_dev_rx_queue_release(void *rxq);
+int axgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t rx_queue_id,
+ uint16_t nb_rx_desc, unsigned int socket_id,
+ const struct rte_eth_rxconf *rx_conf,
+ struct rte_mempool *mb_pool);
+void axgbe_dev_enable_rx(struct rte_eth_dev *dev);
+void axgbe_dev_disable_rx(struct rte_eth_dev *dev);
+int axgbe_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+int axgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id);
+uint16_t axgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+uint16_t axgbe_recv_pkts_threshold_refresh(void *rx_queue,
+ struct rte_mbuf **rx_pkts,
+ uint16_t nb_pkts);
+void axgbe_dev_clear_queues(struct rte_eth_dev *dev);
+
+#endif /* _AXGBE_RXTX_H_ */
diff --git a/drivers/net/axgbe/axgbe_rxtx_vec_sse.c b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
new file mode 100644
index 00000000..9be70371
--- /dev/null
+++ b/drivers/net/axgbe/axgbe_rxtx_vec_sse.c
@@ -0,0 +1,93 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+ * Copyright(c) 2018 Synopsys, Inc. All rights reserved.
+ */
+
+#include "axgbe_ethdev.h"
+#include "axgbe_rxtx.h"
+#include "axgbe_phy.h"
+
+#include <rte_time.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+
+/* Useful to avoid shifting for every descriptor preparation */
+#define TX_DESC_CTRL_FLAGS 0xb000000000000000
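+/* 0xb000000000000000 lands in the descriptor's high word (desc3) and
+ * presets OWN, FD and LD (bits 31, 29 and 28), so only the length fields
+ * need to be OR-ed in per packet
+ */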
+#define TX_FREE_BULK 8
+#define TX_FREE_BULK_CHECK (TX_FREE_BULK - 1)
+
+static inline void
+axgbe_vec_tx(volatile struct axgbe_tx_desc *desc,
+ struct rte_mbuf *mbuf)
+{
+ __m128i descriptor = _mm_set_epi64x((uint64_t)mbuf->pkt_len << 32 |
+ TX_DESC_CTRL_FLAGS | mbuf->data_len,
+ mbuf->buf_iova
+ + mbuf->data_off);
+ _mm_store_si128((__m128i *)desc, descriptor);
+}
+
+static void
+axgbe_xmit_cleanup_vec(struct axgbe_tx_queue *txq)
+{
+ volatile struct axgbe_tx_desc *desc;
+ int idx, i;
+
+ idx = AXGBE_GET_DESC_IDX(txq, txq->dirty + txq->free_batch_cnt
+ - 1);
+ desc = &txq->desc[idx];
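+	/* Descriptors complete in order: if hardware still owns the last
+	 * descriptor of the batch nothing can be freed yet, otherwise the
+	 * whole batch is done
+	 */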
+ if (desc->desc3 & AXGBE_DESC_OWN)
+ return;
+ /* memset avoided for desc ctrl fields since in vec_tx path
+ * all 128 bits are populated
+ */
+ for (i = 0; i < txq->free_batch_cnt; i++, idx--)
+ rte_pktmbuf_free_seg(txq->sw_ring[idx]);
+
+ txq->dirty += txq->free_batch_cnt;
+ txq->nb_desc_free += txq->free_batch_cnt;
+}
+
+uint16_t
+axgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts)
+{
+ PMD_INIT_FUNC_TRACE();
+
+ struct axgbe_tx_queue *txq;
+ uint16_t idx, nb_commit, loop, i;
+ uint32_t tail_addr;
+
+ txq = (struct axgbe_tx_queue *)tx_queue;
+ if (txq->nb_desc_free < txq->free_thresh) {
+ axgbe_xmit_cleanup_vec(txq);
+ if (unlikely(txq->nb_desc_free == 0))
+ return 0;
+ }
+ nb_pkts = RTE_MIN(txq->nb_desc_free, nb_pkts);
+ nb_commit = nb_pkts;
+ idx = AXGBE_GET_DESC_IDX(txq, txq->cur);
+ loop = txq->nb_desc - idx;
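+	/* Split the burst at the ring boundary: fill up to the end of the
+	 * ring first, then wrap to index 0 for the remainder
+	 */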
+ if (nb_commit >= loop) {
+ for (i = 0; i < loop; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ nb_commit -= loop;
+ idx = 0;
+ }
+ for (i = 0; i < nb_commit; ++i, ++idx, ++tx_pkts) {
+ axgbe_vec_tx(&txq->desc[idx], *tx_pkts);
+ txq->sw_ring[idx] = *tx_pkts;
+ }
+ txq->cur += nb_pkts;
+ tail_addr = (uint32_t)(txq->ring_phys_addr +
+ idx * sizeof(struct axgbe_tx_desc));
+	/* Update tail reg with next immediate address to kick Tx DMA channel */
+ rte_write32(tail_addr, (void *)txq->dma_tail_reg);
+ txq->pkts += nb_pkts;
+ txq->nb_desc_free -= nb_pkts;
+
+ return nb_pkts;
+}
diff --git a/drivers/net/axgbe/meson.build b/drivers/net/axgbe/meson.build
new file mode 100644
index 00000000..548ffff7
--- /dev/null
+++ b/drivers/net/axgbe/meson.build
@@ -0,0 +1,19 @@
+# SPDX-License-Identifier: BSD-3-Clause
+# Copyright (c) 2018 Advanced Micro Devices, Inc. All rights reserved.
+
+if host_machine.system() != 'linux'
+ build = false
+endif
+
+sources = files('axgbe_ethdev.c',
+ 'axgbe_dev.c',
+ 'axgbe_mdio.c',
+ 'axgbe_phy_impl.c',
+ 'axgbe_i2c.c',
+ 'axgbe_rxtx.c')
+
+cflags += '-Wno-cast-qual'
+
+if arch_subdir == 'x86'
+ sources += files('axgbe_rxtx_vec_sse.c')
+endif
diff --git a/drivers/net/axgbe/rte_pmd_axgbe_version.map b/drivers/net/axgbe/rte_pmd_axgbe_version.map
new file mode 100644
index 00000000..b26efa67
--- /dev/null
+++ b/drivers/net/axgbe/rte_pmd_axgbe_version.map
@@ -0,0 +1,4 @@
+DPDK_18.05 {
+
+ local: *;
+};