Diffstat (limited to 'drivers')
285 files changed, 90088 insertions, 5523 deletions
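Note: the bulk of this changeset adds a new KASUMI crypto PMD and AES-CTR capability entries for the AESNI-MB and QAT PMDs. As a reading aid, the sketch below shows how an application might describe a KASUMI F8 cipher operation to the new PMD. It is pieced together only from the field names and size checks visible in the diff (16-byte key, 8-byte IV, cipher offset/length expressed in bits) plus the 16.07-era cryptodev session API; the function name, the dev_id/qp_id values and the surrounding setup are illustrative assumptions, not part of this patch.

/*
 * Sketch only (not part of the patch): driving the new KASUMI PMD
 * through the DPDK 16.07-era cryptodev API. Sizes follow the checks
 * in rte_kasumi_pmd.c; dev_id/qp_id and the op/mbuf setup are assumed.
 */
#include <rte_crypto.h>
#include <rte_cryptodev.h>
#include <rte_mbuf.h>

static int
kasumi_f8_encrypt_sketch(uint8_t dev_id, struct rte_crypto_op *op,
			 struct rte_mbuf *m, uint8_t key[16], uint8_t iv[8])
{
	struct rte_crypto_sym_xform cipher_xform = {
		.type = RTE_CRYPTO_SYM_XFORM_CIPHER,
		.cipher = {
			.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT,
			.algo = RTE_CRYPTO_CIPHER_KASUMI_F8,
			.key = { .data = key, .length = 16 },	/* 16-byte key only */
		},
	};
	struct rte_cryptodev_sym_session *sess =
		rte_cryptodev_sym_session_create(dev_id, &cipher_xform);

	if (sess == NULL)
		return -1;

	rte_crypto_op_attach_sym_session(op, sess);
	op->sym->m_src = m;
	op->sym->cipher.iv.data = iv;
	op->sym->cipher.iv.length = 8;		/* KASUMI_IV_LENGTH */
	op->sym->cipher.data.offset = 0;	/* offset/length are in bits */
	op->sym->cipher.data.length = rte_pktmbuf_data_len(m) * 8;

	/* qp_id 0 assumed; byte-aligned lengths let the PMD batch ops. */
	return rte_cryptodev_enqueue_burst(dev_id, 0, &op, 1) == 1 ? 0 : -1;
}

The patch itself follows.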
diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index b4205385..dc4ef7f9 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -35,6 +35,7 @@ DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += aesni_gcm DIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += aesni_mb DIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += qat DIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += snow3g +DIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += kasumi DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += null include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/crypto/aesni_gcm/Makefile b/drivers/crypto/aesni_gcm/Makefile index aa2621bd..5898cae1 100644 --- a/drivers/crypto/aesni_gcm/Makefile +++ b/drivers/crypto/aesni_gcm/Makefile @@ -30,9 +30,11 @@ include $(RTE_SDK)/mk/rte.vars.mk +ifneq ($(MAKECMDGOALS),clean) ifeq ($(AESNI_MULTI_BUFFER_LIB_PATH),) $(error "Please define AESNI_MULTI_BUFFER_LIB_PATH environment variable") endif +endif # library name LIB = librte_pmd_aesni_gcm.a @@ -47,9 +49,10 @@ LIBABIVER := 1 # versioning export map EXPORT_MAP := rte_pmd_aesni_gcm_version.map -# external library include paths +# external library dependencies CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH) CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include +LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB LDLIBS += -lcrypto # library source files @@ -62,6 +65,8 @@ SYMLINK-y-include += # library dependencies DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_ring DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_GCM) += lib/librte_cryptodev include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/aesni_mb/Makefile b/drivers/crypto/aesni_mb/Makefile index ec65291f..d3994cc6 100644 --- a/drivers/crypto/aesni_mb/Makefile +++ b/drivers/crypto/aesni_mb/Makefile @@ -49,9 +49,10 @@ LIBABIVER := 1 # versioning export map EXPORT_MAP := rte_pmd_aesni_version.map -# external library include paths +# external library dependencies CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH) CFLAGS += -I$(AESNI_MULTI_BUFFER_LIB_PATH)/include +LDLIBS += -L$(AESNI_MULTI_BUFFER_LIB_PATH) -lIPSec_MB # library source files SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd.c @@ -60,6 +61,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += rte_aesni_mb_pmd_ops.c # library dependencies DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_ring DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AESNI_MB) += lib/librte_cryptodev include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c index 3415ac1b..6554fc4e 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd.c @@ -222,6 +222,9 @@ aesni_mb_set_session_cipher_parameters(const struct aesni_mb_ops *mb_ops, case RTE_CRYPTO_CIPHER_AES_CBC: sess->cipher.mode = CBC; break; + case RTE_CRYPTO_CIPHER_AES_CTR: + sess->cipher.mode = CNTR; + break; default: MB_LOG_ERR("Unsupported cipher mode parameter"); return -1; @@ -379,9 +382,11 @@ process_crypto_op(struct aesni_mb_qp *qp, struct rte_crypto_op *op, /* append space for output data to mbuf */ char *odata = rte_pktmbuf_append(m_dst, rte_pktmbuf_data_len(op->sym->m_src)); - if (odata == NULL) + if (odata == NULL) { 
MB_LOG_ERR("failed to allocate space in destination " "mbuf for source data"); + return NULL; + } memcpy(odata, rte_pktmbuf_mtod(op->sym->m_src, void*), rte_pktmbuf_data_len(op->sym->m_src)); @@ -560,7 +565,7 @@ aesni_mb_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, goto flush_jobs; else qp->stats.enqueued_count += processed_jobs; - return i; + return i; flush_jobs: /* diff --git a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c index 3806a66e..d3c46ace 100644 --- a/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c +++ b/drivers/crypto/aesni_mb/rte_aesni_mb_pmd_ops.c @@ -207,6 +207,26 @@ static const struct rte_cryptodev_capabilities aesni_mb_pmd_capabilities[] = { }, } }, } }, + { /* AES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; diff --git a/drivers/crypto/kasumi/Makefile b/drivers/crypto/kasumi/Makefile new file mode 100644 index 00000000..9fb0be85 --- /dev/null +++ b/drivers/crypto/kasumi/Makefile @@ -0,0 +1,69 @@ +# BSD LICENSE +# +# Copyright(c) 2016 Intel Corporation. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +ifneq ($(MAKECMDGOALS),clean) +ifeq ($(LIBSSO_KASUMI_PATH),) +$(error "Please define LIBSSO_KASUMI_PATH environment variable") +endif +endif + +# library name +LIB = librte_pmd_kasumi.a + +# build flags +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +# library version +LIBABIVER := 1 + +# versioning export map +EXPORT_MAP := rte_pmd_kasumi_version.map + +# external library dependencies +CFLAGS += -I$(LIBSSO_KASUMI_PATH) +CFLAGS += -I$(LIBSSO_KASUMI_PATH)/include +CFLAGS += -I$(LIBSSO_KASUMI_PATH)/build +LDLIBS += -L$(LIBSSO_KASUMI_PATH)/build -lsso_kasumi + +# library source files +SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd.c +SRCS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += rte_kasumi_pmd_ops.c + +# library dependencies +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_eal +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_ring +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_KASUMI) += lib/librte_cryptodev + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd.c b/drivers/crypto/kasumi/rte_kasumi_pmd.c new file mode 100644 index 00000000..5f8c7a2e --- /dev/null +++ b/drivers/crypto/kasumi/rte_kasumi_pmd.c @@ -0,0 +1,657 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_common.h> +#include <rte_config.h> +#include <rte_hexdump.h> +#include <rte_cryptodev.h> +#include <rte_cryptodev_pmd.h> +#include <rte_dev.h> +#include <rte_malloc.h> +#include <rte_cpuflags.h> + +#include "rte_kasumi_pmd_private.h" + +#define KASUMI_KEY_LENGTH 16 +#define KASUMI_IV_LENGTH 8 +#define KASUMI_DIGEST_LENGTH 4 +#define KASUMI_MAX_BURST 4 +#define BYTE_LEN 8 + +/** + * Global static parameter used to create a unique name for each KASUMI + * crypto device. 
+ */ +static unsigned unique_name_id; + +static inline int +create_unique_device_name(char *name, size_t size) +{ + int ret; + + if (name == NULL) + return -EINVAL; + + ret = snprintf(name, size, "%s_%u", CRYPTODEV_NAME_KASUMI_PMD, + unique_name_id++); + if (ret < 0) + return ret; + return 0; +} + +/** Get xform chain order. */ +static enum kasumi_operation +kasumi_get_mode(const struct rte_crypto_sym_xform *xform) +{ + if (xform == NULL) + return KASUMI_OP_NOT_SUPPORTED; + + if (xform->next) + if (xform->next->next != NULL) + return KASUMI_OP_NOT_SUPPORTED; + + if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) { + if (xform->next == NULL) + return KASUMI_OP_ONLY_AUTH; + else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_CIPHER) + return KASUMI_OP_AUTH_CIPHER; + else + return KASUMI_OP_NOT_SUPPORTED; + } + + if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) { + if (xform->next == NULL) + return KASUMI_OP_ONLY_CIPHER; + else if (xform->next->type == RTE_CRYPTO_SYM_XFORM_AUTH) + return KASUMI_OP_CIPHER_AUTH; + else + return KASUMI_OP_NOT_SUPPORTED; + } + + return KASUMI_OP_NOT_SUPPORTED; +} + + +/** Parse crypto xform chain and set private session parameters. */ +int +kasumi_set_session_parameters(struct kasumi_session *sess, + const struct rte_crypto_sym_xform *xform) +{ + const struct rte_crypto_sym_xform *auth_xform = NULL; + const struct rte_crypto_sym_xform *cipher_xform = NULL; + int mode; + + /* Select Crypto operation - hash then cipher / cipher then hash */ + mode = kasumi_get_mode(xform); + + switch (mode) { + case KASUMI_OP_CIPHER_AUTH: + auth_xform = xform->next; + /* Fall-through */ + case KASUMI_OP_ONLY_CIPHER: + cipher_xform = xform; + break; + case KASUMI_OP_AUTH_CIPHER: + cipher_xform = xform->next; + /* Fall-through */ + case KASUMI_OP_ONLY_AUTH: + auth_xform = xform; + } + + if (mode == KASUMI_OP_NOT_SUPPORTED) { + KASUMI_LOG_ERR("Unsupported operation chain order parameter"); + return -EINVAL; + } + + if (cipher_xform) { + /* Only KASUMI F8 supported */ + if (cipher_xform->cipher.algo != RTE_CRYPTO_CIPHER_KASUMI_F8) + return -EINVAL; + /* Initialize key */ + sso_kasumi_init_f8_key_sched(xform->cipher.key.data, + &sess->pKeySched_cipher); + } + + if (auth_xform) { + /* Only KASUMI F9 supported */ + if (auth_xform->auth.algo != RTE_CRYPTO_AUTH_KASUMI_F9) + return -EINVAL; + sess->auth_op = auth_xform->auth.op; + /* Initialize key */ + sso_kasumi_init_f9_key_sched(xform->auth.key.data, + &sess->pKeySched_hash); + } + + + sess->op = mode; + + return 0; +} + +/** Get KASUMI session. */ +static struct kasumi_session * +kasumi_get_session(struct kasumi_qp *qp, struct rte_crypto_op *op) +{ + struct kasumi_session *sess; + + if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_WITH_SESSION) { + if (unlikely(op->sym->session->dev_type != + RTE_CRYPTODEV_KASUMI_PMD)) + return NULL; + + sess = (struct kasumi_session *)op->sym->session->_private; + } else { + struct rte_cryptodev_session *c_sess = NULL; + + if (rte_mempool_get(qp->sess_mp, (void **)&c_sess)) + return NULL; + + sess = (struct kasumi_session *)c_sess->_private; + + if (unlikely(kasumi_set_session_parameters(sess, + op->sym->xform) != 0)) + return NULL; + } + + return sess; +} + +/** Encrypt/decrypt mbufs with same cipher key. 
*/ +static uint8_t +process_kasumi_cipher_op(struct rte_crypto_op **ops, + struct kasumi_session *session, + uint8_t num_ops) +{ + unsigned i; + uint8_t processed_ops = 0; + uint8_t *src[num_ops], *dst[num_ops]; + uint64_t IV[num_ops]; + uint32_t num_bytes[num_ops]; + + for (i = 0; i < num_ops; i++) { + /* Sanity checks. */ + if (ops[i]->sym->cipher.iv.length != KASUMI_IV_LENGTH) { + ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + KASUMI_LOG_ERR("iv"); + break; + } + + src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + + (ops[i]->sym->cipher.data.offset >> 3); + dst[i] = ops[i]->sym->m_dst ? + rte_pktmbuf_mtod(ops[i]->sym->m_dst, uint8_t *) + + (ops[i]->sym->cipher.data.offset >> 3) : + rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + + (ops[i]->sym->cipher.data.offset >> 3); + IV[i] = *((uint64_t *)(ops[i]->sym->cipher.iv.data)); + num_bytes[i] = ops[i]->sym->cipher.data.length >> 3; + + processed_ops++; + } + + if (processed_ops != 0) + sso_kasumi_f8_n_buffer(&session->pKeySched_cipher, IV, + src, dst, num_bytes, processed_ops); + + return processed_ops; +} + +/** Encrypt/decrypt mbuf (bit level function). */ +static uint8_t +process_kasumi_cipher_op_bit(struct rte_crypto_op *op, + struct kasumi_session *session) +{ + uint8_t *src, *dst; + uint64_t IV; + uint32_t length_in_bits, offset_in_bits; + + /* Sanity checks. */ + if (unlikely(op->sym->cipher.iv.length != KASUMI_IV_LENGTH)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + KASUMI_LOG_ERR("iv"); + return 0; + } + + offset_in_bits = op->sym->cipher.data.offset; + src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); + dst = op->sym->m_dst ? + rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *) : + rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); + IV = *((uint64_t *)(op->sym->cipher.iv.data)); + length_in_bits = op->sym->cipher.data.length; + + sso_kasumi_f8_1_buffer_bit(&session->pKeySched_cipher, IV, + src, dst, length_in_bits, offset_in_bits); + + return 1; +} + +/** Generate/verify hash from mbufs with same hash key. 
*/ +static int +process_kasumi_hash_op(struct rte_crypto_op **ops, + struct kasumi_session *session, + uint8_t num_ops) +{ + unsigned i; + uint8_t processed_ops = 0; + uint8_t *src, *dst; + uint32_t length_in_bits; + uint32_t num_bytes; + uint32_t shift_bits; + uint64_t IV; + uint8_t direction; + + for (i = 0; i < num_ops; i++) { + if (unlikely(ops[i]->sym->auth.aad.length != KASUMI_IV_LENGTH)) { + ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + KASUMI_LOG_ERR("aad"); + break; + } + + if (unlikely(ops[i]->sym->auth.digest.length != KASUMI_DIGEST_LENGTH)) { + ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + KASUMI_LOG_ERR("digest"); + break; + } + + /* Data must be byte aligned */ + if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) { + ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + KASUMI_LOG_ERR("offset"); + break; + } + + length_in_bits = ops[i]->sym->auth.data.length; + + src = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + + (ops[i]->sym->auth.data.offset >> 3); + /* IV from AAD */ + IV = *((uint64_t *)(ops[i]->sym->auth.aad.data)); + /* Direction from next bit after end of message */ + num_bytes = (length_in_bits >> 3) + 1; + shift_bits = (BYTE_LEN - 1 - length_in_bits) % BYTE_LEN; + direction = (src[num_bytes - 1] >> shift_bits) & 0x01; + + if (session->auth_op == RTE_CRYPTO_AUTH_OP_VERIFY) { + dst = (uint8_t *)rte_pktmbuf_append(ops[i]->sym->m_src, + ops[i]->sym->auth.digest.length); + + sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash, + IV, src, + length_in_bits, dst, direction); + /* Verify digest. */ + if (memcmp(dst, ops[i]->sym->auth.digest.data, + ops[i]->sym->auth.digest.length) != 0) + ops[i]->status = RTE_CRYPTO_OP_STATUS_AUTH_FAILED; + + /* Trim area used for digest from mbuf. */ + rte_pktmbuf_trim(ops[i]->sym->m_src, + ops[i]->sym->auth.digest.length); + } else { + dst = ops[i]->sym->auth.digest.data; + + sso_kasumi_f9_1_buffer_user(&session->pKeySched_hash, + IV, src, + length_in_bits, dst, direction); + } + processed_ops++; + } + + return processed_ops; +} + +/** Process a batch of crypto ops which shares the same session. */ +static int +process_ops(struct rte_crypto_op **ops, struct kasumi_session *session, + struct kasumi_qp *qp, uint8_t num_ops, + uint16_t *accumulated_enqueued_ops) +{ + unsigned i; + unsigned enqueued_ops, processed_ops; + + switch (session->op) { + case KASUMI_OP_ONLY_CIPHER: + processed_ops = process_kasumi_cipher_op(ops, + session, num_ops); + break; + case KASUMI_OP_ONLY_AUTH: + processed_ops = process_kasumi_hash_op(ops, session, + num_ops); + break; + case KASUMI_OP_CIPHER_AUTH: + processed_ops = process_kasumi_cipher_op(ops, session, + num_ops); + process_kasumi_hash_op(ops, session, processed_ops); + break; + case KASUMI_OP_AUTH_CIPHER: + processed_ops = process_kasumi_hash_op(ops, session, + num_ops); + process_kasumi_cipher_op(ops, session, processed_ops); + break; + default: + /* Operation not supported. */ + processed_ops = 0; + } + + for (i = 0; i < num_ops; i++) { + /* + * If there was no error/authentication failure, + * change status to successful. + */ + if (ops[i]->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + ops[i]->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + /* Free session if a session-less crypto op. 
*/ + if (ops[i]->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) { + rte_mempool_put(qp->sess_mp, ops[i]->sym->session); + ops[i]->sym->session = NULL; + } + } + + enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops, + (void **)ops, processed_ops); + qp->qp_stats.enqueued_count += enqueued_ops; + *accumulated_enqueued_ops += enqueued_ops; + + return enqueued_ops; +} + +/** Process a crypto op with length/offset in bits. */ +static int +process_op_bit(struct rte_crypto_op *op, struct kasumi_session *session, + struct kasumi_qp *qp, uint16_t *accumulated_enqueued_ops) +{ + unsigned enqueued_op, processed_op; + + switch (session->op) { + case KASUMI_OP_ONLY_CIPHER: + processed_op = process_kasumi_cipher_op_bit(op, + session); + break; + case KASUMI_OP_ONLY_AUTH: + processed_op = process_kasumi_hash_op(&op, session, 1); + break; + case KASUMI_OP_CIPHER_AUTH: + processed_op = process_kasumi_cipher_op_bit(op, session); + if (processed_op == 1) + process_kasumi_hash_op(&op, session, 1); + break; + case KASUMI_OP_AUTH_CIPHER: + processed_op = process_kasumi_hash_op(&op, session, 1); + if (processed_op == 1) + process_kasumi_cipher_op_bit(op, session); + break; + default: + /* Operation not supported. */ + processed_op = 0; + } + + /* + * If there was no error/authentication failure, + * change status to successful. + */ + if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + + /* Free session if a session-less crypto op. */ + if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) { + rte_mempool_put(qp->sess_mp, op->sym->session); + op->sym->session = NULL; + } + + enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, (void **)&op, + processed_op); + qp->qp_stats.enqueued_count += enqueued_op; + *accumulated_enqueued_ops += enqueued_op; + + return enqueued_op; +} + +static uint16_t +kasumi_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, + uint16_t nb_ops) +{ + struct rte_crypto_op *c_ops[nb_ops]; + struct rte_crypto_op *curr_c_op; + + struct kasumi_session *prev_sess = NULL, *curr_sess = NULL; + struct kasumi_qp *qp = queue_pair; + unsigned i; + uint8_t burst_size = 0; + uint16_t enqueued_ops = 0; + uint8_t processed_ops; + + for (i = 0; i < nb_ops; i++) { + curr_c_op = ops[i]; + + /* Set status as enqueued (not processed yet) by default. */ + curr_c_op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + + curr_sess = kasumi_get_session(qp, curr_c_op); + if (unlikely(curr_sess == NULL || + curr_sess->op == KASUMI_OP_NOT_SUPPORTED)) { + curr_c_op->status = + RTE_CRYPTO_OP_STATUS_INVALID_SESSION; + break; + } + + /* If length/offset is at bit-level, process this buffer alone. */ + if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0) + || ((ops[i]->sym->cipher.data.offset + % BYTE_LEN) != 0)) { + /* Process the ops of the previous session. */ + if (prev_sess != NULL) { + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); + if (processed_ops < burst_size) { + burst_size = 0; + break; + } + + burst_size = 0; + prev_sess = NULL; + } + + processed_ops = process_op_bit(curr_c_op, curr_sess, + qp, &enqueued_ops); + if (processed_ops != 1) + break; + + continue; + } + + /* Batch ops that share the same session. */ + if (prev_sess == NULL) { + prev_sess = curr_sess; + c_ops[burst_size++] = curr_c_op; + } else if (curr_sess == prev_sess) { + c_ops[burst_size++] = curr_c_op; + /* + * When there are enough ops to process in a batch, + * process them, and start a new batch. 
+ */ + if (burst_size == KASUMI_MAX_BURST) { + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); + if (processed_ops < burst_size) { + burst_size = 0; + break; + } + + burst_size = 0; + prev_sess = NULL; + } + } else { + /* + * Different session, process the ops + * of the previous session. + */ + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); + if (processed_ops < burst_size) { + burst_size = 0; + break; + } + + burst_size = 0; + prev_sess = curr_sess; + + c_ops[burst_size++] = curr_c_op; + } + } + + if (burst_size != 0) { + /* Process the crypto ops of the last session. */ + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); + } + + qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops; + return enqueued_ops; +} + +static uint16_t +kasumi_pmd_dequeue_burst(void *queue_pair, + struct rte_crypto_op **c_ops, uint16_t nb_ops) +{ + struct kasumi_qp *qp = queue_pair; + + unsigned nb_dequeued; + + nb_dequeued = rte_ring_dequeue_burst(qp->processed_ops, + (void **)c_ops, nb_ops); + qp->qp_stats.dequeued_count += nb_dequeued; + + return nb_dequeued; +} + +static int cryptodev_kasumi_uninit(const char *name); + +static int +cryptodev_kasumi_create(const char *name, + struct rte_crypto_vdev_init_params *init_params) +{ + struct rte_cryptodev *dev; + char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; + struct kasumi_private *internals; + uint64_t cpu_flags = 0; + + /* Check CPU for supported vector instruction set */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_AVX)) + cpu_flags |= RTE_CRYPTODEV_FF_CPU_AVX; + else if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1)) + cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE; + else { + KASUMI_LOG_ERR("Vector instructions are not supported by CPU"); + return -EFAULT; + } + + /* Create a unique device name. */ + if (create_unique_device_name(crypto_dev_name, + RTE_CRYPTODEV_NAME_MAX_LEN) != 0) { + KASUMI_LOG_ERR("failed to create unique cryptodev name"); + return -EINVAL; + } + + dev = rte_cryptodev_pmd_virtual_dev_init(crypto_dev_name, + sizeof(struct kasumi_private), init_params->socket_id); + if (dev == NULL) { + KASUMI_LOG_ERR("failed to create cryptodev vdev"); + goto init_error; + } + + dev->dev_type = RTE_CRYPTODEV_KASUMI_PMD; + dev->dev_ops = rte_kasumi_pmd_ops; + + /* Register RX/TX burst functions for data path. 
*/ + dev->dequeue_burst = kasumi_pmd_dequeue_burst; + dev->enqueue_burst = kasumi_pmd_enqueue_burst; + + dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | + cpu_flags; + + internals = dev->data->dev_private; + + internals->max_nb_queue_pairs = init_params->max_nb_queue_pairs; + internals->max_nb_sessions = init_params->max_nb_sessions; + + return 0; +init_error: + KASUMI_LOG_ERR("driver %s: cryptodev_kasumi_create failed", name); + + cryptodev_kasumi_uninit(crypto_dev_name); + return -EFAULT; +} + +static int +cryptodev_kasumi_init(const char *name, + const char *input_args) +{ + struct rte_crypto_vdev_init_params init_params = { + RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_QUEUE_PAIRS, + RTE_CRYPTODEV_VDEV_DEFAULT_MAX_NB_SESSIONS, + rte_socket_id() + }; + + rte_cryptodev_parse_vdev_init_params(&init_params, input_args); + + RTE_LOG(INFO, PMD, "Initialising %s on NUMA node %d\n", name, + init_params.socket_id); + RTE_LOG(INFO, PMD, " Max number of queue pairs = %d\n", + init_params.max_nb_queue_pairs); + RTE_LOG(INFO, PMD, " Max number of sessions = %d\n", + init_params.max_nb_sessions); + + return cryptodev_kasumi_create(name, &init_params); +} + +static int +cryptodev_kasumi_uninit(const char *name) +{ + if (name == NULL) + return -EINVAL; + + RTE_LOG(INFO, PMD, "Closing KASUMI crypto device %s" + " on numa socket %u\n", + name, rte_socket_id()); + + return 0; +} + +static struct rte_driver cryptodev_kasumi_pmd_drv = { + .name = CRYPTODEV_NAME_KASUMI_PMD, + .type = PMD_VDEV, + .init = cryptodev_kasumi_init, + .uninit = cryptodev_kasumi_uninit +}; + +PMD_REGISTER_DRIVER(cryptodev_kasumi_pmd_drv); diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c new file mode 100644 index 00000000..da5854eb --- /dev/null +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_ops.c @@ -0,0 +1,344 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <string.h> + +#include <rte_common.h> +#include <rte_malloc.h> +#include <rte_cryptodev_pmd.h> + +#include "rte_kasumi_pmd_private.h" + +static const struct rte_cryptodev_capabilities kasumi_pmd_capabilities[] = { + { /* KASUMI (F9) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_AUTH, + {.auth = { + .algo = RTE_CRYPTO_AUTH_KASUMI_F9, + .block_size = 8, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .digest_size = { + .min = 4, + .max = 4, + .increment = 0 + }, + .aad_size = { + .min = 9, + .max = 9, + .increment = 0 + } + }, } + }, } + }, + { /* KASUMI (F8) */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_KASUMI_F8, + .block_size = 8, + .key_size = { + .min = 16, + .max = 16, + .increment = 0 + }, + .iv_size = { + .min = 8, + .max = 8, + .increment = 0 + } + }, } + }, } + }, + RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() +}; + +/** Configure device */ +static int +kasumi_pmd_config(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + +/** Start device */ +static int +kasumi_pmd_start(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + +/** Stop device */ +static void +kasumi_pmd_stop(__rte_unused struct rte_cryptodev *dev) +{ +} + +/** Close device */ +static int +kasumi_pmd_close(__rte_unused struct rte_cryptodev *dev) +{ + return 0; +} + + +/** Get device statistics */ +static void +kasumi_pmd_stats_get(struct rte_cryptodev *dev, + struct rte_cryptodev_stats *stats) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct kasumi_qp *qp = dev->data->queue_pairs[qp_id]; + + stats->enqueued_count += qp->qp_stats.enqueued_count; + stats->dequeued_count += qp->qp_stats.dequeued_count; + + stats->enqueue_err_count += qp->qp_stats.enqueue_err_count; + stats->dequeue_err_count += qp->qp_stats.dequeue_err_count; + } +} + +/** Reset device statistics */ +static void +kasumi_pmd_stats_reset(struct rte_cryptodev *dev) +{ + int qp_id; + + for (qp_id = 0; qp_id < dev->data->nb_queue_pairs; qp_id++) { + struct kasumi_qp *qp = dev->data->queue_pairs[qp_id]; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + } +} + + +/** Get device info */ +static void +kasumi_pmd_info_get(struct rte_cryptodev *dev, + struct rte_cryptodev_info *dev_info) +{ + struct kasumi_private *internals = dev->data->dev_private; + + if (dev_info != NULL) { + dev_info->dev_type = dev->dev_type; + dev_info->max_nb_queue_pairs = internals->max_nb_queue_pairs; + dev_info->sym.max_nb_sessions = internals->max_nb_sessions; + dev_info->feature_flags = dev->feature_flags; + dev_info->capabilities = kasumi_pmd_capabilities; + } +} + +/** Release queue pair */ +static int +kasumi_pmd_qp_release(struct rte_cryptodev *dev, uint16_t qp_id) +{ + struct kasumi_qp *qp = dev->data->queue_pairs[qp_id]; + + if (qp != NULL) { + rte_ring_free(qp->processed_ops); + rte_free(qp); + dev->data->queue_pairs[qp_id] = NULL; + } + return 0; +} + +/** set a unique name for the queue pair based on its name, dev_id and qp_id */ +static int +kasumi_pmd_qp_set_unique_name(struct rte_cryptodev *dev, + struct kasumi_qp *qp) +{ + unsigned n = snprintf(qp->name, sizeof(qp->name), + "kasumi_pmd_%u_qp_%u", + dev->data->dev_id, qp->id); + + if (n > sizeof(qp->name)) + return -1; + + return 0; +} + +/** Create a ring to place processed ops on */ +static struct rte_ring * +kasumi_pmd_qp_create_processed_ops_ring(struct kasumi_qp *qp, + unsigned ring_size, int 
socket_id) +{ + struct rte_ring *r; + + r = rte_ring_lookup(qp->name); + if (r) { + if (r->prod.size == ring_size) { + KASUMI_LOG_INFO("Reusing existing ring %s" + " for processed packets", + qp->name); + return r; + } + + KASUMI_LOG_ERR("Unable to reuse existing ring %s" + " for processed packets", + qp->name); + return NULL; + } + + return rte_ring_create(qp->name, ring_size, socket_id, + RING_F_SP_ENQ | RING_F_SC_DEQ); +} + +/** Setup a queue pair */ +static int +kasumi_pmd_qp_setup(struct rte_cryptodev *dev, uint16_t qp_id, + const struct rte_cryptodev_qp_conf *qp_conf, + int socket_id) +{ + struct kasumi_qp *qp = NULL; + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->queue_pairs[qp_id] != NULL) + kasumi_pmd_qp_release(dev, qp_id); + + /* Allocate the queue pair data structure. */ + qp = rte_zmalloc_socket("KASUMI PMD Queue Pair", sizeof(*qp), + RTE_CACHE_LINE_SIZE, socket_id); + if (qp == NULL) + return (-ENOMEM); + + qp->id = qp_id; + dev->data->queue_pairs[qp_id] = qp; + + if (kasumi_pmd_qp_set_unique_name(dev, qp)) + goto qp_setup_cleanup; + + qp->processed_ops = kasumi_pmd_qp_create_processed_ops_ring(qp, + qp_conf->nb_descriptors, socket_id); + if (qp->processed_ops == NULL) + goto qp_setup_cleanup; + + qp->sess_mp = dev->data->session_pool; + + memset(&qp->qp_stats, 0, sizeof(qp->qp_stats)); + + return 0; + +qp_setup_cleanup: + rte_free(qp); + + return -1; +} + +/** Start queue pair */ +static int +kasumi_pmd_qp_start(__rte_unused struct rte_cryptodev *dev, + __rte_unused uint16_t queue_pair_id) +{ + return -ENOTSUP; +} + +/** Stop queue pair */ +static int +kasumi_pmd_qp_stop(__rte_unused struct rte_cryptodev *dev, + __rte_unused uint16_t queue_pair_id) +{ + return -ENOTSUP; +} + +/** Return the number of allocated queue pairs */ +static uint32_t +kasumi_pmd_qp_count(struct rte_cryptodev *dev) +{ + return dev->data->nb_queue_pairs; +} + +/** Returns the size of the KASUMI session structure */ +static unsigned +kasumi_pmd_session_get_size(struct rte_cryptodev *dev __rte_unused) +{ + return sizeof(struct kasumi_session); +} + +/** Configure a KASUMI session from a crypto xform chain */ +static void * +kasumi_pmd_session_configure(struct rte_cryptodev *dev __rte_unused, + struct rte_crypto_sym_xform *xform, void *sess) +{ + if (unlikely(sess == NULL)) { + KASUMI_LOG_ERR("invalid session struct"); + return NULL; + } + + if (kasumi_set_session_parameters(sess, xform) != 0) { + KASUMI_LOG_ERR("failed configure session parameters"); + return NULL; + } + + return sess; +} + +/** Clear the memory of session so it doesn't leave key material behind */ +static void +kasumi_pmd_session_clear(struct rte_cryptodev *dev __rte_unused, void *sess) +{ + /* + * Current just resetting the whole data structure, need to investigate + * whether a more selective reset of key would be more performant + */ + if (sess) + memset(sess, 0, sizeof(struct kasumi_session)); +} + +struct rte_cryptodev_ops kasumi_pmd_ops = { + .dev_configure = kasumi_pmd_config, + .dev_start = kasumi_pmd_start, + .dev_stop = kasumi_pmd_stop, + .dev_close = kasumi_pmd_close, + + .stats_get = kasumi_pmd_stats_get, + .stats_reset = kasumi_pmd_stats_reset, + + .dev_infos_get = kasumi_pmd_info_get, + + .queue_pair_setup = kasumi_pmd_qp_setup, + .queue_pair_release = kasumi_pmd_qp_release, + .queue_pair_start = kasumi_pmd_qp_start, + .queue_pair_stop = kasumi_pmd_qp_stop, + .queue_pair_count = kasumi_pmd_qp_count, + + .session_get_size = kasumi_pmd_session_get_size, + .session_configure = 
kasumi_pmd_session_configure, + .session_clear = kasumi_pmd_session_clear +}; + +struct rte_cryptodev_ops *rte_kasumi_pmd_ops = &kasumi_pmd_ops; diff --git a/drivers/crypto/kasumi/rte_kasumi_pmd_private.h b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h new file mode 100644 index 00000000..04e1c437 --- /dev/null +++ b/drivers/crypto/kasumi/rte_kasumi_pmd_private.h @@ -0,0 +1,106 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2016 Intel Corporation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _RTE_KASUMI_PMD_PRIVATE_H_ +#define _RTE_KASUMI_PMD_PRIVATE_H_ + +#include <sso_kasumi.h> + +#define KASUMI_LOG_ERR(fmt, args...) \ + RTE_LOG(ERR, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + CRYPTODEV_NAME_KASUMI_PMD, \ + __func__, __LINE__, ## args) + +#ifdef RTE_LIBRTE_KASUMI_DEBUG +#define KASUMI_LOG_INFO(fmt, args...) \ + RTE_LOG(INFO, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + CRYPTODEV_NAME_KASUMI_PMD, \ + __func__, __LINE__, ## args) + +#define KASUMI_LOG_DBG(fmt, args...) \ + RTE_LOG(DEBUG, CRYPTODEV, "[%s] %s() line %u: " fmt "\n", \ + CRYPTODEV_NAME_KASUMI_PMD, \ + __func__, __LINE__, ## args) +#else +#define KASUMI_LOG_INFO(fmt, args...) +#define KASUMI_LOG_DBG(fmt, args...) 
+#endif + +/** private data structure for each virtual KASUMI device */ +struct kasumi_private { + unsigned max_nb_queue_pairs; + /**< Max number of queue pairs supported by device */ + unsigned max_nb_sessions; + /**< Max number of sessions supported by device */ +}; + +/** KASUMI buffer queue pair */ +struct kasumi_qp { + uint16_t id; + /**< Queue Pair Identifier */ + char name[RTE_CRYPTODEV_NAME_LEN]; + /**< Unique Queue Pair Name */ + struct rte_ring *processed_ops; + /**< Ring for placing processed ops */ + struct rte_mempool *sess_mp; + /**< Session Mempool */ + struct rte_cryptodev_stats qp_stats; + /**< Queue pair statistics */ +} __rte_cache_aligned; + +enum kasumi_operation { + KASUMI_OP_ONLY_CIPHER, + KASUMI_OP_ONLY_AUTH, + KASUMI_OP_CIPHER_AUTH, + KASUMI_OP_AUTH_CIPHER, + KASUMI_OP_NOT_SUPPORTED +}; + +/** KASUMI private session structure */ +struct kasumi_session { + /* Keys have to be 16-byte aligned */ + sso_kasumi_key_sched_t pKeySched_cipher; + sso_kasumi_key_sched_t pKeySched_hash; + enum kasumi_operation op; + enum rte_crypto_auth_operation auth_op; +} __rte_cache_aligned; + + +int +kasumi_set_session_parameters(struct kasumi_session *sess, + const struct rte_crypto_sym_xform *xform); + + +/** device specific operations function pointer structure */ +struct rte_cryptodev_ops *rte_kasumi_pmd_ops; + +#endif /* _RTE_KASUMI_PMD_PRIVATE_H_ */ diff --git a/drivers/crypto/kasumi/rte_pmd_kasumi_version.map b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map new file mode 100644 index 00000000..8ffeca93 --- /dev/null +++ b/drivers/crypto/kasumi/rte_pmd_kasumi_version.map @@ -0,0 +1,3 @@ +DPDK_16.07 { + local: *; +}; diff --git a/drivers/crypto/null/Makefile b/drivers/crypto/null/Makefile index 2173277b..c143929f 100644 --- a/drivers/crypto/null/Makefile +++ b/drivers/crypto/null/Makefile @@ -54,6 +54,8 @@ SYMLINK-y-include += # library dependencies DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_ring DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL_CRYPTO) += lib/librte_cryptodev include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/qat/Makefile b/drivers/crypto/qat/Makefile index 258c2d5f..20a70d4a 100644 --- a/drivers/crypto/qat/Makefile +++ b/drivers/crypto/qat/Makefile @@ -38,6 +38,7 @@ LIBABIVER := 1 # build flags CFLAGS += $(WERROR_FLAGS) +CFLAGS += -O3 # external library include paths CFLAGS += -I$(SRCDIR)/qat_adf @@ -58,6 +59,7 @@ EXPORT_MAP := rte_pmd_qat_version.map # library dependencies DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_mempool DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_QAT) += lib/librte_cryptodev diff --git a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c index bcccdf4f..aa108d47 100644 --- a/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c +++ b/drivers/crypto/qat/qat_adf/qat_algs_build_desc.c @@ -616,10 +616,11 @@ int qat_alg_aead_session_create_content_desc_auth(struct qat_session *cdesc, * Write (the length of AAD) into bytes 16-19 of state2 * in big-endian format. 
This field is 8 bytes */ - *(uint32_t *)&(hash->sha.state1[ + uint32_t *aad_len = (uint32_t *)&hash->sha.state1[ ICP_QAT_HW_GALOIS_128_STATE1_SZ + - ICP_QAT_HW_GALOIS_H_SZ]) = - rte_bswap32(add_auth_data_length); + ICP_QAT_HW_GALOIS_H_SZ]; + *aad_len = rte_bswap32(add_auth_data_length); + proto = ICP_QAT_FW_LA_GCM_PROTO; } else if (cdesc->qat_hash_alg == ICP_QAT_HW_AUTH_ALGO_SNOW_3G_UIA2) { proto = ICP_QAT_FW_LA_SNOW_3G_PROTO; diff --git a/drivers/crypto/qat/qat_crypto.c b/drivers/crypto/qat/qat_crypto.c index 495ea1c7..940b2b63 100644 --- a/drivers/crypto/qat/qat_crypto.c +++ b/drivers/crypto/qat/qat_crypto.c @@ -263,6 +263,26 @@ static const struct rte_cryptodev_capabilities qat_pmd_capabilities[] = { }, } }, } }, + { /* AES CTR */ + .op = RTE_CRYPTO_OP_TYPE_SYMMETRIC, + {.sym = { + .xform_type = RTE_CRYPTO_SYM_XFORM_CIPHER, + {.cipher = { + .algo = RTE_CRYPTO_CIPHER_AES_CTR, + .block_size = 16, + .key_size = { + .min = 16, + .max = 32, + .increment = 8 + }, + .iv_size = { + .min = 16, + .max = 16, + .increment = 0 + } + }, } + }, } + }, RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() }; @@ -276,14 +296,15 @@ void qat_crypto_sym_clear_session(struct rte_cryptodev *dev, void *session) { struct qat_session *sess = session; - phys_addr_t cd_paddr = sess->cd_paddr; + phys_addr_t cd_paddr; PMD_INIT_FUNC_TRACE(); if (session) { + cd_paddr = sess->cd_paddr; memset(sess, 0, qat_crypto_sym_get_session_private_size(dev)); - sess->cd_paddr = cd_paddr; - } + } else + PMD_DRV_LOG(ERR, "NULL session"); } static int @@ -368,6 +389,14 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, } session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; break; + case RTE_CRYPTO_CIPHER_AES_CTR: + if (qat_alg_validate_aes_key(cipher_xform->key.length, + &session->qat_cipher_alg) != 0) { + PMD_DRV_LOG(ERR, "Invalid AES cipher key size"); + goto error_out; + } + session->qat_mode = ICP_QAT_HW_CIPHER_CTR_MODE; + break; case RTE_CRYPTO_CIPHER_SNOW3G_UEA2: if (qat_alg_validate_snow3g_key(cipher_xform->key.length, &session->qat_cipher_alg) != 0) { @@ -380,7 +409,6 @@ qat_crypto_sym_configure_session_cipher(struct rte_cryptodev *dev, case RTE_CRYPTO_CIPHER_3DES_ECB: case RTE_CRYPTO_CIPHER_3DES_CBC: case RTE_CRYPTO_CIPHER_AES_ECB: - case RTE_CRYPTO_CIPHER_AES_CTR: case RTE_CRYPTO_CIPHER_AES_CCM: case RTE_CRYPTO_CIPHER_KASUMI_F8: PMD_DRV_LOG(ERR, "Crypto: Unsupported Cipher alg %u", @@ -807,12 +835,15 @@ static inline uint32_t adf_modulo(uint32_t data, uint32_t shift) return data - mult; } -void qat_crypto_sym_session_init(struct rte_mempool *mp, void *priv_sess) +void qat_crypto_sym_session_init(struct rte_mempool *mp, void *sym_sess) { - struct qat_session *s = priv_sess; + struct rte_cryptodev_sym_session *sess = sym_sess; + struct qat_session *s = (void *)sess->_private; PMD_INIT_FUNC_TRACE(); - s->cd_paddr = rte_mempool_virt2phy(mp, &s->cd); + s->cd_paddr = rte_mempool_virt2phy(mp, sess) + + offsetof(struct qat_session, cd) + + offsetof(struct rte_cryptodev_sym_session, _private); } int qat_dev_config(__rte_unused struct rte_cryptodev *dev) diff --git a/drivers/crypto/qat/rte_qat_cryptodev.c b/drivers/crypto/qat/rte_qat_cryptodev.c index a7912f5a..f46ec857 100644 --- a/drivers/crypto/qat/rte_qat_cryptodev.c +++ b/drivers/crypto/qat/rte_qat_cryptodev.c @@ -69,10 +69,7 @@ static struct rte_cryptodev_ops crypto_qat_ops = { static struct rte_pci_id pci_id_qat_map[] = { { - .vendor_id = 0x8086, - .device_id = 0x0443, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(0x8086, 0x0443), 
}, {.device_id = 0}, }; diff --git a/drivers/crypto/snow3g/Makefile b/drivers/crypto/snow3g/Makefile index ee582702..bea6760b 100644 --- a/drivers/crypto/snow3g/Makefile +++ b/drivers/crypto/snow3g/Makefile @@ -30,8 +30,10 @@ include $(RTE_SDK)/mk/rte.vars.mk -ifeq ($(LIBSSO_PATH),) -$(error "Please define LIBSSO_PATH environment variable") +ifneq ($(MAKECMDGOALS),clean) +ifeq ($(LIBSSO_SNOW3G_PATH),) +$(error "Please define LIBSSO_SNOW3G_PATH environment variable") +endif endif # library name @@ -47,10 +49,11 @@ LIBABIVER := 1 # versioning export map EXPORT_MAP := rte_pmd_snow3g_version.map -# external library include paths -CFLAGS += -I$(LIBSSO_PATH) -CFLAGS += -I$(LIBSSO_PATH)/include -CFLAGS += -I$(LIBSSO_PATH)/build +# external library dependencies +CFLAGS += -I$(LIBSSO_SNOW3G_PATH) +CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/include +CFLAGS += -I$(LIBSSO_SNOW3G_PATH)/build +LDLIBS += -L$(LIBSSO_SNOW3G_PATH)/build -lsso_snow3g # library source files SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd.c @@ -59,6 +62,8 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += rte_snow3g_pmd_ops.c # library dependencies DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_ring DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SNOW3G) += lib/librte_cryptodev include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/crypto/snow3g/rte_snow3g_pmd.c b/drivers/crypto/snow3g/rte_snow3g_pmd.c index f3e0e667..dc8de6bd 100644 --- a/drivers/crypto/snow3g/rte_snow3g_pmd.c +++ b/drivers/crypto/snow3g/rte_snow3g_pmd.c @@ -38,10 +38,11 @@ #include <rte_dev.h> #include <rte_malloc.h> #include <rte_cpuflags.h> -#include <rte_kvargs.h> #include "rte_snow3g_pmd_private.h" +#define SNOW3G_IV_LENGTH 16 +#define SNOW3G_DIGEST_LENGTH 4 #define SNOW3G_MAX_BURST 8 #define BYTE_LEN 8 @@ -198,20 +199,12 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops, for (i = 0; i < num_ops; i++) { /* Sanity checks. */ - if (ops[i]->sym->cipher.iv.length != 16) { + if (unlikely(ops[i]->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; SNOW3G_LOG_ERR("iv"); break; } - if (((ops[i]->sym->cipher.data.length % BYTE_LEN) != 0) - || ((ops[i]->sym->cipher.data.offset - % BYTE_LEN) != 0)) { - ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - SNOW3G_LOG_ERR("Data Length or offset"); - break; - } - src[i] = rte_pktmbuf_mtod(ops[i]->sym->m_src, uint8_t *) + (ops[i]->sym->cipher.data.offset >> 3); dst[i] = ops[i]->sym->m_dst ? @@ -231,6 +224,39 @@ process_snow3g_cipher_op(struct rte_crypto_op **ops, return processed_ops; } +/** Encrypt/decrypt mbuf (bit level function). */ +static uint8_t +process_snow3g_cipher_op_bit(struct rte_crypto_op *op, + struct snow3g_session *session) +{ + uint8_t *src, *dst; + uint8_t *IV; + uint32_t length_in_bits, offset_in_bits; + + /* Sanity checks. 
*/ + if (unlikely(op->sym->cipher.iv.length != SNOW3G_IV_LENGTH)) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + SNOW3G_LOG_ERR("iv"); + return 0; + } + + offset_in_bits = op->sym->cipher.data.offset; + src = rte_pktmbuf_mtod(op->sym->m_src, uint8_t *); + if (op->sym->m_dst == NULL) { + op->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; + SNOW3G_LOG_ERR("bit-level in-place not supported\n"); + return 0; + } + dst = rte_pktmbuf_mtod(op->sym->m_dst, uint8_t *); + IV = op->sym->cipher.iv.data; + length_in_bits = op->sym->cipher.data.length; + + sso_snow3g_f8_1_buffer_bit(&session->pKeySched_cipher, IV, + src, dst, length_in_bits, offset_in_bits); + + return 1; +} + /** Generate/verify hash from mbufs with same hash key. */ static int process_snow3g_hash_op(struct rte_crypto_op **ops, @@ -243,23 +269,22 @@ process_snow3g_hash_op(struct rte_crypto_op **ops, uint32_t length_in_bits; for (i = 0; i < num_ops; i++) { - if (ops[i]->sym->auth.aad.length != 16) { + if (unlikely(ops[i]->sym->auth.aad.length != SNOW3G_IV_LENGTH)) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; SNOW3G_LOG_ERR("aad"); break; } - if (ops[i]->sym->auth.digest.length != 4) { + if (unlikely(ops[i]->sym->auth.digest.length != SNOW3G_DIGEST_LENGTH)) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; SNOW3G_LOG_ERR("digest"); break; } - if (((ops[i]->sym->auth.data.length % BYTE_LEN) != 0) - || ((ops[i]->sym->auth.data.offset - % BYTE_LEN) != 0)) { + /* Data must be byte aligned */ + if ((ops[i]->sym->auth.data.offset % BYTE_LEN) != 0) { ops[i]->status = RTE_CRYPTO_OP_STATUS_INVALID_ARGS; - SNOW3G_LOG_ERR("Data Length or offset"); + SNOW3G_LOG_ERR("Offset"); break; } @@ -299,10 +324,11 @@ process_snow3g_hash_op(struct rte_crypto_op **ops, /** Process a batch of crypto ops which shares the same session. */ static int process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, - struct snow3g_qp *qp, uint8_t num_ops) + struct snow3g_qp *qp, uint8_t num_ops, + uint16_t *accumulated_enqueued_ops) { unsigned i; - unsigned processed_ops; + unsigned enqueued_ops, processed_ops; switch (session->op) { case SNOW3G_OP_ONLY_CIPHER: @@ -342,7 +368,63 @@ process_ops(struct rte_crypto_op **ops, struct snow3g_session *session, } } - return processed_ops; + enqueued_ops = rte_ring_enqueue_burst(qp->processed_ops, + (void **)ops, processed_ops); + qp->qp_stats.enqueued_count += enqueued_ops; + *accumulated_enqueued_ops += enqueued_ops; + + return enqueued_ops; +} + +/** Process a crypto op with length/offset in bits. */ +static int +process_op_bit(struct rte_crypto_op *op, struct snow3g_session *session, + struct snow3g_qp *qp, uint16_t *accumulated_enqueued_ops) +{ + unsigned enqueued_op, processed_op; + + switch (session->op) { + case SNOW3G_OP_ONLY_CIPHER: + processed_op = process_snow3g_cipher_op_bit(op, + session); + break; + case SNOW3G_OP_ONLY_AUTH: + processed_op = process_snow3g_hash_op(&op, session, 1); + break; + case SNOW3G_OP_CIPHER_AUTH: + processed_op = process_snow3g_cipher_op_bit(op, session); + if (processed_op == 1) + process_snow3g_hash_op(&op, session, 1); + break; + case SNOW3G_OP_AUTH_CIPHER: + processed_op = process_snow3g_hash_op(&op, session, 1); + if (processed_op == 1) + process_snow3g_cipher_op_bit(op, session); + break; + default: + /* Operation not supported. */ + processed_op = 0; + } + + /* + * If there was no error/authentication failure, + * change status to successful. 
+ */ + if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED) + op->status = RTE_CRYPTO_OP_STATUS_SUCCESS; + + /* Free session if a session-less crypto op. */ + if (op->sym->sess_type == RTE_CRYPTO_SYM_OP_SESSIONLESS) { + rte_mempool_put(qp->sess_mp, op->sym->session); + op->sym->session = NULL; + } + + enqueued_op = rte_ring_enqueue_burst(qp->processed_ops, + (void **)&op, processed_op); + qp->qp_stats.enqueued_count += enqueued_op; + *accumulated_enqueued_ops += enqueued_op; + + return enqueued_op; } static uint16_t @@ -354,7 +436,7 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, struct snow3g_session *prev_sess = NULL, *curr_sess = NULL; struct snow3g_qp *qp = queue_pair; - unsigned i, n; + unsigned i; uint8_t burst_size = 0; uint16_t enqueued_ops = 0; uint8_t processed_ops; @@ -370,8 +452,32 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, curr_sess->op == SNOW3G_OP_NOT_SUPPORTED)) { curr_c_op->status = RTE_CRYPTO_OP_STATUS_INVALID_SESSION; - qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops; - return enqueued_ops; + break; + } + + /* If length/offset is at bit-level, process this buffer alone. */ + if (((curr_c_op->sym->cipher.data.length % BYTE_LEN) != 0) + || ((curr_c_op->sym->cipher.data.offset + % BYTE_LEN) != 0)) { + /* Process the ops of the previous session. */ + if (prev_sess != NULL) { + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); + if (processed_ops < burst_size) { + burst_size = 0; + break; + } + + burst_size = 0; + prev_sess = NULL; + } + + processed_ops = process_op_bit(curr_c_op, curr_sess, + qp, &enqueued_ops); + if (processed_ops != 1) + break; + + continue; } /* Batch ops that share the same session. */ @@ -385,20 +491,14 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, * process them, and start a new batch. */ if (burst_size == SNOW3G_MAX_BURST) { - processed_ops = process_ops(c_ops, - prev_sess, qp, burst_size); - n = rte_ring_enqueue_burst(qp->processed_ops, - (void **)c_ops, - processed_ops); - qp->qp_stats.enqueued_count += n; - enqueued_ops += n; - if (n < burst_size) { - qp->qp_stats.enqueue_err_count += - nb_ops - enqueued_ops; - return enqueued_ops; + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); + if (processed_ops < burst_size) { + burst_size = 0; + break; } - burst_size = 0; + burst_size = 0; prev_sess = NULL; } } else { @@ -406,41 +506,27 @@ snow3g_pmd_enqueue_burst(void *queue_pair, struct rte_crypto_op **ops, * Different session, process the ops * of the previous session. */ - processed_ops = process_ops(c_ops, - prev_sess, qp, burst_size); - n = rte_ring_enqueue_burst(qp->processed_ops, - (void **)c_ops, - processed_ops); - qp->qp_stats.enqueued_count += n; - enqueued_ops += n; - if (n < burst_size) { - qp->qp_stats.enqueue_err_count += - nb_ops - enqueued_ops; - return enqueued_ops; + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); + if (processed_ops < burst_size) { + burst_size = 0; + break; } - burst_size = 0; + burst_size = 0; prev_sess = curr_sess; + c_ops[burst_size++] = curr_c_op; } } if (burst_size != 0) { /* Process the crypto ops of the last session. 
*/ - processed_ops = process_ops(c_ops, - prev_sess, qp, burst_size); - n = rte_ring_enqueue_burst(qp->processed_ops, - (void **)c_ops, - processed_ops); - qp->qp_stats.enqueued_count += n; - enqueued_ops += n; - if (n < burst_size) { - qp->qp_stats.enqueue_err_count += - nb_ops - enqueued_ops; - return enqueued_ops; - } + processed_ops = process_ops(c_ops, prev_sess, + qp, burst_size, &enqueued_ops); } + qp->qp_stats.enqueue_err_count += nb_ops - enqueued_ops; return enqueued_ops; } @@ -468,6 +554,16 @@ cryptodev_snow3g_create(const char *name, struct rte_cryptodev *dev; char crypto_dev_name[RTE_CRYPTODEV_NAME_MAX_LEN]; struct snow3g_private *internals; + uint64_t cpu_flags = 0; + + /* Check CPU for supported vector instruction set */ + if (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1)) + cpu_flags |= RTE_CRYPTODEV_FF_CPU_SSE; + else { + SNOW3G_LOG_ERR("Vector instructions are not supported by CPU"); + return -EFAULT; + } + /* Create a unique device name. */ if (create_unique_device_name(crypto_dev_name, @@ -491,7 +587,8 @@ cryptodev_snow3g_create(const char *name, dev->enqueue_burst = snow3g_pmd_enqueue_burst; dev->feature_flags = RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO | - RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING; + RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING | + cpu_flags; internals = dev->data->dev_private; diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 3386a673..bc932309 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -45,10 +45,13 @@ DIRS-$(CONFIG_RTE_LIBRTE_MLX4_PMD) += mlx4 DIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5 DIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe DIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp +DIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt DIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += null DIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += pcap +DIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede DIRS-$(CONFIG_RTE_LIBRTE_PMD_RING) += ring DIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += szedata2 +DIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += thunderx DIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio DIRS-$(CONFIG_RTE_LIBRTE_VMXNET3_PMD) += vmxnet3 DIRS-$(CONFIG_RTE_LIBRTE_PMD_XENVIRT) += xenvirt diff --git a/drivers/net/af_packet/Makefile b/drivers/net/af_packet/Makefile index cb1a7aea..e14d6d0c 100644 --- a/drivers/net/af_packet/Makefile +++ b/drivers/net/af_packet/Makefile @@ -51,7 +51,9 @@ CFLAGS += $(WERROR_FLAGS) SRCS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += rte_eth_af_packet.c # this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_mempool DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_AF_PACKET) += lib/librte_kvargs diff --git a/drivers/net/af_packet/rte_eth_af_packet.c b/drivers/net/af_packet/rte_eth_af_packet.c index f17bd7e7..2d7f3448 100644 --- a/drivers/net/af_packet/rte_eth_af_packet.c +++ b/drivers/net/af_packet/rte_eth_af_packet.c @@ -78,6 +78,7 @@ struct pkt_rx_queue { volatile unsigned long rx_pkts; volatile unsigned long err_pkts; + volatile unsigned long rx_bytes; }; struct pkt_tx_queue { @@ -90,6 +91,7 @@ struct pkt_tx_queue { volatile unsigned long tx_pkts; volatile unsigned long err_pkts; + volatile unsigned long tx_bytes; }; struct pmd_internals { @@ -131,6 +133,7 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) uint8_t *pbuf; struct pkt_rx_queue *pkt_q = queue; uint16_t num_rx = 0; + unsigned long num_rx_bytes = 0; unsigned int framecount, framenum; if 
(unlikely(nb_pkts == 0)) @@ -167,9 +170,11 @@ eth_af_packet_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) /* account for the receive frame */ bufs[i] = mbuf; num_rx++; + num_rx_bytes += mbuf->pkt_len; } pkt_q->framenum = framenum; pkt_q->rx_pkts += num_rx; + pkt_q->rx_bytes += num_rx_bytes; return num_rx; } @@ -186,6 +191,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) struct pollfd pfd; struct pkt_tx_queue *pkt_q = queue; uint16_t num_tx = 0; + unsigned long num_tx_bytes = 0; int i; if (unlikely(nb_pkts == 0)) @@ -219,6 +225,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) ppd = (struct tpacket2_hdr *) pkt_q->rd[framenum].iov_base; num_tx++; + num_tx_bytes += mbuf->pkt_len; rte_pktmbuf_free(mbuf); } @@ -229,6 +236,7 @@ eth_af_packet_tx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts) pkt_q->framenum = framenum; pkt_q->tx_pkts += num_tx; pkt_q->err_pkts += nb_pkts - num_tx; + pkt_q->tx_bytes += num_tx_bytes; return num_tx; } @@ -287,13 +295,16 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats) { unsigned i, imax; unsigned long rx_total = 0, tx_total = 0, tx_err_total = 0; + unsigned long rx_bytes_total = 0, tx_bytes_total = 0; const struct pmd_internals *internal = dev->data->dev_private; imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ? internal->nb_queues : RTE_ETHDEV_QUEUE_STAT_CNTRS); for (i = 0; i < imax; i++) { igb_stats->q_ipackets[i] = internal->rx_queue[i].rx_pkts; + igb_stats->q_ibytes[i] = internal->rx_queue[i].rx_bytes; rx_total += igb_stats->q_ipackets[i]; + rx_bytes_total += igb_stats->q_ibytes[i]; } imax = (internal->nb_queues < RTE_ETHDEV_QUEUE_STAT_CNTRS ? @@ -301,13 +312,17 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *igb_stats) for (i = 0; i < imax; i++) { igb_stats->q_opackets[i] = internal->tx_queue[i].tx_pkts; igb_stats->q_errors[i] = internal->tx_queue[i].err_pkts; + igb_stats->q_obytes[i] = internal->tx_queue[i].tx_bytes; tx_total += igb_stats->q_opackets[i]; tx_err_total += igb_stats->q_errors[i]; + tx_bytes_total += igb_stats->q_obytes[i]; } igb_stats->ipackets = rx_total; + igb_stats->ibytes = rx_bytes_total; igb_stats->opackets = tx_total; igb_stats->oerrors = tx_err_total; + igb_stats->obytes = tx_bytes_total; } static void @@ -316,12 +331,15 @@ eth_stats_reset(struct rte_eth_dev *dev) unsigned i; struct pmd_internals *internal = dev->data->dev_private; - for (i = 0; i < internal->nb_queues; i++) + for (i = 0; i < internal->nb_queues; i++) { internal->rx_queue[i].rx_pkts = 0; + internal->rx_queue[i].rx_bytes = 0; + } for (i = 0; i < internal->nb_queues; i++) { internal->tx_queue[i].tx_pkts = 0; internal->tx_queue[i].err_pkts = 0; + internal->tx_queue[i].tx_bytes = 0; } } diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile index 6f1f385d..c2ddd8d7 100644 --- a/drivers/net/bnx2x/Makefile +++ b/drivers/net/bnx2x/Makefile @@ -14,6 +14,10 @@ EXPORT_MAP := rte_pmd_bnx2x_version.map LIBABIVER := 1 +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS += -wd188 #188: enumerated type mixed with another type +endif + # # all source are stored in SRCS-y # diff --git a/drivers/net/bnx2x/bnx2x.c b/drivers/net/bnx2x/bnx2x.c index 6edb2f95..3095d2bb 100644 --- a/drivers/net/bnx2x/bnx2x.c +++ b/drivers/net/bnx2x/bnx2x.c @@ -32,18 +32,20 @@ #define BNX2X_PMD_VER_PREFIX "BNX2X PMD" #define BNX2X_PMD_VERSION_MAJOR 1 #define BNX2X_PMD_VERSION_MINOR 0 -#define BNX2X_PMD_VERSION_PATCH 0 +#define BNX2X_PMD_VERSION_REVISION 1 +#define 
BNX2X_PMD_VERSION_PATCH 1 static inline const char * bnx2x_pmd_version(void) { static char version[32]; - snprintf(version, sizeof(version), "%s %s_%d.%d.%d", + snprintf(version, sizeof(version), "%s %s_%d.%d.%d.%d", BNX2X_PMD_VER_PREFIX, BNX2X_DRIVER_VERSION, BNX2X_PMD_VERSION_MAJOR, BNX2X_PMD_VERSION_MINOR, + BNX2X_PMD_VERSION_REVISION, BNX2X_PMD_VERSION_PATCH); return version; @@ -1293,7 +1295,7 @@ bnx2x_free_tx_pkt(__rte_unused struct bnx2x_fastpath *fp, struct bnx2x_tx_queue struct rte_mbuf *tx_mbuf = txq->sw_ring[TX_BD(pkt_idx, txq)]; if (likely(tx_mbuf != NULL)) { - rte_pktmbuf_free(tx_mbuf); + rte_pktmbuf_free_seg(tx_mbuf); } else { PMD_RX_LOG(ERR, "fp[%02d] lost mbuf %lu", fp->index, (unsigned long)TX_BD(pkt_idx, txq)); @@ -2113,147 +2115,127 @@ bnx2x_nic_unload(struct bnx2x_softc *sc, uint32_t unload_mode, uint8_t keep_link * the mbuf and return to the caller. * * Returns: - * 0 = Success, !0 = Failure + * int: Number of TX BDs used for the mbuf + * * Note the side effect that an mbuf may be freed if it causes a problem. */ -int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts) +int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0) { - struct rte_mbuf *m0; struct eth_tx_start_bd *tx_start_bd; uint16_t bd_prod, pkt_prod; - int m_tx; struct bnx2x_softc *sc; uint32_t nbds = 0; - struct bnx2x_fastpath *fp; sc = txq->sc; - fp = &sc->fp[txq->queue_id]; - bd_prod = txq->tx_bd_tail; pkt_prod = txq->tx_pkt_tail; - for (m_tx = 0; m_tx < m_pkts; m_tx++) { - - m0 = *m_head++; - - if (unlikely(txq->nb_tx_avail < 3)) { - PMD_TX_LOG(ERR, "no enough bds %d/%d", - bd_prod, txq->nb_tx_avail); - return -ENOMEM; - } + txq->sw_ring[TX_BD(pkt_prod, txq)] = m0; - txq->sw_ring[TX_BD(pkt_prod, txq)] = m0; + tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd; - tx_start_bd = &txq->tx_ring[TX_BD(bd_prod, txq)].start_bd; + tx_start_bd->addr = + rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0)); + tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len); + tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; + tx_start_bd->general_data = + (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); - tx_start_bd->addr = - rte_cpu_to_le_64(rte_mbuf_data_dma_addr(m0)); - tx_start_bd->nbytes = rte_cpu_to_le_16(m0->data_len); - tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD; - tx_start_bd->general_data = - (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT); + tx_start_bd->nbd = rte_cpu_to_le_16(2); - tx_start_bd->nbd = rte_cpu_to_le_16(2); + if (m0->ol_flags & PKT_TX_VLAN_PKT) { + tx_start_bd->vlan_or_ethertype = + rte_cpu_to_le_16(m0->vlan_tci); + tx_start_bd->bd_flags.as_bitfield |= + (X_ETH_OUTBAND_VLAN << + ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); + } else { + if (IS_PF(sc)) + tx_start_bd->vlan_or_ethertype = + rte_cpu_to_le_16(pkt_prod); + else { + struct ether_hdr *eh = + rte_pktmbuf_mtod(m0, struct ether_hdr *); - if (m0->ol_flags & PKT_TX_VLAN_PKT) { tx_start_bd->vlan_or_ethertype = - rte_cpu_to_le_16(m0->vlan_tci); - tx_start_bd->bd_flags.as_bitfield |= - (X_ETH_OUTBAND_VLAN << - ETH_TX_BD_FLAGS_VLAN_MODE_SHIFT); - } else { - if (IS_PF(sc)) - tx_start_bd->vlan_or_ethertype = - rte_cpu_to_le_16(pkt_prod); - else { - struct ether_hdr *eh - = rte_pktmbuf_mtod(m0, struct ether_hdr *); - - tx_start_bd->vlan_or_ethertype - = rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type)); - } + rte_cpu_to_le_16(rte_be_to_cpu_16(eh->ether_type)); } + } - bd_prod = NEXT_TX_BD(bd_prod); - if (IS_VF(sc)) { - struct eth_tx_parse_bd_e2 *tx_parse_bd; - const struct ether_hdr *eh = rte_pktmbuf_mtod(m0, 
struct ether_hdr *); - uint8_t mac_type = UNICAST_ADDRESS; - - tx_parse_bd = - &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2; - if (is_multicast_ether_addr(&eh->d_addr)) { - if (is_broadcast_ether_addr(&eh->d_addr)) - mac_type = BROADCAST_ADDRESS; - else - mac_type = MULTICAST_ADDRESS; - } - tx_parse_bd->parsing_data = - (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); - - rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi, - &eh->d_addr.addr_bytes[0], 2); - rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid, - &eh->d_addr.addr_bytes[2], 2); - rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo, - &eh->d_addr.addr_bytes[4], 2); - rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi, - &eh->s_addr.addr_bytes[0], 2); - rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid, - &eh->s_addr.addr_bytes[2], 2); - rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo, - &eh->s_addr.addr_bytes[4], 2); - - tx_parse_bd->data.mac_addr.dst_hi = - rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi); - tx_parse_bd->data.mac_addr.dst_mid = - rte_cpu_to_be_16(tx_parse_bd->data. - mac_addr.dst_mid); - tx_parse_bd->data.mac_addr.dst_lo = - rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo); - tx_parse_bd->data.mac_addr.src_hi = - rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi); - tx_parse_bd->data.mac_addr.src_mid = - rte_cpu_to_be_16(tx_parse_bd->data. - mac_addr.src_mid); - tx_parse_bd->data.mac_addr.src_lo = - rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo); - - PMD_TX_LOG(DEBUG, - "PBD dst %x %x %x src %x %x %x p_data %x", - tx_parse_bd->data.mac_addr.dst_hi, - tx_parse_bd->data.mac_addr.dst_mid, - tx_parse_bd->data.mac_addr.dst_lo, - tx_parse_bd->data.mac_addr.src_hi, - tx_parse_bd->data.mac_addr.src_mid, - tx_parse_bd->data.mac_addr.src_lo, - tx_parse_bd->parsing_data); - } + bd_prod = NEXT_TX_BD(bd_prod); + if (IS_VF(sc)) { + struct eth_tx_parse_bd_e2 *tx_parse_bd; + const struct ether_hdr *eh = + rte_pktmbuf_mtod(m0, struct ether_hdr *); + uint8_t mac_type = UNICAST_ADDRESS; + + tx_parse_bd = + &txq->tx_ring[TX_BD(bd_prod, txq)].parse_bd_e2; + if (is_multicast_ether_addr(&eh->d_addr)) { + if (is_broadcast_ether_addr(&eh->d_addr)) + mac_type = BROADCAST_ADDRESS; + else + mac_type = MULTICAST_ADDRESS; + } + tx_parse_bd->parsing_data = + (mac_type << ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE_SHIFT); + + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_hi, + &eh->d_addr.addr_bytes[0], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_mid, + &eh->d_addr.addr_bytes[2], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.dst_lo, + &eh->d_addr.addr_bytes[4], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_hi, + &eh->s_addr.addr_bytes[0], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_mid, + &eh->s_addr.addr_bytes[2], 2); + rte_memcpy(&tx_parse_bd->data.mac_addr.src_lo, + &eh->s_addr.addr_bytes[4], 2); + + tx_parse_bd->data.mac_addr.dst_hi = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_hi); + tx_parse_bd->data.mac_addr.dst_mid = + rte_cpu_to_be_16(tx_parse_bd->data. + mac_addr.dst_mid); + tx_parse_bd->data.mac_addr.dst_lo = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.dst_lo); + tx_parse_bd->data.mac_addr.src_hi = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_hi); + tx_parse_bd->data.mac_addr.src_mid = + rte_cpu_to_be_16(tx_parse_bd->data. 
+ mac_addr.src_mid); + tx_parse_bd->data.mac_addr.src_lo = + rte_cpu_to_be_16(tx_parse_bd->data.mac_addr.src_lo); PMD_TX_LOG(DEBUG, - "start bd: nbytes %d flags %x vlan %x\n", - tx_start_bd->nbytes, - tx_start_bd->bd_flags.as_bitfield, - tx_start_bd->vlan_or_ethertype); + "PBD dst %x %x %x src %x %x %x p_data %x", + tx_parse_bd->data.mac_addr.dst_hi, + tx_parse_bd->data.mac_addr.dst_mid, + tx_parse_bd->data.mac_addr.dst_lo, + tx_parse_bd->data.mac_addr.src_hi, + tx_parse_bd->data.mac_addr.src_mid, + tx_parse_bd->data.mac_addr.src_lo, + tx_parse_bd->parsing_data); + } - bd_prod = NEXT_TX_BD(bd_prod); - pkt_prod++; + PMD_TX_LOG(DEBUG, + "start bd: nbytes %d flags %x vlan %x\n", + tx_start_bd->nbytes, + tx_start_bd->bd_flags.as_bitfield, + tx_start_bd->vlan_or_ethertype); - if (TX_IDX(bd_prod) < 2) { - nbds++; - } - } + bd_prod = NEXT_TX_BD(bd_prod); + pkt_prod++; + + if (TX_IDX(bd_prod) < 2) + nbds++; - txq->nb_tx_avail -= m_pkts << 1; + txq->nb_tx_avail -= 2; txq->tx_bd_tail = bd_prod; txq->tx_pkt_tail = pkt_prod; - mb(); - fp->tx_db.data.prod += (m_pkts << 1) + nbds; - DOORBELL(sc, txq->queue_id, fp->tx_db.raw); - mb(); - - return 0; + return nbds + 2; } static uint16_t bnx2x_cid_ilt_lines(struct bnx2x_softc *sc) @@ -9570,8 +9552,10 @@ static int bnx2x_pci_get_caps(struct bnx2x_softc *sc) static void bnx2x_init_rte(struct bnx2x_softc *sc) { if (IS_VF(sc)) { - sc->max_tx_queues = BNX2X_VF_MAX_QUEUES_PER_VF; - sc->max_rx_queues = BNX2X_VF_MAX_QUEUES_PER_VF; + sc->max_tx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, + sc->igu_sb_cnt); + sc->max_rx_queues = min(BNX2X_VF_MAX_QUEUES_PER_VF, + sc->igu_sb_cnt); } else { sc->max_tx_queues = 128; sc->max_rx_queues = 128; @@ -9713,7 +9697,7 @@ int bnx2x_attach(struct bnx2x_softc *sc) pci_read(sc, (sc->devinfo.pcie_msix_cap_reg + PCIR_MSIX_CTRL), &val, 2); - sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE); + sc->igu_sb_cnt = (val & PCIM_MSIXCTRL_TABLE_SIZE) + 1; } else { sc->igu_sb_cnt = 1; } diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h index 135a6eb1..c24a5308 100644 --- a/drivers/net/bnx2x/bnx2x.h +++ b/drivers/net/bnx2x/bnx2x.h @@ -167,6 +167,8 @@ struct bnx2x_device_type { #define TX_PAGE(x) (((x) & ~USABLE_TX_BD_PER_PAGE) >> 8) #define TX_IDX(x) ((x) & USABLE_TX_BD_PER_PAGE) +#define BDS_PER_TX_PKT (3) + /* * Trigger pending transmits when the number of available BDs is greater * than 1/8 of the total number of usable BDs. 
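For context on the BDS_PER_TX_PKT define added above: the constant lets the transmit path budget ring space per packet before touching the hardware ring, which is how the reworked bnx2x_xmit_pkts further down caps each burst (nb_tx_avail / BDS_PER_TX_PKT). A minimal illustrative sketch of that budgeting, using placeholder names (my_txq, budget_tx_burst) rather than the driver's real structures:

    /* Illustrative only -- not the bnx2x PMD's code. */
    #include <stdint.h>

    #define BDS_PER_TX_PKT 3        /* worst-case buffer descriptors per packet */

    struct my_txq {
            uint16_t nb_tx_avail;   /* free BDs left in the TX ring (placeholder) */
    };

    /* How many of nb_pkts fit without exhausting the ring? */
    static uint16_t
    budget_tx_burst(const struct my_txq *txq, uint16_t nb_pkts)
    {
            uint16_t fit = txq->nb_tx_avail / BDS_PER_TX_PKT;

            return nb_pkts < fit ? nb_pkts : fit;
    }

Capping the burst up front is what removes the old mid-burst -ENOMEM failure path from the per-packet encapsulation routine.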
@@ -1864,7 +1866,7 @@ int bnx2x_alloc_hsi_mem(struct bnx2x_softc *sc); int bnx2x_alloc_ilt_mem(struct bnx2x_softc *sc); void bnx2x_free_ilt_mem(struct bnx2x_softc *sc); void bnx2x_dump_tx_chain(struct bnx2x_fastpath * fp, int bd_prod, int count); -int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf **m_head, int m_pkts); +int bnx2x_tx_encap(struct bnx2x_tx_queue *txq, struct rte_mbuf *m0); uint8_t bnx2x_txeof(struct bnx2x_softc *sc, struct bnx2x_fastpath *fp); void bnx2x_print_adapter_info(struct bnx2x_softc *sc); int bnx2x_intr_legacy(struct bnx2x_softc *sc, int scan_fp); diff --git a/drivers/net/bnx2x/bnx2x_ethdev.c b/drivers/net/bnx2x/bnx2x_ethdev.c index 071b44fe..3ff57c42 100644 --- a/drivers/net/bnx2x/bnx2x_ethdev.c +++ b/drivers/net/bnx2x/bnx2x_ethdev.c @@ -276,6 +276,9 @@ static void bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) { struct bnx2x_softc *sc = dev->data->dev_private; + uint32_t brb_truncate_discard; + uint64_t brb_drops; + uint64_t brb_truncates; PMD_INIT_FUNC_TRACE(); @@ -316,6 +319,19 @@ bnx2x_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->rx_nombuf = HILO_U64(sc->eth_stats.no_buff_discard_hi, sc->eth_stats.no_buff_discard_lo); + + brb_drops = + HILO_U64(sc->eth_stats.brb_drop_hi, + sc->eth_stats.brb_drop_lo); + + brb_truncates = + HILO_U64(sc->eth_stats.brb_truncate_hi, + sc->eth_stats.brb_truncate_lo); + + brb_truncate_discard = sc->eth_stats.brb_truncate_discard; + + stats->imissed = brb_drops + brb_truncates + + brb_truncate_discard + stats->rx_nombuf; } static void diff --git a/drivers/net/bnx2x/bnx2x_rxtx.c b/drivers/net/bnx2x/bnx2x_rxtx.c index 752a5e81..0ec4f899 100644 --- a/drivers/net/bnx2x/bnx2x_rxtx.c +++ b/drivers/net/bnx2x/bnx2x_rxtx.c @@ -11,17 +11,6 @@ #include "bnx2x.h" #include "bnx2x_rxtx.h" -static inline struct rte_mbuf * -bnx2x_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check(m, 0); - - return m; -} - static const struct rte_memzone * ring_dma_zone_reserve(struct rte_eth_dev *dev, const char *ring_name, uint16_t queue_id, uint32_t ring_size, int socket_id) @@ -148,7 +137,7 @@ bnx2x_dev_rx_queue_setup(struct rte_eth_dev *dev, /* Initialize software ring entries */ rxq->rx_mbuf_alloc = 0; for (idx = 0; idx < rxq->nb_rx_desc; idx = NEXT_RX_BD(idx)) { - mbuf = bnx2x_rxmbuf_alloc(mp); + mbuf = rte_mbuf_raw_alloc(mp); if (NULL == mbuf) { PMD_RX_LOG(ERR, "RX mbuf alloc failed queue_id=%u, idx=%d", (unsigned)rxq->queue_id, idx); @@ -222,40 +211,40 @@ bnx2x_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) struct bnx2x_tx_queue *txq; struct bnx2x_softc *sc; struct bnx2x_fastpath *fp; - uint32_t burst, nb_tx; - struct rte_mbuf **m = tx_pkts; - int ret; + uint16_t nb_tx_pkts; + uint16_t nb_pkt_sent = 0; + uint32_t ret; txq = p_txq; sc = txq->sc; fp = &sc->fp[txq->queue_id]; - nb_tx = nb_pkts; - - do { - burst = RTE_MIN(nb_pkts, RTE_PMD_BNX2X_TX_MAX_BURST); - - ret = bnx2x_tx_encap(txq, m, burst); - if (unlikely(ret)) { - PMD_TX_LOG(ERR, "tx_encap failed!"); - } - - bnx2x_update_fp_sb_idx(fp); + if ((unlikely((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh))) + bnx2x_txeof(sc, fp); - if ((txq->nb_tx_desc - txq->nb_tx_avail) > txq->tx_free_thresh) { - bnx2x_txeof(sc, fp); - } + nb_tx_pkts = RTE_MIN(nb_pkts, txq->nb_tx_avail / BDS_PER_TX_PKT); + if (unlikely(nb_tx_pkts == 0)) + return 0; - if (unlikely(ret == -ENOMEM)) { - break; - } + while (nb_tx_pkts--) { + struct rte_mbuf *m = *tx_pkts++; 
+ assert(m != NULL); + ret = bnx2x_tx_encap(txq, m); + fp->tx_db.data.prod += ret; + nb_pkt_sent++; + } - m += burst; - nb_pkts -= burst; + bnx2x_update_fp_sb_idx(fp); + mb(); + DOORBELL(sc, txq->queue_id, fp->tx_db.raw); + mb(); - } while (nb_pkts); + if ((txq->nb_tx_desc - txq->nb_tx_avail) > + txq->tx_free_thresh) + bnx2x_txeof(sc, fp); - return nb_tx - nb_pkts; + return nb_pkt_sent; } int @@ -405,9 +394,11 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) len = cqe_fp->pkt_len_or_gro_seg_len; pad = cqe_fp->placement_offset; - new_mb = bnx2x_rxmbuf_alloc(rxq->mb_pool); + new_mb = rte_mbuf_raw_alloc(rxq->mb_pool); if (unlikely(!new_mb)) { PMD_RX_LOG(ERR, "mbuf alloc fail fp[%02d]", fp->index); + rte_eth_devices[rxq->port_id].data-> + rx_mbuf_alloc_failed++; goto next_rx; } @@ -427,7 +418,6 @@ bnx2x_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rx_mb->next = NULL; rx_mb->pkt_len = rx_mb->data_len = len; rx_mb->port = rxq->port_id; - rx_mb->buf_len = len + pad; rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); /* diff --git a/drivers/net/bnx2x/elink.c b/drivers/net/bnx2x/elink.c index b9149b89..149cc975 100644 --- a/drivers/net/bnx2x/elink.c +++ b/drivers/net/bnx2x/elink.c @@ -5029,7 +5029,7 @@ static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy, if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) { PMD_DRV_LOG(DEBUG, "1G parallel detect link on port %d", params->port); - return 1; + return ELINK_STATUS_ERROR; } CL22_RD_OVER_CL45(sc, phy, @@ -5039,7 +5039,7 @@ static elink_status_t elink_direct_parallel_detect_used(struct elink_phy *phy, if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) { PMD_DRV_LOG(DEBUG, "10G parallel detect link on port %d", params->port); - return 1; + return ELINK_STATUS_ERROR; } return ELINK_STATUS_OK; } @@ -6735,7 +6735,7 @@ static elink_status_t elink_8073_is_snr_needed(struct bnx2x_softc *sc, if (val != 0x102) return ELINK_STATUS_OK; - return 1; + return ELINK_STATUS_ERROR; } static elink_status_t elink_8073_xaui_wa(struct bnx2x_softc *sc, @@ -7557,7 +7557,7 @@ static elink_status_t elink_read_sfp_module_eeprom(struct elink_phy *phy, uint16_t byte_cnt, uint8_t * o_buf) { - elink_status_t rc = 0; + elink_status_t rc = ELINK_STATUS_OK; uint8_t xfer_size; uint8_t *user_data = o_buf; read_sfp_module_eeprom_func_p read_func; diff --git a/drivers/net/bnxt/Makefile b/drivers/net/bnxt/Makefile new file mode 100644 index 00000000..d9c5a4cb --- /dev/null +++ b/drivers/net/bnxt/Makefile @@ -0,0 +1,74 @@ +# BSD LICENSE +# +# Copyright(c) 2010-2014 Intel Corporation. All rights reserved. +# Copyright(c) 2014 6WIND S.A. +# Copyright(c) Broadcom Limited. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Intel Corporation nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. 
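The bnx2x_xmit_pkts rewrite above also moves the doorbell out of bnx2x_tx_encap: the BD count returned for each packet is accumulated into the fastpath producer index and the hardware is notified once per burst, between memory barriers. A rough sketch of that batching pattern, assuming placeholder ring_doorbell() and encap_one() helpers rather than the PMD's DOORBELL macro and encap routine:

    /* Illustrative only -- not the bnx2x PMD's code. */
    #include <stdint.h>

    struct tx_doorbell {
            uint32_t prod;                  /* producer index mirrored to HW */
    };

    /* Placeholder for the MMIO write plus barriers the real driver issues. */
    extern void ring_doorbell(uint32_t prod);

    /* Placeholder per-packet encapsulation; returns BDs consumed. */
    extern uint32_t encap_one(void *pkt);

    static uint16_t
    xmit_burst(struct tx_doorbell *db, void **pkts, uint16_t nb_pkts)
    {
            uint16_t sent;

            for (sent = 0; sent < nb_pkts; sent++)
                    db->prod += encap_one(pkts[sent]);

            ring_doorbell(db->prod);        /* one notification per burst */
            return sent;
    }

One MMIO write per burst instead of one per packet is the usual motivation for this restructuring; keeping the per-packet return value accurate (nbds + 2 in the hunk above) is what keeps the producer index in sync with the ring.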
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_bnxt.a + +LIBABIVER := 1 + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +EXPORT_MAP := rte_pmd_bnxt_version.map + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_cpr.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_filter.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_hwrm.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_ring.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_rxr.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_stats.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txq.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_txr.c +SRCS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += bnxt_vnic.c + +# +# Export include files +# +SYMLINK-y-include += + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_BNXT_PMD) += lib/librte_eal + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/bnxt/bnxt.h b/drivers/net/bnxt/bnxt.h new file mode 100644 index 00000000..df1f7718 --- /dev/null +++ b/drivers/net/bnxt/bnxt.h @@ -0,0 +1,176 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_H_ +#define _BNXT_H_ + +#include <inttypes.h> +#include <sys/queue.h> + +#include <rte_ethdev.h> +#include <rte_memory.h> +#include <rte_lcore.h> +#include <rte_spinlock.h> + +#include "bnxt_cpr.h" + +#define BNXT_MAX_MTU 9000 +#define VLAN_TAG_SIZE 4 + +enum bnxt_hw_context { + HW_CONTEXT_NONE = 0, + HW_CONTEXT_IS_RSS = 1, + HW_CONTEXT_IS_COS = 2, + HW_CONTEXT_IS_LB = 3, +}; + +struct bnxt_vf_info { + uint16_t fw_fid; + uint8_t mac_addr[ETHER_ADDR_LEN]; + uint16_t max_rsscos_ctx; + uint16_t max_cp_rings; + uint16_t max_tx_rings; + uint16_t max_rx_rings; + uint16_t max_l2_ctx; + uint16_t max_vnics; + struct bnxt_pf_info *pf; +}; + +struct bnxt_pf_info { +#define BNXT_FIRST_PF_FID 1 +#define BNXT_MAX_VFS(bp) (bp->pf.max_vfs) +#define BNXT_FIRST_VF_FID 128 +#define BNXT_PF_RINGS_USED(bp) bnxt_get_num_queues(bp) +#define BNXT_PF_RINGS_AVAIL(bp) (bp->pf.max_cp_rings - BNXT_PF_RINGS_USED(bp)) + uint32_t fw_fid; + uint8_t port_id; + uint8_t mac_addr[ETHER_ADDR_LEN]; + uint16_t max_rsscos_ctx; + uint16_t max_cp_rings; + uint16_t max_tx_rings; + uint16_t max_rx_rings; + uint16_t max_l2_ctx; + uint16_t max_vnics; + uint16_t first_vf_id; + uint16_t active_vfs; + uint16_t max_vfs; + void *vf_req_buf; + phys_addr_t vf_req_buf_dma_addr; + uint32_t vf_req_fwd[8]; + struct bnxt_vf_info *vf; +}; + +/* Max wait time is 10 * 100ms = 1s */ +#define BNXT_LINK_WAIT_CNT 10 +#define BNXT_LINK_WAIT_INTERVAL 100 +struct bnxt_link_info { + uint8_t phy_flags; + uint8_t mac_type; + uint8_t phy_link_status; + uint8_t loop_back; + uint8_t link_up; + uint8_t duplex; + uint8_t pause; + uint8_t force_pause; + uint8_t auto_pause; + uint8_t auto_mode; +#define PHY_VER_LEN 3 + uint8_t phy_ver[PHY_VER_LEN]; + uint16_t link_speed; + uint16_t support_speeds; + uint16_t auto_link_speed; + uint16_t auto_link_speed_mask; + uint32_t preemphasis; +}; + +#define BNXT_COS_QUEUE_COUNT 8 +struct bnxt_cos_queue_info { + uint8_t id; + uint8_t profile; +}; + +struct bnxt { + void *bar0; + + struct rte_eth_dev *eth_dev; + struct rte_pci_device *pdev; + + uint32_t flags; +#define BNXT_FLAG_REGISTERED (1 << 0) +#define BNXT_FLAG_VF (1 << 1) +#define BNXT_PF(bp) (!((bp)->flags & BNXT_FLAG_VF)) +#define BNXT_VF(bp) ((bp)->flags & BNXT_FLAG_VF) + + unsigned int rx_nr_rings; + unsigned int rx_cp_nr_rings; + struct bnxt_rx_queue **rx_queues; + + unsigned int tx_nr_rings; + unsigned int tx_cp_nr_rings; + struct bnxt_tx_queue **tx_queues; + + /* Default completion ring */ + struct bnxt_cp_ring_info *def_cp_ring; + uint32_t max_ring_grps; + struct bnxt_ring_grp_info *grp_info; + + unsigned int nr_vnics; + + struct bnxt_vnic_info *vnic_info; + STAILQ_HEAD(, bnxt_vnic_info) free_vnic_list; + + struct bnxt_filter_info *filter_info; + STAILQ_HEAD(, bnxt_filter_info) free_filter_list; + + /* VNIC pointer for flow filter (VMDq) pools */ +#define MAX_FF_POOLS ETH_64_POOLS + STAILQ_HEAD(, bnxt_vnic_info) ff_pool[MAX_FF_POOLS]; + +#define MAX_NUM_MAC_ADDR 32 + uint8_t mac_addr[ETHER_ADDR_LEN]; + + uint16_t 
hwrm_cmd_seq; + void *hwrm_cmd_resp_addr; + phys_addr_t hwrm_cmd_resp_dma_addr; + rte_spinlock_t hwrm_lock; + uint16_t max_req_len; + uint16_t max_resp_len; + + struct bnxt_link_info link_info; + struct bnxt_cos_queue_info cos_queue[BNXT_COS_QUEUE_COUNT]; + + struct bnxt_pf_info pf; + struct bnxt_vf_info vf; +}; + +#endif diff --git a/drivers/net/bnxt/bnxt_cpr.c b/drivers/net/bnxt/bnxt_cpr.c new file mode 100644 index 00000000..60c277a1 --- /dev/null +++ b/drivers/net/bnxt/bnxt_cpr.c @@ -0,0 +1,159 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_malloc.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "hsi_struct_def_dpdk.h" + +/* + * Async event handling + */ +void bnxt_handle_async_event(struct bnxt *bp __rte_unused, + struct cmpl_base *cmp) +{ + struct hwrm_async_event_cmpl *async_cmp = + (struct hwrm_async_event_cmpl *)cmp; + + /* TODO: HWRM async events are not defined yet */ + /* Needs to handle: link events, error events, etc. 
*/ + switch (async_cmp->event_id) { + case 0: + /* Assume LINK_CHANGE == 0 */ + RTE_LOG(INFO, PMD, "Link change event\n"); + + /* Can just prompt the update_op routine to do a qcfg + * instead of doing the actual qcfg + */ + break; + case 1: + break; + default: + RTE_LOG(ERR, PMD, "handle_async_event id = 0x%x\n", + async_cmp->event_id); + break; + } +} + +void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmpl) +{ + struct hwrm_fwd_req_cmpl *fwd_cmpl = (struct hwrm_fwd_req_cmpl *)cmpl; + struct input *fwd_cmd; + uint16_t logical_vf_id, error_code; + + /* Qualify the fwd request */ + if (fwd_cmpl->source_id < bp->pf.first_vf_id) { + RTE_LOG(ERR, PMD, + "FWD req's source_id 0x%x > first_vf_id 0x%x\n", + fwd_cmpl->source_id, bp->pf.first_vf_id); + error_code = HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED; + goto reject; + } else if (fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT > + 128 - sizeof(struct input)) { + RTE_LOG(ERR, PMD, + "FWD req's cmd len 0x%x > 108 bytes allowed\n", + fwd_cmpl->req_len_type >> HWRM_FWD_REQ_CMPL_REQ_LEN_SFT); + error_code = HWRM_ERR_CODE_INVALID_PARAMS; + goto reject; + } + + /* Locate VF's forwarded command */ + logical_vf_id = fwd_cmpl->source_id - bp->pf.first_vf_id; + fwd_cmd = (struct input *)((uint8_t *)bp->pf.vf_req_buf + + (logical_vf_id * 128)); + + /* Provision the request */ + switch (fwd_cmd->req_type) { + case HWRM_CFA_L2_FILTER_ALLOC: + case HWRM_CFA_L2_FILTER_FREE: + case HWRM_CFA_L2_FILTER_CFG: + case HWRM_CFA_L2_SET_RX_MASK: + break; + default: + error_code = HWRM_ERR_CODE_INVALID_PARAMS; + goto reject; + } + + /* Forward */ + fwd_cmd->target_id = fwd_cmpl->source_id; + bnxt_hwrm_exec_fwd_resp(bp, fwd_cmd); + return; + +reject: + /* TODO: Encap the reject error resp into the hwrm_err_iput? */ + /* Use the error_code for the reject cmd */ + RTE_LOG(ERR, PMD, + "Error 0x%x found in the forward request\n", error_code); +} + +/* For the default completion ring only */ +void bnxt_free_def_cp_ring(struct bnxt *bp) +{ + struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; + + bnxt_free_ring(cpr->cp_ring_struct); + rte_free(cpr->cp_ring_struct); + rte_free(cpr); +} + +/* For the default completion ring only */ +int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id) +{ + struct bnxt_cp_ring_info *cpr; + struct bnxt_ring *ring; + + cpr = rte_zmalloc_socket("cpr", + sizeof(struct bnxt_cp_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (cpr == NULL) + return -ENOMEM; + bp->def_cp_ring = cpr; + + ring = rte_zmalloc_socket("bnxt_cp_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + cpr->cp_ring_struct = ring; + ring->bd = (void *)cpr->cp_desc_ring; + ring->bd_dma = cpr->cp_desc_mapping; + ring->ring_size = rte_align32pow2(DEFAULT_CP_RING_SIZE); + ring->ring_mask = ring->ring_size - 1; + ring->vmem_size = 0; + ring->vmem = NULL; + + return 0; +} diff --git a/drivers/net/bnxt/bnxt_cpr.h b/drivers/net/bnxt/bnxt_cpr.h new file mode 100644 index 00000000..c176f8c5 --- /dev/null +++ b/drivers/net/bnxt/bnxt_cpr.h @@ -0,0 +1,86 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_CPR_H_ +#define _BNXT_CPR_H_ + +#define CMP_VALID(cmp, raw_cons, ring) \ + (!!(((struct cmpl_base *)(cmp))->info3_v & CMPL_BASE_V) == \ + !((raw_cons) & ((ring)->ring_size))) + +#define CMP_TYPE(cmp) \ + (((struct cmpl_base *)cmp)->type & CMPL_BASE_TYPE_MASK) + +#define ADV_RAW_CMP(idx, n) ((idx) + (n)) +#define NEXT_RAW_CMP(idx) ADV_RAW_CMP(idx, 1) +#define RING_CMP(ring, idx) ((idx) & (ring)->ring_mask) +#define NEXT_CMP(idx) RING_CMP(ADV_RAW_CMP(idx, 1)) + +#define DB_CP_REARM_FLAGS (DB_KEY_CP | DB_IDX_VALID) +#define DB_CP_FLAGS (DB_KEY_CP | DB_IDX_VALID | DB_IRQ_DIS) + +#define B_CP_DB_REARM(cpr, raw_cons) \ + (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_REARM_FLAGS | \ + RING_CMP(cpr->cp_ring_struct, raw_cons))) + +#define B_CP_DIS_DB(cpr, raw_cons) \ + (*(uint32_t *)((cpr)->cp_doorbell) = (DB_CP_FLAGS | \ + RING_CMP(cpr->cp_ring_struct, raw_cons))) + +struct bnxt_ring; +struct bnxt_cp_ring_info { + uint32_t cp_raw_cons; + void *cp_doorbell; + + struct cmpl_base *cp_desc_ring; + + phys_addr_t cp_desc_mapping; + + struct ctx_hw_stats *hw_stats; + phys_addr_t hw_stats_map; + uint32_t hw_stats_ctx_id; + + struct bnxt_ring *cp_ring_struct; +}; + +#define RX_CMP_L2_ERRORS \ + (RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK | RX_PKT_CMPL_ERRORS_CRC_ERROR) + + +struct bnxt; +void bnxt_free_def_cp_ring(struct bnxt *bp); +int bnxt_init_def_ring_struct(struct bnxt *bp, unsigned int socket_id); +void bnxt_handle_async_event(struct bnxt *bp, struct cmpl_base *cmp); +void bnxt_handle_fwd_req(struct bnxt *bp, struct cmpl_base *cmp); + +#endif diff --git a/drivers/net/bnxt/bnxt_ethdev.c b/drivers/net/bnxt/bnxt_ethdev.c new file mode 100644 index 00000000..406e38a6 --- /dev/null +++ b/drivers/net/bnxt/bnxt_ethdev.c @@ -0,0 +1,1049 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <inttypes.h> +#include <stdbool.h> + +#include <rte_dev.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> +#include <rte_cycles.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_stats.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +#define DRV_MODULE_NAME "bnxt" +static const char bnxt_version[] = + "Broadcom Cumulus driver " DRV_MODULE_NAME "\n"; + +static struct rte_pci_id bnxt_pci_id_map[] = { +#define RTE_PCI_DEV_ID_DECL_BNXT(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, +#include "rte_pci_dev_ids.h" + {.device_id = 0}, +}; + +#define BNXT_ETH_RSS_SUPPORT ( \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP) + +/***********************/ + +/* + * High level utility functions + */ + +static void bnxt_free_mem(struct bnxt *bp) +{ + bnxt_free_filter_mem(bp); + bnxt_free_vnic_attributes(bp); + bnxt_free_vnic_mem(bp); + + bnxt_free_stats(bp); + bnxt_free_tx_rings(bp); + bnxt_free_rx_rings(bp); + bnxt_free_def_cp_ring(bp); +} + +static int bnxt_alloc_mem(struct bnxt *bp) +{ + int rc; + + /* Default completion ring */ + rc = bnxt_init_def_ring_struct(bp, SOCKET_ID_ANY); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_rings(bp, 0, NULL, NULL, + bp->def_cp_ring, "def_cp"); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_vnic_mem(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_vnic_attributes(bp); + if (rc) + goto alloc_mem_err; + + rc = bnxt_alloc_filter_mem(bp); + if (rc) + goto alloc_mem_err; + + return 0; + +alloc_mem_err: + bnxt_free_mem(bp); + return rc; +} + +static int bnxt_init_chip(struct bnxt *bp) +{ + unsigned int i, rss_idx, fw_idx; + int rc; + + rc = bnxt_alloc_all_hwrm_stat_ctxs(bp); + if (rc) { + RTE_LOG(ERR, PMD, "HWRM stat ctx alloc failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_alloc_hwrm_rings(bp); + if (rc) { + RTE_LOG(ERR, PMD, "HWRM ring alloc failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_alloc_all_hwrm_ring_grps(bp); + if (rc) { + RTE_LOG(ERR, PMD, "HWRM ring grp alloc failure: %x\n", rc); + goto err_out; + } + + rc = 
bnxt_mq_rx_configure(bp); + if (rc) { + RTE_LOG(ERR, PMD, "MQ mode configure failure rc: %x\n", rc); + goto err_out; + } + + /* VNIC configuration */ + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + rc = bnxt_hwrm_vnic_alloc(bp, vnic); + if (rc) { + RTE_LOG(ERR, PMD, "HWRM vnic alloc failure rc: %x\n", + rc); + goto err_out; + } + + rc = bnxt_hwrm_vnic_ctx_alloc(bp, vnic); + if (rc) { + RTE_LOG(ERR, PMD, + "HWRM vnic ctx alloc failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_hwrm_vnic_cfg(bp, vnic); + if (rc) { + RTE_LOG(ERR, PMD, "HWRM vnic cfg failure rc: %x\n", rc); + goto err_out; + } + + rc = bnxt_set_hwrm_vnic_filters(bp, vnic); + if (rc) { + RTE_LOG(ERR, PMD, "HWRM vnic filter failure rc: %x\n", + rc); + goto err_out; + } + if (vnic->rss_table && vnic->hash_type) { + /* + * Fill the RSS hash & redirection table with + * ring group ids for all VNICs + */ + for (rss_idx = 0, fw_idx = 0; + rss_idx < HW_HASH_INDEX_SIZE; + rss_idx++, fw_idx++) { + if (vnic->fw_grp_ids[fw_idx] == + INVALID_HW_RING_ID) + fw_idx = 0; + vnic->rss_table[rss_idx] = + vnic->fw_grp_ids[fw_idx]; + } + rc = bnxt_hwrm_vnic_rss_cfg(bp, vnic); + if (rc) { + RTE_LOG(ERR, PMD, + "HWRM vnic set RSS failure rc: %x\n", + rc); + goto err_out; + } + } + } + rc = bnxt_hwrm_cfa_l2_set_rx_mask(bp, &bp->vnic_info[0]); + if (rc) { + RTE_LOG(ERR, PMD, + "HWRM cfa l2 rx mask failure rc: %x\n", rc); + goto err_out; + } + + return 0; + +err_out: + bnxt_free_all_hwrm_resources(bp); + + return rc; +} + +static int bnxt_shutdown_nic(struct bnxt *bp) +{ + bnxt_free_all_hwrm_resources(bp); + bnxt_free_all_filters(bp); + bnxt_free_all_vnics(bp); + return 0; +} + +static int bnxt_init_nic(struct bnxt *bp) +{ + int rc; + + bnxt_init_ring_grps(bp); + bnxt_init_vnics(bp); + bnxt_init_filters(bp); + + rc = bnxt_init_chip(bp); + if (rc) + return rc; + + return 0; +} + +/* + * Device configuration and status function + */ + +static void bnxt_dev_info_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint16_t max_vnics, i, j, vpool, vrxq; + + /* MAC Specifics */ + dev_info->max_mac_addrs = MAX_NUM_MAC_ADDR; + dev_info->max_hash_mac_addrs = 0; + + /* PF/VF specifics */ + if (BNXT_PF(bp)) { + dev_info->max_rx_queues = bp->pf.max_rx_rings; + dev_info->max_tx_queues = bp->pf.max_tx_rings; + dev_info->max_vfs = bp->pf.active_vfs; + dev_info->reta_size = bp->pf.max_rsscos_ctx; + max_vnics = bp->pf.max_vnics; + } else { + dev_info->max_rx_queues = bp->vf.max_rx_rings; + dev_info->max_tx_queues = bp->vf.max_tx_rings; + dev_info->reta_size = bp->vf.max_rsscos_ctx; + max_vnics = bp->vf.max_vnics; + } + + /* Fast path specifics */ + dev_info->min_rx_bufsize = 1; + dev_info->max_rx_pktlen = BNXT_MAX_MTU + ETHER_HDR_LEN + ETHER_CRC_LEN + + VLAN_TAG_SIZE; + dev_info->rx_offload_capa = 0; + dev_info->tx_offload_capa = DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO; + + /* *INDENT-OFF* */ + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_thresh = { + .pthresh = 8, + .hthresh = 8, + .wthresh = 0, + }, + .rx_free_thresh = 32, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_thresh = { + .pthresh = 32, + .hthresh = 0, + .wthresh = 0, + }, + .tx_free_thresh = 32, + .tx_rs_thresh = 32, + .txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOOFFLOADS, + }; + /* *INDENT-ON* */ + + /* + * TODO: default_rxconf, 
default_txconf, rx_desc_lim, and tx_desc_lim + * need further investigation. + */ + + /* VMDq resources */ + vpool = 64; /* ETH_64_POOLS */ + vrxq = 128; /* ETH_VMDQ_DCB_NUM_QUEUES */ + for (i = 0; i < 4; vpool >>= 1, i++) { + if (max_vnics > vpool) { + for (j = 0; j < 5; vrxq >>= 1, j++) { + if (dev_info->max_rx_queues > vrxq) { + if (vpool > vrxq) + vpool = vrxq; + goto found; + } + } + /* Not enough resources to support VMDq */ + break; + } + } + /* Not enough resources to support VMDq */ + vpool = 0; + vrxq = 0; +found: + dev_info->max_vmdq_pools = vpool; + dev_info->vmdq_queue_num = vrxq; + + dev_info->vmdq_pool_base = 0; + dev_info->vmdq_queue_base = 0; +} + +/* Configure the device based on the configuration provided */ +static int bnxt_dev_configure_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + int rc; + + bp->rx_queues = (void *)eth_dev->data->rx_queues; + bp->tx_queues = (void *)eth_dev->data->tx_queues; + + /* Inherit new configurations */ + bp->rx_nr_rings = eth_dev->data->nb_rx_queues; + bp->tx_nr_rings = eth_dev->data->nb_tx_queues; + bp->rx_cp_nr_rings = bp->rx_nr_rings; + bp->tx_cp_nr_rings = bp->tx_nr_rings; + + if (eth_dev->data->dev_conf.rxmode.jumbo_frame) + eth_dev->data->mtu = + eth_dev->data->dev_conf.rxmode.max_rx_pkt_len - + ETHER_HDR_LEN - ETHER_CRC_LEN - VLAN_TAG_SIZE; + rc = bnxt_set_hwrm_link_config(bp, true); + return rc; +} + +static int bnxt_dev_start_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + int rc; + + rc = bnxt_hwrm_func_reset(bp); + if (rc) { + RTE_LOG(ERR, PMD, "hwrm chip reset failure rc: %x\n", rc); + rc = -1; + goto error; + } + + rc = bnxt_alloc_mem(bp); + if (rc) + goto error; + + rc = bnxt_init_nic(bp); + if (rc) + goto error; + + return 0; + +error: + bnxt_shutdown_nic(bp); + bnxt_free_tx_mbufs(bp); + bnxt_free_rx_mbufs(bp); + bnxt_free_mem(bp); + return rc; +} + +static int bnxt_dev_set_link_up_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + eth_dev->data->dev_link.link_status = 1; + bnxt_set_hwrm_link_config(bp, true); + return 0; +} + +static int bnxt_dev_set_link_down_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + eth_dev->data->dev_link.link_status = 0; + bnxt_set_hwrm_link_config(bp, false); + return 0; +} + +static void bnxt_dev_close_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + bnxt_free_tx_mbufs(bp); + bnxt_free_rx_mbufs(bp); + bnxt_free_mem(bp); + rte_free(eth_dev->data->mac_addrs); +} + +/* Unload the driver, release resources */ +static void bnxt_dev_stop_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + if (bp->eth_dev->data->dev_started) { + /* TBD: STOP HW queues DMA */ + eth_dev->data->dev_link.link_status = 0; + } + bnxt_shutdown_nic(bp); +} + +static void bnxt_mac_addr_remove_op(struct rte_eth_dev *eth_dev, + uint32_t index) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + uint64_t pool_mask = eth_dev->data->mac_pool_sel[index]; + struct bnxt_vnic_info *vnic; + struct bnxt_filter_info *filter, *temp_filter; + int i; + + /* + * Loop through all VNICs from the specified filter flow pools to + * remove the corresponding MAC addr filter + */ + for (i = 0; i < MAX_FF_POOLS; i++) { + if (!(pool_mask & (1 << i))) + continue; + + STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { + filter = 
STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + if (filter->mac_index == index) { + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + bnxt_hwrm_clear_filter(bp, filter); + filter->mac_index = INVALID_MAC_INDEX; + memset(&filter->l2_addr, 0, + ETHER_ADDR_LEN); + STAILQ_INSERT_TAIL( + &bp->free_filter_list, + filter, next); + } + filter = temp_filter; + } + } + } +} + +static void bnxt_mac_addr_add_op(struct rte_eth_dev *eth_dev, + struct ether_addr *mac_addr, + uint32_t index, uint32_t pool) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic = STAILQ_FIRST(&bp->ff_pool[pool]); + struct bnxt_filter_info *filter; + + if (!vnic) { + RTE_LOG(ERR, PMD, "VNIC not found for pool %d!\n", pool); + return; + } + /* Attach requested MAC address to the new l2_filter */ + STAILQ_FOREACH(filter, &vnic->filter, next) { + if (filter->mac_index == index) { + RTE_LOG(ERR, PMD, + "MAC addr already existed for pool %d\n", pool); + return; + } + } + filter = bnxt_alloc_filter(bp); + if (!filter) { + RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); + return; + } + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + filter->mac_index = index; + memcpy(filter->l2_addr, mac_addr, ETHER_ADDR_LEN); + bnxt_hwrm_set_filter(bp, vnic, filter); +} + +static int bnxt_link_update_op(struct rte_eth_dev *eth_dev, + int wait_to_complete) +{ + int rc = 0; + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct rte_eth_link new; + unsigned int cnt = BNXT_LINK_WAIT_CNT; + + memset(&new, 0, sizeof(new)); + do { + /* Retrieve link info from hardware */ + rc = bnxt_get_hwrm_link_config(bp, &new); + if (rc) { + new.link_speed = ETH_LINK_SPEED_100M; + new.link_duplex = ETH_LINK_FULL_DUPLEX; + RTE_LOG(ERR, PMD, + "Failed to retrieve link rc = 0x%x!", rc); + goto out; + } + if (!wait_to_complete) + break; + + rte_delay_ms(BNXT_LINK_WAIT_INTERVAL); + + } while (!new.link_status && cnt--); + + /* Timed out or success */ + if (new.link_status) { + /* Update only if success */ + eth_dev->data->dev_link.link_duplex = new.link_duplex; + eth_dev->data->dev_link.link_speed = new.link_speed; + } + eth_dev->data->dev_link.link_status = new.link_status; +out: + return rc; +} + +static void bnxt_promiscuous_enable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + + if (bp->vnic_info == NULL) + return; + + vnic = &bp->vnic_info[0]; + + vnic->flags |= BNXT_VNIC_INFO_PROMISC; + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); +} + +static void bnxt_promiscuous_disable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + + if (bp->vnic_info == NULL) + return; + + vnic = &bp->vnic_info[0]; + + vnic->flags &= ~BNXT_VNIC_INFO_PROMISC; + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); +} + +static void bnxt_allmulticast_enable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + + if (bp->vnic_info == NULL) + return; + + vnic = &bp->vnic_info[0]; + + vnic->flags |= BNXT_VNIC_INFO_ALLMULTI; + bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); +} + +static void bnxt_allmulticast_disable_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic; + + if (bp->vnic_info == NULL) + return; + + vnic = &bp->vnic_info[0]; + + vnic->flags &= ~BNXT_VNIC_INFO_ALLMULTI; + 
bnxt_hwrm_cfa_l2_set_rx_mask(bp, vnic); +} + +static int bnxt_reta_update_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_vnic_info *vnic; + int i; + + if (!(dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG)) + return -EINVAL; + + if (reta_size != HW_HASH_INDEX_SIZE) { + RTE_LOG(ERR, PMD, "The configured hash table lookup size " + "(%d) must equal the size supported by the hardware " + "(%d)\n", reta_size, HW_HASH_INDEX_SIZE); + return -EINVAL; + } + /* Update the RSS VNIC(s) */ + for (i = 0; i < MAX_FF_POOLS; i++) { + STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { + memcpy(vnic->rss_table, reta_conf, reta_size); + + bnxt_hwrm_vnic_rss_cfg(bp, vnic); + } + } + return 0; +} + +static int bnxt_reta_query_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + + /* Retrieve from the default VNIC */ + if (!vnic) + return -EINVAL; + if (!vnic->rss_table) + return -EINVAL; + + if (reta_size != HW_HASH_INDEX_SIZE) { + RTE_LOG(ERR, PMD, "The configured hash table lookup size " + "(%d) must equal the size supported by the hardware " + "(%d)\n", reta_size, HW_HASH_INDEX_SIZE); + return -EINVAL; + } + /* EW - need to revisit here copying from u64 to u16 */ + memcpy(reta_conf, vnic->rss_table, reta_size); + + return 0; +} + +static int bnxt_rss_hash_update_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_vnic_info *vnic; + uint16_t hash_type = 0; + int i; + + /* + * If RSS enablement were different than dev_configure, + * then return -EINVAL + */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) { + if (!rss_conf->rss_hf) + return -EINVAL; + } else { + if (rss_conf->rss_hf & BNXT_ETH_RSS_SUPPORT) + return -EINVAL; + } + if (rss_conf->rss_hf & ETH_RSS_IPV4) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_TCP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV4_UDP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; + if (rss_conf->rss_hf & ETH_RSS_IPV6) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_TCP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; + if (rss_conf->rss_hf & ETH_RSS_NONFRAG_IPV6_UDP) + hash_type |= HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; + + /* Update the RSS VNIC(s) */ + for (i = 0; i < MAX_FF_POOLS; i++) { + STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { + vnic->hash_type = hash_type; + + /* + * Use the supplied key if the key length is + * acceptable and the rss_key is not NULL + */ + if (rss_conf->rss_key && + rss_conf->rss_key_len <= HW_HASH_KEY_SIZE) + memcpy(vnic->rss_hash_key, rss_conf->rss_key, + rss_conf->rss_key_len); + + bnxt_hwrm_vnic_rss_cfg(bp, vnic); + } + } + return 0; +} + +static int bnxt_rss_hash_conf_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_vnic_info *vnic = &bp->vnic_info[0]; + int len; + uint32_t hash_types; + + /* RSS configuration is the same for all VNICs */ 
+ if (vnic && vnic->rss_hash_key) { + if (rss_conf->rss_key) { + len = rss_conf->rss_key_len <= HW_HASH_KEY_SIZE ? + rss_conf->rss_key_len : HW_HASH_KEY_SIZE; + memcpy(rss_conf->rss_key, vnic->rss_hash_key, len); + } + + hash_types = vnic->hash_type; + rss_conf->rss_hf = 0; + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4) { + rss_conf->rss_hf |= ETH_RSS_IPV4; + hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_TCP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV4_UDP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6) { + rss_conf->rss_hf |= ETH_RSS_IPV6; + hash_types &= ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_TCP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6; + } + if (hash_types & HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6) { + rss_conf->rss_hf |= ETH_RSS_NONFRAG_IPV6_UDP; + hash_types &= + ~HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6; + } + if (hash_types) { + RTE_LOG(ERR, PMD, + "Unknwon RSS config from firmware (%08x), RSS disabled", + vnic->hash_type); + return -ENOTSUP; + } + } else { + rss_conf->rss_hf = 0; + } + return 0; +} + +static int bnxt_flow_ctrl_get_op(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf __rte_unused) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + struct rte_eth_link link_info; + int rc; + + rc = bnxt_get_hwrm_link_config(bp, &link_info); + if (rc) + return rc; + + memset(fc_conf, 0, sizeof(*fc_conf)); + if (bp->link_info.auto_pause) + fc_conf->autoneg = 1; + switch (bp->link_info.pause) { + case 0: + fc_conf->mode = RTE_FC_NONE; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX: + fc_conf->mode = RTE_FC_TX_PAUSE; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX: + fc_conf->mode = RTE_FC_RX_PAUSE; + break; + case (HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX | + HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX): + fc_conf->mode = RTE_FC_FULL; + break; + } + return 0; +} + +static int bnxt_flow_ctrl_set_op(struct rte_eth_dev *dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct bnxt *bp = (struct bnxt *)dev->data->dev_private; + + switch (fc_conf->mode) { + case RTE_FC_NONE: + bp->link_info.auto_pause = 0; + bp->link_info.force_pause = 0; + break; + case RTE_FC_RX_PAUSE: + if (fc_conf->autoneg) { + bp->link_info.auto_pause = + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; + bp->link_info.force_pause = 0; + } else { + bp->link_info.auto_pause = 0; + bp->link_info.force_pause = + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; + } + break; + case RTE_FC_TX_PAUSE: + if (fc_conf->autoneg) { + bp->link_info.auto_pause = + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX; + bp->link_info.force_pause = 0; + } else { + bp->link_info.auto_pause = 0; + bp->link_info.force_pause = + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX; + } + break; + case RTE_FC_FULL: + if (fc_conf->autoneg) { + bp->link_info.auto_pause = + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX | + HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX; + bp->link_info.force_pause = 0; + } else { + bp->link_info.auto_pause = 0; + bp->link_info.force_pause = + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX | + HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX; + } + break; + } + return bnxt_set_hwrm_link_config(bp, true); +} + 
+/* + * Initialization + */ + +static struct eth_dev_ops bnxt_dev_ops = { + .dev_infos_get = bnxt_dev_info_get_op, + .dev_close = bnxt_dev_close_op, + .dev_configure = bnxt_dev_configure_op, + .dev_start = bnxt_dev_start_op, + .dev_stop = bnxt_dev_stop_op, + .dev_set_link_up = bnxt_dev_set_link_up_op, + .dev_set_link_down = bnxt_dev_set_link_down_op, + .stats_get = bnxt_stats_get_op, + .stats_reset = bnxt_stats_reset_op, + .rx_queue_setup = bnxt_rx_queue_setup_op, + .rx_queue_release = bnxt_rx_queue_release_op, + .tx_queue_setup = bnxt_tx_queue_setup_op, + .tx_queue_release = bnxt_tx_queue_release_op, + .reta_update = bnxt_reta_update_op, + .reta_query = bnxt_reta_query_op, + .rss_hash_update = bnxt_rss_hash_update_op, + .rss_hash_conf_get = bnxt_rss_hash_conf_get_op, + .link_update = bnxt_link_update_op, + .promiscuous_enable = bnxt_promiscuous_enable_op, + .promiscuous_disable = bnxt_promiscuous_disable_op, + .allmulticast_enable = bnxt_allmulticast_enable_op, + .allmulticast_disable = bnxt_allmulticast_disable_op, + .mac_addr_add = bnxt_mac_addr_add_op, + .mac_addr_remove = bnxt_mac_addr_remove_op, + .flow_ctrl_get = bnxt_flow_ctrl_get_op, + .flow_ctrl_set = bnxt_flow_ctrl_set_op, +}; + +static bool bnxt_vf_pciid(uint16_t id) +{ + if (id == BROADCOM_DEV_ID_57304_VF || + id == BROADCOM_DEV_ID_57406_VF) + return true; + return false; +} + +static int bnxt_init_board(struct rte_eth_dev *eth_dev) +{ + int rc; + struct bnxt *bp = eth_dev->data->dev_private; + + /* enable device (incl. PCI PM wakeup), and bus-mastering */ + if (!eth_dev->pci_dev->mem_resource[0].addr) { + RTE_LOG(ERR, PMD, + "Cannot find PCI device base address, aborting\n"); + rc = -ENODEV; + goto init_err_disable; + } + + bp->eth_dev = eth_dev; + bp->pdev = eth_dev->pci_dev; + + bp->bar0 = (void *)eth_dev->pci_dev->mem_resource[0].addr; + if (!bp->bar0) { + RTE_LOG(ERR, PMD, "Cannot map device registers, aborting\n"); + rc = -ENOMEM; + goto init_err_release; + } + return 0; + +init_err_release: + if (bp->bar0) + bp->bar0 = NULL; + +init_err_disable: + + return rc; +} + +static int +bnxt_dev_init(struct rte_eth_dev *eth_dev) +{ + static int version_printed; + struct bnxt *bp; + int rc; + + if (version_printed++ == 0) + RTE_LOG(INFO, PMD, "%s", bnxt_version); + + if (eth_dev->pci_dev->addr.function >= 2 && + eth_dev->pci_dev->addr.function < 4) { + RTE_LOG(ERR, PMD, "Function not enabled %x:\n", + eth_dev->pci_dev->addr.function); + rc = -ENOMEM; + goto error; + } + + rte_eth_copy_pci_info(eth_dev, eth_dev->pci_dev); + bp = eth_dev->data->dev_private; + + if (bnxt_vf_pciid(eth_dev->pci_dev->id.device_id)) + bp->flags |= BNXT_FLAG_VF; + + rc = bnxt_init_board(eth_dev); + if (rc) { + RTE_LOG(ERR, PMD, + "Board initialization failed rc: %x\n", rc); + goto error; + } + eth_dev->dev_ops = &bnxt_dev_ops; + eth_dev->rx_pkt_burst = &bnxt_recv_pkts; + eth_dev->tx_pkt_burst = &bnxt_xmit_pkts; + + rc = bnxt_alloc_hwrm_resources(bp); + if (rc) { + RTE_LOG(ERR, PMD, + "hwrm resource allocation failure rc: %x\n", rc); + goto error_free; + } + rc = bnxt_hwrm_ver_get(bp); + if (rc) + goto error_free; + bnxt_hwrm_queue_qportcfg(bp); + + /* Get the MAX capabilities for this function */ + rc = bnxt_hwrm_func_qcaps(bp); + if (rc) { + RTE_LOG(ERR, PMD, "hwrm query capability failure rc: %x\n", rc); + goto error_free; + } + eth_dev->data->mac_addrs = rte_zmalloc("bnxt_mac_addr_tbl", + ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR, 0); + if (eth_dev->data->mac_addrs == NULL) { + RTE_LOG(ERR, PMD, + "Failed to alloc %u bytes needed to store MAC addr tbl", + 
ETHER_ADDR_LEN * MAX_NUM_MAC_ADDR);
+		rc = -ENOMEM;
+		goto error_free;
+	}
+	/* Copy the permanent MAC from the qcap response address now. */
+	if (BNXT_PF(bp))
+		memcpy(bp->mac_addr, bp->pf.mac_addr, sizeof(bp->mac_addr));
+	else
+		memcpy(bp->mac_addr, bp->vf.mac_addr, sizeof(bp->mac_addr));
+	memcpy(&eth_dev->data->mac_addrs[0], bp->mac_addr, ETHER_ADDR_LEN);
+	bp->grp_info = rte_zmalloc("bnxt_grp_info",
+			sizeof(*bp->grp_info) * bp->max_ring_grps, 0);
+	if (!bp->grp_info) {
+		RTE_LOG(ERR, PMD,
+			"Failed to alloc %zu bytes needed to store group info table\n",
+			sizeof(*bp->grp_info) * bp->max_ring_grps);
+		rc = -ENOMEM;
+		goto error_free;
+	}
+
+	rc = bnxt_hwrm_func_driver_register(bp, 0,
+					    bp->pf.vf_req_fwd);
+	if (rc) {
+		RTE_LOG(ERR, PMD,
+			"Failed to register driver");
+		rc = -EBUSY;
+		goto error_free;
+	}
+
+	RTE_LOG(INFO, PMD,
+		DRV_MODULE_NAME " found at mem %" PRIx64 ", node addr %pM\n",
+		eth_dev->pci_dev->mem_resource[0].phys_addr,
+		eth_dev->pci_dev->mem_resource[0].addr);
+
+	return 0;
+
+error_free:
+	eth_dev->driver->eth_dev_uninit(eth_dev);
+error:
+	return rc;
+}
+
+static int
+bnxt_dev_uninit(struct rte_eth_dev *eth_dev) {
+	struct bnxt *bp = eth_dev->data->dev_private;
+	int rc;
+
+	if (eth_dev->data->mac_addrs)
+		rte_free(eth_dev->data->mac_addrs);
+	if (bp->grp_info)
+		rte_free(bp->grp_info);
+	rc = bnxt_hwrm_func_driver_unregister(bp, 0);
+	bnxt_free_hwrm_resources(bp);
+	return rc;
+}
+
+static struct eth_driver bnxt_rte_pmd = {
+	.pci_drv = {
+		    .name = "rte_" DRV_MODULE_NAME "_pmd",
+		    .id_table = bnxt_pci_id_map,
+		    .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+		    },
+	.eth_dev_init = bnxt_dev_init,
+	.eth_dev_uninit = bnxt_dev_uninit,
+	.dev_private_size = sizeof(struct bnxt),
+};
+
+static int bnxt_rte_pmd_init(const char *name, const char *params __rte_unused)
+{
+	RTE_LOG(INFO, PMD, "bnxt_rte_pmd_init() called for %s\n", name);
+	rte_eth_driver_register(&bnxt_rte_pmd);
+	return 0;
+}
+
+static struct rte_driver bnxt_pmd_drv = {
+	.name = "eth_bnxt",
+	.type = PMD_PDEV,
+	.init = bnxt_rte_pmd_init,
+};
+
+PMD_REGISTER_DRIVER(bnxt_pmd_drv);
diff --git a/drivers/net/bnxt/bnxt_filter.c b/drivers/net/bnxt/bnxt_filter.c
new file mode 100644
index 00000000..f03a1dc8
--- /dev/null
+++ b/drivers/net/bnxt/bnxt_filter.c
@@ -0,0 +1,175 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) Broadcom Limited.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Broadcom Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/queue.h> + +#include <rte_log.h> +#include <rte_malloc.h> + +#include "bnxt.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +/* + * Filter Functions + */ + +struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + + /* Find the 1st unused filter from the free_filter_list pool*/ + filter = STAILQ_FIRST(&bp->free_filter_list); + if (!filter) { + RTE_LOG(ERR, PMD, "No more free filter resources\n"); + return NULL; + } + STAILQ_REMOVE_HEAD(&bp->free_filter_list, next); + + /* Default to L2 MAC Addr filter */ + filter->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX; + filter->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK; + memcpy(filter->l2_addr, bp->eth_dev->data->mac_addrs->addr_bytes, + ETHER_ADDR_LEN); + memset(filter->l2_addr_mask, 0xff, ETHER_ADDR_LEN); + return filter; +} + +void bnxt_init_filters(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + int i, max_filters; + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + max_filters = pf->max_l2_ctx; + } else { + struct bnxt_vf_info *vf = &bp->vf; + + max_filters = vf->max_l2_ctx; + } + STAILQ_INIT(&bp->free_filter_list); + for (i = 0; i < max_filters; i++) { + filter = &bp->filter_info[i]; + filter->fw_l2_filter_id = -1; + STAILQ_INSERT_TAIL(&bp->free_filter_list, filter, next); + } +} + +void bnxt_free_all_filters(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + struct bnxt_filter_info *filter, *temp_filter; + int i; + + for (i = 0; i < MAX_FF_POOLS; i++) { + STAILQ_FOREACH(vnic, &bp->ff_pool[i], next) { + filter = STAILQ_FIRST(&vnic->filter); + while (filter) { + temp_filter = STAILQ_NEXT(filter, next); + STAILQ_REMOVE(&vnic->filter, filter, + bnxt_filter_info, next); + STAILQ_INSERT_TAIL(&bp->free_filter_list, + filter, next); + filter = temp_filter; + } + STAILQ_INIT(&vnic->filter); + } + } +} + +void bnxt_free_filter_mem(struct bnxt *bp) +{ + struct bnxt_filter_info *filter; + uint16_t max_filters, i; + int rc = 0; + + /* Ensure that all filters are freed */ + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + max_filters = pf->max_l2_ctx; + } else { + struct bnxt_vf_info *vf = &bp->vf; + + max_filters = vf->max_l2_ctx; + } + for (i = 0; i < max_filters; i++) { + filter = &bp->filter_info[i]; + if (filter->fw_l2_filter_id != ((uint64_t)-1)) { + RTE_LOG(ERR, PMD, "HWRM filter is not freed??\n"); + /* Call HWRM to try to free filter again */ + rc = bnxt_hwrm_clear_filter(bp, filter); + if (rc) + RTE_LOG(ERR, PMD, + "HWRM filter cannot be freed rc = %d\n", + rc); + } + filter->fw_l2_filter_id = -1; + } + STAILQ_INIT(&bp->free_filter_list); + + rte_free(bp->filter_info); + bp->filter_info = NULL; +} + +int bnxt_alloc_filter_mem(struct bnxt *bp) +{ + struct bnxt_filter_info *filter_mem; + uint16_t max_filters; + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + 
max_filters = pf->max_l2_ctx; + } else { + struct bnxt_vf_info *vf = &bp->vf; + + max_filters = vf->max_l2_ctx; + } + /* Allocate memory for VNIC pool and filter pool */ + filter_mem = rte_zmalloc("bnxt_filter_info", + max_filters * sizeof(struct bnxt_filter_info), + 0); + if (filter_mem == NULL) { + RTE_LOG(ERR, PMD, "Failed to alloc memory for %d filters", + max_filters); + return -ENOMEM; + } + bp->filter_info = filter_mem; + return 0; +} diff --git a/drivers/net/bnxt/bnxt_filter.h b/drivers/net/bnxt/bnxt_filter.h new file mode 100644 index 00000000..06fe134a --- /dev/null +++ b/drivers/net/bnxt/bnxt_filter.h @@ -0,0 +1,74 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_FILTER_H_ +#define _BNXT_FILTER_H_ + +#include <rte_ether.h> + +struct bnxt; +struct bnxt_filter_info { + STAILQ_ENTRY(bnxt_filter_info) next; + uint64_t fw_l2_filter_id; +#define INVALID_MAC_INDEX ((uint16_t)-1) + uint16_t mac_index; + + /* Filter Characteristics */ + uint32_t flags; + uint32_t enables; + uint8_t l2_addr[ETHER_ADDR_LEN]; + uint8_t l2_addr_mask[ETHER_ADDR_LEN]; + uint16_t l2_ovlan; + uint16_t l2_ovlan_mask; + uint16_t l2_ivlan; + uint16_t l2_ivlan_mask; + uint8_t t_l2_addr[ETHER_ADDR_LEN]; + uint8_t t_l2_addr_mask[ETHER_ADDR_LEN]; + uint16_t t_l2_ovlan; + uint16_t t_l2_ovlan_mask; + uint16_t t_l2_ivlan; + uint16_t t_l2_ivlan_mask; + uint8_t tunnel_type; + uint16_t mirror_vnic_id; + uint32_t vni; + uint8_t pri_hint; + uint64_t l2_filter_id_hint; +}; + +struct bnxt_filter_info *bnxt_alloc_filter(struct bnxt *bp); +void bnxt_init_filters(struct bnxt *bp); +void bnxt_free_all_filters(struct bnxt *bp); +void bnxt_free_filter_mem(struct bnxt *bp); +int bnxt_alloc_filter_mem(struct bnxt *bp); + +#endif diff --git a/drivers/net/bnxt/bnxt_hwrm.c b/drivers/net/bnxt/bnxt_hwrm.c new file mode 100644 index 00000000..5d81a60d --- /dev/null +++ b/drivers/net/bnxt/bnxt_hwrm.c @@ -0,0 +1,1491 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. 
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_malloc.h> +#include <rte_memzone.h> +#include <rte_version.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_ring.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +#define HWRM_CMD_TIMEOUT 2000 + +/* + * HWRM Functions (sent to HWRM) + * These are named bnxt_hwrm_*() and return -1 if bnxt_hwrm_send_message() + * fails (ie: a timeout), and a positive non-zero HWRM error code if the HWRM + * command was failed by the ChiMP. 
+ */ + +static int bnxt_hwrm_send_message_locked(struct bnxt *bp, void *msg, + uint32_t msg_len) +{ + unsigned int i; + struct input *req = msg; + struct output *resp = bp->hwrm_cmd_resp_addr; + uint32_t *data = msg; + uint8_t *bar; + uint8_t *valid; + + /* Write request msg to hwrm channel */ + for (i = 0; i < msg_len; i += 4) { + bar = (uint8_t *)bp->bar0 + i; + *(volatile uint32_t *)bar = *data; + data++; + } + + /* Zero the rest of the request space */ + for (; i < bp->max_req_len; i += 4) { + bar = (uint8_t *)bp->bar0 + i; + *(volatile uint32_t *)bar = 0; + } + + /* Ring channel doorbell */ + bar = (uint8_t *)bp->bar0 + 0x100; + *(volatile uint32_t *)bar = 1; + + /* Poll for the valid bit */ + for (i = 0; i < HWRM_CMD_TIMEOUT; i++) { + /* Sanity check on the resp->resp_len */ + rte_rmb(); + if (resp->resp_len && resp->resp_len <= + bp->max_resp_len) { + /* Last byte of resp contains the valid key */ + valid = (uint8_t *)resp + resp->resp_len - 1; + if (*valid == HWRM_RESP_VALID_KEY) + break; + } + rte_delay_us(600); + } + + if (i >= HWRM_CMD_TIMEOUT) { + RTE_LOG(ERR, PMD, "Error sending msg %x\n", + req->req_type); + goto err_ret; + } + return 0; + +err_ret: + return -1; +} + +static int bnxt_hwrm_send_message(struct bnxt *bp, void *msg, uint32_t msg_len) +{ + int rc; + + rte_spinlock_lock(&bp->hwrm_lock); + rc = bnxt_hwrm_send_message_locked(bp, msg, msg_len); + rte_spinlock_unlock(&bp->hwrm_lock); + return rc; +} + +#define HWRM_PREP(req, type, cr, resp) \ + memset(bp->hwrm_cmd_resp_addr, 0, bp->max_resp_len); \ + req.req_type = rte_cpu_to_le_16(HWRM_##type); \ + req.cmpl_ring = rte_cpu_to_le_16(cr); \ + req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); \ + req.target_id = rte_cpu_to_le_16(0xffff); \ + req.resp_addr = rte_cpu_to_le_64(bp->hwrm_cmd_resp_dma_addr) + +#define HWRM_CHECK_RESULT \ + { \ + if (rc) { \ + RTE_LOG(ERR, PMD, "%s failed rc:%d\n", \ + __func__, rc); \ + return rc; \ + } \ + if (resp->error_code) { \ + rc = rte_le_to_cpu_16(resp->error_code); \ + RTE_LOG(ERR, PMD, "%s error %d\n", __func__, rc); \ + return rc; \ + } \ + } + +int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.mask = 0; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_cfa_l2_set_rx_mask_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_set_rx_mask_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t mask = 0; + + HWRM_PREP(req, CFA_L2_SET_RX_MASK, -1, resp); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + /* FIXME add multicast flag, when multicast adding options is supported + * by ethtool. 
+ */ + if (vnic->flags & BNXT_VNIC_INFO_PROMISC) + mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS; + if (vnic->flags & BNXT_VNIC_INFO_ALLMULTI) + mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST; + req.mask = rte_cpu_to_le_32(HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST | + HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST | + mask); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_clear_filter(struct bnxt *bp, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_l2_filter_free_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_filter_free_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, CFA_L2_FILTER_FREE, -1, resp); + + req.l2_filter_id = rte_cpu_to_le_64(filter->fw_l2_filter_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + filter->fw_l2_filter_id = -1; + + return 0; +} + +int bnxt_hwrm_set_filter(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + struct bnxt_filter_info *filter) +{ + int rc = 0; + struct hwrm_cfa_l2_filter_alloc_input req = {.req_type = 0 }; + struct hwrm_cfa_l2_filter_alloc_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t enables = 0; + + HWRM_PREP(req, CFA_L2_FILTER_ALLOC, -1, resp); + + req.flags = rte_cpu_to_le_32(filter->flags); + + enables = filter->enables | + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID; + req.dst_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR) + memcpy(req.l2_addr, filter->l2_addr, + ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK) + memcpy(req.l2_addr_mask, filter->l2_addr_mask, + ETHER_ADDR_LEN); + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN) + req.l2_ovlan = filter->l2_ovlan; + if (enables & + HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK) + req.l2_ovlan_mask = filter->l2_ovlan_mask; + + req.enables = rte_cpu_to_le_32(enables); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + filter->fw_l2_filter_id = rte_le_to_cpu_64(resp->l2_filter_id); + + return rc; +} + +int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd) +{ + int rc; + struct hwrm_exec_fwd_resp_input req = {.req_type = 0 }; + struct hwrm_exec_fwd_resp_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, EXEC_FWD_RESP, -1, resp); + + memcpy(req.encap_request, fwd_cmd, + sizeof(req.encap_request)); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_qcaps(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_qcaps_input req = {.req_type = 0 }; + struct hwrm_func_qcaps_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, FUNC_QCAPS, -1, resp); + + req.fid = rte_cpu_to_le_16(0xffff); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + bp->max_ring_grps = rte_le_to_cpu_32(resp->max_hw_ring_grps); + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + pf->fw_fid = rte_le_to_cpu_32(resp->fid); + pf->port_id = resp->port_id; + memcpy(pf->mac_addr, resp->perm_mac_address, ETHER_ADDR_LEN); + pf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); + pf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); + pf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); + pf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); + pf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + pf->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + pf->first_vf_id = rte_le_to_cpu_16(resp->first_vf_id); + 
pf->max_vfs = rte_le_to_cpu_16(resp->max_vfs); + } else { + struct bnxt_vf_info *vf = &bp->vf; + + vf->fw_fid = rte_le_to_cpu_32(resp->fid); + memcpy(vf->mac_addr, &resp->perm_mac_address, ETHER_ADDR_LEN); + vf->max_rsscos_ctx = rte_le_to_cpu_16(resp->max_rsscos_ctx); + vf->max_cp_rings = rte_le_to_cpu_16(resp->max_cmpl_rings); + vf->max_tx_rings = rte_le_to_cpu_16(resp->max_tx_rings); + vf->max_rx_rings = rte_le_to_cpu_16(resp->max_rx_rings); + vf->max_l2_ctx = rte_le_to_cpu_16(resp->max_l2_ctxs); + vf->max_vnics = rte_le_to_cpu_16(resp->max_vnics); + } + + return rc; +} + +int bnxt_hwrm_func_reset(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_func_reset_input req = {.req_type = 0 }; + struct hwrm_func_reset_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, FUNC_RESET, -1, resp); + + req.enables = rte_cpu_to_le_32(0); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags, + uint32_t *vf_req_fwd) +{ + int rc; + struct hwrm_func_drv_rgtr_input req = {.req_type = 0 }; + struct hwrm_func_drv_rgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (bp->flags & BNXT_FLAG_REGISTERED) + return 0; + + HWRM_PREP(req, FUNC_DRV_RGTR, -1, resp); + req.flags = flags; + req.enables = HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER; + req.ver_maj = RTE_VER_YEAR; + req.ver_min = RTE_VER_MONTH; + req.ver_upd = RTE_VER_MINOR; + + memcpy(req.vf_req_fwd, vf_req_fwd, sizeof(req.vf_req_fwd)); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + bp->flags |= BNXT_FLAG_REGISTERED; + + return rc; +} + +int bnxt_hwrm_ver_get(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_ver_get_input req = {.req_type = 0 }; + struct hwrm_ver_get_output *resp = bp->hwrm_cmd_resp_addr; + uint32_t my_version; + uint32_t fw_version; + uint16_t max_resp_len; + char type[RTE_MEMZONE_NAMESIZE]; + + HWRM_PREP(req, VER_GET, -1, resp); + + req.hwrm_intf_maj = HWRM_VERSION_MAJOR; + req.hwrm_intf_min = HWRM_VERSION_MINOR; + req.hwrm_intf_upd = HWRM_VERSION_UPDATE; + + /* + * Hold the lock since we may be adjusting the response pointers. 
+ */ + rte_spinlock_lock(&bp->hwrm_lock); + rc = bnxt_hwrm_send_message_locked(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + RTE_LOG(INFO, PMD, "%d.%d.%d:%d.%d.%d\n", + resp->hwrm_intf_maj, resp->hwrm_intf_min, + resp->hwrm_intf_upd, + resp->hwrm_fw_maj, resp->hwrm_fw_min, resp->hwrm_fw_bld); + + my_version = HWRM_VERSION_MAJOR << 16; + my_version |= HWRM_VERSION_MINOR << 8; + my_version |= HWRM_VERSION_UPDATE; + + fw_version = resp->hwrm_intf_maj << 16; + fw_version |= resp->hwrm_intf_min << 8; + fw_version |= resp->hwrm_intf_upd; + + if (resp->hwrm_intf_maj != HWRM_VERSION_MAJOR) { + RTE_LOG(ERR, PMD, "Unsupported firmware API version\n"); + rc = -EINVAL; + goto error; + } + + if (my_version != fw_version) { + RTE_LOG(INFO, PMD, "BNXT Driver/HWRM API mismatch.\n"); + if (my_version < fw_version) { + RTE_LOG(INFO, PMD, + "Firmware API version is newer than driver.\n"); + RTE_LOG(INFO, PMD, + "The driver may be missing features.\n"); + } else { + RTE_LOG(INFO, PMD, + "Firmware API version is older than driver.\n"); + RTE_LOG(INFO, PMD, + "Not all driver features may be functional.\n"); + } + } + + if (bp->max_req_len > resp->max_req_win_len) { + RTE_LOG(ERR, PMD, "Unsupported request length\n"); + rc = -EINVAL; + } + bp->max_req_len = resp->max_req_win_len; + max_resp_len = resp->max_resp_len; + if (bp->max_resp_len != max_resp_len) { + sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", + bp->pdev->addr.domain, bp->pdev->addr.bus, + bp->pdev->addr.devid, bp->pdev->addr.function); + + rte_free(bp->hwrm_cmd_resp_addr); + + bp->hwrm_cmd_resp_addr = rte_malloc(type, max_resp_len, 0); + if (bp->hwrm_cmd_resp_addr == NULL) { + rc = -ENOMEM; + goto error; + } + bp->hwrm_cmd_resp_dma_addr = + rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr); + bp->max_resp_len = max_resp_len; + } + +error: + rte_spinlock_unlock(&bp->hwrm_lock); + return rc; +} + +int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags) +{ + int rc; + struct hwrm_func_drv_unrgtr_input req = {.req_type = 0 }; + struct hwrm_func_drv_unrgtr_output *resp = bp->hwrm_cmd_resp_addr; + + if (!(bp->flags & BNXT_FLAG_REGISTERED)) + return 0; + + HWRM_PREP(req, FUNC_DRV_UNRGTR, -1, resp); + req.flags = flags; + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + bp->flags &= ~BNXT_FLAG_REGISTERED; + + return rc; +} + +static int bnxt_hwrm_port_phy_cfg(struct bnxt *bp, struct bnxt_link_info *conf) +{ + int rc = 0; + struct hwrm_port_phy_cfg_input req = {.req_type = 0}; + struct hwrm_port_phy_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, PORT_PHY_CFG, -1, resp); + + req.flags = conf->phy_flags; + if (conf->link_up) { + req.force_link_speed = conf->link_speed; + /* + * Note, ChiMP FW 20.2.1 and 20.2.2 return an error when we set + * any auto mode, even "none". 
+ */ + if (req.auto_mode == HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE) { + req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE; + } else { + req.auto_mode = conf->auto_mode; + req.enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE; + req.auto_link_speed_mask = conf->auto_link_speed_mask; + req.enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK; + req.auto_link_speed = conf->auto_link_speed; + req.enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED; + } + req.auto_duplex = conf->duplex; + req.enables |= HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX; + req.auto_pause = conf->auto_pause; + /* Set force_pause if there is no auto or if there is a force */ + if (req.auto_pause) + req.enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE; + else + req.enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE; + req.force_pause = conf->force_pause; + if (req.force_pause) + req.enables |= + HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE; + } else { + req.flags &= ~HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; + req.flags |= HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN; + req.force_link_speed = 0; + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +static int bnxt_hwrm_port_phy_qcfg(struct bnxt *bp, + struct bnxt_link_info *link_info) +{ + int rc = 0; + struct hwrm_port_phy_qcfg_input req = {.req_type = 0}; + struct hwrm_port_phy_qcfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, PORT_PHY_QCFG, -1, resp); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + link_info->phy_link_status = resp->link; + if (link_info->phy_link_status == HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK) { + link_info->link_up = 1; + link_info->link_speed = rte_le_to_cpu_16(resp->link_speed); + } else { + link_info->link_up = 0; + link_info->link_speed = 0; + } + link_info->duplex = resp->duplex; + link_info->pause = resp->pause; + link_info->auto_pause = resp->auto_pause; + link_info->force_pause = resp->force_pause; + link_info->auto_mode = resp->auto_mode; + + link_info->support_speeds = rte_le_to_cpu_16(resp->support_speeds); + link_info->auto_link_speed = rte_le_to_cpu_16(resp->auto_link_speed); + link_info->preemphasis = rte_le_to_cpu_32(resp->preemphasis); + link_info->phy_ver[0] = resp->phy_maj; + link_info->phy_ver[1] = resp->phy_min; + link_info->phy_ver[2] = resp->phy_bld; + + return rc; +} + +int bnxt_hwrm_queue_qportcfg(struct bnxt *bp) +{ + int rc = 0; + struct hwrm_queue_qportcfg_input req = {.req_type = 0 }; + struct hwrm_queue_qportcfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, QUEUE_QPORTCFG, -1, resp); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + +#define GET_QUEUE_INFO(x) \ + bp->cos_queue[x].id = resp->queue_id##x; \ + bp->cos_queue[x].profile = resp->queue_id##x##_service_profile + + GET_QUEUE_INFO(0); + GET_QUEUE_INFO(1); + GET_QUEUE_INFO(2); + GET_QUEUE_INFO(3); + GET_QUEUE_INFO(4); + GET_QUEUE_INFO(5); + GET_QUEUE_INFO(6); + GET_QUEUE_INFO(7); + + return rc; +} + +int bnxt_hwrm_ring_alloc(struct bnxt *bp, + struct bnxt_ring *ring, + uint32_t ring_type, uint32_t map_index, + uint32_t stats_ctx_id) +{ + int rc = 0; + struct hwrm_ring_alloc_input req = {.req_type = 0 }; + struct hwrm_ring_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, RING_ALLOC, -1, resp); + + req.enables = rte_cpu_to_le_32(0); + + req.page_tbl_addr = rte_cpu_to_le_64(ring->bd_dma); + req.fbo = rte_cpu_to_le_32(0); + /* Association of ring index with doorbell index 
*/ + req.logical_id = rte_cpu_to_le_16(map_index); + + switch (ring_type) { + case HWRM_RING_ALLOC_INPUT_RING_TYPE_TX: + req.queue_id = bp->cos_queue[0].id; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_RX: + req.ring_type = ring_type; + req.cmpl_ring_id = + rte_cpu_to_le_16(bp->grp_info[map_index].cp_fw_ring_id); + req.length = rte_cpu_to_le_32(ring->ring_size); + req.stat_ctx_id = rte_cpu_to_le_16(stats_ctx_id); + req.enables = rte_cpu_to_le_32(rte_le_to_cpu_32(req.enables) | + HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID); + break; + case HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL: + req.ring_type = ring_type; + /* + * TODO: Some HWRM versions crash with + * HWRM_RING_ALLOC_INPUT_INT_MODE_POLL + */ + req.int_mode = HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX; + req.length = rte_cpu_to_le_32(ring->ring_size); + break; + default: + RTE_LOG(ERR, PMD, "hwrm alloc invalid ring type %d\n", + ring_type); + return -1; + } + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + if (rc || resp->error_code) { + if (rc == 0 && resp->error_code) + rc = rte_le_to_cpu_16(resp->error_code); + switch (ring_type) { + case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL: + RTE_LOG(ERR, PMD, + "hwrm_ring_alloc cp failed. rc:%d\n", rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_RX: + RTE_LOG(ERR, PMD, + "hwrm_ring_alloc rx failed. rc:%d\n", rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_TX: + RTE_LOG(ERR, PMD, + "hwrm_ring_alloc tx failed. rc:%d\n", rc); + return rc; + default: + RTE_LOG(ERR, PMD, "Invalid ring. rc:%d\n", rc); + return rc; + } + } + + ring->fw_ring_id = rte_le_to_cpu_16(resp->ring_id); + return rc; +} + +int bnxt_hwrm_ring_free(struct bnxt *bp, + struct bnxt_ring *ring, uint32_t ring_type) +{ + int rc; + struct hwrm_ring_free_input req = {.req_type = 0 }; + struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, RING_FREE, -1, resp); + + req.ring_type = ring_type; + req.ring_id = rte_cpu_to_le_16(ring->fw_ring_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + if (rc || resp->error_code) { + if (rc == 0 && resp->error_code) + rc = rte_le_to_cpu_16(resp->error_code); + + switch (ring_type) { + case HWRM_RING_FREE_INPUT_RING_TYPE_CMPL: + RTE_LOG(ERR, PMD, "hwrm_ring_free cp failed. rc:%d\n", + rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_RX: + RTE_LOG(ERR, PMD, "hwrm_ring_free rx failed. rc:%d\n", + rc); + return rc; + case HWRM_RING_FREE_INPUT_RING_TYPE_TX: + RTE_LOG(ERR, PMD, "hwrm_ring_free tx failed. 
rc:%d\n", + rc); + return rc; + default: + RTE_LOG(ERR, PMD, "Invalid ring, rc:%d\n", rc); + return rc; + } + } + return 0; +} + +int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx) +{ + int rc = 0; + struct hwrm_ring_grp_alloc_input req = {.req_type = 0 }; + struct hwrm_ring_grp_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, RING_GRP_ALLOC, -1, resp); + + req.cr = rte_cpu_to_le_16(bp->grp_info[idx].cp_fw_ring_id); + req.rr = rte_cpu_to_le_16(bp->grp_info[idx].rx_fw_ring_id); + req.ar = rte_cpu_to_le_16(bp->grp_info[idx].ag_fw_ring_id); + req.sc = rte_cpu_to_le_16(bp->grp_info[idx].fw_stats_ctx); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + bp->grp_info[idx].fw_grp_id = + rte_le_to_cpu_16(resp->ring_group_id); + + return rc; +} + +int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx) +{ + int rc; + struct hwrm_ring_grp_free_input req = {.req_type = 0 }; + struct hwrm_ring_grp_free_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, RING_GRP_FREE, -1, resp); + + req.ring_group_id = rte_cpu_to_le_16(bp->grp_info[idx].fw_grp_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + bp->grp_info[idx].fw_grp_id = INVALID_HW_RING_ID; + return rc; +} + +int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr) +{ + int rc = 0; + struct hwrm_stat_ctx_clr_stats_input req = {.req_type = 0 }; + struct hwrm_stat_ctx_clr_stats_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, STAT_CTX_CLR_STATS, -1, resp); + + if (cpr->hw_stats_ctx_id == (uint32_t)HWRM_NA_SIGNATURE) + return rc; + + req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); + req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, unsigned int idx) +{ + int rc; + struct hwrm_stat_ctx_alloc_input req = {.req_type = 0 }; + struct hwrm_stat_ctx_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, STAT_CTX_ALLOC, -1, resp); + + req.update_period_ms = rte_cpu_to_le_32(1000); + + req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); + req.stats_dma_addr = + rte_cpu_to_le_64(cpr->hw_stats_map); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + cpr->hw_stats_ctx_id = rte_le_to_cpu_16(resp->stat_ctx_id); + bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id; + + return rc; +} + +int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, unsigned int idx) +{ + int rc; + struct hwrm_stat_ctx_free_input req = {.req_type = 0 }; + struct hwrm_stat_ctx_free_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, STAT_CTX_FREE, -1, resp); + + req.stat_ctx_id = rte_cpu_to_le_16(cpr->hw_stats_ctx_id); + req.seq_id = rte_cpu_to_le_16(bp->hwrm_cmd_seq++); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + cpr->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + bp->grp_info[idx].fw_stats_ctx = cpr->hw_stats_ctx_id; + + return rc; +} + +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0, i, j; + struct hwrm_vnic_alloc_input req = {.req_type = 0 }; + struct hwrm_vnic_alloc_output *resp = bp->hwrm_cmd_resp_addr; + + /* map ring groups to this vnic */ + for (i = vnic->start_grp_id, j = 0; i <= vnic->end_grp_id; i++, j++) { + if (bp->grp_info[i].fw_grp_id == (uint16_t)HWRM_NA_SIGNATURE) { + RTE_LOG(ERR, PMD, + "Not enough ring groups 
avail:%x req:%x\n", j, + (vnic->end_grp_id - vnic->start_grp_id) + 1); + break; + } + vnic->fw_grp_ids[j] = bp->grp_info[i].fw_grp_id; + } + + vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE; + vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE; + + HWRM_PREP(req, VNIC_ALLOC, -1, resp); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + vnic->fw_vnic_id = rte_le_to_cpu_16(resp->vnic_id); + return rc; +} + +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, VNIC_CFG, -1, resp); + + /* Only RSS support for now TBD: COS & LB */ + req.enables = + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP | + HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE | + HWRM_VNIC_CFG_INPUT_ENABLES_MRU); + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + req.dflt_ring_grp = + rte_cpu_to_le_16(bp->grp_info[vnic->start_grp_id].fw_grp_id); + req.rss_rule = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx); + req.cos_rule = rte_cpu_to_le_16(0xffff); + req.lb_rule = rte_cpu_to_le_16(0xffff); + req.mru = rte_cpu_to_le_16(bp->eth_dev->data->mtu + ETHER_HDR_LEN + + ETHER_CRC_LEN + VLAN_TAG_SIZE); + if (vnic->func_default) + req.flags = 1; + if (vnic->vlan_strip) + req.flags |= + rte_cpu_to_le_32(HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_input req = {.req_type = 0 }; + struct hwrm_vnic_rss_cos_lb_ctx_alloc_output *resp = + bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_ALLOC, -1, resp); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + vnic->fw_rss_cos_lb_ctx = rte_le_to_cpu_16(resp->rss_cos_lb_ctx_id); + + return rc; +} + +int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_rss_cos_lb_ctx_free_input req = {.req_type = 0 }; + struct hwrm_vnic_rss_cos_lb_ctx_free_output *resp = + bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, VNIC_RSS_COS_LB_CTX_FREE, -1, resp); + + req.rss_cos_lb_ctx_id = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + vnic->fw_rss_cos_lb_ctx = INVALID_HW_RING_ID; + + return rc; +} + +int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_free_input req = {.req_type = 0 }; + struct hwrm_vnic_free_output *resp = bp->hwrm_cmd_resp_addr; + + if (vnic->fw_vnic_id == INVALID_HW_RING_ID) + return rc; + + HWRM_PREP(req, VNIC_FREE, -1, resp); + + req.vnic_id = rte_cpu_to_le_16(vnic->fw_vnic_id); + + rc = bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + vnic->fw_vnic_id = INVALID_HW_RING_ID; + return rc; +} + +int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic) +{ + int rc = 0; + struct hwrm_vnic_rss_cfg_input req = {.req_type = 0 }; + struct hwrm_vnic_rss_cfg_output *resp = bp->hwrm_cmd_resp_addr; + + HWRM_PREP(req, VNIC_RSS_CFG, -1, resp); + + req.hash_type = rte_cpu_to_le_32(vnic->hash_type); + + req.ring_grp_tbl_addr = + rte_cpu_to_le_64(vnic->rss_table_dma_addr); + req.hash_key_tbl_addr = + rte_cpu_to_le_64(vnic->rss_hash_key_dma_addr); + req.rss_ctx_idx = rte_cpu_to_le_16(vnic->fw_rss_cos_lb_ctx); + + rc = 
bnxt_hwrm_send_message(bp, &req, sizeof(req)); + + HWRM_CHECK_RESULT; + + return rc; +} + +/* + * HWRM utility functions + */ + +int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq; + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + + if (i >= bp->rx_cp_nr_rings) { + txq = bp->tx_queues[i - bp->rx_cp_nr_rings]; + cpr = txq->cp_ring; + } else { + rxq = bp->rx_queues[i]; + cpr = rxq->cp_ring; + } + + rc = bnxt_hwrm_stat_clear(bp, cpr); + if (rc) + return rc; + } + return 0; +} + +int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp) +{ + int rc; + unsigned int i; + struct bnxt_cp_ring_info *cpr; + + for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + unsigned int idx = i + 1; + + if (i >= bp->rx_cp_nr_rings) + cpr = bp->tx_queues[i - bp->rx_cp_nr_rings]->cp_ring; + else + cpr = bp->rx_queues[i]->cp_ring; + if (cpr->hw_stats_ctx_id != HWRM_NA_SIGNATURE) { + rc = bnxt_hwrm_stat_ctx_free(bp, cpr, idx); + if (rc) + return rc; + } + } + return 0; +} + +int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < bp->rx_cp_nr_rings + bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq; + struct bnxt_rx_queue *rxq; + struct bnxt_cp_ring_info *cpr; + unsigned int idx = i + 1; + + if (i >= bp->rx_cp_nr_rings) { + txq = bp->tx_queues[i - bp->rx_cp_nr_rings]; + cpr = txq->cp_ring; + } else { + rxq = bp->rx_queues[i]; + cpr = rxq->cp_ring; + } + + rc = bnxt_hwrm_stat_ctx_alloc(bp, cpr, idx); + + if (rc) + return rc; + } + return rc; +} + +int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp) +{ + uint16_t i; + uint32_t rc = 0; + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + unsigned int idx = i + 1; + + if (bp->grp_info[idx].fw_grp_id == INVALID_HW_RING_ID) { + RTE_LOG(ERR, PMD, + "Attempt to free invalid ring group %d\n", + idx); + continue; + } + + rc = bnxt_hwrm_ring_grp_free(bp, idx); + + if (rc) + return rc; + } + return rc; +} + +static void bnxt_free_cp_ring(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, unsigned int idx) +{ + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + + bnxt_hwrm_ring_free(bp, cp_ring, + HWRM_RING_FREE_INPUT_RING_TYPE_CMPL); + cp_ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[idx].cp_fw_ring_id = INVALID_HW_RING_ID; + memset(cpr->cp_desc_ring, 0, cpr->cp_ring_struct->ring_size * + sizeof(*cpr->cp_desc_ring)); + cpr->cp_raw_cons = 0; +} + +int bnxt_free_all_hwrm_rings(struct bnxt *bp) +{ + unsigned int i; + int rc = 0; + + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct bnxt_ring *ring = txr->tx_ring_struct; + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + unsigned int idx = bp->rx_cp_nr_rings + i + 1; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_TX); + ring->fw_ring_id = INVALID_HW_RING_ID; + memset(txr->tx_desc_ring, 0, + txr->tx_ring_struct->ring_size * + sizeof(*txr->tx_desc_ring)); + memset(txr->tx_buf_ring, 0, + txr->tx_ring_struct->ring_size * + sizeof(*txr->tx_buf_ring)); + txr->tx_prod = 0; + txr->tx_cons = 0; + } + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + bnxt_free_cp_ring(bp, cpr, idx); + } + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = 
rxr->rx_ring_struct; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + unsigned int idx = i + 1; + + if (ring->fw_ring_id != INVALID_HW_RING_ID) { + bnxt_hwrm_ring_free(bp, ring, + HWRM_RING_FREE_INPUT_RING_TYPE_RX); + ring->fw_ring_id = INVALID_HW_RING_ID; + bp->grp_info[idx].rx_fw_ring_id = INVALID_HW_RING_ID; + memset(rxr->rx_desc_ring, 0, + rxr->rx_ring_struct->ring_size * + sizeof(*rxr->rx_desc_ring)); + memset(rxr->rx_buf_ring, 0, + rxr->rx_ring_struct->ring_size * + sizeof(*rxr->rx_buf_ring)); + rxr->rx_prod = 0; + } + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + bnxt_free_cp_ring(bp, cpr, idx); + } + + /* Default completion ring */ + { + struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; + + if (cpr->cp_ring_struct->fw_ring_id != INVALID_HW_RING_ID) + bnxt_free_cp_ring(bp, cpr, 0); + } + + return rc; +} + +int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp) +{ + uint16_t i; + uint32_t rc = 0; + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + unsigned int idx = i + 1; + + if (bp->grp_info[idx].cp_fw_ring_id == INVALID_HW_RING_ID || + bp->grp_info[idx].rx_fw_ring_id == INVALID_HW_RING_ID) + continue; + + rc = bnxt_hwrm_ring_grp_alloc(bp, idx); + + if (rc) + return rc; + } + return rc; +} + +void bnxt_free_hwrm_resources(struct bnxt *bp) +{ + /* Release memzone */ + rte_free(bp->hwrm_cmd_resp_addr); + bp->hwrm_cmd_resp_addr = NULL; + bp->hwrm_cmd_resp_dma_addr = 0; +} + +int bnxt_alloc_hwrm_resources(struct bnxt *bp) +{ + struct rte_pci_device *pdev = bp->pdev; + char type[RTE_MEMZONE_NAMESIZE]; + + sprintf(type, "bnxt_hwrm_%04x:%02x:%02x:%02x", pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + bp->max_req_len = HWRM_MAX_REQ_LEN; + bp->max_resp_len = HWRM_MAX_RESP_LEN; + bp->hwrm_cmd_resp_addr = rte_malloc(type, bp->max_resp_len, 0); + if (bp->hwrm_cmd_resp_addr == NULL) + return -ENOMEM; + bp->hwrm_cmd_resp_dma_addr = + rte_malloc_virt2phy(bp->hwrm_cmd_resp_addr); + rte_spinlock_init(&bp->hwrm_lock); + + return 0; +} + +int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + int rc = 0; + + STAILQ_FOREACH(filter, &vnic->filter, next) { + rc = bnxt_hwrm_clear_filter(bp, filter); + if (rc) + break; + } + return rc; +} + +int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic) +{ + struct bnxt_filter_info *filter; + int rc = 0; + + STAILQ_FOREACH(filter, &vnic->filter, next) { + rc = bnxt_hwrm_set_filter(bp, vnic, filter); + if (rc) + break; + } + return rc; +} + +void bnxt_free_all_hwrm_resources(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + unsigned int i; + + if (bp->vnic_info == NULL) + return; + + vnic = &bp->vnic_info[0]; + bnxt_hwrm_cfa_l2_clear_rx_mask(bp, vnic); + + /* VNIC resources */ + for (i = 0; i < bp->nr_vnics; i++) { + struct bnxt_vnic_info *vnic = &bp->vnic_info[i]; + + bnxt_clear_hwrm_vnic_filters(bp, vnic); + + bnxt_hwrm_vnic_ctx_free(bp, vnic); + bnxt_hwrm_vnic_free(bp, vnic); + } + /* Ring resources */ + bnxt_free_all_hwrm_rings(bp); + bnxt_free_all_hwrm_ring_grps(bp); + bnxt_free_all_hwrm_stat_ctxs(bp); +} + +static uint16_t bnxt_parse_eth_link_duplex(uint32_t conf_link_speed) +{ + uint8_t hw_link_duplex = HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH; + + if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG) + return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH; + + switch (conf_link_speed) { + case ETH_LINK_SPEED_10M_HD: + case ETH_LINK_SPEED_100M_HD: + return HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF; + } + return 
hw_link_duplex; +} + +static uint16_t bnxt_parse_eth_link_speed(uint32_t conf_link_speed) +{ + uint16_t eth_link_speed = 0; + + if ((conf_link_speed & ETH_LINK_SPEED_FIXED) == ETH_LINK_SPEED_AUTONEG) + return ETH_LINK_SPEED_AUTONEG; + + switch (conf_link_speed & ~ETH_LINK_SPEED_FIXED) { + case ETH_LINK_SPEED_100M: + case ETH_LINK_SPEED_100M_HD: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB; + break; + case ETH_LINK_SPEED_1G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB; + break; + case ETH_LINK_SPEED_2_5G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB; + break; + case ETH_LINK_SPEED_10G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB; + break; + case ETH_LINK_SPEED_20G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB; + break; + case ETH_LINK_SPEED_25G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB; + break; + case ETH_LINK_SPEED_40G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB; + break; + case ETH_LINK_SPEED_50G: + eth_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB; + break; + default: + RTE_LOG(ERR, PMD, + "Unsupported link speed %d; default to AUTO\n", + conf_link_speed); + break; + } + return eth_link_speed; +} + +#define BNXT_SUPPORTED_SPEEDS (ETH_LINK_SPEED_100M | ETH_LINK_SPEED_100M_HD | \ + ETH_LINK_SPEED_1G | ETH_LINK_SPEED_2_5G | \ + ETH_LINK_SPEED_10G | ETH_LINK_SPEED_20G | ETH_LINK_SPEED_25G | \ + ETH_LINK_SPEED_40G | ETH_LINK_SPEED_50G) + +static int bnxt_valid_link_speed(uint32_t link_speed, uint8_t port_id) +{ + uint32_t one_speed; + + if (link_speed == ETH_LINK_SPEED_AUTONEG) + return 0; + + if (link_speed & ETH_LINK_SPEED_FIXED) { + one_speed = link_speed & ~ETH_LINK_SPEED_FIXED; + + if (one_speed & (one_speed - 1)) { + RTE_LOG(ERR, PMD, + "Invalid advertised speeds (%u) for port %u\n", + link_speed, port_id); + return -EINVAL; + } + if ((one_speed & BNXT_SUPPORTED_SPEEDS) != one_speed) { + RTE_LOG(ERR, PMD, + "Unsupported advertised speed (%u) for port %u\n", + link_speed, port_id); + return -EINVAL; + } + } else { + if (!(link_speed & BNXT_SUPPORTED_SPEEDS)) { + RTE_LOG(ERR, PMD, + "Unsupported advertised speeds (%u) for port %u\n", + link_speed, port_id); + return -EINVAL; + } + } + return 0; +} + +static uint16_t bnxt_parse_eth_link_speed_mask(uint32_t link_speed) +{ + uint16_t ret = 0; + + if (link_speed == ETH_LINK_SPEED_AUTONEG) + link_speed = BNXT_SUPPORTED_SPEEDS; + + if (link_speed & ETH_LINK_SPEED_100M) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB; + if (link_speed & ETH_LINK_SPEED_100M_HD) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB; + if (link_speed & ETH_LINK_SPEED_1G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB; + if (link_speed & ETH_LINK_SPEED_2_5G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB; + if (link_speed & ETH_LINK_SPEED_10G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB; + if (link_speed & ETH_LINK_SPEED_20G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB; + if (link_speed & ETH_LINK_SPEED_25G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB; + if (link_speed & ETH_LINK_SPEED_40G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB; + if (link_speed & ETH_LINK_SPEED_50G) + ret |= HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB; + return ret; +} + +static uint32_t bnxt_parse_hw_link_speed(uint16_t hw_link_speed) +{ + uint32_t eth_link_speed = 
ETH_SPEED_NUM_NONE; + + switch (hw_link_speed) { + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB: + eth_link_speed = ETH_SPEED_NUM_100M; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB: + eth_link_speed = ETH_SPEED_NUM_1G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB: + eth_link_speed = ETH_SPEED_NUM_2_5G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB: + eth_link_speed = ETH_SPEED_NUM_10G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB: + eth_link_speed = ETH_SPEED_NUM_20G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB: + eth_link_speed = ETH_SPEED_NUM_25G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB: + eth_link_speed = ETH_SPEED_NUM_40G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB: + eth_link_speed = ETH_SPEED_NUM_50G; + break; + case HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB: + default: + RTE_LOG(ERR, PMD, "HWRM link speed %d not defined\n", + hw_link_speed); + break; + } + return eth_link_speed; +} + +static uint16_t bnxt_parse_hw_link_duplex(uint16_t hw_link_duplex) +{ + uint16_t eth_link_duplex = ETH_LINK_FULL_DUPLEX; + + switch (hw_link_duplex) { + case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH: + case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL: + eth_link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF: + eth_link_duplex = ETH_LINK_HALF_DUPLEX; + break; + default: + RTE_LOG(ERR, PMD, "HWRM link duplex %d not defined\n", + hw_link_duplex); + break; + } + return eth_link_duplex; +} + +int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link) +{ + int rc = 0; + struct bnxt_link_info *link_info = &bp->link_info; + + rc = bnxt_hwrm_port_phy_qcfg(bp, link_info); + if (rc) { + RTE_LOG(ERR, PMD, + "Get link config failed with rc %d\n", rc); + goto exit; + } + if (link_info->link_up) + link->link_speed = + bnxt_parse_hw_link_speed(link_info->link_speed); + else + link->link_speed = ETH_LINK_SPEED_10M; + link->link_duplex = bnxt_parse_hw_link_duplex(link_info->duplex); + link->link_status = link_info->link_up; + link->link_autoneg = link_info->auto_mode == + HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE ? 
+ ETH_LINK_SPEED_FIXED : ETH_LINK_SPEED_AUTONEG; +exit: + return rc; +} + +int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up) +{ + int rc = 0; + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + struct bnxt_link_info link_req; + uint16_t speed; + + rc = bnxt_valid_link_speed(dev_conf->link_speeds, + bp->eth_dev->data->port_id); + if (rc) + goto error; + + memset(&link_req, 0, sizeof(link_req)); + speed = bnxt_parse_eth_link_speed(dev_conf->link_speeds); + link_req.link_up = link_up; + if (speed == 0) { + link_req.phy_flags = + HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG; + link_req.auto_mode = + HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW; + link_req.auto_link_speed_mask = + bnxt_parse_eth_link_speed_mask(dev_conf->link_speeds); + link_req.auto_link_speed = + HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB; + } else { + link_req.auto_mode = HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE; + link_req.phy_flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE | + HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY; + link_req.link_speed = speed; + } + link_req.duplex = bnxt_parse_eth_link_duplex(dev_conf->link_speeds); + link_req.auto_pause = bp->link_info.auto_pause; + link_req.force_pause = bp->link_info.force_pause; + + rc = bnxt_hwrm_port_phy_cfg(bp, &link_req); + if (rc) { + RTE_LOG(ERR, PMD, + "Set link config failed with rc %d\n", rc); + } + +error: + return rc; +} diff --git a/drivers/net/bnxt/bnxt_hwrm.h b/drivers/net/bnxt/bnxt_hwrm.h new file mode 100644 index 00000000..a508024f --- /dev/null +++ b/drivers/net/bnxt/bnxt_hwrm.h @@ -0,0 +1,104 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _BNXT_HWRM_H_ +#define _BNXT_HWRM_H_ + +#include <inttypes.h> +#include <stdbool.h> + +struct bnxt; +struct bnxt_filter_info; +struct bnxt_cp_ring_info; + +#define HWRM_SEQ_ID_INVALID -1U + +int bnxt_hwrm_cfa_l2_clear_rx_mask(struct bnxt *bp, + struct bnxt_vnic_info *vnic); +int bnxt_hwrm_cfa_l2_set_rx_mask(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_clear_filter(struct bnxt *bp, + struct bnxt_filter_info *filter); +int bnxt_hwrm_set_filter(struct bnxt *bp, + struct bnxt_vnic_info *vnic, + struct bnxt_filter_info *filter); + +int bnxt_hwrm_exec_fwd_resp(struct bnxt *bp, void *fwd_cmd); + +int bnxt_hwrm_func_driver_register(struct bnxt *bp, uint32_t flags, + uint32_t *vf_req_fwd); +int bnxt_hwrm_func_qcaps(struct bnxt *bp); +int bnxt_hwrm_func_reset(struct bnxt *bp); +int bnxt_hwrm_func_driver_unregister(struct bnxt *bp, uint32_t flags); + +int bnxt_hwrm_queue_qportcfg(struct bnxt *bp); + +int bnxt_hwrm_ring_alloc(struct bnxt *bp, + struct bnxt_ring *ring, + uint32_t ring_type, uint32_t map_index, + uint32_t stats_ctx_id); +int bnxt_hwrm_ring_free(struct bnxt *bp, + struct bnxt_ring *ring, uint32_t ring_type); +int bnxt_hwrm_ring_grp_alloc(struct bnxt *bp, unsigned int idx); +int bnxt_hwrm_ring_grp_free(struct bnxt *bp, unsigned int idx); + +int bnxt_hwrm_stat_clear(struct bnxt *bp, struct bnxt_cp_ring_info *cpr); +int bnxt_hwrm_stat_ctx_alloc(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, unsigned int idx); +int bnxt_hwrm_stat_ctx_free(struct bnxt *bp, + struct bnxt_cp_ring_info *cpr, unsigned int idx); + +int bnxt_hwrm_ver_get(struct bnxt *bp); + +int bnxt_hwrm_vnic_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_cfg(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_ctx_alloc(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_ctx_free(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_free(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_hwrm_vnic_rss_cfg(struct bnxt *bp, + struct bnxt_vnic_info *vnic); + +int bnxt_alloc_all_hwrm_stat_ctxs(struct bnxt *bp); +int bnxt_clear_all_hwrm_stat_ctxs(struct bnxt *bp); +int bnxt_free_all_hwrm_stat_ctxs(struct bnxt *bp); +int bnxt_free_all_hwrm_rings(struct bnxt *bp); +int bnxt_free_all_hwrm_ring_grps(struct bnxt *bp); +int bnxt_alloc_all_hwrm_ring_grps(struct bnxt *bp); +int bnxt_set_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic); +int bnxt_clear_hwrm_vnic_filters(struct bnxt *bp, struct bnxt_vnic_info *vnic); +void bnxt_free_all_hwrm_resources(struct bnxt *bp); +void bnxt_free_hwrm_resources(struct bnxt *bp); +int bnxt_alloc_hwrm_resources(struct bnxt *bp); +int bnxt_get_hwrm_link_config(struct bnxt *bp, struct rte_eth_link *link); +int bnxt_set_hwrm_link_config(struct bnxt *bp, bool link_up); + +#endif diff --git a/drivers/net/bnxt/bnxt_ring.c b/drivers/net/bnxt/bnxt_ring.c new file mode 100644 index 00000000..3f81ffcc --- /dev/null +++ b/drivers/net/bnxt/bnxt_ring.c @@ -0,0 +1,306 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_memzone.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" + +#include "hsi_struct_def_dpdk.h" + +/* + * Generic ring handling + */ + +void bnxt_free_ring(struct bnxt_ring *ring) +{ + if (ring->vmem_size && *ring->vmem) { + memset((char *)*ring->vmem, 0, ring->vmem_size); + *ring->vmem = NULL; + } + rte_memzone_free((const struct rte_memzone *)ring->mem_zone); +} + +/* + * Ring groups + */ + +void bnxt_init_ring_grps(struct bnxt *bp) +{ + unsigned int i; + + for (i = 0; i < bp->max_ring_grps; i++) + memset(&bp->grp_info[i], (uint8_t)HWRM_NA_SIGNATURE, + sizeof(struct bnxt_ring_grp_info)); +} + +/* + * Allocates a completion ring with vmem and stats optionally also allocating + * a TX and/or RX ring. Passing NULL as tx_ring_info and/or rx_ring_info + * to not allocate them. + * + * Order in the allocation is: + * stats - Always non-zero length + * cp vmem - Always zero-length, supported for the bnxt_ring abstraction + * tx vmem - Only non-zero length if tx_ring_info is not NULL + * rx vmem - Only non-zero length if rx_ring_info is not NULL + * cp bd ring - Always non-zero length + * tx bd ring - Only non-zero length if tx_ring_info is not NULL + * rx bd ring - Only non-zero length if rx_ring_info is not NULL + */ +int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, + struct bnxt_tx_ring_info *tx_ring_info, + struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_cp_ring_info *cp_ring_info, + const char *suffix) +{ + struct bnxt_ring *cp_ring = cp_ring_info->cp_ring_struct; + struct bnxt_ring *tx_ring; + struct bnxt_ring *rx_ring; + struct rte_pci_device *pdev = bp->pdev; + const struct rte_memzone *mz = NULL; + char mz_name[RTE_MEMZONE_NAMESIZE]; + + int stats_len = (tx_ring_info || rx_ring_info) ? + RTE_CACHE_LINE_ROUNDUP(sizeof(struct ctx_hw_stats64)) : 0; + + int cp_vmem_start = stats_len; + int cp_vmem_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->vmem_size); + + int tx_vmem_start = cp_vmem_start + cp_vmem_len; + int tx_vmem_len = + tx_ring_info ? RTE_CACHE_LINE_ROUNDUP(tx_ring_info-> + tx_ring_struct->vmem_size) : 0; + + int rx_vmem_start = tx_vmem_start + tx_vmem_len; + int rx_vmem_len = rx_ring_info ? 
+ RTE_CACHE_LINE_ROUNDUP(rx_ring_info-> + rx_ring_struct->vmem_size) : 0; + + int cp_ring_start = rx_vmem_start + rx_vmem_len; + int cp_ring_len = RTE_CACHE_LINE_ROUNDUP(cp_ring->ring_size * + sizeof(struct cmpl_base)); + + int tx_ring_start = cp_ring_start + cp_ring_len; + int tx_ring_len = tx_ring_info ? + RTE_CACHE_LINE_ROUNDUP(tx_ring_info->tx_ring_struct->ring_size * + sizeof(struct tx_bd_long)) : 0; + + int rx_ring_start = tx_ring_start + tx_ring_len; + int rx_ring_len = rx_ring_info ? + RTE_CACHE_LINE_ROUNDUP(rx_ring_info->rx_ring_struct->ring_size * + sizeof(struct rx_prod_pkt_bd)) : 0; + + int total_alloc_len = rx_ring_start + rx_ring_len; + + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_%04x:%02x:%02x:%02x-%04x_%s", pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function, qidx, + suffix); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + if (!mz) { + mz = rte_memzone_reserve(mz_name, total_alloc_len, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY); + if (mz == NULL) + return -ENOMEM; + } + memset(mz->addr, 0, mz->len); + + if (tx_ring_info) { + tx_ring = tx_ring_info->tx_ring_struct; + + tx_ring->bd = ((char *)mz->addr + tx_ring_start); + tx_ring_info->tx_desc_ring = (struct tx_bd_long *)tx_ring->bd; + tx_ring->bd_dma = mz->phys_addr + tx_ring_start; + tx_ring_info->tx_desc_mapping = tx_ring->bd_dma; + tx_ring->mem_zone = (const void *)mz; + + if (!tx_ring->bd) + return -ENOMEM; + if (tx_ring->vmem_size) { + tx_ring->vmem = + (void **)((char *)mz->addr + tx_vmem_start); + tx_ring_info->tx_buf_ring = + (struct bnxt_sw_tx_bd *)tx_ring->vmem; + } + } + + if (rx_ring_info) { + rx_ring = rx_ring_info->rx_ring_struct; + + rx_ring->bd = ((char *)mz->addr + rx_ring_start); + rx_ring_info->rx_desc_ring = + (struct rx_prod_pkt_bd *)rx_ring->bd; + rx_ring->bd_dma = mz->phys_addr + rx_ring_start; + rx_ring_info->rx_desc_mapping = rx_ring->bd_dma; + rx_ring->mem_zone = (const void *)mz; + + if (!rx_ring->bd) + return -ENOMEM; + if (rx_ring->vmem_size) { + rx_ring->vmem = + (void **)((char *)mz->addr + rx_vmem_start); + rx_ring_info->rx_buf_ring = + (struct bnxt_sw_rx_bd *)rx_ring->vmem; + } + } + + cp_ring->bd = ((char *)mz->addr + cp_ring_start); + cp_ring->bd_dma = mz->phys_addr + cp_ring_start; + cp_ring_info->cp_desc_ring = cp_ring->bd; + cp_ring_info->cp_desc_mapping = cp_ring->bd_dma; + cp_ring->mem_zone = (const void *)mz; + + if (!cp_ring->bd) + return -ENOMEM; + if (cp_ring->vmem_size) + *cp_ring->vmem = ((char *)mz->addr + stats_len); + if (stats_len) { + cp_ring_info->hw_stats = mz->addr; + cp_ring_info->hw_stats_map = mz->phys_addr; + } + cp_ring_info->hw_stats_ctx_id = HWRM_NA_SIGNATURE; + return 0; +} + +/* ring_grp usage: + * [0] = default completion ring + * [1 -> +rx_cp_nr_rings] = rx_cp, rx rings + * [1+rx_cp_nr_rings + 1 -> +tx_cp_nr_rings] = tx_cp, tx rings + */ +int bnxt_alloc_hwrm_rings(struct bnxt *bp) +{ + unsigned int i; + int rc = 0; + + /* Default completion ring */ + { + struct bnxt_cp_ring_info *cpr = bp->def_cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL, + 0, HWRM_NA_SIGNATURE); + if (rc) + goto err_out; + cpr->cp_doorbell = + (char *)bp->eth_dev->pci_dev->mem_resource[2].addr; + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + bp->grp_info[0].cp_fw_ring_id = cp_ring->fw_ring_id; + } + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + struct bnxt_cp_ring_info *cpr = 
rxq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct bnxt_ring *ring = rxr->rx_ring_struct; + unsigned int idx = i + 1; + + /* Rx cmpl */ + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL, + idx, HWRM_NA_SIGNATURE); + if (rc) + goto err_out; + cpr->cp_doorbell = + (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + idx * 0x80; + bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id; + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + + /* Rx ring */ + rc = bnxt_hwrm_ring_alloc(bp, ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_RX, + idx, cpr->hw_stats_ctx_id); + if (rc) + goto err_out; + rxr->rx_prod = 0; + rxr->rx_doorbell = + (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + idx * 0x80; + bp->grp_info[idx].rx_fw_ring_id = ring->fw_ring_id; + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + if (bnxt_init_one_rx_ring(rxq)) { + RTE_LOG(ERR, PMD, "bnxt_init_one_rx_ring failed!"); + bnxt_rx_queue_release_op(rxq); + return -ENOMEM; + } + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + } + + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + struct bnxt_ring *cp_ring = cpr->cp_ring_struct; + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct bnxt_ring *ring = txr->tx_ring_struct; + unsigned int idx = 1 + bp->rx_cp_nr_rings + i; + + /* Tx cmpl */ + rc = bnxt_hwrm_ring_alloc(bp, cp_ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL, + idx, HWRM_NA_SIGNATURE); + if (rc) + goto err_out; + + cpr->cp_doorbell = + (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + idx * 0x80; + bp->grp_info[idx].cp_fw_ring_id = cp_ring->fw_ring_id; + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + + /* Tx ring */ + rc = bnxt_hwrm_ring_alloc(bp, ring, + HWRM_RING_ALLOC_INPUT_RING_TYPE_TX, + idx, cpr->hw_stats_ctx_id); + if (rc) + goto err_out; + + txr->tx_doorbell = + (char *)bp->eth_dev->pci_dev->mem_resource[2].addr + + idx * 0x80; + } + +err_out: + return rc; +} diff --git a/drivers/net/bnxt/bnxt_ring.h b/drivers/net/bnxt/bnxt_ring.h new file mode 100644 index 00000000..8656549a --- /dev/null +++ b/drivers/net/bnxt/bnxt_ring.h @@ -0,0 +1,103 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_RING_H_ +#define _BNXT_RING_H_ + +#include <inttypes.h> + +#include <rte_memory.h> + +#define RING_NEXT(ring, idx) (((idx) + 1) & (ring)->ring_mask) + +#define RTE_MBUF_DATA_DMA_ADDR(mb) \ + ((uint64_t)((mb)->buf_physaddr + (mb)->data_off)) + +#define DB_IDX_MASK 0xffffff +#define DB_IDX_VALID (0x1 << 26) +#define DB_IRQ_DIS (0x1 << 27) +#define DB_KEY_TX (0x0 << 28) +#define DB_KEY_RX (0x1 << 28) +#define DB_KEY_CP (0x2 << 28) +#define DB_KEY_ST (0x3 << 28) +#define DB_KEY_TX_PUSH (0x4 << 28) +#define DB_LONG_TX_PUSH (0x2 << 24) + +#define DEFAULT_CP_RING_SIZE 256 +#define DEFAULT_RX_RING_SIZE 256 +#define DEFAULT_TX_RING_SIZE 256 + +#define MAX_TPA 128 + +/* These assume 4k pages */ +#define MAX_RX_DESC_CNT (8 * 1024) +#define MAX_TX_DESC_CNT (4 * 1024) +#define MAX_CP_DESC_CNT (16 * 1024) + +#define INVALID_HW_RING_ID ((uint16_t)-1) + +struct bnxt_ring { + void *bd; + phys_addr_t bd_dma; + uint32_t ring_size; + uint32_t ring_mask; + + int vmem_size; + void **vmem; + + uint16_t fw_ring_id; /* Ring id filled by Chimp FW */ + const void *mem_zone; +}; + +struct bnxt_ring_grp_info { + uint16_t fw_stats_ctx; + uint16_t fw_grp_id; + uint16_t rx_fw_ring_id; + uint16_t cp_fw_ring_id; + uint16_t ag_fw_ring_id; +}; + +struct bnxt; +struct bnxt_tx_ring_info; +struct bnxt_rx_ring_info; +struct bnxt_cp_ring_info; +void bnxt_free_ring(struct bnxt_ring *ring); +void bnxt_init_ring_grps(struct bnxt *bp); +int bnxt_alloc_rings(struct bnxt *bp, uint16_t qidx, + struct bnxt_tx_ring_info *tx_ring_info, + struct bnxt_rx_ring_info *rx_ring_info, + struct bnxt_cp_ring_info *cp_ring_info, + const char *suffix); +int bnxt_alloc_hwrm_rings(struct bnxt *bp); + +#endif diff --git a/drivers/net/bnxt/bnxt_rxq.c b/drivers/net/bnxt/bnxt_rxq.c new file mode 100644 index 00000000..cddf17d5 --- /dev/null +++ b/drivers/net/bnxt/bnxt_rxq.c @@ -0,0 +1,319 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <inttypes.h> + +#include <rte_malloc.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_filter.h" +#include "bnxt_hwrm.h" +#include "bnxt_ring.h" +#include "bnxt_rxq.h" +#include "bnxt_rxr.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +/* + * RX Queues + */ + +void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq) +{ + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + + if (cpr->hw_stats) + cpr->hw_stats = NULL; +} + +int bnxt_mq_rx_configure(struct bnxt *bp) +{ + struct rte_eth_conf *dev_conf = &bp->eth_dev->data->dev_conf; + unsigned int i, j, nb_q_per_grp, ring_idx; + int start_grp_id, end_grp_id, rc = 0; + struct bnxt_vnic_info *vnic; + struct bnxt_filter_info *filter; + struct bnxt_rx_queue *rxq; + + bp->nr_vnics = 0; + + /* Single queue mode */ + if (bp->rx_cp_nr_rings < 2) { + vnic = bnxt_alloc_vnic(bp); + if (!vnic) { + RTE_LOG(ERR, PMD, "VNIC alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next); + bp->nr_vnics++; + + rxq = bp->eth_dev->data->rx_queues[0]; + rxq->vnic = vnic; + + vnic->func_default = true; + vnic->ff_pool_idx = 0; + vnic->start_grp_id = 1; + vnic->end_grp_id = vnic->start_grp_id + + bp->rx_cp_nr_rings - 1; + filter = bnxt_alloc_filter(bp); + if (!filter) { + RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + goto out; + } + + /* Multi-queue mode */ + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_VMDQ_FLAG) { + /* VMDq ONLY, VMDq+RSS, VMDq+DCB, VMDq+DCB+RSS */ + enum rte_eth_nb_pools pools; + + switch (dev_conf->rxmode.mq_mode) { + case ETH_MQ_RX_VMDQ_RSS: + case ETH_MQ_RX_VMDQ_ONLY: + { + const struct rte_eth_vmdq_rx_conf *conf = + &dev_conf->rx_adv_conf.vmdq_rx_conf; + + /* ETH_8/64_POOLs */ + pools = conf->nb_queue_pools; + break; + } + default: + RTE_LOG(ERR, PMD, "Unsupported mq_mode %d\n", + dev_conf->rxmode.mq_mode); + rc = -EINVAL; + goto err_out; + } + /* For each pool, allocate MACVLAN CFA rule & VNIC */ + if (!pools) { + RTE_LOG(ERR, PMD, + "VMDq pool not set, defaulted to 64\n"); + pools = ETH_64_POOLS; + } + nb_q_per_grp = bp->rx_cp_nr_rings / pools; + start_grp_id = 1; + end_grp_id = start_grp_id + nb_q_per_grp - 1; + + ring_idx = 0; + for (i = 0; i < pools; i++) { + vnic = bnxt_alloc_vnic(bp); + if (!vnic) { + RTE_LOG(ERR, PMD, + "VNIC alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + STAILQ_INSERT_TAIL(&bp->ff_pool[i], vnic, next); + bp->nr_vnics++; + + for (j = 0; j < nb_q_per_grp; j++, ring_idx++) { + rxq = bp->eth_dev->data->rx_queues[ring_idx]; + rxq->vnic = vnic; + } + if (i == 0) + vnic->func_default = true; + vnic->ff_pool_idx = i; + vnic->start_grp_id = start_grp_id; + vnic->end_grp_id = end_grp_id; + + filter = bnxt_alloc_filter(bp); + if (!filter) { + RTE_LOG(ERR, PMD, + "L2 filter alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + /* + * TODO: Configure & associate CFA rule for + * each VNIC for each VMDq with MACVLAN, MACVLAN+TC
+ */ + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + + start_grp_id = end_grp_id + 1; + end_grp_id += nb_q_per_grp; + } + goto out; + } + + /* Non-VMDq mode - RSS, DCB, RSS+DCB */ + /* Init default VNIC for RSS or DCB only */ + vnic = bnxt_alloc_vnic(bp); + if (!vnic) { + RTE_LOG(ERR, PMD, "VNIC alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + /* Partition the rx queues for the single pool */ + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + rxq = bp->eth_dev->data->rx_queues[i]; + rxq->vnic = vnic; + } + STAILQ_INSERT_TAIL(&bp->ff_pool[0], vnic, next); + bp->nr_vnics++; + + vnic->func_default = true; + vnic->ff_pool_idx = 0; + vnic->start_grp_id = 1; + vnic->end_grp_id = vnic->start_grp_id + + bp->rx_cp_nr_rings - 1; + filter = bnxt_alloc_filter(bp); + if (!filter) { + RTE_LOG(ERR, PMD, "L2 filter alloc failed\n"); + rc = -ENOMEM; + goto err_out; + } + STAILQ_INSERT_TAIL(&vnic->filter, filter, next); + + if (dev_conf->rxmode.mq_mode & ETH_MQ_RX_RSS_FLAG) + vnic->hash_type = + HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 | + HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6; + +out: + return rc; + +err_out: + /* Free allocated vnic/filters */ + + return rc; +} + +static void bnxt_rx_queue_release_mbufs(struct bnxt_rx_queue *rxq __rte_unused) +{ + struct bnxt_sw_rx_bd *sw_ring; + uint16_t i; + + if (rxq) { + sw_ring = rxq->rx_ring->rx_buf_ring; + if (sw_ring) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (sw_ring[i].mbuf) { + rte_pktmbuf_free_seg(sw_ring[i].mbuf); + sw_ring[i].mbuf = NULL; + } + } + } + } +} + +void bnxt_free_rx_mbufs(struct bnxt *bp) +{ + struct bnxt_rx_queue *rxq; + int i; + + for (i = 0; i < (int)bp->rx_nr_rings; i++) { + rxq = bp->rx_queues[i]; + bnxt_rx_queue_release_mbufs(rxq); + } +} + +void bnxt_rx_queue_release_op(void *rx_queue) +{ + struct bnxt_rx_queue *rxq = (struct bnxt_rx_queue *)rx_queue; + + if (rxq) { + bnxt_rx_queue_release_mbufs(rxq); + + /* Free RX ring hardware descriptors */ + bnxt_free_ring(rxq->rx_ring->rx_ring_struct); + + /* Free RX completion ring hardware descriptors */ + bnxt_free_ring(rxq->cp_ring->cp_ring_struct); + + bnxt_free_rxq_stats(rxq); + + rte_free(rxq); + } +} + +int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_rx_queue *rxq; + int rc = 0; + + if (!nb_desc || nb_desc > MAX_RX_DESC_CNT) { + RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc); + rc = -EINVAL; + goto out; + } + + if (eth_dev->data->rx_queues) { + rxq = eth_dev->data->rx_queues[queue_idx]; + if (rxq) + bnxt_rx_queue_release_op(rxq); + } + rxq = rte_zmalloc_socket("bnxt_rx_queue", sizeof(struct bnxt_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq) { + RTE_LOG(ERR, PMD, "bnxt_rx_queue allocation failed!"); + rc = -ENOMEM; + goto out; + } + rxq->bp = bp; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->rx_free_thresh = rx_conf->rx_free_thresh; + + rc = bnxt_init_rx_ring_struct(rxq, socket_id); + if (rc) + goto out; + + rxq->queue_id = queue_idx; + rxq->port_id = eth_dev->data->port_id; + rxq->crc_len = (uint8_t)((eth_dev->data->dev_conf.rxmode.hw_strip_crc) ? 
+ 0 : ETHER_CRC_LEN); + + eth_dev->data->rx_queues[queue_idx] = rxq; + /* Allocate RX ring hardware descriptors */ + if (bnxt_alloc_rings(bp, queue_idx, NULL, rxq->rx_ring, rxq->cp_ring, + "rxr")) { + RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for rx_ring failed!"); + bnxt_rx_queue_release_op(rxq); + rc = -ENOMEM; + goto out; + } + +out: + return rc; +} diff --git a/drivers/net/bnxt/bnxt_rxq.h b/drivers/net/bnxt/bnxt_rxq.h new file mode 100644 index 00000000..95543298 --- /dev/null +++ b/drivers/net/bnxt/bnxt_rxq.h @@ -0,0 +1,74 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _BNXT_RQX_H_ +#define _BNXT_RQX_H_ + +struct bnxt; +struct bnxt_rx_ring_info; +struct bnxt_cp_ring_info; +struct bnxt_rx_queue { + struct rte_mempool *mb_pool; /* mbuf pool for RX ring */ + struct rte_mbuf *pkt_first_seg; /* 1st seg of pkt */ + struct rte_mbuf *pkt_last_seg; /* Last seg of pkt */ + uint64_t mbuf_initializer; /* val to init mbuf */ + uint16_t nb_rx_desc; /* num of RX desc */ + uint16_t rx_tail; /* cur val of RDT register */ + uint16_t nb_rx_hold; /* num held free RX desc */ + uint16_t rx_free_thresh; /* max free RX desc to hold */ + uint16_t queue_id; /* RX queue index */ + uint16_t reg_idx; /* RX queue register index */ + uint8_t port_id; /* Device port identifier */ + uint8_t crc_len; /* 0 if CRC stripped, 4 otherwise */ + + struct bnxt *bp; + struct bnxt_vnic_info *vnic; + + uint32_t rx_buf_size; + uint32_t rx_buf_use_size; /* useable size */ + struct bnxt_rx_ring_info *rx_ring; + struct bnxt_cp_ring_info *cp_ring; +}; + +void bnxt_free_rxq_stats(struct bnxt_rx_queue *rxq); +int bnxt_mq_rx_configure(struct bnxt *bp); +void bnxt_rx_queue_release_op(void *rx_queue); +int bnxt_rx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); +void bnxt_free_rx_mbufs(struct bnxt *bp); + +#endif diff --git a/drivers/net/bnxt/bnxt_rxr.c b/drivers/net/bnxt/bnxt_rxr.c new file mode 100644 index 00000000..5d93de26 --- /dev/null +++ b/drivers/net/bnxt/bnxt_rxr.c @@ -0,0 +1,367 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <inttypes.h> +#include <stdbool.h> + +#include <rte_byteorder.h> +#include <rte_malloc.h> +#include <rte_memory.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_ring.h" +#include "bnxt_rxr.h" +#include "bnxt_rxq.h" +#include "hsi_struct_def_dpdk.h" + +/* + * RX Ring handling + */ + +static inline struct rte_mbuf *__bnxt_alloc_rx_data(struct rte_mempool *mb) +{ + struct rte_mbuf *data; + + data = rte_mbuf_raw_alloc(mb); + + return data; +} + +static inline int bnxt_alloc_rx_data(struct bnxt_rx_queue *rxq, + struct bnxt_rx_ring_info *rxr, + uint16_t prod) +{ + struct rx_prod_pkt_bd *rxbd = &rxr->rx_desc_ring[prod]; + struct bnxt_sw_rx_bd *rx_buf = &rxr->rx_buf_ring[prod]; + struct rte_mbuf *data; + + data = __bnxt_alloc_rx_data(rxq->mb_pool); + if (!data) + return -ENOMEM; + + rx_buf->mbuf = data; + + rxbd->addr = rte_cpu_to_le_64(RTE_MBUF_DATA_DMA_ADDR(rx_buf->mbuf)); + + return 0; +} + +static void bnxt_reuse_rx_mbuf(struct bnxt_rx_ring_info *rxr, uint16_t cons, + struct rte_mbuf *mbuf) +{ + uint16_t prod = rxr->rx_prod; + struct bnxt_sw_rx_bd *prod_rx_buf; + struct rx_prod_pkt_bd *prod_bd, *cons_bd; + + prod_rx_buf = &rxr->rx_buf_ring[prod]; + + prod_rx_buf->mbuf = mbuf; + + prod_bd = &rxr->rx_desc_ring[prod]; + cons_bd = &rxr->rx_desc_ring[cons]; + + prod_bd->addr = cons_bd->addr; +} + +static uint16_t bnxt_rx_pkt(struct rte_mbuf **rx_pkt, + struct bnxt_rx_queue *rxq, uint32_t *raw_cons) +{ + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + struct rx_pkt_cmpl *rxcmp; + struct rx_pkt_cmpl_hi *rxcmp1; + uint32_t tmp_raw_cons = *raw_cons; + uint16_t cons, prod, cp_cons = + RING_CMP(cpr->cp_ring_struct, tmp_raw_cons); + struct bnxt_sw_rx_bd *rx_buf; + struct rte_mbuf *mbuf; + int rc = 0; + + rxcmp = (struct rx_pkt_cmpl *) + &cpr->cp_desc_ring[cp_cons]; + + tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons); + cp_cons = RING_CMP(cpr->cp_ring_struct, tmp_raw_cons); + rxcmp1 = (struct rx_pkt_cmpl_hi *)&cpr->cp_desc_ring[cp_cons]; + + if (!CMP_VALID(rxcmp1, tmp_raw_cons, cpr->cp_ring_struct)) + return -EBUSY; + + prod = rxr->rx_prod; + + /* EW - GRO deferred to phase 3 */ + cons = rxcmp->opaque; + rx_buf = &rxr->rx_buf_ring[cons]; + mbuf = rx_buf->mbuf; + rte_prefetch0(mbuf); + + mbuf->nb_segs = 1; + mbuf->next = NULL; + mbuf->pkt_len = rxcmp->len; + mbuf->data_len = mbuf->pkt_len; + mbuf->port = rxq->port_id; + mbuf->ol_flags = 0; + if (rxcmp->flags_type & RX_PKT_CMPL_FLAGS_RSS_VALID) { + mbuf->hash.rss = rxcmp->rss_hash; + mbuf->ol_flags |= PKT_RX_RSS_HASH; + } else { + mbuf->hash.fdir.id = rxcmp1->cfa_code; + mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID; + } + if (rxcmp1->flags2 & RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN) { + mbuf->vlan_tci = rxcmp1->metadata & + (RX_PKT_CMPL_METADATA_VID_MASK | + RX_PKT_CMPL_METADATA_DE | + RX_PKT_CMPL_METADATA_PRI_MASK); + mbuf->ol_flags |= PKT_RX_VLAN_PKT; + } + + rx_buf->mbuf = NULL; + if (rxcmp1->errors_v2 & RX_CMP_L2_ERRORS) { + /* Re-install the mbuf back to the rx ring */ + bnxt_reuse_rx_mbuf(rxr, cons, mbuf); + + rc = -EIO; + goto next_rx; + } + /* + * TODO: Redesign this.... + * If the allocation fails, the packet does not get received. + * Simply returning this will result in slowly falling behind + * on the producer ring buffers. 
+ * Instead, "filling up" the producer just before ringing the + * doorbell could be a better solution since it will let the + * producer ring starve until memory is available again pushing + * the drops into hardware and getting them out of the driver + * allowing recovery to a full producer ring. + * + * This could also help with cache usage by preventing per-packet + * calls in favour of a tight loop with the same function being called + * in it. + */ + if (bnxt_alloc_rx_data(rxq, rxr, prod)) { + RTE_LOG(ERR, PMD, "mbuf alloc failed with prod=0x%x\n", prod); + rc = -ENOMEM; + goto next_rx; + } + + /* + * All MBUFs are allocated with the same size under DPDK, + * no optimization for rx_copy_thresh + */ + + /* AGG buf operation is deferred */ + + /* EW - VLAN reception. Must compare against the ol_flags */ + + *rx_pkt = mbuf; +next_rx: + rxr->rx_prod = RING_NEXT(rxr->rx_ring_struct, prod); + + *raw_cons = tmp_raw_cons; + + return rc; +} + +uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct bnxt_rx_queue *rxq = rx_queue; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct bnxt_rx_ring_info *rxr = rxq->rx_ring; + uint32_t raw_cons = cpr->cp_raw_cons; + uint32_t cons; + int nb_rx_pkts = 0; + bool rx_event = false; + struct rx_pkt_cmpl *rxcmp; + + /* Handle RX burst request */ + while (1) { + int rc; + + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + rte_prefetch0(&cpr->cp_desc_ring[cons]); + rxcmp = (struct rx_pkt_cmpl *)&cpr->cp_desc_ring[cons]; + + if (!CMP_VALID(rxcmp, raw_cons, cpr->cp_ring_struct)) + break; + + /* TODO: Avoid magic numbers... */ + if ((CMP_TYPE(rxcmp) & 0x30) == 0x10) { + rc = bnxt_rx_pkt(&rx_pkts[nb_rx_pkts], rxq, &raw_cons); + if (likely(!rc)) + nb_rx_pkts++; + else if (rc == -EBUSY) /* partial completion */ + break; + rx_event = true; + } + raw_cons = NEXT_RAW_CMP(raw_cons); + if (nb_rx_pkts == nb_pkts) + break; + } + if (raw_cons == cpr->cp_raw_cons) { + /* + * For PMD, there is no need to keep on pushing to REARM + * the doorbell if there are no new completions + */ + return nb_rx_pkts; + } + cpr->cp_raw_cons = raw_cons; + + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + if (rx_event) + B_RX_DB(rxr->rx_doorbell, rxr->rx_prod); + return nb_rx_pkts; +} + +void bnxt_free_rx_rings(struct bnxt *bp) +{ + int i; + + for (i = 0; i < (int)bp->rx_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + + if (!rxq) + continue; + + bnxt_free_ring(rxq->rx_ring->rx_ring_struct); + rte_free(rxq->rx_ring->rx_ring_struct); + rte_free(rxq->rx_ring); + + bnxt_free_ring(rxq->cp_ring->cp_ring_struct); + rte_free(rxq->cp_ring->cp_ring_struct); + rte_free(rxq->cp_ring); + + rte_free(rxq); + bp->rx_queues[i] = NULL; + } +} + +int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id) +{ + struct bnxt *bp = rxq->bp; + struct bnxt_cp_ring_info *cpr; + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring *ring; + + rxq->rx_buf_use_size = bp->eth_dev->data->mtu + + ETHER_HDR_LEN + ETHER_CRC_LEN + + (2 * VLAN_TAG_SIZE); + rxq->rx_buf_size = rxq->rx_buf_use_size + sizeof(struct rte_mbuf); + + rxr = rte_zmalloc_socket("bnxt_rx_ring", + sizeof(struct bnxt_rx_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (rxr == NULL) + return -ENOMEM; + rxq->rx_ring = rxr; + + ring = rte_zmalloc_socket("bnxt_rx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + rxr->rx_ring_struct = ring; + ring->ring_size = rte_align32pow2(rxq->nb_rx_desc); + ring->ring_mask = 
ring->ring_size - 1; + ring->bd = (void *)rxr->rx_desc_ring; + ring->bd_dma = rxr->rx_desc_mapping; + ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_rx_bd); + ring->vmem = (void **)&rxr->rx_buf_ring; + + cpr = rte_zmalloc_socket("bnxt_rx_ring", + sizeof(struct bnxt_cp_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (cpr == NULL) + return -ENOMEM; + rxq->cp_ring = cpr; + + ring = rte_zmalloc_socket("bnxt_rx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + cpr->cp_ring_struct = ring; + ring->ring_size = rxr->rx_ring_struct->ring_size * 2; + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)cpr->cp_desc_ring; + ring->bd_dma = cpr->cp_desc_mapping; + ring->vmem_size = 0; + ring->vmem = NULL; + + return 0; +} + +static void bnxt_init_rxbds(struct bnxt_ring *ring, uint32_t type, + uint16_t len) +{ + uint32_t j; + struct rx_prod_pkt_bd *rx_bd_ring = (struct rx_prod_pkt_bd *)ring->bd; + + if (!rx_bd_ring) + return; + for (j = 0; j < ring->ring_size; j++) { + rx_bd_ring[j].flags_type = type; + rx_bd_ring[j].len = len; + rx_bd_ring[j].opaque = j; + } +} + +int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq) +{ + struct bnxt_rx_ring_info *rxr; + struct bnxt_ring *ring; + uint32_t prod, type; + unsigned int i; + + type = RX_PROD_PKT_BD_TYPE_RX_PROD_PKT | RX_PROD_PKT_BD_FLAGS_EOP_PAD; + + rxr = rxq->rx_ring; + ring = rxr->rx_ring_struct; + bnxt_init_rxbds(ring, type, rxq->rx_buf_use_size); + + prod = rxr->rx_prod; + for (i = 0; i < ring->ring_size; i++) { + if (bnxt_alloc_rx_data(rxq, rxr, prod) != 0) { + RTE_LOG(WARNING, PMD, + "init'ed rx ring %d with %d/%d mbufs only\n", + rxq->queue_id, i, ring->ring_size); + break; + } + rxr->rx_prod = prod; + prod = RING_NEXT(rxr->rx_ring_struct, prod); + } + + return 0; +} diff --git a/drivers/net/bnxt/bnxt_rxr.h b/drivers/net/bnxt/bnxt_rxr.h new file mode 100644 index 00000000..f766b26c --- /dev/null +++ b/drivers/net/bnxt/bnxt_rxr.h @@ -0,0 +1,62 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_RXR_H_ +#define _BNXT_RXR_H_ + +#define B_RX_DB(db, prod) \ + (*(uint32_t *)db = (DB_KEY_RX | prod)) + +struct bnxt_sw_rx_bd { + struct rte_mbuf *mbuf; /* data associated with RX descriptor */ +}; + +struct bnxt_rx_ring_info { + uint16_t rx_prod; + void *rx_doorbell; + + struct rx_prod_pkt_bd *rx_desc_ring; + struct bnxt_sw_rx_bd *rx_buf_ring; /* sw ring */ + + phys_addr_t rx_desc_mapping; + + struct bnxt_ring *rx_ring_struct; +}; + +uint16_t bnxt_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); +void bnxt_free_rx_rings(struct bnxt *bp); +int bnxt_init_rx_ring_struct(struct bnxt_rx_queue *rxq, unsigned int socket_id); +int bnxt_init_one_rx_ring(struct bnxt_rx_queue *rxq); + +#endif diff --git a/drivers/net/bnxt/bnxt_stats.c b/drivers/net/bnxt/bnxt_stats.c new file mode 100644 index 00000000..6f1c7602 --- /dev/null +++ b/drivers/net/bnxt/bnxt_stats.c @@ -0,0 +1,142 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <inttypes.h> + +#include <rte_byteorder.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_hwrm.h" +#include "bnxt_rxq.h" +#include "bnxt_stats.h" +#include "bnxt_txq.h" +#include "hsi_struct_def_dpdk.h" + +/* + * Statistics functions + */ + +void bnxt_free_stats(struct bnxt *bp) +{ + int i; + + for (i = 0; i < (int)bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + + bnxt_free_txq_stats(txq); + } + for (i = 0; i < (int)bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + + bnxt_free_rxq_stats(rxq); + } +} + +void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *bnxt_stats) +{ + unsigned int i; + struct bnxt *bp = eth_dev->data->dev_private; + + memset(bnxt_stats, 0, sizeof(*bnxt_stats)); + + for (i = 0; i < bp->rx_cp_nr_rings; i++) { + struct bnxt_rx_queue *rxq = bp->rx_queues[i]; + struct bnxt_cp_ring_info *cpr = rxq->cp_ring; + struct ctx_hw_stats64 *hw_stats = + (struct ctx_hw_stats64 *)cpr->hw_stats; + + bnxt_stats->q_ipackets[i] += + rte_le_to_cpu_64(hw_stats->rx_ucast_pkts); + bnxt_stats->q_ipackets[i] += + rte_le_to_cpu_64(hw_stats->rx_mcast_pkts); + bnxt_stats->q_ipackets[i] += + rte_le_to_cpu_64(hw_stats->rx_bcast_pkts); + + bnxt_stats->q_ibytes[i] += + rte_le_to_cpu_64(hw_stats->rx_ucast_bytes); + bnxt_stats->q_ibytes[i] += + rte_le_to_cpu_64(hw_stats->rx_mcast_bytes); + bnxt_stats->q_ibytes[i] += + rte_le_to_cpu_64(hw_stats->rx_bcast_bytes); + + /* + * TBD: No clear mapping to this... we don't seem + * to have a stat specifically for dropped due to + * insufficient mbufs. + */ + bnxt_stats->q_errors[i] = 0; + + /* These get replaced once the *_QSTATS commands work */ + bnxt_stats->ipackets += bnxt_stats->q_ipackets[i]; + bnxt_stats->ibytes += bnxt_stats->q_ibytes[i]; + bnxt_stats->imissed += bnxt_stats->q_errors[i]; + bnxt_stats->ierrors += + rte_le_to_cpu_64(hw_stats->rx_err_pkts); + } + + for (i = 0; i < bp->tx_cp_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + struct ctx_hw_stats64 *hw_stats = + (struct ctx_hw_stats64 *)cpr->hw_stats; + + bnxt_stats->q_opackets[i] += + rte_le_to_cpu_64(hw_stats->tx_ucast_pkts); + bnxt_stats->q_opackets[i] += + rte_le_to_cpu_64(hw_stats->tx_mcast_pkts); + bnxt_stats->q_opackets[i] += + rte_le_to_cpu_64(hw_stats->tx_bcast_pkts); + + bnxt_stats->q_obytes[i] += + rte_le_to_cpu_64(hw_stats->tx_ucast_bytes); + bnxt_stats->q_obytes[i] += + rte_le_to_cpu_64(hw_stats->tx_mcast_bytes); + bnxt_stats->q_obytes[i] += + rte_le_to_cpu_64(hw_stats->tx_bcast_bytes); + + /* These get replaced once the *_QSTATS commands work */ + bnxt_stats->opackets += bnxt_stats->q_opackets[i]; + bnxt_stats->obytes += bnxt_stats->q_obytes[i]; + bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_drop_pkts); + bnxt_stats->oerrors += rte_le_to_cpu_64(hw_stats->tx_err_pkts); + } +} + +void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + + bnxt_clear_all_hwrm_stat_ctxs(bp); +} diff --git a/drivers/net/bnxt/bnxt_stats.h b/drivers/net/bnxt/bnxt_stats.h new file mode 100644 index 00000000..65408a44 --- /dev/null +++ b/drivers/net/bnxt/bnxt_stats.h @@ -0,0 +1,44 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Broadcom Corporation. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_STATS_H_ +#define _BNXT_STATS_H_ + +#include <rte_ethdev.h> + +void bnxt_free_stats(struct bnxt *bp); +void bnxt_stats_get_op(struct rte_eth_dev *eth_dev, + struct rte_eth_stats *bnxt_stats); +void bnxt_stats_reset_op(struct rte_eth_dev *eth_dev); + +#endif diff --git a/drivers/net/bnxt/bnxt_txq.c b/drivers/net/bnxt/bnxt_txq.c new file mode 100644 index 00000000..99dddddf --- /dev/null +++ b/drivers/net/bnxt/bnxt_txq.c @@ -0,0 +1,162 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <inttypes.h> + +#include <rte_malloc.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_ring.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" + +/* + * TX Queues + */ + +void bnxt_free_txq_stats(struct bnxt_tx_queue *txq) +{ + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + + if (cpr->hw_stats) + cpr->hw_stats = NULL; +} + +static void bnxt_tx_queue_release_mbufs(struct bnxt_tx_queue *txq) +{ + struct bnxt_sw_tx_bd *sw_ring; + uint16_t i; + + sw_ring = txq->tx_ring->tx_buf_ring; + if (sw_ring) { + for (i = 0; i < txq->tx_ring->tx_ring_struct->ring_size; i++) { + if (sw_ring[i].mbuf) { + rte_pktmbuf_free(sw_ring[i].mbuf); + sw_ring[i].mbuf = NULL; + } + } + } +} + +void bnxt_free_tx_mbufs(struct bnxt *bp) +{ + struct bnxt_tx_queue *txq; + int i; + + for (i = 0; i < (int)bp->tx_nr_rings; i++) { + txq = bp->tx_queues[i]; + bnxt_tx_queue_release_mbufs(txq); + } +} + +void bnxt_tx_queue_release_op(void *tx_queue) +{ + struct bnxt_tx_queue *txq = (struct bnxt_tx_queue *)tx_queue; + + if (txq) { + /* Free TX ring hardware descriptors */ + bnxt_tx_queue_release_mbufs(txq); + bnxt_free_ring(txq->tx_ring->tx_ring_struct); + + /* Free TX completion ring hardware descriptors */ + bnxt_free_ring(txq->cp_ring->cp_ring_struct); + + bnxt_free_txq_stats(txq); + + rte_free(txq); + } +} + +int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct bnxt *bp = (struct bnxt *)eth_dev->data->dev_private; + struct bnxt_tx_queue *txq; + int rc = 0; + + if (!nb_desc || nb_desc > MAX_TX_DESC_CNT) { + RTE_LOG(ERR, PMD, "nb_desc %d is invalid", nb_desc); + rc = -EINVAL; + goto out; + } + + if (eth_dev->data->tx_queues) { + txq = eth_dev->data->tx_queues[queue_idx]; + if (txq) { + bnxt_tx_queue_release_op(txq); + txq = NULL; + } + } + txq = rte_zmalloc_socket("bnxt_tx_queue", sizeof(struct bnxt_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + if (!txq) { + RTE_LOG(ERR, PMD, "bnxt_tx_queue allocation failed!"); + rc = -ENOMEM; + goto out; + } + txq->bp = bp; + txq->nb_tx_desc = nb_desc; + txq->tx_free_thresh = tx_conf->tx_free_thresh; + + rc = bnxt_init_tx_ring_struct(txq, socket_id); + if (rc) + goto out; + + txq->queue_id = queue_idx; + txq->port_id = eth_dev->data->port_id; + + /* Allocate TX ring hardware descriptors */ + if (bnxt_alloc_rings(bp, queue_idx, txq->tx_ring, NULL, txq->cp_ring, + "txr")) { + RTE_LOG(ERR, PMD, "ring_dma_zone_reserve for tx_ring failed!"); + bnxt_tx_queue_release_op(txq); + rc = -ENOMEM; + goto out; + } + + if (bnxt_init_one_tx_ring(txq)) { + RTE_LOG(ERR, PMD, "bnxt_init_one_tx_ring failed!"); + bnxt_tx_queue_release_op(txq); + rc = -ENOMEM; + goto out; + } + + eth_dev->data->tx_queues[queue_idx] = txq; + +out: + return rc; +} diff --git a/drivers/net/bnxt/bnxt_txq.h b/drivers/net/bnxt/bnxt_txq.h new file mode 100644 index 00000000..16f3a0bd --- /dev/null +++ b/drivers/net/bnxt/bnxt_txq.h @@ -0,0 +1,75 @@ +/*- + * 
BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_TXQ_H_ +#define _BNXT_TXQ_H_ + +struct bnxt_tx_ring_info; +struct bnxt_cp_ring_info; +struct bnxt_tx_queue { + uint16_t nb_tx_desc; /* number of TX descriptors */ + uint16_t tx_free_thresh;/* minimum TX before freeing */ + /** Index to last TX descriptor to have been cleaned. */ + uint16_t last_desc_cleaned; + /** Total number of TX descriptors ready to be allocated. */ + uint16_t tx_next_dd; /* next desc to scan for DD bit */ + uint16_t tx_next_rs; /* next desc to set RS bit */ + uint16_t queue_id; /* TX queue index */ + uint16_t reg_idx; /* TX queue register index */ + uint8_t port_id; /* Device port identifier */ + uint8_t pthresh; /* Prefetch threshold register */ + uint8_t hthresh; /* Host threshold register */ + uint8_t wthresh; /* Write-back threshold reg */ + uint32_t txq_flags; /* Holds flags for this TXq */ + uint32_t ctx_curr; /* Hardware context states */ + uint8_t tx_deferred_start; /* not in global dev start */ + + struct bnxt *bp; + int index; + int tx_wake_thresh; + struct bnxt_tx_ring_info *tx_ring; + + unsigned int cp_nr_rings; + struct bnxt_cp_ring_info *cp_ring; +}; + +void bnxt_free_txq_stats(struct bnxt_tx_queue *txq); +void bnxt_free_tx_mbufs(struct bnxt *bp); +void bnxt_tx_queue_release_op(void *tx_queue); +int bnxt_tx_queue_setup_op(struct rte_eth_dev *eth_dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +#endif diff --git a/drivers/net/bnxt/bnxt_txr.c b/drivers/net/bnxt/bnxt_txr.c new file mode 100644 index 00000000..8bf8fee3 --- /dev/null +++ b/drivers/net/bnxt/bnxt_txr.c @@ -0,0 +1,339 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <inttypes.h> + +#include <rte_byteorder.h> +#include <rte_malloc.h> + +#include "bnxt.h" +#include "bnxt_cpr.h" +#include "bnxt_ring.h" +#include "bnxt_txq.h" +#include "bnxt_txr.h" +#include "hsi_struct_def_dpdk.h" +#include <stdbool.h> + +/* + * TX Ring handling + */ + +void bnxt_free_tx_rings(struct bnxt *bp) +{ + int i; + + for (i = 0; i < (int)bp->tx_nr_rings; i++) { + struct bnxt_tx_queue *txq = bp->tx_queues[i]; + + if (!txq) + continue; + + bnxt_free_ring(txq->tx_ring->tx_ring_struct); + rte_free(txq->tx_ring->tx_ring_struct); + rte_free(txq->tx_ring); + + bnxt_free_ring(txq->cp_ring->cp_ring_struct); + rte_free(txq->cp_ring->cp_ring_struct); + rte_free(txq->cp_ring); + + rte_free(txq); + bp->tx_queues[i] = NULL; + } +} + +int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq) +{ + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct bnxt_ring *ring = txr->tx_ring_struct; + + txq->tx_wake_thresh = ring->ring_size / 2; + ring->fw_ring_id = INVALID_HW_RING_ID; + + return 0; +} + +int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id) +{ + struct bnxt_cp_ring_info *cpr; + struct bnxt_tx_ring_info *txr; + struct bnxt_ring *ring; + + txr = rte_zmalloc_socket("bnxt_tx_ring", + sizeof(struct bnxt_tx_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (txr == NULL) + return -ENOMEM; + txq->tx_ring = txr; + + ring = rte_zmalloc_socket("bnxt_tx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + txr->tx_ring_struct = ring; + ring->ring_size = rte_align32pow2(txq->nb_tx_desc + 1); + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)txr->tx_desc_ring; + ring->bd_dma = txr->tx_desc_mapping; + ring->vmem_size = ring->ring_size * sizeof(struct bnxt_sw_tx_bd); + ring->vmem = (void **)&txr->tx_buf_ring; + + cpr = rte_zmalloc_socket("bnxt_tx_ring", + sizeof(struct bnxt_cp_ring_info), + RTE_CACHE_LINE_SIZE, socket_id); + if (cpr == NULL) + return -ENOMEM; + txq->cp_ring = cpr; + + ring = 
rte_zmalloc_socket("bnxt_tx_ring_struct", + sizeof(struct bnxt_ring), + RTE_CACHE_LINE_SIZE, socket_id); + if (ring == NULL) + return -ENOMEM; + cpr->cp_ring_struct = ring; + ring->ring_size = txr->tx_ring_struct->ring_size; + ring->ring_mask = ring->ring_size - 1; + ring->bd = (void *)cpr->cp_desc_ring; + ring->bd_dma = cpr->cp_desc_mapping; + ring->vmem_size = 0; + ring->vmem = NULL; + + return 0; +} + +static inline uint32_t bnxt_tx_avail(struct bnxt_tx_ring_info *txr) +{ + /* Tell compiler to fetch tx indices from memory. */ + rte_compiler_barrier(); + + return txr->tx_ring_struct->ring_size - + ((txr->tx_prod - txr->tx_cons) & + txr->tx_ring_struct->ring_mask) - 1; +} + +static uint16_t bnxt_start_xmit(struct rte_mbuf *tx_pkt, + struct bnxt_tx_queue *txq) +{ + struct bnxt_tx_ring_info *txr = txq->tx_ring; + struct tx_bd_long *txbd; + struct tx_bd_long_hi *txbd1; + uint32_t vlan_tag_flags, cfa_action; + bool long_bd = false; + uint16_t last_prod = 0; + struct rte_mbuf *m_seg; + struct bnxt_sw_tx_bd *tx_buf; + static const uint32_t lhint_arr[4] = { + TX_BD_LONG_FLAGS_LHINT_LT512, + TX_BD_LONG_FLAGS_LHINT_LT1K, + TX_BD_LONG_FLAGS_LHINT_LT2K, + TX_BD_LONG_FLAGS_LHINT_LT2K + }; + + if (tx_pkt->ol_flags & (PKT_TX_TCP_SEG | PKT_TX_TCP_CKSUM | + PKT_TX_UDP_CKSUM | PKT_TX_IP_CKSUM | + PKT_TX_VLAN_PKT)) + long_bd = true; + + tx_buf = &txr->tx_buf_ring[txr->tx_prod]; + tx_buf->mbuf = tx_pkt; + tx_buf->nr_bds = long_bd + tx_pkt->nb_segs; + last_prod = (txr->tx_prod + tx_buf->nr_bds - 1) & + txr->tx_ring_struct->ring_mask; + + if (unlikely(bnxt_tx_avail(txr) < tx_buf->nr_bds)) + return -ENOMEM; + + txbd = &txr->tx_desc_ring[txr->tx_prod]; + txbd->opaque = txr->tx_prod; + txbd->flags_type = tx_buf->nr_bds << TX_BD_LONG_FLAGS_BD_CNT_SFT; + txbd->len = tx_pkt->data_len; + if (txbd->len >= 2014) + txbd->flags_type |= TX_BD_LONG_FLAGS_LHINT_GTE2K; + else + txbd->flags_type |= lhint_arr[txbd->len >> 9]; + txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(tx_buf->mbuf)); + + if (long_bd) { + txbd->flags_type |= TX_BD_LONG_TYPE_TX_BD_LONG; + vlan_tag_flags = 0; + cfa_action = 0; + if (tx_buf->mbuf->ol_flags & PKT_TX_VLAN_PKT) { + /* shurd: Should this mask at + * TX_BD_LONG_CFA_META_VLAN_VID_MASK? 
+ */ + vlan_tag_flags = TX_BD_LONG_CFA_META_KEY_VLAN_TAG | + tx_buf->mbuf->vlan_tci; + /* Currently supports 8021Q, 8021AD vlan offloads + * QINQ1, QINQ2, QINQ3 vlan headers are deprecated + */ + /* DPDK only supports 802.11q VLAN packets */ + vlan_tag_flags |= + TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100; + } + + txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); + + txbd1 = (struct tx_bd_long_hi *) + &txr->tx_desc_ring[txr->tx_prod]; + txbd1->lflags = 0; + txbd1->cfa_meta = vlan_tag_flags; + txbd1->cfa_action = cfa_action; + + if (tx_pkt->ol_flags & PKT_TX_TCP_SEG) { + /* TSO */ + txbd1->lflags = TX_BD_LONG_LFLAGS_LSO; + txbd1->hdr_size = tx_pkt->l2_len + tx_pkt->l3_len + + tx_pkt->l4_len; + txbd1->mss = tx_pkt->tso_segsz; + + } else if (tx_pkt->ol_flags & (PKT_TX_TCP_CKSUM | + PKT_TX_UDP_CKSUM)) { + /* TCP/UDP CSO */ + txbd1->lflags = TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM; + txbd1->mss = 0; + + } else if (tx_pkt->ol_flags & PKT_TX_IP_CKSUM) { + /* IP CSO */ + txbd1->lflags = TX_BD_LONG_LFLAGS_IP_CHKSUM; + txbd1->mss = 0; + } + } else { + txbd->flags_type |= TX_BD_SHORT_TYPE_TX_BD_SHORT; + } + + m_seg = tx_pkt->next; + /* i is set at the end of the if(long_bd) block */ + while (txr->tx_prod != last_prod) { + txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); + tx_buf = &txr->tx_buf_ring[txr->tx_prod]; + + txbd = &txr->tx_desc_ring[txr->tx_prod]; + txbd->addr = rte_cpu_to_le_32(RTE_MBUF_DATA_DMA_ADDR(m_seg)); + txbd->flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT; + txbd->len = m_seg->data_len; + + m_seg = m_seg->next; + } + + txbd->flags_type |= TX_BD_LONG_FLAGS_PACKET_END; + + txr->tx_prod = RING_NEXT(txr->tx_ring_struct, txr->tx_prod); + + return 0; +} + +static void bnxt_tx_cmp(struct bnxt_tx_queue *txq, int nr_pkts) +{ + struct bnxt_tx_ring_info *txr = txq->tx_ring; + uint16_t cons = txr->tx_cons; + int i, j; + + for (i = 0; i < nr_pkts; i++) { + struct bnxt_sw_tx_bd *tx_buf; + struct rte_mbuf *mbuf; + + tx_buf = &txr->tx_buf_ring[cons]; + cons = RING_NEXT(txr->tx_ring_struct, cons); + mbuf = tx_buf->mbuf; + tx_buf->mbuf = NULL; + + /* EW - no need to unmap DMA memory? 
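 * Most likely not: DPDK mbuf data buffers come from mempools backed by
 * hugepage memory whose IOVA mapping stays fixed for the lifetime of the
 * process, so there is no per-packet DMA map/unmap step as in the Linux
 * kernel driver; returning the mbuf to its pool below is sufficient.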
*/ + + for (j = 1; j < tx_buf->nr_bds; j++) + cons = RING_NEXT(txr->tx_ring_struct, cons); + rte_pktmbuf_free(mbuf); + } + + txr->tx_cons = cons; +} + +static int bnxt_handle_tx_cp(struct bnxt_tx_queue *txq) +{ + struct bnxt_cp_ring_info *cpr = txq->cp_ring; + uint32_t raw_cons = cpr->cp_raw_cons; + uint32_t cons; + int nb_tx_pkts = 0; + struct tx_cmpl *txcmp; + + if ((txq->tx_ring->tx_ring_struct->ring_size - + (bnxt_tx_avail(txq->tx_ring))) > + txq->tx_free_thresh) { + while (1) { + cons = RING_CMP(cpr->cp_ring_struct, raw_cons); + txcmp = (struct tx_cmpl *)&cpr->cp_desc_ring[cons]; + + if (!CMP_VALID(txcmp, raw_cons, cpr->cp_ring_struct)) + break; + + if (CMP_TYPE(txcmp) == TX_CMPL_TYPE_TX_L2) + nb_tx_pkts++; + else + RTE_LOG(DEBUG, PMD, + "Unhandled CMP type %02x\n", + CMP_TYPE(txcmp)); + raw_cons = NEXT_RAW_CMP(raw_cons); + } + if (nb_tx_pkts) + bnxt_tx_cmp(txq, nb_tx_pkts); + cpr->cp_raw_cons = raw_cons; + B_CP_DIS_DB(cpr, cpr->cp_raw_cons); + } + return nb_tx_pkts; +} + +uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct bnxt_tx_queue *txq = tx_queue; + uint16_t nb_tx_pkts = 0; + uint16_t db_mask = txq->tx_ring->tx_ring_struct->ring_size >> 2; + uint16_t last_db_mask = 0; + + /* Handle TX completions */ + bnxt_handle_tx_cp(txq); + + /* Handle TX burst request */ + for (nb_tx_pkts = 0; nb_tx_pkts < nb_pkts; nb_tx_pkts++) { + if (bnxt_start_xmit(tx_pkts[nb_tx_pkts], txq)) { + break; + } else if ((nb_tx_pkts & db_mask) != last_db_mask) { + B_TX_DB(txq->tx_ring->tx_doorbell, + txq->tx_ring->tx_prod); + last_db_mask = nb_tx_pkts & db_mask; + } + } + if (nb_tx_pkts) + B_TX_DB(txq->tx_ring->tx_doorbell, txq->tx_ring->tx_prod); + + return nb_tx_pkts; +} diff --git a/drivers/net/bnxt/bnxt_txr.h b/drivers/net/bnxt/bnxt_txr.h new file mode 100644 index 00000000..2176acaa --- /dev/null +++ b/drivers/net/bnxt/bnxt_txr.h @@ -0,0 +1,71 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _BNXT_TXR_H_ +#define _BNXT_TXR_H_ + +#define MAX_TX_RINGS 16 +#define BNXT_TX_PUSH_THRESH 92 + +#define B_TX_DB(db, prod) \ + (*(uint32_t *)db = (DB_KEY_TX | prod)) + +struct bnxt_tx_ring_info { + uint16_t tx_prod; + uint16_t tx_cons; + void *tx_doorbell; + + struct tx_bd_long *tx_desc_ring; + struct bnxt_sw_tx_bd *tx_buf_ring; + + phys_addr_t tx_desc_mapping; + +#define BNXT_DEV_STATE_CLOSING 0x1 + uint32_t dev_state; + + struct bnxt_ring *tx_ring_struct; +}; + +struct bnxt_sw_tx_bd { + struct rte_mbuf *mbuf; /* mbuf associated with TX descriptor */ + uint8_t is_gso; + unsigned short nr_bds; +}; + +void bnxt_free_tx_rings(struct bnxt *bp); +int bnxt_init_one_tx_ring(struct bnxt_tx_queue *txq); +int bnxt_init_tx_ring_struct(struct bnxt_tx_queue *txq, unsigned int socket_id); +uint16_t bnxt_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +#endif diff --git a/drivers/net/bnxt/bnxt_vnic.c b/drivers/net/bnxt/bnxt_vnic.c new file mode 100644 index 00000000..c04c4c74 --- /dev/null +++ b/drivers/net/bnxt/bnxt_vnic.c @@ -0,0 +1,277 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Broadcom Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <inttypes.h> + +#include <rte_memzone.h> +#include <rte_malloc.h> + +#include "bnxt.h" +#include "bnxt_vnic.h" +#include "hsi_struct_def_dpdk.h" + +/* + * VNIC Functions + */ + +static void prandom_bytes(void *dest_ptr, size_t len) +{ + char *dest = (char *)dest_ptr; + uint64_t rb; + + while (len) { + rb = rte_rand(); + if (len >= 8) { + memcpy(dest, &rb, 8); + len -= 8; + dest += 8; + } else { + memcpy(dest, &rb, len); + dest += len; + len = 0; + } + } +} + +void bnxt_init_vnics(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + uint16_t max_vnics; + int i, j; + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + max_vnics = pf->max_vnics; + } else { + struct bnxt_vf_info *vf = &bp->vf; + + max_vnics = vf->max_vnics; + } + STAILQ_INIT(&bp->free_vnic_list); + for (i = 0; i < max_vnics; i++) { + vnic = &bp->vnic_info[i]; + vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE; + vnic->fw_rss_cos_lb_ctx = (uint16_t)HWRM_NA_SIGNATURE; + vnic->ctx_is_rss_cos_lb = HW_CONTEXT_NONE; + + for (j = 0; j < MAX_QUEUES_PER_VNIC; j++) + vnic->fw_grp_ids[j] = (uint16_t)HWRM_NA_SIGNATURE; + + prandom_bytes(vnic->rss_hash_key, HW_HASH_KEY_SIZE); + STAILQ_INIT(&vnic->filter); + STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, next); + } + for (i = 0; i < MAX_FF_POOLS; i++) + STAILQ_INIT(&bp->ff_pool[i]); +} + +int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int pool) +{ + struct bnxt_vnic_info *temp; + + temp = STAILQ_FIRST(&bp->ff_pool[pool]); + while (temp) { + if (temp == vnic) { + STAILQ_REMOVE(&bp->ff_pool[pool], vnic, + bnxt_vnic_info, next); + vnic->fw_vnic_id = (uint16_t)HWRM_NA_SIGNATURE; + STAILQ_INSERT_TAIL(&bp->free_vnic_list, vnic, + next); + return 0; + } + temp = STAILQ_NEXT(temp, next); + } + RTE_LOG(ERR, PMD, "VNIC %p is not found in pool[%d]\n", vnic, pool); + return -EINVAL; +} + +struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + + /* Find the 1st unused vnic from the free_vnic_list pool*/ + vnic = STAILQ_FIRST(&bp->free_vnic_list); + if (!vnic) { + RTE_LOG(ERR, PMD, "No more free VNIC resources\n"); + return NULL; + } + STAILQ_REMOVE_HEAD(&bp->free_vnic_list, next); + return vnic; +} + +void bnxt_free_all_vnics(struct bnxt *bp) +{ + struct bnxt_vnic_info *temp, *next; + int i; + + for (i = 0; i < MAX_FF_POOLS; i++) { + temp = STAILQ_FIRST(&bp->ff_pool[i]); + while (temp) { + next = STAILQ_NEXT(temp, next); + STAILQ_REMOVE(&bp->ff_pool[i], temp, bnxt_vnic_info, + next); + STAILQ_INSERT_TAIL(&bp->free_vnic_list, temp, next); + temp = next; + } + } +} + +void bnxt_free_vnic_attributes(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + + STAILQ_FOREACH(vnic, &bp->free_vnic_list, next) { + if (vnic->rss_table) { + /* 'Unreserve' the rss_table */ + /* N/A */ + + vnic->rss_table = NULL; + } + + if (vnic->rss_hash_key) { + /* 'Unreserve' the rss_hash_key */ + /* N/A */ + + vnic->rss_hash_key = NULL; + } + } +} + +int bnxt_alloc_vnic_attributes(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + struct rte_pci_device *pdev = bp->pdev; + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + int entry_length = RTE_CACHE_LINE_ROUNDUP( + HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table) + + HW_HASH_KEY_SIZE); + uint16_t max_vnics; + int i; + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + max_vnics = pf->max_vnics; + } else { + struct bnxt_vf_info *vf = &bp->vf; + + max_vnics = vf->max_vnics; + } + snprintf(mz_name, RTE_MEMZONE_NAMESIZE, + "bnxt_%04x:%02x:%02x:%02x_vnicattr", 
pdev->addr.domain, + pdev->addr.bus, pdev->addr.devid, pdev->addr.function); + mz_name[RTE_MEMZONE_NAMESIZE - 1] = 0; + mz = rte_memzone_lookup(mz_name); + if (!mz) { + mz = rte_memzone_reserve(mz_name, + entry_length * max_vnics, + SOCKET_ID_ANY, + RTE_MEMZONE_2MB | + RTE_MEMZONE_SIZE_HINT_ONLY); + if (!mz) + return -ENOMEM; + } + + for (i = 0; i < max_vnics; i++) { + vnic = &bp->vnic_info[i]; + + /* Allocate rss table and hash key */ + vnic->rss_table = + (void *)((char *)mz->addr + (entry_length * i)); + memset(vnic->rss_table, -1, entry_length); + + vnic->rss_table_dma_addr = mz->phys_addr + (entry_length * i); + vnic->rss_hash_key = (void *)((char *)vnic->rss_table + + HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table)); + + vnic->rss_hash_key_dma_addr = vnic->rss_table_dma_addr + + HW_HASH_INDEX_SIZE * sizeof(*vnic->rss_table); + } + + return 0; +} + +void bnxt_free_vnic_mem(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic; + uint16_t max_vnics, i; + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + max_vnics = pf->max_vnics; + } else { + struct bnxt_vf_info *vf = &bp->vf; + + max_vnics = vf->max_vnics; + } + for (i = 0; i < max_vnics; i++) { + vnic = &bp->vnic_info[i]; + if (vnic->fw_vnic_id != (uint16_t)HWRM_NA_SIGNATURE) { + RTE_LOG(ERR, PMD, "VNIC is not freed yet!\n"); + /* TODO Call HWRM to free VNIC */ + } + } + + rte_free(bp->vnic_info); + bp->vnic_info = NULL; +} + +int bnxt_alloc_vnic_mem(struct bnxt *bp) +{ + struct bnxt_vnic_info *vnic_mem; + uint16_t max_vnics; + + if (BNXT_PF(bp)) { + struct bnxt_pf_info *pf = &bp->pf; + + max_vnics = pf->max_vnics; + } else { + struct bnxt_vf_info *vf = &bp->vf; + + max_vnics = vf->max_vnics; + } + /* Allocate memory for VNIC pool and filter pool */ + vnic_mem = rte_zmalloc("bnxt_vnic_info", + max_vnics * sizeof(struct bnxt_vnic_info), 0); + if (vnic_mem == NULL) { + RTE_LOG(ERR, PMD, "Failed to alloc memory for %d VNICs", + max_vnics); + return -ENOMEM; + } + bp->vnic_info = vnic_mem; + return 0; +} diff --git a/drivers/net/bnxt/bnxt_vnic.h b/drivers/net/bnxt/bnxt_vnic.h new file mode 100644 index 00000000..9671ba42 --- /dev/null +++ b/drivers/net/bnxt/bnxt_vnic.h @@ -0,0 +1,80 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2014-2015 Broadcom Corporation. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _BNXT_VNIC_H_ +#define _BNXT_VNIC_H_ + +#include <sys/queue.h> +#include <stdbool.h> + +struct bnxt_vnic_info { + STAILQ_ENTRY(bnxt_vnic_info) next; + uint8_t ff_pool_idx; + + uint16_t fw_vnic_id; /* returned by Chimp during alloc */ + uint16_t fw_rss_cos_lb_ctx; + uint16_t ctx_is_rss_cos_lb; +#define MAX_NUM_TRAFFIC_CLASSES 8 +#define MAX_NUM_RSS_QUEUES_PER_VNIC 16 +#define MAX_QUEUES_PER_VNIC (MAX_NUM_RSS_QUEUES_PER_VNIC + \ + MAX_NUM_TRAFFIC_CLASSES) + uint16_t start_grp_id; + uint16_t end_grp_id; + uint16_t fw_grp_ids[MAX_QUEUES_PER_VNIC]; + uint16_t hash_type; + phys_addr_t rss_table_dma_addr; + uint16_t *rss_table; + phys_addr_t rss_hash_key_dma_addr; + void *rss_hash_key; + uint32_t flags; +#define BNXT_VNIC_INFO_PROMISC (1 << 0) +#define BNXT_VNIC_INFO_ALLMULTI (1 << 1) + + bool vlan_strip; + bool func_default; + + STAILQ_HEAD(, bnxt_filter_info) filter; +}; + +struct bnxt; +void bnxt_init_vnics(struct bnxt *bp); +int bnxt_free_vnic(struct bnxt *bp, struct bnxt_vnic_info *vnic, + int pool); +struct bnxt_vnic_info *bnxt_alloc_vnic(struct bnxt *bp); +void bnxt_free_all_vnics(struct bnxt *bp); +void bnxt_free_vnic_attributes(struct bnxt *bp); +int bnxt_alloc_vnic_attributes(struct bnxt *bp); +void bnxt_free_vnic_mem(struct bnxt *bp); +int bnxt_alloc_vnic_mem(struct bnxt *bp); + +#endif diff --git a/drivers/net/bnxt/hsi_struct_def_dpdk.h b/drivers/net/bnxt/hsi_struct_def_dpdk.h new file mode 100644 index 00000000..f2db3eaf --- /dev/null +++ b/drivers/net/bnxt/hsi_struct_def_dpdk.h @@ -0,0 +1,5799 @@ +/*- + * BSD LICENSE + * + * Copyright(c) Broadcom Limited. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Broadcom Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _HSI_STRUCT_DEF_EXTERNAL_H_ +#define _HSI_STRUCT_DEF_EXTERNAL_H_ + +/* + * per-context HW statistics -- chip view + */ + +struct ctx_hw_stats64 { + uint64_t rx_ucast_pkts; + uint64_t rx_mcast_pkts; + uint64_t rx_bcast_pkts; + uint64_t rx_drop_pkts; + uint64_t rx_err_pkts; + uint64_t rx_ucast_bytes; + uint64_t rx_mcast_bytes; + uint64_t rx_bcast_bytes; + + uint64_t tx_ucast_pkts; + uint64_t tx_mcast_pkts; + uint64_t tx_bcast_pkts; + uint64_t tx_drop_pkts; + uint64_t tx_err_pkts; + uint64_t tx_ucast_bytes; + uint64_t tx_mcast_bytes; + uint64_t tx_bcast_bytes; + + uint64_t tpa_pkts; + uint64_t tpa_bytes; + uint64_t tpa_events; + uint64_t tpa_aborts; +} ctx_hw_stats64_t; + +/* HW Resource Manager Specification 1.2.0 */ +#define HWRM_VERSION_MAJOR 1 +#define HWRM_VERSION_MINOR 2 +#define HWRM_VERSION_UPDATE 0 + +/* + * Following is the signature for HWRM message field that indicates not + * applicable (All F's). Need to cast it the size of the field if needed. + */ +#define HWRM_NA_SIGNATURE ((uint32_t)(-1)) +#define HWRM_MAX_REQ_LEN (128) /* hwrm_func_buf_rgtr */ +#define HWRM_MAX_RESP_LEN (176) /* hwrm_func_qstats */ +#define HW_HASH_INDEX_SIZE 0x80 /* 7 bit indirection table index. */ +#define HW_HASH_KEY_SIZE 40 +#define HWRM_RESP_VALID_KEY 1 /* valid key for HWRM response */ + +/* + * Request types + */ +#define HWRM_VER_GET (UINT32_C(0x0)) +#define HWRM_FUNC_RESET (UINT32_C(0x11)) +#define HWRM_FUNC_QCAPS (UINT32_C(0x15)) +#define HWRM_FUNC_DRV_UNRGTR (UINT32_C(0x1a)) +#define HWRM_FUNC_DRV_RGTR (UINT32_C(0x1d)) +#define HWRM_PORT_PHY_CFG (UINT32_C(0x20)) +#define HWRM_PORT_PHY_QCFG (UINT32_C(0x27)) +#define HWRM_QUEUE_QPORTCFG (UINT32_C(0x30)) +#define HWRM_VNIC_ALLOC (UINT32_C(0x40)) +#define HWRM_VNIC_FREE (UINT32_C(0x41)) +#define HWRM_VNIC_CFG (UINT32_C(0x42)) +#define HWRM_VNIC_RSS_CFG (UINT32_C(0x46)) +#define HWRM_RING_ALLOC (UINT32_C(0x50)) +#define HWRM_RING_FREE (UINT32_C(0x51)) +#define HWRM_RING_GRP_ALLOC (UINT32_C(0x60)) +#define HWRM_RING_GRP_FREE (UINT32_C(0x61)) +#define HWRM_VNIC_RSS_COS_LB_CTX_ALLOC (UINT32_C(0x70)) +#define HWRM_VNIC_RSS_COS_LB_CTX_FREE (UINT32_C(0x71)) +#define HWRM_CFA_L2_FILTER_ALLOC (UINT32_C(0x90)) +#define HWRM_CFA_L2_FILTER_FREE (UINT32_C(0x91)) +#define HWRM_CFA_L2_FILTER_CFG (UINT32_C(0x92)) +#define HWRM_CFA_L2_SET_RX_MASK (UINT32_C(0x93)) +#define HWRM_STAT_CTX_ALLOC (UINT32_C(0xb0)) +#define HWRM_STAT_CTX_FREE (UINT32_C(0xb1)) +#define HWRM_STAT_CTX_CLR_STATS (UINT32_C(0xb3)) +#define HWRM_EXEC_FWD_RESP (UINT32_C(0xd0)) + +/* Return Codes */ +#define HWRM_ERR_CODE_INVALID_PARAMS (UINT32_C(0x2)) +#define HWRM_ERR_CODE_RESOURCE_ACCESS_DENIED (UINT32_C(0x3)) + +/* Short TX BD (16 bytes) */ +struct tx_bd_short { + /* + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs of a + * packet. + */ + /* This value identifies the type of buffer descriptor. 
*/ + #define TX_BD_SHORT_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_SHORT_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is used for normal L2 + * packet transmission. + */ + #define TX_BD_SHORT_TYPE_TX_BD_SHORT (UINT32_C(0x0) << 0) + /* + * If set to 1, the packet ends with the data in the buffer pointed to + * by this descriptor. This flag must be valid on every BD. + */ + #define TX_BD_SHORT_FLAGS_PACKET_END UINT32_C(0x40) + /* + * If set to 1, the device will not generate a completion for this + * transmit packet unless there is an error in it's processing. If this + * bit is set to 0, then the packet will be completed normally. This bit + * must be valid only on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_NO_CMPL UINT32_C(0x80) + /* + * This value indicates how many 16B BD locations are consumed in the + * ring by this packet. A value of 1 indicates that this BD is the only + * BD (and that the it is a short BD). A value of 3 indicates either 3 + * short BDs or 1 long BD and one short BD in the packet. A value of 0 + * indicates that there are 32 BD locations in the packet (the maximum). + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) + #define TX_BD_SHORT_FLAGS_BD_CNT_SFT 8 + /* + * This value is a hint for the length of the entire packet. It is used + * by the chip to optimize internal processing. The packet will be + * dropped if the hint is too short. This field is valid only on the + * first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_LHINT_MASK UINT32_C(0x6000) + #define TX_BD_SHORT_FLAGS_LHINT_SFT 13 + /* indicates packet length < 512B */ + #define TX_BD_SHORT_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) + /* indicates 512 <= packet length < 1KB */ + #define TX_BD_SHORT_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) + /* indicates 1KB <= packet length < 2KB */ + #define TX_BD_SHORT_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) + /* indicates packet length >= 2KB */ + #define TX_BD_SHORT_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) + #define TX_BD_SHORT_FLAGS_LHINT_LAST TX_BD_SHORT_FLAGS_LHINT_GTE2K + /* + * If set to 1, the device immediately updates the Send Consumer Index + * after the buffer associated with this descriptor has been transferred + * via DMA to NIC memory from host memory. An interrupt may or may not + * be generated according to the state of the interrupt avoidance + * mechanisms. If this bit is set to 0, then the Consumer Index is only + * updated as soon as one of the host interrupt coalescing conditions + * has been met. This bit must be valid on the first BD of a packet. + */ + #define TX_BD_SHORT_FLAGS_COAL_NOW UINT32_C(0x8000) + /* + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs of a + * packet. + */ + #define TX_BD_SHORT_FLAGS_MASK UINT32_C(0xffc0) + #define TX_BD_SHORT_FLAGS_SFT 6 + uint16_t flags_type; + + /* + * This is the length of the host physical buffer this BD describes in + * bytes. This field must be valid on all BDs of a packet. + */ + uint16_t len; + /* + * The opaque data field is pass through to the completion and can be + * used for any data that the driver wants to associate with the + * transmit BD. This field must be valid on the first BD of a packet. + */ + uint32_t opaque; + + /* + * This is the host physical address for the portion of the packet + * described by this TX BD. This value must be valid on all BDs of a + * packet. 
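 *
 * Rough worked example for the fields above (illustrative, not part of
 * this patch): a single-segment 800-byte packet sent as one short BD
 * would be described by
 *     flags_type = TX_BD_SHORT_TYPE_TX_BD_SHORT |
 *         (1 << TX_BD_SHORT_FLAGS_BD_CNT_SFT) |
 *         TX_BD_SHORT_FLAGS_LHINT_LT1K |
 *         TX_BD_SHORT_FLAGS_PACKET_END;
 *     len = 800;
 * with opaque and addr filled from the corresponding mbuf.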
+ */ + uint64_t addr; +} __attribute__((packed)); + +/* Long TX BD (32 bytes split to 2 16-byte struct) */ +struct tx_bd_long { + /* + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs of a + * packet. + */ + /* This value identifies the type of buffer descriptor. */ + #define TX_BD_LONG_TYPE_MASK UINT32_C(0x3f) + #define TX_BD_LONG_TYPE_SFT 0 + /* + * Indicates that this BD is 32B long and is used for normal L2 + * packet transmission. + */ + #define TX_BD_LONG_TYPE_TX_BD_LONG (UINT32_C(0x10) << 0) + /* + * If set to 1, the packet ends with the data in the buffer pointed to + * by this descriptor. This flag must be valid on every BD. + */ + #define TX_BD_LONG_FLAGS_PACKET_END UINT32_C(0x40) + /* + * If set to 1, the device will not generate a completion for this + * transmit packet unless there is an error in it's processing. If this + * bit is set to 0, then the packet will be completed normally. This bit + * must be valid only on the first BD of a packet. + */ + #define TX_BD_LONG_FLAGS_NO_CMPL UINT32_C(0x80) + /* + * This value indicates how many 16B BD locations are consumed in the + * ring by this packet. A value of 1 indicates that this BD is the only + * BD (and that the it is a short BD). A value of 3 indicates either 3 + * short BDs or 1 long BD and one short BD in the packet. A value of 0 + * indicates that there are 32 BD locations in the packet (the maximum). + * This field is valid only on the first BD of a packet. + */ + #define TX_BD_LONG_FLAGS_BD_CNT_MASK UINT32_C(0x1f00) + #define TX_BD_LONG_FLAGS_BD_CNT_SFT 8 + /* + * This value is a hint for the length of the entire packet. It is used + * by the chip to optimize internal processing. The packet will be + * dropped if the hint is too short. This field is valid only on the + * first BD of a packet. + */ + #define TX_BD_LONG_FLAGS_LHINT_MASK UINT32_C(0x6000) + #define TX_BD_LONG_FLAGS_LHINT_SFT 13 + /* indicates packet length < 512B */ + #define TX_BD_LONG_FLAGS_LHINT_LT512 (UINT32_C(0x0) << 13) + /* indicates 512 <= packet length < 1KB */ + #define TX_BD_LONG_FLAGS_LHINT_LT1K (UINT32_C(0x1) << 13) + /* indicates 1KB <= packet length < 2KB */ + #define TX_BD_LONG_FLAGS_LHINT_LT2K (UINT32_C(0x2) << 13) + /* indicates packet length >= 2KB */ + #define TX_BD_LONG_FLAGS_LHINT_GTE2K (UINT32_C(0x3) << 13) + #define TX_BD_LONG_FLAGS_LHINT_LAST TX_BD_LONG_FLAGS_LHINT_GTE2K + /* + * If set to 1, the device immediately updates the Send Consumer Index + * after the buffer associated with this descriptor has been transferred + * via DMA to NIC memory from host memory. An interrupt may or may not + * be generated according to the state of the interrupt avoidance + * mechanisms. If this bit is set to 0, then the Consumer Index is only + * updated as soon as one of the host interrupt coalescing conditions + * has been met. This bit must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_FLAGS_COAL_NOW UINT32_C(0x8000) + /* + * All bits in this field must be valid on the first BD of a packet. + * Only the packet_end bit must be valid for the remaining BDs of a + * packet. + */ + #define TX_BD_LONG_FLAGS_MASK UINT32_C(0xffc0) + #define TX_BD_LONG_FLAGS_SFT 6 + uint16_t flags_type; + + /* + * This is the length of the host physical buffer this BD describes in + * bytes. This field must be valid on all BDs of a packet. 
+ */ + uint16_t len; + + /* + * The opaque data field is pass through to the completion and can be + * used for any data that the driver wants to associate with the + * transmit BD. This field must be valid on the first BD of a packet. + */ + uint32_t opaque; + + /* + * This is the host physical address for the portion of the packet + * described by this TX BD. This value must be valid on all BDs of a + * packet. + */ + uint64_t addr; +} __attribute__((packed)); + +/* last 16 bytes of Long TX BD */ + +struct tx_bd_long_hi { + /* + * All bits in this field must be valid on the first BD of a packet. + * Their value on other BDs of the packet will be ignored. + */ + /* + * If set to 1, the controller replaces the TCP/UPD checksum fields of + * normal TCP/UPD checksum, or the inner TCP/UDP checksum field of the + * encapsulated TCP/UDP packets with the hardware calculated TCP/UDP + * checksum for the packet associated with this descriptor. This bit + * must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_LFLAGS_TCP_UDP_CHKSUM UINT32_C(0x1) + /* + * If set to 1, the controller replaces the IP checksum of the normal + * packets, or the inner IP checksum of the encapsulated packets with + * the hardware calculated IP checksum for the packet associated with + * this descriptor. This bit must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_LFLAGS_IP_CHKSUM UINT32_C(0x2) + /* + * If set to 1, the controller will not append an Ethernet CRC to the + * end of the frame. This bit must be valid on the first BD of a packet. + * Packet must be 64B or longer when this flag is set. It is not useful + * to use this bit with any form of TX offload such as CSO or LSO. The + * intent is that the packet from the host already has a valid Ethernet + * CRC on the packet. + */ + #define TX_BD_LONG_LFLAGS_NOCRC UINT32_C(0x4) + /* + * If set to 1, the device will record the time at which the packet was + * actually transmitted at the TX MAC. This bit must be valid on the + * first BD of a packet. + */ + #define TX_BD_LONG_LFLAGS_STAMP UINT32_C(0x8) + /* + * If set to 1, The controller replaces the tunnel IP checksum field + * with hardware calculated IP checksum for the IP header of the packet + * associated with this descriptor. In case of VXLAN, the controller + * also replaces the outer header UDP checksum with hardware calculated + * UDP checksum for the packet associated with this descriptor. + */ + #define TX_BD_LONG_LFLAGS_T_IP_CHKSUM UINT32_C(0x10) + /* + * If set to 1, the device will treat this packet with LSO(Large Send + * Offload) processing for both normal or encapsulated packets, which is + * a form of TCP segmentation. When this bit is 1, the hdr_size and mss + * fields must be valid. The driver doesn't need to set t_ip_chksum, + * ip_chksum, and tcp_udp_chksum flags since the controller will replace + * the appropriate checksum fields for segmented packets. When this bit + * is 1, the hdr_size and mss fields must be valid. + */ + #define TX_BD_LONG_LFLAGS_LSO UINT32_C(0x20) + /* + * If set to zero when LSO is '1', then the IPID will be treated as a + * 16b number and will be wrapped if it exceeds a value of 0xffff. If + * set to one when LSO is '1', then the IPID will be treated as a 15b + * number and will be wrapped if it exceeds a value 0f 0x7fff. + */ + #define TX_BD_LONG_LFLAGS_IPID_FMT UINT32_C(0x40) + /* + * If set to zero when LSO is '1', then the IPID of the tunnel IP header + * will not be modified during LSO operations. 
If set to one when LSO is + * '1', then the IPID of the tunnel IP header will be incremented for + * each subsequent segment of an LSO operation. + */ + #define TX_BD_LONG_LFLAGS_T_IPID UINT32_C(0x80) + /* + * If set to '1', then the RoCE ICRC will be appended to the packet. + * Packet must be a valid RoCE format packet. + */ + #define TX_BD_LONG_LFLAGS_ROCE_CRC UINT32_C(0x100) + /* + * If set to '1', then the FCoE CRC will be appended to the packet. + * Packet must be a valid FCoE format packet. + */ + #define TX_BD_LONG_LFLAGS_FCOE_CRC UINT32_C(0x200) + uint16_t lflags; + + /* + * When LSO is '1', this field must contain the offset of the TCP + * payload from the beginning of the packet in as 16b words. In case of + * encapsulated/tunneling packet, this field contains the offset of the + * inner TCP payload from beginning of the packet as 16-bit words. This + * value must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_HDR_SIZE_MASK UINT32_C(0x1ff) + #define TX_BD_LONG_HDR_SIZE_SFT 0 + uint16_t hdr_size; + + /* + * This is the MSS value that will be used to do the LSO processing. The + * value is the length in bytes of the TCP payload for each segment + * generated by the LSO operation. This value must be valid on the first + * BD of a packet. + */ + #define TX_BD_LONG_MSS_MASK UINT32_C(0x7fff) + #define TX_BD_LONG_MSS_SFT 0 + uint32_t mss; + + uint16_t unused_2; + + /* + * This value selects a CFA action to perform on the packet. Set this + * value to zero if no CFA action is desired. This value must be valid + * on the first BD of a packet. + */ + uint16_t cfa_action; + + /* + * This value is action meta-data that defines CFA edit operations that + * are done in addition to any action editing. + */ + /* When key=1, This is the VLAN tag VID value. */ + #define TX_BD_LONG_CFA_META_VLAN_VID_MASK UINT32_C(0xfff) + #define TX_BD_LONG_CFA_META_VLAN_VID_SFT 0 + /* When key=1, This is the VLAN tag DE value. */ + #define TX_BD_LONG_CFA_META_VLAN_DE UINT32_C(0x1000) + /* When key=1, This is the VLAN tag PRI value. */ + #define TX_BD_LONG_CFA_META_VLAN_PRI_MASK UINT32_C(0xe000) + #define TX_BD_LONG_CFA_META_VLAN_PRI_SFT 13 + /* When key=1, This is the VLAN tag TPID select value. */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_MASK UINT32_C(0x70000) + #define TX_BD_LONG_CFA_META_VLAN_TPID_SFT 16 + /* 0x88a8 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID88A8 (UINT32_C(0x0) << 16) + /* 0x8100 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID8100 (UINT32_C(0x1) << 16) + /* 0x9100 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9100 (UINT32_C(0x2) << 16) + /* 0x9200 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9200 (UINT32_C(0x3) << 16) + /* 0x9300 */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPID9300 (UINT32_C(0x4) << 16) + /* Value programmed in CFA VLANTPID register. */ + #define TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG (UINT32_C(0x5) << 16) + #define TX_BD_LONG_CFA_META_VLAN_TPID_LAST \ + TX_BD_LONG_CFA_META_VLAN_TPID_TPIDCFG + /* When key=1, This is the VLAN tag TPID select value. */ + #define TX_BD_LONG_CFA_META_VLAN_RESERVED_MASK UINT32_C(0xff80000) + #define TX_BD_LONG_CFA_META_VLAN_RESERVED_SFT 19 + /* + * This field identifies the type of edit to be performed on the packet. + * This value must be valid on the first BD of a packet. + */ + #define TX_BD_LONG_CFA_META_KEY_MASK UINT32_C(0xf0000000) + #define TX_BD_LONG_CFA_META_KEY_SFT 28 + /* No editing */ + #define TX_BD_LONG_CFA_META_KEY_NONE (UINT32_C(0x0) << 28) + /* + * - meta[17:16] - TPID select value (0 = 0x8100). 
- meta[15:12] + * - PRI/DE value. - meta[11:0] - VID value. + */ + #define TX_BD_LONG_CFA_META_KEY_VLAN_TAG (UINT32_C(0x1) << 28) + #define TX_BD_LONG_CFA_META_KEY_LAST TX_BD_LONG_CFA_META_KEY_VLAN_TAG + uint32_t cfa_meta; +} __attribute__((packed)); + +/* RX Producer Packet BD (16 bytes) */ +struct rx_prod_pkt_bd { + /* This value identifies the type of buffer descriptor. */ + #define RX_PROD_PKT_BD_TYPE_MASK UINT32_C(0x3f) + #define RX_PROD_PKT_BD_TYPE_SFT 0 + /* + * Indicates that this BD is 16B long and is an RX Producer (ie. + * empty) buffer descriptor. + */ + #define RX_PROD_PKT_BD_TYPE_RX_PROD_PKT (UINT32_C(0x4) << 0) + /* + * If set to 1, the packet will be placed at the address plus 2B. The 2 + * Bytes of padding will be written as zero. + */ + /* + * This is intended to be used when the host buffer is cache-line + * aligned to produce packets that are easy to parse in host memory + * while still allowing writes to be cache line aligned. + */ + #define RX_PROD_PKT_BD_FLAGS_SOP_PAD UINT32_C(0x40) + /* + * If set to 1, the packet write will be padded out to the nearest + * cache-line with zero value padding. + */ + /* + * If receive buffers start/end on cache-line boundaries, this feature + * will ensure that all data writes on the PCI bus start/end on cache + * line boundaries. + */ + #define RX_PROD_PKT_BD_FLAGS_EOP_PAD UINT32_C(0x80) + /* + * This value is the number of additional buffers in the ring that + * describe the buffer space to be consumed for the this packet. If the + * value is zero, then the packet must fit within the space described by + * this BD. If this value is 1 or more, it indicates how many additional + * "buffer" BDs are in the ring immediately following this BD to be used + * for the same network packet. Even if the packet to be placed does not + * need all the additional buffers, they will be consumed anyway. + */ + #define RX_PROD_PKT_BD_FLAGS_BUFFERS_MASK UINT32_C(0x300) + #define RX_PROD_PKT_BD_FLAGS_BUFFERS_SFT 8 + #define RX_PROD_PKT_BD_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PROD_PKT_BD_FLAGS_SFT 6 + uint16_t flags_type; + + /* + * This is the length in Bytes of the host physical buffer where data + * for the packet may be placed in host memory. + */ + /* + * While this is a Byte resolution value, it is often advantageous to + * ensure that the buffers provided end on a host cache line. + */ + uint16_t len; + + /* + * The opaque data field is pass through to the completion and can be + * used for any data that the driver wants to associate with this + * receive buffer set. + */ + uint32_t opaque; + + /* + * This is the host physical address where data for the packet may by + * placed in host memory. + */ + /* + * While this is a Byte resolution value, it is often advantageous to + * ensure that the buffers provide start on a host cache line. + */ + uint64_t addr; +} __attribute__((packed)); + +/* Completion Ring Structures */ +/* Note: This structure is used by the HWRM to communicate HWRM Error. */ +/* Base Completion Record (16 bytes) */ +struct cmpl_base { + /* unused is 10 b */ + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records. + */ + #define CMPL_BASE_TYPE_MASK UINT32_C(0x3f) + #define CMPL_BASE_TYPE_SFT 0 + /* TX L2 completion: Completion of TX packet. Length = 16B */ + #define CMPL_BASE_TYPE_TX_L2 (UINT32_C(0x0) << 0) + /* + * RX L2 completion: Completion of and L2 RX packet. 
+ * Length = 32B + */ + #define CMPL_BASE_TYPE_RX_L2 (UINT32_C(0x11) << 0) + /* + * RX Aggregation Buffer completion : Completion of an L2 + * aggregation buffer in support of TPA, HDS, or Jumbo packet + * completion. Length = 16B + */ + #define CMPL_BASE_TYPE_RX_AGG (UINT32_C(0x12) << 0) + /* + * RX L2 TPA Start Completion: Completion at the beginning of a + * TPA operation. Length = 32B + */ + #define CMPL_BASE_TYPE_RX_TPA_START (UINT32_C(0x13) << 0) + /* + * RX L2 TPA End Completion: Completion at the end of a TPA + * operation. Length = 32B + */ + #define CMPL_BASE_TYPE_RX_TPA_END (UINT32_C(0x15) << 0) + /* + * Statistics Ejection Completion: Completion of statistics data + * ejection buffer. Length = 16B + */ + #define CMPL_BASE_TYPE_STAT_EJECT (UINT32_C(0x1a) << 0) + /* HWRM Command Completion: Completion of an HWRM command. */ + #define CMPL_BASE_TYPE_HWRM_DONE (UINT32_C(0x20) << 0) + /* Forwarded HWRM Request */ + #define CMPL_BASE_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0) + /* Forwarded HWRM Response */ + #define CMPL_BASE_TYPE_HWRM_FWD_RESP (UINT32_C(0x24) << 0) + /* HWRM Asynchronous Event Information */ + #define CMPL_BASE_TYPE_HWRM_ASYNC_EVENT (UINT32_C(0x2e) << 0) + /* CQ Notification */ + #define CMPL_BASE_TYPE_CQ_NOTIFICATION (UINT32_C(0x30) << 0) + /* SRQ Threshold Event */ + #define CMPL_BASE_TYPE_SRQ_EVENT (UINT32_C(0x32) << 0) + /* DBQ Threshold Event */ + #define CMPL_BASE_TYPE_DBQ_EVENT (UINT32_C(0x34) << 0) + /* QP Async Notification */ + #define CMPL_BASE_TYPE_QP_EVENT (UINT32_C(0x38) << 0) + /* Function Async Notification */ + #define CMPL_BASE_TYPE_FUNC_EVENT (UINT32_C(0x3a) << 0) + uint16_t type; + + uint16_t info1; + uint32_t info2; + + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + #define CMPL_BASE_V UINT32_C(0x1) + /* info3 is 31 b */ + #define CMPL_BASE_INFO3_MASK UINT32_C(0xfffffffe) + #define CMPL_BASE_INFO3_SFT 1 + uint32_t info3_v; + + uint32_t info4; +} __attribute__((packed)); + +/* TX Completion Record (16 bytes) */ +struct tx_cmpl { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records. + */ + #define TX_CMPL_TYPE_MASK UINT32_C(0x3f) + #define TX_CMPL_TYPE_SFT 0 + /* TX L2 completion: Completion of TX packet. Length = 16B */ + #define TX_CMPL_TYPE_TX_L2 (UINT32_C(0x0) << 0) + /* + * When this bit is '1', it indicates a packet that has an error of some + * type. Type of error is indicated in error_flags. + */ + #define TX_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* + * When this bit is '1', it indicates that the packet completed was + * transmitted using the push acceleration data provided by the driver. + * When this bit is '0', it indicates that the packet had not push + * acceleration data written or was executed as a normal packet even + * though push data was provided. + */ + #define TX_CMPL_FLAGS_PUSH UINT32_C(0x80) + #define TX_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define TX_CMPL_FLAGS_SFT 6 + uint16_t flags_type; + + uint16_t unused_0; + + /* + * This is a copy of the opaque field from the first TX BD of this + * transmitted packet. + */ + uint32_t opaque; + + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. 
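 * Conceptually (illustrative, not part of this patch), a consumer treats
 * an entry as valid when this bit matches the phase expected for the
 * current pass over the ring, roughly
 *     valid = !!(errors_v & TX_CMPL_V) == !(raw_cons & ring_size);
 * which is broadly what the CMP_VALID() test used by bnxt_handle_tx_cp()
 * checks before counting a TX completion.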
+ */ + #define TX_CMPL_V UINT32_C(0x1) + /* + * This error indicates that there was some sort of problem with the BDs + * for the packet. + */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define TX_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No error */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_NO_ERROR (UINT32_C(0x0) << 1) + /* Bad Format: BDs were not formatted correctly. */ + #define TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT (UINT32_C(0x2) << 1) + #define TX_CMPL_ERRORS_BUFFER_ERROR_LAST \ + TX_CMPL_ERRORS_BUFFER_ERROR_BAD_FMT + /* + * When this bit is '1', it indicates that the length of the packet was + * zero. No packet was transmitted. + */ + #define TX_CMPL_ERRORS_ZERO_LENGTH_PKT UINT32_C(0x10) + /* + * When this bit is '1', it indicates that the packet was longer than + * the programmed limit in TDI. No packet was transmitted. + */ + #define TX_CMPL_ERRORS_EXCESSIVE_BD_LENGTH UINT32_C(0x20) + /* + * When this bit is '1', it indicates that one or more of the BDs + * associated with this packet generated a PCI error. This probably + * means the address was not valid. + */ + #define TX_CMPL_ERRORS_DMA_ERROR UINT32_C(0x40) + /* + * When this bit is '1', it indicates that the packet was longer than + * indicated by the hint. No packet was transmitted. + */ + #define TX_CMPL_ERRORS_HINT_TOO_SHORT UINT32_C(0x80) + /* + * When this bit is '1', it indicates that the packet was dropped due to + * Poison TLP error on one or more of the TLPs in the PXP completion. + */ + #define TX_CMPL_ERRORS_POISON_TLP_ERROR UINT32_C(0x100) + #define TX_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define TX_CMPL_ERRORS_SFT 1 + uint16_t errors_v; + + uint16_t unused_1; + uint32_t unused_2; +} __attribute__((packed)) tx_cmpl_t, *ptx_cmpl_t; + +/* RX Packet Completion Record (32 bytes split to 2 16-byte struct) */ +struct rx_pkt_cmpl { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records. + */ + #define RX_PKT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define RX_PKT_CMPL_TYPE_SFT 0 + /* + * RX L2 completion: Completion of and L2 RX packet. + * Length = 32B + */ + #define RX_PKT_CMPL_TYPE_RX_L2 (UINT32_C(0x11) << 0) + /* + * When this bit is '1', it indicates a packet that has an error of some + * type. Type of error is indicated in error_flags. + */ + #define RX_PKT_CMPL_FLAGS_ERROR UINT32_C(0x40) + /* This field indicates how the packet was placed in the buffer. */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_MASK UINT32_C(0x380) + #define RX_PKT_CMPL_FLAGS_PLACEMENT_SFT 7 + /* Normal: Packet was placed using normal algorithm. */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_NORMAL (UINT32_C(0x0) << 7) + /* Jumbo: Packet was placed using jumbo algorithm. */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_JUMBO (UINT32_C(0x1) << 7) + /* + * Header/Data Separation: Packet was placed using Header/Data + * separation algorithm. The separation location is indicated by + * the itype field. + */ + #define RX_PKT_CMPL_FLAGS_PLACEMENT_HDS (UINT32_C(0x2) << 7) + #define RX_PKT_CMPL_FLAGS_PLACEMENT_LAST \ + RX_PKT_CMPL_FLAGS_PLACEMENT_HDS + /* This bit is '1' if the RSS field in this completion is valid. */ + #define RX_PKT_CMPL_FLAGS_RSS_VALID UINT32_C(0x400) + /* + * This value indicates what the inner packet determined for the packet + * was. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_MASK UINT32_C(0xf000) + #define RX_PKT_CMPL_FLAGS_ITYPE_SFT 12 + /* Not Known: Indicates that the packet type was not known. 
*/ + #define RX_PKT_CMPL_FLAGS_ITYPE_NOT_KNOWN (UINT32_C(0x0) << 12) + /* + * IP Packet: Indicates that the packet was an IP packet, but + * further classification was not possible. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_IP (UINT32_C(0x1) << 12) + /* + * TCP Packet: Indicates that the packet was IP and TCP. This + * indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_TCP (UINT32_C(0x2) << 12) + /* + * UDP Packet: Indicates that the packet was IP and UDP. This + * indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_UDP (UINT32_C(0x3) << 12) + /* + * FCoE Packet: Indicates that the packet was recognized as a + * FCoE. This also indicates that the payload_offset field is + * valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_FCOE (UINT32_C(0x4) << 12) + /* + * RoCE Packet: Indicates that the packet was recognized as a + * RoCE. This also indicates that the payload_offset field is + * valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_ROCE (UINT32_C(0x5) << 12) + /* + * ICMP Packet: Indicates that the packet was recognized as + * ICMP. This indicates that the payload_offset field is valid. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_ICMP (UINT32_C(0x7) << 12) + /* + * PtP packet wo/timestamp: Indicates that the packet was + * recognized as a PtP packet. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_WO_TIMESTAMP \ + (UINT32_C(0x8) << 12) + /* + * PtP packet w/timestamp: Indicates that the packet was + * recognized as a PtP packet and that a timestamp was taken for + * the packet. + */ + #define RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP (UINT32_C(0x9) << 12) + #define RX_PKT_CMPL_FLAGS_ITYPE_LAST \ + RX_PKT_CMPL_FLAGS_ITYPE_PTP_W_TIMESTAMP + #define RX_PKT_CMPL_FLAGS_MASK UINT32_C(0xffc0) + #define RX_PKT_CMPL_FLAGS_SFT 6 + uint16_t flags_type; + + /* + * This is the length of the data for the packet stored in the buffer(s) + * identified by the opaque value. This includes the packet BD and any + * associated buffer BDs. This does not include the the length of any + * data places in aggregation BDs. + */ + uint16_t len; + + /* + * This is a copy of the opaque field from the RX BD this completion + * corresponds to. + */ + uint32_t opaque; + + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + #define RX_PKT_CMPL_V1 UINT32_C(0x1) + /* + * This value is the number of aggregation buffers that follow this + * entry in the completion ring that are a part of this packet. If the + * value is zero, then the packet is completely contained in the buffer + * space provided for the packet in the RX ring. + */ + #define RX_PKT_CMPL_AGG_BUFS_MASK UINT32_C(0x3e) + #define RX_PKT_CMPL_AGG_BUFS_SFT 1 + uint8_t agg_bufs_v1; + + /* + * This is the RSS hash type for the packet. The value is packed + * {tuple_extrac_op[1:0],rss_profile_id[4:0],tuple_extrac_op[2]}. + */ + uint8_t rss_hash_type; + + /* + * This value indicates the offset from the beginning of the packet + * where the inner payload starts. This value is valid for TCP, UDP, + * FCoE, and RoCE packets. + */ + uint8_t payload_offset; + + uint8_t unused_1; + + /* + * This value is the RSS hash value calculated for the packet based on + * the mode bits and key value in the VNIC. 
+ */ + uint32_t rss_hash; +} __attribute__((packed)); + +/* last 16 bytes of RX Packet Completion Record */ +struct rx_pkt_cmpl_hi { + /* + * This indicates that the ip checksum was calculated for the inner + * packet and that the ip_cs_error field indicates if there was an + * error. + */ + #define RX_PKT_CMPL_FLAGS2_IP_CS_CALC UINT32_C(0x1) + /* + * This indicates that the TCP, UDP or ICMP checksum was calculated for + * the inner packet and that the l4_cs_error field indicates if there + * was an error. + */ + #define RX_PKT_CMPL_FLAGS2_L4_CS_CALC UINT32_C(0x2) + /* + * This indicates that the ip checksum was calculated for the tunnel + * header and that the t_ip_cs_error field indicates if there was an + * error. + */ + #define RX_PKT_CMPL_FLAGS2_T_IP_CS_CALC UINT32_C(0x4) + /* + * This indicates that the UDP checksum was calculated for the tunnel + * packet and that the t_l4_cs_error field indicates if there was an + * error. + */ + #define RX_PKT_CMPL_FLAGS2_T_L4_CS_CALC UINT32_C(0x8) + /* This value indicates what format the metadata field is. */ + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_MASK UINT32_C(0xf0) + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_SFT 4 + /* No metadata informtaion. Value is zero. */ + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_NONE (UINT32_C(0x0) << 4) + /* + * The metadata field contains the VLAN tag and TPID value. - + * metadata[11:0] contains the vlan VID value. - metadata[12] + * contains the vlan DE value. - metadata[15:13] contains the + * vlan PRI value. - metadata[31:16] contains the vlan TPID + * value. + */ + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN (UINT32_C(0x1) << 4) + #define RX_PKT_CMPL_FLAGS2_META_FORMAT_LAST \ + RX_PKT_CMPL_FLAGS2_META_FORMAT_VLAN + /* + * This field indicates the IP type for the inner-most IP header. A + * value of '0' indicates IPv4. A value of '1' indicates IPv6. This + * value is only valid if itype indicates a packet with an IP header. + */ + #define RX_PKT_CMPL_FLAGS2_IP_TYPE UINT32_C(0x100) + uint32_t flags2; + + /* + * This is data from the CFA block as indicated by the meta_format + * field. + */ + /* When meta_format=1, this value is the VLAN VID. */ + #define RX_PKT_CMPL_METADATA_VID_MASK UINT32_C(0xfff) + #define RX_PKT_CMPL_METADATA_VID_SFT 0 + /* When meta_format=1, this value is the VLAN DE. */ + #define RX_PKT_CMPL_METADATA_DE UINT32_C(0x1000) + /* When meta_format=1, this value is the VLAN PRI. */ + #define RX_PKT_CMPL_METADATA_PRI_MASK UINT32_C(0xe000) + #define RX_PKT_CMPL_METADATA_PRI_SFT 13 + /* When meta_format=1, this value is the VLAN TPID. */ + #define RX_PKT_CMPL_METADATA_TPID_MASK UINT32_C(0xffff0000) + #define RX_PKT_CMPL_METADATA_TPID_SFT 16 + uint32_t metadata; + + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + #define RX_PKT_CMPL_V2 UINT32_C(0x1) + /* + * This error indicates that there was some sort of problem with the BDs + * for the packet that was found after part of the packet was already + * placed. The packet should be treated as invalid. + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_MASK UINT32_C(0xe) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_SFT 1 + /* No buffer error */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NO_BUFFER \ + (UINT32_C(0x0) << 1) + /* + * Did Not Fit: Packet did not fit into packet buffer provided. + * For regular placement, this means the packet did not fit in + * the buffer provided. 
For HDS and jumbo placement, this means + * that the packet could not be placed into 7 physical buffers + * or less. + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_DID_NOT_FIT \ + (UINT32_C(0x1) << 1) + /* + * Not On Chip: All BDs needed for the packet were not on-chip + * when the packet arrived. + */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_NOT_ON_CHIP \ + (UINT32_C(0x2) << 1) + /* Bad Format: BDs were not formatted correctly. */ + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT \ + (UINT32_C(0x3) << 1) + #define RX_PKT_CMPL_ERRORS_BUFFER_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_BUFFER_ERROR_BAD_FORMAT + /* This indicates that there was an error in the IP header checksum. */ + #define RX_PKT_CMPL_ERRORS_IP_CS_ERROR UINT32_C(0x10) + /* + * This indicates that there was an error in the TCP, UDP or ICMP + * checksum. + */ + #define RX_PKT_CMPL_ERRORS_L4_CS_ERROR UINT32_C(0x20) + /* + * This indicates that there was an error in the tunnel IP header + * checksum. + */ + #define RX_PKT_CMPL_ERRORS_T_IP_CS_ERROR UINT32_C(0x40) + /* This indicates that there was an error in the tunnel UDP checksum. */ + #define RX_PKT_CMPL_ERRORS_T_L4_CS_ERROR UINT32_C(0x80) + /* + * This indicates that there was a CRC error on either an FCoE or RoCE + * packet. The itype indicates the packet type. + */ + #define RX_PKT_CMPL_ERRORS_CRC_ERROR UINT32_C(0x100) + /* + * This indicates that there was an error in the tunnel portion of the + * packet when this field is non-zero. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_MASK UINT32_C(0xe00) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_SFT 9 + /* + * No additional error occurred on the tunnel portion of the + * packet of the packet does not have a tunnel. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 9) + /* + * Indicates that IP header version does not match expectation + * from L2 Ethertype for IPv4 and IPv6 in the tunnel header. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_VERSION \ + (UINT32_C(0x1) << 9) + /* + * Indicates that header length is out of range in the tunnel + * header. Valid for IPv4. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 9) + /* + * Indicates that the physical packet is shorter than that + * claimed by the PPPoE header length for a tunnel PPPoE packet. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_TUNNEL_TOTAL_ERROR \ + (UINT32_C(0x3) << 9) + /* + * Indicates that physical packet is shorter than that claimed + * by the tunnel l3 header length. Valid for IPv4, or IPv6 + * tunnel packet packets. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 9) + /* + * Indicates that the physical packet is shorter than that + * claimed by the tunnel UDP header length for a tunnel UDP + * packet that is not fragmented. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 9) + /* + * indicates that the IPv4 TTL or IPv6 hop limit check have + * failed (e.g. TTL = 0) in the tunnel header. Valid for IPv4, + * and IPv6. + */ + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL \ + (UINT32_C(0x6) << 9) + #define RX_PKT_CMPL_ERRORS_T_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_T_PKT_ERROR_T_L3_BAD_TTL + /* + * This indicates that there was an error in the inner portion of the + * packet when this field is non-zero. 
+ */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_MASK UINT32_C(0xf000) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_SFT 12 + /* + * No additional error occurred on the tunnel portion of the + * packet of the packet does not have a tunnel. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_NO_ERROR (UINT32_C(0x0) << 12) + /* + * Indicates that IP header version does not match expectation + * from L2 Ethertype for IPv4 and IPv6 or that option other than + * VFT was parsed on FCoE packet. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_VERSION \ + (UINT32_C(0x1) << 12) + /* + * indicates that header length is out of range. Valid for IPv4 + * and RoCE + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_HDR_LEN \ + (UINT32_C(0x2) << 12) + /* + * indicates that the IPv4 TTL or IPv6 hop limit check have + * failed (e.g. TTL = 0). Valid for IPv4, and IPv6 + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L3_BAD_TTL (UINT32_C(0x3) << 12) + /* + * Indicates that physical packet is shorter than that claimed + * by the l3 header length. Valid for IPv4, IPv6 packet or RoCE + * packets. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_IP_TOTAL_ERROR \ + (UINT32_C(0x4) << 12) + /* + * Indicates that the physical packet is shorter than that + * claimed by the UDP header length for a UDP packet that is not + * fragmented. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_UDP_TOTAL_ERROR \ + (UINT32_C(0x5) << 12) + /* + * Indicates that TCP header length > IP payload. Valid for TCP + * packets only. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN \ + (UINT32_C(0x6) << 12) + /* Indicates that TCP header length < 5. Valid for TCP. */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_HDR_LEN_TOO_SMALL \ + (UINT32_C(0x7) << 12) + /* + * Indicates that TCP option headers result in a TCP header size + * that does not match data offset in TCP header. Valid for TCP. + */ + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN \ + (UINT32_C(0x8) << 12) + #define RX_PKT_CMPL_ERRORS_PKT_ERROR_LAST \ + RX_PKT_CMPL_ERRORS_PKT_ERROR_L4_BAD_OPT_LEN + #define RX_PKT_CMPL_ERRORS_MASK UINT32_C(0xfffe) + #define RX_PKT_CMPL_ERRORS_SFT 1 + uint16_t errors_v2; + + /* + * This field identifies the CFA action rule that was used for this + * packet. + */ + uint16_t cfa_code; + + /* + * This value holds the reordering sequence number for the packet. If + * the reordering sequence is not valid, then this value is zero. The + * reordering domain for the packet is in the bottom 8 to 10b of the + * rss_hash value. The bottom 20b of this value contain the ordering + * domain value for the packet. + */ + #define RX_PKT_CMPL_REORDER_MASK UINT32_C(0xffffff) + #define RX_PKT_CMPL_REORDER_SFT 0 + uint32_t reorder; +} __attribute__((packed)); + +/* HWRM Forwarded Request (16 bytes) */ +struct hwrm_fwd_req_cmpl { + /* Length of forwarded request in bytes. */ + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records. + */ + #define HWRM_FWD_REQ_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_FWD_REQ_CMPL_TYPE_SFT 0 + /* Forwarded HWRM Request */ + #define HWRM_FWD_REQ_CMPL_TYPE_HWRM_FWD_REQ (UINT32_C(0x22) << 0) + /* Length of forwarded request in bytes. */ + #define HWRM_FWD_REQ_CMPL_REQ_LEN_MASK UINT32_C(0xffc0) + #define HWRM_FWD_REQ_CMPL_REQ_LEN_SFT 6 + uint16_t req_len_type; + + /* + * Source ID of this request. Typically used in forwarding requests and + * responses. 
0x0 - 0xFFF8 - Used for function ids 0xFFF8 - 0xFFFE - + * Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t source_id; + + uint32_t unused_0; + + /* Address of forwarded request. */ + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. + */ + #define HWRM_FWD_REQ_CMPL_V UINT32_C(0x1) + /* Address of forwarded request. */ + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_MASK UINT32_C(0xfffffffe) + #define HWRM_FWD_REQ_CMPL_REQ_BUF_ADDR_SFT 1 + uint64_t req_buf_addr_v; +} __attribute__((packed)); + +/* HWRM Asynchronous Event Completion Record (16 bytes) */ +struct hwrm_async_event_cmpl { + /* + * This field indicates the exact type of the completion. By convention, + * the LSB identifies the length of the record in 16B units. Even values + * indicate 16B records. Odd values indicate 32B records. + */ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_MASK UINT32_C(0x3f) + #define HWRM_ASYNC_EVENT_CMPL_TYPE_SFT 0 + /* HWRM Asynchronous Event Information */ + #define HWRM_ASYNC_EVENT_CMPL_TYPE_HWRM_ASYNC_EVENT \ + (UINT32_C(0x2e) << 0) + uint16_t type; + + /* Identifiers of events. */ + /* Link status changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_STATUS_CHANGE \ + (UINT32_C(0x0) << 0) + /* Link MTU changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_MTU_CHANGE \ + (UINT32_C(0x1) << 0) + /* Link speed changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CHANGE \ + (UINT32_C(0x2) << 0) + /* DCB Configuration changed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_DCB_CONFIG_CHANGE \ + (UINT32_C(0x3) << 0) + /* Port connection not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PORT_CONN_NOT_ALLOWED \ + (UINT32_C(0x4) << 0) + /* Link speed configuration was not allowed */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_LINK_SPEED_CFG_NOT_ALLOWED \ + (UINT32_C(0x5) << 0) + /* Function driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_UNLOAD \ + (UINT32_C(0x10) << 0) + /* Function driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_FUNC_DRVR_LOAD \ + (UINT32_C(0x11) << 0) + /* PF driver unloaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_UNLOAD \ + (UINT32_C(0x20) << 0) + /* PF driver loaded */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_DRVR_LOAD \ + (UINT32_C(0x21) << 0) + /* VF Function Level Reset (FLR) */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_FLR (UINT32_C(0x30) << 0) + /* VF MAC Address Change */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_VF_MAC_ADDR_CHANGE \ + (UINT32_C(0x31) << 0) + /* PF-VF communication channel status change. */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_PF_VF_COMM_STATUS_CHANGE \ + (UINT32_C(0x32) << 0) + /* HWRM Error */ + #define HWRM_ASYNC_EVENT_CMPL_EVENT_ID_HWRM_ERROR \ + (UINT32_C(0xff) << 0) + uint16_t event_id; + + /* Event specific data */ + uint32_t event_data2; + + /* opaque is 7 b */ + /* + * This value is written by the NIC such that it will be different for + * each pass through the completion queue. The even passes will write 1. + * The odd passes will write 0. 
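Illustrative sketch (not part of the patch): the alternating "V" bit described just above is how software separates completions written on the current pass through the ring from stale entries left over from the previous pass. A minimal example against the HWRM_ASYNC_EVENT_CMPL_V define that follows; the idea of a driver-tracked "phase" flag that flips on every ring wrap is an assumption about how the consumer is implemented, not something this header mandates.

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper: 'phase' is flipped by the consumer each time it
 * wraps around the completion ring, matching the even/odd pass rule. */
static bool
async_event_cmpl_is_fresh(uint8_t opaque_v, bool phase)
{
    return ((opaque_v & HWRM_ASYNC_EVENT_CMPL_V) != 0) == phase;
}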
+ */ + #define HWRM_ASYNC_EVENT_CMPL_V UINT32_C(0x1) + /* opaque is 7 b */ + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_MASK UINT32_C(0xfe) + #define HWRM_ASYNC_EVENT_CMPL_OPAQUE_SFT 1 + uint8_t opaque_v; + + /* 8-lsb timestamp from POR (100-msec resolution) */ + uint8_t timestamp_lo; + + /* 16-lsb timestamp from POR (100-msec resolution) */ + uint16_t timestamp_hi; + + /* Event specific data */ + uint32_t event_data1; +} __attribute__((packed)); + +/* + * Note: The Hardware Resource Manager (HWRM) manages various hardware resources + * inside the chip. The HWRM is implemented in firmware, and runs on embedded + * processors inside the chip. This firmware is vital part of the chip's + * hardware. The chip can not be used by driver without it. + */ + +/* Input (16 bytes) */ +struct input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* Output (8 bytes) */ +struct output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; +} __attribute__((packed)); + +/* hwrm_cfa_l2_filter_alloc */ +/* + * A filter is used to identify traffic that contains a matching set of + * parameters like unicast or broadcast MAC address or a VLAN tag amongst + * other things which then allows the ASIC to direct the incoming traffic + * to an appropriate VNIC or Rx ring. + */ + +/* Input (96 bytes) */ +struct hwrm_cfa_l2_filter_alloc_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. 
This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * Enumeration denoting the RX, TX type of the resource. This + * enumeration is used for resources that are similar for both TX and RX + * paths of the chip. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_TX \ + (UINT32_C(0x0) << 0) + /* rx path */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX \ + (UINT32_C(0x1) << 0) + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_LAST \ + HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX + /* + * Setting of this flag indicates the applicability to the loopback + * path. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_LOOPBACK \ + UINT32_C(0x2) + /* + * Setting of this flag indicates drop action. If this flag is not set, + * then it should be considered accept action. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_DROP \ + UINT32_C(0x4) + /* + * If this flag is set, all t_l2_* fields are invalid and they should + * not be specified. If this flag is set, then l2_* fields refer to + * fields of outermost L2 header. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_OUTERMOST \ + UINT32_C(0x8) + uint32_t flags; + + /* This bit must be '1' for the l2_addr field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR \ + UINT32_C(0x1) + /* This bit must be '1' for the l2_addr_mask field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK \ + UINT32_C(0x2) + /* This bit must be '1' for the l2_ovlan field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN \ + UINT32_C(0x4) + /* This bit must be '1' for the l2_ovlan_mask field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_OVLAN_MASK \ + UINT32_C(0x8) + /* This bit must be '1' for the l2_ivlan field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN \ + UINT32_C(0x10) + /* This bit must be '1' for the l2_ivlan_mask field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_IVLAN_MASK \ + UINT32_C(0x20) + /* This bit must be '1' for the t_l2_addr field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR \ + UINT32_C(0x40) + /* + * This bit must be '1' for the t_l2_addr_mask field to be configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_ADDR_MASK \ + UINT32_C(0x80) + /* This bit must be '1' for the t_l2_ovlan field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN \ + UINT32_C(0x100) + /* + * This bit must be '1' for the t_l2_ovlan_mask field to be configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_OVLAN_MASK \ + UINT32_C(0x200) + /* This bit must be '1' for the t_l2_ivlan field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN \ + UINT32_C(0x400) + /* + * This bit must be '1' for the t_l2_ivlan_mask field to be configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_T_L2_IVLAN_MASK \ + UINT32_C(0x800) + /* This bit must be '1' for the src_type field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_TYPE \ + UINT32_C(0x1000) + /* This bit must be '1' for the src_id field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_SRC_ID \ + UINT32_C(0x2000) + /* This bit must be '1' for the tunnel_type field to be configured. 
*/ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_TUNNEL_TYPE \ + UINT32_C(0x4000) + /* This bit must be '1' for the dst_id field to be configured. */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID \ + UINT32_C(0x8000) + /* + * This bit must be '1' for the mirror_vnic_id field to be configured. + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_MIRROR_VNIC_ID \ + UINT32_C(0x10000) + uint32_t enables; + + /* + * This value sets the match value for the L2 MAC address. Destination + * MAC address for RX path. Source MAC address for TX path. + */ + uint8_t l2_addr[6]; + + uint8_t unused_0; + uint8_t unused_1; + + /* + * This value sets the mask value for the L2 address. A value of 0 will + * mask the corresponding bit from compare. + */ + uint8_t l2_addr_mask[6]; + + /* This value sets VLAN ID value for outer VLAN. */ + uint16_t l2_ovlan; + + /* + * This value sets the mask value for the ovlan id. A value of 0 will + * mask the corresponding bit from compare. + */ + uint16_t l2_ovlan_mask; + + /* This value sets VLAN ID value for inner VLAN. */ + uint16_t l2_ivlan; + + /* + * This value sets the mask value for the ivlan id. A value of 0 will + * mask the corresponding bit from compare. + */ + uint16_t l2_ivlan_mask; + + uint8_t unused_2; + uint8_t unused_3; + + /* + * This value sets the match value for the tunnel L2 MAC address. + * Destination MAC address for RX path. Source MAC address for TX path. + */ + uint8_t t_l2_addr[6]; + + uint8_t unused_4; + uint8_t unused_5; + + /* + * This value sets the mask value for the tunnel L2 address. A value of + * 0 will mask the corresponding bit from compare. + */ + uint8_t t_l2_addr_mask[6]; + + /* This value sets VLAN ID value for tunnel outer VLAN. */ + uint16_t t_l2_ovlan; + + /* + * This value sets the mask value for the tunnel ovlan id. A value of 0 + * will mask the corresponding bit from compare. + */ + uint16_t t_l2_ovlan_mask; + + /* This value sets VLAN ID value for tunnel inner VLAN. */ + uint16_t t_l2_ivlan; + + /* + * This value sets the mask value for the tunnel ivlan id. A value of 0 + * will mask the corresponding bit from compare. + */ + uint16_t t_l2_ivlan_mask; + + /* This value identifies the type of source of the packet. */ + /* Network port */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_NPORT \ + (UINT32_C(0x0) << 0) + /* Physical function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_PF \ + (UINT32_C(0x1) << 0) + /* Virtual function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VF \ + (UINT32_C(0x2) << 0) + /* Virtual NIC of a function */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_VNIC \ + (UINT32_C(0x3) << 0) + /* Embedded processor for CFA management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_KONG \ + (UINT32_C(0x4) << 0) + /* Embedded processor for OOB management */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_APE \ + (UINT32_C(0x5) << 0) + /* Embedded processor for RoCE */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_BONO \ + (UINT32_C(0x6) << 0) + /* Embedded processor for network proxy functions */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_SRC_TYPE_TANG \ + (UINT32_C(0x7) << 0) + uint8_t src_type; + + uint8_t unused_6; + /* + * This value is the id of the source. For a network port, it represents + * port_id. For a physical function, it represents fid. For a virtual + * function, it represents vf_id. For a vnic, it represents vnic_id. For + * embedded processors, this id is not valid. Notes: 1. 
The function ID + * is implied if it src_id is not provided for a src_type that is either + */ + uint32_t src_id; + + /* Tunnel Type. */ + /* Non-tunnel */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NONTUNNEL \ + (UINT32_C(0x0) << 0) + /* Virtual eXtensible Local Area Network (VXLAN) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_VXLAN \ + (UINT32_C(0x1) << 0) + /* + * Network Virtualization Generic Routing Encapsulation (NVGRE) + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_NVGRE \ + (UINT32_C(0x2) << 0) + /* + * Generic Routing Encapsulation (GRE) inside Ethernet payload + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_L2GRE \ + (UINT32_C(0x3) << 0) + /* IP in IP */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPIP \ + (UINT32_C(0x4) << 0) + /* Generic Network Virtualization Encapsulation (Geneve) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_GENEVE \ + (UINT32_C(0x5) << 0) + /* Multi-Protocol Lable Switching (MPLS) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_MPLS \ + (UINT32_C(0x6) << 0) + /* Stateless Transport Tunnel (STT) */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_STT \ + (UINT32_C(0x7) << 0) + /* + * Generic Routing Encapsulation (GRE) inside IP datagram + * payload + */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_IPGRE \ + (UINT32_C(0x8) << 0) + /* Any tunneled traffic */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_TUNNEL_TYPE_ANYTUNNEL \ + (UINT32_C(0xff) << 0) + uint8_t tunnel_type; + + uint8_t unused_7; + + /* + * If set, this value shall represent the Logical VNIC ID of the + * destination VNIC for the RX path and network port id of the + * destination port for the TX path. + */ + uint16_t dst_id; + + /* Logical VNIC ID of the VNIC where traffic is mirrored. */ + uint16_t mirror_vnic_id; + + /* + * This hint is provided to help in placing the filter in the filter + * table. + */ + /* No preference */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_NO_PREFER \ + (UINT32_C(0x0) << 0) + /* Above the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_ABOVE_FILTER \ + (UINT32_C(0x1) << 0) + /* Below the given filter */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_BELOW_FILTER \ + (UINT32_C(0x2) << 0) + /* As high as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MAX \ + (UINT32_C(0x3) << 0) + /* As low as possible */ + #define HWRM_CFA_L2_FILTER_ALLOC_INPUT_PRI_HINT_MIN \ + (UINT32_C(0x4) << 0) + uint8_t pri_hint; + + uint8_t unused_8; + uint32_t unused_9; + + /* + * This is the ID of the filter that goes along with the pri_hint. This + * field is valid only for the following values. 1 - Above the given + * filter 2 - Below the given filter + */ + uint64_t l2_filter_id_hint; +} __attribute__((packed)); + +/* Output (24 bytes) */ +struct hwrm_cfa_l2_filter_alloc_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* + * This value identifies a set of CFA data structures used for an L2 + * context. 
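Illustrative sketch (not part of the patch): in this request the flags pick the path and the enables declare which optional fields were actually filled in, so a caller typically zeroes the structure and sets only what it needs. A rough example for a simple RX unicast filter, assuming this header's hwrm_cfa_l2_filter_alloc_input definition is in scope; the helper name is made up, and the common request header fields, byte-order conversion and the actual send path are deliberately omitted.

#include <stdint.h>
#include <string.h>

static void
l2_filter_alloc_fill(struct hwrm_cfa_l2_filter_alloc_input *req,
                     const uint8_t mac[6], uint16_t vnic_id)
{
    memset(req, 0, sizeof(*req));

    /* RX path, match on the destination MAC, deliver to one VNIC. */
    req->flags = HWRM_CFA_L2_FILTER_ALLOC_INPUT_FLAGS_PATH_RX;
    req->enables = HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR |
                   HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_L2_ADDR_MASK |
                   HWRM_CFA_L2_FILTER_ALLOC_INPUT_ENABLES_DST_ID;

    memcpy(req->l2_addr, mac, 6);
    memset(req->l2_addr_mask, 0xff, 6); /* compare all address bits */
    req->dst_id = vnic_id;
}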
+ */ + uint64_t l2_filter_id; + + /* + * This is the ID of the flow associated with this filter. This value + * shall be used to match and associate the flow identifier returned in + * completion records. A value of 0xFFFFFFFF shall indicate no flow id. + */ + uint32_t flow_id; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_cfa_l2_filter_free */ +/* + * Description: Free a L2 filter. The HWRM shall free all associated filter + * resources with the L2 filter. + */ + +/* Input (24 bytes) */ +struct hwrm_cfa_l2_filter_free_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * This value identifies a set of CFA data structures used for an L2 + * context. + */ + uint64_t l2_filter_id; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_filter_free_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_cfa_l2_set_rx_mask */ +/* Description: This command will set rx mask of the function. */ + +/* Input (40 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. 
If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* VNIC ID */ + uint32_t vnic_id; + + /* Reserved for future use. */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_RESERVED UINT32_C(0x1) + /* + * When this bit is '1', the function is requested to accept multi-cast + * packets specified by the multicast addr table. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST UINT32_C(0x2) + /* + * When this bit is '1', the function is requested to accept all multi- + * cast packets. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST UINT32_C(0x4) + /* + * When this bit is '1', the function is requested to accept broadcast + * packets. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST UINT32_C(0x8) + /* + * When this bit is '1', the function is requested to be put in the + * promiscuous mode. The HWRM should accept any function to set up + * promiscuous mode. The HWRM shall follow the semantics below for the + * promiscuous mode support. # When partitioning is not enabled on a + * port (i.e. single PF on the port), then the PF shall be allowed to be + * in the promiscuous mode. When the PF is in the promiscuous mode, then + * it shall receive all host bound traffic on that port. # When + * partitioning is enabled on a port (i.e. multiple PFs per port) and a + * PF on that port is in the promiscuous mode, then the PF receives all + * traffic within that partition as identified by a unique identifier + * for the PF (e.g. S-Tag). If a unique outer VLAN for the PF is + * specified, then the setting of promiscuous mode on that PF shall + * result in the PF receiving all host bound traffic with matching outer + * VLAN. # A VF can be set in the promiscuous mode. In the + * promiscuous mode, the VF does not receive any traffic unless a unique + * outer VLAN for the VF is specified. If a unique outer VLAN for the VF + * is specified, then the setting of promiscuous mode on that VF shall + * result in the VF receiving all host bound traffic with the matching + * outer VLAN. # The HWRM shall allow the setting of promiscuous mode on + * a function independently from the promiscuous mode settings on other + * functions. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS UINT32_C(0x10) + /* + * If this flag is set, the corresponding RX filters shall be set up to + * cover multicast/broadcast filters for the outermost Layer 2 + * destination MAC address field. + */ + #define HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_OUTERMOST UINT32_C(0x20) + uint32_t mask; + + /* This is the address of the multicast address table. */ + uint64_t mc_tbl_addr; + + /* + * This value indicates how many entries in mc_tbl are valid. Each entry + * is 6 bytes.
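Illustrative sketch (not part of the patch): the mask field is a plain OR of the acceptance bits defined above, with MASK_MCAST additionally requiring mc_tbl_addr and num_mc_entries to describe the multicast table. A minimal example with a hypothetical helper; the policy choices are purely illustrative.

#include <stdbool.h>
#include <stdint.h>

static uint32_t
rx_mask_build(bool promisc, bool all_mcast)
{
    uint32_t mask = HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_BCAST;

    if (promisc)
        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_PROMISCUOUS;

    if (all_mcast)
        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_ALL_MCAST;
    else
        /* Requires mc_tbl_addr and num_mc_entries to be valid. */
        mask |= HWRM_CFA_L2_SET_RX_MASK_INPUT_MASK_MCAST;

    return mask;
}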
+ */ + uint32_t num_mc_entries; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_cfa_l2_set_rx_mask_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_exec_fwd_resp */ +/* + * Description: This command is used to send an encapsulated request to the + * HWRM. This command instructs the HWRM to execute the request and forward the + * response of the encapsulated request to the location specified in the + * original request that is encapsulated. The target id of this command shall be + * set to 0xFFFF (HWRM). The response location in this command shall be used to + * acknowledge the receipt of the encapsulated request and forwarding of the + * response. + */ + +/* Input (128 bytes) */ +struct hwrm_exec_fwd_resp_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * This is an encapsulated request. This request should be executed by + * the HWRM and the response should be provided in the response buffer + * inside the encapsulated request. + */ + uint32_t encap_request[26]; + + /* + * This value indicates the target id of the response to the + * encapsulated request. 0x0 - 0xFFF8 - Used for function ids 0xFFF8 - + * 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t encap_resp_target_id; + + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_exec_fwd_resp_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. 
*/ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_func_qcaps */ +/* + * Description: This command returns capabilities of a function. The input FID + * value is used to indicate what function is being queried. This allows a + * physical function driver to query virtual functions that are children of the + * physical function. The output FID value is needed to configure Rings and + * MSI-X vectors so their DMA operations appear correctly on the PCI bus. + */ + +/* Input (24 bytes) */ +struct hwrm_func_qcaps_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * Function ID of the function that is being queried. 0xFF... (All Fs) + * if the query is for the requesting function. + */ + uint16_t fid; + + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (80 bytes) */ +struct hwrm_func_qcaps_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* + * FID value. This value is used to identify operations on the PCI bus + * as belonging to a particular PCI function. + */ + uint16_t fid; + + /* + * Port ID of port that this function is associated with. Valid only for + * the PF. 0xFF... (All Fs) if this function is not associated with any + * port. 0xFF... (All Fs) if this function is called from a VF. + */ + uint16_t port_id; + + /* If 1, then Push mode is supported on this function. */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PUSH_MODE_SUPPORTED UINT32_C(0x1) + /* + * If 1, then the global MSI-X auto-masking is enabled for the device. 
+ */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_GLOBAL_MSIX_AUTOMASKING \ + UINT32_C(0x2) + /* + * If 1, then the Precision Time Protocol (PTP) processing is supported + * on this function. The HWRM should enable PTP on only a single + * Physical Function (PF) per port. + */ + #define HWRM_FUNC_QCAPS_OUTPUT_FLAGS_PTP_SUPPORTED UINT32_C(0x4) + uint32_t flags; + + /* + * This value is current MAC address configured for this function. A + * value of 00-00-00-00-00-00 indicates no MAC address is currently + * configured. + */ + uint8_t perm_mac_address[6]; + + /* + * The maximum number of RSS/COS contexts that can be allocated to the + * function. + */ + uint16_t max_rsscos_ctx; + + /* + * The maximum number of completion rings that can be allocated to the + * function. + */ + uint16_t max_cmpl_rings; + + /* + * The maximum number of transmit rings that can be allocated to the + * function. + */ + uint16_t max_tx_rings; + + /* + * The maximum number of receive rings that can be allocated to the + * function. + */ + uint16_t max_rx_rings; + + /* + * The maximum number of L2 contexts that can be allocated to the + * function. + */ + uint16_t max_l2_ctxs; + + /* The maximum number of VNICs that can be allocated to the function. */ + uint16_t max_vnics; + + /* + * The identifier for the first VF enabled on a PF. This is valid only + * on the PF with SR-IOV enabled. 0xFF... (All Fs) if this command is + * called on a PF with SR-IOV disabled or on a VF. + */ + uint16_t first_vf_id; + + /* + * The maximum number of VFs that can be allocated to the function. This + * is valid only on the PF with SR-IOV enabled. 0xFF... (All Fs) if this + * command is called on a PF with SR-IOV disabled or on a VF. + */ + uint16_t max_vfs; + + /* + * The maximum number of statistic contexts that can be allocated to the + * function. + */ + uint16_t max_stat_ctx; + + /* + * The maximum number of Encapsulation records that can be offloaded by + * this function. + */ + uint32_t max_encap_records; + + /* + * The maximum number of decapsulation records that can be offloaded by + * this function. + */ + uint32_t max_decap_records; + + /* + * The maximum number of Exact Match (EM) flows that can be offloaded by + * this function on the TX side. + */ + uint32_t max_tx_em_flows; + + /* + * The maximum number of Wildcard Match (WM) flows that can be offloaded + * by this function on the TX side. + */ + uint32_t max_tx_wm_flows; + + /* + * The maximum number of Exact Match (EM) flows that can be offloaded by + * this function on the RX side. + */ + uint32_t max_rx_em_flows; + + /* + * The maximum number of Wildcard Match (WM) flows that can be offloaded + * by this function on the RX side. + */ + uint32_t max_rx_wm_flows; + + /* + * The maximum number of multicast filters that can be supported by this + * function on the RX side. + */ + uint32_t max_mcast_filters; + + /* + * The maximum value of flow_id that can be supported in completion + * records. + */ + uint32_t max_flow_id; + + /* + * The maximum number of HW ring groups that can be supported on this + * function. + */ + uint32_t max_hw_ring_grps; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. 
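Illustrative sketch (not part of the patch): because the valid byte is written last, the host can poll an ordinary memory response buffer for completion. A minimal example against the generic struct output defined earlier in this header; the iteration bound is arbitrary, and a production driver would also insert the appropriate read barrier between observing the valid byte and consuming the rest of the response.

#include <stdbool.h>
#include <stdint.h>

static bool
hwrm_response_ready(const volatile struct output *resp)
{
    const volatile uint8_t *buf = (const volatile uint8_t *)resp;
    int i;

    for (i = 0; i < 100000; i++) {
        uint16_t len = resp->resp_len;

        /* The last byte of the response is the valid flag. */
        if (len >= sizeof(struct output) && buf[len - 1] == 1)
            return true;
    }
    return false;
}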
+ */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_func_reset */ +/* + * Description: This command resets a hardware function (PCIe function) and + * frees any resources used by the function. This command shall be initiated by + * the driver after an FLR has occurred to prepare the function for re-use. This + * command may also be initiated by a driver prior to doing its own + * configuration. This command puts the function into the reset state. In the + * reset state, global and port related features of the chip are not available. + */ +/* + * Note: This command will reset a function that has already been disabled or + * idled. The command returns all the resources owned by the function so a new + * driver may allocate and configure resources normally. + */ + +/* Input (24 bytes) */ +struct hwrm_func_reset_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* This bit must be '1' for the vf_id_valid field to be configured. */ + #define HWRM_FUNC_RESET_INPUT_ENABLES_VF_ID_VALID \ + UINT32_C(0x1) + uint32_t enables; + + /* + * The ID of the VF that this PF is trying to reset. Only the parent PF + * shall be allowed to reset a child VF. A parent PF driver shall use + * this field only when a specific child VF is requested to be reset. + */ + uint16_t vf_id; + + /* This value indicates the level of a function reset. */ + /* + * Reset the caller function and its children VFs (if any). If + * no children functions exist, then reset the caller function + * only. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETALL \ + (UINT32_C(0x0) << 0) + /* Reset the caller function only */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETME \ + (UINT32_C(0x1) << 0) + /* + * Reset all children VFs of the caller function driver if the + * caller is a PF driver. It is an error to specify this level + * by a VF driver. It is an error to specify this level by a PF + * driver with no children VFs. + */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETCHILDREN \ + (UINT32_C(0x2) << 0) + /* + * Reset a specific VF of the caller function driver if the + * caller is the parent PF driver. It is an error to specify + * this level by a VF driver. It is an error to specify this + * level by a PF driver that is not the parent of the VF that is + * being requested to reset.
+ */ + #define HWRM_FUNC_RESET_INPUT_FUNC_RESET_LEVEL_RESETVF \ + (UINT32_C(0x3) << 0) + uint8_t func_reset_level; + + uint8_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_func_reset_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_port_phy_cfg */ +/* + * Description: This command configures the PHY device for the port. It allows + * setting of the most generic settings for the PHY. The HWRM shall complete + * this command as soon as PHY settings are configured. They may not be applied + * when the command response is provided. A VF driver shall not be allowed to + * configure PHY using this command. In a network partition mode, a PF driver + * shall not be allowed to configure PHY using this command. + */ + +/* Input (56 bytes) */ +struct hwrm_port_phy_cfg_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * When this bit is set to '1', the PHY for the port shall be reset. # + * If this bit is set to 1, then the HWRM shall reset the PHY after + * applying PHY configuration changes specified in this command. # In + * order to guarantee that PHY configuration changes specified in this + * command take effect, the HWRM client should set this flag to 1. # If + * this bit is not set to 1, then the HWRM may reset the PHY depending + * on the current PHY configuration and settings specified in this + * command. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY UINT32_C(0x1) + /* + * When this bit is set to '1', the link shall be forced to be taken + * down. # When this bit is set to '1", all other command input settings + * related to the link speed shall be ignored. 
Once the link state is + * forced down, it can be explicitly cleared from that state by setting + * this flag to '0'. # If this flag is set to '0', then the link shall + * be cleared from forced down state if the link is in forced down + * state. There may be conditions (e.g. out-of-band or sideband + * configuration changes for the link) outside the scope of the HWRM + * implementation that may clear forced down link state. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE_LINK_DOWN UINT32_C(0x2) + /* + * When this bit is set to '1', the link shall be forced to the + * force_link_speed value. When this bit is set to '1', the HWRM client + * should not enable any of the auto negotiation related fields + * represented by auto_XXX fields in this command. When this bit is set + * to '1' and the HWRM client has enabled a auto_XXX field in this + * command, then the HWRM shall ignore the enabled auto_XXX field. When + * this bit is set to zero, the link shall be allowed to autoneg. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE UINT32_C(0x4) + /* + * When this bit is set to '1', the auto-negotiation process shall be + * restarted on the link. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESTART_AUTONEG UINT32_C(0x8) + /* + * When this bit is set to '1', Energy Efficient Ethernet (EEE) is + * requested to be enabled on this link. If EEE is not supported on this + * port, then this flag shall be ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_ENABLE UINT32_C(0x10) + /* + * When this bit is set to '1', Energy Efficient Ethernet (EEE) is + * requested to be disabled on this link. If EEE is not supported on + * this port, then this flag shall be ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_DISABLE UINT32_C(0x20) + /* + * When this bit is set to '1' and EEE is enabled on this link, then TX + * LPI is requested to be enabled on the link. If EEE is not supported + * on this port, then this flag shall be ignored by the HWRM. If EEE is + * disabled on this port, then this flag shall be ignored by the HWRM. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FLAGS_EEE_TX_LPI UINT32_C(0x40) + uint32_t flags; + + /* This bit must be '1' for the auto_mode field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_MODE UINT32_C(0x1) + /* This bit must be '1' for the auto_duplex field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_DUPLEX UINT32_C(0x2) + /* This bit must be '1' for the auto_pause field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_PAUSE UINT32_C(0x4) + /* + * This bit must be '1' for the auto_link_speed field to be configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED UINT32_C(0x8) + /* + * This bit must be '1' for the auto_link_speed_mask field to be + * configured. + */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_AUTO_LINK_SPEED_MASK \ + UINT32_C(0x10) + /* This bit must be '1' for the wirespeed field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_WIRESPEED UINT32_C(0x20) + /* This bit must be '1' for the lpbk field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_LPBK UINT32_C(0x40) + /* This bit must be '1' for the preemphasis field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_PREEMPHASIS UINT32_C(0x80) + /* This bit must be '1' for the force_pause field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_FORCE_PAUSE UINT32_C(0x100) + /* + * This bit must be '1' for the eee_link_speed_mask field to be + * configured. 
+ */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_EEE_LINK_SPEED_MASK \ + UINT32_C(0x200) + /* This bit must be '1' for the tx_lpi_timer field to be configured. */ + #define HWRM_PORT_PHY_CFG_INPUT_ENABLES_TX_LPI_TIMER UINT32_C(0x400) + uint32_t enables; + + /* Port ID of port that is to be configured. */ + uint16_t port_id; + + /* + * This is the speed that will be used if the force bit is '1'. If an + * unsupported speed is selected, an error will be generated. + */ + /* 100Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100MB \ + (UINT32_C(0x1) << 0) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_1GB \ + (UINT32_C(0xa) << 0) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2GB \ + (UINT32_C(0x14) << 0) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_2_5GB \ + (UINT32_C(0x19) << 0) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10GB \ + (UINT32_C(0x64) << 0) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_20GB \ + (UINT32_C(0xc8) << 0) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB \ + (UINT32_C(0xfa) << 0) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_40GB \ + (UINT32_C(0x190) << 0) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_50GB \ + (UINT32_C(0x1f4) << 0) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_100GB \ + (UINT32_C(0x3e8) << 0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_10MB \ + (UINT32_C(0xffff) << 0) + uint16_t force_link_speed; + + /* + * This value is used to identify what autoneg mode is used when the + * link speed is not being forced. + */ + /* + * Disable autoneg or autoneg disabled. No speeds are selected. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_NONE (UINT32_C(0x0) << 0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ALL_SPEEDS \ + (UINT32_C(0x1) << 0) + /* + * Select only the auto_link_speed speed for autoneg mode. This + * mode has been DEPRECATED. An HWRM client should not use this + * mode. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_SPEED \ + (UINT32_C(0x2) << 0) + /* + * Select the auto_link_speed or any speed below that speed for + * autoneg. This mode has been DEPRECATED. An HWRM client should + * not use this mode. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_ONE_OR_BELOW \ + (UINT32_C(0x3) << 0) + /* + * Select the speeds based on the corresponding link speed mask + * value that is provided. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_MODE_SPEED_MASK \ + (UINT32_C(0x4) << 0) + uint8_t auto_mode; + + /* + * This is the duplex setting that will be used if the autoneg_mode is + * "one_speed" or "one_or_below". + */ + /* Half Duplex will be requested. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_HALF \ + (UINT32_C(0x0) << 0) + /* Full duplex will be requested. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_FULL \ + (UINT32_C(0x1) << 0) + /* Both Half and Full duplex will be requested. */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_DUPLEX_BOTH \ + (UINT32_C(0x2) << 0) + uint8_t auto_duplex; + + /* + * This value is used to configure the pause that will be used for + * autonegotiation. Add text on the usage of auto_pause and force_pause. + */ + /* + * When this bit is '1', Generation of tx pause messages has been + * requested. Disabled otherwise.
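Illustrative sketch (not part of the patch): forcing a speed and autonegotiating are mutually exclusive, which is why the FORCE flag tells the HWRM to ignore the auto_* fields entirely. A rough example that forces 25G, assuming the flags and speed defines above and the hwrm_port_phy_cfg_input structure whose remaining fields continue below; the helper name is made up, and the generic request header, byte-order handling and send path are omitted.

#include <stdint.h>
#include <string.h>

static void
phy_cfg_force_25g(struct hwrm_port_phy_cfg_input *req, uint16_t port_id)
{
    memset(req, 0, sizeof(*req));
    req->port_id = port_id;

    /* FORCE makes the HWRM ignore the auto_* fields; RESET_PHY asks for
     * the new configuration to be applied to the PHY right away. */
    req->flags = HWRM_PORT_PHY_CFG_INPUT_FLAGS_FORCE |
                 HWRM_PORT_PHY_CFG_INPUT_FLAGS_RESET_PHY;
    req->force_link_speed = HWRM_PORT_PHY_CFG_INPUT_FORCE_LINK_SPEED_25GB;
}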
+ */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages has been + * requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_RX UINT32_C(0x2) + /* + * When set to 1, the advertisement of pause is enabled. # When the + * auto_mode is not set to none and this flag is set to 1, then the + * auto_pause bits on this port are being advertised and autoneg pause + * results are being interpreted. # When the auto_mode is not set to + * none and this flag is set to 0, the pause is forced as indicated in + * force_pause, and also advertised as auto_pause bits, but the autoneg + * results are not interpreted since the pause configuration is being + * forced. # When the auto_mode is set to none and this flag is set to + * 1, auto_pause bits should be ignored and should be set to 0. + */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_PAUSE_AUTONEG_PAUSE UINT32_C(0x4) + uint8_t auto_pause; + + uint8_t unused_0; + + /* + * This is the speed that will be used if the autoneg_mode is + * "one_speed" or "one_or_below". If an unsupported speed is selected, + * an error will be generated. + */ + /* 100Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100MB \ + (UINT32_C(0x1) << 0) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_1GB \ + (UINT32_C(0xa) << 0) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2GB \ + (UINT32_C(0x14) << 0) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_2_5GB \ + (UINT32_C(0x19) << 0) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10GB \ + (UINT32_C(0x64) << 0) + /* 20Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_20GB \ + (UINT32_C(0xc8) << 0) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_25GB \ + (UINT32_C(0xfa) << 0) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_40GB \ + (UINT32_C(0x190) << 0) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_50GB \ + (UINT32_C(0x1f4) << 0) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_100GB \ + (UINT32_C(0x3e8) << 0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_10MB \ + (UINT32_C(0xffff) << 0) + uint16_t auto_link_speed; + + /* + * This is a mask of link speeds that will be used if autoneg_mode is + * "mask". If unsupported speed is enabled an error will be generated. 
+ */ + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) + uint16_t auto_link_speed_mask; + + /* This value controls the wirespeed feature. */ + /* Wirespeed feature is disabled. */ + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0) + /* Wirespeed feature is enabled. */ + #define HWRM_PORT_PHY_CFG_INPUT_WIRESPEED_ON (UINT32_C(0x1) << 0) + uint8_t wirespeed; + + /* This value controls the loopback setting for the PHY. */ + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_NONE (UINT32_C(0x0) << 0) + /* + * The HW will be configured with local loopback such that host + * data is sent back to the host without modification. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_LOCAL (UINT32_C(0x1) << 0) + /* + * The HW will be configured with remote loopback such that port + * logic will send packets back out the transmitter that are + * received. + */ + #define HWRM_PORT_PHY_CFG_INPUT_LPBK_REMOTE (UINT32_C(0x2) << 0) + uint8_t lpbk; + + /* + * This value is used to configure the pause that will be used for force + * mode. + */ + /* + * When this bit is '1', Generation of tx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_CFG_INPUT_FORCE_PAUSE_RX UINT32_C(0x2) + uint8_t force_pause; + + uint8_t unused_1; + + /* + * This value controls the pre-emphasis to be used for the link. Driver + * should not set this value (use enable.preemphasis = 0) unless driver + * is sure of setting. Normally HWRM FW will determine proper pre- + * emphasis. + */ + uint32_t preemphasis; + + /* + * Setting for link speed mask that is used to advertise speeds during + * autonegotiation when EEE is enabled. This field is valid only when + * EEE is enabled. The speeds specified in this field shall be a subset + * of speeds specified in auto_link_speed_mask. 
If EEE is enabled, then + * at least one speed shall be provided in this mask. + */ + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD1 UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_100MB UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD2 UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_1GB UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_CFG_INPUT_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint16_t eee_link_speed_mask; + + uint8_t unused_2; + uint8_t unused_3; + + /* + * Requested setting of the TX LPI timer in microseconds. This field is valid + * only when EEE is enabled and TX LPI is enabled. + */ + #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_MASK \ + UINT32_C(0xffffff) + #define HWRM_PORT_PHY_CFG_INPUT_TX_LPI_TIMER_SFT 0 + uint32_t tx_lpi_timer; + + uint32_t unused_4; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_port_phy_cfg_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_port_phy_qcfg */ +/* Description: This command queries the PHY configuration for the port. */ +/* Input (24 bytes) */ + +struct hwrm_port_phy_qcfg_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* Port ID of port that is to be queried.
*/ + uint16_t port_id; + + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (96 bytes) */ +struct hwrm_port_phy_qcfg_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* This value indicates the current link status. */ + /* There is no link or cable detected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_NO_LINK (UINT32_C(0x0) << 0) + /* There is no link, but a cable has been detected. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SIGNAL (UINT32_C(0x1) << 0) + /* There is a link. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_LINK (UINT32_C(0x2) << 0) + uint8_t link; + + uint8_t unused_0; + + /* This value indicates the current link speed of the connection. */ + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100MB \ + (UINT32_C(0x1) << 0) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_1GB \ + (UINT32_C(0xa) << 0) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2GB \ + (UINT32_C(0x14) << 0) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_2_5GB \ + (UINT32_C(0x19) << 0) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10GB \ + (UINT32_C(0x64) << 0) + /* 20Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_20GB \ + (UINT32_C(0xc8) << 0) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_25GB \ + (UINT32_C(0xfa) << 0) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_40GB \ + (UINT32_C(0x190) << 0) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_50GB \ + (UINT32_C(0x1f4) << 0) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_100GB \ + (UINT32_C(0x3e8) << 0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_SPEED_10MB \ + (UINT32_C(0xffff) << 0) + uint16_t link_speed; + + /* This value is indicates the duplex of the current connection. */ + /* Half Duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_HALF (UINT32_C(0x0) << 0) + /* Full duplex connection. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_DUPLEX_FULL (UINT32_C(0x1) << 0) + uint8_t duplex; + + /* + * This value is used to indicate the current pause configuration. When + * autoneg is enabled, this value represents the autoneg results of + * pause configuration. + */ + /* + * When this bit is '1', Generation of tx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PAUSE_RX UINT32_C(0x2) + uint8_t pause; + + /* + * The supported speeds for the port. This is a bit mask. For each speed + * that is supported, the corrresponding bit will be set to '1'. 
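The link_speed codes above appear to encode the speed in units of 100 Mbps (0xa is 1 Gb, 0x64 is 10 Gb), with 10 Mb carried as the special value 0xffff. A small decoder sketch under that assumption (helper name illustrative):

#include <stdint.h>
#include <stdio.h>

/* Special encoding used for 10 Mb links, per the defines above. */
#define HWRM_LINK_SPEED_10MB	UINT32_C(0xffff)

/* Convert an HWRM link_speed code (units of 100 Mbps) to plain Mbps. */
static uint32_t hwrm_link_speed_to_mbps(uint16_t link_speed)
{
	if (link_speed == HWRM_LINK_SPEED_10MB)
		return 10;
	return (uint32_t)link_speed * 100;
}

int main(void)
{
	printf("0x64  -> %u Mbps\n", (unsigned)hwrm_link_speed_to_mbps(0x64));  /* 10000 */
	printf("0x1f4 -> %u Mbps\n", (unsigned)hwrm_link_speed_to_mbps(0x1f4)); /* 50000 */
	return 0;
}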
+ */ + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_10MB \ + UINT32_C(0x2000) + uint16_t support_speeds; + + /* + * Current setting of forced link speed. When the link speed is not + * being forced, this value shall be set to 0. + */ + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100MB \ + (UINT32_C(0x1) << 0) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_1GB \ + (UINT32_C(0xa) << 0) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2GB \ + (UINT32_C(0x14) << 0) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_2_5GB \ + (UINT32_C(0x19) << 0) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10GB \ + (UINT32_C(0x64) << 0) + /* 20Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_20GB \ + (UINT32_C(0xc8) << 0) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB \ + (UINT32_C(0xfa) << 0) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_40GB \ + (UINT32_C(0x190) << 0) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_50GB \ + (UINT32_C(0x1f4) << 0) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_100GB \ + (UINT32_C(0x3e8) << 0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_10MB \ + (UINT32_C(0xffff) << 0) + uint16_t force_link_speed; + + /* Current setting of auto negotiation mode. */ + /* + * Disable autoneg or autoneg disabled. No speeds are selected. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_NONE \ + (UINT32_C(0x0) << 0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ALL_SPEEDS \ + (UINT32_C(0x1) << 0) + /* + * Select only the auto_link_speed speed for autoneg mode. This + * mode has been DEPRECATED. An HWRM client should not use this + * mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_SPEED \ + (UINT32_C(0x2) << 0) + /* + * Select the auto_link_speed or any speed below that speed for + * autoneg. This mode has been DEPRECATED. 
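support_speeds reports capability as one bit per speed, while force_link_speed uses the 100 Mbps-per-unit codes listed above, so the two are looked up separately. The sketch below gates a forced 25 Gb selection on the capability bit; the local macros mirror the defines above, the sample support_speeds value is made up, and reusing the same numeric code on the cfg side is an assumption since that define is not shown in this hunk:

#include <stdint.h>
#include <stdio.h>

/* Mirrors HWRM_PORT_PHY_QCFG_OUTPUT_SUPPORT_SPEEDS_25GB above. */
#define SUPPORT_SPEEDS_25GB	UINT32_C(0x100)
/* Mirrors HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_LINK_SPEED_25GB above. */
#define FORCE_LINK_SPEED_25GB	UINT32_C(0xfa)

int main(void)
{
	uint16_t support_speeds = 0x07c0;	/* made-up capability bits */
	uint16_t force_link_speed = 0;

	if (support_speeds & SUPPORT_SPEEDS_25GB)
		force_link_speed = FORCE_LINK_SPEED_25GB;
	else
		printf("25Gb not supported on this port\n");

	printf("force_link_speed code = 0x%x\n", force_link_speed);
	return 0;
}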
An HWRM client should + * not use this mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_ONE_OR_BELOW \ + (UINT32_C(0x3) << 0) + /* + * Select the speeds based on the corresponding link speed mask + * value that is provided. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_MODE_SPEED_MASK \ + (UINT32_C(0x4) << 0) + uint8_t auto_mode; + + /* + * Current setting of pause autonegotiation. Move autoneg_pause flag + * here. + */ + /* + * When this bit is '1', Generation of tx pause messages has been + * requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_TX UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages has been + * requested. Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_RX UINT32_C(0x2) + /* + * When set to 1, the advertisement of pause is enabled. # When the + * auto_mode is not set to none and this flag is set to 1, then the + * auto_pause bits on this port are being advertised and autoneg pause + * results are being interpreted. # When the auto_mode is not set to + * none and this flag is set to 0, the pause is forced as indicated in + * force_pause, and also advertised as auto_pause bits, but the autoneg + * results are not interpreted since the pause configuration is being + * forced. # When the auto_mode is set to none and this flag is set to + * 1, auto_pause bits should be ignored and should be set to 0. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_PAUSE_AUTONEG_PAUSE \ + UINT32_C(0x4) + uint8_t auto_pause; + + /* + * Current setting for auto_link_speed. This field is only valid when + * auto_mode is set to "one_speed" or "one_or_below". + */ + /* 100Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100MB \ + (UINT32_C(0x1) << 0) + /* 1Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_1GB \ + (UINT32_C(0xa) << 0) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2GB \ + (UINT32_C(0x14) << 0) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_2_5GB \ + (UINT32_C(0x19) << 0) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10GB \ + (UINT32_C(0x64) << 0) + /* 20Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_20GB \ + (UINT32_C(0xc8) << 0) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_25GB \ + (UINT32_C(0xfa) << 0) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_40GB \ + (UINT32_C(0x190) << 0) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_50GB \ + (UINT32_C(0x1f4) << 0) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_100GB \ + (UINT32_C(0x3e8) << 0) + /* 10Mb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_10MB \ + (UINT32_C(0xffff) << 0) + uint16_t auto_link_speed; + + /* + * Current setting for auto_link_speed_mask that is used to advertise + * speeds during autonegotiation. This field is only valid when + * auto_mode is set to "mask". The speeds specified in this field shall + * be a subset of supported speeds on this port. 
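The auto_pause rules spelled out above reduce to one decision: when autonegotiation is enabled and the AUTONEG_PAUSE flag is set, the resolved flow-control state comes from the pause field (the autoneg result), otherwise it comes from force_pause. A sketch of that decision using made-up qcfg output values (local macro names mirror the defines above):

#include <stdint.h>
#include <stdio.h>

/* Values mirror the HWRM_PORT_PHY_QCFG_OUTPUT_* defines above. */
#define AUTO_MODE_NONE			UINT32_C(0x0)
#define AUTO_PAUSE_AUTONEG_PAUSE	UINT32_C(0x4)
#define PAUSE_TX			UINT32_C(0x1)
#define PAUSE_RX			UINT32_C(0x2)

int main(void)
{
	/* Example values as they might appear in hwrm_port_phy_qcfg_output. */
	uint8_t auto_mode   = 0x1;			/* all speeds */
	uint8_t auto_pause  = AUTO_PAUSE_AUTONEG_PAUSE;
	uint8_t pause       = PAUSE_TX | PAUSE_RX;	/* autoneg result */
	uint8_t force_pause = 0;
	uint8_t effective;

	if (auto_mode != AUTO_MODE_NONE && (auto_pause & AUTO_PAUSE_AUTONEG_PAUSE))
		effective = pause;		/* negotiated flow control */
	else
		effective = force_pause;	/* forced flow control */

	printf("tx pause: %s, rx pause: %s\n",
	       (effective & PAUSE_TX) ? "on" : "off",
	       (effective & PAUSE_RX) ? "on" : "off");
	return 0;
}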
+ */ + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_AUTO_LINK_SPEED_MASK_10MB \ + UINT32_C(0x2000) + uint16_t auto_link_speed_mask; + + /* Current setting for wirespeed. */ + /* Wirespeed feature is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_OFF (UINT32_C(0x0) << 0) + /* Wirespeed feature is enabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_WIRESPEED_ON (UINT32_C(0x1) << 0) + uint8_t wirespeed; + + /* Current setting for loopback. */ + /* No loopback is selected. Normal operation. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_NONE (UINT32_C(0x0) << 0) + /* + * The HW will be configured with local loopback such that host + * data is sent back to the host without modification. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_LOCAL (UINT32_C(0x1) << 0) + /* + * The HW will be configured with remote loopback such that port + * logic will send packets back out the transmitter that are + * received. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LPBK_REMOTE (UINT32_C(0x2) << 0) + uint8_t lpbk; + + /* + * Current setting of forced pause. When the pause configuration is not + * being forced, then this value shall be set to 0. + */ + /* + * When this bit is '1', Generation of tx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_FORCE_PAUSE_RX \ + UINT32_C(0x2) + uint8_t force_pause; + + /* + * This value indicates the current status of the optics module on this + * port. + */ + /* Module is inserted and accepted */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NONE \ + (UINT32_C(0x0) << 0) + /* Module is rejected and transmit side Laser is disabled. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_DISABLETX \ + (UINT32_C(0x1) << 0) + /* Module mismatch warning. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_WARNINGMSG \ + (UINT32_C(0x2) << 0) + /* Module is rejected and powered down. 
*/ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_PWRDOWN \ + (UINT32_C(0x3) << 0) + /* Module is not inserted. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTINSERTED \ + (UINT32_C(0x4) << 0) + /* Module status is not applicable. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MODULE_STATUS_NOTAPPLICABLE \ + (UINT32_C(0xff) << 0) + uint8_t module_status; + + /* Current setting for preemphasis. */ + uint32_t preemphasis; + + /* This field represents the major version of the PHY. */ + uint8_t phy_maj; + + /* This field represents the minor version of the PHY. */ + uint8_t phy_min; + + /* This field represents the build version of the PHY. */ + uint8_t phy_bld; + + /* This value represents a PHY type. */ + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_UNKNOWN \ + (UINT32_C(0x0) << 0) + /* BASE-CR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASECR \ + (UINT32_C(0x1) << 0) + /* BASE-KR4 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR4 \ + (UINT32_C(0x2) << 0) + /* BASE-LR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASELR \ + (UINT32_C(0x3) << 0) + /* BASE-SR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASESR \ + (UINT32_C(0x4) << 0) + /* BASE-KR2 (Deprecated) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR2 \ + (UINT32_C(0x5) << 0) + /* BASE-KX */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKX \ + (UINT32_C(0x6) << 0) + /* BASE-KR */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASEKR \ + (UINT32_C(0x7) << 0) + /* BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASET \ + (UINT32_C(0x8) << 0) + /* EEE capable BASE-T */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_BASETE \ + (UINT32_C(0x9) << 0) + /* SGMII connected external PHY */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_TYPE_SGMIIEXTPHY \ + (UINT32_C(0xa) << 0) + uint8_t phy_type; + + /* This value represents a media type. */ + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_UNKNOWN \ + (UINT32_C(0x0) << 0) + /* Twisted Pair */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_TP (UINT32_C(0x1) << 0) + /* Direct Attached Copper */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_DAC \ + (UINT32_C(0x2) << 0) + /* Fiber */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_MEDIA_TYPE_FIBRE \ + (UINT32_C(0x3) << 0) + uint8_t media_type; + + /* This value represents a transceiver type. */ + /* PHY and MAC are in the same package */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_INTERNAL \ + (UINT32_C(0x1) << 0) + /* PHY and MAC are in different packages */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_PKG_TYPE_XCVR_EXTERNAL \ + (UINT32_C(0x2) << 0) + uint8_t xcvr_pkg_type; + + /* + * This field represents flags related to EEE configuration. These EEE + * configuration flags are valid only when the auto_mode is not set to + * none (in other words autonegotiation is enabled). + */ + /* This field represents PHY address. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_MASK UINT32_C(0x1f) + #define HWRM_PORT_PHY_QCFG_OUTPUT_PHY_ADDR_SFT 0 + /* + * When set to 1, Energy Efficient Ethernet (EEE) mode is enabled. + * Speeds for autoneg with EEE mode enabled are based on + * eee_link_speed_mask. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ENABLED \ + UINT32_C(0x20) + /* + * This flag is valid only when eee_enabled is set to 1. # If + * eee_enabled is set to 0, then EEE mode is disabled and this flag + * shall be ignored. # If eee_enabled is set to 1 and this flag is set + * to 1, then Energy Efficient Ethernet (EEE) mode is enabled and in + * use. 
# If eee_enabled is set to 1 and this flag is set to 0, then + * Energy Efficient Ethernet (EEE) mode is enabled but is currently not + * in use. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_ACTIVE \ + UINT32_C(0x40) + /* + * This flag is valid only when eee_enabled is set to 1. # If + * eee_enabled is set to 0, then EEE mode is disabled and this flag + * shall be ignored. # If eee_enabled is set to 1 and this flag is set + * to 1, then Energy Efficient Ethernet (EEE) mode is enabled and TX LPI + * is enabled. # If eee_enabled is set to 1 and this flag is set to 0, + * then Energy Efficient Ethernet (EEE) mode is enabled but TX LPI is + * disabled. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_EEE_TX_LPI \ + UINT32_C(0x80) + /* + * This field represents flags related to EEE configuration. These EEE + * configuration flags are valid only when the auto_mode is not set to + * none (in other words autonegotiation is enabled). + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_MASK \ + UINT32_C(0xe0) + #define HWRM_PORT_PHY_QCFG_OUTPUT_EEE_CONFIG_SFT 5 + uint8_t eee_config_phy_addr; + + /* Reserved field, set to 0 */ + /* + * When set to 1, the parallel detection is used to determine the speed + * of the link partner. Parallel detection is used when a + * autonegotiation capable device is connected to a link parter that is + * not capable of autonegotiation. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_PARALLEL_DETECT \ + UINT32_C(0x1) + /* Reserved field, set to 0 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_MASK UINT32_C(0xfe) + #define HWRM_PORT_PHY_QCFG_OUTPUT_RESERVED_SFT 1 + uint8_t parallel_detect; + + /* + * The advertised speeds for the port by the link partner. Each + * advertised speed will be set to '1'. + */ + /* 100Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MBHD \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100MB \ + UINT32_C(0x2) + /* 1Gb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GBHD \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_1GB \ + UINT32_C(0x8) + /* 2Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2GB \ + UINT32_C(0x10) + /* 2.5Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_2_5GB \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10GB \ + UINT32_C(0x40) + /* 20Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_20GB \ + UINT32_C(0x80) + /* 25Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_25GB \ + UINT32_C(0x100) + /* 40Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_40GB \ + UINT32_C(0x200) + /* 50Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_50GB \ + UINT32_C(0x400) + /* 100Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_100GB \ + UINT32_C(0x800) + /* 10Mb link speed (Half-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MBHD \ + UINT32_C(0x1000) + /* 10Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_SPEEDS_10MB \ + UINT32_C(0x2000) + uint16_t link_partner_adv_speeds; + + /* + * The advertised autoneg for the port by the link partner. This field + * is deprecated and should be set to 0. 
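The eee_config_phy_addr byte above packs the PHY address into bits 0-4 and the three EEE flags into bits 5-7, so both are recovered with simple masking. A small unpacking sketch (the raw example value is made up; the macros mirror the defines above):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Values mirror the HWRM_PORT_PHY_QCFG_OUTPUT_* defines above. */
#define PHY_ADDR_MASK		UINT32_C(0x1f)
#define EEE_CONFIG_EEE_ENABLED	UINT32_C(0x20)
#define EEE_CONFIG_EEE_ACTIVE	UINT32_C(0x40)
#define EEE_CONFIG_EEE_TX_LPI	UINT32_C(0x80)

int main(void)
{
	uint8_t eee_config_phy_addr = 0x61;	/* made-up raw value */

	uint8_t phy_addr = eee_config_phy_addr & PHY_ADDR_MASK;
	bool eee_enabled = eee_config_phy_addr & EEE_CONFIG_EEE_ENABLED;
	bool eee_active  = eee_config_phy_addr & EEE_CONFIG_EEE_ACTIVE;
	bool tx_lpi      = eee_config_phy_addr & EEE_CONFIG_EEE_TX_LPI;

	printf("phy_addr=%d enabled=%d active=%d tx_lpi=%d\n",
	       phy_addr, eee_enabled, eee_active, tx_lpi);
	return 0;
}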
+ */ + /* + * Disable autoneg or autoneg disabled. No speeds are selected. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_NONE \ + (UINT32_C(0x0) << 0) + /* Select all possible speeds for autoneg mode. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ALL_SPEEDS\ + (UINT32_C(0x1) << 0) + /* + * Select only the auto_link_speed speed for autoneg mode. This + * mode has been DEPRECATED. An HWRM client should not use this + * mode. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_SPEED \ + (UINT32_C(0x2) << 0) + /* + * Select the auto_link_speed or any speed below that speed for + * autoneg. This mode has been DEPRECATED. An HWRM client should + * not use this mode. + */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_ONE_OR_BELOW \ + (UINT32_C(0x3) << 0) + /* + * Select the speeds based on the corresponding link speed mask + * value that is provided. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_AUTO_MODE_SPEED_MASK\ + (UINT32_C(0x4) << 0) + uint8_t link_partner_adv_auto_mode; + + /* The advertised pause settings on the port by the link partner. */ + /* + * When this bit is '1', Generation of tx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_TX \ + UINT32_C(0x1) + /* + * When this bit is '1', Reception of rx pause messages is supported. + * Disabled otherwise. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_PAUSE_RX \ + UINT32_C(0x2) + uint8_t link_partner_adv_pause; + + /* + * Current setting for link speed mask that is used to advertise speeds + * during autonegotiation when EEE is enabled. This field is valid only + * when eee_enabled flags is set to 1. The speeds specified in this + * field shall be a subset of speeds specified in auto_link_speed_mask. + */ + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint16_t adv_eee_link_speed_mask; + + /* + * Current setting for link speed mask that is advertised by the link + * partner when EEE is enabled. This field is valid only when + * eee_enabled flags is set to 1. 
+ */ + /* Reserved */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD1 \ + UINT32_C(0x1) + /* 100Mb link speed (Full-duplex) */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_100MB \ + UINT32_C(0x2) + /* Reserved */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD2 \ + UINT32_C(0x4) + /* 1Gb link speed (Full-duplex) */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_1GB \ + UINT32_C(0x8) + /* Reserved */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD3 \ + UINT32_C(0x10) + /* Reserved */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_RSVD4 \ + UINT32_C(0x20) + /* 10Gb link speed */ + #define \ + HWRM_PORT_PHY_QCFG_OUTPUT_LINK_PARTNER_ADV_EEE_LINK_SPEED_MASK_10GB \ + UINT32_C(0x40) + uint16_t link_partner_adv_eee_link_speed_mask; + + /* This value represents transceiver identifier type. */ + /* + * Current setting of TX LPI timer in microseconds. This field is valid + * only when_eee_enabled flag is set to 1 and tx_lpi_enabled is set to + * 1. + */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_MASK \ + UINT32_C(0xffffff) + #define HWRM_PORT_PHY_QCFG_OUTPUT_TX_LPI_TIMER_SFT 0 + /* This value represents transceiver identifier type. */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_MASK \ + UINT32_C(0xff000000) + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFT \ + 24 + /* Unknown */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_UNKNOWN \ + (UINT32_C(0x0) << 24) + /* SFP/SFP+/SFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_SFP \ + (UINT32_C(0x3) << 24) + /* QSFP */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP \ + (UINT32_C(0xc) << 24) + /* QSFP+ */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFPPLUS \ + (UINT32_C(0xd) << 24) + /* QSFP28 */ + #define HWRM_PORT_PHY_QCFG_OUTPUT_XCVR_IDENTIFIER_TYPE_QSFP28 \ + (UINT32_C(0x11) << 24) + uint32_t xcvr_identifier_type_tx_lpi_timer; + + uint32_t unused_1; + + /* + * Up to 16 bytes of null padded ASCII string representing PHY vendor. + * If the string is set to null, then the vendor name is not available. + */ + char phy_vendor_name[16]; + + /* + * Up to 16 bytes of null padded ASCII string that identifies vendor + * specific part number of the PHY. If the string is set to null, then + * the vendor specific part number is not available. + */ + char phy_vendor_partnumber[16]; + + uint32_t unused_2; + uint8_t unused_3; + uint8_t unused_4; + uint8_t unused_5; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_ver_get */ +/* + * Description: This function is called by a driver to determine the HWRM + * interface version supported by the HWRM firmware, the version of HWRM + * firmware implementation, the name of HWRM firmware, the versions of other + * embedded firmwares, and the names of other embedded firmwares, etc. Any + * interface or firmware version with major = 0, minor = 0, and update = 0 shall + * be considered an invalid version. 
+ */ + +/* Input (24 bytes) */ +struct hwrm_ver_get_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * This field represents the major version of HWRM interface + * specification supported by the driver HWRM implementation. The + * interface major version is intended to change only when non backward + * compatible changes are made to the HWRM interface specification. + */ + uint8_t hwrm_intf_maj; + + /* + * This field represents the minor version of HWRM interface + * specification supported by the driver HWRM implementation. A change + * in interface minor version is used to reflect significant backward + * compatible modification to HWRM interface specification. This can be + * due to addition or removal of functionality. HWRM interface + * specifications with the same major version but different minor + * versions are compatible. + */ + uint8_t hwrm_intf_min; + + /* + * This field represents the update version of HWRM interface + * specification supported by the driver HWRM implementation. The + * interface update version is used to reflect minor changes or bug + * fixes to a released HWRM interface specification. + */ + uint8_t hwrm_intf_upd; + + uint8_t unused_0[5]; +} __attribute__((packed)); + +/* Output (128 bytes) */ +struct hwrm_ver_get_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* + * This field represents the major version of HWRM interface + * specification supported by the HWRM implementation. The interface + * major version is intended to change only when non backward compatible + * changes are made to the HWRM interface specification. A HWRM + * implementation that is compliant with this specification shall + * provide value of 1 in this field. + */ + uint8_t hwrm_intf_maj; + + /* + * This field represents the minor version of HWRM interface + * specification supported by the HWRM implementation. A change in + * interface minor version is used to reflect significant backward + * compatible modification to HWRM interface specification. This can be + * due to addition or removal of functionality. HWRM interface + * specifications with the same major version but different minor + * versions are compatible. 
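Tying the common request-header fields together: per the comments above, cmpl_ring is -1 when no completion-ring notification is wanted, target_id 0xFFFF addresses the HWRM itself, and resp_addr names a 16-byte-aligned, zeroed response area. The sketch below prepares a version query under those rules; the struct is re-declared locally from the layout above so the snippet stands alone, and HWRM_VER_GET_REQ_TYPE plus the driver-side interface version numbers are placeholders, not values taken from this header:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Local mirror of the hwrm_ver_get_input layout documented above. */
struct hwrm_ver_get_input {
	uint16_t req_type;
	uint16_t cmpl_ring;
	uint16_t seq_id;
	uint16_t target_id;
	uint64_t resp_addr;
	uint8_t  hwrm_intf_maj;
	uint8_t  hwrm_intf_min;
	uint8_t  hwrm_intf_upd;
	uint8_t  unused_0[5];
} __attribute__((packed));

#define HWRM_VER_GET_REQ_TYPE	0x0	/* placeholder: the real command ID comes from the header */
#define HWRM_TARGET_ID_HWRM	0xFFFF	/* "0xFFFF - HWRM" per the field description above */

int main(void)
{
	/* Stand-in for the 16B-aligned, zeroed buffer the response is written to. */
	static uint8_t resp_buf[128] __attribute__((aligned(16)));
	struct hwrm_ver_get_input req;

	memset(&req, 0, sizeof(req));
	req.req_type  = HWRM_VER_GET_REQ_TYPE;
	req.cmpl_ring = (uint16_t)-1;		/* no completion-ring notification */
	req.seq_id    = 1;			/* illustrative sequence number */
	req.target_id = HWRM_TARGET_ID_HWRM;
	req.resp_addr = (uintptr_t)resp_buf;	/* a real driver would use a DMA address */
	req.hwrm_intf_maj = 1;			/* driver-supported interface version, illustrative */
	req.hwrm_intf_min = 0;
	req.hwrm_intf_upd = 0;

	printf("prepared %zu-byte hwrm_ver_get request\n", sizeof(req));
	return 0;
}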
A HWRM implementation that is compliant with + * this specification shall provide value of 0 in this field. + */ + uint8_t hwrm_intf_min; + + /* + * This field represents the update version of HWRM interface + * specification supported by the HWRM implementation. The interface + * update version is used to reflect minor changes or bug fixes to a + * released HWRM interface specification. A HWRM implementation that is + * compliant with this specification shall provide value of 1 in this + * field. + */ + uint8_t hwrm_intf_upd; + + uint8_t hwrm_intf_rsvd; + + /* + * This field represents the major version of HWRM firmware. A change in + * firmware major version represents a major firmware release. + */ + uint8_t hwrm_fw_maj; + + /* + * This field represents the minor version of HWRM firmware. A change in + * firmware minor version represents significant firmware functionality + * changes. + */ + uint8_t hwrm_fw_min; + + /* + * This field represents the build version of HWRM firmware. A change in + * firmware build version represents bug fixes to a released firmware. + */ + uint8_t hwrm_fw_bld; + + /* + * This field is a reserved field. This field can be used to represent + * firmware branches or customer specific releases tied to a specific + * (major,minor,update) version of the HWRM firmware. + */ + uint8_t hwrm_fw_rsvd; + + /* + * This field represents the major version of mgmt firmware. A change in + * major version represents a major release. + */ + uint8_t mgmt_fw_maj; + + /* + * This field represents the minor version of mgmt firmware. A change in + * minor version represents significant functionality changes. + */ + uint8_t mgmt_fw_min; + + /* + * This field represents the build version of mgmt firmware. A change in + * update version represents bug fixes. + */ + uint8_t mgmt_fw_bld; + + /* + * This field is a reserved field. This field can be used to represent + * firmware branches or customer specific releases tied to a specific + * (major,minor,update) version + */ + uint8_t mgmt_fw_rsvd; + + /* + * This field represents the major version of network control firmware. + * A change in major version represents a major release. + */ + uint8_t netctrl_fw_maj; + + /* + * This field represents the minor version of network control firmware. + * A change in minor version represents significant functionality + * changes. + */ + uint8_t netctrl_fw_min; + + /* + * This field represents the build version of network control firmware. + * A change in update version represents bug fixes. + */ + uint8_t netctrl_fw_bld; + + /* + * This field is a reserved field. This field can be used to represent + * firmware branches or customer specific releases tied to a specific + * (major,minor,update) version + */ + uint8_t netctrl_fw_rsvd; + + /* + * This field is reserved for future use. The responder should set it to + * 0. The requester should ignore this field. + */ + uint32_t reserved1; + + /* + * This field represents the major version of RoCE firmware. A change in + * major version represents a major release. + */ + uint8_t roce_fw_maj; + + /* + * This field represents the minor version of RoCE firmware. A change in + * minor version represents significant functionality changes. + */ + uint8_t roce_fw_min; + + /* + * This field represents the build version of RoCE firmware. A change in + * update version represents bug fixes. + */ + uint8_t roce_fw_bld; + + /* + * This field is a reserved field. 
This field can be used to represent + * firmware branches or customer specific releases tied to a specific + * (major,minor,update) version + */ + uint8_t roce_fw_rsvd; + + /* + * This field represents the name of HWRM FW (ASCII chars without NULL + * at the end). + */ + char hwrm_fw_name[16]; + + /* + * This field represents the name of mgmt FW (ASCII chars without NULL + * at the end). + */ + char mgmt_fw_name[16]; + + /* + * This field represents the name of network control firmware (ASCII + * chars without NULL at the end). + */ + char netctrl_fw_name[16]; + + /* + * This field is reserved for future use. The responder should set it to + * 0. The requester should ignore this field. + */ + uint32_t reserved2[4]; + + /* + * This field represents the name of RoCE FW (ASCII chars without NULL + * at the end). + */ + char roce_fw_name[16]; + + /* This field returns the chip number. */ + uint16_t chip_num; + + /* This field returns the revision of chip. */ + uint8_t chip_rev; + + /* This field returns the chip metal number. */ + uint8_t chip_metal; + + /* This field returns the bond id of the chip. */ + uint8_t chip_bond_id; + + /* + * This value indicates the type of platform used for chip + * implementation. + */ + /* ASIC */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_ASIC \ + (UINT32_C(0x0) << 0) + /* FPGA platform of the chip. */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_FPGA \ + (UINT32_C(0x1) << 0) + /* Palladium platform of the chip. */ + #define HWRM_VER_GET_OUTPUT_CHIP_PLATFORM_TYPE_PALLADIUM \ + (UINT32_C(0x2) << 0) + uint8_t chip_platform_type; + + /* + * This field returns the maximum value of request window that is + * supported by the HWRM. The request window is mapped into device + * address space using MMIO. + */ + uint16_t max_req_win_len; + + /* + * This field returns the maximum value of response buffer in bytes. If + * a request specifies the response buffer length that is greater than + * this value, then the HWRM should fail it. The value of this field + * shall be 4KB or more. + */ + uint16_t max_resp_len; + + /* + * This field returns the default request timeout value in milliseconds. + */ + uint16_t def_req_timeout; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_queue_qportcfg */ +/* + * Description: This function is called by a driver to query queue configuration + * of a port. # The HWRM shall at least advertise one queue with lossy service + * profile. # The driver shall use this command to query queue ids before + * configuring or using any queues. # If a service profile is not set for a + * queue, then the driver shall not use that queue without configuring a service + * profile for it. # If the driver is not allowed to configure service profiles, + * then the driver shall only use queues for which service profiles are pre- + * configured. + */ + +/* Input (24 bytes) */ +struct hwrm_queue_qportcfg_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. 
+ */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * Enumeration denoting the RX, TX type of the resource. This + * enumeration is used for resources that are similar for both TX and RX + * paths of the chip. + */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH \ + UINT32_C(0x1) + /* tx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_TX \ + (UINT32_C(0x0) << 0) + /* rx path */ + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX \ + (UINT32_C(0x1) << 0) + #define HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_LAST \ + HWRM_QUEUE_QPORTCFG_INPUT_FLAGS_PATH_RX + uint32_t flags; + + /* + * Port ID of port for which the queue configuration is being queried. + * This field is only required when sent by IPC. + */ + uint16_t port_id; + + uint16_t unused_0; +} __attribute__((packed)); + +/* hwrm_ring_alloc */ +/* + * Description: This command allocates and does basic preparation for a ring. + */ + +/* Input (80 bytes) */ +struct hwrm_ring_alloc_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* This bit must be '1' for the Reserved1 field to be configured. */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED1 UINT32_C(0x1) + /* This bit must be '1' for the Reserved2 field to be configured. */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED2 UINT32_C(0x2) + /* This bit must be '1' for the Reserved3 field to be configured. */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED3 UINT32_C(0x4) + /* + * This bit must be '1' for the stat_ctx_id_valid field to be + * configured. + */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_STAT_CTX_ID_VALID UINT32_C(0x8) + /* This bit must be '1' for the Reserved4 field to be configured. */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_RESERVED4 UINT32_C(0x10) + /* This bit must be '1' for the max_bw_valid field to be configured. */ + #define HWRM_RING_ALLOC_INPUT_ENABLES_MAX_BW_VALID UINT32_C(0x20) + uint32_t enables; + + /* Ring Type. 
*/ + /* Completion Ring (CR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0) + /* TX Ring (TR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0) + /* RX Ring (RR) */ + #define HWRM_RING_ALLOC_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0) + uint8_t ring_type; + + uint8_t unused_0; + uint16_t unused_1; + + /* This value is a pointer to the page table for the Ring. */ + uint64_t page_tbl_addr; + + /* First Byte Offset of the first entry in the first page. */ + uint32_t fbo; + + /* + * Actual page size in 2^page_size. The supported range is increments in + * powers of 2 from 16 bytes to 1GB. - 4 = 16 B Page size is 16 B. - 12 + * = 4 KB Page size is 4 KB. - 13 = 8 KB Page size is 8 KB. - 16 = 64 KB + * Page size is 64 KB. - 22 = 2 MB Page size is 2 MB. - 23 = 4 MB Page + * size is 4 MB. - 31 = 1 GB Page size is 1 GB. + */ + uint8_t page_size; + + /* + * This value indicates the depth of page table. For this version of the + * specification, value other than 0 or 1 shall be considered as an + * invalid value. When the page_tbl_depth = 0, then it is treated as a + * special case with the following. 1. FBO and page size fields are not + * valid. 2. page_tbl_addr is the physical address of the first element + * of the ring. + */ + uint8_t page_tbl_depth; + + uint8_t unused_2; + uint8_t unused_3; + + /* + * Number of 16B units in the ring. Minimum size for a ring is 16 16B + * entries. + */ + uint32_t length; + + /* + * Logical ring number for the ring to be allocated. This value + * determines the position in the doorbell area where the update to the + * ring will be made. For completion rings, this value is also the MSI-X + * vector number for the function the completion ring is associated + * with. + */ + uint16_t logical_id; + + /* + * This field is used only when ring_type is a TX ring. This value + * indicates what completion ring the TX ring is associated with. + */ + uint16_t cmpl_ring_id; + + /* + * This field is used only when ring_type is a TX ring. This value + * indicates what CoS queue the TX ring is associated with. + */ + uint16_t queue_id; + + uint8_t unused_4; + uint8_t unused_5; + + /* This field is reserved for the future use. It shall be set to 0. */ + uint32_t reserved1; + /* This field is reserved for the future use. It shall be set to 0. */ + uint16_t reserved2; + + uint8_t unused_6; + uint8_t unused_7; + /* This field is reserved for the future use. It shall be set to 0. */ + uint32_t reserved3; + + /* + * This field is used only when ring_type is a TX ring. This input + * indicates what statistics context this ring should be associated + * with. + */ + uint32_t stat_ctx_id; + + /* This field is reserved for the future use. It shall be set to 0. */ + uint32_t reserved4; + + /* + * This field is used only when ring_type is a TX ring. Maximum BW + * allocated to this TX ring in Mbps. The HWRM will translate this value + * into byte counter and time interval used for this ring inside the + * device. + */ + uint32_t max_bw; + + /* + * This field is used only when ring_type is a Completion ring. This + * value indicates what interrupt mode should be used on this completion + * ring. Note: In the legacy interrupt mode, no more than 16 completion + * rings are allowed. 
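Two encodings in the ring-allocation request above are easy to get wrong: page_size carries log2 of the page size in bytes (12 for 4 KB, 22 for 2 MB), and length counts 16-byte units rather than bytes. A short sketch of deriving both (helper name and the example sizes are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Return log2(bytes), i.e. the encoding used by the page_size field above. */
static uint8_t ring_page_size_log2(uint32_t bytes)
{
	uint8_t lg = 0;

	while (bytes > 1) {
		bytes >>= 1;
		lg++;
	}
	return lg;
}

int main(void)
{
	uint32_t page_bytes = 4096;		/* 4 KB pages */
	uint32_t ring_bytes = 1024 * 16;	/* ring memory in bytes */

	uint8_t  page_size = ring_page_size_log2(page_bytes);	/* 12 */
	uint32_t length    = ring_bytes / 16;			/* ring length in 16B units */

	printf("page_size=%u length=%u\n", page_size, (unsigned)length);
	return 0;
}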
+ */ + /* Legacy INTA */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_LEGACY (UINT32_C(0x0) << 0) + /* Reserved */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_RSVD (UINT32_C(0x1) << 0) + /* MSI-X */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_MSIX (UINT32_C(0x2) << 0) + /* No Interrupt - Polled mode */ + #define HWRM_RING_ALLOC_INPUT_INT_MODE_POLL (UINT32_C(0x3) << 0) + uint8_t int_mode; + + uint8_t unused_8[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ + +struct hwrm_ring_alloc_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* Physical number of ring allocated. */ + uint16_t ring_id; + + /* Logical number of ring allocated. */ + uint16_t logical_ring_id; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_ring_free */ +/* + * Description: This command is used to free a ring and associated resources. + */ +/* Input (24 bytes) */ + +struct hwrm_ring_free_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* Ring Type. */ + /* Completion Ring (CR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_CMPL (UINT32_C(0x0) << 0) + /* TX Ring (TR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_TX (UINT32_C(0x1) << 0) + /* RX Ring (RR) */ + #define HWRM_RING_FREE_INPUT_RING_TYPE_RX (UINT32_C(0x2) << 0) + uint8_t ring_type; + + uint8_t unused_0; + + /* Physical number of ring allocated. */ + uint16_t ring_id; + + uint32_t unused_1; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_ring_free_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. 
*/ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_ring_grp_alloc */ +/* + * Description: This API allocates and does basic preparation for a ring group. + */ + +/* Input (24 bytes) */ +struct hwrm_ring_grp_alloc_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* This value identifies the CR associated with the ring group. */ + uint16_t cr; + + /* This value identifies the main RR associated with the ring group. */ + uint16_t rr; + + /* + * This value identifies the aggregation RR associated with the ring + * group. If this value is 0xFF... (All Fs), then no Aggregation ring + * will be set. + */ + uint16_t ar; + + /* + * This value identifies the statistics context associated with the ring + * group. + */ + uint16_t sc; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_ring_grp_alloc_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* + * This is the ring group ID value. Use this value to program the + * default ring group for the VNIC or as table entries in an RSS/COS + * context. + */ + uint32_t ring_group_id; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. 
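The ring-group request above just binds existing IDs together: a completion ring, the main RX ring, an optional aggregation ring (all-ones when unused, per the comment) and a statistics context. A tiny sketch with a locally declared holder struct and made-up IDs:

#include <stdint.h>
#include <stdio.h>

/* Local holder for the cr/rr/ar/sc fields documented above. */
struct ring_grp_ids {
	uint16_t cr;	/* completion ring */
	uint16_t rr;	/* main RX ring */
	uint16_t ar;	/* aggregation RX ring, all-ones if unused */
	uint16_t sc;	/* statistics context */
};

int main(void)
{
	struct ring_grp_ids grp = {
		.cr = 3,	/* example completion ring ID */
		.rr = 7,	/* example RX ring ID */
		.ar = 0xFFFF,	/* no aggregation ring */
		.sc = 1,	/* example stat context ID */
	};

	printf("cr=%u rr=%u ar=0x%x sc=%u\n", grp.cr, grp.rr, grp.ar, grp.sc);
	return 0;
}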
+ */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_ring_grp_free */ +/* + * Description: This API frees a ring group and associated resources. # If a + * ring in the ring group is reset or free, then the associated rings in the + * ring group shall also be reset/free using hwrm_ring_free. # A function driver + * shall always use hwrm_ring_grp_free after freeing all rings in a group. # As + * a part of executing this command, the HWRM shall reset all associated ring + * group resources. + */ + +/* Input (24 bytes) */ +struct hwrm_ring_grp_free_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* This is the ring group ID value. */ + uint32_t ring_group_id; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_ring_grp_free_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_stat_ctx_alloc */ +/* + * Description: This command allocates and does basic preparation for a stat + * context. + */ + +/* Input (32 bytes) */ +struct hwrm_stat_ctx_alloc_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 
0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* This is the address for statistic block. */ + uint64_t stats_dma_addr; + + /* + * The statistic block update period in ms. e.g. 250ms, 500ms, 750ms, + * 1000ms. + */ + uint32_t update_period_ms; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_alloc_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* This is the statistics context ID value. */ + uint32_t stat_ctx_id; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_stat_ctx_clr_stats */ +/* Description: This command clears statistics of a context. */ + +/* Input (24 bytes) */ +struct hwrm_stat_ctx_clr_stats_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_stat_ctx_clr_stats_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. 
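The statistics-context request above needs only two interesting values: the DMA address of the statistics block and an update period in milliseconds (the comment suggests steps such as 250, 500, 750 or 1000 ms). A small sketch (the block size and period are illustrative, and a real driver would supply a genuine DMA/IOVA address rather than a host pointer):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Stand-in for the DMA-able statistics block the device will update. */
	static uint8_t stats_block[512] __attribute__((aligned(16)));

	uint64_t stats_dma_addr   = (uintptr_t)stats_block;	/* physical/IOVA in a real driver */
	uint32_t update_period_ms = 1000;			/* refresh stats once per second */

	printf("stats block at %p, update every %u ms\n",
	       (void *)(uintptr_t)stats_dma_addr, (unsigned)update_period_ms);
	return 0;
}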
+ */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_stat_ctx_free */ +/* Description: This command is used to free a stat context. */ +/* Input (24 bytes) */ + +struct hwrm_stat_ctx_free_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* ID of the statistics context that is being queried. */ + uint32_t stat_ctx_id; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ + +struct hwrm_stat_ctx_free_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* This is the statistics context ID value. */ + uint32_t stat_ctx_id; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_vnic_alloc */ +/* + * Description: This VNIC is a resource in the RX side of the chip that is used + * to represent a virtual host "interface". # At the time of VNIC allocation or + * configuration, the function can specify whether it wants the requested VNIC + * to be the default VNIC for the function or not. # If a function requests + * allocation of a VNIC for the first time and a VNIC is successfully allocated + * by the HWRM, then the HWRM shall make the allocated VNIC as the default VNIC + * for that function. # The default VNIC shall be used for the default action + * for a partition or function. 
# For each VNIC allocated on a function, a + * mapping on the RX side to map the allocated VNIC to source virtual interface + * shall be performed by the HWRM. This should be hidden to the function driver + * requesting the VNIC allocation. This enables broadcast/multicast replication + * with source knockout. # If multicast replication with source knockout is + * enabled, then the internal VNIC to SVIF mapping data structures shall be + * programmed at the time of VNIC allocation. + */ + +/* Input (24 bytes) */ +struct hwrm_vnic_alloc_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * When this bit is '1', this VNIC is requested to be the default VNIC + * for this function. + */ + #define HWRM_VNIC_ALLOC_INPUT_FLAGS_DEFAULT UINT32_C(0x1) + uint32_t flags; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_vnic_alloc_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* Logical vnic ID */ + uint32_t vnic_id; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_vnic_cfg */ +/* Description: Configure the RX VNIC structure. */ + +/* Input (40 bytes) */ +struct hwrm_vnic_cfg_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 
0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * When this bit is '1', the VNIC is requested to be the default VNIC + * for the function. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_DEFAULT UINT32_C(0x1) + /* + * When this bit is '1', the VNIC is being configured to strip VLAN in + * the RX path. If set to '0', then VLAN stripping is disabled on this + * VNIC. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_VLAN_STRIP_MODE UINT32_C(0x2) + /* + * When this bit is '1', the VNIC is being configured to buffer receive + * packets in the hardware until the host posts new receive buffers. If + * set to '0', then bd_stall is being configured to be disabled on this + * VNIC. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_BD_STALL_MODE UINT32_C(0x4) + /* + * When this bit is '1', the VNIC is being configured to receive both + * RoCE and non-RoCE traffic. If set to '0', then this VNIC is not + * configured to be operating in dual VNIC mode. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_DUAL_VNIC_MODE UINT32_C(0x8) + /* + * When this flag is set to '1', the VNIC is requested to be configured + * to receive only RoCE traffic. If this flag is set to '0', then this + * flag shall be ignored by the HWRM. If roce_dual_vnic_mode flag is set + * to '1', then the HWRM client shall not set this flag to '1'. + */ + #define HWRM_VNIC_CFG_INPUT_FLAGS_ROCE_ONLY_VNIC_MODE UINT32_C(0x10) + uint32_t flags; + + /* This bit must be '1' for the dflt_ring_grp field to be configured. */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_DFLT_RING_GRP UINT32_C(0x1) + /* This bit must be '1' for the rss_rule field to be configured. */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_RSS_RULE UINT32_C(0x2) + /* This bit must be '1' for the cos_rule field to be configured. */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_COS_RULE UINT32_C(0x4) + /* This bit must be '1' for the lb_rule field to be configured. */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_LB_RULE UINT32_C(0x8) + /* This bit must be '1' for the mru field to be configured. */ + #define HWRM_VNIC_CFG_INPUT_ENABLES_MRU UINT32_C(0x10) + uint32_t enables; + + /* Logical vnic ID */ + uint16_t vnic_id; + + /* + * Default Completion ring for the VNIC. This ring will be chosen if + * packet does not match any RSS rules and if there is no COS rule. + */ + uint16_t dflt_ring_grp; + + /* + * RSS ID for RSS rule/table structure. 0xFF... (All Fs) if there is no + * RSS rule. + */ + uint16_t rss_rule; + + /* + * RSS ID for COS rule/table structure. 0xFF... (All Fs) if there is no + * COS rule. + */ + uint16_t cos_rule; + + /* + * RSS ID for load balancing rule/table structure. 0xFF... (All Fs) if + * there is no LB rule. + */ + uint16_t lb_rule; + + /* + * The maximum receive unit of the vnic. Each vnic is associated with a + * function. The vnic mru value overwrites the mru setting of the + * associated function. The HWRM shall make sure that vnic mru does not + * exceed the mru of the port the function is associated with. 
+ */ + uint16_t mru; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_vnic_cfg_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_vnic_free */ +/* + * Description: Free a VNIC resource. Idle any resources associated with the + * VNIC as well as the VNIC. Reset and release all resources associated with the + * VNIC. + */ + +/* Input (24 bytes) */ +struct hwrm_vnic_free_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* Logical vnic ID */ + uint32_t vnic_id; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_vnic_free_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. 
+ */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cfg */ +/* Description: This function is used to enable RSS configuration. */ + +/* Input (48 bytes) */ +struct hwrm_vnic_rss_cfg_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * When this bit is '1', the RSS hash shall be computed over source and + * destination IPv4 addresses of IPv4 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV4 UINT32_C(0x1) + /* + * When this bit is '1', the RSS hash shall be computed over + * source/destination IPv4 addresses and source/destination ports of + * TCP/IPv4 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV4 UINT32_C(0x2) + /* + * When this bit is '1', the RSS hash shall be computed over + * source/destination IPv4 addresses and source/destination ports of + * UDP/IPv4 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV4 UINT32_C(0x4) + /* + * When this bit is '1', the RSS hash shall be computed over source and + * destination IPv4 addresses of IPv6 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_IPV6 UINT32_C(0x8) + /* + * When this bit is '1', the RSS hash shall be computed over + * source/destination IPv6 addresses and source/destination ports of + * TCP/IPv6 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_TCP_IPV6 UINT32_C(0x10) + /* + * When this bit is '1', the RSS hash shall be computed over + * source/destination IPv6 addresses and source/destination ports of + * UDP/IPv6 packets. + */ + #define HWRM_VNIC_RSS_CFG_INPUT_HASH_TYPE_UDP_IPV6 UINT32_C(0x20) + uint32_t hash_type; + + uint32_t unused_0; + + /* This is the address for rss ring group table */ + uint64_t ring_grp_tbl_addr; + + /* This is the address for rss hash key table */ + uint64_t hash_key_tbl_addr; + + /* Index to the rss indirection table. */ + uint16_t rss_ctx_idx; + + uint16_t unused_1[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cfg_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. 
+ */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* Input (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_alloc_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; +} __attribute__((packed)); + +/* Output (16 bytes) */ + +struct hwrm_vnic_rss_cos_lb_ctx_alloc_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + + uint8_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + uint8_t unused_4; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_vnic_rss_cos_lb_ctx_free */ +/* Description: This function can be used to free COS/Load Balance context. */ +/* Input (24 bytes) */ + +struct hwrm_vnic_rss_cos_lb_ctx_free_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 
0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* rss_cos_lb_ctx_id is 16 b */ + uint16_t rss_cos_lb_ctx_id; + + uint16_t unused_0[3]; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_vnic_rss_cos_lb_ctx_free_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* Output (32 bytes) */ +struct hwrm_queue_qportcfg_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + /* The maximum number of queues that can be configured. */ + uint8_t max_configurable_queues; + + /* The maximum number of lossless queues that can be configured. */ + uint8_t max_configurable_lossless_queues; + + /* + * 0 - Not allowed. Non-zero - Allowed. If this value is non-zero, then + * the HWRM shall allow the host SW driver to configure queues using + * hwrm_queue_cfg. + */ + uint8_t queue_cfg_allowed; + + /* + * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then + * the HWRM shall allow the host SW driver to configure queue buffers + * using hwrm_queue_buffers_cfg. + */ + uint8_t queue_buffers_cfg_allowed; + + /* + * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then + * the HWRM shall allow the host SW driver to configure PFC using + * hwrm_queue_pfcenable_cfg. + */ + uint8_t queue_pfcenable_cfg_allowed; + + /* + * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then + * the HWRM shall allow the host SW driver to configure Priority to CoS + * mapping using hwrm_queue_pri2cos_cfg. + */ + uint8_t queue_pri2cos_cfg_allowed; + + /* + * 0 - Not allowed. Non-zero - Allowed If this value is non-zero, then + * the HWRM shall allow the host SW driver to configure CoS Bandwidth + * configuration using hwrm_queue_cos2bw_cfg. 
+ */ + uint8_t queue_cos2bw_cfg_allowed; + + /* ID of CoS Queue 0. FF - Invalid id */ + uint8_t queue_id0; + + /* This value is applicable to CoS queues only. */ + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSY \ + (UINT32_C(0x0) << 0) + /* Lossless */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_LOSSLESS \ + (UINT32_C(0x1) << 0) + /* + * Set to 0xFF... (All Fs) if there is no service profile + * specified + */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID0_SERVICE_PROFILE_UNKNOWN \ + (UINT32_C(0xff) << 0) + uint8_t queue_id0_service_profile; + + /* ID of CoS Queue 1. FF - Invalid id */ + uint8_t queue_id1; + /* This value is applicable to CoS queues only. */ + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSY \ + (UINT32_C(0x0) << 0) + /* Lossless */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_LOSSLESS \ + (UINT32_C(0x1) << 0) + /* + * Set to 0xFF... (All Fs) if there is no service profile + * specified + */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID1_SERVICE_PROFILE_UNKNOWN \ + (UINT32_C(0xff) << 0) + uint8_t queue_id1_service_profile; + + /* ID of CoS Queue 2. FF - Invalid id */ + uint8_t queue_id2; + /* This value is applicable to CoS queues only. */ + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSY \ + (UINT32_C(0x0) << 0) + /* Lossless */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_LOSSLESS \ + (UINT32_C(0x1) << 0) + /* + * Set to 0xFF... (All Fs) if there is no service profile + * specified + */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID2_SERVICE_PROFILE_UNKNOWN \ + (UINT32_C(0xff) << 0) + uint8_t queue_id2_service_profile; + + /* ID of CoS Queue 3. FF - Invalid id */ + uint8_t queue_id3; + + /* This value is applicable to CoS queues only. */ + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSY \ + (UINT32_C(0x0) << 0) + /* Lossless */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_LOSSLESS \ + (UINT32_C(0x1) << 0) + /* + * Set to 0xFF... (All Fs) if there is no service profile + * specified + */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID3_SERVICE_PROFILE_UNKNOWN \ + (UINT32_C(0xff) << 0) + uint8_t queue_id3_service_profile; + + /* ID of CoS Queue 4. FF - Invalid id */ + uint8_t queue_id4; + /* This value is applicable to CoS queues only. */ + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSY \ + (UINT32_C(0x0) << 0) + /* Lossless */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_LOSSLESS \ + (UINT32_C(0x1) << 0) + /* + * Set to 0xFF... (All Fs) if there is no service profile + * specified + */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID4_SERVICE_PROFILE_UNKNOWN \ + (UINT32_C(0xff) << 0) + uint8_t queue_id4_service_profile; + + /* ID of CoS Queue 5. FF - Invalid id */ + uint8_t queue_id5; + + /* This value is applicable to CoS queues only. */ + /* Lossy (best-effort) */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSY \ + (UINT32_C(0x0) << 0) + /* Lossless */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_LOSSLESS \ + (UINT32_C(0x1) << 0) + /* + * Set to 0xFF... (All Fs) if there is no service profile + * specified + */ + #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID5_SERVICE_PROFILE_UNKNOWN \ + (UINT32_C(0xff) << 0) + uint8_t queue_id5_service_profile; + + /* ID of CoS Queue 6. 
FF - Invalid id */
+ uint8_t queue_id6;
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID6_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id6_service_profile;
+
+ /* ID of CoS Queue 7. FF - Invalid id */
+ uint8_t queue_id7;
+
+ /* This value is applicable to CoS queues only. */
+ /* Lossy (best-effort) */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSY \
+ (UINT32_C(0x0) << 0)
+ /* Lossless */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_LOSSLESS \
+ (UINT32_C(0x1) << 0)
+ /*
+ * Set to 0xFF... (All Fs) if there is no service profile
+ * specified
+ */
+ #define HWRM_QUEUE_QPORTCFG_OUTPUT_QUEUE_ID7_SERVICE_PROFILE_UNKNOWN \
+ (UINT32_C(0xff) << 0)
+ uint8_t queue_id7_service_profile;
+
+ /*
+ * This field is used in Output records to indicate that the output is
+ * completely written to RAM. This field should be read as '1' to
+ * indicate that the output has been completely written. When writing a
+ * command completion or response to an internal processor, the order of
+ * writes has to be such that this field is written last.
+ */
+ uint8_t valid;
+} __attribute__((packed));
+
+/* hwrm_func_drv_rgtr */
+/*
+ * Description: This command is used by the function driver to register its
+ * information with the HWRM. A function driver shall implement this command. A
+ * function driver shall use this command during the driver initialization right
+ * after the HWRM version discovery and default ring resources allocation.
+ */
+
+/* Input (80 bytes) */
+struct hwrm_func_drv_rgtr_input {
+ /*
+ * This value indicates what type of request this is. The format for the
+ * rest of the command is determined by this field.
+ */
+ uint16_t req_type;
+
+ /*
+ * This value indicates the what completion ring the request will be
+ * optionally completed on. If the value is -1, then no CR completion
+ * will be generated. Any other value must be a valid CR ring_id value
+ * for this function.
+ */
+ uint16_t cmpl_ring;
+
+ /* This value indicates the command sequence number. */
+ uint16_t seq_id;
+
+ /*
+ * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids
+ * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM
+ */
+ uint16_t target_id;
+
+ /*
+ * This is the host address where the response will be written when the
+ * request is complete. This area must be 16B aligned and must be
+ * cleared to zero before the request is made.
+ */
+ uint64_t resp_addr;
+
+ /*
+ * When this bit is '1', the function driver is requesting all requests
+ * from its children VF drivers to be forwarded to itself. This flag can
+ * only be set by the PF driver. If a VF driver sets this flag, it
+ * should be ignored by the HWRM.
+ */
+ #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_ALL_MODE UINT32_C(0x1)
+ /*
+ * When this bit is '1', the function is requesting none of the requests
+ * from its children VF drivers to be forwarded to itself. This flag can
+ * only be set by the PF driver. If a VF driver sets this flag, it
+ * should be ignored by the HWRM.
+ */ + #define HWRM_FUNC_DRV_RGTR_INPUT_FLAGS_FWD_NONE_MODE UINT32_C(0x2) + uint32_t flags; + + /* This bit must be '1' for the os_type field to be configured. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_OS_TYPE UINT32_C(0x1) + /* This bit must be '1' for the ver field to be configured. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VER UINT32_C(0x2) + /* This bit must be '1' for the timestamp field to be configured. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_TIMESTAMP UINT32_C(0x4) + /* This bit must be '1' for the vf_req_fwd field to be configured. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_VF_REQ_FWD UINT32_C(0x8) + /* + * This bit must be '1' for the async_event_fwd field to be configured. + */ + #define HWRM_FUNC_DRV_RGTR_INPUT_ENABLES_ASYNC_EVENT_FWD \ + UINT32_C(0x10) + uint32_t enables; + + /* This value indicates the type of OS. */ + /* Unknown */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_UNKNOWN \ + (UINT32_C(0x0) << 0) + /* Other OS not listed below. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_OTHER \ + (UINT32_C(0x1) << 0) + /* MSDOS OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_MSDOS \ + (UINT32_C(0xe) << 0) + /* Windows OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WINDOWS \ + (UINT32_C(0x12) << 0) + /* Solaris OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_SOLARIS \ + (UINT32_C(0x1d) << 0) + /* Linux OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_LINUX \ + (UINT32_C(0x24) << 0) + /* FreeBSD OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_FREEBSD \ + (UINT32_C(0x2a) << 0) + /* VMware ESXi OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_ESXI \ + (UINT32_C(0x68) << 0) + /* Microsoft Windows 8 64-bit OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN864 \ + (UINT32_C(0x73) << 0) + /* Microsoft Windows Server 2012 R2 OS. */ + #define HWRM_FUNC_DRV_RGTR_INPUT_OS_TYPE_WIN2012R2 \ + (UINT32_C(0x74) << 0) + uint16_t os_type; + + /* This is the major version of the driver. */ + uint8_t ver_maj; + + /* This is the minor version of the driver. */ + uint8_t ver_min; + + /* This is the update version of the driver. */ + uint8_t ver_upd; + + uint8_t unused_0; + uint16_t unused_1; + + /* + * This is a 32-bit timestamp provided by the driver for keep alive. The + * timestamp is in multiples of 1ms. + */ + uint32_t timestamp; + + uint32_t unused_2; + + /* + * This is a 256-bit bit mask provided by the PF driver for letting the + * HWRM know what commands issued by the VF driver to the HWRM should be + * forwarded to the PF driver. Nth bit refers to the Nth req_type. + * Setting Nth bit to 1 indicates that requests from the VF driver with + * req_type equal to N shall be forwarded to the parent PF driver. This + * field is not valid for the VF driver. + */ + uint32_t vf_req_fwd[8]; + + /* + * This is a 256-bit bit mask provided by the function driver (PF or VF + * driver) to indicate the list of asynchronous event completions to be + * forwarded. Nth bit refers to the Nth event_id. Setting Nth bit to 1 + * by the function driver shall result in the HWRM forwarding + * asynchronous event completion with event_id equal to N. If all bits + * are set to 0 (value of 0), then the HWRM shall not forward any + * asynchronous event completion to this function driver. 
+ */ + uint32_t async_event_fwd[8]; +} __attribute__((packed)); + +/* Output (16 bytes) */ + +struct hwrm_func_drv_rgtr_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +/* hwrm_func_drv_unrgtr */ +/* + * Description: This command is used by the function driver to un register with + * the HWRM. A function driver shall implement this command. A function driver + * shall use this command during the driver unloading. + */ +/* Input (24 bytes) */ + +struct hwrm_func_drv_unrgtr_input { + /* + * This value indicates what type of request this is. The format for the + * rest of the command is determined by this field. + */ + uint16_t req_type; + + /* + * This value indicates the what completion ring the request will be + * optionally completed on. If the value is -1, then no CR completion + * will be generated. Any other value must be a valid CR ring_id value + * for this function. + */ + uint16_t cmpl_ring; + + /* This value indicates the command sequence number. */ + uint16_t seq_id; + + /* + * Target ID of this command. 0x0 - 0xFFF8 - Used for function ids + * 0xFFF8 - 0xFFFE - Reserved for internal processors 0xFFFF - HWRM + */ + uint16_t target_id; + + /* + * This is the host address where the response will be written when the + * request is complete. This area must be 16B aligned and must be + * cleared to zero before the request is made. + */ + uint64_t resp_addr; + + /* + * When this bit is '1', the function driver is notifying the HWRM to + * prepare for the shutdown. + */ + #define HWRM_FUNC_DRV_UNRGTR_INPUT_FLAGS_PREPARE_FOR_SHUTDOWN \ + UINT32_C(0x1) + uint32_t flags; + + uint32_t unused_0; +} __attribute__((packed)); + +/* Output (16 bytes) */ +struct hwrm_func_drv_unrgtr_output { + /* + * Pass/Fail or error type Note: receiver to verify the in parameters, + * and fail the call with an error when appropriate + */ + uint16_t error_code; + + /* This field returns the type of original request. */ + uint16_t req_type; + + /* This field provides original sequence number of the command. */ + uint16_t seq_id; + + /* + * This field is the length of the response in bytes. The last byte of + * the response is a valid flag that will read as '1' when the command + * has been completely written to memory. + */ + uint16_t resp_len; + + uint32_t unused_0; + uint8_t unused_1; + uint8_t unused_2; + uint8_t unused_3; + + /* + * This field is used in Output records to indicate that the output is + * completely written to RAM. This field should be read as '1' to + * indicate that the output has been completely written. 
When writing a + * command completion or response to an internal processor, the order of + * writes has to be such that this field is written last. + */ + uint8_t valid; +} __attribute__((packed)); + +#endif diff --git a/drivers/net/bnxt/rte_pmd_bnxt_version.map b/drivers/net/bnxt/rte_pmd_bnxt_version.map new file mode 100644 index 00000000..349c6e1c --- /dev/null +++ b/drivers/net/bnxt/rte_pmd_bnxt_version.map @@ -0,0 +1,4 @@ +DPDK_16.04 { + + local: *; +}; diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile index 10c794c4..504f2e8b 100644 --- a/drivers/net/bonding/Makefile +++ b/drivers/net/bonding/Makefile @@ -64,5 +64,7 @@ DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_kvargs DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_cmdline +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_BOND) += lib/librte_ring include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.c b/drivers/net/bonding/rte_eth_bond_8023ad.c index 8b4db507..48a50e4e 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.c +++ b/drivers/net/bonding/rte_eth_bond_8023ad.c @@ -39,9 +39,12 @@ #include <rte_malloc.h> #include <rte_errno.h> #include <rte_cycles.h> +#include <rte_compat.h> #include "rte_eth_bond_private.h" +static void bond_mode_8023ad_ext_periodic_cb(void *arg); + #ifdef RTE_LIBRTE_BOND_DEBUG_8023AD #define MODE4_DEBUG(fmt, ...) RTE_LOG(DEBUG, PMD, "%6u [Port %u: %s] " fmt, \ bond_dbg_get_time_diff_ms(), slave_id, \ @@ -512,7 +515,7 @@ mux_machine(struct bond_dev_private *internals, uint8_t slave_id) if (!ACTOR_STATE(port, SYNCHRONIZATION)) { /* attach mux to aggregator */ - RTE_VERIFY((port->actor_state & (STATE_COLLECTING | + RTE_ASSERT((port->actor_state & (STATE_COLLECTING | STATE_DISTRIBUTING)) == 0); ACTOR_STATE_SET(port, SYNCHRONIZATION); @@ -813,7 +816,7 @@ bond_mode_8023ad_periodic_cb(void *arg) struct lacpdu_header *lacp; lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); - RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); + RTE_ASSERT(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); /* This is LACP frame so pass it to rx_machine */ rx_machine(internals, slave_id, &lacp->lacpdu); @@ -856,8 +859,9 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id) uint16_t q_id; /* Given slave mus not be in active list */ - RTE_VERIFY(find_slave_by_id(internals->active_slaves, + RTE_ASSERT(find_slave_by_id(internals->active_slaves, internals->active_slave_count, slave_id) == internals->active_slave_count); + RTE_SET_USED(internals); /* used only for assert when enabled */ memcpy(&port->actor, &initial, sizeof(struct port_params)); /* Standard requires that port ID must be grater than 0. 
@@ -880,8 +884,8 @@ bond_mode_8023ad_activate_slave(struct rte_eth_dev *bond_dev, uint8_t slave_id) if (port->mbuf_pool != NULL) return; - RTE_VERIFY(port->rx_ring == NULL); - RTE_VERIFY(port->tx_ring == NULL); + RTE_ASSERT(port->rx_ring == NULL); + RTE_ASSERT(port->tx_ring == NULL); socket_id = rte_eth_devices[slave_id].data->numa_node; element_size = sizeof(struct slow_protocol_frame) + sizeof(struct rte_mbuf) @@ -939,7 +943,7 @@ bond_mode_8023ad_deactivate_slave(struct rte_eth_dev *bond_dev, uint8_t i; /* Given slave must be in active list */ - RTE_VERIFY(find_slave_by_id(internals->active_slaves, + RTE_ASSERT(find_slave_by_id(internals->active_slaves, internals->active_slave_count, slave_id) < internals->active_slave_count); /* Exclude slave from transmit policy. If this slave is an aggregator @@ -1004,7 +1008,7 @@ bond_mode_8023ad_mac_address_update(struct rte_eth_dev *bond_dev) bond_mode_8023ad_start(bond_dev); } -void +static void bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, struct rte_eth_bond_8023ad_conf *conf) { @@ -1022,26 +1026,36 @@ bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, conf->rx_marker_period_ms = mode4->rx_marker_timeout / ms_ticks; } -void -bond_mode_8023ad_setup(struct rte_eth_dev *dev, +static void +bond_mode_8023ad_conf_get_v1607(struct rte_eth_dev *dev, struct rte_eth_bond_8023ad_conf *conf) { - struct rte_eth_bond_8023ad_conf def_conf; struct bond_dev_private *internals = dev->data->dev_private; struct mode8023ad_private *mode4 = &internals->mode4; - uint64_t ms_ticks = rte_get_tsc_hz() / 1000; - if (conf == NULL) { - conf = &def_conf; - conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS; - conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS; - conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS; - conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS; - conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS; - conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS; - conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS; - conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS; - } + bond_mode_8023ad_conf_get(dev, conf); + conf->slowrx_cb = mode4->slowrx_cb; +} + +static void +bond_mode_8023ad_conf_get_default(struct rte_eth_bond_8023ad_conf *conf) +{ + conf->fast_periodic_ms = BOND_8023AD_FAST_PERIODIC_MS; + conf->slow_periodic_ms = BOND_8023AD_SLOW_PERIODIC_MS; + conf->short_timeout_ms = BOND_8023AD_SHORT_TIMEOUT_MS; + conf->long_timeout_ms = BOND_8023AD_LONG_TIMEOUT_MS; + conf->aggregate_wait_timeout_ms = BOND_8023AD_AGGREGATE_WAIT_TIMEOUT_MS; + conf->tx_period_ms = BOND_8023AD_TX_MACHINE_PERIOD_MS; + conf->rx_marker_period_ms = BOND_8023AD_RX_MARKER_PERIOD_MS; + conf->update_timeout_ms = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS; + conf->slowrx_cb = NULL; +} + +static void +bond_mode_8023ad_conf_assign(struct mode8023ad_private *mode4, + struct rte_eth_bond_8023ad_conf *conf) +{ + uint64_t ms_ticks = rte_get_tsc_hz() / 1000; mode4->fast_periodic_timeout = conf->fast_periodic_ms * ms_ticks; mode4->slow_periodic_timeout = conf->slow_periodic_ms * ms_ticks; @@ -1053,6 +1067,48 @@ bond_mode_8023ad_setup(struct rte_eth_dev *dev, mode4->update_timeout_us = conf->update_timeout_ms * 1000; } +static void +bond_mode_8023ad_setup_v1604(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_bond_8023ad_conf def_conf; + struct bond_dev_private *internals = dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + + if (conf == NULL) { + conf = &def_conf; + 
bond_mode_8023ad_conf_get_default(conf); + } + + bond_mode_8023ad_stop(dev); + bond_mode_8023ad_conf_assign(mode4, conf); + + if (dev->data->dev_started) + bond_mode_8023ad_start(dev); +} + + +void +bond_mode_8023ad_setup(struct rte_eth_dev *dev, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_bond_8023ad_conf def_conf; + struct bond_dev_private *internals = dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + + if (conf == NULL) { + conf = &def_conf; + bond_mode_8023ad_conf_get_default(conf); + } + + bond_mode_8023ad_stop(dev); + bond_mode_8023ad_conf_assign(mode4, conf); + mode4->slowrx_cb = conf->slowrx_cb; + + if (dev->data->dev_started) + bond_mode_8023ad_start(dev); +} + int bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev) { @@ -1068,13 +1124,28 @@ bond_mode_8023ad_enable(struct rte_eth_dev *bond_dev) int bond_mode_8023ad_start(struct rte_eth_dev *bond_dev) { - return rte_eal_alarm_set(BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000, - &bond_mode_8023ad_periodic_cb, bond_dev); + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + static const uint64_t us = BOND_MODE_8023AX_UPDATE_TIMEOUT_MS * 1000; + + if (mode4->slowrx_cb) + return rte_eal_alarm_set(us, &bond_mode_8023ad_ext_periodic_cb, + bond_dev); + + return rte_eal_alarm_set(us, &bond_mode_8023ad_periodic_cb, bond_dev); } void bond_mode_8023ad_stop(struct rte_eth_dev *bond_dev) { + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct mode8023ad_private *mode4 = &internals->mode4; + + if (mode4->slowrx_cb) { + rte_eal_alarm_cancel(&bond_mode_8023ad_ext_periodic_cb, + bond_dev); + return; + } rte_eal_alarm_cancel(&bond_mode_8023ad_periodic_cb, bond_dev); } @@ -1143,7 +1214,7 @@ free_out: } int -rte_eth_bond_8023ad_conf_get(uint8_t port_id, +rte_eth_bond_8023ad_conf_get_v1604(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf) { struct rte_eth_dev *bond_dev; @@ -1158,9 +1229,10 @@ rte_eth_bond_8023ad_conf_get(uint8_t port_id, bond_mode_8023ad_conf_get(bond_dev, conf); return 0; } +VERSION_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1604, 16.04); int -rte_eth_bond_8023ad_setup(uint8_t port_id, +rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf) { struct rte_eth_dev *bond_dev; @@ -1168,6 +1240,25 @@ rte_eth_bond_8023ad_setup(uint8_t port_id, if (valid_bonded_port_id(port_id) != 0) return -EINVAL; + if (conf == NULL) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + bond_mode_8023ad_conf_get_v1607(bond_dev, conf); + return 0; +} +BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_conf_get, _v1607, 16.07); +MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_conf_get(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf), + rte_eth_bond_8023ad_conf_get_v1607); + +static int +bond_8023ad_setup_validate(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf) +{ + if (valid_bonded_port_id(port_id) != 0) + return -EINVAL; + if (conf != NULL) { /* Basic sanity check */ if (conf->slow_periodic_ms == 0 || @@ -1183,11 +1274,47 @@ rte_eth_bond_8023ad_setup(uint8_t port_id, } } + return 0; +} + +int +rte_eth_bond_8023ad_setup_v1604(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_dev *bond_dev; + int err; + + err = bond_8023ad_setup_validate(port_id, conf); + if (err != 0) + return err; + + bond_dev = &rte_eth_devices[port_id]; + bond_mode_8023ad_setup_v1604(bond_dev, conf); + + return 0; +} +VERSION_SYMBOL(rte_eth_bond_8023ad_setup, _v1604, 16.04); 
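For reference, the _v1604/_v1607 pairing above relies on the symbol versioning helpers pulled in via <rte_compat.h>: VERSION_SYMBOL() keeps the old implementation reachable for binaries linked against the DPDK_16.04 ABI node, BIND_DEFAULT_SYMBOL() binds the new implementation as the default under DPDK_16.07, and MAP_STATIC_SYMBOL() aliases the unversioned name for static builds. A minimal sketch of the same pattern applied to a hypothetical function (example_get and its variants are illustrative names only, not part of this patch):

#include <rte_compat.h>

/* old behaviour, kept for applications built against DPDK_16.04 */
int example_get_v1604(int x);
int
example_get_v1604(int x)
{
	return x;
}
VERSION_SYMBOL(example_get, _v1604, 16.04);

/* new behaviour, bound as the default from DPDK_16.07 onwards */
int example_get_v1607(int x);
int
example_get_v1607(int x)
{
	return x + 1;
}
BIND_DEFAULT_SYMBOL(example_get, _v1607, 16.07);
MAP_STATIC_SYMBOL(int example_get(int x), example_get_v1607);

Both version nodes also have to exist in the library's linker map, which is why this series adds the DPDK_16.04 and DPDK_16.07 blocks to rte_eth_bond_version.map.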
+ +int +rte_eth_bond_8023ad_setup_v1607(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf) +{ + struct rte_eth_dev *bond_dev; + int err; + + err = bond_8023ad_setup_validate(port_id, conf); + if (err != 0) + return err; + bond_dev = &rte_eth_devices[port_id]; bond_mode_8023ad_setup(bond_dev, conf); return 0; } +BIND_DEFAULT_SYMBOL(rte_eth_bond_8023ad_setup, _v1607, 16.07); +MAP_STATIC_SYMBOL(int rte_eth_bond_8023ad_setup(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf), + rte_eth_bond_8023ad_setup_v1607); int rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id, @@ -1221,3 +1348,160 @@ rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id, info->agg_port_id = port->aggregator_port_id; return 0; } + +static int +bond_8023ad_ext_validate(uint8_t port_id, uint8_t slave_id) +{ + struct rte_eth_dev *bond_dev; + struct bond_dev_private *internals; + struct mode8023ad_private *mode4; + + if (rte_eth_bond_mode_get(port_id) != BONDING_MODE_8023AD) + return -EINVAL; + + bond_dev = &rte_eth_devices[port_id]; + + if (!bond_dev->data->dev_started) + return -EINVAL; + + internals = bond_dev->data->dev_private; + if (find_slave_by_id(internals->active_slaves, + internals->active_slave_count, slave_id) == + internals->active_slave_count) + return -EINVAL; + + mode4 = &internals->mode4; + if (mode4->slowrx_cb == NULL) + return -EINVAL; + + return 0; +} + +int +rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled) +{ + struct port *port; + int res; + + res = bond_8023ad_ext_validate(port_id, slave_id); + if (res != 0) + return res; + + port = &mode_8023ad_ports[slave_id]; + + if (enabled) + ACTOR_STATE_SET(port, COLLECTING); + else + ACTOR_STATE_CLR(port, COLLECTING); + + return 0; +} + +int +rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled) +{ + struct port *port; + int res; + + res = bond_8023ad_ext_validate(port_id, slave_id); + if (res != 0) + return res; + + port = &mode_8023ad_ports[slave_id]; + + if (enabled) + ACTOR_STATE_SET(port, DISTRIBUTING); + else + ACTOR_STATE_CLR(port, DISTRIBUTING); + + return 0; +} + +int +rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id) +{ + struct port *port; + int err; + + err = bond_8023ad_ext_validate(port_id, slave_id); + if (err != 0) + return err; + + port = &mode_8023ad_ports[slave_id]; + return ACTOR_STATE(port, DISTRIBUTING); +} + +int +rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id) +{ + struct port *port; + int err; + + err = bond_8023ad_ext_validate(port_id, slave_id); + if (err != 0) + return err; + + port = &mode_8023ad_ports[slave_id]; + return ACTOR_STATE(port, COLLECTING); +} + +int +rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id, + struct rte_mbuf *lacp_pkt) +{ + struct port *port; + int res; + + res = bond_8023ad_ext_validate(port_id, slave_id); + if (res != 0) + return res; + + port = &mode_8023ad_ports[slave_id]; + + if (rte_pktmbuf_pkt_len(lacp_pkt) < sizeof(struct lacpdu_header)) + return -EINVAL; + + struct lacpdu_header *lacp; + + /* only enqueue LACPDUs */ + lacp = rte_pktmbuf_mtod(lacp_pkt, struct lacpdu_header *); + if (lacp->lacpdu.subtype != SLOW_SUBTYPE_LACP) + return -EINVAL; + + MODE4_DEBUG("sending LACP frame\n"); + + return rte_ring_enqueue(port->tx_ring, lacp_pkt); +} + +static void +bond_mode_8023ad_ext_periodic_cb(void *arg) +{ + struct rte_eth_dev *bond_dev = arg; + struct bond_dev_private *internals = bond_dev->data->dev_private; + struct mode8023ad_private *mode4 = 
&internals->mode4; + struct port *port; + void *pkt = NULL; + uint16_t i, slave_id; + + for (i = 0; i < internals->active_slave_count; i++) { + slave_id = internals->active_slaves[i]; + port = &mode_8023ad_ports[slave_id]; + + if (rte_ring_dequeue(port->rx_ring, &pkt) == 0) { + struct rte_mbuf *lacp_pkt = pkt; + struct lacpdu_header *lacp; + + lacp = rte_pktmbuf_mtod(lacp_pkt, + struct lacpdu_header *); + RTE_VERIFY(lacp->lacpdu.subtype == SLOW_SUBTYPE_LACP); + + /* This is LACP frame so pass it to rx callback. + * Callback is responsible for freeing mbuf. + */ + mode4->slowrx_cb(slave_id, lacp_pkt); + } + } + + rte_eal_alarm_set(internals->mode4.update_timeout_us, + bond_mode_8023ad_ext_periodic_cb, arg); +} diff --git a/drivers/net/bonding/rte_eth_bond_8023ad.h b/drivers/net/bonding/rte_eth_bond_8023ad.h index ebd0e934..1de34bc8 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad.h +++ b/drivers/net/bonding/rte_eth_bond_8023ad.h @@ -64,6 +64,9 @@ extern "C" { #define MARKER_TLV_TYPE_INFO 0x01 #define MARKER_TLV_TYPE_RESP 0x02 +typedef void (*rte_eth_bond_8023ad_ext_slowrx_fn)(uint8_t slave_id, + struct rte_mbuf *lacp_pkt); + enum rte_bond_8023ad_selection { UNSELECTED, STANDBY, @@ -157,6 +160,7 @@ struct rte_eth_bond_8023ad_conf { uint32_t tx_period_ms; uint32_t rx_marker_period_ms; uint32_t update_timeout_ms; + rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb; }; struct rte_eth_bond_8023ad_slave_info { @@ -183,6 +187,12 @@ struct rte_eth_bond_8023ad_slave_info { int rte_eth_bond_8023ad_conf_get(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf); +int +rte_eth_bond_8023ad_conf_get_v1604(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf); +int +rte_eth_bond_8023ad_conf_get_v1607(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf); /** * @internal @@ -198,6 +208,12 @@ rte_eth_bond_8023ad_conf_get(uint8_t port_id, int rte_eth_bond_8023ad_setup(uint8_t port_id, struct rte_eth_bond_8023ad_conf *conf); +int +rte_eth_bond_8023ad_setup_v1604(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf); +int +rte_eth_bond_8023ad_setup_v1607(uint8_t port_id, + struct rte_eth_bond_8023ad_conf *conf); /** * @internal @@ -219,4 +235,71 @@ rte_eth_bond_8023ad_slave_info(uint8_t port_id, uint8_t slave_id, } #endif +/** + * Configure a slave port to start collecting. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @param enabled Non-zero when collection enabled. + * @return + * 0 - if ok + * -EINVAL if slave is not valid. + */ +int +rte_eth_bond_8023ad_ext_collect(uint8_t port_id, uint8_t slave_id, int enabled); + +/** + * Get COLLECTING flag from slave port actor state. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @return + * 0 - if not set + * 1 - if set + * -EINVAL if slave is not valid. + */ +int +rte_eth_bond_8023ad_ext_collect_get(uint8_t port_id, uint8_t slave_id); + +/** + * Configure a slave port to start distributing. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @param enabled Non-zero when distribution enabled. + * @return + * 0 - if ok + * -EINVAL if slave is not valid. + */ +int +rte_eth_bond_8023ad_ext_distrib(uint8_t port_id, uint8_t slave_id, int enabled); + +/** + * Get DISTRIBUTING flag from slave port actor state. + * + * @param port_id Bonding device id + * @param slave_id Port id of valid slave. + * @return + * 0 - if not set + * 1 - if set + * -EINVAL if slave is not valid. 
+ */ +int +rte_eth_bond_8023ad_ext_distrib_get(uint8_t port_id, uint8_t slave_id); + +/** + * LACPDU transmit path for external 802.3ad state machine. Caller retains + * ownership of the packet on failure. + * + * @param port_id Bonding device id + * @param slave_id Port ID of valid slave device. + * @param lacp_pkt mbuf containing LACPDU. + * + * @return + * 0 on success, negative value otherwise. + */ +int +rte_eth_bond_8023ad_ext_slowtx(uint8_t port_id, uint8_t slave_id, + struct rte_mbuf *lacp_pkt); + #endif /* RTE_ETH_BOND_8023AD_H_ */ diff --git a/drivers/net/bonding/rte_eth_bond_8023ad_private.h b/drivers/net/bonding/rte_eth_bond_8023ad_private.h index 8adee70b..ca8858be 100644 --- a/drivers/net/bonding/rte_eth_bond_8023ad_private.h +++ b/drivers/net/bonding/rte_eth_bond_8023ad_private.h @@ -173,6 +173,8 @@ struct mode8023ad_private { uint64_t tx_period_timeout; uint64_t rx_marker_timeout; uint64_t update_timeout_us; + rte_eth_bond_8023ad_ext_slowrx_fn slowrx_cb; + uint8_t external_sm; }; /** @@ -185,18 +187,6 @@ extern struct port mode_8023ad_ports[]; /* Forward declaration */ struct bond_dev_private; -/** - * @internal - * - * Get configuration of bonded interface. - * - * - * @param dev Bonded interface - * @param conf returned configuration - */ -void -bond_mode_8023ad_conf_get(struct rte_eth_dev *dev, - struct rte_eth_bond_8023ad_conf *conf); /** * @internal diff --git a/drivers/net/bonding/rte_eth_bond_alb.c b/drivers/net/bonding/rte_eth_bond_alb.c index 3157543e..38f5c4d4 100644 --- a/drivers/net/bonding/rte_eth_bond_alb.c +++ b/drivers/net/bonding/rte_eth_bond_alb.c @@ -90,14 +90,14 @@ bond_mode_alb_enable(struct rte_eth_dev *bond_dev) if (internals->mode6.mempool == NULL) { RTE_LOG(ERR, PMD, "%s: Failed to initialize ALB mempool.\n", bond_dev->data->name); - rte_panic( - "Failed to allocate memory pool ('%s')\n" - "for bond device '%s'\n", - mem_name, bond_dev->data->name); + goto mempool_alloc_error; } } return 0; + +mempool_alloc_error: + return -ENOMEM; } void bond_mode_alb_arp_recv(struct ether_hdr *eth_h, uint16_t offset, diff --git a/drivers/net/bonding/rte_eth_bond_api.c b/drivers/net/bonding/rte_eth_bond_api.c index e9247b5f..203ebe9e 100644 --- a/drivers/net/bonding/rte_eth_bond_api.c +++ b/drivers/net/bonding/rte_eth_bond_api.c @@ -60,18 +60,14 @@ check_for_bonded_ethdev(const struct rte_eth_dev *eth_dev) int valid_bonded_port_id(uint8_t port_id) { - if (!rte_eth_dev_is_valid_port(port_id)) - return -1; - + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); return check_for_bonded_ethdev(&rte_eth_devices[port_id]); } int valid_slave_port_id(uint8_t port_id) { - /* Verify that port id's are valid */ - if (!rte_eth_dev_is_valid_port(port_id)) - return -1; + RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -1); /* Verify that port_id refers to a non bonded port */ if (check_for_bonded_ethdev(&rte_eth_devices[port_id]) == 0) @@ -95,7 +91,7 @@ activate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id) internals->tlb_slaves_order[active_count] = port_id; } - RTE_VERIFY(internals->active_slave_count < + RTE_ASSERT(internals->active_slave_count < (RTE_DIM(internals->active_slaves) - 1)); internals->active_slaves[internals->active_slave_count] = port_id; @@ -134,7 +130,7 @@ deactivate_slave(struct rte_eth_dev *eth_dev, uint8_t port_id) sizeof(internals->active_slaves[0])); } - RTE_VERIFY(active_count < RTE_DIM(internals->active_slaves)); + RTE_ASSERT(active_count < RTE_DIM(internals->active_slaves)); internals->active_slave_count = active_count; if (eth_dev->data->dev_started) { @@ 
-247,6 +243,8 @@ rte_eth_bond_create(const char *name, uint8_t mode, uint8_t socket_id) internals->active_slave_count = 0; internals->rx_offload_capa = 0; internals->tx_offload_capa = 0; + internals->candidate_max_rx_pktlen = 0; + internals->max_rx_pktlen = 0; /* Initially allow to choose any offload type */ internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; @@ -331,9 +329,15 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id) /* Add slave details to bonded device */ slave_eth_dev->data->dev_flags |= RTE_ETH_DEV_BONDED_SLAVE; - slave_add(internals, slave_eth_dev); rte_eth_dev_info_get(slave_port_id, &dev_info); + if (dev_info.max_rx_pktlen < internals->max_rx_pktlen) { + RTE_BOND_LOG(ERR, "Slave (port %u) max_rx_pktlen too small", + slave_port_id); + return -1; + } + + slave_add(internals, slave_eth_dev); /* We need to store slaves reta_size to be able to synchronize RETA for all * slave devices even if its sizes are different. @@ -365,6 +369,9 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id) internals->tx_offload_capa = dev_info.tx_offload_capa; internals->flow_type_rss_offloads = dev_info.flow_type_rss_offloads; + /* Inherit first slave's max rx packet size */ + internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen; + } else { /* Check slave link properties are supported if props are set, * all slaves must be the same */ @@ -391,6 +398,9 @@ __eth_bond_slave_add_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id) if (internals->reta_size > dev_info.reta_size) internals->reta_size = dev_info.reta_size; + if (!internals->max_rx_pktlen && + dev_info.max_rx_pktlen < internals->candidate_max_rx_pktlen) + internals->candidate_max_rx_pktlen = dev_info.max_rx_pktlen; } bonded_eth_dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf &= @@ -536,6 +546,8 @@ __eth_bond_slave_remove_lock_free(uint8_t bonded_port_id, uint8_t slave_port_id) internals->tx_offload_capa = 0; internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; internals->reta_size = 0; + internals->candidate_max_rx_pktlen = 0; + internals->max_rx_pktlen = 0; } return 0; } diff --git a/drivers/net/bonding/rte_eth_bond_pmd.c b/drivers/net/bonding/rte_eth_bond_pmd.c index 54788cf8..9a2518fb 100644 --- a/drivers/net/bonding/rte_eth_bond_pmd.c +++ b/drivers/net/bonding/rte_eth_bond_pmd.c @@ -1608,11 +1608,11 @@ bond_ethdev_stop(struct rte_eth_dev *eth_dev) for (i = 0; i < internals->active_slave_count; i++) { port = &mode_8023ad_ports[internals->active_slaves[i]]; - RTE_VERIFY(port->rx_ring != NULL); + RTE_ASSERT(port->rx_ring != NULL); while (rte_ring_dequeue(port->rx_ring, &pkt) != -ENOENT) rte_pktmbuf_free(pkt); - RTE_VERIFY(port->tx_ring != NULL); + RTE_ASSERT(port->tx_ring != NULL); while (rte_ring_dequeue(port->tx_ring, &pkt) != -ENOENT) rte_pktmbuf_free(pkt); } @@ -1650,7 +1650,8 @@ bond_ethdev_info(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) dev_info->max_mac_addrs = 1; - dev_info->max_rx_pktlen = (uint32_t)2048; + dev_info->max_rx_pktlen = internals->candidate_max_rx_pktlen ? 
+ internals->candidate_max_rx_pktlen : 2048; dev_info->max_rx_queues = (uint16_t)128; dev_info->max_tx_queues = (uint16_t)512; @@ -1836,7 +1837,6 @@ bond_ethdev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->imissed += slave_stats.imissed; stats->ierrors += slave_stats.ierrors; stats->oerrors += slave_stats.oerrors; - stats->imcasts += slave_stats.imcasts; stats->rx_nombuf += slave_stats.rx_nombuf; for (j = 0; j < RTE_ETHDEV_QUEUE_STAT_CNTRS; j++) { @@ -2294,6 +2294,9 @@ bond_ethdev_configure(struct rte_eth_dev *dev) } } + /* set the max_rx_pktlen */ + internals->max_rx_pktlen = internals->candidate_max_rx_pktlen; + /* * if no kvlist, it means that this bonded device has been created * through the bonding api. diff --git a/drivers/net/bonding/rte_eth_bond_private.h b/drivers/net/bonding/rte_eth_bond_private.h index 83123978..2bdc9efa 100644 --- a/drivers/net/bonding/rte_eth_bond_private.h +++ b/drivers/net/bonding/rte_eth_bond_private.h @@ -169,6 +169,9 @@ struct bond_dev_private { struct rte_kvargs *kvlist; uint8_t slave_update_idx; + + uint32_t candidate_max_rx_pktlen; + uint32_t max_rx_pktlen; }; extern const struct eth_dev_ops default_dev_ops; diff --git a/drivers/net/bonding/rte_eth_bond_version.map b/drivers/net/bonding/rte_eth_bond_version.map index 22bd9200..2de0a7d3 100644 --- a/drivers/net/bonding/rte_eth_bond_version.map +++ b/drivers/net/bonding/rte_eth_bond_version.map @@ -27,3 +27,19 @@ DPDK_2.1 { rte_eth_bond_free; } DPDK_2.0; + +DPDK_16.04 { +}; + +DPDK_16.07 { + global: + + rte_eth_bond_8023ad_ext_collect; + rte_eth_bond_8023ad_ext_collect_get; + rte_eth_bond_8023ad_ext_distrib; + rte_eth_bond_8023ad_ext_distrib_get; + rte_eth_bond_8023ad_ext_slowtx; + rte_eth_bond_8023ad_conf_get; + rte_eth_bond_8023ad_setup; + +} DPDK_16.04; diff --git a/drivers/net/cxgbe/Makefile b/drivers/net/cxgbe/Makefile index 07119764..bfcc3159 100644 --- a/drivers/net/cxgbe/Makefile +++ b/drivers/net/cxgbe/Makefile @@ -45,7 +45,7 @@ EXPORT_MAP := rte_pmd_cxgbe_version.map LIBABIVER := 1 -ifeq ($(CC), icc) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # @@ -54,9 +54,11 @@ else # # CFLAGS for gcc/clang # -ifeq ($(shell test $(CC) = gcc && test $(GCC_VERSION) -ge 44 && echo 1), 1) +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +ifeq ($(shell test $(GCC_VERSION) -ge 44 && echo 1), 1) CFLAGS += -Wno-deprecated endif +endif CFLAGS_BASE_DRIVER = endif @@ -82,6 +84,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += t4_hw.c # this lib depends upon: DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_mempool lib/librte_mbuf -DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_net lib/librte_malloc +DEPDIRS-$(CONFIG_RTE_LIBRTE_CXGBE_PMD) += lib/librte_net include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/cxgbe/base/adapter.h b/drivers/net/cxgbe/base/adapter.h index a5225c0e..5e3bd509 100644 --- a/drivers/net/cxgbe/base/adapter.h +++ b/drivers/net/cxgbe/base/adapter.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2014-2015 Chelsio Communications. + * Copyright(c) 2014-2016 Chelsio Communications. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -318,6 +318,9 @@ struct adapter { unsigned int mbox; /* associated mailbox */ unsigned int pf; /* associated physical function id */ + unsigned int vpd_busy; + unsigned int vpd_flag; + int use_unpacked_mode; /* unpacked rx mode state */ }; @@ -427,6 +430,139 @@ static inline void t4_write_reg64(struct adapter *adapter, u32 reg_addr, CXGBE_WRITE_REG64(adapter, reg_addr, val); } +#define PCI_STATUS 0x06 /* 16 bits */ +#define PCI_STATUS_CAP_LIST 0x10 /* Support Capability List */ +#define PCI_CAPABILITY_LIST 0x34 +/* Offset of first capability list entry */ +#define PCI_CAP_ID_EXP 0x10 /* PCI Express */ +#define PCI_CAP_LIST_ID 0 /* Capability ID */ +#define PCI_CAP_LIST_NEXT 1 /* Next capability in the list */ +#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ +#define PCI_CAP_ID_VPD 0x03 /* Vital Product Data */ +#define PCI_VPD_ADDR 2 /* Address to access (15 bits!) */ +#define PCI_VPD_ADDR_F 0x8000 /* Write 0, 1 indicates completion */ +#define PCI_VPD_DATA 4 /* 32-bits of data returned here */ + +/** + * t4_os_pci_write_cfg4 - 32-bit write to PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: the value to write + * + * Write a 32-bit value into the given register in PCI config space. + */ +static inline void t4_os_pci_write_cfg4(struct adapter *adapter, size_t addr, + off_t val) +{ + u32 val32 = val; + + if (rte_eal_pci_write_config(adapter->pdev, &val32, sizeof(val32), + addr) < 0) + dev_err(adapter, "Can't write to PCI config space\n"); +} + +/** + * t4_os_pci_read_cfg4 - read a 32-bit value from PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: where to store the value read + * + * Read a 32-bit value from the given register in PCI config space. + */ +static inline void t4_os_pci_read_cfg4(struct adapter *adapter, size_t addr, + u32 *val) +{ + if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val), + addr) < 0) + dev_err(adapter, "Can't read from PCI config space\n"); +} + +/** + * t4_os_pci_write_cfg2 - 16-bit write to PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: the value to write + * + * Write a 16-bit value into the given register in PCI config space. + */ +static inline void t4_os_pci_write_cfg2(struct adapter *adapter, size_t addr, + off_t val) +{ + u16 val16 = val; + + if (rte_eal_pci_write_config(adapter->pdev, &val16, sizeof(val16), + addr) < 0) + dev_err(adapter, "Can't write to PCI config space\n"); +} + +/** + * t4_os_pci_read_cfg2 - read a 16-bit value from PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: where to store the value read + * + * Read a 16-bit value from the given register in PCI config space. + */ +static inline void t4_os_pci_read_cfg2(struct adapter *adapter, size_t addr, + u16 *val) +{ + if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val), + addr) < 0) + dev_err(adapter, "Can't read from PCI config space\n"); +} + +/** + * t4_os_pci_read_cfg - read a 8-bit value from PCI config space + * @adapter: the adapter + * @addr: the register address + * @val: where to store the value read + * + * Read a 8-bit value from the given register in PCI config space. 
+ */ +static inline void t4_os_pci_read_cfg(struct adapter *adapter, size_t addr, + u8 *val) +{ + if (rte_eal_pci_read_config(adapter->pdev, val, sizeof(*val), + addr) < 0) + dev_err(adapter, "Can't read from PCI config space\n"); +} + +/** + * t4_os_find_pci_capability - lookup a capability in the PCI capability list + * @adapter: the adapter + * @cap: the capability + * + * Return the address of the given capability within the PCI capability list. + */ +static inline int t4_os_find_pci_capability(struct adapter *adapter, int cap) +{ + u16 status; + int ttl = 48; + u8 pos = 0; + u8 id = 0; + + t4_os_pci_read_cfg2(adapter, PCI_STATUS, &status); + if (!(status & PCI_STATUS_CAP_LIST)) { + dev_err(adapter, "PCIe capability reading failed\n"); + return -1; + } + + t4_os_pci_read_cfg(adapter, PCI_CAPABILITY_LIST, &pos); + while (ttl-- && pos >= 0x40) { + pos &= ~3; + t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_ID), &id); + + if (id == 0xff) + break; + + if (id == cap) + return (int)pos; + + t4_os_pci_read_cfg(adapter, (pos + PCI_CAP_LIST_NEXT), &pos); + } + return 0; +} + /** * t4_os_set_hw_addr - store a port's MAC address in SW * @adapter: the adapter diff --git a/drivers/net/cxgbe/base/common.h b/drivers/net/cxgbe/base/common.h index cf2e82dd..11f139c9 100644 --- a/drivers/net/cxgbe/base/common.h +++ b/drivers/net/cxgbe/base/common.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2014-2015 Chelsio Communications. + * Copyright(c) 2014-2016 Chelsio Communications. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -50,6 +50,10 @@ enum { }; enum { + T5_REGMAP_SIZE = (332 * 1024), +}; + +enum { MEMWIN0_APERTURE = 2048, MEMWIN0_BASE = 0x1b800, }; @@ -398,4 +402,9 @@ int t4_init_sge_params(struct adapter *adapter); int t4_init_tp_params(struct adapter *adap); int t4_filter_field_shift(const struct adapter *adap, unsigned int filter_sel); int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl); +unsigned int t4_get_regs_len(struct adapter *adap); +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size); +int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data); +int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data); +int t4_seeprom_wp(struct adapter *adapter, int enable); #endif /* __CHELSIO_COMMON_H */ diff --git a/drivers/net/cxgbe/base/t4_hw.c b/drivers/net/cxgbe/base/t4_hw.c index 79af8067..7e79adf6 100644 --- a/drivers/net/cxgbe/base/t4_hw.c +++ b/drivers/net/cxgbe/base/t4_hw.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2014-2015 Chelsio Communications. + * Copyright(c) 2014-2016 Chelsio Communications. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -570,6 +570,1031 @@ int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, } /** + * t4_get_regs_len - return the size of the chips register set + * @adapter: the adapter + * + * Returns the size of the chip's BAR0 register space. 
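A caller-side sketch (not taken from the patch) of the sizing contract behind the two register-dump prototypes added to common.h above, assuming an initialized struct adapter and the DPDK malloc API:

#include <rte_malloc.h>

static void
dump_t5_regs(struct adapter *adap)
{
	unsigned int len = t4_get_regs_len(adap);	/* T5_REGMAP_SIZE bytes */
	void *buf;

	if (len == 0)
		return;			/* unsupported chip version */

	buf = rte_zmalloc(NULL, len, 0);
	if (buf == NULL)
		return;

	/* Fills buf with the selected register ranges; a short buffer
	 * only truncates the dump, it is never overrun. */
	t4_get_regs(adap, buf, len);

	/* ... consume the dump ... */
	rte_free(buf);
}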
+ */ +unsigned int t4_get_regs_len(struct adapter *adapter) +{ + unsigned int chip_version = CHELSIO_CHIP_VERSION(adapter->params.chip); + + switch (chip_version) { + case CHELSIO_T5: + return T5_REGMAP_SIZE; + } + + dev_err(adapter, + "Unsupported chip version %d\n", chip_version); + return 0; +} + +/** + * t4_get_regs - read chip registers into provided buffer + * @adap: the adapter + * @buf: register buffer + * @buf_size: size (in bytes) of register buffer + * + * If the provided register buffer isn't large enough for the chip's + * full register range, the register dump will be truncated to the + * register buffer's size. + */ +void t4_get_regs(struct adapter *adap, void *buf, size_t buf_size) +{ + static const unsigned int t5_reg_ranges[] = { + 0x1008, 0x10c0, + 0x10cc, 0x10f8, + 0x1100, 0x1100, + 0x110c, 0x1148, + 0x1180, 0x1184, + 0x1190, 0x1194, + 0x11a0, 0x11a4, + 0x11b0, 0x11b4, + 0x11fc, 0x123c, + 0x1280, 0x173c, + 0x1800, 0x18fc, + 0x3000, 0x3028, + 0x3060, 0x30b0, + 0x30b8, 0x30d8, + 0x30e0, 0x30fc, + 0x3140, 0x357c, + 0x35a8, 0x35cc, + 0x35ec, 0x35ec, + 0x3600, 0x5624, + 0x56cc, 0x56ec, + 0x56f4, 0x5720, + 0x5728, 0x575c, + 0x580c, 0x5814, + 0x5890, 0x589c, + 0x58a4, 0x58ac, + 0x58b8, 0x58bc, + 0x5940, 0x59c8, + 0x59d0, 0x59dc, + 0x59fc, 0x5a18, + 0x5a60, 0x5a70, + 0x5a80, 0x5a9c, + 0x5b94, 0x5bfc, + 0x6000, 0x6020, + 0x6028, 0x6040, + 0x6058, 0x609c, + 0x60a8, 0x614c, + 0x7700, 0x7798, + 0x77c0, 0x78fc, + 0x7b00, 0x7b58, + 0x7b60, 0x7b84, + 0x7b8c, 0x7c54, + 0x7d00, 0x7d38, + 0x7d40, 0x7d80, + 0x7d8c, 0x7ddc, + 0x7de4, 0x7e04, + 0x7e10, 0x7e1c, + 0x7e24, 0x7e38, + 0x7e40, 0x7e44, + 0x7e4c, 0x7e78, + 0x7e80, 0x7edc, + 0x7ee8, 0x7efc, + 0x8dc0, 0x8de0, + 0x8df8, 0x8e04, + 0x8e10, 0x8e84, + 0x8ea0, 0x8f84, + 0x8fc0, 0x9058, + 0x9060, 0x9060, + 0x9068, 0x90f8, + 0x9400, 0x9408, + 0x9410, 0x9470, + 0x9600, 0x9600, + 0x9608, 0x9638, + 0x9640, 0x96f4, + 0x9800, 0x9808, + 0x9820, 0x983c, + 0x9850, 0x9864, + 0x9c00, 0x9c6c, + 0x9c80, 0x9cec, + 0x9d00, 0x9d6c, + 0x9d80, 0x9dec, + 0x9e00, 0x9e6c, + 0x9e80, 0x9eec, + 0x9f00, 0x9f6c, + 0x9f80, 0xa020, + 0xd004, 0xd004, + 0xd010, 0xd03c, + 0xdfc0, 0xdfe0, + 0xe000, 0x1106c, + 0x11074, 0x11088, + 0x1109c, 0x1117c, + 0x11190, 0x11204, + 0x19040, 0x1906c, + 0x19078, 0x19080, + 0x1908c, 0x190e8, + 0x190f0, 0x190f8, + 0x19100, 0x19110, + 0x19120, 0x19124, + 0x19150, 0x19194, + 0x1919c, 0x191b0, + 0x191d0, 0x191e8, + 0x19238, 0x19290, + 0x193f8, 0x19428, + 0x19430, 0x19444, + 0x1944c, 0x1946c, + 0x19474, 0x19474, + 0x19490, 0x194cc, + 0x194f0, 0x194f8, + 0x19c00, 0x19c08, + 0x19c10, 0x19c60, + 0x19c94, 0x19ce4, + 0x19cf0, 0x19d40, + 0x19d50, 0x19d94, + 0x19da0, 0x19de8, + 0x19df0, 0x19e10, + 0x19e50, 0x19e90, + 0x19ea0, 0x19f24, + 0x19f34, 0x19f34, + 0x19f40, 0x19f50, + 0x19f90, 0x19fb4, + 0x19fc4, 0x19fe4, + 0x1a000, 0x1a004, + 0x1a010, 0x1a06c, + 0x1a0b0, 0x1a0e4, + 0x1a0ec, 0x1a0f8, + 0x1a100, 0x1a108, + 0x1a114, 0x1a120, + 0x1a128, 0x1a130, + 0x1a138, 0x1a138, + 0x1a190, 0x1a1c4, + 0x1a1fc, 0x1a1fc, + 0x1e008, 0x1e00c, + 0x1e040, 0x1e044, + 0x1e04c, 0x1e04c, + 0x1e284, 0x1e290, + 0x1e2c0, 0x1e2c0, + 0x1e2e0, 0x1e2e0, + 0x1e300, 0x1e384, + 0x1e3c0, 0x1e3c8, + 0x1e408, 0x1e40c, + 0x1e440, 0x1e444, + 0x1e44c, 0x1e44c, + 0x1e684, 0x1e690, + 0x1e6c0, 0x1e6c0, + 0x1e6e0, 0x1e6e0, + 0x1e700, 0x1e784, + 0x1e7c0, 0x1e7c8, + 0x1e808, 0x1e80c, + 0x1e840, 0x1e844, + 0x1e84c, 0x1e84c, + 0x1ea84, 0x1ea90, + 0x1eac0, 0x1eac0, + 0x1eae0, 0x1eae0, + 0x1eb00, 0x1eb84, + 0x1ebc0, 0x1ebc8, + 0x1ec08, 0x1ec0c, + 0x1ec40, 0x1ec44, + 0x1ec4c, 0x1ec4c, + 0x1ee84, 
0x1ee90, + 0x1eec0, 0x1eec0, + 0x1eee0, 0x1eee0, + 0x1ef00, 0x1ef84, + 0x1efc0, 0x1efc8, + 0x1f008, 0x1f00c, + 0x1f040, 0x1f044, + 0x1f04c, 0x1f04c, + 0x1f284, 0x1f290, + 0x1f2c0, 0x1f2c0, + 0x1f2e0, 0x1f2e0, + 0x1f300, 0x1f384, + 0x1f3c0, 0x1f3c8, + 0x1f408, 0x1f40c, + 0x1f440, 0x1f444, + 0x1f44c, 0x1f44c, + 0x1f684, 0x1f690, + 0x1f6c0, 0x1f6c0, + 0x1f6e0, 0x1f6e0, + 0x1f700, 0x1f784, + 0x1f7c0, 0x1f7c8, + 0x1f808, 0x1f80c, + 0x1f840, 0x1f844, + 0x1f84c, 0x1f84c, + 0x1fa84, 0x1fa90, + 0x1fac0, 0x1fac0, + 0x1fae0, 0x1fae0, + 0x1fb00, 0x1fb84, + 0x1fbc0, 0x1fbc8, + 0x1fc08, 0x1fc0c, + 0x1fc40, 0x1fc44, + 0x1fc4c, 0x1fc4c, + 0x1fe84, 0x1fe90, + 0x1fec0, 0x1fec0, + 0x1fee0, 0x1fee0, + 0x1ff00, 0x1ff84, + 0x1ffc0, 0x1ffc8, + 0x30000, 0x30030, + 0x30038, 0x30038, + 0x30040, 0x30040, + 0x30100, 0x30144, + 0x30190, 0x301a0, + 0x301a8, 0x301b8, + 0x301c4, 0x301c8, + 0x301d0, 0x301d0, + 0x30200, 0x30318, + 0x30400, 0x304b4, + 0x304c0, 0x3052c, + 0x30540, 0x3061c, + 0x30800, 0x30828, + 0x30834, 0x30834, + 0x308c0, 0x30908, + 0x30910, 0x309ac, + 0x30a00, 0x30a14, + 0x30a1c, 0x30a2c, + 0x30a44, 0x30a50, + 0x30a74, 0x30a74, + 0x30a7c, 0x30afc, + 0x30b08, 0x30c24, + 0x30d00, 0x30d00, + 0x30d08, 0x30d14, + 0x30d1c, 0x30d20, + 0x30d3c, 0x30d3c, + 0x30d48, 0x30d50, + 0x31200, 0x3120c, + 0x31220, 0x31220, + 0x31240, 0x31240, + 0x31600, 0x3160c, + 0x31a00, 0x31a1c, + 0x31e00, 0x31e20, + 0x31e38, 0x31e3c, + 0x31e80, 0x31e80, + 0x31e88, 0x31ea8, + 0x31eb0, 0x31eb4, + 0x31ec8, 0x31ed4, + 0x31fb8, 0x32004, + 0x32200, 0x32200, + 0x32208, 0x32240, + 0x32248, 0x32280, + 0x32288, 0x322c0, + 0x322c8, 0x322fc, + 0x32600, 0x32630, + 0x32a00, 0x32abc, + 0x32b00, 0x32b10, + 0x32b20, 0x32b30, + 0x32b40, 0x32b50, + 0x32b60, 0x32b70, + 0x33000, 0x33028, + 0x33030, 0x33048, + 0x33060, 0x33068, + 0x33070, 0x3309c, + 0x330f0, 0x33128, + 0x33130, 0x33148, + 0x33160, 0x33168, + 0x33170, 0x3319c, + 0x331f0, 0x33238, + 0x33240, 0x33240, + 0x33248, 0x33250, + 0x3325c, 0x33264, + 0x33270, 0x332b8, + 0x332c0, 0x332e4, + 0x332f8, 0x33338, + 0x33340, 0x33340, + 0x33348, 0x33350, + 0x3335c, 0x33364, + 0x33370, 0x333b8, + 0x333c0, 0x333e4, + 0x333f8, 0x33428, + 0x33430, 0x33448, + 0x33460, 0x33468, + 0x33470, 0x3349c, + 0x334f0, 0x33528, + 0x33530, 0x33548, + 0x33560, 0x33568, + 0x33570, 0x3359c, + 0x335f0, 0x33638, + 0x33640, 0x33640, + 0x33648, 0x33650, + 0x3365c, 0x33664, + 0x33670, 0x336b8, + 0x336c0, 0x336e4, + 0x336f8, 0x33738, + 0x33740, 0x33740, + 0x33748, 0x33750, + 0x3375c, 0x33764, + 0x33770, 0x337b8, + 0x337c0, 0x337e4, + 0x337f8, 0x337fc, + 0x33814, 0x33814, + 0x3382c, 0x3382c, + 0x33880, 0x3388c, + 0x338e8, 0x338ec, + 0x33900, 0x33928, + 0x33930, 0x33948, + 0x33960, 0x33968, + 0x33970, 0x3399c, + 0x339f0, 0x33a38, + 0x33a40, 0x33a40, + 0x33a48, 0x33a50, + 0x33a5c, 0x33a64, + 0x33a70, 0x33ab8, + 0x33ac0, 0x33ae4, + 0x33af8, 0x33b10, + 0x33b28, 0x33b28, + 0x33b3c, 0x33b50, + 0x33bf0, 0x33c10, + 0x33c28, 0x33c28, + 0x33c3c, 0x33c50, + 0x33cf0, 0x33cfc, + 0x34000, 0x34030, + 0x34038, 0x34038, + 0x34040, 0x34040, + 0x34100, 0x34144, + 0x34190, 0x341a0, + 0x341a8, 0x341b8, + 0x341c4, 0x341c8, + 0x341d0, 0x341d0, + 0x34200, 0x34318, + 0x34400, 0x344b4, + 0x344c0, 0x3452c, + 0x34540, 0x3461c, + 0x34800, 0x34828, + 0x34834, 0x34834, + 0x348c0, 0x34908, + 0x34910, 0x349ac, + 0x34a00, 0x34a14, + 0x34a1c, 0x34a2c, + 0x34a44, 0x34a50, + 0x34a74, 0x34a74, + 0x34a7c, 0x34afc, + 0x34b08, 0x34c24, + 0x34d00, 0x34d00, + 0x34d08, 0x34d14, + 0x34d1c, 0x34d20, + 0x34d3c, 0x34d3c, + 0x34d48, 0x34d50, + 0x35200, 0x3520c, + 0x35220, 0x35220, + 
0x35240, 0x35240, + 0x35600, 0x3560c, + 0x35a00, 0x35a1c, + 0x35e00, 0x35e20, + 0x35e38, 0x35e3c, + 0x35e80, 0x35e80, + 0x35e88, 0x35ea8, + 0x35eb0, 0x35eb4, + 0x35ec8, 0x35ed4, + 0x35fb8, 0x36004, + 0x36200, 0x36200, + 0x36208, 0x36240, + 0x36248, 0x36280, + 0x36288, 0x362c0, + 0x362c8, 0x362fc, + 0x36600, 0x36630, + 0x36a00, 0x36abc, + 0x36b00, 0x36b10, + 0x36b20, 0x36b30, + 0x36b40, 0x36b50, + 0x36b60, 0x36b70, + 0x37000, 0x37028, + 0x37030, 0x37048, + 0x37060, 0x37068, + 0x37070, 0x3709c, + 0x370f0, 0x37128, + 0x37130, 0x37148, + 0x37160, 0x37168, + 0x37170, 0x3719c, + 0x371f0, 0x37238, + 0x37240, 0x37240, + 0x37248, 0x37250, + 0x3725c, 0x37264, + 0x37270, 0x372b8, + 0x372c0, 0x372e4, + 0x372f8, 0x37338, + 0x37340, 0x37340, + 0x37348, 0x37350, + 0x3735c, 0x37364, + 0x37370, 0x373b8, + 0x373c0, 0x373e4, + 0x373f8, 0x37428, + 0x37430, 0x37448, + 0x37460, 0x37468, + 0x37470, 0x3749c, + 0x374f0, 0x37528, + 0x37530, 0x37548, + 0x37560, 0x37568, + 0x37570, 0x3759c, + 0x375f0, 0x37638, + 0x37640, 0x37640, + 0x37648, 0x37650, + 0x3765c, 0x37664, + 0x37670, 0x376b8, + 0x376c0, 0x376e4, + 0x376f8, 0x37738, + 0x37740, 0x37740, + 0x37748, 0x37750, + 0x3775c, 0x37764, + 0x37770, 0x377b8, + 0x377c0, 0x377e4, + 0x377f8, 0x377fc, + 0x37814, 0x37814, + 0x3782c, 0x3782c, + 0x37880, 0x3788c, + 0x378e8, 0x378ec, + 0x37900, 0x37928, + 0x37930, 0x37948, + 0x37960, 0x37968, + 0x37970, 0x3799c, + 0x379f0, 0x37a38, + 0x37a40, 0x37a40, + 0x37a48, 0x37a50, + 0x37a5c, 0x37a64, + 0x37a70, 0x37ab8, + 0x37ac0, 0x37ae4, + 0x37af8, 0x37b10, + 0x37b28, 0x37b28, + 0x37b3c, 0x37b50, + 0x37bf0, 0x37c10, + 0x37c28, 0x37c28, + 0x37c3c, 0x37c50, + 0x37cf0, 0x37cfc, + 0x38000, 0x38030, + 0x38038, 0x38038, + 0x38040, 0x38040, + 0x38100, 0x38144, + 0x38190, 0x381a0, + 0x381a8, 0x381b8, + 0x381c4, 0x381c8, + 0x381d0, 0x381d0, + 0x38200, 0x38318, + 0x38400, 0x384b4, + 0x384c0, 0x3852c, + 0x38540, 0x3861c, + 0x38800, 0x38828, + 0x38834, 0x38834, + 0x388c0, 0x38908, + 0x38910, 0x389ac, + 0x38a00, 0x38a14, + 0x38a1c, 0x38a2c, + 0x38a44, 0x38a50, + 0x38a74, 0x38a74, + 0x38a7c, 0x38afc, + 0x38b08, 0x38c24, + 0x38d00, 0x38d00, + 0x38d08, 0x38d14, + 0x38d1c, 0x38d20, + 0x38d3c, 0x38d3c, + 0x38d48, 0x38d50, + 0x39200, 0x3920c, + 0x39220, 0x39220, + 0x39240, 0x39240, + 0x39600, 0x3960c, + 0x39a00, 0x39a1c, + 0x39e00, 0x39e20, + 0x39e38, 0x39e3c, + 0x39e80, 0x39e80, + 0x39e88, 0x39ea8, + 0x39eb0, 0x39eb4, + 0x39ec8, 0x39ed4, + 0x39fb8, 0x3a004, + 0x3a200, 0x3a200, + 0x3a208, 0x3a240, + 0x3a248, 0x3a280, + 0x3a288, 0x3a2c0, + 0x3a2c8, 0x3a2fc, + 0x3a600, 0x3a630, + 0x3aa00, 0x3aabc, + 0x3ab00, 0x3ab10, + 0x3ab20, 0x3ab30, + 0x3ab40, 0x3ab50, + 0x3ab60, 0x3ab70, + 0x3b000, 0x3b028, + 0x3b030, 0x3b048, + 0x3b060, 0x3b068, + 0x3b070, 0x3b09c, + 0x3b0f0, 0x3b128, + 0x3b130, 0x3b148, + 0x3b160, 0x3b168, + 0x3b170, 0x3b19c, + 0x3b1f0, 0x3b238, + 0x3b240, 0x3b240, + 0x3b248, 0x3b250, + 0x3b25c, 0x3b264, + 0x3b270, 0x3b2b8, + 0x3b2c0, 0x3b2e4, + 0x3b2f8, 0x3b338, + 0x3b340, 0x3b340, + 0x3b348, 0x3b350, + 0x3b35c, 0x3b364, + 0x3b370, 0x3b3b8, + 0x3b3c0, 0x3b3e4, + 0x3b3f8, 0x3b428, + 0x3b430, 0x3b448, + 0x3b460, 0x3b468, + 0x3b470, 0x3b49c, + 0x3b4f0, 0x3b528, + 0x3b530, 0x3b548, + 0x3b560, 0x3b568, + 0x3b570, 0x3b59c, + 0x3b5f0, 0x3b638, + 0x3b640, 0x3b640, + 0x3b648, 0x3b650, + 0x3b65c, 0x3b664, + 0x3b670, 0x3b6b8, + 0x3b6c0, 0x3b6e4, + 0x3b6f8, 0x3b738, + 0x3b740, 0x3b740, + 0x3b748, 0x3b750, + 0x3b75c, 0x3b764, + 0x3b770, 0x3b7b8, + 0x3b7c0, 0x3b7e4, + 0x3b7f8, 0x3b7fc, + 0x3b814, 0x3b814, + 0x3b82c, 0x3b82c, + 0x3b880, 0x3b88c, + 0x3b8e8, 
0x3b8ec, + 0x3b900, 0x3b928, + 0x3b930, 0x3b948, + 0x3b960, 0x3b968, + 0x3b970, 0x3b99c, + 0x3b9f0, 0x3ba38, + 0x3ba40, 0x3ba40, + 0x3ba48, 0x3ba50, + 0x3ba5c, 0x3ba64, + 0x3ba70, 0x3bab8, + 0x3bac0, 0x3bae4, + 0x3baf8, 0x3bb10, + 0x3bb28, 0x3bb28, + 0x3bb3c, 0x3bb50, + 0x3bbf0, 0x3bc10, + 0x3bc28, 0x3bc28, + 0x3bc3c, 0x3bc50, + 0x3bcf0, 0x3bcfc, + 0x3c000, 0x3c030, + 0x3c038, 0x3c038, + 0x3c040, 0x3c040, + 0x3c100, 0x3c144, + 0x3c190, 0x3c1a0, + 0x3c1a8, 0x3c1b8, + 0x3c1c4, 0x3c1c8, + 0x3c1d0, 0x3c1d0, + 0x3c200, 0x3c318, + 0x3c400, 0x3c4b4, + 0x3c4c0, 0x3c52c, + 0x3c540, 0x3c61c, + 0x3c800, 0x3c828, + 0x3c834, 0x3c834, + 0x3c8c0, 0x3c908, + 0x3c910, 0x3c9ac, + 0x3ca00, 0x3ca14, + 0x3ca1c, 0x3ca2c, + 0x3ca44, 0x3ca50, + 0x3ca74, 0x3ca74, + 0x3ca7c, 0x3cafc, + 0x3cb08, 0x3cc24, + 0x3cd00, 0x3cd00, + 0x3cd08, 0x3cd14, + 0x3cd1c, 0x3cd20, + 0x3cd3c, 0x3cd3c, + 0x3cd48, 0x3cd50, + 0x3d200, 0x3d20c, + 0x3d220, 0x3d220, + 0x3d240, 0x3d240, + 0x3d600, 0x3d60c, + 0x3da00, 0x3da1c, + 0x3de00, 0x3de20, + 0x3de38, 0x3de3c, + 0x3de80, 0x3de80, + 0x3de88, 0x3dea8, + 0x3deb0, 0x3deb4, + 0x3dec8, 0x3ded4, + 0x3dfb8, 0x3e004, + 0x3e200, 0x3e200, + 0x3e208, 0x3e240, + 0x3e248, 0x3e280, + 0x3e288, 0x3e2c0, + 0x3e2c8, 0x3e2fc, + 0x3e600, 0x3e630, + 0x3ea00, 0x3eabc, + 0x3eb00, 0x3eb10, + 0x3eb20, 0x3eb30, + 0x3eb40, 0x3eb50, + 0x3eb60, 0x3eb70, + 0x3f000, 0x3f028, + 0x3f030, 0x3f048, + 0x3f060, 0x3f068, + 0x3f070, 0x3f09c, + 0x3f0f0, 0x3f128, + 0x3f130, 0x3f148, + 0x3f160, 0x3f168, + 0x3f170, 0x3f19c, + 0x3f1f0, 0x3f238, + 0x3f240, 0x3f240, + 0x3f248, 0x3f250, + 0x3f25c, 0x3f264, + 0x3f270, 0x3f2b8, + 0x3f2c0, 0x3f2e4, + 0x3f2f8, 0x3f338, + 0x3f340, 0x3f340, + 0x3f348, 0x3f350, + 0x3f35c, 0x3f364, + 0x3f370, 0x3f3b8, + 0x3f3c0, 0x3f3e4, + 0x3f3f8, 0x3f428, + 0x3f430, 0x3f448, + 0x3f460, 0x3f468, + 0x3f470, 0x3f49c, + 0x3f4f0, 0x3f528, + 0x3f530, 0x3f548, + 0x3f560, 0x3f568, + 0x3f570, 0x3f59c, + 0x3f5f0, 0x3f638, + 0x3f640, 0x3f640, + 0x3f648, 0x3f650, + 0x3f65c, 0x3f664, + 0x3f670, 0x3f6b8, + 0x3f6c0, 0x3f6e4, + 0x3f6f8, 0x3f738, + 0x3f740, 0x3f740, + 0x3f748, 0x3f750, + 0x3f75c, 0x3f764, + 0x3f770, 0x3f7b8, + 0x3f7c0, 0x3f7e4, + 0x3f7f8, 0x3f7fc, + 0x3f814, 0x3f814, + 0x3f82c, 0x3f82c, + 0x3f880, 0x3f88c, + 0x3f8e8, 0x3f8ec, + 0x3f900, 0x3f928, + 0x3f930, 0x3f948, + 0x3f960, 0x3f968, + 0x3f970, 0x3f99c, + 0x3f9f0, 0x3fa38, + 0x3fa40, 0x3fa40, + 0x3fa48, 0x3fa50, + 0x3fa5c, 0x3fa64, + 0x3fa70, 0x3fab8, + 0x3fac0, 0x3fae4, + 0x3faf8, 0x3fb10, + 0x3fb28, 0x3fb28, + 0x3fb3c, 0x3fb50, + 0x3fbf0, 0x3fc10, + 0x3fc28, 0x3fc28, + 0x3fc3c, 0x3fc50, + 0x3fcf0, 0x3fcfc, + 0x40000, 0x4000c, + 0x40040, 0x40050, + 0x40060, 0x40068, + 0x4007c, 0x4008c, + 0x40094, 0x400b0, + 0x400c0, 0x40144, + 0x40180, 0x4018c, + 0x40200, 0x40254, + 0x40260, 0x40264, + 0x40270, 0x40288, + 0x40290, 0x40298, + 0x402ac, 0x402c8, + 0x402d0, 0x402e0, + 0x402f0, 0x402f0, + 0x40300, 0x4033c, + 0x403f8, 0x403fc, + 0x41304, 0x413c4, + 0x41400, 0x4140c, + 0x41414, 0x4141c, + 0x41480, 0x414d0, + 0x44000, 0x44054, + 0x4405c, 0x44078, + 0x440c0, 0x44174, + 0x44180, 0x441ac, + 0x441b4, 0x441b8, + 0x441c0, 0x44254, + 0x4425c, 0x44278, + 0x442c0, 0x44374, + 0x44380, 0x443ac, + 0x443b4, 0x443b8, + 0x443c0, 0x44454, + 0x4445c, 0x44478, + 0x444c0, 0x44574, + 0x44580, 0x445ac, + 0x445b4, 0x445b8, + 0x445c0, 0x44654, + 0x4465c, 0x44678, + 0x446c0, 0x44774, + 0x44780, 0x447ac, + 0x447b4, 0x447b8, + 0x447c0, 0x44854, + 0x4485c, 0x44878, + 0x448c0, 0x44974, + 0x44980, 0x449ac, + 0x449b4, 0x449b8, + 0x449c0, 0x449fc, + 0x45000, 0x45004, + 0x45010, 0x45030, + 
0x45040, 0x45060, + 0x45068, 0x45068, + 0x45080, 0x45084, + 0x450a0, 0x450b0, + 0x45200, 0x45204, + 0x45210, 0x45230, + 0x45240, 0x45260, + 0x45268, 0x45268, + 0x45280, 0x45284, + 0x452a0, 0x452b0, + 0x460c0, 0x460e4, + 0x47000, 0x4703c, + 0x47044, 0x4708c, + 0x47200, 0x47250, + 0x47400, 0x47408, + 0x47414, 0x47420, + 0x47600, 0x47618, + 0x47800, 0x47814, + 0x48000, 0x4800c, + 0x48040, 0x48050, + 0x48060, 0x48068, + 0x4807c, 0x4808c, + 0x48094, 0x480b0, + 0x480c0, 0x48144, + 0x48180, 0x4818c, + 0x48200, 0x48254, + 0x48260, 0x48264, + 0x48270, 0x48288, + 0x48290, 0x48298, + 0x482ac, 0x482c8, + 0x482d0, 0x482e0, + 0x482f0, 0x482f0, + 0x48300, 0x4833c, + 0x483f8, 0x483fc, + 0x49304, 0x493c4, + 0x49400, 0x4940c, + 0x49414, 0x4941c, + 0x49480, 0x494d0, + 0x4c000, 0x4c054, + 0x4c05c, 0x4c078, + 0x4c0c0, 0x4c174, + 0x4c180, 0x4c1ac, + 0x4c1b4, 0x4c1b8, + 0x4c1c0, 0x4c254, + 0x4c25c, 0x4c278, + 0x4c2c0, 0x4c374, + 0x4c380, 0x4c3ac, + 0x4c3b4, 0x4c3b8, + 0x4c3c0, 0x4c454, + 0x4c45c, 0x4c478, + 0x4c4c0, 0x4c574, + 0x4c580, 0x4c5ac, + 0x4c5b4, 0x4c5b8, + 0x4c5c0, 0x4c654, + 0x4c65c, 0x4c678, + 0x4c6c0, 0x4c774, + 0x4c780, 0x4c7ac, + 0x4c7b4, 0x4c7b8, + 0x4c7c0, 0x4c854, + 0x4c85c, 0x4c878, + 0x4c8c0, 0x4c974, + 0x4c980, 0x4c9ac, + 0x4c9b4, 0x4c9b8, + 0x4c9c0, 0x4c9fc, + 0x4d000, 0x4d004, + 0x4d010, 0x4d030, + 0x4d040, 0x4d060, + 0x4d068, 0x4d068, + 0x4d080, 0x4d084, + 0x4d0a0, 0x4d0b0, + 0x4d200, 0x4d204, + 0x4d210, 0x4d230, + 0x4d240, 0x4d260, + 0x4d268, 0x4d268, + 0x4d280, 0x4d284, + 0x4d2a0, 0x4d2b0, + 0x4e0c0, 0x4e0e4, + 0x4f000, 0x4f03c, + 0x4f044, 0x4f08c, + 0x4f200, 0x4f250, + 0x4f400, 0x4f408, + 0x4f414, 0x4f420, + 0x4f600, 0x4f618, + 0x4f800, 0x4f814, + 0x50000, 0x50084, + 0x50090, 0x500cc, + 0x50400, 0x50400, + 0x50800, 0x50884, + 0x50890, 0x508cc, + 0x50c00, 0x50c00, + 0x51000, 0x5101c, + 0x51300, 0x51308, + }; + + u32 *buf_end = (u32 *)((char *)buf + buf_size); + const unsigned int *reg_ranges; + int reg_ranges_size, range; + unsigned int chip_version = CHELSIO_CHIP_VERSION(adap->params.chip); + + /* Select the right set of register ranges to dump depending on the + * adapter chip type. + */ + switch (chip_version) { + case CHELSIO_T5: + reg_ranges = t5_reg_ranges; + reg_ranges_size = ARRAY_SIZE(t5_reg_ranges); + break; + + default: + dev_err(adap, + "Unsupported chip version %d\n", chip_version); + return; + } + + /* Clear the register buffer and insert the appropriate register + * values selected by the above register ranges. + */ + memset(buf, 0, buf_size); + for (range = 0; range < reg_ranges_size; range += 2) { + unsigned int reg = reg_ranges[range]; + unsigned int last_reg = reg_ranges[range + 1]; + u32 *bufp = (u32 *)((char *)buf + reg); + + /* Iterate across the register range filling in the register + * buffer but don't write past the end of the register buffer. + */ + while (reg <= last_reg && bufp < buf_end) { + *bufp++ = t4_read_reg(adap, reg); + reg += sizeof(u32); + } + } +} + +/* EEPROM reads take a few tens of us while writes can take a bit over 5 ms. */ +#define EEPROM_DELAY 10 /* 10us per poll spin */ +#define EEPROM_MAX_POLL 5000 /* x 5000 == 50ms */ + +#define EEPROM_STAT_ADDR 0x7bfc + +/** + * Small utility function to wait till any outstanding VPD Access is complete. + * We have a per-adapter state variable "VPD Busy" to indicate when we have a + * VPD Access in flight. This allows us to handle the problem of having a + * previous VPD Access time out and prevent an attempt to inject a new VPD + * Request before any in-flight VPD request has completed. 
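For orientation, a hedged sketch of how a caller is expected to combine the VPD helpers whose prototypes were added to common.h above, assuming the cxgbe base definitions (struct adapter, u32, errno values) are in scope: accesses are 32-bit wide, the address is a 4-byte-aligned virtual EEPROM address, and writes are bracketed by the write-protect helper, mirroring the cxgbe_set_eeprom() hunk later in this patch.

static int
update_vpd_word(struct adapter *adap, u32 addr, u32 val)
{
	u32 old;
	int ret;

	if (addr & 3)
		return -EINVAL;		/* VPD accesses are 4-byte aligned */

	ret = t4_seeprom_read(adap, addr, &old);
	if (ret)
		return ret;
	if (old == val)
		return 0;		/* nothing to change */

	ret = t4_seeprom_wp(adap, 0);	/* drop write protection */
	if (ret)
		return ret;

	ret = t4_seeprom_write(adap, addr, val);

	/* Re-arm write protection even if the write failed. */
	t4_seeprom_wp(adap, 1);
	return ret;
}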
+ */ +static int t4_seeprom_wait(struct adapter *adapter) +{ + unsigned int base = adapter->params.pci.vpd_cap_addr; + int max_poll; + + /* If no VPD Access is in flight, we can just return success right + * away. + */ + if (!adapter->vpd_busy) + return 0; + + /* Poll the VPD Capability Address/Flag register waiting for it + * to indicate that the operation is complete. + */ + max_poll = EEPROM_MAX_POLL; + do { + u16 val; + + udelay(EEPROM_DELAY); + t4_os_pci_read_cfg2(adapter, base + PCI_VPD_ADDR, &val); + + /* If the operation is complete, mark the VPD as no longer + * busy and return success. + */ + if ((val & PCI_VPD_ADDR_F) == adapter->vpd_flag) { + adapter->vpd_busy = 0; + return 0; + } + } while (--max_poll); + + /* Failure! Note that we leave the VPD Busy status set in order to + * avoid pushing a new VPD Access request into the VPD Capability till + * the current operation eventually succeeds. It's a bug to issue a + * new request when an existing request is in flight and will result + * in corrupt hardware state. + */ + return -ETIMEDOUT; +} + +/** + * t4_seeprom_read - read a serial EEPROM location + * @adapter: adapter to read + * @addr: EEPROM virtual address + * @data: where to store the read data + * + * Read a 32-bit word from a location in serial EEPROM using the card's PCI + * VPD capability. Note that this function must be called with a virtual + * address. + */ +int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data) +{ + unsigned int base = adapter->params.pci.vpd_cap_addr; + int ret; + + /* VPD Accesses must alway be 4-byte aligned! + */ + if (addr >= EEPROMVSIZE || (addr & 3)) + return -EINVAL; + + /* Wait for any previous operation which may still be in flight to + * complete. + */ + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD still busy from previous operation\n"); + return ret; + } + + /* Issue our new VPD Read request, mark the VPD as being busy and wait + * for our request to complete. If it doesn't complete, note the + * error and return it to our caller. Note that we do not reset the + * VPD Busy status! + */ + t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, (u16)addr); + adapter->vpd_busy = 1; + adapter->vpd_flag = PCI_VPD_ADDR_F; + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD read of address %#x failed\n", addr); + return ret; + } + + /* Grab the returned data, swizzle it into our endianness and + * return success. + */ + t4_os_pci_read_cfg4(adapter, base + PCI_VPD_DATA, data); + *data = le32_to_cpu(*data); + return 0; +} + +/** + * t4_seeprom_write - write a serial EEPROM location + * @adapter: adapter to write + * @addr: virtual EEPROM address + * @data: value to write + * + * Write a 32-bit word to a location in serial EEPROM using the card's PCI + * VPD capability. Note that this function must be called with a virtual + * address. + */ +int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data) +{ + unsigned int base = adapter->params.pci.vpd_cap_addr; + int ret; + u32 stats_reg; + int max_poll; + + /* VPD Accesses must alway be 4-byte aligned! + */ + if (addr >= EEPROMVSIZE || (addr & 3)) + return -EINVAL; + + /* Wait for any previous operation which may still be in flight to + * complete. + */ + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD still busy from previous operation\n"); + return ret; + } + + /* Issue our new VPD Read request, mark the VPD as being busy and wait + * for our request to complete. 
If it doesn't complete, note the + * error and return it to our caller. Note that we do not reset the + * VPD Busy status! + */ + t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, + cpu_to_le32(data)); + t4_os_pci_write_cfg2(adapter, base + PCI_VPD_ADDR, + (u16)addr | PCI_VPD_ADDR_F); + adapter->vpd_busy = 1; + adapter->vpd_flag = 0; + ret = t4_seeprom_wait(adapter); + if (ret) { + dev_err(adapter, "VPD write of address %#x failed\n", addr); + return ret; + } + + /* Reset PCI_VPD_DATA register after a transaction and wait for our + * request to complete. If it doesn't complete, return error. + */ + t4_os_pci_write_cfg4(adapter, base + PCI_VPD_DATA, 0); + max_poll = EEPROM_MAX_POLL; + do { + udelay(EEPROM_DELAY); + t4_seeprom_read(adapter, EEPROM_STAT_ADDR, &stats_reg); + } while ((stats_reg & 0x1) && --max_poll); + if (!max_poll) + return -ETIMEDOUT; + + /* Return success! */ + return 0; +} + +/** + * t4_seeprom_wp - enable/disable EEPROM write protection + * @adapter: the adapter + * @enable: whether to enable or disable write protection + * + * Enables or disables write protection on the serial EEPROM. + */ +int t4_seeprom_wp(struct adapter *adapter, int enable) +{ + return t4_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0); +} + +/** * t4_config_rss_range - configure a portion of the RSS mapping table * @adapter: the adapter * @mbox: mbox to use for the FW command @@ -2326,6 +3351,21 @@ int t4_get_flash_params(struct adapter *adapter) return 0; } +static void set_pcie_completion_timeout(struct adapter *adapter, + u8 range) +{ + u32 pcie_cap; + u16 val; + + pcie_cap = t4_os_find_pci_capability(adapter, PCI_CAP_ID_EXP); + if (pcie_cap) { + t4_os_pci_read_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, &val); + val &= 0xfff0; + val |= range; + t4_os_pci_write_cfg2(adapter, pcie_cap + PCI_EXP_DEVCTL2, val); + } +} + /** * t4_prep_adapter - prepare SW and HW for operation * @adapter: the adapter @@ -2369,6 +3409,9 @@ int t4_prep_adapter(struct adapter *adapter) return -EINVAL; } + adapter->params.pci.vpd_cap_addr = + t4_os_find_pci_capability(adapter, PCI_CAP_ID_VPD); + ret = t4_get_flash_params(adapter); if (ret < 0) return ret; @@ -2384,6 +3427,8 @@ int t4_prep_adapter(struct adapter *adapter) adapter->params.portvec = 1; adapter->params.vpd.cclk = 50000; + /* Set pci completion timeout value to 4 seconds. */ + set_pcie_completion_timeout(adapter, 0xd); return 0; } diff --git a/drivers/net/cxgbe/base/t4_hw.h b/drivers/net/cxgbe/base/t4_hw.h index bf623cf4..5e62c417 100644 --- a/drivers/net/cxgbe/base/t4_hw.h +++ b/drivers/net/cxgbe/base/t4_hw.h @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2014-2015 Chelsio Communications. + * Copyright(c) 2014-2016 Chelsio Communications. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -36,6 +36,9 @@ enum { NCHAN = 4, /* # of HW channels */ + EEPROMSIZE = 17408, /* Serial EEPROM physical size */ + EEPROMVSIZE = 32768, /* Serial EEPROM virtual address space size */ + EEPROMPFSIZE = 1024, /* EEPROM writable area size for PFn, n>0 */ NMTUS = 16, /* size of MTU table */ NCCTRL_WIN = 32, /* # of congestion control windows */ MBOX_LEN = 64, /* mailbox size in bytes */ diff --git a/drivers/net/cxgbe/cxgbe_ethdev.c b/drivers/net/cxgbe/cxgbe_ethdev.c index bb134e50..6c130ed2 100644 --- a/drivers/net/cxgbe/cxgbe_ethdev.c +++ b/drivers/net/cxgbe/cxgbe_ethdev.c @@ -656,7 +656,6 @@ static void cxgbe_dev_stats_get(struct rte_eth_dev *eth_dev, /* RX Stats */ eth_stats->ipackets = ps.rx_frames; eth_stats->ibytes = ps.rx_octets; - eth_stats->imcasts = ps.rx_mcast_frames; eth_stats->imissed = ps.rx_ovflow0 + ps.rx_ovflow1 + ps.rx_ovflow2 + ps.rx_ovflow3 + ps.rx_trunc0 + ps.rx_trunc1 + @@ -782,6 +781,168 @@ cxgbe_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) return NULL; } +static int cxgbe_get_eeprom_length(struct rte_eth_dev *dev) +{ + RTE_SET_USED(dev); + return EEPROMSIZE; +} + +/** + * eeprom_ptov - translate a physical EEPROM address to virtual + * @phys_addr: the physical EEPROM address + * @fn: the PCI function number + * @sz: size of function-specific area + * + * Translate a physical EEPROM address to virtual. The first 1K is + * accessed through virtual addresses starting at 31K, the rest is + * accessed through virtual addresses starting at 0. + * + * The mapping is as follows: + * [0..1K) -> [31K..32K) + * [1K..1K+A) -> [31K-A..31K) + * [1K+A..ES) -> [0..ES-A-1K) + * + * where A = @fn * @sz, and ES = EEPROM size. + */ +static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz) +{ + fn *= sz; + if (phys_addr < 1024) + return phys_addr + (31 << 10); + if (phys_addr < 1024 + fn) + return fn + phys_addr - 1024; + if (phys_addr < EEPROMSIZE) + return phys_addr - 1024 - fn; + if (phys_addr < EEPROMVSIZE) + return phys_addr - 1024; + return -EINVAL; +} + +/* The next two routines implement eeprom read/write from physical addresses. + */ +static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = t4_seeprom_read(adap, vaddr, v); + return vaddr < 0 ? vaddr : 0; +} + +static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v) +{ + int vaddr = eeprom_ptov(phys_addr, adap->pf, EEPROMPFSIZE); + + if (vaddr >= 0) + vaddr = t4_seeprom_write(adap, vaddr, v); + return vaddr < 0 ? 
vaddr : 0; +} + +#define EEPROM_MAGIC 0x38E2F10C + +static int cxgbe_get_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *e) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + u32 i, err = 0; + u8 *buf = rte_zmalloc(NULL, EEPROMSIZE, 0); + + if (!buf) + return -ENOMEM; + + e->magic = EEPROM_MAGIC; + for (i = e->offset & ~3; !err && i < e->offset + e->length; i += 4) + err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]); + + if (!err) + rte_memcpy(e->data, buf + e->offset, e->length); + rte_free(buf); + return err; +} + +static int cxgbe_set_eeprom(struct rte_eth_dev *dev, + struct rte_dev_eeprom_info *eeprom) +{ + struct port_info *pi = (struct port_info *)(dev->data->dev_private); + struct adapter *adapter = pi->adapter; + u8 *buf; + int err = 0; + u32 aligned_offset, aligned_len, *p; + + if (eeprom->magic != EEPROM_MAGIC) + return -EINVAL; + + aligned_offset = eeprom->offset & ~3; + aligned_len = (eeprom->length + (eeprom->offset & 3) + 3) & ~3; + + if (adapter->pf > 0) { + u32 start = 1024 + adapter->pf * EEPROMPFSIZE; + + if (aligned_offset < start || + aligned_offset + aligned_len > start + EEPROMPFSIZE) + return -EPERM; + } + + if (aligned_offset != eeprom->offset || aligned_len != eeprom->length) { + /* RMW possibly needed for first or last words. + */ + buf = rte_zmalloc(NULL, aligned_len, 0); + if (!buf) + return -ENOMEM; + err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf); + if (!err && aligned_len > 4) + err = eeprom_rd_phys(adapter, + aligned_offset + aligned_len - 4, + (u32 *)&buf[aligned_len - 4]); + if (err) + goto out; + rte_memcpy(buf + (eeprom->offset & 3), eeprom->data, + eeprom->length); + } else { + buf = eeprom->data; + } + + err = t4_seeprom_wp(adapter, false); + if (err) + goto out; + + for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) { + err = eeprom_wr_phys(adapter, aligned_offset, *p); + aligned_offset += 4; + } + + if (!err) + err = t4_seeprom_wp(adapter, true); +out: + if (buf != eeprom->data) + rte_free(buf); + return err; +} + +static int cxgbe_get_regs_len(struct rte_eth_dev *eth_dev) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + + return t4_get_regs_len(adapter) / sizeof(uint32_t); +} + +static int cxgbe_get_regs(struct rte_eth_dev *eth_dev, + struct rte_dev_reg_info *regs) +{ + struct port_info *pi = (struct port_info *)(eth_dev->data->dev_private); + struct adapter *adapter = pi->adapter; + + regs->length = cxgbe_get_regs_len(eth_dev); + regs->version = CHELSIO_CHIP_VERSION(adapter->params.chip) | + (CHELSIO_CHIP_RELEASE(adapter->params.chip) << 10) | + (1 << 16); + t4_get_regs(adapter, regs->data, (regs->length * sizeof(uint32_t))); + + return 0; +} + static const struct eth_dev_ops cxgbe_eth_dev_ops = { .dev_start = cxgbe_dev_start, .dev_stop = cxgbe_dev_stop, @@ -807,6 +968,11 @@ static const struct eth_dev_ops cxgbe_eth_dev_ops = { .stats_reset = cxgbe_dev_stats_reset, .flow_ctrl_get = cxgbe_flow_ctrl_get, .flow_ctrl_set = cxgbe_flow_ctrl_set, + .get_eeprom_length = cxgbe_get_eeprom_length, + .get_eeprom = cxgbe_get_eeprom, + .set_eeprom = cxgbe_set_eeprom, + .get_reg_length = cxgbe_get_regs_len, + .get_reg = cxgbe_get_regs, }; /* diff --git a/drivers/net/e1000/Makefile b/drivers/net/e1000/Makefile index f4879e67..57a60f0f 100644 --- a/drivers/net/e1000/Makefile +++ b/drivers/net/e1000/Makefile @@ -43,21 +43,23 @@ EXPORT_MAP := rte_pmd_e1000_version.map LIBABIVER := 1 -ifeq 
($(CC), icc) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # CFLAGS_BASE_DRIVER = -wd177 -wd181 -wd188 -wd869 -wd2259 else # -# CFLAGS for gcc +# CFLAGS for gcc/clang # CFLAGS_BASE_DRIVER = -Wno-uninitialized -Wno-unused-parameter CFLAGS_BASE_DRIVER += -Wno-unused-variable +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1) CFLAGS_BASE_DRIVER += -Wno-misleading-indentation endif endif +endif # # Add extra flags for base driver files (also known as shared code) diff --git a/drivers/net/e1000/base/e1000_phy.c b/drivers/net/e1000/base/e1000_phy.c index d43b7ce0..33f478b1 100644 --- a/drivers/net/e1000/base/e1000_phy.c +++ b/drivers/net/e1000/base/e1000_phy.c @@ -4153,12 +4153,13 @@ s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data) *data = E1000_READ_REG(hw, E1000_MPHY_DATA); /* Disable access to mPHY if it was originally disabled */ - if (locked) + if (locked) { ready = e1000_is_mphy_ready(hw); if (!ready) return -E1000_ERR_PHY; E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, E1000_MPHY_DIS_ACCESS); + } return E1000_SUCCESS; } @@ -4218,12 +4219,13 @@ s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data, E1000_WRITE_REG(hw, E1000_MPHY_DATA, data); /* Disable access to mPHY if it was originally disabled */ - if (locked) + if (locked) { ready = e1000_is_mphy_ready(hw); if (!ready) return -E1000_ERR_PHY; E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, E1000_MPHY_DIS_ACCESS); + } return E1000_SUCCESS; } diff --git a/drivers/net/e1000/em_rxtx.c b/drivers/net/e1000/em_rxtx.c index 441ccad8..6d8750a8 100644 --- a/drivers/net/e1000/em_rxtx.c +++ b/drivers/net/e1000/em_rxtx.c @@ -78,16 +78,6 @@ #define E1000_RXDCTL_GRAN 0x01000000 /* RXDCTL Granularity */ -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - /** * Structure associated with each descriptor of the RX ring of a RX queue. */ @@ -639,7 +629,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status) uint64_t pkt_flags; /* Check if VLAN present */ - pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0); + pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? 
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0); return pkt_flags; } @@ -729,7 +720,7 @@ eth_em_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) status, (unsigned) rte_le_to_cpu_16(rxd.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", @@ -909,7 +900,7 @@ eth_em_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) status, (unsigned) rte_le_to_cpu_16(rxd.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, @@ -1561,7 +1552,7 @@ em_alloc_rx_queue_mbufs(struct em_rx_queue *rxq) /* Initialize software ring entries */ for (i = 0; i < rxq->nb_rx_desc; i++) { volatile struct e1000_rx_desc *rxd; - struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); if (mbuf == NULL) { PMD_INIT_LOG(ERR, "RX mbuf alloc failed " diff --git a/drivers/net/e1000/igb_ethdev.c b/drivers/net/e1000/igb_ethdev.c index e0053fec..5067d208 100644 --- a/drivers/net/e1000/igb_ethdev.c +++ b/drivers/net/e1000/igb_ethdev.c @@ -86,9 +86,24 @@ #define E1000_INCVALUE_82576 (16 << IGB_82576_TSYNC_SHIFT) #define E1000_TSAUXC_DISABLE_SYSTIME 0x80000000 +#define E1000_VTIVAR_MISC 0x01740 +#define E1000_VTIVAR_MISC_MASK 0xFF +#define E1000_VTIVAR_VALID 0x80 +#define E1000_VTIVAR_MISC_MAILBOX 0 +#define E1000_VTIVAR_MISC_INTR_MASK 0x3 + +/* External VLAN Enable bit mask */ +#define E1000_CTRL_EXT_EXT_VLAN (1 << 26) + +/* External VLAN Ether Type bit mask and shift */ +#define E1000_VET_VET_EXT 0xFFFF0000 +#define E1000_VET_VET_EXT_SHIFT 16 + static int eth_igb_configure(struct rte_eth_dev *dev); static int eth_igb_start(struct rte_eth_dev *dev); static void eth_igb_stop(struct rte_eth_dev *dev); +static int eth_igb_dev_set_link_up(struct rte_eth_dev *dev); +static int eth_igb_dev_set_link_down(struct rte_eth_dev *dev); static void eth_igb_close(struct rte_eth_dev *dev); static void eth_igb_promiscuous_enable(struct rte_eth_dev *dev); static void eth_igb_promiscuous_disable(struct rte_eth_dev *dev); @@ -99,7 +114,10 @@ static int eth_igb_link_update(struct rte_eth_dev *dev, static void eth_igb_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); static int eth_igb_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); +static int eth_igb_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit); static void eth_igb_stats_reset(struct rte_eth_dev *dev); static void eth_igb_xstats_reset(struct rte_eth_dev *dev); static void eth_igb_infos_get(struct rte_eth_dev *dev, @@ -164,7 +182,10 @@ static int eth_igbvf_link_update(struct e1000_hw *hw); static void eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats); static int eth_igbvf_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); +static int eth_igbvf_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit); static void eth_igbvf_stats_reset(struct rte_eth_dev *dev); static int igbvf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); @@ -259,6 +280,9 @@ static void eth_igb_assign_msix_vector(struct 
e1000_hw *hw, int8_t direction, static void eth_igb_write_ivar(struct e1000_hw *hw, uint8_t msix_vector, uint8_t index, uint8_t offset); static void eth_igb_configure_msix_intr(struct rte_eth_dev *dev); +static void eth_igbvf_interrupt_handler(struct rte_intr_handle *handle, + void *param); +static void igbvf_mbx_process(struct rte_eth_dev *dev); /* * Define VF Stats MACRO for Non "cleared on read" register @@ -316,6 +340,8 @@ static const struct eth_dev_ops eth_igb_ops = { .dev_configure = eth_igb_configure, .dev_start = eth_igb_start, .dev_stop = eth_igb_stop, + .dev_set_link_up = eth_igb_dev_set_link_up, + .dev_set_link_down = eth_igb_dev_set_link_down, .dev_close = eth_igb_close, .promiscuous_enable = eth_igb_promiscuous_enable, .promiscuous_disable = eth_igb_promiscuous_disable, @@ -324,6 +350,7 @@ static const struct eth_dev_ops eth_igb_ops = { .link_update = eth_igb_link_update, .stats_get = eth_igb_stats_get, .xstats_get = eth_igb_xstats_get, + .xstats_get_names = eth_igb_xstats_get_names, .stats_reset = eth_igb_stats_reset, .xstats_reset = eth_igb_xstats_reset, .dev_infos_get = eth_igb_infos_get, @@ -385,6 +412,7 @@ static const struct eth_dev_ops igbvf_eth_dev_ops = { .link_update = eth_igb_link_update, .stats_get = eth_igbvf_stats_get, .xstats_get = eth_igbvf_xstats_get, + .xstats_get_names = eth_igbvf_xstats_get_names, .stats_reset = eth_igbvf_stats_reset, .xstats_reset = eth_igbvf_stats_reset, .vlan_filter_set = igbvf_vlan_filter_set, @@ -554,6 +582,41 @@ igb_intr_disable(struct e1000_hw *hw) E1000_WRITE_FLUSH(hw); } +static inline void +igbvf_intr_enable(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* only for mailbox */ + E1000_WRITE_REG(hw, E1000_EIAM, 1 << E1000_VTIVAR_MISC_MAILBOX); + E1000_WRITE_REG(hw, E1000_EIAC, 1 << E1000_VTIVAR_MISC_MAILBOX); + E1000_WRITE_REG(hw, E1000_EIMS, 1 << E1000_VTIVAR_MISC_MAILBOX); + E1000_WRITE_FLUSH(hw); +} + +/* only for mailbox now. If RX/TX needed, should extend this function. 
*/ +static void +igbvf_set_ivar_map(struct e1000_hw *hw, uint8_t msix_vector) +{ + uint32_t tmp = 0; + + /* mailbox */ + tmp |= (msix_vector & E1000_VTIVAR_MISC_INTR_MASK); + tmp |= E1000_VTIVAR_VALID; + E1000_WRITE_REG(hw, E1000_VTIVAR_MISC, tmp); +} + +static void +eth_igbvf_configure_msix_intr(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + /* Configure VF other cause ivar */ + igbvf_set_ivar_map(hw, E1000_VTIVAR_MISC_MAILBOX); +} + static inline int32_t igb_pf_reset_hw(struct e1000_hw *hw) { @@ -942,6 +1005,10 @@ eth_igbvf_dev_init(struct rte_eth_dev *eth_dev) eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, "igb_mac_82576_vf"); + rte_intr_callback_register(&pci_dev->intr_handle, + eth_igbvf_interrupt_handler, + (void *)eth_dev); + return 0; } @@ -950,6 +1017,7 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) { struct e1000_adapter *adapter = E1000_DEV_PRIVATE(eth_dev->data->dev_private); + struct rte_pci_device *pci_dev = eth_dev->pci_dev; PMD_INIT_FUNC_TRACE(); @@ -966,6 +1034,12 @@ eth_igbvf_dev_uninit(struct rte_eth_dev *eth_dev) rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; + /* disable uio intr before callback unregister */ + rte_intr_disable(&pci_dev->intr_handle); + rte_intr_callback_unregister(&pci_dev->intr_handle, + eth_igbvf_interrupt_handler, + (void *)eth_dev); + return 0; } @@ -1143,7 +1217,7 @@ eth_igb_start(struct rte_eth_dev *dev) rte_intr_disable(intr_handle); /* Power up the phy. Needed to make the link go Up */ - e1000_power_up_phy(hw); + eth_igb_dev_set_link_up(dev); /* * Packet Buffer Allocation (PBA) @@ -1349,10 +1423,7 @@ eth_igb_stop(struct rte_eth_dev *dev) } /* Power down the phy. Needed to make the link go Down */ - if (hw->phy.media_type == e1000_media_type_copper) - e1000_power_down_phy(hw); - else - e1000_shutdown_fiber_serdes_link(hw); + eth_igb_dev_set_link_down(dev); igb_dev_clear_queues(dev); @@ -1399,6 +1470,32 @@ eth_igb_stop(struct rte_eth_dev *dev) } } +static int +eth_igb_dev_set_link_up(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->phy.media_type == e1000_media_type_copper) + e1000_power_up_phy(hw); + else + e1000_power_up_fiber_serdes_link(hw); + + return 0; +} + +static int +eth_igb_dev_set_link_down(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + if (hw->phy.media_type == e1000_media_type_copper) + e1000_power_down_phy(hw); + else + e1000_shutdown_fiber_serdes_link(hw); + + return 0; +} + static void eth_igb_close(struct rte_eth_dev *dev) { @@ -1691,8 +1788,27 @@ eth_igb_xstats_reset(struct rte_eth_dev *dev) memset(stats, 0, sizeof(*stats)); } +static int eth_igb_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned i; + + if (xstats_names == NULL) + return IGB_NB_XSTATS; + + /* Note: limit checked in rte_eth_xstats_names() */ + + for (i = 0; i < IGB_NB_XSTATS; i++) { + snprintf(xstats_names[i].name, sizeof(xstats_names[i].name), + "%s", rte_igb_stats_strings[i].name); + } + + return IGB_NB_XSTATS; +} + static int -eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +eth_igb_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1713,8 +1829,7 @@ eth_igb_xstats_get(struct rte_eth_dev *dev, struct 
rte_eth_xstats *xstats, /* Extended stats */ for (i = 0; i < IGB_NB_XSTATS; i++) { - snprintf(xstats[i].name, sizeof(xstats[i].name), - "%s", rte_igb_stats_strings[i].name); + xstats[i].id = i; xstats[i].value = *(uint64_t *)(((char *)hw_stats) + rte_igb_stats_strings[i].offset); } @@ -1762,8 +1877,23 @@ igbvf_read_stats_registers(struct e1000_hw *hw, struct e1000_vf_stats *hw_stats) hw_stats->last_gotlbc, hw_stats->gotlbc); } +static int eth_igbvf_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned i; + + if (xstats_names != NULL) + for (i = 0; i < IGBVF_NB_XSTATS; i++) { + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), "%s", + rte_igbvf_stats_strings[i].name); + } + return IGBVF_NB_XSTATS; +} + static int -eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); @@ -1780,8 +1910,7 @@ eth_igbvf_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, return 0; for (i = 0; i < IGBVF_NB_XSTATS; i++) { - snprintf(xstats[i].name, sizeof(xstats[i].name), "%s", - rte_igbvf_stats_strings[i].name); + xstats[i].id = i; xstats[i].value = *(uint64_t *)(((char *)hw_stats) + rte_igbvf_stats_strings[i].offset); } @@ -1805,11 +1934,6 @@ eth_igbvf_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *rte_stats) rte_stats->ibytes = hw_stats->gorc; rte_stats->opackets = hw_stats->gptc; rte_stats->obytes = hw_stats->gotc; - rte_stats->imcasts = hw_stats->mprc; - rte_stats->ilbpackets = hw_stats->gprlbc; - rte_stats->ilbbytes = hw_stats->gorlbc; - rte_stats->olbpackets = hw_stats->gptlbc; - rte_stats->olbbytes = hw_stats->gotlbc; } static void @@ -2242,21 +2366,25 @@ eth_igb_vlan_tpid_set(struct rte_eth_dev *dev, { struct e1000_hw *hw = E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); - uint32_t reg = ETHER_TYPE_VLAN; - int ret = 0; + uint32_t reg, qinq; - switch (vlan_type) { - case ETH_VLAN_TYPE_INNER: - reg |= (tpid << 16); + qinq = E1000_READ_REG(hw, E1000_CTRL_EXT); + qinq &= E1000_CTRL_EXT_EXT_VLAN; + + /* only outer TPID of double VLAN can be configured*/ + if (qinq && vlan_type == ETH_VLAN_TYPE_OUTER) { + reg = E1000_READ_REG(hw, E1000_VET); + reg = (reg & (~E1000_VET_VET_EXT)) | + ((uint32_t)tpid << E1000_VET_VET_EXT_SHIFT); E1000_WRITE_REG(hw, E1000_VET, reg); - break; - default: - ret = -EINVAL; - PMD_DRV_LOG(ERR, "Unsupported vlan type %d\n", vlan_type); - break; + + return 0; } - return ret; + /* all other TPID values are read-only*/ + PMD_DRV_LOG(ERR, "Not supported"); + + return -ENOTSUP; } static void @@ -2569,6 +2697,69 @@ eth_igb_interrupt_handler(__rte_unused struct rte_intr_handle *handle, } static int +eth_igbvf_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + igbvf_intr_disable(hw); + + /* read-on-clear nic registers here */ + eicr = E1000_READ_REG(hw, E1000_EICR); + intr->flags = 0; + + if (eicr == E1000_VTIVAR_MISC_MAILBOX) + intr->flags |= E1000_FLAG_MAILBOX; + + return 0; +} + +void igbvf_mbx_process(struct rte_eth_dev *dev) +{ + struct e1000_hw *hw = + E1000_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct e1000_mbx_info *mbx = &hw->mbx; + u32 in_msg = 0; + + if (mbx->ops.read(hw, &in_msg, 1, 0)) + 
return; + + /* PF reset VF event */ + if (in_msg == E1000_PF_CONTROL_MSG) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET); +} + +static int +eth_igbvf_interrupt_action(struct rte_eth_dev *dev) +{ + struct e1000_interrupt *intr = + E1000_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (intr->flags & E1000_FLAG_MAILBOX) { + igbvf_mbx_process(dev); + intr->flags &= ~E1000_FLAG_MAILBOX; + } + + igbvf_intr_enable(dev); + rte_intr_enable(&dev->pci_dev->intr_handle); + + return 0; +} + +static void +eth_igbvf_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + eth_igbvf_interrupt_get_status(dev); + eth_igbvf_interrupt_action(dev); +} + +static int eth_igb_led_on(struct rte_eth_dev *dev) { struct e1000_hw *hw; @@ -2839,6 +3030,8 @@ igbvf_dev_start(struct rte_eth_dev *dev) struct e1000_adapter *adapter = E1000_DEV_PRIVATE(dev->data->dev_private); int ret; + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + uint32_t intr_vector = 0; PMD_INIT_FUNC_TRACE(); @@ -2858,12 +3051,41 @@ igbvf_dev_start(struct rte_eth_dev *dev) return ret; } + /* check and configure queue intr-vector mapping */ + if (dev->data->dev_conf.intr_conf.rxq != 0) { + intr_vector = dev->data->nb_rx_queues; + ret = rte_intr_efd_enable(intr_handle, intr_vector); + if (ret) + return ret; + } + + if (rte_intr_dp_is_en(intr_handle) && !intr_handle->intr_vec) { + intr_handle->intr_vec = + rte_zmalloc("intr_vec", + dev->data->nb_rx_queues * sizeof(int), 0); + if (!intr_handle->intr_vec) { + PMD_INIT_LOG(ERR, "Failed to allocate %d rx_queues" + " intr_vec\n", dev->data->nb_rx_queues); + return -ENOMEM; + } + } + + eth_igbvf_configure_msix_intr(dev); + + /* enable uio/vfio intr/eventfd mapping */ + rte_intr_enable(intr_handle); + + /* resume enabled intr since hw reset */ + igbvf_intr_enable(dev); + return 0; } static void igbvf_dev_stop(struct rte_eth_dev *dev) { + struct rte_intr_handle *intr_handle = &dev->pci_dev->intr_handle; + PMD_INIT_FUNC_TRACE(); igbvf_stop_adapter(dev); @@ -2875,6 +3097,16 @@ igbvf_dev_stop(struct rte_eth_dev *dev) igbvf_set_vfta_all(dev,0); igb_dev_clear_queues(dev); + + /* disable intr eventfd mapping */ + rte_intr_disable(intr_handle); + + /* Clean datapath event and queue/vec mapping */ + rte_intr_efd_disable(intr_handle); + if (intr_handle->intr_vec) { + rte_free(intr_handle->intr_vec); + intr_handle->intr_vec = NULL; + } } static void diff --git a/drivers/net/e1000/igb_rxtx.c b/drivers/net/e1000/igb_rxtx.c index 4a987e3c..9d80a0b3 100644 --- a/drivers/net/e1000/igb_rxtx.c +++ b/drivers/net/e1000/igb_rxtx.c @@ -79,16 +79,6 @@ PKT_TX_L4_MASK | \ PKT_TX_TCP_SEG) -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - /** * Structure associated with each descriptor of the RX ring of a RX queue. */ @@ -739,7 +729,8 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status) uint64_t pkt_flags; /* Check if VLAN present */ - pkt_flags = (rx_status & E1000_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0; + pkt_flags = ((rx_status & E1000_RXD_STAT_VP) ? 
+ PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED : 0); #if defined(RTE_LIBRTE_IEEE1588) if (rx_status & E1000_RXD_STAT_TMST) @@ -838,7 +829,7 @@ eth_igb_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) staterr, (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, @@ -1021,7 +1012,7 @@ eth_igb_recv_scattered_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) staterr, (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, @@ -1957,7 +1948,7 @@ igb_alloc_rx_queue_mbufs(struct igb_rx_queue *rxq) /* Initialize software ring entries. */ for (i = 0; i < rxq->nb_rx_desc; i++) { volatile union e1000_adv_rx_desc *rxd; - struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); if (mbuf == NULL) { PMD_INIT_LOG(ERR, "RX mbuf alloc failed " diff --git a/drivers/net/ena/Makefile b/drivers/net/ena/Makefile index ac2b55dc..a0d3358d 100644 --- a/drivers/net/ena/Makefile +++ b/drivers/net/ena/Makefile @@ -54,7 +54,7 @@ SRCS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += ena_eth_com.c # this lib depends upon: DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_mempool lib/librte_mbuf -DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net lib/librte_malloc +DEPDIRS-$(CONFIG_RTE_LIBRTE_ENA_PMD) += lib/librte_net CFLAGS += $(INCLUDES) diff --git a/drivers/net/ena/base/ena_plat_dpdk.h b/drivers/net/ena/base/ena_plat_dpdk.h index aab2ac86..5f693301 100644 --- a/drivers/net/ena/base/ena_plat_dpdk.h +++ b/drivers/net/ena/base/ena_plat_dpdk.h @@ -93,14 +93,18 @@ typedef uint64_t dma_addr_t; #define ENA_GET_SYSTEM_USECS() \ (rte_get_timer_cycles() * US_PER_S / rte_get_timer_hz()) +#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG #define ENA_ASSERT(cond, format, arg...) \ do { \ if (unlikely(!(cond))) { \ - printf("Assertion failed on %s:%s:%d: " format, \ - __FILE__, __func__, __LINE__, ##arg); \ - rte_exit(EXIT_FAILURE, "ASSERTION FAILED\n"); \ + RTE_LOG(ERR, PMD, format, ##arg); \ + rte_panic("line %d\tassert \"" #cond "\"" \ + "failed\n", __LINE__); \ } \ } while (0) +#else +#define ENA_ASSERT(cond, format, arg...) 
do {} while (0) +#endif #define ENA_MAX32(x, y) RTE_MAX((x), (y)) #define ENA_MAX16(x, y) RTE_MAX((x), (y)) diff --git a/drivers/net/ena/ena_ethdev.c b/drivers/net/ena/ena_ethdev.c index 02af67a2..e157587b 100644 --- a/drivers/net/ena/ena_ethdev.c +++ b/drivers/net/ena/ena_ethdev.c @@ -605,7 +605,6 @@ static void ena_stats_restart(struct rte_eth_dev *dev) rte_atomic64_init(&adapter->drv_stats->ierrors); rte_atomic64_init(&adapter->drv_stats->oerrors); - rte_atomic64_init(&adapter->drv_stats->imcasts); rte_atomic64_init(&adapter->drv_stats->rx_nombuf); } @@ -643,7 +642,6 @@ static void ena_stats_get(struct rte_eth_dev *dev, /* Driver related stats */ stats->ierrors = rte_atomic64_read(&adapter->drv_stats->ierrors); stats->oerrors = rte_atomic64_read(&adapter->drv_stats->oerrors); - stats->imcasts = rte_atomic64_read(&adapter->drv_stats->imcasts); stats->rx_nombuf = rte_atomic64_read(&adapter->drv_stats->rx_nombuf); } diff --git a/drivers/net/ena/ena_ethdev.h b/drivers/net/ena/ena_ethdev.h index ba6f01e6..aca853c1 100644 --- a/drivers/net/ena/ena_ethdev.h +++ b/drivers/net/ena/ena_ethdev.h @@ -121,7 +121,6 @@ enum ena_adapter_state { struct ena_driver_stats { rte_atomic64_t ierrors; rte_atomic64_t oerrors; - rte_atomic64_t imcasts; rte_atomic64_t rx_nombuf; }; diff --git a/drivers/net/enic/Makefile b/drivers/net/enic/Makefile index f3162741..3926b795 100644 --- a/drivers/net/enic/Makefile +++ b/drivers/net/enic/Makefile @@ -53,7 +53,7 @@ VPATH += $(SRCDIR)/src # SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_main.c -SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rx.c +SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_clsf.c SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += enic_res.c SRCS-$(CONFIG_RTE_LIBRTE_ENIC_PMD) += base/vnic_cq.c diff --git a/drivers/net/enic/base/enic_vnic_wq.h b/drivers/net/enic/base/enic_vnic_wq.h deleted file mode 100644 index b0191093..00000000 --- a/drivers/net/enic/base/enic_vnic_wq.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2008-2015 Cisco Systems, Inc. All rights reserved. - * Copyright 2007 Nuova Systems, Inc. All rights reserved. - * - * Copyright (c) 2015, Cisco Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
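A side note on the ENA_ASSERT rework earlier in this hunk: the assertion now compiles to an empty statement unless the build-time log level is at least debug, and otherwise logs and panics with the stringified condition. The following is a minimal standalone sketch of that compile-time-gated assert pattern in plain C; MY_LOG_LEVEL and MY_ASSERT are illustrative stand-ins, not DPDK's RTE_LOG_LEVEL/ENA_ASSERT, and the variadic form relies on the same GNU extension the driver macro uses.

#include <stdio.h>
#include <stdlib.h>

/* Build-time knob standing in for RTE_LOG_LEVEL >= RTE_LOG_DEBUG. */
#ifndef MY_LOG_LEVEL
#define MY_LOG_LEVEL 8
#endif

#if MY_LOG_LEVEL >= 8
#define MY_ASSERT(cond, fmt, ...)                                        \
	do {                                                             \
		if (!(cond)) {                                           \
			fprintf(stderr, fmt, ##__VA_ARGS__);             \
			fprintf(stderr, "line %d\tassert \"" #cond       \
				"\" failed\n", __LINE__);                \
			abort();                                         \
		}                                                        \
	} while (0)
#else
/* Release builds: the condition is not even evaluated. */
#define MY_ASSERT(cond, fmt, ...) do { } while (0)
#endif

int main(void)
{
	int ring_size = 64;

	MY_ASSERT(ring_size > 0, "ring_size=%d\n", ring_size);
	printf("ring_size ok: %d\n", ring_size);
	return 0;
}

The design point is that debug builds keep the full message and abort, while release builds pay no runtime cost for the check.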
IN NO EVENT SHALL THE - * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - */ - -#ifndef _ENIC_VNIC_WQ_H_ -#define _ENIC_VNIC_WQ_H_ - -#include "vnic_dev.h" -#include "vnic_cq.h" - -static inline void enic_vnic_post_wq_index(struct vnic_wq *wq) -{ - struct vnic_wq_buf *buf = wq->to_use; - - /* Adding write memory barrier prevents compiler and/or CPU - * reordering, thus avoiding descriptor posting before - * descriptor is initialized. Otherwise, hardware can read - * stale descriptor fields. - */ - wmb(); - iowrite32(buf->index, &wq->ctrl->posted_index); -} - -static inline void enic_vnic_post_wq(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, - unsigned int len, int sop, - uint8_t desc_skip_cnt, uint8_t cq_entry, - uint8_t compressed_send, uint64_t wrid) -{ - struct vnic_wq_buf *buf = wq->to_use; - - buf->sop = sop; - buf->cq_entry = cq_entry; - buf->compressed_send = compressed_send; - buf->desc_skip_cnt = desc_skip_cnt; - buf->os_buf = os_buf; - buf->dma_addr = dma_addr; - buf->len = len; - buf->wr_id = wrid; - - buf = buf->next; - wq->ring.desc_avail -= desc_skip_cnt; - wq->to_use = buf; - - if (cq_entry) - enic_vnic_post_wq_index(wq); -} - -#endif /* _ENIC_VNIC_WQ_H_ */ diff --git a/drivers/net/enic/base/rq_enet_desc.h b/drivers/net/enic/base/rq_enet_desc.h index 7292d9dc..13e24b43 100644 --- a/drivers/net/enic/base/rq_enet_desc.h +++ b/drivers/net/enic/base/rq_enet_desc.h @@ -55,7 +55,7 @@ enum rq_enet_type_types { #define RQ_ENET_TYPE_BITS 2 #define RQ_ENET_TYPE_MASK ((1 << RQ_ENET_TYPE_BITS) - 1) -static inline void rq_enet_desc_enc(struct rq_enet_desc *desc, +static inline void rq_enet_desc_enc(volatile struct rq_enet_desc *desc, u64 address, u8 type, u16 length) { desc->address = cpu_to_le64(address); diff --git a/drivers/net/enic/base/vnic_cq.h b/drivers/net/enic/base/vnic_cq.h index 922391b3..13ab87ca 100644 --- a/drivers/net/enic/base/vnic_cq.h +++ b/drivers/net/enic/base/vnic_cq.h @@ -90,50 +90,6 @@ struct vnic_cq { #endif }; -static inline unsigned int vnic_cq_service(struct vnic_cq *cq, - unsigned int work_to_do, - int (*q_service)(struct vnic_dev *vdev, struct cq_desc *cq_desc, - u8 type, u16 q_number, u16 completed_index, void *opaque), - void *opaque) -{ - struct cq_desc *cq_desc; - unsigned int work_done = 0; - u16 q_number, completed_index; - u8 type, color; - struct rte_mbuf **rx_pkts = opaque; - unsigned int ret; - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); - cq_desc_dec(cq_desc, &type, &color, - &q_number, &completed_index); - - while (color != cq->last_color) { - if (opaque) - opaque = (void *)&(rx_pkts[work_done]); - - ret = (*q_service)(cq->vdev, cq_desc, type, - q_number, completed_index, opaque); - cq->to_clean++; - if (cq->to_clean == cq->ring.desc_count) { - cq->to_clean = 0; - cq->last_color = cq->last_color ? 
0 : 1; - } - - cq_desc = (struct cq_desc *)((u8 *)cq->ring.descs + - cq->ring.desc_size * cq->to_clean); - cq_desc_dec(cq_desc, &type, &color, - &q_number, &completed_index); - - if (ret) - work_done++; - if (work_done >= work_to_do) - break; - } - - return work_done; -} - void vnic_cq_free(struct vnic_cq *cq); int vnic_cq_alloc(struct vnic_dev *vdev, struct vnic_cq *cq, unsigned int index, unsigned int socket_id, diff --git a/drivers/net/enic/base/vnic_dev.c b/drivers/net/enic/base/vnic_dev.c index e8a50287..fc2e4cc3 100644 --- a/drivers/net/enic/base/vnic_dev.c +++ b/drivers/net/enic/base/vnic_dev.c @@ -83,7 +83,7 @@ struct vnic_dev { struct vnic_intr_coal_timer_info intr_coal_timer_info; void *(*alloc_consistent)(void *priv, size_t size, dma_addr_t *dma_handle, u8 *name); - void (*free_consistent)(struct rte_pci_device *hwdev, + void (*free_consistent)(void *priv, size_t size, void *vaddr, dma_addr_t dma_handle); }; @@ -101,7 +101,7 @@ void *vnic_dev_priv(struct vnic_dev *vdev) void vnic_register_cbacks(struct vnic_dev *vdev, void *(*alloc_consistent)(void *priv, size_t size, dma_addr_t *dma_handle, u8 *name), - void (*free_consistent)(struct rte_pci_device *hwdev, + void (*free_consistent)(void *priv, size_t size, void *vaddr, dma_addr_t dma_handle)) { @@ -807,7 +807,7 @@ int vnic_dev_notify_unsetcmd(struct vnic_dev *vdev) int vnic_dev_notify_unset(struct vnic_dev *vdev) { if (vdev->notify && !vnic_dev_in_reset(vdev)) { - vdev->free_consistent(vdev->pdev, + vdev->free_consistent(vdev->priv, sizeof(struct vnic_devcmd_notify), vdev->notify, vdev->notify_pa); @@ -924,16 +924,16 @@ void vnic_dev_unregister(struct vnic_dev *vdev) { if (vdev) { if (vdev->notify) - vdev->free_consistent(vdev->pdev, + vdev->free_consistent(vdev->priv, sizeof(struct vnic_devcmd_notify), vdev->notify, vdev->notify_pa); if (vdev->stats) - vdev->free_consistent(vdev->pdev, + vdev->free_consistent(vdev->priv, sizeof(struct vnic_stats), vdev->stats, vdev->stats_pa); if (vdev->fw_info) - vdev->free_consistent(vdev->pdev, + vdev->free_consistent(vdev->priv, sizeof(struct vnic_devcmd_fw_info), vdev->fw_info, vdev->fw_info_pa); kfree(vdev); @@ -1041,7 +1041,7 @@ int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry, ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait); *entry = (u16)a0; - vdev->free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa); + vdev->free_consistent(vdev->priv, tlv_size, tlv_va, tlv_pa); } else if (cmd == CLSF_DEL) { a0 = *entry; ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait); diff --git a/drivers/net/enic/base/vnic_dev.h b/drivers/net/enic/base/vnic_dev.h index 113d6acc..689442f3 100644 --- a/drivers/net/enic/base/vnic_dev.h +++ b/drivers/net/enic/base/vnic_dev.h @@ -102,7 +102,7 @@ unsigned int vnic_dev_get_res_count(struct vnic_dev *vdev, void vnic_register_cbacks(struct vnic_dev *vdev, void *(*alloc_consistent)(void *priv, size_t size, dma_addr_t *dma_handle, u8 *name), - void (*free_consistent)(struct rte_pci_device *hwdev, + void (*free_consistent)(void *priv, size_t size, void *vaddr, dma_addr_t dma_handle)); void __iomem *vnic_dev_get_res(struct vnic_dev *vdev, enum vnic_res_type type, diff --git a/drivers/net/enic/base/vnic_enet.h b/drivers/net/enic/base/vnic_enet.h index cc34998f..50622479 100644 --- a/drivers/net/enic/base/vnic_enet.h +++ b/drivers/net/enic/base/vnic_enet.h @@ -35,6 +35,10 @@ #ifndef _VNIC_ENIC_H_ #define _VNIC_ENIC_H_ +/* Hardware intr coalesce timer is in units of 1.5us */ +#define INTR_COALESCE_USEC_TO_HW(usec) ((usec) * 2 / 3) +#define 
INTR_COALESCE_HW_TO_USEC(usec) ((usec) * 3 / 2) + /* Device-specific region: enet configuration */ struct vnic_enet_config { u32 flags; @@ -50,6 +54,12 @@ struct vnic_enet_config { u16 vf_rq_count; u16 num_arfs; u64 mem_paddr; + u16 rdma_qp_id; + u16 rdma_qp_count; + u16 rdma_resgrp; + u32 rdma_mr_id; + u32 rdma_mr_count; + u32 max_pkt_size; }; #define VENETF_TSO 0x1 /* TSO enabled */ @@ -64,9 +74,14 @@ struct vnic_enet_config { #define VENETF_RSSHASH_IPV6_EX 0x200 /* Hash on IPv6 extended fields */ #define VENETF_RSSHASH_TCPIPV6_EX 0x400 /* Hash on TCP + IPv6 ext. fields */ #define VENETF_LOOP 0x800 /* Loopback enabled */ -#define VENETF_VMQ 0x4000 /* using VMQ flag for VMware NETQ */ +#define VENETF_FAILOVER 0x1000 /* Fabric failover enabled */ +#define VENETF_USPACE_NIC 0x2000 /* vHPC enabled */ +#define VENETF_VMQ 0x4000 /* VMQ enabled */ +#define VENETF_ARFS 0x8000 /* ARFS enabled */ #define VENETF_VXLAN 0x10000 /* VxLAN offload */ #define VENETF_NVGRE 0x20000 /* NVGRE offload */ +#define VENETF_GRPINTR 0x40000 /* group interrupt */ + #define VENET_INTR_TYPE_MIN 0 /* Timer specs min interrupt spacing */ #define VENET_INTR_TYPE_IDLE 1 /* Timer specs idle time before irq */ diff --git a/drivers/net/enic/base/vnic_rq.c b/drivers/net/enic/base/vnic_rq.c index cb62c5e5..0e700a12 100644 --- a/drivers/net/enic/base/vnic_rq.c +++ b/drivers/net/enic/base/vnic_rq.c @@ -84,11 +84,12 @@ void vnic_rq_init_start(struct vnic_rq *rq, unsigned int cq_index, iowrite32(cq_index, &rq->ctrl->cq_index); iowrite32(error_interrupt_enable, &rq->ctrl->error_interrupt_enable); iowrite32(error_interrupt_offset, &rq->ctrl->error_interrupt_offset); - iowrite32(0, &rq->ctrl->dropped_packet_count); iowrite32(0, &rq->ctrl->error_status); iowrite32(fetch_index, &rq->ctrl->fetch_index); iowrite32(posted_index, &rq->ctrl->posted_index); - + if (rq->is_sop) + iowrite32(((rq->is_sop << 10) | rq->data_queue_idx), + &rq->ctrl->data_ring); } void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, @@ -96,6 +97,7 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, unsigned int error_interrupt_offset) { u32 fetch_index = 0; + /* Use current fetch_index as the ring starting point */ fetch_index = ioread32(&rq->ctrl->fetch_index); @@ -110,6 +112,8 @@ void vnic_rq_init(struct vnic_rq *rq, unsigned int cq_index, error_interrupt_offset); rq->rxst_idx = 0; rq->tot_pkts = 0; + rq->pkt_first_seg = NULL; + rq->pkt_last_seg = NULL; } void vnic_rq_error_out(struct vnic_rq *rq, unsigned int error) diff --git a/drivers/net/enic/base/vnic_rq.h b/drivers/net/enic/base/vnic_rq.h index e083ccc2..fd9e1704 100644 --- a/drivers/net/enic/base/vnic_rq.h +++ b/drivers/net/enic/base/vnic_rq.h @@ -60,10 +60,18 @@ struct vnic_rq_ctrl { u32 pad7; u32 error_status; /* 0x48 */ u32 pad8; - u32 dropped_packet_count; /* 0x50 */ + u32 tcp_sn; /* 0x50 */ u32 pad9; - u32 dropped_packet_count_rc; /* 0x58 */ + u32 unused; /* 0x58 */ u32 pad10; + u32 dca_select; /* 0x60 */ + u32 pad11; + u32 dca_value; /* 0x68 */ + u32 pad12; + u32 data_ring; /* 0x70 */ + u32 pad13; + u32 header_split; /* 0x78 */ + u32 pad14; }; struct vnic_rq { @@ -82,6 +90,12 @@ struct vnic_rq { struct rte_mempool *mp; uint16_t rxst_idx; uint32_t tot_pkts; + uint16_t data_queue_idx; + uint8_t is_sop; + uint8_t in_use; + struct rte_mbuf *pkt_first_seg; + struct rte_mbuf *pkt_last_seg; + unsigned int max_mbufs_per_pkt; }; static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq) diff --git a/drivers/net/enic/base/vnic_wq.c b/drivers/net/enic/base/vnic_wq.c index 
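A side note on the INTR_COALESCE_USEC_TO_HW/INTR_COALESCE_HW_TO_USEC macros added to vnic_enet.h in this hunk: the VIC coalescing timer counts in 1.5 microsecond units, so microseconds are converted to ticks with *2/3 and back with *3/2, using truncating integer arithmetic. A small standalone sketch of that round-trip follows (plain C, function names illustrative only):

#include <stdio.h>

/* Hardware interrupt-coalesce timer ticks are 1.5 us each. */
static unsigned int usec_to_hw(unsigned int usec) { return usec * 2 / 3; }
static unsigned int hw_to_usec(unsigned int hw)   { return hw * 3 / 2; }

int main(void)
{
	unsigned int usec;

	for (usec = 0; usec <= 12; usec += 3)
		printf("%u us -> %u ticks -> %u us\n",
		       usec, usec_to_hw(usec), hw_to_usec(usec_to_hw(usec)));
	return 0;
}

Values that are not multiples of 3 lose up to one tick to truncation, which is why round multiples convert back exactly.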
a3ef4170..9b9ff4d7 100644 --- a/drivers/net/enic/base/vnic_wq.c +++ b/drivers/net/enic/base/vnic_wq.c @@ -59,71 +59,30 @@ int vnic_wq_alloc_ring(struct vnic_dev *vdev, struct vnic_wq *wq, static int vnic_wq_alloc_bufs(struct vnic_wq *wq) { - struct vnic_wq_buf *buf; - unsigned int i, j, count = wq->ring.desc_count; - unsigned int blks = VNIC_WQ_BUF_BLKS_NEEDED(count); - - for (i = 0; i < blks; i++) { - wq->bufs[i] = kzalloc(VNIC_WQ_BUF_BLK_SZ(count), GFP_ATOMIC); - if (!wq->bufs[i]) - return -ENOMEM; - } - - for (i = 0; i < blks; i++) { - buf = wq->bufs[i]; - for (j = 0; j < VNIC_WQ_BUF_BLK_ENTRIES(count); j++) { - buf->index = i * VNIC_WQ_BUF_BLK_ENTRIES(count) + j; - buf->desc = (u8 *)wq->ring.descs + - wq->ring.desc_size * buf->index; - if (buf->index + 1 == count) { - buf->next = wq->bufs[0]; - break; - } else if (j + 1 == VNIC_WQ_BUF_BLK_ENTRIES(count)) { - buf->next = wq->bufs[i + 1]; - } else { - buf->next = buf + 1; - buf++; - } - } - } - - wq->to_use = wq->to_clean = wq->bufs[0]; - + unsigned int count = wq->ring.desc_count; + /* Allocate the mbuf ring */ + wq->bufs = (struct vnic_wq_buf *)rte_zmalloc_socket("wq->bufs", + sizeof(struct vnic_wq_buf) * count, + RTE_CACHE_LINE_SIZE, wq->socket_id); + wq->head_idx = 0; + wq->tail_idx = 0; + if (wq->bufs == NULL) + return -ENOMEM; return 0; } void vnic_wq_free(struct vnic_wq *wq) { struct vnic_dev *vdev; - unsigned int i; vdev = wq->vdev; vnic_dev_free_desc_ring(vdev, &wq->ring); - for (i = 0; i < VNIC_WQ_BUF_BLKS_MAX; i++) { - if (wq->bufs[i]) { - kfree(wq->bufs[i]); - wq->bufs[i] = NULL; - } - } - + rte_free(wq->bufs); wq->ctrl = NULL; } -int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, - unsigned int desc_size) -{ - int mem_size = 0; - - mem_size += vnic_dev_desc_ring_size(&wq->ring, desc_count, desc_size); - - mem_size += VNIC_WQ_BUF_BLKS_NEEDED(wq->ring.desc_count) * - VNIC_WQ_BUF_BLK_SZ(wq->ring.desc_count); - - return mem_size; -} - int vnic_wq_alloc(struct vnic_dev *vdev, struct vnic_wq *wq, unsigned int index, unsigned int desc_count, unsigned int desc_size) @@ -172,9 +131,8 @@ void vnic_wq_init_start(struct vnic_wq *wq, unsigned int cq_index, iowrite32(error_interrupt_offset, &wq->ctrl->error_interrupt_offset); iowrite32(0, &wq->ctrl->error_status); - wq->to_use = wq->to_clean = - &wq->bufs[fetch_index / VNIC_WQ_BUF_BLK_ENTRIES(count)] - [fetch_index % VNIC_WQ_BUF_BLK_ENTRIES(count)]; + wq->head_idx = fetch_index; + wq->tail_idx = wq->head_idx; } void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, @@ -184,6 +142,7 @@ void vnic_wq_init(struct vnic_wq *wq, unsigned int cq_index, vnic_wq_init_start(wq, cq_index, 0, 0, error_interrupt_enable, error_interrupt_offset); + wq->last_completed_index = 0; } void vnic_wq_error_out(struct vnic_wq *wq, unsigned int error) @@ -220,21 +179,24 @@ int vnic_wq_disable(struct vnic_wq *wq) } void vnic_wq_clean(struct vnic_wq *wq, - void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)) + void (*buf_clean)(struct vnic_wq_buf *buf)) { struct vnic_wq_buf *buf; + unsigned int to_clean = wq->tail_idx; - buf = wq->to_clean; + buf = &wq->bufs[to_clean]; while (vnic_wq_desc_used(wq) > 0) { - (*buf_clean)(wq, buf); + (*buf_clean)(buf); + to_clean = buf_idx_incr(wq->ring.desc_count, to_clean); - buf = wq->to_clean = buf->next; + buf = &wq->bufs[to_clean]; wq->ring.desc_avail++; } - wq->to_use = wq->to_clean = wq->bufs[0]; + wq->head_idx = 0; + wq->tail_idx = 0; iowrite32(0, &wq->ctrl->fetch_index); iowrite32(0, &wq->ctrl->posted_index); diff --git 
a/drivers/net/enic/base/vnic_wq.h b/drivers/net/enic/base/vnic_wq.h index c23de625..38a217f1 100644 --- a/drivers/net/enic/base/vnic_wq.h +++ b/drivers/net/enic/base/vnic_wq.h @@ -38,6 +38,7 @@ #include "vnic_dev.h" #include "vnic_cq.h" +#include <rte_memzone.h> /* Work queue control */ struct vnic_wq_ctrl { @@ -64,42 +65,23 @@ struct vnic_wq_ctrl { u32 pad9; }; +/* 16 bytes */ struct vnic_wq_buf { - struct vnic_wq_buf *next; - dma_addr_t dma_addr; - void *os_buf; - unsigned int len; - unsigned int index; - int sop; - void *desc; - uint64_t wr_id; /* Cookie */ - uint8_t cq_entry; /* Gets completion event from hw */ - uint8_t desc_skip_cnt; /* Num descs to occupy */ - uint8_t compressed_send; /* Both hdr and payload in one desc */ + struct rte_mempool *pool; + void *mb; }; -/* Break the vnic_wq_buf allocations into blocks of 32/64 entries */ -#define VNIC_WQ_BUF_MIN_BLK_ENTRIES 32 -#define VNIC_WQ_BUF_DFLT_BLK_ENTRIES 64 -#define VNIC_WQ_BUF_BLK_ENTRIES(entries) \ - ((unsigned int)((entries < VNIC_WQ_BUF_DFLT_BLK_ENTRIES) ? \ - VNIC_WQ_BUF_MIN_BLK_ENTRIES : VNIC_WQ_BUF_DFLT_BLK_ENTRIES)) -#define VNIC_WQ_BUF_BLK_SZ(entries) \ - (VNIC_WQ_BUF_BLK_ENTRIES(entries) * sizeof(struct vnic_wq_buf)) -#define VNIC_WQ_BUF_BLKS_NEEDED(entries) \ - DIV_ROUND_UP(entries, VNIC_WQ_BUF_BLK_ENTRIES(entries)) -#define VNIC_WQ_BUF_BLKS_MAX VNIC_WQ_BUF_BLKS_NEEDED(4096) - struct vnic_wq { unsigned int index; struct vnic_dev *vdev; struct vnic_wq_ctrl __iomem *ctrl; /* memory-mapped */ struct vnic_dev_ring ring; - struct vnic_wq_buf *bufs[VNIC_WQ_BUF_BLKS_MAX]; - struct vnic_wq_buf *to_use; - struct vnic_wq_buf *to_clean; - unsigned int pkts_outstanding; + struct vnic_wq_buf *bufs; + unsigned int head_idx; + unsigned int tail_idx; unsigned int socket_id; + const struct rte_memzone *cqmsg_rz; + uint16_t last_completed_index; }; static inline unsigned int vnic_wq_desc_avail(struct vnic_wq *wq) @@ -114,11 +96,6 @@ static inline unsigned int vnic_wq_desc_used(struct vnic_wq *wq) return wq->ring.desc_count - wq->ring.desc_avail - 1; } -static inline void *vnic_wq_next_desc(struct vnic_wq *wq) -{ - return wq->to_use->desc; -} - #define PI_LOG2_CACHE_LINE_SIZE 5 #define PI_INDEX_BITS 12 #define PI_INDEX_MASK ((1U << PI_INDEX_BITS) - 1) @@ -191,73 +168,13 @@ static inline u64 vnic_cached_posted_index(dma_addr_t addr, unsigned int len, PI_PREFETCH_ADDR_MASK) << PI_PREFETCH_ADDR_OFF); } -static inline void vnic_wq_post(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, - unsigned int len, int sop, int eop, - uint8_t desc_skip_cnt, uint8_t cq_entry, - uint8_t compressed_send, uint64_t wrid) -{ - struct vnic_wq_buf *buf = wq->to_use; - - buf->sop = sop; - buf->cq_entry = cq_entry; - buf->compressed_send = compressed_send; - buf->desc_skip_cnt = desc_skip_cnt; - buf->os_buf = os_buf; - buf->dma_addr = dma_addr; - buf->len = len; - buf->wr_id = wrid; - - buf = buf->next; - if (eop) { -#ifdef DO_PREFETCH - uint64_t wr = vnic_cached_posted_index(dma_addr, len, - buf->index); -#endif - /* Adding write memory barrier prevents compiler and/or CPU - * reordering, thus avoiding descriptor posting before - * descriptor is initialized. Otherwise, hardware can read - * stale descriptor fields. - */ - wmb(); -#ifdef DO_PREFETCH - /* Intel chipsets seem to limit the rate of PIOs that we can - * push on the bus. Thus, it is very important to do a single - * 64 bit write here. With two 32-bit writes, my maximum - * pkt/sec rate was cut almost in half. 
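A side note on the vnic_wq rework in the hunks around this point: the chained blocks of vnic_wq_buf entries are replaced by one flat array indexed with head_idx (next slot to post) and tail_idx (next slot to clean), and buf_idx_incr() wraps an index at the descriptor count. The following is a simplified standalone model of that bookkeeping in plain C; struct toy_wq and its fields are illustrative, not the driver's types.

#include <stdint.h>
#include <stdio.h>

#define DESC_COUNT 8u

/* Wrap an index around the ring, as buf_idx_incr() does. */
static uint32_t idx_incr(uint32_t n_descriptors, uint32_t idx)
{
	idx++;
	if (idx == n_descriptors)
		idx = 0;
	return idx;
}

struct toy_wq {
	uint32_t head_idx;	/* next slot to post */
	uint32_t tail_idx;	/* next slot to clean */
	uint32_t desc_avail;
};

int main(void)
{
	struct toy_wq wq = { 0, 0, DESC_COUNT - 1 };
	uint32_t i;

	/* "Post" five buffers: advance head, consume descriptors. */
	for (i = 0; i < 5; i++) {
		wq.head_idx = idx_incr(DESC_COUNT, wq.head_idx);
		wq.desc_avail--;
	}

	/* "Clean" completions: advance tail until it catches head. */
	while (wq.tail_idx != wq.head_idx) {
		wq.tail_idx = idx_incr(DESC_COUNT, wq.tail_idx);
		wq.desc_avail++;
	}

	printf("head=%u tail=%u avail=%u\n",
	       wq.head_idx, wq.tail_idx, wq.desc_avail);
	return 0;
}

Two indices into one array make the clean loop in vnic_wq_clean() a simple walk from tail to head, with no per-buffer next pointers to maintain.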
-AJF - */ - iowrite64((uint64_t)wr, &wq->ctrl->posted_index); -#else - iowrite32(buf->index, &wq->ctrl->posted_index); -#endif - } - wq->to_use = buf; - - wq->ring.desc_avail -= desc_skip_cnt; -} - -static inline void vnic_wq_service(struct vnic_wq *wq, - struct cq_desc *cq_desc, u16 completed_index, - void (*buf_service)(struct vnic_wq *wq, - struct cq_desc *cq_desc, struct vnic_wq_buf *buf, void *opaque), - void *opaque) +static inline uint32_t +buf_idx_incr(uint32_t n_descriptors, uint32_t idx) { - struct vnic_wq_buf *buf; - - buf = wq->to_clean; - while (1) { - - (*buf_service)(wq, cq_desc, buf, opaque); - - wq->ring.desc_avail++; - - wq->to_clean = buf->next; - - if (buf->index == completed_index) - break; - - buf = wq->to_clean; - } + idx++; + if (unlikely(idx == n_descriptors)) + idx = 0; + return idx; } void vnic_wq_free(struct vnic_wq *wq); @@ -275,8 +192,5 @@ unsigned int vnic_wq_error_status(struct vnic_wq *wq); void vnic_wq_enable(struct vnic_wq *wq); int vnic_wq_disable(struct vnic_wq *wq); void vnic_wq_clean(struct vnic_wq *wq, - void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf)); -int vnic_wq_mem_size(struct vnic_wq *wq, unsigned int desc_count, - unsigned int desc_size); - + void (*buf_clean)(struct vnic_wq_buf *buf)); #endif /* _VNIC_WQ_H_ */ diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h index 8c914f5b..53fed0b8 100644 --- a/drivers/net/enic/enic.h +++ b/drivers/net/enic/enic.h @@ -46,6 +46,8 @@ #include "vnic_rss.h" #include "enic_res.h" #include "cq_enet_desc.h" +#include <sys/queue.h> +#include <rte_spinlock.h> #define DRV_NAME "enic_pmd" #define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Poll-mode Driver" @@ -53,8 +55,11 @@ #define DRV_COPYRIGHT "Copyright 2008-2015 Cisco Systems, Inc" #define ENIC_WQ_MAX 8 -#define ENIC_RQ_MAX 8 -#define ENIC_CQ_MAX (ENIC_WQ_MAX + ENIC_RQ_MAX) +/* With Rx scatter support, we use two RQs on VIC per RQ used by app. Both + * RQs use the same CQ. + */ +#define ENIC_RQ_MAX 16 +#define ENIC_CQ_MAX (ENIC_WQ_MAX + (ENIC_RQ_MAX / 2)) #define ENIC_INTR_MAX (ENIC_CQ_MAX + 2) #define VLAN_ETH_HLEN 18 @@ -62,7 +67,6 @@ #define ENICPMD_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 
1 : 0) #define ENICPMD_BDF_LENGTH 13 /* 0000:00:00.0'\0' */ -#define PKT_TX_TCP_UDP_CKSUM 0x6000 #define ENIC_CALC_IP_CKSUM 1 #define ENIC_CALC_TCP_UDP_CKSUM 2 #define ENIC_MAX_MTU 9000 @@ -91,6 +95,16 @@ struct enic_fdir { struct enic_fdir_node *nodes[ENICPMD_FDIR_MAX]; }; +struct enic_soft_stats { + rte_atomic64_t rx_nombuf; + rte_atomic64_t rx_packet_errors; +}; + +struct enic_memzone_entry { + const struct rte_memzone *rz; + LIST_ENTRY(enic_memzone_entry) entries; +}; + /* Per-instance private data structure */ struct enic { struct enic *next; @@ -114,6 +128,7 @@ struct enic { u8 ig_vlan_strip_en; int link_status; u8 hw_ip_checksum; + u16 max_mtu; unsigned int flags; unsigned int priv_flags; @@ -133,11 +148,38 @@ struct enic { /* interrupt resource */ struct vnic_intr intr; unsigned int intr_count; + + /* software counters */ + struct enic_soft_stats soft_stats; + + /* linked list storing memory allocations */ + LIST_HEAD(enic_memzone_list, enic_memzone_entry) memzone_list; + rte_spinlock_t memzone_list_lock; + }; +static inline unsigned int enic_sop_rq(unsigned int rq) +{ + return rq * 2; +} + +static inline unsigned int enic_data_rq(unsigned int rq) +{ + return rq * 2 + 1; +} + +static inline unsigned int enic_vnic_rq_count(struct enic *enic) +{ + return enic->rq_count * 2; +} + static inline unsigned int enic_cq_rq(__rte_unused struct enic *enic, unsigned int rq) { - return rq; + /* Scatter rx uses two receive queues together with one + * completion queue, so the completion queue number is no + * longer the same as the rq number. + */ + return rq / 2; } static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq) @@ -155,14 +197,40 @@ static inline struct enic *pmd_priv(struct rte_eth_dev *eth_dev) return (struct enic *)eth_dev->data->dev_private; } -#define RTE_LIBRTE_ENIC_ASSERT_ENABLE -#ifdef RTE_LIBRTE_ENIC_ASSERT_ENABLE -#define ASSERT(x) do { \ - if (!(x)) \ - rte_panic("ENIC: x"); \ -} while (0) +static inline uint32_t +enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) +{ + uint32_t d = i0 + i1; + d -= (d >= n_descriptors) ? n_descriptors : 0; + return d; +} + +static inline uint32_t +enic_ring_sub(uint32_t n_descriptors, uint32_t i0, uint32_t i1) +{ + int32_t d = i1 - i0; + return (uint32_t)((d < 0) ? 
((int32_t)n_descriptors + d) : d); +} + +static inline uint32_t +enic_ring_incr(uint32_t n_descriptors, uint32_t idx) +{ + idx++; + if (unlikely(idx == n_descriptors)) + idx = 0; + return idx; +} + +#if RTE_LOG_LEVEL >= RTE_LOG_DEBUG +#define ENIC_ASSERT(cond) \ + do { \ + if (unlikely(!(cond))) { \ + rte_panic("line %d\tassert \"" #cond "\"" \ + "failed\n", __LINE__); \ + } \ + } while (0) #else -#define ASSERT(x) +#define ENIC_ASSERT(cond) do {} while (0) #endif extern void enic_fdir_stats_get(struct enic *enic, @@ -209,5 +277,7 @@ extern int enic_clsf_init(struct enic *enic); extern void enic_clsf_destroy(struct enic *enic); uint16_t enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts); - +uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); +int enic_set_mtu(struct enic *enic, uint16_t new_mtu); #endif /* _ENIC_H_ */ diff --git a/drivers/net/enic/enic_clsf.c b/drivers/net/enic/enic_clsf.c index edb56e1d..3365176a 100644 --- a/drivers/net/enic/enic_clsf.c +++ b/drivers/net/enic/enic_clsf.c @@ -148,9 +148,13 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params) enic->fdir.nodes[pos] = NULL; if (unlikely(key->rq_index == queue)) { /* Nothing to be done */ + enic->fdir.stats.f_add++; pos = rte_hash_add_key(enic->fdir.hash, params); + if (pos < 0) { + dev_err(enic, "Add hash key failed\n"); + return pos; + } enic->fdir.nodes[pos] = key; - enic->fdir.stats.f_add++; dev_warning(enic, "FDIR rule is already present\n"); return 0; @@ -213,6 +217,11 @@ int enic_fdir_add_fltr(struct enic *enic, struct rte_eth_fdir_filter *params) } pos = rte_hash_add_key(enic->fdir.hash, params); + if (pos < 0) { + dev_err(enic, "Add hash key failed\n"); + return pos; + } + enic->fdir.nodes[pos] = key; return 0; } @@ -239,15 +248,16 @@ void enic_clsf_destroy(struct enic *enic) int enic_clsf_init(struct enic *enic) { + char clsf_name[RTE_HASH_NAMESIZE]; struct rte_hash_parameters hash_params = { - .name = "enicpmd_clsf_hash", + .name = clsf_name, .entries = ENICPMD_CLSF_HASH_ENTRIES, .key_len = sizeof(struct rte_eth_fdir_filter), .hash_func = DEFAULT_HASH_FUNC, .hash_func_init_val = 0, .socket_id = SOCKET_ID_ANY, }; - + snprintf(clsf_name, RTE_HASH_NAMESIZE, "enic_clsf_%s", enic->bdf_name); enic->fdir.hash = rte_hash_create(&hash_params); memset(&enic->fdir.stats, 0, sizeof(enic->fdir.stats)); enic->fdir.stats.free = ENICPMD_FDIR_MAX; diff --git a/drivers/net/enic/enic_ethdev.c b/drivers/net/enic/enic_ethdev.c index 6bea9405..a7ce064f 100644 --- a/drivers/net/enic/enic_ethdev.c +++ b/drivers/net/enic/enic_ethdev.c @@ -269,14 +269,18 @@ static int enicpmd_dev_rx_queue_setup(struct rte_eth_dev *eth_dev, struct enic *enic = pmd_priv(eth_dev); ENICPMD_FUNC_TRACE(); - if (queue_idx >= ENIC_RQ_MAX) { + /* With Rx scatter support, two RQs are now used on VIC per RQ used + * by the application. + */ + if (queue_idx * 2 >= ENIC_RQ_MAX) { dev_err(enic, - "Max number of RX queues exceeded. Max is %d\n", + "Max number of RX queues exceeded. Max is %d. 
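A side note on the enic Rx-scatter index helpers introduced just above (enic_sop_rq, enic_data_rq, enic_cq_rq, enic_ring_add): each RQ seen by the application maps to a start-of-packet RQ at index 2*rq and a data RQ at 2*rq+1 on the VIC, both sharing the completion queue rq/2, and ring offsets are added with a branch instead of a modulo. The standalone illustration below mirrors only those relations as shown in the hunk; the helper names here are local stand-ins.

#include <stdint.h>
#include <stdio.h>

static unsigned int sop_rq(unsigned int rq)        { return rq * 2; }
static unsigned int data_rq(unsigned int rq)       { return rq * 2 + 1; }
static unsigned int cq_of_rq(unsigned int vnic_rq) { return vnic_rq / 2; }

/* Add two ring indices, wrapping without a modulo instruction. */
static uint32_t ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1)
{
	uint32_t d = i0 + i1;

	d -= (d >= n_descriptors) ? n_descriptors : 0;
	return d;
}

int main(void)
{
	unsigned int rq;

	for (rq = 0; rq < 3; rq++)
		printf("app rq %u -> sop %u, data %u, cq %u\n",
		       rq, sop_rq(rq), data_rq(rq), cq_of_rq(sop_rq(rq)));

	/* 30 + 5 wraps to 3 on a 32-entry ring. */
	printf("ring_add(32, 30, 5) = %u\n", ring_add(32, 30, 5));
	return 0;
}

This pairing is also why ENIC_RQ_MAX doubles to 16 while ENIC_CQ_MAX only grows by ENIC_RQ_MAX / 2.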
This PMD uses 2 RQs on VIC per RQ used by DPDK.\n", ENIC_RQ_MAX); return -EINVAL; } - eth_dev->data->rx_queues[queue_idx] = (void *)&enic->rq[queue_idx]; + eth_dev->data->rx_queues[queue_idx] = + (void *)&enic->rq[enic_sop_rq(queue_idx)]; ret = enic_alloc_rq(enic, queue_idx, socket_id, mp, nb_desc); if (ret) { @@ -435,7 +439,8 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, device_info->max_rx_queues = enic->rq_count; device_info->max_tx_queues = enic->wq_count; device_info->min_rx_bufsize = ENIC_MIN_MTU; - device_info->max_rx_pktlen = enic->config.mtu; + device_info->max_rx_pktlen = enic->rte_dev->data->mtu + + ETHER_HDR_LEN + 4; device_info->max_mac_addrs = 1; device_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP | @@ -455,8 +460,12 @@ static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev, static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev) { static const uint32_t ptypes[] = { - RTE_PTYPE_L3_IPV4, - RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN, + RTE_PTYPE_L3_IPV6_EXT_UNKNOWN, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, + RTE_PTYPE_L4_NONFRAG, RTE_PTYPE_UNKNOWN }; @@ -519,69 +528,12 @@ static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, __rte_unused ui enic_del_mac_address(enic); } - -static uint16_t enicpmd_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, - uint16_t nb_pkts) +static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu) { - uint16_t index; - unsigned int frags; - unsigned int pkt_len; - unsigned int seg_len; - unsigned int inc_len; - unsigned int nb_segs; - struct rte_mbuf *tx_pkt, *next_tx_pkt; - struct vnic_wq *wq = (struct vnic_wq *)tx_queue; - struct enic *enic = vnic_dev_priv(wq->vdev); - unsigned short vlan_id; - unsigned short ol_flags; - uint8_t last_seg, eop; - unsigned int host_tx_descs = 0; - - for (index = 0; index < nb_pkts; index++) { - tx_pkt = *tx_pkts++; - inc_len = 0; - nb_segs = tx_pkt->nb_segs; - if (nb_segs > vnic_wq_desc_avail(wq)) { - if (index > 0) - enic_post_wq_index(wq); - - /* wq cleanup and try again */ - if (!enic_cleanup_wq(enic, wq) || - (nb_segs > vnic_wq_desc_avail(wq))) { - return index; - } - } - - pkt_len = tx_pkt->pkt_len; - vlan_id = tx_pkt->vlan_tci; - ol_flags = tx_pkt->ol_flags; - for (frags = 0; inc_len < pkt_len; frags++) { - if (!tx_pkt) - break; - next_tx_pkt = tx_pkt->next; - seg_len = tx_pkt->data_len; - inc_len += seg_len; - - host_tx_descs++; - last_seg = 0; - eop = 0; - if ((pkt_len == inc_len) || !next_tx_pkt) { - eop = 1; - /* post if last packet in batch or > thresh */ - if ((index == (nb_pkts - 1)) || - (host_tx_descs > ENIC_TX_POST_THRESH)) { - last_seg = 1; - host_tx_descs = 0; - } - } - enic_send_pkt(enic, wq, tx_pkt, (unsigned short)seg_len, - !frags, eop, last_seg, ol_flags, vlan_id); - tx_pkt = next_tx_pkt; - } - } + struct enic *enic = pmd_priv(eth_dev); - enic_cleanup_wq(enic, wq); - return index; + ENICPMD_FUNC_TRACE(); + return enic_set_mtu(enic, mtu); } static const struct eth_dev_ops enicpmd_eth_dev_ops = { @@ -601,7 +553,7 @@ static const struct eth_dev_ops enicpmd_eth_dev_ops = { .queue_stats_mapping_set = NULL, .dev_infos_get = enicpmd_dev_info_get, .dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get, - .mtu_set = NULL, + .mtu_set = enicpmd_mtu_set, .vlan_filter_set = enicpmd_vlan_filter_set, .vlan_tpid_set = NULL, .vlan_offload_set = enicpmd_vlan_offload_set, @@ -642,7 +594,7 @@ static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev) enic->rte_dev = eth_dev; eth_dev->dev_ops = 
&enicpmd_eth_dev_ops; eth_dev->rx_pkt_burst = &enic_recv_pkts; - eth_dev->tx_pkt_burst = &enicpmd_xmit_pkts; + eth_dev->tx_pkt_burst = &enic_xmit_pkts; pdev = eth_dev->pci_dev; rte_eth_copy_pci_info(eth_dev, pdev); diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c index e3da51db..dc831b48 100644 --- a/drivers/net/enic/enic_main.c +++ b/drivers/net/enic/enic_main.c @@ -58,18 +58,6 @@ #include "vnic_cq.h" #include "vnic_intr.h" #include "vnic_nic.h" -#include "enic_vnic_wq.h" - -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - static inline int enic_is_sriov_vf(struct enic *enic) { @@ -92,7 +80,7 @@ static int is_eth_addr_valid(uint8_t *addr) } static void -enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) +enic_rxmbuf_queue_release(__rte_unused struct enic *enic, struct vnic_rq *rq) { uint16_t i; @@ -101,7 +89,7 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) return; } - for (i = 0; i < enic->config.rq_desc_count; i++) { + for (i = 0; i < rq->ring.desc_count; i++) { if (rq->mbuf_ring[i]) { rte_pktmbuf_free_seg(rq->mbuf_ring[i]); rq->mbuf_ring[i] = NULL; @@ -109,38 +97,17 @@ enic_rxmbuf_queue_release(struct enic *enic, struct vnic_rq *rq) } } - void enic_set_hdr_split_size(struct enic *enic, u16 split_hdr_size) { vnic_set_hdr_split_size(enic->vdev, split_hdr_size); } -static void enic_free_wq_buf(__rte_unused struct vnic_wq *wq, struct vnic_wq_buf *buf) +static void enic_free_wq_buf(struct vnic_wq_buf *buf) { - struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->os_buf; + struct rte_mbuf *mbuf = (struct rte_mbuf *)buf->mb; rte_mempool_put(mbuf->pool, mbuf); - buf->os_buf = NULL; -} - -static void enic_wq_free_buf(struct vnic_wq *wq, - __rte_unused struct cq_desc *cq_desc, - struct vnic_wq_buf *buf, - __rte_unused void *opaque) -{ - enic_free_wq_buf(wq, buf); -} - -static int enic_wq_service(struct vnic_dev *vdev, struct cq_desc *cq_desc, - __rte_unused u8 type, u16 q_number, u16 completed_index, void *opaque) -{ - struct enic *enic = vnic_dev_priv(vdev); - - vnic_wq_service(&enic->wq[q_number], cq_desc, - completed_index, enic_wq_free_buf, - opaque); - - return 0; + buf->mb = NULL; } static void enic_log_q_error(struct enic *enic) @@ -155,7 +122,7 @@ static void enic_log_q_error(struct enic *enic) error_status); } - for (i = 0; i < enic->rq_count; i++) { + for (i = 0; i < enic_vnic_rq_count(enic); i++) { error_status = vnic_rq_error_status(&enic->rq[i]); if (error_status) dev_err(enic, "RQ[%d] error_status %d\n", i, @@ -163,93 +130,62 @@ static void enic_log_q_error(struct enic *enic) } } -unsigned int enic_cleanup_wq(struct enic *enic, struct vnic_wq *wq) +static void enic_clear_soft_stats(struct enic *enic) { - unsigned int cq = enic_cq_wq(enic, wq->index); - - /* Return the work done */ - return vnic_cq_service(&enic->cq[cq], - -1 /*wq_work_to_do*/, enic_wq_service, NULL); + struct enic_soft_stats *soft_stats = &enic->soft_stats; + rte_atomic64_clear(&soft_stats->rx_nombuf); + rte_atomic64_clear(&soft_stats->rx_packet_errors); } -void enic_post_wq_index(struct vnic_wq *wq) +static void enic_init_soft_stats(struct enic *enic) { - enic_vnic_post_wq_index(wq); -} - -void enic_send_pkt(struct enic *enic, struct vnic_wq *wq, - struct rte_mbuf *tx_pkt, unsigned short len, - uint8_t sop, uint8_t eop, uint8_t cq_entry, - uint16_t ol_flags, uint16_t vlan_tag) -{ - struct wq_enet_desc *desc = 
vnic_wq_next_desc(wq); - uint16_t mss = 0; - uint8_t vlan_tag_insert = 0; - uint64_t bus_addr = (dma_addr_t) - (tx_pkt->buf_physaddr + tx_pkt->data_off); - - if (sop) { - if (ol_flags & PKT_TX_VLAN_PKT) - vlan_tag_insert = 1; - - if (enic->hw_ip_checksum) { - if (ol_flags & PKT_TX_IP_CKSUM) - mss |= ENIC_CALC_IP_CKSUM; - - if (ol_flags & PKT_TX_TCP_UDP_CKSUM) - mss |= ENIC_CALC_TCP_UDP_CKSUM; - } - } - - wq_enet_desc_enc(desc, - bus_addr, - len, - mss, - 0 /* header_length */, - 0 /* offload_mode WQ_ENET_OFFLOAD_MODE_CSUM */, - eop, - cq_entry, - 0 /* fcoe_encap */, - vlan_tag_insert, - vlan_tag, - 0 /* loopback */); - - enic_vnic_post_wq(wq, (void *)tx_pkt, bus_addr, len, - sop, - 1 /*desc_skip_cnt*/, - cq_entry, - 0 /*compressed send*/, - 0 /*wrid*/); + struct enic_soft_stats *soft_stats = &enic->soft_stats; + rte_atomic64_init(&soft_stats->rx_nombuf); + rte_atomic64_init(&soft_stats->rx_packet_errors); + enic_clear_soft_stats(enic); } void enic_dev_stats_clear(struct enic *enic) { if (vnic_dev_stats_clear(enic->vdev)) dev_err(enic, "Error in clearing stats\n"); + enic_clear_soft_stats(enic); } void enic_dev_stats_get(struct enic *enic, struct rte_eth_stats *r_stats) { struct vnic_stats *stats; + struct enic_soft_stats *soft_stats = &enic->soft_stats; + int64_t rx_truncated; + uint64_t rx_packet_errors; if (vnic_dev_stats_dump(enic->vdev, &stats)) { dev_err(enic, "Error in getting stats\n"); return; } - r_stats->ipackets = stats->rx.rx_frames_ok; + /* The number of truncated packets can only be calculated by + * subtracting a hardware counter from error packets received by + * the driver. Note: this causes transient inaccuracies in the + * ipackets count. Also, the length of truncated packets are + * counted in ibytes even though truncated packets are dropped + * which can make ibytes be slightly higher than it should be. 
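A side note on the stats accounting described in the comment at this point of enic_dev_stats_get(): the truncated-packet count is approximated by subtracting the hardware rx_errors counter from the driver's soft rx_packet_errors counter, and that amount is removed from ipackets and added to imissed. The arithmetic is sketched standalone below in plain C; the counter names and sample values are illustrative stand-ins for the vnic_stats and soft-stats fields.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Driver-side soft counter: every error seen in the Rx path. */
	uint64_t soft_rx_packet_errors = 110;
	/* Hardware counters read from the adapter. */
	uint64_t hw_rx_errors    = 100;
	uint64_t hw_rx_frames_ok = 5000;
	uint64_t hw_rx_drop      = 7;
	uint64_t hw_rx_no_bufs   = 3;

	/* Truncated packets show up as driver errors but not as hardware
	 * rx_errors, so the difference approximates them. */
	int64_t rx_truncated =
		(int64_t)(soft_rx_packet_errors - hw_rx_errors);

	uint64_t ipackets = hw_rx_frames_ok - (uint64_t)rx_truncated;
	uint64_t ierrors  = hw_rx_errors + hw_rx_drop;
	uint64_t imissed  = hw_rx_no_bufs + (uint64_t)rx_truncated;

	printf("ipackets=%" PRIu64 " ierrors=%" PRIu64 " imissed=%" PRIu64
	       " (truncated=%" PRId64 ")\n",
	       ipackets, ierrors, imissed, rx_truncated);
	return 0;
}

As the original comment notes, the subtraction can be transiently off because the two counters are not sampled atomically.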
+ */ + rx_packet_errors = rte_atomic64_read(&soft_stats->rx_packet_errors); + rx_truncated = rx_packet_errors - stats->rx.rx_errors; + + r_stats->ipackets = stats->rx.rx_frames_ok - rx_truncated; r_stats->opackets = stats->tx.tx_frames_ok; r_stats->ibytes = stats->rx.rx_bytes_ok; r_stats->obytes = stats->tx.tx_bytes_ok; - r_stats->ierrors = stats->rx.rx_errors; + r_stats->ierrors = stats->rx.rx_errors + stats->rx.rx_drop; r_stats->oerrors = stats->tx.tx_errors; - r_stats->imissed = stats->rx.rx_drop; + r_stats->imissed = stats->rx.rx_no_bufs + rx_truncated; - r_stats->imcasts = stats->rx.rx_multicast_frames_ok; - r_stats->rx_nombuf = stats->rx.rx_no_bufs; + r_stats->rx_nombuf = rte_atomic64_read(&soft_stats->rx_nombuf); } void enic_del_mac_address(struct enic *enic) @@ -298,12 +234,35 @@ void enic_init_vnic_resources(struct enic *enic) unsigned int error_interrupt_enable = 1; unsigned int error_interrupt_offset = 0; unsigned int index = 0; + unsigned int cq_idx; + struct vnic_rq *data_rq; for (index = 0; index < enic->rq_count; index++) { - vnic_rq_init(&enic->rq[index], - enic_cq_rq(enic, index), + cq_idx = enic_cq_rq(enic, enic_sop_rq(index)); + + vnic_rq_init(&enic->rq[enic_sop_rq(index)], + cq_idx, error_interrupt_enable, error_interrupt_offset); + + data_rq = &enic->rq[enic_data_rq(index)]; + if (data_rq->in_use) + vnic_rq_init(data_rq, + cq_idx, + error_interrupt_enable, + error_interrupt_offset); + + vnic_cq_init(&enic->cq[cq_idx], + 0 /* flow_control_enable */, + 1 /* color_enable */, + 0 /* cq_head */, + 0 /* cq_tail */, + 1 /* cq_tail_color */, + 0 /* interrupt_enable */, + 1 /* cq_entry_enable */, + 0 /* cq_message_enable */, + 0 /* interrupt offset */, + 0 /* cq_message_addr */); } for (index = 0; index < enic->wq_count; index++) { @@ -311,22 +270,19 @@ void enic_init_vnic_resources(struct enic *enic) enic_cq_wq(enic, index), error_interrupt_enable, error_interrupt_offset); - } - vnic_dev_stats_clear(enic->vdev); - - for (index = 0; index < enic->cq_count; index++) { - vnic_cq_init(&enic->cq[index], + cq_idx = enic_cq_wq(enic, index); + vnic_cq_init(&enic->cq[cq_idx], 0 /* flow_control_enable */, 1 /* color_enable */, 0 /* cq_head */, 0 /* cq_tail */, 1 /* cq_tail_color */, 0 /* interrupt_enable */, - 1 /* cq_entry_enable */, - 0 /* cq_message_enable */, + 0 /* cq_entry_enable */, + 1 /* cq_message_enable */, 0 /* interrupt offset */, - 0 /* cq_message_addr */); + (u64)enic->wq[index].cqmsg_rz->phys_addr); } vnic_intr_init(&enic->intr, @@ -344,30 +300,35 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) unsigned i; dma_addr_t dma_addr; + if (!rq->in_use) + return 0; + dev_debug(enic, "queue %u, allocating %u rx queue mbufs\n", rq->index, rq->ring.desc_count); for (i = 0; i < rq->ring.desc_count; i++, rqd++) { - mb = rte_rxmbuf_alloc(rq->mp); + mb = rte_mbuf_raw_alloc(rq->mp); if (mb == NULL) { dev_err(enic, "RX mbuf alloc failed queue_id=%u\n", (unsigned)rq->index); return -ENOMEM; } - dma_addr = (dma_addr_t)(mb->buf_physaddr + mb->data_off); - - rq_enet_desc_enc(rqd, dma_addr, RQ_ENET_TYPE_ONLY_SOP, - mb->buf_len); + dma_addr = (dma_addr_t)(mb->buf_physaddr + + RTE_PKTMBUF_HEADROOM); + rq_enet_desc_enc(rqd, dma_addr, + (rq->is_sop ? 
RQ_ENET_TYPE_ONLY_SOP + : RQ_ENET_TYPE_NOT_SOP), + mb->buf_len - RTE_PKTMBUF_HEADROOM); rq->mbuf_ring[i] = mb; } /* make sure all prior writes are complete before doing the PIO write */ rte_rmb(); - /* Post all but the last 2 cache lines' worth of descriptors */ - rq->posted_index = rq->ring.desc_count - (2 * RTE_CACHE_LINE_SIZE - / sizeof(struct rq_enet_desc)); + /* Post all but the last buffer to VIC. */ + rq->posted_index = rq->ring.desc_count - 1; + rq->rx_nb_hold = 0; dev_debug(enic, "port=%u, qidx=%u, Write %u posted idx, %u sw held\n", @@ -380,12 +341,14 @@ enic_alloc_rx_queue_mbufs(struct enic *enic, struct vnic_rq *rq) } static void * -enic_alloc_consistent(__rte_unused void *priv, size_t size, +enic_alloc_consistent(void *priv, size_t size, dma_addr_t *dma_handle, u8 *name) { void *vaddr; const struct rte_memzone *rz; *dma_handle = 0; + struct enic *enic = (struct enic *)priv; + struct enic_memzone_entry *mze; rz = rte_memzone_reserve_aligned((const char *)name, size, SOCKET_ID_ANY, 0, ENIC_ALIGN); @@ -398,16 +361,49 @@ enic_alloc_consistent(__rte_unused void *priv, size_t size, vaddr = rz->addr; *dma_handle = (dma_addr_t)rz->phys_addr; + mze = rte_malloc("enic memzone entry", + sizeof(struct enic_memzone_entry), 0); + + if (!mze) { + pr_err("%s : Failed to allocate memory for memzone list\n", + __func__); + rte_memzone_free(rz); + } + + mze->rz = rz; + + rte_spinlock_lock(&enic->memzone_list_lock); + LIST_INSERT_HEAD(&enic->memzone_list, mze, entries); + rte_spinlock_unlock(&enic->memzone_list_lock); + return vaddr; } static void -enic_free_consistent(__rte_unused struct rte_pci_device *hwdev, - __rte_unused size_t size, - __rte_unused void *vaddr, - __rte_unused dma_addr_t dma_handle) +enic_free_consistent(void *priv, + __rte_unused size_t size, + void *vaddr, + dma_addr_t dma_handle) { - /* Nothing to be done */ + struct enic_memzone_entry *mze; + struct enic *enic = (struct enic *)priv; + + rte_spinlock_lock(&enic->memzone_list_lock); + LIST_FOREACH(mze, &enic->memzone_list, entries) { + if (mze->rz->addr == vaddr && + mze->rz->phys_addr == dma_handle) + break; + } + if (mze == NULL) { + rte_spinlock_unlock(&enic->memzone_list_lock); + dev_warning(enic, + "Tried to free memory, but couldn't find it in the memzone list\n"); + return; + } + LIST_REMOVE(mze, entries); + rte_spinlock_unlock(&enic->memzone_list_lock); + rte_memzone_free(mze->rz); + rte_free(mze); } static void @@ -436,17 +432,28 @@ int enic_enable(struct enic *enic) "Flow director feature will not work\n"); for (index = 0; index < enic->rq_count; index++) { - err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]); + err = enic_alloc_rx_queue_mbufs(enic, + &enic->rq[enic_sop_rq(index)]); + if (err) { + dev_err(enic, "Failed to alloc sop RX queue mbufs\n"); + return err; + } + err = enic_alloc_rx_queue_mbufs(enic, + &enic->rq[enic_data_rq(index)]); if (err) { - dev_err(enic, "Failed to alloc RX queue mbufs\n"); + /* release the allocated mbufs for the sop rq*/ + enic_rxmbuf_queue_release(enic, + &enic->rq[enic_sop_rq(index)]); + + dev_err(enic, "Failed to alloc data RX queue mbufs\n"); return err; } } for (index = 0; index < enic->wq_count; index++) - vnic_wq_enable(&enic->wq[index]); + enic_start_wq(enic, index); for (index = 0; index < enic->rq_count; index++) - vnic_rq_enable(&enic->rq[index]); + enic_start_rq(enic, index); vnic_dev_enable_wait(enic->vdev); @@ -466,7 +473,7 @@ int enic_alloc_intr_resources(struct enic *enic) dev_info(enic, "vNIC resources used: "\ "wq %d rq %d cq %d intr %d\n", - enic->wq_count, 
enic->rq_count, + enic->wq_count, enic_vnic_rq_count(enic), enic->cq_count, enic->intr_count); err = vnic_intr_alloc(enic->vdev, &enic->intr, 0); @@ -478,14 +485,32 @@ int enic_alloc_intr_resources(struct enic *enic) void enic_free_rq(void *rxq) { - struct vnic_rq *rq = (struct vnic_rq *)rxq; - struct enic *enic = vnic_dev_priv(rq->vdev); + struct vnic_rq *rq_sop, *rq_data; + struct enic *enic; + + if (rxq == NULL) + return; + + rq_sop = (struct vnic_rq *)rxq; + enic = vnic_dev_priv(rq_sop->vdev); + rq_data = &enic->rq[rq_sop->data_queue_idx]; + + enic_rxmbuf_queue_release(enic, rq_sop); + if (rq_data->in_use) + enic_rxmbuf_queue_release(enic, rq_data); + + rte_free(rq_sop->mbuf_ring); + if (rq_data->in_use) + rte_free(rq_data->mbuf_ring); + + rq_sop->mbuf_ring = NULL; + rq_data->mbuf_ring = NULL; - enic_rxmbuf_queue_release(enic, rq); - rte_free(rq->mbuf_ring); - rq->mbuf_ring = NULL; - vnic_rq_free(rq); - vnic_cq_free(&enic->cq[rq->index]); + vnic_rq_free(rq_sop); + if (rq_data->in_use) + vnic_rq_free(rq_data); + + vnic_cq_free(&enic->cq[rq_sop->index]); } void enic_start_wq(struct enic *enic, uint16_t queue_idx) @@ -500,12 +525,32 @@ int enic_stop_wq(struct enic *enic, uint16_t queue_idx) void enic_start_rq(struct enic *enic, uint16_t queue_idx) { - vnic_rq_enable(&enic->rq[queue_idx]); + struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)]; + struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; + + if (rq_data->in_use) + vnic_rq_enable(rq_data); + rte_mb(); + vnic_rq_enable(rq_sop); + } int enic_stop_rq(struct enic *enic, uint16_t queue_idx) { - return vnic_rq_disable(&enic->rq[queue_idx]); + int ret1 = 0, ret2 = 0; + + struct vnic_rq *rq_sop = &enic->rq[enic_sop_rq(queue_idx)]; + struct vnic_rq *rq_data = &enic->rq[rq_sop->data_queue_idx]; + + ret2 = vnic_rq_disable(rq_sop); + rte_mb(); + if (rq_data->in_use) + ret1 = vnic_rq_disable(rq_data); + + if (ret2) + return ret2; + else + return ret1; } int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, @@ -513,62 +558,156 @@ int enic_alloc_rq(struct enic *enic, uint16_t queue_idx, uint16_t nb_desc) { int rc; - struct vnic_rq *rq = &enic->rq[queue_idx]; + uint16_t sop_queue_idx = enic_sop_rq(queue_idx); + uint16_t data_queue_idx = enic_data_rq(queue_idx); + struct vnic_rq *rq_sop = &enic->rq[sop_queue_idx]; + struct vnic_rq *rq_data = &enic->rq[data_queue_idx]; + unsigned int mbuf_size, mbufs_per_pkt; + unsigned int nb_sop_desc, nb_data_desc; + uint16_t min_sop, max_sop, min_data, max_data; + + rq_sop->is_sop = 1; + rq_sop->data_queue_idx = data_queue_idx; + rq_data->is_sop = 0; + rq_data->data_queue_idx = 0; + rq_sop->socket_id = socket_id; + rq_sop->mp = mp; + rq_data->socket_id = socket_id; + rq_data->mp = mp; + rq_sop->in_use = 1; + + mbuf_size = (uint16_t)(rte_pktmbuf_data_room_size(mp) - + RTE_PKTMBUF_HEADROOM); + + if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) { + dev_info(enic, "Scatter rx mode enabled\n"); + /* ceil((mtu + ETHER_HDR_LEN + 4)/mbuf_size) */ + mbufs_per_pkt = ((enic->config.mtu + ETHER_HDR_LEN + 4) + + (mbuf_size - 1)) / mbuf_size; + } else { + dev_info(enic, "Scatter rx mode disabled\n"); + mbufs_per_pkt = 1; + } - rq->socket_id = socket_id; - rq->mp = mp; + if (mbufs_per_pkt > 1) { + dev_info(enic, "Scatter rx mode in use\n"); + rq_data->in_use = 1; + } else { + dev_info(enic, "Scatter rx mode not being used\n"); + rq_data->in_use = 0; + } - if (nb_desc) { - if (nb_desc > enic->config.rq_desc_count) { - dev_warning(enic, - "RQ %d - number of rx desc in cmd line (%d)"\ - "is greater than 
that in the UCSM/CIMC adapter"\ - "policy. Applying the value in the adapter "\ - "policy (%d).\n", - queue_idx, nb_desc, enic->config.rq_desc_count); - nb_desc = enic->config.rq_desc_count; - } - dev_info(enic, "RX Queues - effective number of descs:%d\n", - nb_desc); + /* number of descriptors have to be a multiple of 32 */ + nb_sop_desc = (nb_desc / mbufs_per_pkt) & ~0x1F; + nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1F; + + rq_sop->max_mbufs_per_pkt = mbufs_per_pkt; + rq_data->max_mbufs_per_pkt = mbufs_per_pkt; + + if (mbufs_per_pkt > 1) { + min_sop = 64; + max_sop = ((enic->config.rq_desc_count / + (mbufs_per_pkt - 1)) & ~0x1F); + min_data = min_sop * (mbufs_per_pkt - 1); + max_data = enic->config.rq_desc_count; + } else { + min_sop = 64; + max_sop = enic->config.rq_desc_count; + min_data = 0; + max_data = 0; } - /* Allocate queue resources */ - rc = vnic_rq_alloc(enic->vdev, rq, queue_idx, - nb_desc, sizeof(struct rq_enet_desc)); + if (nb_desc < (min_sop + min_data)) { + dev_warning(enic, + "Number of rx descs too low, adjusting to minimum\n"); + nb_sop_desc = min_sop; + nb_data_desc = min_data; + } else if (nb_desc > (max_sop + max_data)) { + dev_warning(enic, + "Number of rx_descs too high, adjusting to maximum\n"); + nb_sop_desc = max_sop; + nb_data_desc = max_data; + } + if (mbufs_per_pkt > 1) { + dev_info(enic, "For mtu %d and mbuf size %d valid rx descriptor range is %d to %d\n", + enic->config.mtu, mbuf_size, min_sop + min_data, + max_sop + max_data); + } + dev_info(enic, "Using %d rx descriptors (sop %d, data %d)\n", + nb_sop_desc + nb_data_desc, nb_sop_desc, nb_data_desc); + + /* Allocate sop queue resources */ + rc = vnic_rq_alloc(enic->vdev, rq_sop, sop_queue_idx, + nb_sop_desc, sizeof(struct rq_enet_desc)); if (rc) { - dev_err(enic, "error in allocation of rq\n"); + dev_err(enic, "error in allocation of sop rq\n"); goto err_exit; } - + nb_sop_desc = rq_sop->ring.desc_count; + + if (rq_data->in_use) { + /* Allocate data queue resources */ + rc = vnic_rq_alloc(enic->vdev, rq_data, data_queue_idx, + nb_data_desc, + sizeof(struct rq_enet_desc)); + if (rc) { + dev_err(enic, "error in allocation of data rq\n"); + goto err_free_rq_sop; + } + nb_data_desc = rq_data->ring.desc_count; + } rc = vnic_cq_alloc(enic->vdev, &enic->cq[queue_idx], queue_idx, - socket_id, nb_desc, - sizeof(struct cq_enet_rq_desc)); + socket_id, nb_sop_desc + nb_data_desc, + sizeof(struct cq_enet_rq_desc)); if (rc) { dev_err(enic, "error in allocation of cq for rq\n"); - goto err_free_rq_exit; + goto err_free_rq_data; } - /* Allocate the mbuf ring */ - rq->mbuf_ring = (struct rte_mbuf **)rte_zmalloc_socket("rq->mbuf_ring", - sizeof(struct rte_mbuf *) * nb_desc, - RTE_CACHE_LINE_SIZE, rq->socket_id); + /* Allocate the mbuf rings */ + rq_sop->mbuf_ring = (struct rte_mbuf **) + rte_zmalloc_socket("rq->mbuf_ring", + sizeof(struct rte_mbuf *) * nb_sop_desc, + RTE_CACHE_LINE_SIZE, rq_sop->socket_id); + if (rq_sop->mbuf_ring == NULL) + goto err_free_cq; + + if (rq_data->in_use) { + rq_data->mbuf_ring = (struct rte_mbuf **) + rte_zmalloc_socket("rq->mbuf_ring", + sizeof(struct rte_mbuf *) * nb_data_desc, + RTE_CACHE_LINE_SIZE, rq_sop->socket_id); + if (rq_data->mbuf_ring == NULL) + goto err_free_sop_mbuf; + } - if (rq->mbuf_ring != NULL) - return 0; + return 0; +err_free_sop_mbuf: + rte_free(rq_sop->mbuf_ring); +err_free_cq: /* cleanup on error */ vnic_cq_free(&enic->cq[queue_idx]); -err_free_rq_exit: - vnic_rq_free(rq); +err_free_rq_data: + if (rq_data->in_use) + vnic_rq_free(rq_data); +err_free_rq_sop: + 
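A side note on the descriptor sizing in enic_alloc_rq() above: with Rx scatter enabled, mbufs_per_pkt is the ceiling of (mtu + Ethernet header + 4) over the mbuf data size, and the requested descriptor count is split between the SOP ring and the data ring, each rounded down to a multiple of 32. The computation is sketched standalone below in plain C; the mbuf size, MTU, and requested count are example values, and the 14-byte header constant stands in for ETHER_HDR_LEN.

#include <stdio.h>

int main(void)
{
	unsigned int mtu = 9000;
	unsigned int mbuf_size = 2048;	/* data room minus headroom */
	unsigned int nb_desc = 512;	/* requested by the application */
	unsigned int ether_hdr_len = 14;

	/* ceil((mtu + ETHER_HDR_LEN + 4) / mbuf_size) */
	unsigned int mbufs_per_pkt =
		((mtu + ether_hdr_len + 4) + (mbuf_size - 1)) / mbuf_size;

	/* Descriptor counts must be multiples of 32. */
	unsigned int nb_sop_desc  = (nb_desc / mbufs_per_pkt) & ~0x1Fu;
	unsigned int nb_data_desc = (nb_desc - nb_sop_desc) & ~0x1Fu;

	printf("mbufs_per_pkt=%u sop_desc=%u data_desc=%u\n",
	       mbufs_per_pkt, nb_sop_desc, nb_data_desc);
	return 0;
}

For a 9000-byte MTU and 2048-byte mbufs this yields 5 mbufs per packet, so a 512-descriptor request becomes 96 SOP and 416 data descriptors before the driver applies its min/max clamping.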
vnic_rq_free(rq_sop); err_exit: return -ENOMEM; } void enic_free_wq(void *txq) { - struct vnic_wq *wq = (struct vnic_wq *)txq; - struct enic *enic = vnic_dev_priv(wq->vdev); + struct vnic_wq *wq; + struct enic *enic; + + if (txq == NULL) + return; + wq = (struct vnic_wq *)txq; + enic = vnic_dev_priv(wq->vdev); + rte_memzone_free(wq->cqmsg_rz); vnic_wq_free(wq); vnic_cq_free(&enic->cq[enic->rq_count + wq->index]); } @@ -579,6 +718,8 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, int err; struct vnic_wq *wq = &enic->wq[queue_idx]; unsigned int cq_index = enic_cq_wq(enic, queue_idx); + char name[NAME_MAX]; + static int instance; wq->socket_id = socket_id; if (nb_desc) { @@ -614,6 +755,18 @@ int enic_alloc_wq(struct enic *enic, uint16_t queue_idx, dev_err(enic, "error in allocation of cq for wq\n"); } + /* setup up CQ message */ + snprintf((char *)name, sizeof(name), + "vnic_cqmsg-%s-%d-%d", enic->bdf_name, queue_idx, + instance++); + + wq->cqmsg_rz = rte_memzone_reserve_aligned((const char *)name, + sizeof(uint32_t), + SOCKET_ID_ANY, 0, + ENIC_ALIGN); + if (!wq->cqmsg_rz) + return -ENOMEM; + return err; } @@ -637,10 +790,12 @@ int enic_disable(struct enic *enic) if (err) return err; } - for (i = 0; i < enic->rq_count; i++) { - err = vnic_rq_disable(&enic->rq[i]); - if (err) - return err; + for (i = 0; i < enic_vnic_rq_count(enic); i++) { + if (enic->rq[i].in_use) { + err = vnic_rq_disable(&enic->rq[i]); + if (err) + return err; + } } vnic_dev_set_reset_flag(enic->vdev, 1); @@ -649,8 +804,9 @@ int enic_disable(struct enic *enic) for (i = 0; i < enic->wq_count; i++) vnic_wq_clean(&enic->wq[i], enic_free_wq_buf); - for (i = 0; i < enic->rq_count; i++) - vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); + for (i = 0; i < enic_vnic_rq_count(enic); i++) + if (enic->rq[i].in_use) + vnic_rq_clean(&enic->rq[i], enic_free_rq_buf); for (i = 0; i < enic->cq_count; i++) vnic_cq_clean(&enic->cq[i]); vnic_intr_clean(&enic->intr); @@ -723,7 +879,7 @@ static int enic_set_rsskey(struct enic *enic) rss_key_buf_pa, sizeof(union vnic_rss_key)); - enic_free_consistent(enic->pdev, sizeof(union vnic_rss_key), + enic_free_consistent(enic, sizeof(union vnic_rss_key), rss_key_buf_va, rss_key_buf_pa); return err; @@ -744,13 +900,14 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) return -ENOMEM; for (i = 0; i < (1 << rss_hash_bits); i++) - (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count; + (*rss_cpu_buf_va).cpu[i / 4].b[i % 4] = + enic_sop_rq(i % enic->rq_count); err = enic_set_rss_cpu(enic, rss_cpu_buf_pa, sizeof(union vnic_rss_cpu)); - enic_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu), + enic_free_consistent(enic, sizeof(union vnic_rss_cpu), rss_cpu_buf_va, rss_cpu_buf_pa); return err; @@ -806,6 +963,8 @@ int enic_setup_finish(struct enic *enic) { int ret; + enic_init_soft_stats(enic); + ret = enic_set_rss_nic_cfg(enic); if (ret) { dev_err(enic, "Failed to config nic, aborting.\n"); @@ -851,21 +1010,79 @@ static void enic_dev_deinit(struct enic *enic) int enic_set_vnic_res(struct enic *enic) { struct rte_eth_dev *eth_dev = enic->rte_dev; + int rc = 0; - if ((enic->rq_count < eth_dev->data->nb_rx_queues) || - (enic->wq_count < eth_dev->data->nb_tx_queues)) { - dev_err(dev, "Not enough resources configured, aborting\n"); - return -1; + /* With Rx scatter support, two RQs are now used per RQ used by + * the application. + */ + if (enic->rq_count < (eth_dev->data->nb_rx_queues * 2)) { + dev_err(dev, "Not enough Receive queues. 
Requested:%u which uses %d RQs on VIC, Configured:%u\n", + eth_dev->data->nb_rx_queues, + eth_dev->data->nb_rx_queues * 2, enic->rq_count); + rc = -EINVAL; + } + if (enic->wq_count < eth_dev->data->nb_tx_queues) { + dev_err(dev, "Not enough Transmit queues. Requested:%u, Configured:%u\n", + eth_dev->data->nb_tx_queues, enic->wq_count); + rc = -EINVAL; } - enic->rq_count = eth_dev->data->nb_rx_queues; - enic->wq_count = eth_dev->data->nb_tx_queues; if (enic->cq_count < (enic->rq_count + enic->wq_count)) { - dev_err(dev, "Not enough resources configured, aborting\n"); - return -1; + dev_err(dev, "Not enough Completion queues. Required:%u, Configured:%u\n", + enic->rq_count + enic->wq_count, enic->cq_count); + rc = -EINVAL; + } + + if (rc == 0) { + enic->rq_count = eth_dev->data->nb_rx_queues; + enic->wq_count = eth_dev->data->nb_tx_queues; + enic->cq_count = enic->rq_count + enic->wq_count; } - enic->cq_count = enic->rq_count + enic->wq_count; + return rc; +} + +/* The Cisco NIC can send and receive packets up to a max packet size + * determined by the NIC type and firmware. There is also an MTU + * configured into the NIC via the CIMC/UCSM management interface + * which can be overridden by this function (up to the max packet size). + * Depending on the network setup, doing so may cause packet drops + * and unexpected behavior. + */ +int enic_set_mtu(struct enic *enic, uint16_t new_mtu) +{ + uint16_t old_mtu; /* previous setting */ + uint16_t config_mtu; /* Value configured into NIC via CIMC/UCSM */ + struct rte_eth_dev *eth_dev = enic->rte_dev; + + old_mtu = eth_dev->data->mtu; + config_mtu = enic->config.mtu; + + /* only works with Rx scatter disabled */ + if (enic->rte_dev->data->dev_conf.rxmode.enable_scatter) + return -ENOTSUP; + + if (new_mtu > enic->max_mtu) { + dev_err(enic, + "MTU not updated: requested (%u) greater than max (%u)\n", + new_mtu, enic->max_mtu); + return -EINVAL; + } + if (new_mtu < ENIC_MIN_MTU) { + dev_info(enic, + "MTU not updated: requested (%u) less than min (%u)\n", + new_mtu, ENIC_MIN_MTU); + return -EINVAL; + } + if (new_mtu > config_mtu) + dev_warning(enic, + "MTU (%u) is greater than value configured in NIC (%u)\n", + new_mtu, config_mtu); + + /* update the mtu */ + eth_dev->data->mtu = new_mtu; + + dev_info(enic, "MTU changed from %u to %u\n", old_mtu, new_mtu); return 0; } @@ -920,6 +1137,9 @@ int enic_probe(struct enic *enic) goto err_out; } + LIST_INIT(&enic->memzone_list); + rte_spinlock_init(&enic->memzone_list_lock); + vnic_register_cbacks(enic->vdev, enic_alloc_consistent, enic_free_consistent); diff --git a/drivers/net/enic/enic_res.c b/drivers/net/enic/enic_res.c index ebe379dd..b271d340 100644 --- a/drivers/net/enic/enic_res.c +++ b/drivers/net/enic/enic_res.c @@ -83,6 +83,20 @@ int enic_get_vnic_config(struct enic *enic) GET_CONFIG(intr_timer_usec); GET_CONFIG(loop_tag); GET_CONFIG(num_arfs); + GET_CONFIG(max_pkt_size); + + /* max packet size is only defined in newer VIC firmware + * and will be 0 for legacy firmware and VICs + */ + if (c->max_pkt_size > ENIC_DEFAULT_MAX_PKT_SIZE) + enic->max_mtu = c->max_pkt_size - (ETHER_HDR_LEN + 4); + else + enic->max_mtu = ENIC_DEFAULT_MAX_PKT_SIZE - (ETHER_HDR_LEN + 4); + if (c->mtu == 0) + c->mtu = 1500; + + enic->rte_dev->data->mtu = min_t(u16, enic->max_mtu, + max_t(u16, ENIC_MIN_MTU, c->mtu)); c->wq_desc_count = min_t(u32, ENIC_MAX_WQ_DESCS, @@ -96,21 +110,16 @@ int enic_get_vnic_config(struct enic *enic) c->rq_desc_count)); c->rq_desc_count &= 0xffffffe0; /* must be aligned to groups of 32 */ - if 
(c->mtu == 0) - c->mtu = 1500; - c->mtu = min_t(u16, ENIC_MAX_MTU, - max_t(u16, ENIC_MIN_MTU, - c->mtu)); - c->intr_timer_usec = min_t(u32, c->intr_timer_usec, vnic_dev_get_intr_coal_timer_max(enic->vdev)); dev_info(enic_get_dev(enic), "vNIC MAC addr %02x:%02x:%02x:%02x:%02x:%02x " - "wq/rq %d/%d mtu %d\n", + "wq/rq %d/%d mtu %d, max mtu:%d\n", enic->mac_addr[0], enic->mac_addr[1], enic->mac_addr[2], enic->mac_addr[3], enic->mac_addr[4], enic->mac_addr[5], - c->wq_desc_count, c->rq_desc_count, c->mtu); + c->wq_desc_count, c->rq_desc_count, + enic->rte_dev->data->mtu, enic->max_mtu); dev_info(enic_get_dev(enic), "vNIC csum tx/rx %s/%s " "rss %s intr mode %s type %s timer %d usec " "loopback tag 0x%04x\n", @@ -196,8 +205,9 @@ void enic_free_vnic_resources(struct enic *enic) for (i = 0; i < enic->wq_count; i++) vnic_wq_free(&enic->wq[i]); - for (i = 0; i < enic->rq_count; i++) - vnic_rq_free(&enic->rq[i]); + for (i = 0; i < enic_vnic_rq_count(enic); i++) + if (enic->rq[i].in_use) + vnic_rq_free(&enic->rq[i]); for (i = 0; i < enic->cq_count; i++) vnic_cq_free(&enic->cq[i]); vnic_intr_free(&enic->intr); diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h index 00fa71de..303530ef 100644 --- a/drivers/net/enic/enic_res.h +++ b/drivers/net/enic/enic_res.h @@ -46,95 +46,19 @@ #define ENIC_MAX_RQ_DESCS 4096 #define ENIC_MIN_MTU 68 -#define ENIC_MAX_MTU 9000 + +/* Does not include (possible) inserted VLAN tag and FCS */ +#define ENIC_DEFAULT_MAX_PKT_SIZE 9022 #define ENIC_MULTICAST_PERFECT_FILTERS 32 #define ENIC_UNICAST_PERFECT_FILTERS 32 #define ENIC_NON_TSO_MAX_DESC 16 #define ENIC_DEFAULT_RX_FREE_THRESH 32 -#define ENIC_TX_POST_THRESH (ENIC_MIN_WQ_DESCS / 2) +#define ENIC_TX_XMIT_MAX 64 #define ENIC_SETTING(enic, f) ((enic->config.flags & VENETF_##f) ? 1 : 0) -static inline void enic_queue_wq_desc_ex(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, unsigned int len, - unsigned int mss_or_csum_offset, unsigned int hdr_len, - int vlan_tag_insert, unsigned int vlan_tag, - int offload_mode, int cq_entry, int sop, int eop, int loopback) -{ - struct wq_enet_desc *desc = vnic_wq_next_desc(wq); - u8 desc_skip_cnt = 1; - u8 compressed_send = 0; - u64 wrid = 0; - - wq_enet_desc_enc(desc, - (u64)dma_addr | VNIC_PADDR_TARGET, - (u16)len, - (u16)mss_or_csum_offset, - (u16)hdr_len, (u8)offload_mode, - (u8)eop, (u8)cq_entry, - 0, /* fcoe_encap */ - (u8)vlan_tag_insert, - (u16)vlan_tag, - (u8)loopback); - - vnic_wq_post(wq, os_buf, dma_addr, len, sop, eop, desc_skip_cnt, - (u8)cq_entry, compressed_send, wrid); -} - -static inline void enic_queue_wq_desc_cont(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, unsigned int len, - int eop, int loopback) -{ - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, - 0, 0, 0, 0, 0, - eop, 0 /* !SOP */, eop, loopback); -} - -static inline void enic_queue_wq_desc(struct vnic_wq *wq, void *os_buf, - dma_addr_t dma_addr, unsigned int len, int vlan_tag_insert, - unsigned int vlan_tag, int eop, int loopback) -{ - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, - 0, 0, vlan_tag_insert, vlan_tag, - WQ_ENET_OFFLOAD_MODE_CSUM, - eop, 1 /* SOP */, eop, loopback); -} - -static inline void enic_queue_wq_desc_csum(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, unsigned int len, - int ip_csum, int tcpudp_csum, int vlan_tag_insert, - unsigned int vlan_tag, int eop, int loopback) -{ - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, - (ip_csum ? 1 : 0) + (tcpudp_csum ? 
2 : 0), - 0, vlan_tag_insert, vlan_tag, - WQ_ENET_OFFLOAD_MODE_CSUM, - eop, 1 /* SOP */, eop, loopback); -} - -static inline void enic_queue_wq_desc_csum_l4(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, unsigned int len, - unsigned int csum_offset, unsigned int hdr_len, - int vlan_tag_insert, unsigned int vlan_tag, int eop, int loopback) -{ - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, - csum_offset, hdr_len, vlan_tag_insert, vlan_tag, - WQ_ENET_OFFLOAD_MODE_CSUM_L4, - eop, 1 /* SOP */, eop, loopback); -} - -static inline void enic_queue_wq_desc_tso(struct vnic_wq *wq, - void *os_buf, dma_addr_t dma_addr, unsigned int len, - unsigned int mss, unsigned int hdr_len, int vlan_tag_insert, - unsigned int vlan_tag, int eop, int loopback) -{ - enic_queue_wq_desc_ex(wq, os_buf, dma_addr, len, - mss, hdr_len, vlan_tag_insert, vlan_tag, - WQ_ENET_OFFLOAD_MODE_TSO, - eop, 1 /* SOP */, eop, loopback); -} struct enic; diff --git a/drivers/net/enic/enic_rx.c b/drivers/net/enic/enic_rx.c deleted file mode 100644 index 232987a5..00000000 --- a/drivers/net/enic/enic_rx.c +++ /dev/null @@ -1,359 +0,0 @@ -/* - * Copyright 2008-2014 Cisco Systems, Inc. All rights reserved. - * Copyright 2007 Nuova Systems, Inc. All rights reserved. - * - * Copyright (c) 2014, Cisco Systems, Inc. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS - * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE - * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, - * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER - * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN - * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - * - */ - -#include <rte_mbuf.h> -#include <rte_ethdev.h> -#include <rte_prefetch.h> - -#include "enic_compat.h" -#include "rq_enet_desc.h" -#include "enic.h" - -#define RTE_PMD_USE_PREFETCH - -#ifdef RTE_PMD_USE_PREFETCH -/* - * Prefetch a cache line into all cache levels. 
- */ -#define rte_enic_prefetch(p) rte_prefetch0(p) -#else -#define rte_enic_prefetch(p) do {} while (0) -#endif - -#ifdef RTE_PMD_PACKET_PREFETCH -#define rte_packet_prefetch(p) rte_prefetch1(p) -#else -#define rte_packet_prefetch(p) do {} while (0) -#endif - -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - -static inline uint16_t -enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) -{ - return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; -} - -static inline uint16_t -enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) -{ - return(le16_to_cpu(crd->bytes_written_flags) & - ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK); -} - -static inline uint8_t -enic_cq_rx_desc_packet_error(uint16_t bwflags) -{ - return((bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == - CQ_ENET_RQ_DESC_FLAGS_TRUNCATED); -} - -static inline uint8_t -enic_cq_rx_desc_eop(uint16_t ciflags) -{ - return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) - == CQ_ENET_RQ_DESC_FLAGS_EOP; -} - -static inline uint8_t -enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) -{ - return ((le16_to_cpu(cqrd->q_number_rss_type_flags) & - CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == - CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC); -} - -static inline uint8_t -enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) -{ - return ((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == - CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK); -} - -static inline uint8_t -enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) -{ - return((cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == - CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK); -} - -static inline uint8_t -enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) -{ - return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> - CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); -} - -static inline uint32_t -enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) -{ - return le32_to_cpu(cqrd->rss_hash); -} - -static inline uint16_t -enic_cq_rx_desc_vlan(struct cq_enet_rq_desc *cqrd) -{ - return le16_to_cpu(cqrd->vlan); -} - -static inline uint16_t -enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) -{ - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; - return le16_to_cpu(cqrd->bytes_written_flags) & - CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; -} - -static inline uint8_t -enic_cq_rx_to_pkt_err_flags(struct cq_desc *cqd, uint64_t *pkt_err_flags_out) -{ - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; - uint16_t bwflags; - int ret = 0; - uint64_t pkt_err_flags = 0; - - bwflags = enic_cq_rx_desc_bwflags(cqrd); - if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) { - pkt_err_flags = PKT_RX_MAC_ERR; - ret = 1; - } - *pkt_err_flags_out = pkt_err_flags; - return ret; -} - -/* - * Lookup table to translate RX CQ flags to mbuf flags. 
- */ -static inline uint32_t -enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) -{ - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; - uint8_t cqrd_flags = cqrd->flags; - static const uint32_t cq_type_table[128] __rte_cache_aligned = { - [32] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, - [34] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 - | RTE_PTYPE_L4_UDP, - [36] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 - | RTE_PTYPE_L4_TCP, - [96] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 - | RTE_PTYPE_L4_FRAG, - [16] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, - [18] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 - | RTE_PTYPE_L4_UDP, - [20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 - | RTE_PTYPE_L4_TCP, - [80] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6 - | RTE_PTYPE_L4_FRAG, - /* All others reserved */ - }; - cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT - | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 - | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; - return cq_type_table[cqrd_flags]; -} - -static inline void -enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) -{ - struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; - uint16_t ciflags, bwflags, pkt_flags = 0; - ciflags = enic_cq_rx_desc_ciflags(cqrd); - bwflags = enic_cq_rx_desc_bwflags(cqrd); - - mbuf->ol_flags = 0; - - /* flags are meaningless if !EOP */ - if (unlikely(!enic_cq_rx_desc_eop(ciflags))) - goto mbuf_flags_done; - - /* VLAN stripping */ - if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { - pkt_flags |= PKT_RX_VLAN_PKT; - mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); - } else { - mbuf->vlan_tci = 0; - } - - /* RSS flag */ - if (enic_cq_rx_desc_rss_type(cqrd)) { - pkt_flags |= PKT_RX_RSS_HASH; - mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); - } - - /* checksum flags */ - if (!enic_cq_rx_desc_csum_not_calc(cqrd) && - (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { - if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) - pkt_flags |= PKT_RX_IP_CKSUM_BAD; - if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { - if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) - pkt_flags |= PKT_RX_L4_CKSUM_BAD; - } - } - - mbuf_flags_done: - mbuf->ol_flags = pkt_flags; -} - -static inline uint32_t -enic_ring_add(uint32_t n_descriptors, uint32_t i0, uint32_t i1) -{ - uint32_t d = i0 + i1; - ASSERT(i0 < n_descriptors); - ASSERT(i1 < n_descriptors); - d -= (d >= n_descriptors) ? 
n_descriptors : 0; - return d; -} - - -uint16_t -enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, - uint16_t nb_pkts) -{ - struct vnic_rq *rq = rx_queue; - struct enic *enic = vnic_dev_priv(rq->vdev); - unsigned int rx_id; - struct rte_mbuf *nmb, *rxmb; - uint16_t nb_rx = 0; - uint16_t nb_hold; - struct vnic_cq *cq; - volatile struct cq_desc *cqd_ptr; - uint8_t color; - - cq = &enic->cq[enic_cq_rq(enic, rq->index)]; - rx_id = cq->to_clean; /* index of cqd, rqd, mbuf_table */ - cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; - - nb_hold = rq->rx_nb_hold; /* mbufs held by software */ - - while (nb_rx < nb_pkts) { - volatile struct rq_enet_desc *rqd_ptr; - dma_addr_t dma_addr; - struct cq_desc cqd; - uint64_t ol_err_flags; - uint8_t packet_error; - - /* Check for pkts available */ - color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) - & CQ_DESC_COLOR_MASK; - if (color == cq->last_color) - break; - - /* Get the cq descriptor and rq pointer */ - cqd = *cqd_ptr; - rqd_ptr = (struct rq_enet_desc *)(rq->ring.descs) + rx_id; - - /* allocate a new mbuf */ - nmb = rte_rxmbuf_alloc(rq->mp); - if (nmb == NULL) { - dev_err(enic, "RX mbuf alloc failed port=%u qid=%u", - enic->port_id, (unsigned)rq->index); - rte_eth_devices[enic->port_id]. - data->rx_mbuf_alloc_failed++; - break; - } - - /* A packet error means descriptor and data are untrusted */ - packet_error = enic_cq_rx_to_pkt_err_flags(&cqd, &ol_err_flags); - - /* Get the mbuf to return and replace with one just allocated */ - rxmb = rq->mbuf_ring[rx_id]; - rq->mbuf_ring[rx_id] = nmb; - - /* Increment cqd, rqd, mbuf_table index */ - rx_id++; - if (unlikely(rx_id == rq->ring.desc_count)) { - rx_id = 0; - cq->last_color = cq->last_color ? 0 : 1; - } - - /* Prefetch next mbuf & desc while processing current one */ - cqd_ptr = (struct cq_desc *)(cq->ring.descs) + rx_id; - rte_enic_prefetch(cqd_ptr); - rte_enic_prefetch(rq->mbuf_ring[rx_id]); - rte_enic_prefetch((struct rq_enet_desc *)(rq->ring.descs) - + rx_id); - - /* Push descriptor for newly allocated mbuf */ - dma_addr = (dma_addr_t)(nmb->buf_physaddr + nmb->data_off); - rqd_ptr->address = rte_cpu_to_le_64(dma_addr); - rqd_ptr->length_type = cpu_to_le16(nmb->buf_len); - - /* Fill in the rest of the mbuf */ - rxmb->data_off = RTE_PKTMBUF_HEADROOM; - rxmb->nb_segs = 1; - rxmb->next = NULL; - rxmb->port = enic->port_id; - if (!packet_error) { - rxmb->pkt_len = enic_cq_rx_desc_n_bytes(&cqd); - rxmb->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); - enic_cq_rx_to_pkt_flags(&cqd, rxmb); - } else { - rxmb->pkt_len = 0; - rxmb->packet_type = 0; - rxmb->ol_flags = 0; - } - rxmb->data_len = rxmb->pkt_len; - - /* prefetch mbuf data for caller */ - rte_packet_prefetch(RTE_PTR_ADD(rxmb->buf_addr, - RTE_PKTMBUF_HEADROOM)); - - /* store the mbuf address into the next entry of the array */ - rx_pkts[nb_rx++] = rxmb; - } - - nb_hold += nb_rx; - cq->to_clean = rx_id; - - if (nb_hold > rq->rx_free_thresh) { - rq->posted_index = enic_ring_add(rq->ring.desc_count, - rq->posted_index, nb_hold); - nb_hold = 0; - rte_mb(); - iowrite32(rq->posted_index, &rq->ctrl->posted_index); - } - - rq->rx_nb_hold = nb_hold; - - return nb_rx; -} diff --git a/drivers/net/enic/enic_rxtx.c b/drivers/net/enic/enic_rxtx.c new file mode 100644 index 00000000..5ac1d69c --- /dev/null +++ b/drivers/net/enic/enic_rxtx.c @@ -0,0 +1,547 @@ +/* Copyright 2008-2016 Cisco Systems, Inc. All rights reserved. + * Copyright 2007 Nuova Systems, Inc. All rights reserved. + * + * Copyright (c) 2014, Cisco Systems, Inc. 
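Both the enic_recv_pkts() being deleted above and its replacement in enic_rxtx.c below detect new completions by comparing a color bit in each CQ entry against the color seen on the previous pass, and advance ring indexes with an add-and-wrap that avoids a modulo. A small self-contained sketch of both ideas, using made-up names rather than the driver's structures:

#include <stdint.h>
#include <stdio.h>

/* Add-and-wrap without '%': valid because i0 < n and i1 < n, so the sum
 * can exceed the ring size at most once. */
static unsigned ring_add(unsigned n_descriptors, unsigned i0, unsigned i1)
{
        unsigned d = i0 + i1;

        return d >= n_descriptors ? d - n_descriptors : d;
}

/* Color-bit polling: the NIC writes each CQ entry with a color bit that it
 * toggles every time the ring wraps.  An entry is new while its color
 * differs from the color remembered from the previous pass; once they
 * match, software has caught up and stops. */
struct cq_entry { uint8_t type_color; /* assume bit 7 carries the color */ };

static int cq_entry_is_new(const struct cq_entry *e, uint8_t last_color)
{
        uint8_t color = (e->type_color >> 7) & 1;

        return color != last_color;
}

int main(void)
{
        struct cq_entry e = { .type_color = 0x80 };

        printf("%u\n", ring_add(16, 14, 5));    /* 3: wrapped once */
        printf("%u\n", ring_add(16, 2, 5));     /* 7: no wrap */
        printf("%d\n", cq_entry_is_new(&e, 0)); /* 1: color flipped, new entry */
        return 0;
}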
+ * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS + * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE + * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, + * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER + * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN + * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <rte_mbuf.h> +#include <rte_ethdev.h> +#include <rte_prefetch.h> + +#include "enic_compat.h" +#include "rq_enet_desc.h" +#include "enic.h" + +#define RTE_PMD_USE_PREFETCH + +#ifdef RTE_PMD_USE_PREFETCH +/*Prefetch a cache line into all cache levels. */ +#define rte_enic_prefetch(p) rte_prefetch0(p) +#else +#define rte_enic_prefetch(p) do {} while (0) +#endif + +#ifdef RTE_PMD_PACKET_PREFETCH +#define rte_packet_prefetch(p) rte_prefetch1(p) +#else +#define rte_packet_prefetch(p) do {} while (0) +#endif + +static inline uint16_t +enic_cq_rx_desc_ciflags(struct cq_enet_rq_desc *crd) +{ + return le16_to_cpu(crd->completed_index_flags) & ~CQ_DESC_COMP_NDX_MASK; +} + +static inline uint16_t +enic_cq_rx_desc_bwflags(struct cq_enet_rq_desc *crd) +{ + return le16_to_cpu(crd->bytes_written_flags) & + ~CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; +} + +static inline uint8_t +enic_cq_rx_desc_packet_error(uint16_t bwflags) +{ + return (bwflags & CQ_ENET_RQ_DESC_FLAGS_TRUNCATED) == + CQ_ENET_RQ_DESC_FLAGS_TRUNCATED; +} + +static inline uint8_t +enic_cq_rx_desc_eop(uint16_t ciflags) +{ + return (ciflags & CQ_ENET_RQ_DESC_FLAGS_EOP) + == CQ_ENET_RQ_DESC_FLAGS_EOP; +} + +static inline uint8_t +enic_cq_rx_desc_csum_not_calc(struct cq_enet_rq_desc *cqrd) +{ + return (le16_to_cpu(cqrd->q_number_rss_type_flags) & + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC) == + CQ_ENET_RQ_DESC_FLAGS_CSUM_NOT_CALC; +} + +static inline uint8_t +enic_cq_rx_desc_ipv4_csum_ok(struct cq_enet_rq_desc *cqrd) +{ + return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK) == + CQ_ENET_RQ_DESC_FLAGS_IPV4_CSUM_OK; +} + +static inline uint8_t +enic_cq_rx_desc_tcp_udp_csum_ok(struct cq_enet_rq_desc *cqrd) +{ + return (cqrd->flags & CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK) == + CQ_ENET_RQ_DESC_FLAGS_TCP_UDP_CSUM_OK; +} + +static inline uint8_t +enic_cq_rx_desc_rss_type(struct cq_enet_rq_desc *cqrd) +{ + return (uint8_t)((le16_to_cpu(cqrd->q_number_rss_type_flags) >> + CQ_DESC_Q_NUM_BITS) & CQ_ENET_RQ_DESC_RSS_TYPE_MASK); +} + +static inline uint32_t +enic_cq_rx_desc_rss_hash(struct cq_enet_rq_desc *cqrd) +{ + return le32_to_cpu(cqrd->rss_hash); +} + +static inline uint16_t +enic_cq_rx_desc_vlan(struct 
cq_enet_rq_desc *cqrd) +{ + return le16_to_cpu(cqrd->vlan); +} + +static inline uint16_t +enic_cq_rx_desc_n_bytes(struct cq_desc *cqd) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + return le16_to_cpu(cqrd->bytes_written_flags) & + CQ_ENET_RQ_DESC_BYTES_WRITTEN_MASK; +} + +static inline uint8_t +enic_cq_rx_check_err(struct cq_desc *cqd) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint16_t bwflags; + + bwflags = enic_cq_rx_desc_bwflags(cqrd); + if (unlikely(enic_cq_rx_desc_packet_error(bwflags))) + return 1; + return 0; +} + +/* Lookup table to translate RX CQ flags to mbuf flags. */ +static inline uint32_t +enic_cq_rx_flags_to_pkt_type(struct cq_desc *cqd) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint8_t cqrd_flags = cqrd->flags; + static const uint32_t cq_type_table[128] __rte_cache_aligned = { + [0x00] = RTE_PTYPE_UNKNOWN, + [0x20] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN + | RTE_PTYPE_L4_NONFRAG, + [0x22] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN + | RTE_PTYPE_L4_UDP, + [0x24] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN + | RTE_PTYPE_L4_TCP, + [0x60] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN + | RTE_PTYPE_L4_FRAG, + [0x62] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN + | RTE_PTYPE_L4_UDP, + [0x64] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN + | RTE_PTYPE_L4_TCP, + [0x10] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN + | RTE_PTYPE_L4_NONFRAG, + [0x12] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN + | RTE_PTYPE_L4_UDP, + [0x14] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN + | RTE_PTYPE_L4_TCP, + [0x50] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN + | RTE_PTYPE_L4_FRAG, + [0x52] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN + | RTE_PTYPE_L4_UDP, + [0x54] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN + | RTE_PTYPE_L4_TCP, + /* All others reserved */ + }; + cqrd_flags &= CQ_ENET_RQ_DESC_FLAGS_IPV4_FRAGMENT + | CQ_ENET_RQ_DESC_FLAGS_IPV4 | CQ_ENET_RQ_DESC_FLAGS_IPV6 + | CQ_ENET_RQ_DESC_FLAGS_TCP | CQ_ENET_RQ_DESC_FLAGS_UDP; + return cq_type_table[cqrd_flags]; +} + +static inline void +enic_cq_rx_to_pkt_flags(struct cq_desc *cqd, struct rte_mbuf *mbuf) +{ + struct cq_enet_rq_desc *cqrd = (struct cq_enet_rq_desc *)cqd; + uint16_t ciflags, bwflags, pkt_flags = 0; + ciflags = enic_cq_rx_desc_ciflags(cqrd); + bwflags = enic_cq_rx_desc_bwflags(cqrd); + + mbuf->ol_flags = 0; + + /* flags are meaningless if !EOP */ + if (unlikely(!enic_cq_rx_desc_eop(ciflags))) + goto mbuf_flags_done; + + /* VLAN stripping */ + if (bwflags & CQ_ENET_RQ_DESC_FLAGS_VLAN_STRIPPED) { + pkt_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; + mbuf->vlan_tci = enic_cq_rx_desc_vlan(cqrd); + } else { + mbuf->vlan_tci = 0; + } + + /* RSS flag */ + if (enic_cq_rx_desc_rss_type(cqrd)) { + pkt_flags |= PKT_RX_RSS_HASH; + mbuf->hash.rss = enic_cq_rx_desc_rss_hash(cqrd); + } + + /* checksum flags */ + if (!enic_cq_rx_desc_csum_not_calc(cqrd) && + (mbuf->packet_type & RTE_PTYPE_L3_IPV4)) { + if (unlikely(!enic_cq_rx_desc_ipv4_csum_ok(cqrd))) + pkt_flags |= PKT_RX_IP_CKSUM_BAD; + if (mbuf->packet_type & (RTE_PTYPE_L4_UDP | RTE_PTYPE_L4_TCP)) { + if (unlikely(!enic_cq_rx_desc_tcp_udp_csum_ok(cqrd))) + pkt_flags |= PKT_RX_L4_CKSUM_BAD; + } + } + + mbuf_flags_done: + mbuf->ol_flags = pkt_flags; +} + +uint16_t +enic_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct vnic_rq *sop_rq = rx_queue; + struct vnic_rq *data_rq; + struct 
vnic_rq *rq; + struct enic *enic = vnic_dev_priv(sop_rq->vdev); + uint16_t cq_idx; + uint16_t rq_idx; + uint16_t rq_num; + struct rte_mbuf *nmb, *rxmb; + uint16_t nb_rx = 0; + struct vnic_cq *cq; + volatile struct cq_desc *cqd_ptr; + uint8_t color; + uint16_t seg_length; + struct rte_mbuf *first_seg = sop_rq->pkt_first_seg; + struct rte_mbuf *last_seg = sop_rq->pkt_last_seg; + + cq = &enic->cq[enic_cq_rq(enic, sop_rq->index)]; + cq_idx = cq->to_clean; /* index of cqd, rqd, mbuf_table */ + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; + + data_rq = &enic->rq[sop_rq->data_queue_idx]; + + while (nb_rx < nb_pkts) { + volatile struct rq_enet_desc *rqd_ptr; + dma_addr_t dma_addr; + struct cq_desc cqd; + uint8_t packet_error; + uint16_t ciflags; + + /* Check for pkts available */ + color = (cqd_ptr->type_color >> CQ_DESC_COLOR_SHIFT) + & CQ_DESC_COLOR_MASK; + if (color == cq->last_color) + break; + + /* Get the cq descriptor and extract rq info from it */ + cqd = *cqd_ptr; + rq_num = cqd.q_number & CQ_DESC_Q_NUM_MASK; + rq_idx = cqd.completed_index & CQ_DESC_COMP_NDX_MASK; + + rq = &enic->rq[rq_num]; + rqd_ptr = ((struct rq_enet_desc *)rq->ring.descs) + rq_idx; + + /* allocate a new mbuf */ + nmb = rte_mbuf_raw_alloc(rq->mp); + if (nmb == NULL) { + rte_atomic64_inc(&enic->soft_stats.rx_nombuf); + break; + } + + /* A packet error means descriptor and data are untrusted */ + packet_error = enic_cq_rx_check_err(&cqd); + + /* Get the mbuf to return and replace with one just allocated */ + rxmb = rq->mbuf_ring[rq_idx]; + rq->mbuf_ring[rq_idx] = nmb; + + /* Increment cqd, rqd, mbuf_table index */ + cq_idx++; + if (unlikely(cq_idx == cq->ring.desc_count)) { + cq_idx = 0; + cq->last_color = cq->last_color ? 0 : 1; + } + + /* Prefetch next mbuf & desc while processing current one */ + cqd_ptr = (struct cq_desc *)(cq->ring.descs) + cq_idx; + rte_enic_prefetch(cqd_ptr); + + ciflags = enic_cq_rx_desc_ciflags( + (struct cq_enet_rq_desc *)&cqd); + + /* Push descriptor for newly allocated mbuf */ + dma_addr = (dma_addr_t)(nmb->buf_physaddr + + RTE_PKTMBUF_HEADROOM); + rq_enet_desc_enc(rqd_ptr, dma_addr, + (rq->is_sop ? 
RQ_ENET_TYPE_ONLY_SOP + : RQ_ENET_TYPE_NOT_SOP), + nmb->buf_len - RTE_PKTMBUF_HEADROOM); + + /* Fill in the rest of the mbuf */ + seg_length = enic_cq_rx_desc_n_bytes(&cqd); + + if (rq->is_sop) { + first_seg = rxmb; + first_seg->nb_segs = 1; + first_seg->pkt_len = seg_length; + } else { + first_seg->pkt_len = (uint16_t)(first_seg->pkt_len + + seg_length); + first_seg->nb_segs++; + last_seg->next = rxmb; + } + + rxmb->next = NULL; + rxmb->port = enic->port_id; + rxmb->data_len = seg_length; + + rq->rx_nb_hold++; + + if (!(enic_cq_rx_desc_eop(ciflags))) { + last_seg = rxmb; + continue; + } + + /* cq rx flags are only valid if eop bit is set */ + first_seg->packet_type = enic_cq_rx_flags_to_pkt_type(&cqd); + enic_cq_rx_to_pkt_flags(&cqd, first_seg); + + if (unlikely(packet_error)) { + rte_pktmbuf_free(first_seg); + rte_atomic64_inc(&enic->soft_stats.rx_packet_errors); + continue; + } + + + /* prefetch mbuf data for caller */ + rte_packet_prefetch(RTE_PTR_ADD(first_seg->buf_addr, + RTE_PKTMBUF_HEADROOM)); + + /* store the mbuf address into the next entry of the array */ + rx_pkts[nb_rx++] = first_seg; + } + + sop_rq->pkt_first_seg = first_seg; + sop_rq->pkt_last_seg = last_seg; + + cq->to_clean = cq_idx; + + if ((sop_rq->rx_nb_hold + data_rq->rx_nb_hold) > + sop_rq->rx_free_thresh) { + if (data_rq->in_use) { + data_rq->posted_index = + enic_ring_add(data_rq->ring.desc_count, + data_rq->posted_index, + data_rq->rx_nb_hold); + data_rq->rx_nb_hold = 0; + } + sop_rq->posted_index = enic_ring_add(sop_rq->ring.desc_count, + sop_rq->posted_index, + sop_rq->rx_nb_hold); + sop_rq->rx_nb_hold = 0; + + rte_mb(); + if (data_rq->in_use) + iowrite32(data_rq->posted_index, + &data_rq->ctrl->posted_index); + rte_compiler_barrier(); + iowrite32(sop_rq->posted_index, &sop_rq->ctrl->posted_index); + } + + + return nb_rx; +} + +static inline void enic_free_wq_bufs(struct vnic_wq *wq, u16 completed_index) +{ + struct vnic_wq_buf *buf; + struct rte_mbuf *m, *free[ENIC_MAX_WQ_DESCS]; + unsigned int nb_to_free, nb_free = 0, i; + struct rte_mempool *pool; + unsigned int tail_idx; + unsigned int desc_count = wq->ring.desc_count; + + nb_to_free = enic_ring_sub(desc_count, wq->tail_idx, completed_index) + + 1; + tail_idx = wq->tail_idx; + buf = &wq->bufs[tail_idx]; + pool = ((struct rte_mbuf *)buf->mb)->pool; + for (i = 0; i < nb_to_free; i++) { + buf = &wq->bufs[tail_idx]; + m = (struct rte_mbuf *)(buf->mb); + if (likely(m->pool == pool)) { + ENIC_ASSERT(nb_free < ENIC_MAX_WQ_DESCS); + free[nb_free++] = m; + } else { + rte_mempool_put_bulk(pool, (void *)free, nb_free); + free[0] = m; + nb_free = 1; + pool = m->pool; + } + tail_idx = enic_ring_incr(desc_count, tail_idx); + buf->mb = NULL; + } + + rte_mempool_put_bulk(pool, (void **)free, nb_free); + + wq->tail_idx = tail_idx; + wq->ring.desc_avail += nb_to_free; +} + +unsigned int enic_cleanup_wq(__rte_unused struct enic *enic, struct vnic_wq *wq) +{ + u16 completed_index; + + completed_index = *((uint32_t *)wq->cqmsg_rz->addr) & 0xffff; + + if (wq->last_completed_index != completed_index) { + enic_free_wq_bufs(wq, completed_index); + wq->last_completed_index = completed_index; + } + return 0; +} + +uint16_t enic_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint16_t index; + unsigned int pkt_len, data_len; + unsigned int nb_segs; + struct rte_mbuf *tx_pkt; + struct vnic_wq *wq = (struct vnic_wq *)tx_queue; + struct enic *enic = vnic_dev_priv(wq->vdev); + unsigned short vlan_id; + uint64_t ol_flags; + uint64_t ol_flags_mask; + unsigned int 
wq_desc_avail; + int head_idx; + struct vnic_wq_buf *buf; + unsigned int desc_count; + struct wq_enet_desc *descs, *desc_p, desc_tmp; + uint16_t mss; + uint8_t vlan_tag_insert; + uint8_t eop; + uint64_t bus_addr; + + enic_cleanup_wq(enic, wq); + wq_desc_avail = vnic_wq_desc_avail(wq); + head_idx = wq->head_idx; + desc_count = wq->ring.desc_count; + ol_flags_mask = PKT_TX_VLAN_PKT | PKT_TX_IP_CKSUM | PKT_TX_L4_MASK; + + nb_pkts = RTE_MIN(nb_pkts, ENIC_TX_XMIT_MAX); + + for (index = 0; index < nb_pkts; index++) { + tx_pkt = *tx_pkts++; + nb_segs = tx_pkt->nb_segs; + if (nb_segs > wq_desc_avail) { + if (index > 0) + goto post; + goto done; + } + + pkt_len = tx_pkt->pkt_len; + data_len = tx_pkt->data_len; + ol_flags = tx_pkt->ol_flags; + mss = 0; + vlan_id = 0; + vlan_tag_insert = 0; + bus_addr = (dma_addr_t) + (tx_pkt->buf_physaddr + tx_pkt->data_off); + + descs = (struct wq_enet_desc *)wq->ring.descs; + desc_p = descs + head_idx; + + eop = (data_len == pkt_len); + + if (ol_flags & ol_flags_mask) { + if (ol_flags & PKT_TX_VLAN_PKT) { + vlan_tag_insert = 1; + vlan_id = tx_pkt->vlan_tci; + } + + if (ol_flags & PKT_TX_IP_CKSUM) + mss |= ENIC_CALC_IP_CKSUM; + + /* Nic uses just 1 bit for UDP and TCP */ + switch (ol_flags & PKT_TX_L4_MASK) { + case PKT_TX_TCP_CKSUM: + case PKT_TX_UDP_CKSUM: + mss |= ENIC_CALC_TCP_UDP_CKSUM; + break; + } + } + + wq_enet_desc_enc(&desc_tmp, bus_addr, data_len, mss, 0, 0, eop, + eop, 0, vlan_tag_insert, vlan_id, 0); + + *desc_p = desc_tmp; + buf = &wq->bufs[head_idx]; + buf->mb = (void *)tx_pkt; + head_idx = enic_ring_incr(desc_count, head_idx); + wq_desc_avail--; + + if (!eop) { + for (tx_pkt = tx_pkt->next; tx_pkt; tx_pkt = + tx_pkt->next) { + data_len = tx_pkt->data_len; + + if (tx_pkt->next == NULL) + eop = 1; + desc_p = descs + head_idx; + bus_addr = (dma_addr_t)(tx_pkt->buf_physaddr + + tx_pkt->data_off); + wq_enet_desc_enc((struct wq_enet_desc *) + &desc_tmp, bus_addr, data_len, + mss, 0, 0, eop, eop, 0, + vlan_tag_insert, vlan_id, 0); + + *desc_p = desc_tmp; + buf = &wq->bufs[head_idx]; + buf->mb = (void *)tx_pkt; + head_idx = enic_ring_incr(desc_count, head_idx); + wq_desc_avail--; + } + } + } + post: + rte_wmb(); + iowrite32(head_idx, &wq->ctrl->posted_index); + done: + wq->ring.desc_avail = wq_desc_avail; + wq->head_idx = head_idx; + + return index; +} + + diff --git a/drivers/net/fm10k/Makefile b/drivers/net/fm10k/Makefile index 602a2d2d..afcbd1d8 100644 --- a/drivers/net/fm10k/Makefile +++ b/drivers/net/fm10k/Makefile @@ -43,13 +43,14 @@ EXPORT_MAP := rte_pmd_fm10k_version.map LIBABIVER := 1 -ifeq ($(CC), icc) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 -else ifeq ($(CC), clang) +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) + # ## CFLAGS for clang # @@ -99,5 +100,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_FM10K_INC_VECTOR) += fm10k_rxtx_vec.c DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_mempool lib/librte_mbuf DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_net +DEPDIRS-$(CONFIG_RTE_LIBRTE_FM10K_PMD) += lib/librte_kvargs include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/fm10k/fm10k_ethdev.c b/drivers/net/fm10k/fm10k_ethdev.c index c2d377f1..eb77705e 100644 --- a/drivers/net/fm10k/fm10k_ethdev.c +++ b/drivers/net/fm10k/fm10k_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2013-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2013-2016 Intel Corporation. All rights reserved. 
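The enic_xmit_pkts() routine added above folds the mbuf offload flags into the per-descriptor checksum request: one bit asks for the IP header checksum and a single shared bit covers TCP or UDP. A standalone sketch of that mapping; the X_* constants below are stand-ins for the real rte_mbuf and enic values:

#include <stdint.h>
#include <stdio.h>

/* Stand-in flag values for illustration only; the real constants come from
 * rte_mbuf.h and enic.h. */
#define X_TX_IP_CKSUM       (1ULL << 1)
#define X_TX_TCP_CKSUM      (2ULL << 2)
#define X_TX_UDP_CKSUM      (3ULL << 2)
#define X_TX_L4_MASK        (3ULL << 2)

#define X_CALC_IP_CKSUM       1   /* NIC uses one bit for the IP checksum ... */
#define X_CALC_TCP_UDP_CKSUM  2   /* ... and one shared bit for TCP/UDP */

/* Fold mbuf offload flags into the descriptor "mss" field the same way the
 * transmit path above does. */
static uint16_t tx_flags_to_mss(uint64_t ol_flags)
{
        uint16_t mss = 0;

        if (ol_flags & X_TX_IP_CKSUM)
                mss |= X_CALC_IP_CKSUM;

        switch (ol_flags & X_TX_L4_MASK) {
        case X_TX_TCP_CKSUM:
        case X_TX_UDP_CKSUM:
                mss |= X_CALC_TCP_UDP_CKSUM;
                break;
        default:
                break;
        }
        return mss;
}

int main(void)
{
        printf("0x%x\n", (unsigned)tx_flags_to_mss(X_TX_IP_CKSUM | X_TX_TCP_CKSUM)); /* 0x3 */
        printf("0x%x\n", (unsigned)tx_flags_to_mss(X_TX_UDP_CKSUM));                 /* 0x2 */
        return 0;
}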
* All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -947,7 +947,7 @@ fm10k_dev_promiscuous_enable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; fm10k_mbx_lock(hw); @@ -969,7 +969,7 @@ fm10k_dev_promiscuous_disable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; if (dev->data->all_multicast == 1) @@ -995,7 +995,7 @@ fm10k_dev_allmulticast_enable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; /* If promiscuous mode is enabled, it doesn't make sense to enable @@ -1026,7 +1026,7 @@ fm10k_dev_allmulticast_disable(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); /* Return if it didn't acquire valid glort range */ - if (!fm10k_glort_valid(hw)) + if ((hw->mac.type == fm10k_mac_pf) && !fm10k_glort_valid(hw)) return; if (dev->data->promiscuous) { @@ -1256,8 +1256,46 @@ fm10k_link_update(struct rte_eth_dev *dev, return 0; } +static int fm10k_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) +{ + unsigned i, q; + unsigned count = 0; + + if (xstats_names != NULL) { + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Global stats */ + for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", fm10k_hw_stats_strings[count].name); + count++; + } + + /* PF queue stats */ + for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { + for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%u_%s", q, + fm10k_hw_stats_rx_q_strings[i].name); + count++; + } + for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%u_%s", q, + fm10k_hw_stats_tx_q_strings[i].name); + count++; + } + } + } + return FM10K_NB_XSTATS; +} + static int -fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct fm10k_hw_stats *hw_stats = @@ -1269,8 +1307,6 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Global stats */ for (i = 0; i < FM10K_NB_HW_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "%s", fm10k_hw_stats_strings[count].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + fm10k_hw_stats_strings[count].offset); count++; @@ -1279,18 +1315,12 @@ fm10k_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* PF queue stats */ for (q = 0; q < FM10K_MAX_QUEUES_PF; q++) { for (i = 0; i < FM10K_NB_RX_Q_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_q%u_%s", q, - fm10k_hw_stats_rx_q_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)&hw_stats->q[q]) + fm10k_hw_stats_rx_q_strings[i].offset); count++; } for (i = 0; i < FM10K_NB_TX_Q_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_q%u_%s", q, - fm10k_hw_stats_tx_q_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)&hw_stats->q[q]) + 
fm10k_hw_stats_tx_q_strings[i].offset); @@ -2629,6 +2659,7 @@ static const struct eth_dev_ops fm10k_eth_dev_ops = { .allmulticast_disable = fm10k_dev_allmulticast_disable, .stats_get = fm10k_stats_get, .xstats_get = fm10k_xstats_get, + .xstats_get_names = fm10k_xstats_get_names, .stats_reset = fm10k_stats_reset, .xstats_reset = fm10k_stats_reset, .link_update = fm10k_link_update, diff --git a/drivers/net/fm10k/fm10k_rxtx.c b/drivers/net/fm10k/fm10k_rxtx.c index 81ed4e79..dd92a91e 100644 --- a/drivers/net/fm10k/fm10k_rxtx.c +++ b/drivers/net/fm10k/fm10k_rxtx.c @@ -96,22 +96,6 @@ rx_desc_to_ol_flags(struct rte_mbuf *m, const union fm10k_rx_desc *d) if (d->w.pkt_info & FM10K_RXD_RSSTYPE_MASK) m->ol_flags |= PKT_RX_RSS_HASH; - - if (unlikely((d->d.staterr & - (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE)) == - (FM10K_RXD_STATUS_IPCS | FM10K_RXD_STATUS_IPE))) - m->ol_flags |= PKT_RX_IP_CKSUM_BAD; - - if (unlikely((d->d.staterr & - (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E)) == - (FM10K_RXD_STATUS_L4CS | FM10K_RXD_STATUS_L4E))) - m->ol_flags |= PKT_RX_L4_CKSUM_BAD; - - if (unlikely(d->d.staterr & FM10K_RXD_STATUS_HBO)) - m->ol_flags |= PKT_RX_HBUF_OVERFLOW; - - if (unlikely(d->d.staterr & FM10K_RXD_STATUS_RXE)) - m->ol_flags |= PKT_RX_RECIP_ERR; } uint16_t diff --git a/drivers/net/fm10k/fm10k_rxtx_vec.c b/drivers/net/fm10k/fm10k_rxtx_vec.c index f8efe8f5..9ea747e1 100644 --- a/drivers/net/fm10k/fm10k_rxtx_vec.c +++ b/drivers/net/fm10k/fm10k_rxtx_vec.c @@ -101,7 +101,7 @@ fm10k_desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) const __m128i rxe_flag = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, PKT_RX_RECIP_ERR, 0); + 0, 0, 0, 0); /* map rss type to rss hash flag */ const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, @@ -487,10 +487,10 @@ fm10k_recv_raw_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, rte_compiler_barrier(); if (split_packet) { - rte_prefetch0(&rx_pkts[pos]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 1]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 2]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 3]->cacheline1); + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); } /* D.1 pkt 3,4 convert format from desc to pktmbuf */ @@ -606,8 +606,11 @@ fm10k_reassemble_packets(struct fm10k_rx_queue *rxq, if (!split_flags[buf_idx]) { /* it's the last packet of the set */ +#ifdef RTE_LIBRTE_FM10K_RX_OLFLAGS_ENABLE start->hash = end->hash; start->ol_flags = end->ol_flags; + start->packet_type = end->packet_type; +#endif pkts[pkt_idx++] = start; start = end = NULL; } diff --git a/drivers/net/i40e/Makefile b/drivers/net/i40e/Makefile index 6dd6eaab..53fe145f 100644 --- a/drivers/net/i40e/Makefile +++ b/drivers/net/i40e/Makefile @@ -48,9 +48,9 @@ LIBABIVER := 1 # Add extra flags for base driver files (also known as shared code) # to disable warnings # -ifeq ($(CC), icc) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) CFLAGS_BASE_DRIVER = -wd593 -wd188 -else ifeq ($(CC), clang) +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) CFLAGS_BASE_DRIVER += -Wno-sign-compare CFLAGS_BASE_DRIVER += -Wno-unused-value CFLAGS_BASE_DRIVER += -Wno-unused-parameter @@ -85,7 +85,7 @@ VPATH += $(SRCDIR)/base # # all source are stored in SRCS-y -# base driver is based on the package of dpdk-i40e.2016.01.07.14.tar.gz. +# base driver is based on the package of dpdk-i40e.2016.04.18.12.tar.gz. 
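The fm10k changes above move the xstats names into a separate xstats_get_names callback, leaving xstats_get to report only values. A sketch of how an application would pair the two, assuming the 16.07-era rte_ethdev calls rte_eth_xstats_get_names() and rte_eth_xstats_get():

#include <stdio.h>
#include <stdlib.h>
#include <inttypes.h>
#include <rte_ethdev.h>

/* Illustrative only: names are static per device and fetched once,
 * values change on every poll. */
static void dump_xstats(uint8_t port_id)
{
        struct rte_eth_xstat_name *names = NULL;
        struct rte_eth_xstat *vals = NULL;
        int i, n;

        /* a NULL array queries how many entries the PMD will report */
        n = rte_eth_xstats_get_names(port_id, NULL, 0);
        if (n <= 0)
                return;

        names = calloc(n, sizeof(*names));
        vals = calloc(n, sizeof(*vals));
        if (names == NULL || vals == NULL)
                goto out;

        if (rte_eth_xstats_get_names(port_id, names, n) != n ||
            rte_eth_xstats_get(port_id, vals, n) != n)
                goto out;

        for (i = 0; i < n; i++)
                printf("%s: %" PRIu64 "\n", names[i].name, vals[i].value);
out:
        free(names);
        free(vals);
}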
# SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_adminq.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_common.c @@ -102,9 +102,16 @@ SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_ethdev_vf.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_pf.c SRCS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += i40e_fdir.c +# vector PMD driver needs SSE4.1 support +ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSE4_1,$(CFLAGS)),) +CFLAGS_i40e_rxtx_vec.o += -msse4.1 +endif + + # this lib depends upon: DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_mempool lib/librte_mbuf DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_net +DEPDIRS-$(CONFIG_RTE_LIBRTE_I40E_PMD) += lib/librte_kvargs include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/i40e/base/i40e_adminq.c b/drivers/net/i40e/base/i40e_adminq.c index 222add40..0d3a83fa 100644 --- a/drivers/net/i40e/base/i40e_adminq.c +++ b/drivers/net/i40e/base/i40e_adminq.c @@ -37,18 +37,6 @@ POSSIBILITY OF SUCH DAMAGE. #include "i40e_adminq.h" #include "i40e_prototype.h" -#ifdef PF_DRIVER -/** - * i40e_is_nvm_update_op - return true if this is an NVM update operation - * @desc: API request descriptor - **/ -STATIC INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc) -{ - return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase) || - desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update)); -} - -#endif /* PF_DRIVER */ /** * i40e_adminq_init_regs - Initialize AdminQ registers * @hw: pointer to the hardware structure @@ -584,6 +572,26 @@ shutdown_arq_out: i40e_release_spinlock(&hw->aq.arq_spinlock); return ret_code; } +#ifdef PF_DRIVER + +/** + * i40e_resume_aq - resume AQ processing from 0 + * @hw: pointer to the hardware structure + **/ +STATIC void i40e_resume_aq(struct i40e_hw *hw) +{ + /* Registers are reset after PF reset */ + hw->aq.asq.next_to_use = 0; + hw->aq.asq.next_to_clean = 0; + + i40e_config_asq_regs(hw); + + hw->aq.arq.next_to_use = 0; + hw->aq.arq.next_to_clean = 0; + + i40e_config_arq_regs(hw); +} +#endif /* PF_DRIVER */ /** * i40e_init_adminq - main initialization routine for Admin Queue @@ -598,12 +606,15 @@ shutdown_arq_out: **/ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) { - enum i40e_status_code ret_code; #ifdef PF_DRIVER - u16 eetrack_lo, eetrack_hi; u16 cfg_ptr, oem_hi, oem_lo; + u16 eetrack_lo, eetrack_hi; +#endif + enum i40e_status_code ret_code; +#ifdef PF_DRIVER int retry = 0; #endif + /* verify input for valid configuration */ if ((hw->aq.num_arq_entries == 0) || (hw->aq.num_asq_entries == 0) || @@ -612,8 +623,6 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) ret_code = I40E_ERR_CONFIG; goto init_adminq_exit; } - - /* initialize spin locks */ i40e_init_spinlock(&hw->aq.asq_spinlock); i40e_init_spinlock(&hw->aq.arq_spinlock); @@ -680,13 +689,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw) /* pre-emptive resource lock release */ i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL); - hw->aq.nvm_release_on_done = false; + hw->nvm_release_on_done = false; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - ret_code = i40e_aq_set_hmc_resource_profile(hw, - I40E_HMC_PROFILE_DEFAULT, - 0, - NULL); #endif /* PF_DRIVER */ ret_code = I40E_SUCCESS; @@ -720,8 +725,6 @@ enum i40e_status_code i40e_shutdown_adminq(struct i40e_hw *hw) i40e_shutdown_asq(hw); i40e_shutdown_arq(hw); - - /* destroy the spinlocks */ i40e_destroy_spinlock(&hw->aq.asq_spinlock); i40e_destroy_spinlock(&hw->aq.arq_spinlock); @@ -747,7 +750,6 @@ u16 i40e_clean_asq(struct i40e_hw *hw) 
desc = I40E_ADMINQ_DESC(*asq, ntc); details = I40E_ADMINQ_DETAILS(*asq, ntc); - while (rd32(hw, hw->aq.asq.head) != ntc) { i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head)); @@ -780,7 +782,11 @@ u16 i40e_clean_asq(struct i40e_hw *hw) * Returns true if the firmware has processed all descriptors on the * admin send queue. Returns false if there are still requests pending. **/ +#ifdef VF_DRIVER bool i40e_asq_done(struct i40e_hw *hw) +#else +STATIC bool i40e_asq_done(struct i40e_hw *hw) +#endif { /* AQ designers suggest use of head for better * timing reliability than DD bit @@ -938,7 +944,6 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, */ if (i40e_asq_done(hw)) break; - /* ugh! delay while spin_lock */ i40e_msec_delay(1); total_delay++; } while (total_delay < hw->aq.asq_cmd_timeout); @@ -1120,27 +1125,8 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw, hw->aq.arq.next_to_use = ntu; #ifdef PF_DRIVER - if (i40e_is_nvm_update_op(&e->desc)) { - if (hw->aq.nvm_release_on_done) { - i40e_release_nvm(hw); - hw->aq.nvm_release_on_done = false; - } - - switch (hw->nvmupd_state) { - case I40E_NVMUPD_STATE_INIT_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; - break; - - case I40E_NVMUPD_STATE_WRITE_WAIT: - hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; - break; - - default: - break; - } - } - -#endif + i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode)); +#endif /* PF_DRIVER */ clean_arq_element_out: /* Set pending if needed, unlock and return */ if (pending != NULL) @@ -1151,16 +1137,3 @@ clean_arq_element_err: return ret_code; } -void i40e_resume_aq(struct i40e_hw *hw) -{ - /* Registers are reset after PF reset */ - hw->aq.asq.next_to_use = 0; - hw->aq.asq.next_to_clean = 0; - - i40e_config_asq_regs(hw); - - hw->aq.arq.next_to_use = 0; - hw->aq.arq.next_to_clean = 0; - - i40e_config_arq_regs(hw); -} diff --git a/drivers/net/i40e/base/i40e_adminq.h b/drivers/net/i40e/base/i40e_adminq.h index 40c86d9d..750973c5 100644 --- a/drivers/net/i40e/base/i40e_adminq.h +++ b/drivers/net/i40e/base/i40e_adminq.h @@ -104,7 +104,6 @@ struct i40e_adminq_info { u32 fw_build; /* firmware build number */ u16 api_maj_ver; /* api major version */ u16 api_min_ver; /* api minor version */ - bool nvm_release_on_done; struct i40e_spinlock asq_spinlock; /* Send queue spinlock */ struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */ @@ -158,8 +157,8 @@ STATIC INLINE int i40e_aq_rc_to_posix(int aq_ret, int aq_rc) } /* general information */ -#define I40E_AQ_LARGE_BUF 512 -#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ +#define I40E_AQ_LARGE_BUF 512 +#define I40E_ASQ_CMD_TIMEOUT 250 /* msecs */ #ifdef I40E_ESS_SUPPORT #define I40E_ASQ_CMD_TIMEOUT_ESS 50000 /* msecs */ #endif diff --git a/drivers/net/i40e/base/i40e_adminq_cmd.h b/drivers/net/i40e/base/i40e_adminq_cmd.h index fe9d5b51..2b7a7608 100644 --- a/drivers/net/i40e/base/i40e_adminq_cmd.h +++ b/drivers/net/i40e/base/i40e_adminq_cmd.h @@ -224,10 +224,6 @@ enum i40e_admin_queue_opc { i40e_aqc_opc_resume_port_tx = 0x041C, i40e_aqc_opc_configure_partition_bw = 0x041D, - /* hmc */ - i40e_aqc_opc_query_hmc_resource_profile = 0x0500, - i40e_aqc_opc_set_hmc_resource_profile = 0x0501, - /* phy commands*/ i40e_aqc_opc_get_phy_abilities = 0x0600, i40e_aqc_opc_set_phy_config = 0x0601, @@ -450,6 +446,7 @@ struct i40e_aqc_list_capabilities_element_resp { #define I40E_AQ_CAP_ID_SDP 0x0062 #define I40E_AQ_CAP_ID_MDIO 0x0063 #define I40E_AQ_CAP_ID_WSR_PROT 0x0064 +#define 
I40E_AQ_CAP_ID_NVM_MGMT 0x0080 #define I40E_AQ_CAP_ID_FLEX10 0x00F1 #define I40E_AQ_CAP_ID_CEM 0x00F2 @@ -931,7 +928,7 @@ struct i40e_aqc_vsi_properties_data { u8 up_enable_bits; u8 sched_reserved; /* outer up section */ - __le32 outer_up_table; /* same structure and defines as ingress table */ + __le32 outer_up_table; /* same structure and defines as ingress tbl */ u8 cmd_reserved[8]; /* last 32 bytes are written by FW */ __le16 qs_handle[8]; @@ -1571,7 +1568,8 @@ struct i40e_aqc_configure_switching_comp_ets_bw_limit_data { u8 reserved1[28]; }; -I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_configure_switching_comp_ets_bw_limit_data); +I40E_CHECK_STRUCT_LEN(0x40, + i40e_aqc_configure_switching_comp_ets_bw_limit_data); /* Configure Switching Component Bandwidth Allocation per Tc * (indirect 0x0417) @@ -1646,27 +1644,6 @@ struct i40e_aqc_configure_partition_bw_data { I40E_CHECK_STRUCT_LEN(0x22, i40e_aqc_configure_partition_bw_data); -/* Get and set the active HMC resource profile and status. - * (direct 0x0500) and (direct 0x0501) - */ -struct i40e_aq_get_set_hmc_resource_profile { - u8 pm_profile; - u8 pe_vf_enabled; - u8 reserved[14]; -}; - -I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile); - -enum i40e_aq_hmc_profile { - /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */ - I40E_HMC_PROFILE_DEFAULT = 1, - I40E_HMC_PROFILE_FAVOR_VF = 2, - I40E_HMC_PROFILE_EQUAL = 3, -}; - -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF -#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F - /* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */ /* set in param0 for get phy abilities to report qualified modules */ @@ -1702,6 +1679,10 @@ enum i40e_aq_phy_type { I40E_PHY_TYPE_1000BASE_LX = 0x1C, I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D, I40E_PHY_TYPE_20GBASE_KR2 = 0x1E, + I40E_PHY_TYPE_25GBASE_KR = 0x1F, + I40E_PHY_TYPE_25GBASE_CR = 0x20, + I40E_PHY_TYPE_25GBASE_SR = 0x21, + I40E_PHY_TYPE_25GBASE_LR = 0x22, I40E_PHY_TYPE_MAX }; @@ -1918,7 +1899,10 @@ struct i40e_aqc_set_phy_debug { #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01 #define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02 +/* Disable link manageability on a single port */ #define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10 +/* Disable link manageability on all ports needs both bits 4 and 5 */ +#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20 u8 reserved[15]; }; @@ -1966,7 +1950,7 @@ struct i40e_aqc_nvm_config_read { #define I40E_AQ_ANVM_READ_SINGLE_FEATURE 0 #define I40E_AQ_ANVM_READ_MULTIPLE_FEATURES 1 __le16 element_count; - __le16 element_id; /* Feature/field ID */ + __le16 element_id; /* Feature/field ID */ __le16 element_id_msw; /* MSWord of field ID */ __le32 address_high; __le32 address_low; @@ -1987,9 +1971,10 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_nvm_config_write); /* Used for 0x0704 as well as for 0x0705 commands */ #define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT 1 -#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) -#define I40E_AQ_ANVM_FEATURE 0 -#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_MASK \ + (1 << I40E_AQ_ANVM_FEATURE_OR_IMMEDIATE_SHIFT) +#define I40E_AQ_ANVM_FEATURE 0 +#define I40E_AQ_ANVM_IMMEDIATE_FIELD (1 << FEATURE_OR_IMMEDIATE_SHIFT) struct i40e_aqc_nvm_config_data_feature { __le16 feature_id; #define I40E_AQ_ANVM_FEATURE_OPTION_OEM_ONLY 0x01 @@ -2013,7 +1998,7 @@ I40E_CHECK_STRUCT_LEN(0xc, i40e_aqc_nvm_config_data_immediate_field); /* OEM Post 
Update (indirect 0x0720) * no command data struct used */ - struct i40e_aqc_nvm_oem_post_update { +struct i40e_aqc_nvm_oem_post_update { #define I40E_AQ_NVM_OEM_POST_UPDATE_EXTERNAL_DATA 0x01 u8 sel_data; u8 reserved[7]; @@ -2303,7 +2288,8 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_lldp_set_local_mib_resp); */ struct i40e_aqc_lldp_stop_start_specific_agent { #define I40E_AQC_START_SPECIFIC_AGENT_SHIFT 0 -#define I40E_AQC_START_SPECIFIC_AGENT_MASK (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) +#define I40E_AQC_START_SPECIFIC_AGENT_MASK \ + (1 << I40E_AQC_START_SPECIFIC_AGENT_SHIFT) u8 command; u8 reserved[15]; }; @@ -2325,7 +2311,7 @@ struct i40e_aqc_add_udp_tunnel { I40E_CHECK_CMD_LENGTH(i40e_aqc_add_udp_tunnel); struct i40e_aqc_add_udp_tunnel_completion { - __le16 udp_port; + __le16 udp_port; u8 filter_entry_index; u8 multiple_pfs; #define I40E_AQC_SINGLE_PF 0x0 diff --git a/drivers/net/i40e/base/i40e_common.c b/drivers/net/i40e/base/i40e_common.c index ef3425e1..98ed4b68 100644 --- a/drivers/net/i40e/base/i40e_common.c +++ b/drivers/net/i40e/base/i40e_common.c @@ -67,6 +67,8 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_10G_BASE_T4: case I40E_DEV_ID_20G_KR2: case I40E_DEV_ID_20G_KR2_A: + case I40E_DEV_ID_25G_B: + case I40E_DEV_ID_25G_SFP28: hw->mac.type = I40E_MAC_XL710; break; #ifdef X722_SUPPORT @@ -78,6 +80,8 @@ STATIC enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw) case I40E_DEV_ID_SFP_X722: case I40E_DEV_ID_1G_BASE_T_X722: case I40E_DEV_ID_10G_BASE_T_X722: + case I40E_DEV_ID_SFP_I_X722: + case I40E_DEV_ID_QSFP_I_X722: hw->mac.type = I40E_MAC_X722; break; #endif @@ -371,14 +375,16 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, /* the most we could have left is 16 bytes, pad with zeros */ if (i < len) { char d_buf[16]; - int j; + int j, i_sav; + i_sav = i; memset(d_buf, 0, sizeof(d_buf)); for (j = 0; i < len; j++, i++) d_buf[j] = buf[i]; i40e_debug(hw, mask, "\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n", - i, d_buf[0], d_buf[1], d_buf[2], d_buf[3], + i_sav, d_buf[0], d_buf[1], + d_buf[2], d_buf[3], d_buf[4], d_buf[5], d_buf[6], d_buf[7], d_buf[8], d_buf[9], d_buf[10], d_buf[11], d_buf[12], d_buf[13], d_buf[14], d_buf[15]); @@ -1317,8 +1323,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw) I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; - /* It can take upto 15 secs for GRST steady state */ - grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */ + grst_del = grst_del * 20; for (cnt = 0; cnt < grst_del; cnt++) { reg = rd32(hw, I40E_GLGEN_RSTAT); @@ -2215,10 +2220,12 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, * @seid: vsi number * @set: set unicast promiscuous enable/disable * @cmd_details: pointer to command details structure or NULL + * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc **/ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, u16 seid, bool set, - struct i40e_asq_cmd_details *cmd_details) + struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc) { struct i40e_aq_desc desc; struct i40e_aqc_set_vsi_promiscuous_modes *cmd = @@ -2231,8 +2238,9 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, if (set) { flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST; - if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) || - (hw->aq.api_maj_ver > 1)) + if (rx_only_promisc && + (((hw->aq.api_maj_ver == 
1) && (hw->aq.api_min_ver >= 5)) || + (hw->aq.api_maj_ver > 1))) flags |= I40E_AQC_SET_VSI_PROMISC_TX; } @@ -3040,10 +3048,7 @@ enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid, u16 *rules_used, u16 *rules_free) { /* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */ - if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) { - if (!rule_id) - return I40E_ERR_PARAM; - } else { + if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) { /* count and mr_list shall be valid for rule_type INGRESS VLAN * mirroring. For other rule_type, count and rule_type should * not matter. @@ -3240,67 +3245,6 @@ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw, } /** - * i40e_aq_get_hmc_resource_profile - * @hw: pointer to the hw struct - * @profile: type of profile the HMC is to be set as - * @pe_vf_enabled_count: the number of PE enabled VFs the system has - * @cmd_details: pointer to command details structure or NULL - * - * query the HMC profile of the device. - **/ -enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile *profile, - u8 *pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aq_get_set_hmc_resource_profile *resp = - (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; - enum i40e_status_code status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_query_hmc_resource_profile); - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile & - I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK); - *pe_vf_enabled_count = resp->pe_vf_enabled & - I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK; - - return status; -} - -/** - * i40e_aq_set_hmc_resource_profile - * @hw: pointer to the hw struct - * @profile: type of profile the HMC is to be set as - * @pe_vf_enabled_count: the number of PE enabled VFs the system has - * @cmd_details: pointer to command details structure or NULL - * - * set the HMC profile of the device. 
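The promiscuous-mode hunk above only sets the PROMISC_TX flag when the firmware AdminQ API is at least 1.5, using the open-coded test ((maj == 1 && min >= 5) || maj > 1). A tiny sketch of that comparison as a hypothetical helper, not present in the patch:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical helper: true when the AdminQ API version reported by
 * firmware is at least maj.min. */
static bool aq_api_at_least(unsigned api_maj, unsigned api_min,
                            unsigned maj, unsigned min)
{
        return api_maj > maj || (api_maj == maj && api_min >= min);
}

int main(void)
{
        printf("%d\n", aq_api_at_least(1, 5, 1, 5)); /* 1 */
        printf("%d\n", aq_api_at_least(1, 4, 1, 5)); /* 0 */
        printf("%d\n", aq_api_at_least(2, 0, 1, 5)); /* 1 */
        return 0;
}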
- **/ -enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile profile, - u8 pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details) -{ - struct i40e_aq_desc desc; - struct i40e_aq_get_set_hmc_resource_profile *cmd = - (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw; - enum i40e_status_code status; - - i40e_fill_default_direct_cmd_desc(&desc, - i40e_aqc_opc_set_hmc_resource_profile); - - cmd->pm_profile = (u8)profile; - cmd->pe_vf_enabled = pe_vf_enabled_count; - - status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details); - - return status; -} - -/** * i40e_aq_request_resource * @hw: pointer to the hw struct * @resource: resource id @@ -3728,7 +3672,7 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, p->num_msix_vectors = number; i40e_debug(hw, I40E_DEBUG_INIT, "HW Capability: MSIX vector count = %d\n", - p->num_msix_vectors_vf); + p->num_msix_vectors); break; case I40E_AQ_CAP_ID_VF_MSIX: p->num_msix_vectors_vf = number; @@ -3818,6 +3762,12 @@ STATIC void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff, "HW Capability: wr_csr_prot = 0x%llX\n\n", (p->wr_csr_prot & 0xffff)); break; + case I40E_AQ_CAP_ID_NVM_MGMT: + if (number & I40E_NVM_MGMT_SEC_REV_DISABLED) + p->sec_rev_disabled = true; + if (number & I40E_NVM_MGMT_UPDATE_DISABLED) + p->update_disabled = true; + break; #ifdef X722_SUPPORT case I40E_AQ_CAP_ID_WOL_AND_PROXY: hw->num_wol_proxy_filters = (u16)number; @@ -4486,7 +4436,7 @@ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid, } /** - * i40_aq_add_pvirt - Instantiate a Port Virtualizer on a port + * i40e_aq_add_pvirt - Instantiate a Port Virtualizer on a port * @hw: pointer to the hw struct * @flags: component flags * @mac_seid: uplink seid (MAC SEID) @@ -5484,6 +5434,35 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, } /** + * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue + * @filters: list of cloud filters + * @filter_count: length of list + * + * There's an issue in the device where the Geneve VNI layout needs + * to be shifted 1 byte over from the VxLAN VNI + **/ +STATIC void i40e_fix_up_geneve_vni( + struct i40e_aqc_add_remove_cloud_filters_element_data *filters, + u8 filter_count) +{ + struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters; + int i; + + for (i = 0; i < filter_count; i++) { + u16 tnl_type; + u32 ti; + + tnl_type = (le16_to_cpu(f[i].flags) & + I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >> + I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT; + if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) { + ti = le32_to_cpu(f[i].tenant_id); + f[i].tenant_id = cpu_to_le32(ti << 8); + } + } +} + +/** * i40e_aq_add_cloud_filters * @hw: pointer to the hardware structure * @seid: VSI seid to add cloud filters from @@ -5503,8 +5482,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, struct i40e_aq_desc desc; struct i40e_aqc_add_remove_cloud_filters *cmd = (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw; - u16 buff_len; enum i40e_status_code status; + u16 buff_len; i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_add_cloud_filters); @@ -5515,6 +5494,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw, cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); + i40e_fix_up_geneve_vni(filters, filter_count); + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; @@ -5552,6 +5533,8 @@ enum i40e_status_code 
i40e_aq_remove_cloud_filters(struct i40e_hw *hw, cmd->num_filters = filter_count; cmd->seid = CPU_TO_LE16(seid); + i40e_fix_up_geneve_vni(filters, filter_count); + status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL); return status; diff --git a/drivers/net/i40e/base/i40e_devids.h b/drivers/net/i40e/base/i40e_devids.h index f8443405..ed73e1d2 100644 --- a/drivers/net/i40e/base/i40e_devids.h +++ b/drivers/net/i40e/base/i40e_devids.h @@ -49,6 +49,8 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_DEV_ID_20G_KR2 0x1587 #define I40E_DEV_ID_20G_KR2_A 0x1588 #define I40E_DEV_ID_10G_BASE_T4 0x1589 +#define I40E_DEV_ID_25G_B 0x158A +#define I40E_DEV_ID_25G_SFP28 0x158B #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) #define I40E_DEV_ID_VF 0x154C #define I40E_DEV_ID_VF_HV 0x1571 @@ -65,6 +67,8 @@ POSSIBILITY OF SUCH DAMAGE. #define I40E_DEV_ID_SFP_X722 0x37D0 #define I40E_DEV_ID_1G_BASE_T_X722 0x37D1 #define I40E_DEV_ID_10G_BASE_T_X722 0x37D2 +#define I40E_DEV_ID_SFP_I_X722 0x37D3 +#define I40E_DEV_ID_QSFP_I_X722 0x37D4 #if defined(INTEGRATED_VF) || defined(VF_DRIVER) || defined(I40E_NDIS_SUPPORT) #define I40E_DEV_ID_X722_VF 0x37CD #define I40E_DEV_ID_X722_VF_HV 0x37D9 diff --git a/drivers/net/i40e/base/i40e_nvm.c b/drivers/net/i40e/base/i40e_nvm.c index f4e4eaa4..4fa1220b 100644 --- a/drivers/net/i40e/base/i40e_nvm.c +++ b/drivers/net/i40e/base/i40e_nvm.c @@ -872,10 +872,10 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, /* early check for status command and debug msgs */ upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno); - i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", + i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n", i40e_nvm_update_state_str[upd_cmd], hw->nvmupd_state, - hw->aq.nvm_release_on_done, + hw->nvm_release_on_done, hw->nvm_wait_opcode, cmd->command, cmd->config, cmd->offset, cmd->data_size); if (upd_cmd == I40E_NVMUPD_INVALID) { @@ -889,7 +889,18 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, * going into the state machine */ if (upd_cmd == I40E_NVMUPD_STATUS) { + if (!cmd->data_size) { + *perrno = -EFAULT; + return I40E_ERR_BUF_TOO_SHORT; + } + bytes[0] = hw->nvmupd_state; + + if (cmd->data_size >= 4) { + bytes[1] = 0; + *((u16 *)&bytes[2]) = hw->nvm_wait_opcode; + } + return I40E_SUCCESS; } @@ -908,6 +919,14 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, case I40E_NVMUPD_STATE_INIT_WAIT: case I40E_NVMUPD_STATE_WRITE_WAIT: + /* if we need to stop waiting for an event, clear + * the wait info and return before doing anything else + */ + if (cmd->offset == 0xffff) { + i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode); + return I40E_SUCCESS; + } + status = I40E_ERR_NOT_READY; *perrno = -EBUSY; break; @@ -980,7 +999,8 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, if (status) { i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -996,7 +1016,8 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, if (status) { i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -1009,10 
+1030,12 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, hw->aq.asq_last_status); } else { status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); - if (status) + if (status) { i40e_release_nvm(hw); - else + } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } } break; @@ -1030,7 +1053,8 @@ STATIC enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw, -EIO; i40e_release_nvm(hw); } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } } @@ -1125,8 +1149,10 @@ retry: switch (upd_cmd) { case I40E_NVMUPD_WRITE_CON: status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno); - if (!status) + if (!status) { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; + } break; case I40E_NVMUPD_WRITE_LCB: @@ -1138,7 +1164,8 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -1153,6 +1180,7 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT; } break; @@ -1167,7 +1195,8 @@ retry: -EIO; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; } else { - hw->aq.nvm_release_on_done = true; + hw->nvm_release_on_done = true; + hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update; hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; } break; @@ -1217,6 +1246,37 @@ retry: } /** + * i40e_nvmupd_check_wait_event - handle NVM update operation events + * @hw: pointer to the hardware structure + * @opcode: the event that just happened + **/ +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode) +{ + if (opcode == hw->nvm_wait_opcode) { + i40e_debug(hw, I40E_DEBUG_NVM, + "NVMUPD: clearing wait on opcode 0x%04x\n", opcode); + if (hw->nvm_release_on_done) { + i40e_release_nvm(hw); + hw->nvm_release_on_done = false; + } + hw->nvm_wait_opcode = 0; + + switch (hw->nvmupd_state) { + case I40E_NVMUPD_STATE_INIT_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT; + break; + + case I40E_NVMUPD_STATE_WRITE_WAIT: + hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING; + break; + + default: + break; + } + } +} + +/** * i40e_nvmupd_validate_command - Validate given command * @hw: pointer to hardware structure * @cmd: pointer to nvm update command buffer @@ -1378,6 +1438,12 @@ STATIC enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw, *perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status); } + /* should we wait for a followup event? 
*/ + if (cmd->offset) { + hw->nvm_wait_opcode = cmd->offset; + hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT; + } + return status; } diff --git a/drivers/net/i40e/base/i40e_osdep.h b/drivers/net/i40e/base/i40e_osdep.h index 8c84ed82..38e7ba5b 100644 --- a/drivers/net/i40e/base/i40e_osdep.h +++ b/drivers/net/i40e/base/i40e_osdep.h @@ -204,6 +204,13 @@ struct i40e_virt_mem { #define LE32_TO_CPU(c) rte_le_to_cpu_32(c) #define LE64_TO_CPU(k) rte_le_to_cpu_64(k) +#define cpu_to_le16(o) rte_cpu_to_le_16(o) +#define cpu_to_le32(s) rte_cpu_to_le_32(s) +#define cpu_to_le64(h) rte_cpu_to_le_64(h) +#define le16_to_cpu(a) rte_le_to_cpu_16(a) +#define le32_to_cpu(c) rte_le_to_cpu_32(c) +#define le64_to_cpu(k) rte_le_to_cpu_64(k) + /* SW spinlock */ struct i40e_spinlock { rte_spinlock_t spinlock; diff --git a/drivers/net/i40e/base/i40e_prototype.h b/drivers/net/i40e/base/i40e_prototype.h index 674430de..03dda937 100644 --- a/drivers/net/i40e/base/i40e_prototype.h +++ b/drivers/net/i40e/base/i40e_prototype.h @@ -67,14 +67,15 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw, void *buff, /* can be NULL */ u16 buff_size, struct i40e_asq_cmd_details *cmd_details); +#ifdef VF_DRIVER bool i40e_asq_done(struct i40e_hw *hw); +#endif /* debug function for adminq */ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc, void *buffer, u16 buf_len); void i40e_idle_aq(struct i40e_hw *hw); -void i40e_resume_aq(struct i40e_hw *hw); bool i40e_check_asq_alive(struct i40e_hw *hw); enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading); #ifdef X722_SUPPORT @@ -165,7 +166,8 @@ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw, u16 vsi_id, bool set_filter, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw, - u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); + u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details, + bool rx_only_promisc); enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw, u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw, @@ -345,10 +347,6 @@ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw, enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw, u8 tcmap, bool request, u8 *tcmap_ret, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile *profile, - u8 *pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit( struct i40e_hw *hw, u16 seid, struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data, @@ -359,10 +357,6 @@ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw, struct i40e_asq_cmd_details *cmd_details); -enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw, - enum i40e_aq_hmc_profile profile, - u8 pe_vf_enabled_count, - struct i40e_asq_cmd_details *cmd_details); enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw, u16 seid, u16 credit, u8 max_bw, struct i40e_asq_cmd_details *cmd_details); @@ -411,7 +405,6 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw, u16 vsi, struct 
i40e_aqc_add_remove_cloud_filters_element_data *filters, u8 filter_count); - enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw, u32 reg_addr0, u32 *reg_val0, u32 reg_addr1, u32 *reg_val1); @@ -473,6 +466,7 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw, enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw, struct i40e_nvm_access *cmd, u8 *bytes, int *); +void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode); void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status); #endif /* PF_DRIVER */ diff --git a/drivers/net/i40e/base/i40e_type.h b/drivers/net/i40e/base/i40e_type.h index d5ca67af..5349419f 100644 --- a/drivers/net/i40e/base/i40e_type.h +++ b/drivers/net/i40e/base/i40e_type.h @@ -188,7 +188,7 @@ enum i40e_memcpy_type { }; #ifdef X722_SUPPORT -#define I40E_FW_API_VERSION_MINOR_X722 0x0004 +#define I40E_FW_API_VERSION_MINOR_X722 0x0005 #endif #define I40E_FW_API_VERSION_MINOR_X710 0x0005 @@ -376,6 +376,11 @@ struct i40e_hw_capabilities { #define I40E_FLEX10_STATUS_DCC_ERROR 0x1 #define I40E_FLEX10_STATUS_VC_MODE 0x2 + bool sec_rev_disabled; + bool update_disabled; +#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1 +#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2 + bool mgmt_cem; bool ieee_1588; bool iwarp; @@ -655,6 +660,8 @@ struct i40e_hw { enum i40e_nvmupd_state nvmupd_state; struct i40e_aq_desc nvm_wb_desc; struct i40e_virt_mem nvm_buff; + bool nvm_release_on_done; + u16 nvm_wait_opcode; /* HMC info */ struct i40e_hmc_info hmc; /* HMC info struct */ @@ -1631,4 +1638,37 @@ struct i40e_lldp_variables { /* RSS Hash Table Size */ #define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000 + +/* INPUT SET MASK for RSS, flow director, and flexible payload */ +#define I40E_L3_SRC_SHIFT 47 +#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT) +#define I40E_L3_V6_SRC_SHIFT 43 +#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT) +#define I40E_L3_DST_SHIFT 35 +#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT) +#define I40E_L3_V6_DST_SHIFT 35 +#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT) +#define I40E_L4_SRC_SHIFT 34 +#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT) +#define I40E_L4_DST_SHIFT 33 +#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT) +#define I40E_VERIFY_TAG_SHIFT 31 +#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT) + +#define I40E_FLEX_50_SHIFT 13 +#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT) +#define I40E_FLEX_51_SHIFT 12 +#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT) +#define I40E_FLEX_52_SHIFT 11 +#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT) +#define I40E_FLEX_53_SHIFT 10 +#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT) +#define I40E_FLEX_54_SHIFT 9 +#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT) +#define I40E_FLEX_55_SHIFT 8 +#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT) +#define I40E_FLEX_56_SHIFT 7 +#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT) +#define I40E_FLEX_57_SHIFT 6 +#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT) #endif /* _I40E_TYPE_H_ */ diff --git a/drivers/net/i40e/base/i40e_virtchnl.h b/drivers/net/i40e/base/i40e_virtchnl.h index 26208f3f..fd51ec32 100644 --- a/drivers/net/i40e/base/i40e_virtchnl.h +++ b/drivers/net/i40e/base/i40e_virtchnl.h @@ -87,10 +87,15 @@ enum i40e_virtchnl_ops { I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14, I40E_VIRTCHNL_OP_GET_STATS = 15, I40E_VIRTCHNL_OP_FCOE = 16, - I40E_VIRTCHNL_OP_EVENT = 17, + I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */ 
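
The I40E_VIRTCHNL_OP_CONFIG_RSS_KEY/LUT opcodes and the i40e_virtchnl_rss_key / i40e_virtchnl_rss_lut structures added just below carry a variable-length payload behind a one-element trailing array. A minimal standalone sketch of the usual sizing-and-fill idiom for such a message follows; the struct mirrors the new header definition, but the key length, VSI id and allocation path are illustrative, not the driver's actual send code.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Mirrors struct i40e_virtchnl_rss_key from i40e_virtchnl.h */
struct virtchnl_rss_key {
    uint16_t vsi_id;
    uint16_t key_len;
    uint8_t key[1];     /* packed key bytes continue past the struct */
};

int main(void)
{
    uint8_t rss_key[52];            /* illustrative key length only */
    struct virtchnl_rss_key *msg;
    size_t msg_len, i;

    for (i = 0; i < sizeof(rss_key); i++)
        rss_key[i] = (uint8_t)i;    /* placeholder key material */

    /* one key[] byte is already inside the struct, hence the "- 1" */
    msg_len = sizeof(*msg) + sizeof(rss_key) - 1;
    msg = calloc(1, msg_len);
    if (msg == NULL)
        return 1;

    msg->vsi_id = 3;                /* illustrative VSI id */
    msg->key_len = (uint16_t)sizeof(rss_key);
    memcpy(msg->key, rss_key, sizeof(rss_key));

    printf("message length %zu bytes, key_len %u\n",
           msg_len, (unsigned)msg->key_len);
    free(msg);
    return 0;
}
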
#ifdef I40E_SOL_VF_SUPPORT I40E_VIRTCHNL_OP_GET_ADDNL_SOL_CONFIG = 19, #endif + I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23, + I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24, + I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25, + I40E_VIRTCHNL_OP_SET_RSS_HENA = 26, + }; /* Virtual channel message descriptor. This overlays the admin queue @@ -164,6 +169,7 @@ struct i40e_virtchnl_vsi_resource { #define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000 #define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000 #define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000 +#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF 0X00080000 struct i40e_virtchnl_vf_resource { u16 num_vsis; @@ -172,8 +178,8 @@ struct i40e_virtchnl_vf_resource { u16 max_mtu; u32 vf_offload_flags; - u32 max_fcoe_contexts; - u32 max_fcoe_filters; + u32 rss_key_size; + u32 rss_lut_size; struct i40e_virtchnl_vsi_resource vsi_res[1]; }; @@ -349,6 +355,39 @@ struct i40e_virtchnl_promisc_info { * PF replies with struct i40e_eth_stats in an external buffer. */ +/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY + * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT + * VF sends these messages to configure RSS. Only supported if both PF + * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during + * configuration negotiation. If this is the case, then the rss fields in + * the vf resource struct are valid. + * Both the key and LUT are initialized to 0 by the PF, meaning that + * RSS is effectively disabled until set up by the VF. + */ +struct i40e_virtchnl_rss_key { + u16 vsi_id; + u16 key_len; + u8 key[1]; /* RSS hash key, packed bytes */ +}; + +struct i40e_virtchnl_rss_lut { + u16 vsi_id; + u16 lut_entries; + u8 lut[1]; /* RSS lookup table*/ +}; + +/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS + * I40E_VIRTCHNL_OP_SET_RSS_HENA + * VF sends these messages to get and set the hash filter enable bits for RSS. + * By default, the PF sets these to all possible traffic types that the + * hardware supports. The VF can query this value if it wants to change the + * traffic types that are hashed by the hardware. + * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h + */ +struct i40e_virtchnl_rss_hena { + u64 hena; +}; + /* I40E_VIRTCHNL_OP_EVENT * PF sends this message to inform the VF driver of events that may affect it. * No direct response is expected from the VF, though it may generate other diff --git a/drivers/net/i40e/i40e_ethdev.c b/drivers/net/i40e/i40e_ethdev.c index bc28d3c3..f414d938 100644 --- a/drivers/net/i40e/i40e_ethdev.c +++ b/drivers/net/i40e/i40e_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -63,6 +63,9 @@ #include "i40e_pf.h" #include "i40e_regs.h" +#define ETH_I40E_FLOATING_VEB_ARG "enable_floating_veb" +#define ETH_I40E_FLOATING_VEB_LIST_ARG "floating_veb_list" + #define I40E_CLEAR_PXE_WAIT_MS 200 /* Maximun number of capability elements */ @@ -305,7 +308,10 @@ static int i40e_dev_set_link_down(struct rte_eth_dev *dev); static void i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int i40e_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); +static int i40e_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit); static void i40e_dev_stats_reset(struct rte_eth_dev *dev); static int i40e_dev_queue_stats_mapping_set(struct rte_eth_dev *dev, uint16_t queue_id, @@ -447,6 +453,8 @@ static int i40e_get_eeprom(struct rte_eth_dev *dev, static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); +static int i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu); + static const struct rte_pci_id pci_id_i40e_map[] = { #define RTE_PCI_DEV_ID_DECL_I40E(vend, dev) {RTE_PCI_DEVICE(vend, dev)}, #include "rte_pci_dev_ids.h" @@ -467,6 +475,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .link_update = i40e_dev_link_update, .stats_get = i40e_dev_stats_get, .xstats_get = i40e_dev_xstats_get, + .xstats_get_names = i40e_dev_xstats_get_names, .stats_reset = i40e_dev_stats_reset, .xstats_reset = i40e_dev_stats_reset, .queue_stats_mapping_set = i40e_dev_queue_stats_mapping_set, @@ -520,6 +529,7 @@ static const struct eth_dev_ops i40e_eth_dev_ops = { .get_eeprom_length = i40e_get_eeprom_length, .get_eeprom = i40e_get_eeprom, .mac_addr_set = i40e_set_default_mac_addr, + .mtu_set = i40e_dev_mtu_set, }; /* store statistics names and its offset in stats structure */ @@ -751,6 +761,161 @@ i40e_add_tx_flow_control_drop_filter(struct i40e_pf *pf) } static int +floating_veb_list_handler(__rte_unused const char *key, + const char *floating_veb_value, + void *opaque) +{ + int idx = 0; + unsigned int count = 0; + char *end = NULL; + int min, max; + bool *vf_floating_veb = opaque; + + while (isblank(*floating_veb_value)) + floating_veb_value++; + + /* Reset floating VEB configuration for VFs */ + for (idx = 0; idx < I40E_MAX_VF; idx++) + vf_floating_veb[idx] = false; + + min = I40E_MAX_VF; + do { + while (isblank(*floating_veb_value)) + floating_veb_value++; + if (*floating_veb_value == '\0') + return -1; + errno = 0; + idx = strtoul(floating_veb_value, &end, 10); + if (errno || end == NULL) + return -1; + while (isblank(*end)) + end++; + if (*end == '-') { + min = idx; + } else if ((*end == ';') || (*end == '\0')) { + max = idx; + if (min == I40E_MAX_VF) + min = idx; + if (max >= I40E_MAX_VF) + max = I40E_MAX_VF - 1; + for (idx = min; idx <= max; idx++) { + vf_floating_veb[idx] = true; + count++; + } + min = I40E_MAX_VF; + } else { + return -1; + } + floating_veb_value = end + 1; + } while (*end != '\0'); + + if (count == 0) + return -1; + + return 0; +} + +static void +config_vf_floating_veb(struct rte_devargs *devargs, + uint16_t floating_veb, + bool *vf_floating_veb) +{ + struct rte_kvargs *kvlist; + int i; + const char *floating_veb_list = ETH_I40E_FLOATING_VEB_LIST_ARG; + + if (!floating_veb) + return; + /* All the VFs attach to the floating VEB by default + * when the floating VEB is enabled. 
+ */ + for (i = 0; i < I40E_MAX_VF; i++) + vf_floating_veb[i] = true; + + if (devargs == NULL) + return; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return; + + if (!rte_kvargs_count(kvlist, floating_veb_list)) { + rte_kvargs_free(kvlist); + return; + } + /* When the floating_veb_list parameter exists, all the VFs + * will attach to the legacy VEB firstly, then configure VFs + * to the floating VEB according to the floating_veb_list. + */ + if (rte_kvargs_process(kvlist, floating_veb_list, + floating_veb_list_handler, + vf_floating_veb) < 0) { + rte_kvargs_free(kvlist); + return; + } + rte_kvargs_free(kvlist); +} + +static int +i40e_check_floating_handler(__rte_unused const char *key, + const char *value, + __rte_unused void *opaque) +{ + if (strcmp(value, "1")) + return -1; + + return 0; +} + +static int +is_floating_veb_supported(struct rte_devargs *devargs) +{ + struct rte_kvargs *kvlist; + const char *floating_veb_key = ETH_I40E_FLOATING_VEB_ARG; + + if (devargs == NULL) + return 0; + + kvlist = rte_kvargs_parse(devargs->args, NULL); + if (kvlist == NULL) + return 0; + + if (!rte_kvargs_count(kvlist, floating_veb_key)) { + rte_kvargs_free(kvlist); + return 0; + } + /* Floating VEB is enabled when there's key-value: + * enable_floating_veb=1 + */ + if (rte_kvargs_process(kvlist, floating_veb_key, + i40e_check_floating_handler, NULL) < 0) { + rte_kvargs_free(kvlist); + return 0; + } + rte_kvargs_free(kvlist); + + return 1; +} + +static void +config_floating_veb(struct rte_eth_dev *dev) +{ + struct rte_pci_device *pci_dev = dev->pci_dev; + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct i40e_hw *hw = I40E_DEV_PRIVATE_TO_HW(dev->data->dev_private); + + memset(pf->floating_veb_list, 0, sizeof(pf->floating_veb_list)); + + if (hw->aq.fw_maj_ver >= FLOATING_VEB_SUPPORTED_FW_MAJ) { + pf->floating_veb = is_floating_veb_supported(pci_dev->devargs); + config_vf_floating_veb(pci_dev->devargs, pf->floating_veb, + pf->floating_veb_list); + } else { + pf->floating_veb = false; + } +} + +static int eth_i40e_dev_init(struct rte_eth_dev *dev) { struct rte_pci_device *pci_dev; @@ -843,6 +1008,8 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) ((hw->nvm.version >> 4) & 0xff), (hw->nvm.version & 0xf), hw->nvm.eetrack); + /* Need the special FW version to support floating VEB */ + config_floating_veb(dev); /* Clear PXE mode */ i40e_clear_pxe_mode(hw); @@ -920,12 +1087,6 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) "VLAN ether type"); goto err_setup_pf_switch; } - ret = i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, ETHER_TYPE_VLAN); - if (ret != I40E_SUCCESS) { - PMD_INIT_LOG(ERR, "Failed to set the default outer " - "VLAN ether type"); - goto err_setup_pf_switch; - } /* PF setup, which includes VSI setup */ ret = i40e_pf_setup(pf); @@ -934,6 +1095,9 @@ eth_i40e_dev_init(struct rte_eth_dev *dev) goto err_setup_pf_switch; } + /* reset all stats of the device, including pf and main vsi */ + i40e_dev_stats_reset(dev); + vsi = pf->main_vsi; /* Disable double vlan by default */ @@ -1400,15 +1564,58 @@ i40e_parse_link_speeds(uint16_t link_speeds) } static int -i40e_phy_conf_link(__rte_unused struct i40e_hw *hw, - __rte_unused uint8_t abilities, - __rte_unused uint8_t force_speed) -{ - /* Skip any phy config on both 10G and 40G interfaces, as a workaround - * for the link control limitation of that all link control should be - * handled by firmware. It should follow up if link control will be - * opened to software driver in future firmware versions. 
- */ +i40e_phy_conf_link(struct i40e_hw *hw, + uint8_t abilities, + uint8_t force_speed) +{ + enum i40e_status_code status; + struct i40e_aq_get_phy_abilities_resp phy_ab; + struct i40e_aq_set_phy_config phy_conf; + const uint8_t mask = I40E_AQ_PHY_FLAG_PAUSE_TX | + I40E_AQ_PHY_FLAG_PAUSE_RX | + I40E_AQ_PHY_FLAG_PAUSE_RX | + I40E_AQ_PHY_FLAG_LOW_POWER; + const uint8_t advt = I40E_LINK_SPEED_40GB | + I40E_LINK_SPEED_10GB | + I40E_LINK_SPEED_1GB | + I40E_LINK_SPEED_100MB; + int ret = -ENOTSUP; + + + status = i40e_aq_get_phy_capabilities(hw, false, false, &phy_ab, + NULL); + if (status) + return ret; + + memset(&phy_conf, 0, sizeof(phy_conf)); + + /* bits 0-2 use the values from get_phy_abilities_resp */ + abilities &= ~mask; + abilities |= phy_ab.abilities & mask; + + /* update ablities and speed */ + if (abilities & I40E_AQ_PHY_AN_ENABLED) + phy_conf.link_speed = advt; + else + phy_conf.link_speed = force_speed; + + phy_conf.abilities = abilities; + + /* use get_phy_abilities_resp value for the rest */ + phy_conf.phy_type = phy_ab.phy_type; + phy_conf.eee_capability = phy_ab.eee_capability; + phy_conf.eeer = phy_ab.eeer_val; + phy_conf.low_power_ctrl = phy_ab.d3_lpan; + + PMD_DRV_LOG(DEBUG, "\tCurrent: abilities %x, link_speed %x", + phy_ab.abilities, phy_ab.link_speed); + PMD_DRV_LOG(DEBUG, "\tConfig: abilities %x, link_speed %x", + phy_conf.abilities, phy_conf.link_speed); + + status = i40e_aq_set_phy_config(hw, &phy_conf, NULL); + if (status) + return ret; + return I40E_SUCCESS; } @@ -1424,8 +1631,13 @@ i40e_apply_link_speed(struct rte_eth_dev *dev) abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK; if (!(conf->link_speeds & ETH_LINK_SPEED_FIXED)) abilities |= I40E_AQ_PHY_AN_ENABLED; - else - abilities |= I40E_AQ_PHY_LINK_ENABLED; + abilities |= I40E_AQ_PHY_LINK_ENABLED; + + /* Skip changing speed on 40G interfaces, FW does not support */ + if (i40e_is_40G_device(hw->device_id)) { + speed = I40E_LINK_SPEED_UNKNOWN; + abilities |= I40E_AQ_PHY_AN_ENABLED; + } return i40e_phy_conf_link(hw, abilities, speed); } @@ -1661,7 +1873,7 @@ i40e_dev_promiscuous_enable(struct rte_eth_dev *dev) int status; status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, - true, NULL); + true, NULL, true); if (status != I40E_SUCCESS) PMD_DRV_LOG(ERR, "Failed to enable unicast promiscuous"); @@ -1681,7 +1893,7 @@ i40e_dev_promiscuous_disable(struct rte_eth_dev *dev) int status; status = i40e_aq_set_vsi_unicast_promiscuous(hw, vsi->seid, - false, NULL); + false, NULL, true); if (status != I40E_SUCCESS) PMD_DRV_LOG(ERR, "Failed to disable unicast promiscuous"); @@ -1735,7 +1947,7 @@ i40e_dev_set_link_up(struct rte_eth_dev *dev) * Set device link down. 
*/ static int -i40e_dev_set_link_down(__rte_unused struct rte_eth_dev *dev) +i40e_dev_set_link_down(struct rte_eth_dev *dev) { uint8_t speed = I40E_LINK_SPEED_UNKNOWN; uint8_t abilities = I40E_AQ_PHY_ENABLE_ATOMIC_LINK; @@ -2099,7 +2311,6 @@ i40e_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->obytes = ns->eth.tx_bytes; stats->oerrors = ns->eth.tx_errors + pf->main_vsi->eth_stats.tx_errors; - stats->imcasts = pf->main_vsi->eth_stats.rx_multicast; /* Rx Errors */ stats->imissed = ns->eth.rx_discards + @@ -2203,8 +2414,58 @@ i40e_xstats_calc_num(void) (I40E_NB_TXQ_PRIO_XSTATS * 8); } +static int i40e_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned count = 0; + unsigned i, prio; + + if (xstats_names == NULL) + return i40e_xstats_calc_num(); + + /* Note: limit checked in rte_eth_xstats_names() */ + + /* Get stats from i40e_eth_stats struct */ + for (i = 0; i < I40E_NB_ETH_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", rte_i40e_stats_strings[i].name); + count++; + } + + /* Get individiual stats from i40e_hw_port struct */ + for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", rte_i40e_hw_port_strings[i].name); + count++; + } + + for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", prio, + rte_i40e_rxq_prio_strings[i].name); + count++; + } + } + + for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) { + for (prio = 0; prio < 8; prio++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", prio, + rte_i40e_txq_prio_strings[i].name); + count++; + } + } + return count; +} + static int -i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); @@ -2225,8 +2486,6 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Get stats from i40e_eth_stats struct */ for (i = 0; i < I40E_NB_ETH_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "%s", rte_i40e_stats_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)&hw_stats->eth) + rte_i40e_stats_strings[i].offset); count++; @@ -2234,19 +2493,13 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Get individiual stats from i40e_hw_port struct */ for (i = 0; i < I40E_NB_HW_PORT_XSTATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "%s", rte_i40e_hw_port_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + - rte_i40e_hw_port_strings[i].offset); + rte_i40e_hw_port_strings[i].offset); count++; } for (i = 0; i < I40E_NB_RXQ_PRIO_XSTATS; i++) { for (prio = 0; prio < 8; prio++) { - snprintf(xstats[count].name, - sizeof(xstats[count].name), - "rx_priority%u_%s", prio, - rte_i40e_rxq_prio_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_i40e_rxq_prio_strings[i].offset + @@ -2257,10 +2510,6 @@ i40e_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, for (i = 0; i < I40E_NB_TXQ_PRIO_XSTATS; i++) { for (prio = 0; prio < 8; prio++) { - snprintf(xstats[count].name, - sizeof(xstats[count].name), - 
"tx_priority%u_%s", prio, - rte_i40e_txq_prio_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_i40e_txq_prio_strings[i].offset + @@ -2390,13 +2639,24 @@ i40e_vlan_tpid_set(struct rte_eth_dev *dev, uint64_t reg_r = 0, reg_w = 0; uint16_t reg_id = 0; int ret = 0; + int qinq = dev->data->dev_conf.rxmode.hw_vlan_extend; switch (vlan_type) { case ETH_VLAN_TYPE_OUTER: - reg_id = 2; + if (qinq) + reg_id = 2; + else + reg_id = 3; break; case ETH_VLAN_TYPE_INNER: - reg_id = 3; + if (qinq) + reg_id = 3; + else { + ret = -EINVAL; + PMD_DRV_LOG(ERR, + "Unsupported vlan type in single vlan.\n"); + return ret; + } break; default: ret = -EINVAL; @@ -2458,8 +2718,14 @@ i40e_vlan_offload_set(struct rte_eth_dev *dev, int mask) } if (mask & ETH_VLAN_EXTEND_MASK) { - if (dev->data->dev_conf.rxmode.hw_vlan_extend) + if (dev->data->dev_conf.rxmode.hw_vlan_extend) { i40e_vsi_config_double_vlan(vsi, TRUE); + /* Set global registers with default ether type value */ + i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_OUTER, + ETHER_TYPE_VLAN); + i40e_vlan_tpid_set(dev, ETH_VLAN_TYPE_INNER, + ETHER_TYPE_VLAN); + } else i40e_vsi_config_double_vlan(vsi, FALSE); } @@ -3716,21 +3982,27 @@ i40e_veb_release(struct i40e_veb *veb) struct i40e_vsi *vsi; struct i40e_hw *hw; - if (veb == NULL || veb->associate_vsi == NULL) + if (veb == NULL) return -EINVAL; if (!TAILQ_EMPTY(&veb->head)) { PMD_DRV_LOG(ERR, "VEB still has VSI attached, can't remove"); return -EACCES; } + /* associate_vsi field is NULL for floating VEB */ + if (veb->associate_vsi != NULL) { + vsi = veb->associate_vsi; + hw = I40E_VSI_TO_HW(vsi); - vsi = veb->associate_vsi; - hw = I40E_VSI_TO_HW(vsi); + vsi->uplink_seid = veb->uplink_seid; + vsi->veb = NULL; + } else { + veb->associate_pf->main_vsi->floating_veb = NULL; + hw = I40E_VSI_TO_HW(veb->associate_pf->main_vsi); + } - vsi->uplink_seid = veb->uplink_seid; i40e_aq_delete_element(hw, veb->seid, NULL); rte_free(veb); - vsi->veb = NULL; return I40E_SUCCESS; } @@ -3742,9 +4014,9 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) int ret; struct i40e_hw *hw; - if (NULL == pf || vsi == NULL) { - PMD_DRV_LOG(ERR, "veb setup failed, " - "associated VSI shouldn't null"); + if (pf == NULL) { + PMD_DRV_LOG(ERR, + "veb setup failed, associated PF shouldn't null"); return NULL; } hw = I40E_PF_TO_HW(pf); @@ -3756,11 +4028,19 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) } veb->associate_vsi = vsi; + veb->associate_pf = pf; TAILQ_INIT(&veb->head); - veb->uplink_seid = vsi->uplink_seid; + veb->uplink_seid = vsi ? 
vsi->uplink_seid : 0; - ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid, - I40E_DEFAULT_TCMAP, false, &veb->seid, false, NULL); + /* create floating veb if vsi is NULL */ + if (vsi != NULL) { + ret = i40e_aq_add_veb(hw, veb->uplink_seid, vsi->seid, + I40E_DEFAULT_TCMAP, false, + &veb->seid, false, NULL); + } else { + ret = i40e_aq_add_veb(hw, 0, 0, I40E_DEFAULT_TCMAP, + true, &veb->seid, false, NULL); + } if (ret != I40E_SUCCESS) { PMD_DRV_LOG(ERR, "Add veb failed, aq_err: %d", @@ -3776,10 +4056,10 @@ i40e_veb_setup(struct i40e_pf *pf, struct i40e_vsi *vsi) hw->aq.asq_last_status); goto fail; } - /* Get VEB bandwidth, to be implemented */ /* Now associated vsi binding to the VEB, set uplink to this VEB */ - vsi->uplink_seid = veb->seid; + if (vsi) + vsi->uplink_seid = veb->seid; return veb; fail: @@ -3795,6 +4075,7 @@ i40e_vsi_release(struct i40e_vsi *vsi) struct i40e_vsi_list *vsi_list; int ret; struct i40e_mac_filter *f; + uint16_t user_param = vsi->user_param; if (!vsi) return I40E_SUCCESS; @@ -3812,12 +4093,22 @@ i40e_vsi_release(struct i40e_vsi *vsi) i40e_veb_release(vsi->veb); } + if (vsi->floating_veb) { + TAILQ_FOREACH(vsi_list, &vsi->floating_veb->head, list) { + if (i40e_vsi_release(vsi_list->vsi) != I40E_SUCCESS) + return -1; + TAILQ_REMOVE(&vsi->floating_veb->head, vsi_list, list); + } + } + /* Remove all macvlan filters of the VSI */ i40e_vsi_remove_all_macvlan_filter(vsi); TAILQ_FOREACH(f, &vsi->mac_list, next) rte_free(f); - if (vsi->type != I40E_VSI_MAIN) { + if (vsi->type != I40E_VSI_MAIN && + ((vsi->type != I40E_VSI_SRIOV) || + !pf->floating_veb_list[user_param])) { /* Remove vsi from parent's sibling list */ if (vsi->parent_vsi == NULL || vsi->parent_vsi->veb == NULL) { PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL"); @@ -3831,6 +4122,24 @@ i40e_vsi_release(struct i40e_vsi *vsi) if (ret != I40E_SUCCESS) PMD_DRV_LOG(ERR, "Failed to delete element"); } + + if ((vsi->type == I40E_VSI_SRIOV) && + pf->floating_veb_list[user_param]) { + /* Remove vsi from parent's sibling list */ + if (vsi->parent_vsi == NULL || + vsi->parent_vsi->floating_veb == NULL) { + PMD_DRV_LOG(ERR, "VSI's parent VSI is NULL"); + return I40E_ERR_PARAM; + } + TAILQ_REMOVE(&vsi->parent_vsi->floating_veb->head, + &vsi->sib_vsi_list, list); + + /* Remove all switch element of the VSI */ + ret = i40e_aq_delete_element(hw, vsi->seid, NULL); + if (ret != I40E_SUCCESS) + PMD_DRV_LOG(ERR, "Failed to delete element"); + } + i40e_res_pool_free(&pf->qp_pool, vsi->base_queue); if (vsi->type != I40E_VSI_SRIOV) @@ -3999,7 +4308,8 @@ i40e_vsi_setup(struct i40e_pf *pf, struct ether_addr broadcast = {.addr_bytes = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}}; - if (type != I40E_VSI_MAIN && uplink_vsi == NULL) { + if (type != I40E_VSI_MAIN && type != I40E_VSI_SRIOV && + uplink_vsi == NULL) { PMD_DRV_LOG(ERR, "VSI setup failed, " "VSI link shouldn't be NULL"); return NULL; @@ -4011,11 +4321,18 @@ i40e_vsi_setup(struct i40e_pf *pf, return NULL; } - /* If uplink vsi didn't setup VEB, create one first */ - if (type != I40E_VSI_MAIN && uplink_vsi->veb == NULL) { + /* two situations + * 1.type is not MAIN and uplink vsi is not NULL + * If uplink vsi didn't setup VEB, create one first under veb field + * 2.type is SRIOV and the uplink is NULL + * If floating VEB is NULL, create one veb under floating veb field + */ + + if (type != I40E_VSI_MAIN && uplink_vsi != NULL && + uplink_vsi->veb == NULL) { uplink_vsi->veb = i40e_veb_setup(pf, uplink_vsi); - if (NULL == uplink_vsi->veb) { + if (uplink_vsi->veb == NULL) { PMD_DRV_LOG(ERR, "VEB 
setup failed"); return NULL; } @@ -4023,6 +4340,16 @@ i40e_vsi_setup(struct i40e_pf *pf, i40e_enable_pf_lb(pf); } + if (type == I40E_VSI_SRIOV && uplink_vsi == NULL && + pf->main_vsi->floating_veb == NULL) { + pf->main_vsi->floating_veb = i40e_veb_setup(pf, uplink_vsi); + + if (pf->main_vsi->floating_veb == NULL) { + PMD_DRV_LOG(ERR, "VEB setup failed"); + return NULL; + } + } + vsi = rte_zmalloc("i40e_vsi", sizeof(struct i40e_vsi), 0); if (!vsi) { PMD_DRV_LOG(ERR, "Failed to allocate memory for vsi"); @@ -4032,7 +4359,7 @@ i40e_vsi_setup(struct i40e_pf *pf, vsi->type = type; vsi->adapter = I40E_PF_TO_ADAPTER(pf); vsi->max_macaddrs = I40E_NUM_MACADDR_MAX; - vsi->parent_vsi = uplink_vsi; + vsi->parent_vsi = uplink_vsi ? uplink_vsi : pf->main_vsi; vsi->user_param = user_param; /* Allocate queues */ switch (vsi->type) { @@ -4186,7 +4513,10 @@ i40e_vsi_setup(struct i40e_pf *pf, * For other VSI, the uplink_seid equals to uplink VSI's * uplink_seid since they share same VEB */ - vsi->uplink_seid = uplink_vsi->uplink_seid; + if (uplink_vsi == NULL) + vsi->uplink_seid = pf->main_vsi->floating_veb->seid; + else + vsi->uplink_seid = uplink_vsi->uplink_seid; ctxt.pf_num = hw->pf_id; ctxt.vf_num = hw->func_caps.vf_base_id + user_param; ctxt.uplink_seid = vsi->uplink_seid; @@ -4294,8 +4624,13 @@ i40e_vsi_setup(struct i40e_pf *pf, vsi->seid = ctxt.seid; vsi->vsi_id = ctxt.vsi_number; vsi->sib_vsi_list.vsi = vsi; - TAILQ_INSERT_TAIL(&uplink_vsi->veb->head, - &vsi->sib_vsi_list, list); + if (vsi->type == I40E_VSI_SRIOV && uplink_vsi == NULL) { + TAILQ_INSERT_TAIL(&pf->main_vsi->floating_veb->head, + &vsi->sib_vsi_list, list); + } else { + TAILQ_INSERT_TAIL(&uplink_vsi->veb->head, + &vsi->sib_vsi_list, list); + } } /* MAC/VLAN configuration */ @@ -9031,6 +9366,7 @@ static int i40e_get_regs(struct rte_eth_dev *dev, arr_idx2++) { reg_offset = arr_idx * reg_info->stride1 + arr_idx2 * reg_info->stride2; + reg_offset += reg_info->base_addr; ptr_data[reg_offset >> 2] = i40e_read_rx_ctl(hw, reg_offset); } @@ -9046,6 +9382,7 @@ static int i40e_get_regs(struct rte_eth_dev *dev, arr_idx2++) { reg_offset = arr_idx * reg_info->stride1 + arr_idx2 * reg_info->stride2; + reg_offset += reg_info->base_addr; ptr_data[reg_offset >> 2] = I40E_READ_REG(hw, reg_offset); } @@ -9104,3 +9441,34 @@ static void i40e_set_default_mac_addr(struct rte_eth_dev *dev, /* Flags: 0x3 updates port address */ i40e_aq_mac_address_write(hw, 0x3, mac_addr->addr_bytes, NULL); } + +static int +i40e_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct i40e_pf *pf = I40E_DEV_PRIVATE_TO_PF(dev->data->dev_private); + struct rte_eth_dev_data *dev_data = pf->dev_data; + uint32_t frame_size = mtu + ETHER_HDR_LEN + + ETHER_CRC_LEN + I40E_VLAN_TAG_SIZE; + int ret = 0; + + /* check if mtu is within the allowed range */ + if ((mtu < ETHER_MIN_MTU) || (frame_size > I40E_FRAME_SIZE_MAX)) + return -EINVAL; + + /* mtu setting is forbidden if port is start */ + if (dev_data->dev_started) { + PMD_DRV_LOG(ERR, + "port %d must be stopped before configuration\n", + dev_data->port_id); + return -EBUSY; + } + + if (frame_size > ETHER_MAX_LEN) + dev_data->dev_conf.rxmode.jumbo_frame = 1; + else + dev_data->dev_conf.rxmode.jumbo_frame = 0; + + dev_data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + + return ret; +} diff --git a/drivers/net/i40e/i40e_ethdev.h b/drivers/net/i40e/i40e_ethdev.h index cfd23999..92c8fad0 100644 --- a/drivers/net/i40e/i40e_ethdev.h +++ b/drivers/net/i40e/i40e_ethdev.h @@ -36,6 +36,7 @@ #include <rte_eth_ctrl.h> #include <rte_time.h> 
+#include <rte_kvargs.h> #define I40E_VLAN_TAG_SIZE 4 @@ -55,6 +56,8 @@ #define I40E_VFTA_SIZE (4096 / I40E_UINT32_BIT_SIZE) /* Maximun number of MAC addresses */ #define I40E_NUM_MACADDR_MAX 64 +/* Maximum number of VFs */ +#define I40E_MAX_VF 128 /* * vlan_id is a 12 bit number. @@ -171,6 +174,10 @@ enum i40e_flxpld_layer_idx { #define I40E_QUEUE_ITR_INTERVAL_DEFAULT 32 /* 32 us */ #define I40E_QUEUE_ITR_INTERVAL_MAX 8160 /* 8160 us */ +/* Special FW support this floating VEB feature */ +#define FLOATING_VEB_SUPPORTED_FW_MAJ 5 +#define FLOATING_VEB_SUPPORTED_FW_MIN 0 + struct i40e_adapter; /** @@ -219,6 +226,7 @@ struct i40e_bw_info { struct i40e_veb { struct i40e_vsi_list_head head; struct i40e_vsi *associate_vsi; /* Associate VSI who owns the VEB */ + struct i40e_pf *associate_pf; /* Associate PF who owns the VEB */ uint16_t seid; /* The seid of VEB itself */ uint16_t uplink_seid; /* The uplink seid of this VEB */ uint16_t stats_idx; @@ -259,6 +267,7 @@ struct i40e_vsi { struct i40e_vsi_list sib_vsi_list; /* sibling vsi list */ struct i40e_vsi *parent_vsi; struct i40e_veb *veb; /* Associated veb, could be null */ + struct i40e_veb *floating_veb; /* Associated floating veb */ bool offset_loaded; enum i40e_vsi_type type; /* VSI types */ uint16_t vlan_num; /* Total VLAN number */ @@ -450,6 +459,9 @@ struct i40e_pf { struct i40e_fc_conf fc_conf; /* Flow control conf */ struct i40e_mirror_rule_list mirror_list; uint16_t nb_mirror_rule; /* The number of mirror rules */ + bool floating_veb; /* The flag to use the floating VEB */ + /* The floating enable flag for the specific VF */ + bool floating_veb_list[I40E_MAX_VF]; }; enum pending_msg { diff --git a/drivers/net/i40e/i40e_ethdev_vf.c b/drivers/net/i40e/i40e_ethdev_vf.c index 2bce69b3..7b6df1d8 100644 --- a/drivers/net/i40e/i40e_ethdev_vf.c +++ b/drivers/net/i40e/i40e_ethdev_vf.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -111,7 +111,10 @@ static int i40evf_dev_link_update(struct rte_eth_dev *dev, static void i40evf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); +static int i40evf_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit); static void i40evf_dev_xstats_reset(struct rte_eth_dev *dev); static int i40evf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on); @@ -196,6 +199,7 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = { .link_update = i40evf_dev_link_update, .stats_get = i40evf_dev_stats_get, .xstats_get = i40evf_dev_xstats_get, + .xstats_get_names = i40evf_dev_xstats_get_names, .xstats_reset = i40evf_dev_xstats_reset, .dev_close = i40evf_dev_close, .dev_infos_get = i40evf_dev_info_get, @@ -214,6 +218,9 @@ static const struct eth_dev_ops i40evf_eth_dev_ops = { .rx_descriptor_done = i40e_dev_rx_descriptor_done, .tx_queue_setup = i40e_dev_tx_queue_setup, .tx_queue_release = i40e_dev_tx_queue_release, + .rx_queue_count = i40e_dev_rx_queue_count, + .rxq_info_get = i40e_rxq_info_get, + .txq_info_get = i40e_txq_info_get, .mac_addr_add = i40evf_add_mac_addr, .mac_addr_remove = i40evf_del_mac_addr, .reta_update = i40evf_dev_rss_reta_update, @@ -327,8 +334,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) struct i40e_vf *vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); struct i40evf_arq_msg_info info; enum i40evf_aq_result ret; - int err = -1; - int i = 0; + int err, i = 0; if (_atomic_set_cmd(vf, args->ops)) return -1; @@ -349,19 +355,19 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) switch (args->ops) { case I40E_VIRTCHNL_OP_RESET_VF: /*no need to process in this function */ + err = 0; break; case I40E_VIRTCHNL_OP_VERSION: case I40E_VIRTCHNL_OP_GET_VF_RESOURCES: /* for init adminq commands, need to poll the response */ + err = -1; do { ret = i40evf_read_pfmsg(dev, &info); if (ret == I40EVF_MSG_CMD) { err = 0; break; - } else if (ret == I40EVF_MSG_ERR) { - err = -1; + } else if (ret == I40EVF_MSG_ERR) break; - } rte_delay_ms(ASQ_DELAY_MS); /* If don't read msg or read sys event, continue */ } while (i++ < MAX_TRY_TIMES); @@ -370,6 +376,7 @@ i40evf_execute_vf_cmd(struct rte_eth_dev *dev, struct vf_cmd_info *args) default: /* for other adminq in running time, waiting the cmd done flag */ + err = -1; do { if (vf->pend_cmd == I40E_VIRTCHNL_OP_UNKNOWN) { err = 0; @@ -538,7 +545,7 @@ i40evf_config_vlan_pvid(struct rte_eth_dev *dev, struct vf_cmd_info args; struct i40e_virtchnl_pvid_info tpid_info; - if (dev == NULL || info == NULL) { + if (info == NULL) { PMD_DRV_LOG(ERR, "invalid parameters"); return I40E_ERR_PARAM; } @@ -981,8 +988,23 @@ i40evf_dev_xstats_reset(struct rte_eth_dev *dev) vf->vsi.eth_stats_offset = vf->vsi.eth_stats; } +static int i40evf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned i; + + if (xstats_names != NULL) + for (i = 0; i < I40EVF_NB_XSTATS; i++) { + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_i40evf_stats_strings[i].name); + } + return I40EVF_NB_XSTATS; +} + static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n) + struct 
rte_eth_xstat *xstats, unsigned n) { int ret; unsigned i; @@ -1000,8 +1022,7 @@ static int i40evf_dev_xstats_get(struct rte_eth_dev *dev, /* loop over xstats array and values from pstats */ for (i = 0; i < I40EVF_NB_XSTATS; i++) { - snprintf(xstats[i].name, sizeof(xstats[i].name), - "%s", rte_i40evf_stats_strings[i].name); + xstats[i].id = i; xstats[i].value = *(uint64_t *)(((char *)pstats) + rte_i40evf_stats_strings[i].offset); } @@ -1567,6 +1588,8 @@ i40evf_dev_configure(struct rte_eth_dev *dev) { struct i40e_adapter *ad = I40E_DEV_PRIVATE_TO_ADAPTER(dev->data->dev_private); + struct rte_eth_conf *conf = &dev->data->dev_conf; + struct i40e_vf *vf; /* Initialize to TRUE. If any of Rx queues doesn't meet the bulk * allocation or vector Rx preconditions we will reset it. @@ -1576,6 +1599,19 @@ i40evf_dev_configure(struct rte_eth_dev *dev) ad->tx_simple_allowed = true; ad->tx_vec_allowed = true; + /* For non-DPDK PF drivers, VF has no ability to disable HW + * CRC strip, and is implicitly enabled by the PF. + */ + if (!conf->rxmode.hw_strip_crc) { + vf = I40EVF_DEV_PRIVATE_TO_VF(dev->data->dev_private); + if ((vf->version_major == I40E_VIRTCHNL_VERSION_MAJOR) && + (vf->version_minor <= I40E_VIRTCHNL_VERSION_MINOR)) { + /* Peer is running non-DPDK PF driver. */ + PMD_INIT_LOG(ERR, "VF can't disable HW CRC Strip"); + return -EINVAL; + } + } + return i40evf_init_vlan(dev); } diff --git a/drivers/net/i40e/i40e_fdir.c b/drivers/net/i40e/i40e_fdir.c index ff57e8a2..f65c4110 100644 --- a/drivers/net/i40e/i40e_fdir.c +++ b/drivers/net/i40e/i40e_fdir.c @@ -94,7 +94,9 @@ I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK) | \ (((fsize) << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT) & \ I40E_PRTQF_FLX_PIT_FSIZE_MASK) | \ - ((((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR) << \ + ((((dst_offset) == NONUSE_FLX_PIT_DEST_OFF ? 
\ + NONUSE_FLX_PIT_DEST_OFF : \ + ((dst_offset) + I40E_FLX_OFFSET_IN_FIELD_VECTOR)) << \ I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT) & \ I40E_PRTQF_FLX_PIT_DEST_OFF_MASK)) @@ -113,29 +115,11 @@ #define I40E_FLEX_WORD_MASK(off) (0x80 >> (off)) -static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq); -static int i40e_check_fdir_flex_conf( - const struct rte_eth_fdir_flex_conf *conf); -static void i40e_set_flx_pld_cfg(struct i40e_pf *pf, - const struct rte_eth_flex_payload_cfg *cfg); -static void i40e_set_flex_mask_on_pctype(struct i40e_pf *pf, - enum i40e_filter_pctype pctype, - const struct rte_eth_fdir_flex_mask *mask_cfg); -static int i40e_fdir_construct_pkt(struct i40e_pf *pf, - const struct rte_eth_fdir_input *fdir_input, - unsigned char *raw_pkt); -static int i40e_add_del_fdir_filter(struct rte_eth_dev *dev, - const struct rte_eth_fdir_filter *filter, - bool add); static int i40e_fdir_filter_programming(struct i40e_pf *pf, enum i40e_filter_pctype pctype, const struct rte_eth_fdir_filter *filter, bool add); static int i40e_fdir_flush(struct rte_eth_dev *dev); -static void i40e_fdir_info_get(struct rte_eth_dev *dev, - struct rte_eth_fdir_info *fdir); -static void i40e_fdir_stats_get(struct rte_eth_dev *dev, - struct rte_eth_fdir_stats *stat); static int i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) @@ -163,7 +147,7 @@ i40e_fdir_rx_queue_init(struct i40e_rx_queue *rxq) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = 0; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + rx_ctx.showiv = 0; rx_ctx.prefena = 1; err = i40e_clear_lan_rx_queue_context(hw, rxq->reg_idx); @@ -1159,7 +1143,8 @@ i40e_fdir_filter_programming(struct i40e_pf *pf, fdirdp->dtype_cmd_cntindex |= rte_cpu_to_le_32(I40E_TXD_FLTR_QW1_CNT_ENA_MASK); fdirdp->dtype_cmd_cntindex |= - rte_cpu_to_le_32((pf->fdir.match_counter_index << + rte_cpu_to_le_32( + ((uint32_t)pf->fdir.match_counter_index << I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT) & I40E_TXD_FLTR_QW1_CNTINDEX_MASK); diff --git a/drivers/net/i40e/i40e_pf.c b/drivers/net/i40e/i40e_pf.c index 5afd61a0..d5b2d450 100644 --- a/drivers/net/i40e/i40e_pf.c +++ b/drivers/net/i40e/i40e_pf.c @@ -123,7 +123,8 @@ int i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) { uint32_t val, i; - struct i40e_hw *hw = I40E_PF_TO_HW(vf->pf); + struct i40e_hw *hw; + struct i40e_pf *pf; uint16_t vf_id, abs_vf_id, vf_msix_num; int ret; struct i40e_virtchnl_queue_select qsel; @@ -131,6 +132,8 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) if (vf == NULL) return -EINVAL; + pf = vf->pf; + hw = I40E_PF_TO_HW(vf->pf); vf_id = vf->vf_idx; abs_vf_id = vf_id + hw->func_caps.vf_base_id; @@ -224,8 +227,14 @@ i40e_pf_host_vf_reset(struct i40e_pf_vf *vf, bool do_hw_reset) I40E_WRITE_FLUSH(hw); /* Allocate resource again */ - vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV, - vf->pf->main_vsi, vf->vf_idx); + if (pf->floating_veb && pf->floating_veb_list[vf_id]) { + vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV, + NULL, vf->vf_idx); + } else { + vf->vsi = i40e_vsi_setup(vf->pf, I40E_VSI_SRIOV, + vf->pf->main_vsi, vf->vf_idx); + } + if (vf->vsi == NULL) { PMD_DRV_LOG(ERR, "Add vsi failed"); return -EFAULT; @@ -810,7 +819,7 @@ i40e_pf_host_process_cmd_config_promisc_mode( if (promisc->flags & I40E_FLAG_VF_UNICAST_PROMISC) unicast = TRUE; ret = i40e_aq_set_vsi_unicast_promiscuous(hw, - vf->vsi->seid, unicast, NULL); + vf->vsi->seid, unicast, NULL, true); if (ret != I40E_SUCCESS) goto send_msg; @@ -913,7 +922,7 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, /* AdminQ will pass absolute VF id, 
transfer to internal vf id */ uint16_t vf_id = abs_vf_id - hw->func_caps.vf_base_id; - if (!dev || vf_id > pf->vf_num - 1 || !pf->vfs) { + if (vf_id > pf->vf_num - 1 || !pf->vfs) { PMD_DRV_LOG(ERR, "invalid argument"); return; } @@ -996,11 +1005,9 @@ i40e_pf_host_handle_vf_msg(struct rte_eth_dev *dev, PMD_DRV_LOG(INFO, "OP_CFG_VLAN_PVID received"); i40e_pf_host_process_cmd_cfg_pvid(vf, msg, msglen); break; - /* Don't add command supported below, which will - * return an error code. + /* Don't add command supported below, which will + * return an error code. */ - case I40E_VIRTCHNL_OP_FCOE: - PMD_DRV_LOG(ERR, "OP_FCOE received, not supported"); default: PMD_DRV_LOG(ERR, "%u received, not supported", opcode); i40e_pf_host_send_msg_to_vf(vf, opcode, I40E_ERR_PARAM, diff --git a/drivers/net/i40e/i40e_rxtx.c b/drivers/net/i40e/i40e_rxtx.c index 4d35d83f..d3cfb98f 100644 --- a/drivers/net/i40e/i40e_rxtx.c +++ b/drivers/net/i40e/i40e_rxtx.c @@ -88,7 +88,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) { if (rte_le_to_cpu_64(rxdp->wb.qword1.status_error_len) & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)) { - mb->ol_flags |= PKT_RX_VLAN_PKT; + mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword0.lo_dword.l2tag1); PMD_RX_LOG(DEBUG, "Descriptor l2tag1: %u", @@ -99,7 +99,7 @@ i40e_rxd_to_vlan_tci(struct rte_mbuf *mb, volatile union i40e_rx_desc *rxdp) #ifndef RTE_LIBRTE_I40E_16BYTE_RX_DESC if (rte_le_to_cpu_16(rxdp->wb.qword2.ext_status) & (1 << I40E_RX_DESC_EXT_STATUS_L2TAG2P_SHIFT)) { - mb->ol_flags |= PKT_RX_QINQ_PKT; + mb->ol_flags |= PKT_RX_QINQ_STRIPPED; mb->vlan_tci_outer = mb->vlan_tci; mb->vlan_tci = rte_le_to_cpu_16(rxdp->wb.qword2.l2tag2_2); PMD_RX_LOG(DEBUG, "Descriptor l2tag2_1: %u, l2tag2_2: %u", @@ -140,27 +140,12 @@ i40e_rxd_error_to_pkt_flags(uint64_t qword) #define I40E_RX_ERR_BITS 0x3f if (likely((error_bits & I40E_RX_ERR_BITS) == 0)) return flags; - /* If RXE bit set, all other status bits are meaningless */ - if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RXE_SHIFT))) { - flags |= PKT_RX_MAC_ERR; - return flags; - } - - /* If RECIPE bit set, all other status indications should be ignored */ - if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_RECIPE_SHIFT))) { - flags |= PKT_RX_RECIP_ERR; - return flags; - } - if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_HBO_SHIFT))) - flags |= PKT_RX_HBUF_OVERFLOW; if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_IPE_SHIFT))) flags |= PKT_RX_IP_CKSUM_BAD; if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT))) flags |= PKT_RX_L4_CKSUM_BAD; if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_EIPE_SHIFT))) flags |= PKT_RX_EIP_CKSUM_BAD; - if (unlikely(error_bits & (1 << I40E_RX_DESC_ERROR_OVERSIZE_SHIFT))) - flags |= PKT_RX_OVERSIZE; return flags; } @@ -719,6 +704,33 @@ i40e_rxd_pkt_type_mapping(uint8_t ptype) RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN | RTE_PTYPE_INNER_L4_ICMP, + /* L2 NSH packet type */ + [154] = RTE_PTYPE_L2_ETHER_NSH, + [155] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [156] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [157] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [158] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [159] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [160] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, 
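
The i40e_rxd_error_to_pkt_flags() rework earlier in this file drops the RXE/RECIPE/HBO/oversize handling and reports only the IP, L4 and outer-IP checksum errors. A standalone sketch of that bit-test pattern follows; the error-field offset, shift values and flag constants below are illustrative placeholders, not the hardware's actual descriptor layout.

#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the real I40E_RX_DESC_ERROR_*_SHIFT values
 * live in i40e_type.h.
 */
#define ERR_IPE_SHIFT   3   /* inner IP header checksum error */
#define ERR_L4E_SHIFT   4   /* L4 (TCP/UDP/SCTP) checksum error */
#define ERR_EIPE_SHIFT  5   /* outer/tunnel IP checksum error */

/* Illustrative stand-ins for the PKT_RX_* mbuf offload flags */
#define FLAG_IP_CKSUM_BAD   (1ULL << 0)
#define FLAG_L4_CKSUM_BAD   (1ULL << 1)
#define FLAG_EIP_CKSUM_BAD  (1ULL << 2)

/* Mirror of the error-to-flags pattern used by the Rx path */
static uint64_t error_bits_to_flags(uint64_t qword)
{
    uint64_t error_bits = (qword >> 19) & 0x3f;  /* offset/mask illustrative */
    uint64_t flags = 0;

    if (error_bits == 0)
        return 0;
    if (error_bits & (1 << ERR_IPE_SHIFT))
        flags |= FLAG_IP_CKSUM_BAD;
    if (error_bits & (1 << ERR_L4E_SHIFT))
        flags |= FLAG_L4_CKSUM_BAD;
    if (error_bits & (1 << ERR_EIPE_SHIFT))
        flags |= FLAG_EIP_CKSUM_BAD;
    return flags;
}

int main(void)
{
    /* descriptor qword with only the "L4 checksum error" bit set */
    uint64_t qword = 1ULL << (19 + ERR_L4E_SHIFT);

    printf("flags = 0x%llx\n",
           (unsigned long long)error_bits_to_flags(qword));
    return 0;
}
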
+ [161] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_FRAG, + [162] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_NONFRAG, + [163] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_UDP, + [164] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_TCP, + [165] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_SCTP, + [166] = RTE_PTYPE_L2_ETHER_NSH | RTE_PTYPE_L3_IPV6_EXT_UNKNOWN | + RTE_PTYPE_L4_ICMP, + /* All others reserved */ }; @@ -841,17 +853,6 @@ i40e_txd_enable_checksum(uint64_t ol_flags, } } -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - - return m; -} - /* Construct the tx flags */ static inline uint64_t i40e_build_ctob(uint32_t td_cmd, @@ -1225,7 +1226,7 @@ i40e_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) break; - nmb = rte_rxmbuf_alloc(rxq->mp); + nmb = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(!nmb)) break; rxd = *rxdp; @@ -1336,7 +1337,7 @@ i40e_recv_scattered_pkts(void *rx_queue, if (!(rx_status & (1 << I40E_RX_DESC_STATUS_DD_SHIFT))) break; - nmb = rte_rxmbuf_alloc(rxq->mp); + nmb = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(!nmb)) break; rxd = *rxdp; @@ -2598,8 +2599,8 @@ i40e_rx_queue_release_mbufs(struct i40e_rx_queue *rxq) return; } - if (!rxq || !rxq->sw_ring) { - PMD_DRV_LOG(DEBUG, "Pointer to rxq or sw_ring is NULL"); + if (!rxq->sw_ring) { + PMD_DRV_LOG(DEBUG, "Pointer to sw_ring is NULL"); return; } @@ -2774,7 +2775,7 @@ i40e_alloc_rx_queue_mbufs(struct i40e_rx_queue *rxq) for (i = 0; i < rxq->nb_rx_desc; i++) { volatile union i40e_rx_desc *rxd; - struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mp); + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(!mbuf)) { PMD_DRV_LOG(ERR, "Failed to allocate mbuf for RX"); @@ -2904,7 +2905,12 @@ i40e_rx_queue_init(struct i40e_rx_queue *rxq) rx_ctx.lrxqthresh = 2; rx_ctx.crcstrip = (rxq->crc_len == 0) ? 1 : 0; rx_ctx.l2tsel = 1; - rx_ctx.showiv = 1; + /* showiv indicates if inner VLAN is stripped inside of tunnel + * packet. When set it to 1, vlan information is stripped from + * the inner header, but the hardware does not put it in the + * descriptor. So set it zero by default. + */ + rx_ctx.showiv = 0; rx_ctx.prefena = 1; err = i40e_clear_lan_rx_queue_context(hw, pf_q); @@ -2981,13 +2987,15 @@ i40e_fdir_setup_tx_resources(struct i40e_pf *pf) struct i40e_tx_queue *txq; const struct rte_memzone *tz = NULL; uint32_t ring_size; - struct rte_eth_dev *dev = pf->adapter->eth_dev; + struct rte_eth_dev *dev; if (!pf) { PMD_DRV_LOG(ERR, "PF is not available"); return I40E_ERR_BAD_PTR; } + dev = pf->adapter->eth_dev; + /* Allocate the TX queue data structure. */ txq = rte_zmalloc_socket("i40e fdir tx queue", sizeof(struct i40e_tx_queue), @@ -3035,13 +3043,15 @@ i40e_fdir_setup_rx_resources(struct i40e_pf *pf) struct i40e_rx_queue *rxq; const struct rte_memzone *rz = NULL; uint32_t ring_size; - struct rte_eth_dev *dev = pf->adapter->eth_dev; + struct rte_eth_dev *dev; if (!pf) { PMD_DRV_LOG(ERR, "PF is not available"); return I40E_ERR_BAD_PTR; } + dev = pf->adapter->eth_dev; + /* Allocate the RX queue data structure. 
*/ rxq = rte_zmalloc_socket("i40e fdir rx queue", sizeof(struct i40e_rx_queue), diff --git a/drivers/net/i40e/i40e_rxtx_vec.c b/drivers/net/i40e/i40e_rxtx_vec.c index 047aff53..05cb415e 100644 --- a/drivers/net/i40e/i40e_rxtx_vec.c +++ b/drivers/net/i40e/i40e_rxtx_vec.c @@ -144,23 +144,24 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) uint64_t dword; } vol; - /* mask everything except rss and vlan flags - *bit2 is for vlan tag, bits 13:12 for rss - */ + /* mask everything except RSS, flow director and VLAN flags + * bit2 is for VLAN tag, bit11 for flow director indication + * bit13:12 for RSS indication. + */ const __m128i rss_vlan_msk = _mm_set_epi16( 0x0000, 0x0000, 0x0000, 0x0000, - 0x3004, 0x3004, 0x3004, 0x3004); + 0x3804, 0x3804, 0x3804, 0x3804); /* map rss and vlan type to rss hash and vlan flag */ const __m128i vlan_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, PKT_RX_VLAN_PKT, + 0, 0, 0, PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED, 0, 0, 0, 0); const __m128i rss_flags = _mm_set_epi8(0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, - PKT_RX_FDIR, 0, PKT_RX_RSS_HASH, 0); + PKT_RX_RSS_HASH | PKT_RX_FDIR, PKT_RX_RSS_HASH, 0, 0, + 0, 0, PKT_RX_FDIR, 0); vlan0 = _mm_unpackhi_epi16(descs[0], descs[1]); vlan1 = _mm_unpackhi_epi16(descs[2], descs[3]); @@ -169,7 +170,7 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) vlan1 = _mm_and_si128(vlan0, rss_vlan_msk); vlan0 = _mm_shuffle_epi8(vlan_flags, vlan1); - rss = _mm_srli_epi16(vlan1, 12); + rss = _mm_srli_epi16(vlan1, 11); rss = _mm_shuffle_epi8(rss_flags, rss); vlan0 = _mm_or_si128(vlan0, rss); @@ -184,41 +185,7 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) #define desc_to_olflags_v(desc, rx_pkts) do {} while (0) #endif -#define PKTLEN_SHIFT (6) -#define PKTLEN_MASK (0x3FFF) -/* Handling the pkt len field is not aligned with 1byte, so shift is - * needed to let it align - */ -static inline void -desc_pktlen_align(__m128i descs[4]) -{ - __m128i pktlen0, pktlen1, zero; - union { - uint16_t e[4]; - uint64_t dword; - } vol; - - /* mask everything except pktlen field*/ - const __m128i pktlen_msk = _mm_set_epi32(PKTLEN_MASK, PKTLEN_MASK, - PKTLEN_MASK, PKTLEN_MASK); - - pktlen0 = _mm_unpackhi_epi32(descs[0], descs[2]); - pktlen1 = _mm_unpackhi_epi32(descs[1], descs[3]); - pktlen0 = _mm_unpackhi_epi32(pktlen0, pktlen1); - - zero = _mm_xor_si128(pktlen0, pktlen0); - - pktlen0 = _mm_srli_epi32(pktlen0, PKTLEN_SHIFT); - pktlen0 = _mm_and_si128(pktlen0, pktlen_msk); - - pktlen0 = _mm_packs_epi32(pktlen0, zero); - vol.dword = _mm_cvtsi128_si64(pktlen0); - /* let the descriptor byte 15-14 store the pkt len */ - *((uint16_t *)&descs[0]+7) = vol.e[0]; - *((uint16_t *)&descs[1]+7) = vol.e[1]; - *((uint16_t *)&descs[2]+7) = vol.e[2]; - *((uint16_t *)&descs[3]+7) = vol.e[3]; -} +#define PKTLEN_SHIFT 10 /* * Notice: @@ -331,18 +298,23 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); if (split_packet) { - rte_prefetch0(&rx_pkts[pos]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 1]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 2]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 3]->cacheline1); + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); } - /*shift the pktlen field*/ - desc_pktlen_align(descs); - /* avoid compiler reorder optimization */ rte_compiler_barrier(); + /* pkt 3,4 shift the pktlen field to be 16-bit 
aligned*/ + const __m128i len3 = _mm_slli_epi32(descs[3], PKTLEN_SHIFT); + const __m128i len2 = _mm_slli_epi32(descs[2], PKTLEN_SHIFT); + + /* merge the now-aligned packet length fields back in */ + descs[3] = _mm_blend_epi16(descs[3], len3, 0x80); + descs[2] = _mm_blend_epi16(descs[2], len2, 0x80); + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ pkt_mb4 = _mm_shuffle_epi8(descs[3], shuf_msk); pkt_mb3 = _mm_shuffle_epi8(descs[2], shuf_msk); @@ -358,6 +330,14 @@ _recv_raw_pkts_vec(struct i40e_rx_queue *rxq, struct rte_mbuf **rx_pkts, pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); pkt_mb3 = _mm_add_epi16(pkt_mb3, crc_adjust); + /* pkt 1,2 shift the pktlen field to be 16-bit aligned*/ + const __m128i len1 = _mm_slli_epi32(descs[1], PKTLEN_SHIFT); + const __m128i len0 = _mm_slli_epi32(descs[0], PKTLEN_SHIFT); + + /* merge the now-aligned packet length fields back in */ + descs[1] = _mm_blend_epi16(descs[1], len1, 0x80); + descs[0] = _mm_blend_epi16(descs[0], len0, 0x80); + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ pkt_mb2 = _mm_shuffle_epi8(descs[1], shuf_msk); pkt_mb1 = _mm_shuffle_epi8(descs[0], shuf_msk); @@ -751,6 +731,10 @@ i40e_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + /* need SSE4.1 support */ + if (!rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1)) + return -1; + #ifndef RTE_LIBRTE_I40E_RX_OLFLAGS_ENABLE /* whithout rx ol_flags, no VP flag report */ if (rxmode->hw_vlan_strip != 0 || diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile index 50bf51c9..a6c71f34 100644 --- a/drivers/net/ixgbe/Makefile +++ b/drivers/net/ixgbe/Makefile @@ -43,7 +43,7 @@ EXPORT_MAP := rte_pmd_ixgbe_version.map LIBABIVER := 1 -ifeq ($(CC), icc) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # @@ -51,7 +51,7 @@ CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 CFLAGS_ixgbe_rxtx.o += -wd3656 -else ifeq ($(CC), clang) +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) # # CFLAGS for clang # @@ -108,7 +108,11 @@ SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_rxtx.c SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_ethdev.c SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_fdir.c SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_pf.c -SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec.c +ifeq ($(CONFIG_RTE_ARCH_ARM64),y) +SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_neon.c +else +SRCS-$(CONFIG_RTE_IXGBE_INC_VECTOR) += ixgbe_rxtx_vec_sse.c +endif ifeq ($(CONFIG_RTE_NIC_BYPASS),y) SRCS-$(CONFIG_RTE_LIBRTE_IXGBE_PMD) += ixgbe_bypass.c diff --git a/drivers/net/ixgbe/base/README b/drivers/net/ixgbe/base/README index caa26640..76e78051 100644 --- a/drivers/net/ixgbe/base/README +++ b/drivers/net/ixgbe/base/README @@ -34,7 +34,7 @@ Intel® IXGBE driver =================== This directory contains source code of FreeBSD ixgbe driver of version -cid-10g-shared-code.2016.01.07 released by ND. The sub-directory of base/ +cid-10g-shared-code.2016.04.12 released by ND. The sub-directory of base/ contains the original source package. 
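Back to the vector RX change just above: the removed desc_pktlen_align() helper is replaced inline with a per-lane left shift by PKTLEN_SHIFT followed by _mm_blend_epi16(), which is also why the SSE4.1 runtime check (rte_cpu_get_flag_enabled(RTE_CPUFLAG_SSE4_1)) appears further down in i40e_rx_vec_dev_conf_condition_check(). A standalone model of the shift-and-blend trick; the field placement here (a 14-bit length at bit 6 of one 32-bit lane) is a simplified assumption, not the exact i40e descriptor layout. Build with -msse4.1:

    /* gcc -msse4.1 pktlen_blend.c */
    #include <smmintrin.h>   /* SSE4.1: _mm_blend_epi16() */
    #include <stdint.h>
    #include <stdio.h>

    #define PKTLEN_SHIFT 10   /* as in the patch */
    #define PKTLEN_BITS  14

    int main(void)
    {
        uint32_t pkt_len = 1514;                  /* sample length */
        uint32_t raw = (pkt_len << 6) | 0x15;     /* length not 16-bit aligned */

        /* place the interesting dword in the top lane of a descriptor-like vector */
        __m128i desc = _mm_set_epi32((int)raw, 0, 0, 0);

        /* shift the whole lane so the length field becomes 16-bit aligned */
        __m128i len = _mm_slli_epi32(desc, PKTLEN_SHIFT);

        /* merge only the realigned high word back: mask 0x80 selects word 7 */
        desc = _mm_blend_epi16(desc, len, 0x80);

        uint16_t word7 = (uint16_t)_mm_extract_epi16(desc, 7);
        printf("recovered length: %u\n", word7 & ((1u << PKTLEN_BITS) - 1));
        return 0;
    }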
This driver is valid for the product(s) listed below diff --git a/drivers/net/ixgbe/base/ixgbe_82598.c b/drivers/net/ixgbe/base/ixgbe_82598.c index 9e65fffa..db808801 100644 --- a/drivers/net/ixgbe/base/ixgbe_82598.c +++ b/drivers/net/ixgbe/base/ixgbe_82598.c @@ -995,17 +995,20 @@ STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) * @vlan: VLAN id to write to VLAN filter * @vind: VMDq output index that maps queue to VLAN id in VFTA * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @bypass_vlvf: boolean flag - unused * * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool bypass_vlvf) { u32 regindex; u32 bitindex; u32 bits; u32 vftabyte; + UNREFERENCED_1PARAMETER(bypass_vlvf); + DEBUGFUNC("ixgbe_set_vfta_82598"); if (vlan > 4095) diff --git a/drivers/net/ixgbe/base/ixgbe_82598.h b/drivers/net/ixgbe/base/ixgbe_82598.h index 89dd11a5..0326e70b 100644 --- a/drivers/net/ixgbe/base/ixgbe_82598.h +++ b/drivers/net/ixgbe/base/ixgbe_82598.h @@ -39,7 +39,8 @@ s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw); s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); void ixgbe_enable_relaxed_ordering_82598(struct ixgbe_hw *hw); s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); -s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); +s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass); s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, diff --git a/drivers/net/ixgbe/base/ixgbe_82599.c b/drivers/net/ixgbe/base/ixgbe_82599.c index 154c1f10..5bc7c2b9 100644 --- a/drivers/net/ixgbe/base/ixgbe_82599.c +++ b/drivers/net/ixgbe/base/ixgbe_82599.c @@ -1176,11 +1176,14 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; diff --git a/drivers/net/ixgbe/base/ixgbe_api.c b/drivers/net/ixgbe/base/ixgbe_api.c index cf1e5169..17868676 100644 --- a/drivers/net/ixgbe/base/ixgbe_api.c +++ b/drivers/net/ixgbe/base/ixgbe_api.c @@ -209,6 +209,8 @@ s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) case IXGBE_DEV_ID_X550EM_A_KR: case IXGBE_DEV_ID_X550EM_A_KR_L: case IXGBE_DEV_ID_X550EM_A_SFP_N: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: case IXGBE_DEV_ID_X550EM_A_10G_T: @@ -1078,33 +1080,38 @@ s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) * ixgbe_set_vfta - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFTA - * @vlan_on: boolean flag to turn on/off VLAN in VFTA + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN + * 
@vlvf_bypass: boolean flag indicating updating the default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. **/ -s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) +s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, + bool vlvf_bypass) { return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, - vlan_on), IXGBE_NOT_IMPLEMENTED); + vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); } /** * ixgbe_set_vlvf - Set VLAN Pool Filter * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF - * @vfta_changed: pointer to boolean flag which indicates whether VFTA - * should be changed + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating the default pool is okay * * Turn on/off specified bit in VLVF table. **/ s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, - bool *vfta_changed) + u32 *vfta_delta, u32 vfta, bool vlvf_bypass) { return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, - vlan_on, vfta_changed), IXGBE_NOT_IMPLEMENTED); + vlan_on, vfta_delta, vfta, vlvf_bypass), + IXGBE_NOT_IMPLEMENTED); } /** @@ -1637,6 +1644,20 @@ void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) hw->mac.ops.release_swfw_sync(hw, mask); } +/** + * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore + * @hw: pointer to hardware structure + * + * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register. + * Regardless of whether is succeeds or not it then release the semaphore. + * This is function is called to recover from catastrophic failures that + * may have left the semaphore locked. 
+ **/ +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw) +{ + if (hw->mac.ops.init_swfw_sync) + hw->mac.ops.init_swfw_sync(hw); +} void ixgbe_disable_rx(struct ixgbe_hw *hw) { diff --git a/drivers/net/ixgbe/base/ixgbe_api.h b/drivers/net/ixgbe/base/ixgbe_api.h index ae26a6ac..3aad1da7 100644 --- a/drivers/net/ixgbe/base/ixgbe_api.h +++ b/drivers/net/ixgbe/base/ixgbe_api.h @@ -124,9 +124,10 @@ s32 ixgbe_enable_mc(struct ixgbe_hw *hw); s32 ixgbe_disable_mc(struct ixgbe_hw *hw); s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); + u32 vind, bool vlan_on, bool vlvf_bypass); s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool *vfta_changed); + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); s32 ixgbe_fc_enable(struct ixgbe_hw *hw); s32 ixgbe_setup_fc(struct ixgbe_hw *hw); s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, @@ -191,6 +192,7 @@ s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw); s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); @@ -215,5 +217,7 @@ s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); void ixgbe_disable_rx(struct ixgbe_hw *hw); void ixgbe_enable_rx(struct ixgbe_hw *hw); +s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); #endif /* _IXGBE_API_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_common.c b/drivers/net/ixgbe/base/ixgbe_common.c index ec61408d..811875a4 100644 --- a/drivers/net/ixgbe/base/ixgbe_common.c +++ b/drivers/net/ixgbe/base/ixgbe_common.c @@ -135,6 +135,7 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) /* Flow Control */ mac->ops.fc_enable = ixgbe_fc_enable_generic; mac->ops.setup_fc = ixgbe_setup_fc_generic; + mac->ops.fc_autoneg = ixgbe_fc_autoneg; /* Link */ mac->ops.get_link_capabilities = NULL; @@ -1020,24 +1021,33 @@ s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices * @hw: pointer to the HW structure * - * Determines the LAN function id by reading memory-mapped registers - * and swaps the port value if requested. + * Determines the LAN function id by reading memory-mapped registers and swaps + * the port value if requested, and set MAC instance for devices that share + * CS4227. 
**/ void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) { struct ixgbe_bus_info *bus = &hw->bus; u32 reg; + u16 ee_ctrl_4; DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); reg = IXGBE_READ_REG(hw, IXGBE_STATUS); bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; - bus->lan_id = bus->func; + bus->lan_id = (u8)bus->func; /* check for a port swap */ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); if (reg & IXGBE_FACTPS_LFS) bus->func ^= 0x1; + + /* Get MAC instance from EEPROM for configuring CS4227 */ + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { + hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); + bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> + IXGBE_EE_CTRL_4_INST_ID_SHIFT; + } } /** @@ -2397,10 +2407,11 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) hw->mac.addr[4], hw->mac.addr[5]); hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); - - /* clear VMDq pool/queue selection for RAR 0 */ - hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); } + + /* clear VMDq pool/queue selection for RAR 0 */ + hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + hw->addr_ctrl.overflow_promisc = 0; hw->addr_ctrl.rar_used_count = 1; @@ -2729,7 +2740,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) } /* Negotiate the fc mode to use */ - ixgbe_fc_autoneg(hw); + hw->mac.ops.fc_autoneg(hw); /* Disable any previous flow control settings */ mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); @@ -2839,7 +2850,7 @@ out: * Find the intersection between advertised settings and link partner's * advertised settings **/ -STATIC s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) { if ((!(adv_reg)) || (!(lp_reg))) { @@ -3799,68 +3810,65 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) * return the VLVF index where this VLAN id should be placed * **/ -s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) { - u32 bits = 0; - u32 first_empty_slot = 0; - s32 regindex; + s32 regindex, first_empty_slot; + u32 bits; /* short cut the special case */ if (vlan == 0) return 0; - /* - * Search for the vlan id in the VLVF entries. Save off the first empty - * slot found along the way - */ - for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { + /* if vlvf_bypass is set we don't want to use an empty slot, we + * will simply bypass the VLVF if there are no entries present in the + * VLVF that contain our VLAN + */ + first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; + + /* add VLAN enable bit for comparison */ + vlan |= IXGBE_VLVF_VIEN; + + /* Search for the vlan id in the VLVF entries. Save off the first empty + * slot found along the way. + * + * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 + */ + for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); - if (!bits && !(first_empty_slot)) + if (bits == vlan) + return regindex; + if (!first_empty_slot && !bits) first_empty_slot = regindex; - else if ((bits & 0x0FFF) == vlan) - break; } - /* - * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan - * in the VLVF. Else use the first empty VLVF register for this - * vlan id. 
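The rewritten ixgbe_find_vlvf_slot() above folds the match test and the empty-slot bookkeeping into a single pre-decrement loop and lets callers refuse an empty slot via vlvf_bypass. A standalone model of that search, with the VLVF registers simulated as an array; the entry count, VIEN bit and error code below are stand-ins for the real ixgbe definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define VLVF_ENTRIES 64          /* stand-in for IXGBE_VLVF_ENTRIES */
    #define VLVF_VIEN    (1u << 31)  /* stand-in for IXGBE_VLVF_VIEN */
    #define ERR_NO_SPACE (-25)       /* stand-in for IXGBE_ERR_NO_SPACE */

    static uint32_t vlvf[VLVF_ENTRIES];   /* simulated VLVF registers */

    /* Return the slot already holding this VLAN, else the first empty slot
     * seen, else ERR_NO_SPACE. With bypass set, an empty slot is never
     * handed out. Entry 0 stays reserved for VLAN 0. */
    static int find_vlvf_slot(uint32_t vlan, int bypass)
    {
        int regindex, first_empty_slot = bypass ? ERR_NO_SPACE : 0;
        uint32_t bits;

        if (vlan == 0)
            return 0;

        vlan |= VLVF_VIEN;               /* only match enabled entries */

        /* pre-decrement loop covering entries (VLVF_ENTRIES - 1) .. 1 */
        for (regindex = VLVF_ENTRIES; --regindex; ) {
            bits = vlvf[regindex];
            if (bits == vlan)
                return regindex;
            if (!first_empty_slot && !bits)
                first_empty_slot = regindex;
        }

        return first_empty_slot ? first_empty_slot : ERR_NO_SPACE;
    }

    int main(void)
    {
        vlvf[5] = 100 | VLVF_VIEN;
        printf("%d\n", find_vlvf_slot(100, 1));  /* 5: existing entry */
        printf("%d\n", find_vlvf_slot(200, 1));  /* -25: bypass set, no match */
        printf("%d\n", find_vlvf_slot(200, 0));  /* 63: highest empty slot */
        return 0;
    }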
- */ - if (regindex >= IXGBE_VLVF_ENTRIES) { - if (first_empty_slot) - regindex = first_empty_slot; - else { - ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, - "No space in VLVF.\n"); - regindex = IXGBE_ERR_NO_SPACE; - } - } + /* If we are here then we didn't find the VLAN. Return first empty + * slot we found during our search, else error. + */ + if (!first_empty_slot) + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); - return regindex; + return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE; } /** * ixgbe_set_vfta_generic - Set VLAN filter table * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN + * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified VLAN in the VLAN filter table. **/ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on) + bool vlan_on, bool vlvf_bypass) { - s32 regindex; - u32 bitindex; - u32 vfta; - u32 targetbit; - s32 ret_val = IXGBE_SUCCESS; - bool vfta_changed = false; + u32 regidx, vfta_delta, vfta; + s32 ret_val; DEBUGFUNC("ixgbe_set_vfta_generic"); - if (vlan > 4095) + if (vlan > 4095 || vind > 63) return IXGBE_ERR_PARAM; /* @@ -3875,33 +3883,32 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * bits[11-5]: which register * bits[4-0]: which bit in the register */ - regindex = (vlan >> 5) & 0x7F; - bitindex = vlan & 0x1F; - targetbit = (1 << bitindex); - vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); - - if (vlan_on) { - if (!(vfta & targetbit)) { - vfta |= targetbit; - vfta_changed = true; - } - } else { - if ((vfta & targetbit)) { - vfta &= ~targetbit; - vfta_changed = true; - } - } + regidx = vlan / 32; + vfta_delta = 1 << (vlan % 32); + vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); + + /* vfta_delta represents the difference between the current value + * of vfta and the value we want in the register. Since the diff + * is an XOR mask we can just update the vfta using an XOR + */ + vfta_delta &= vlan_on ? 
~vfta : vfta; + vfta ^= vfta_delta; /* Part 2 * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF */ - ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, - &vfta_changed); - if (ret_val != IXGBE_SUCCESS) + ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, + vfta, vlvf_bypass); + if (ret_val != IXGBE_SUCCESS) { + if (vlvf_bypass) + goto vfta_update; return ret_val; + } - if (vfta_changed) - IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); +vfta_update: + /* Update VFTA now that we are ready for traffic */ + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); return IXGBE_SUCCESS; } @@ -3910,21 +3917,25 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * ixgbe_set_vlvf_generic - Set VLAN Pool Filter * @hw: pointer to hardware structure * @vlan: VLAN id to write to VLAN filter - * @vind: VMDq output index that maps queue to VLAN id in VFVFB - * @vlan_on: boolean flag to turn on/off VLAN in VFVF - * @vfta_changed: pointer to boolean flag which indicates whether VFTA - * should be changed + * @vind: VMDq output index that maps queue to VLAN id in VLVFB + * @vlan_on: boolean flag to turn on/off VLAN in VLVF + * @vfta_delta: pointer to the difference between the current value of VFTA + * and the desired value + * @vfta: the desired value of the VFTA + * @vlvf_bypass: boolean flag indicating updating default pool is okay * * Turn on/off specified bit in VLVF table. **/ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool *vfta_changed) + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass) { - u32 vt; + u32 bits; + s32 vlvf_index; DEBUGFUNC("ixgbe_set_vlvf_generic"); - if (vlan > 4095) + if (vlan > 4095 || vind > 63) return IXGBE_ERR_PARAM; /* If VT Mode is set @@ -3934,82 +3945,57 @@ s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, * Or !vlan_on * clear the pool bit and possibly the vind */ - vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); - if (vt & IXGBE_VT_CTL_VT_ENABLE) { - s32 vlvf_index; - u32 bits; - - vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); - if (vlvf_index < 0) - return vlvf_index; - - if (vlan_on) { - /* set the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index * 2)); - bits |= (1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index * 2), - bits); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index * 2) + 1)); - bits |= (1 << (vind - 32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index * 2) + 1), - bits); - } - } else { - /* clear the pool bit */ - if (vind < 32) { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index * 2)); - bits &= ~(1 << vind); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB(vlvf_index * 2), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index * 2) + 1)); - } else { - bits = IXGBE_READ_REG(hw, - IXGBE_VLVFB((vlvf_index * 2) + 1)); - bits &= ~(1 << (vind - 32)); - IXGBE_WRITE_REG(hw, - IXGBE_VLVFB((vlvf_index * 2) + 1), - bits); - bits |= IXGBE_READ_REG(hw, - IXGBE_VLVFB(vlvf_index * 2)); - } - } + if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) + return IXGBE_SUCCESS; + vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); + if (vlvf_index < 0) + return vlvf_index; - /* - * If there are still bits set in the VLVFB registers - * for the VLAN ID indicated we need to see if the - * caller is requesting that we clear the VFTA entry bit. 
- * If the caller has requested that we clear the VFTA - * entry bit but there are still pools/VFs using this VLAN - * ID entry then ignore the request. We're not worried - * about the case where we're turning the VFTA VLAN ID - * entry bit on, only when requested to turn it off as - * there may be multiple pools and/or VFs using the - * VLAN ID entry. In that case we cannot clear the - * VFTA bit until all pools/VFs using that VLAN ID have also - * been cleared. This will be indicated by "bits" being - * zero. + bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); + + /* set the pool bit */ + bits |= 1 << (vind % 32); + if (vlan_on) + goto vlvf_update; + + /* clear the pool bit */ + bits ^= 1 << (vind % 32); + + if (!bits && + !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { + /* Clear VFTA first, then disable VLVF. Otherwise + * we run the risk of stray packets leaking into + * the PF via the default pool */ - if (bits) { - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), - (IXGBE_VLVF_VIEN | vlan)); - if ((!vlan_on) && (vfta_changed != NULL)) { - /* someone wants to clear the vfta entry - * but some pools/VFs are still using it. - * Ignore it. */ - *vfta_changed = false; - } - } else - IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + if (vfta_delta) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); + + /* disable VLVF and clear remaining bit from pool */ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); + + return IXGBE_SUCCESS; } + /* If there are still bits set in the VLVFB registers + * for the VLAN ID indicated we need to see if the + * caller is requesting that we clear the VFTA entry bit. + * If the caller has requested that we clear the VFTA + * entry bit but there are still pools/VFs using this VLAN + * ID entry then ignore the request. We're not worried + * about the case where we're turning the VFTA VLAN ID + * entry bit on, only when requested to turn it off as + * there may be multiple pools and/or VFs using the + * VLAN ID entry. In that case we cannot clear the + * VFTA bit until all pools/VFs using that VLAN ID have also + * been cleared. This will be indicated by "bits" being + * zero. 
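The vfta_delta handling introduced in ixgbe_set_vfta_generic() above is worth spelling out: the 4096-entry filter table is 128 32-bit words, and the delta is an XOR mask that survives only when the requested state differs from the current one, so the register write can be skipped entirely when nothing changes. A standalone model with the VFTA bank simulated as an array:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t vfta[128];   /* simulated IXGBE_VFTA registers */

    /* Model of the new update path: compute an XOR delta for the one bit
     * belonging to this VLAN and apply it only if it is non-zero. */
    static void set_vfta_bit(uint32_t vlan, int vlan_on)
    {
        uint32_t regidx = vlan / 32;
        uint32_t vfta_delta = 1u << (vlan % 32);
        uint32_t word = vfta[regidx];

        /* keep the delta only when the bit actually has to change */
        vfta_delta &= vlan_on ? ~word : word;
        word ^= vfta_delta;

        if (vfta_delta)              /* skip the write when nothing changed */
            vfta[regidx] = word;
    }

    int main(void)
    {
        set_vfta_bit(100, 1);
        printf("0x%08x\n", vfta[100 / 32]);   /* bit 4 of word 3 set */
        set_vfta_bit(100, 1);                 /* no-op: delta collapses to 0 */
        set_vfta_bit(100, 0);
        printf("0x%08x\n", vfta[100 / 32]);   /* cleared again */
        return 0;
    }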
+ */ + *vfta_delta = 0; + +vlvf_update: + /* record pool change and enable VLAN ID if not already enabled */ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); + IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); return IXGBE_SUCCESS; } @@ -4032,7 +4018,7 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); - IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset * 2) + 1), 0); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); } return IXGBE_SUCCESS; @@ -4218,43 +4204,25 @@ out: /** * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing * @hw: pointer to hardware structure - * @enable: enable or disable switch for anti-spoofing - * @pf: Physical Function pool - do not enable anti-spoofing for the PF + * @enable: enable or disable switch for MAC anti-spoofing + * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing * **/ -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) { - int j; - int pf_target_reg = pf >> 3; - int pf_target_shift = pf % 8; - u32 pfvfspoof = 0; + int vf_target_reg = vf >> 3; + int vf_target_shift = vf % 8; + u32 pfvfspoof; if (hw->mac.type == ixgbe_mac_82598EB) return; + pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); if (enable) - pfvfspoof = IXGBE_SPOOF_MACAS_MASK; - - /* - * PFVFSPOOF register array is size 8 with 8 bits assigned to - * MAC anti-spoof enables in each register array element. - */ - for (j = 0; j < pf_target_reg; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * The PF should be allowed to spoof so that it can support - * emulation mode NICs. Do not set the bits assigned to the PF - */ - pfvfspoof &= (1 << pf_target_shift) - 1; - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); - - /* - * Remaining pools belong to the PF so they do not need to have - * anti-spoofing enabled. - */ - for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) - IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); + pfvfspoof |= (1 << vf_target_shift); + else + pfvfspoof &= ~(1 << vf_target_shift); + IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); } /** @@ -4363,8 +4331,9 @@ u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) * So we will leave this up to the caller to read back the data * in these cases. * - * Communicates with the manageability block. On success return IXGBE_SUCCESS - * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. + * Communicates with the manageability block. On success return IXGBE_SUCCESS + * else returns semaphore error when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
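Similarly, the ixgbe_set_mac_anti_spoofing() rewrite above stops sweeping the whole PFVFSPOOF bank and instead read-modify-writes the single enable bit belonging to one VF (eight VF bits per register). A standalone model of that addressing with the register bank simulated as an array; the register count is an assumption used only for illustration:

    #include <stdint.h>
    #include <stdio.h>

    #define PFVFSPOOF_REG_COUNT 8    /* stand-in for IXGBE_PFVFSPOOF_REG_COUNT */

    static uint32_t pfvfspoof[PFVFSPOOF_REG_COUNT];   /* simulated registers */

    /* VF n maps to register n >> 3, bit n % 8 of the MAC anti-spoof field. */
    static void set_mac_anti_spoofing(int enable, int vf)
    {
        int vf_target_reg = vf >> 3;
        int vf_target_shift = vf % 8;
        uint32_t val = pfvfspoof[vf_target_reg];

        if (enable)
            val |= (1u << vf_target_shift);
        else
            val &= ~(1u << vf_target_shift);

        pfvfspoof[vf_target_reg] = val;
    }

    int main(void)
    {
        set_mac_anti_spoofing(1, 11);               /* VF 11 -> register 1, bit 3 */
        printf("reg1 = 0x%02x\n", pfvfspoof[1]);    /* 0x08 */
        set_mac_anti_spoofing(0, 11);
        printf("reg1 = 0x%02x\n", pfvfspoof[1]);    /* 0x00 */
        return 0;
    }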
**/ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, u32 length, u32 timeout, bool return_data) @@ -4373,6 +4342,7 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, u32 hdr_size = sizeof(struct ixgbe_hic_hdr); u16 buf_len; u16 dword_len; + s32 status; DEBUGFUNC("ixgbe_host_interface_command"); @@ -4380,6 +4350,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); return IXGBE_ERR_HOST_INTERFACE_COMMAND; } + /* Take management host interface semaphore */ + status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + + if (status) + return status; + /* Set bit 9 of FWSTS clearing FW reset indication */ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); @@ -4388,13 +4364,15 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, hicr = IXGBE_READ_REG(hw, IXGBE_HICR); if ((hicr & IXGBE_HICR_EN) == 0) { DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs. We must be DWORD aligned */ if ((length % (sizeof(u32))) != 0) { DEBUGOUT("Buffer length failure, not aligned to dword"); - return IXGBE_ERR_INVALID_ARGUMENT; + status = IXGBE_ERR_INVALID_ARGUMENT; + goto rel_out; } dword_len = length >> 2; @@ -4421,11 +4399,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Command has failed with no status valid.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } if (!return_data) - return 0; + goto rel_out; /* Calculate length in DWORDs */ dword_len = hdr_size >> 2; @@ -4439,11 +4418,12 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, /* If there is any thing in data position pull it in */ buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; if (buf_len == 0) - return 0; + goto rel_out; if (length < buf_len + hdr_size) { DEBUGOUT("Buffer not large enough for reply message.\n"); - return IXGBE_ERR_HOST_INTERFACE_COMMAND; + status = IXGBE_ERR_HOST_INTERFACE_COMMAND; + goto rel_out; } /* Calculate length in DWORDs, add 3 for odd lengths */ @@ -4455,7 +4435,10 @@ s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, IXGBE_LE32_TO_CPUS(&buffer[bi]); } - return 0; +rel_out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); + + return status; } /** @@ -4480,12 +4463,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); - if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) - != IXGBE_SUCCESS) { - ret_val = IXGBE_ERR_SWFW_SYNC; - goto out; - } - fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; @@ -4517,8 +4494,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, break; } - hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); -out: return ret_val; } diff --git a/drivers/net/ixgbe/base/ixgbe_common.h b/drivers/net/ixgbe/base/ixgbe_common.h index fd67a889..0545f85c 100644 --- a/drivers/net/ixgbe/base/ixgbe_common.h +++ b/drivers/net/ixgbe/base/ixgbe_common.h @@ -133,11 +133,12 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 
*addr, u32 vmdq); s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, - u32 vind, bool vlan_on); + u32 vind, bool vlan_on, bool vlvf_bypass); s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, - bool vlan_on, bool *vfta_changed); + bool vlan_on, u32 *vfta_delta, u32 vfta, + bool vlvf_bypass); s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); -s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan); +s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass); s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, @@ -147,7 +148,7 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, u16 *wwpn_prefix); s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); -void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); +void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, diff --git a/drivers/net/ixgbe/base/ixgbe_mbx.h b/drivers/net/ixgbe/base/ixgbe_mbx.h index 4a120a3d..d775142d 100644 --- a/drivers/net/ixgbe/base/ixgbe_mbx.h +++ b/drivers/net/ixgbe/base/ixgbe_mbx.h @@ -109,7 +109,9 @@ enum ixgbe_pfvf_api_rev { #define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ /* mailbox API, version 1.2 VF requests */ -#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C +#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ +#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ +#define IXGBE_VF_UPDATE_XCAST_MODE 0x0C /* GET_QUEUES return data indices within the mailbox */ #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ diff --git a/drivers/net/ixgbe/base/ixgbe_osdep.h b/drivers/net/ixgbe/base/ixgbe_osdep.h index 40b0b512..31cc1bef 100644 --- a/drivers/net/ixgbe/base/ixgbe_osdep.h +++ b/drivers/net/ixgbe/base/ixgbe_osdep.h @@ -96,6 +96,7 @@ enum { #define IXGBE_NTOHL(_i) rte_be_to_cpu_32(_i) #define IXGBE_NTOHS(_i) rte_be_to_cpu_16(_i) #define IXGBE_CPU_TO_LE32(_i) rte_cpu_to_le_32(_i) +#define IXGBE_LE32_TO_CPU(_i) rte_le_to_cpu_32(_i) #define IXGBE_LE32_TO_CPUS(_i) rte_le_to_cpu_32(_i) #define IXGBE_CPU_TO_BE16(_i) rte_cpu_to_be_16(_i) #define IXGBE_CPU_TO_BE32(_i) rte_cpu_to_be_32(_i) diff --git a/drivers/net/ixgbe/base/ixgbe_phy.c b/drivers/net/ixgbe/base/ixgbe_phy.c index 6ed685e8..ed1b14f3 100644 --- a/drivers/net/ixgbe/base/ixgbe_phy.c +++ b/drivers/net/ixgbe/base/ixgbe_phy.c @@ -454,6 +454,9 @@ enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) case X557_PHY_ID: phy_type = ixgbe_phy_x550em_ext_t; break; + case IXGBE_M88E1500_E_PHY_ID: + phy_type = ixgbe_phy_m88; + break; default: phy_type = ixgbe_phy_unknown; break; @@ -615,13 +618,12 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, DEBUGFUNC("ixgbe_read_phy_reg_generic"); - if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { - status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, - phy_data); - hw->mac.ops.release_swfw_sync(hw, gssr); - } else { - status = IXGBE_ERR_SWFW_SYNC; - } + if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, gssr); return status; } diff --git a/drivers/net/ixgbe/base/ixgbe_phy.h b/drivers/net/ixgbe/base/ixgbe_phy.h 
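Both ixgbe_host_interface_command() and ixgbe_read_phy_reg_generic() above are reshaped around the same idiom: take the SWFW semaphore first, bail out early if that fails, and funnel every later exit through one release point. A minimal sketch of the pattern with placeholder lock and work functions (none of these names come from the driver):

    #include <stdio.h>

    static int take_lock(void)   { return 0; /* 0 = success */ }
    static void drop_lock(void)  { }
    static int do_step(int step) { return step == 2 ? -1 : 0; /* fail step 2 */ }

    /* Every error path after the lock is held jumps to rel_out, so the
     * semaphore is released exactly once on all exits. */
    static int locked_operation(void)
    {
        int status = take_lock();

        if (status)
            return status;       /* nothing acquired, nothing to release */

        status = do_step(1);
        if (status)
            goto rel_out;

        status = do_step(2);
        if (status)
            goto rel_out;

        status = do_step(3);

    rel_out:
        drop_lock();
        return status;
    }

    int main(void)
    {
        printf("status = %d\n", locked_operation());   /* -1, lock released */
        return 0;
    }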
index 1a5affe5..281f9faf 100644 --- a/drivers/net/ixgbe/base/ixgbe_phy.h +++ b/drivers/net/ixgbe/base/ixgbe_phy.h @@ -89,8 +89,11 @@ POSSIBILITY OF SUCH DAMAGE. #define IXGBE_CS4227 0xBE /* CS4227 address */ #define IXGBE_CS4227_GLOBAL_ID_LSB 0 +#define IXGBE_CS4227_GLOBAL_ID_MSB 1 #define IXGBE_CS4227_SCRATCH 2 #define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 +#define IXGBE_CS4223_PHY_ID 0x7003/* Quad port */ +#define IXGBE_CS4227_PHY_ID 0x3003/* Dual port */ #define IXGBE_CS4227_RESET_PENDING 0x1357 #define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 #define IXGBE_CS4227_RETRIES 15 diff --git a/drivers/net/ixgbe/base/ixgbe_type.h b/drivers/net/ixgbe/base/ixgbe_type.h index 4dce2ac1..83818a96 100644 --- a/drivers/net/ixgbe/base/ixgbe_type.h +++ b/drivers/net/ixgbe/base/ixgbe_type.h @@ -133,12 +133,14 @@ POSSIBILITY OF SUCH DAMAGE. #define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 #define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 #define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 -#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15C6 -#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15C7 +#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 +#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 #define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 #define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA #define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC #define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE +#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 +#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 #define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA #define IXGBE_DEV_ID_X550EM_X_KR 0x15AB #define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC @@ -194,7 +196,7 @@ POSSIBILITY OF SUCH DAMAGE. #define IXGBE_FLA_X540 IXGBE_FLA #define IXGBE_FLA_X550 IXGBE_FLA #define IXGBE_FLA_X550EM_x IXGBE_FLA -#define IXGBE_FLA_X550EM_a 0x15F6C +#define IXGBE_FLA_X550EM_a 0x15F68 #define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) #define IXGBE_EEMNGCTL 0x10110 @@ -1078,16 +1080,40 @@ struct ixgbe_dmac_config { #define IXGBE_PCIEPIPEDAT 0x11008 #define IXGBE_GSCL_1 0x11010 #define IXGBE_GSCL_2 0x11014 +#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1 +#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2 #define IXGBE_GSCL_3 0x11018 #define IXGBE_GSCL_4 0x1101C #define IXGBE_GSCN_0 0x11020 #define IXGBE_GSCN_1 0x11024 #define IXGBE_GSCN_2 0x11028 #define IXGBE_GSCN_3 0x1102C +#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0 +#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1 +#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2 +#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3 #define IXGBE_FACTPS 0x10150 #define IXGBE_FACTPS_X540 IXGBE_FACTPS +#define IXGBE_GSCL_1_X550 0x11800 +#define IXGBE_GSCL_2_X550 0x11804 +#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550 0x11820 +#define IXGBE_GSCN_1_X550 0x11824 +#define IXGBE_GSCN_2_X550 0x11828 +#define IXGBE_GSCN_3_X550 0x1182C +#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550 #define IXGBE_FACTPS_X550 IXGBE_FACTPS #define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS +#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550 +#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550 +#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550 +#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550 +#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550 +#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550 #define IXGBE_FACTPS_X550EM_a 0x15FEC #define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) @@ -1124,6 +1150,10 @@ struct ixgbe_dmac_config { #define IXGBE_GSCL_6_82599 0x11034 #define IXGBE_GSCL_7_82599 0x11038 #define 
IXGBE_GSCL_8_82599 0x1103C +#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599 +#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599 +#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599 +#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599 #define IXGBE_PHYADR_82599 0x11040 #define IXGBE_PHYDAT_82599 0x11044 #define IXGBE_PHYCTL_82599 0x11048 @@ -1134,10 +1164,22 @@ struct ixgbe_dmac_config { #define IXGBE_CIAD_82599 IXGBE_CIAD #define IXGBE_CIAA_X540 IXGBE_CIAA #define IXGBE_CIAD_X540 IXGBE_CIAD +#define IXGBE_GSCL_5_X550 0x11810 +#define IXGBE_GSCL_6_X550 0x11814 +#define IXGBE_GSCL_7_X550 0x11818 +#define IXGBE_GSCL_8_X550 0x1181C +#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550 #define IXGBE_CIAA_X550 0x11508 #define IXGBE_CIAD_X550 0x11510 #define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 #define IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 +#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550 +#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550 +#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550 +#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550 #define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 #define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 #define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) @@ -1472,6 +1514,7 @@ struct ixgbe_dmac_config { #define IXGBE_CORECTL_WRITE_CMD 0x00010000 /* Device Type definitions for new protocol MDIO commands */ +#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 #define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 #define IXGBE_MDIO_PCS_DEV_TYPE 0x3 #define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 @@ -1606,7 +1649,8 @@ struct ixgbe_dmac_config { #define ATH_PHY_ID 0x03429050 /* PHY Types */ -#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 +#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0 +#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0 /* Special PHY Init Routine */ #define IXGBE_PHY_INIT_OFFSET_NL 0x002B @@ -2247,6 +2291,9 @@ enum { #define IXGBE_PBANUM_PTR_GUARD 0xFAFA #define IXGBE_EEPROM_CHECKSUM 0x3F #define IXGBE_EEPROM_SUM 0xBABA +#define IXGBE_EEPROM_CTRL_4 0x45 +#define IXGBE_EE_CTRL_4_INST_ID 0x10 +#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 #define IXGBE_PCIE_ANALOG_PTR 0x03 #define IXGBE_ATLAS0_CONFIG_PTR 0x04 #define IXGBE_PHY_PTR 0x04 @@ -2358,6 +2405,7 @@ enum { #define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 #define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 #define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7) #define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 #define IXGBE_FW_LESM_STATE_1 0x1 #define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ @@ -2820,7 +2868,7 @@ enum { #define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) #define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) #define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) -#define IXGBE_PVFTTDLEN(P) (0x06008 + (0x40 * (P))) +#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P))) #define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) #define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) #define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) @@ -3003,6 +3051,12 @@ enum ixgbe_fdir_pballoc_type { /* Host Interface Command Structures */ +#ifdef C99 +#pragma pack(push, 1) +#else +#pragma pack(1) +#endif /* C99 */ + struct ixgbe_hic_hdr { u8 cmd; u8 buf_len; @@ -3080,17 +3134,22 @@ struct ixgbe_hic_internal_phy_req { struct ixgbe_hic_hdr hdr; u8 port_number; u8 command_type; - u16 address; + __be16 address; u16 rsv1; - u32 write_data; + __le32 write_data; u16 pad; }; struct ixgbe_hic_internal_phy_resp { struct ixgbe_hic_hdr hdr; - u32 read_data; + 
__le32 read_data; }; +#ifdef C99 +#pragma pack(pop) +#else +#pragma pack() +#endif /* C99 */ /* Transmit Descriptor - Legacy */ struct ixgbe_legacy_tx_desc { @@ -3246,6 +3305,7 @@ typedef u32 ixgbe_autoneg_advertised; /* Link speed */ typedef u32 ixgbe_link_speed; #define IXGBE_LINK_SPEED_UNKNOWN 0 +#define IXGBE_LINK_SPEED_10_FULL 0x0004 #define IXGBE_LINK_SPEED_100_FULL 0x0008 #define IXGBE_LINK_SPEED_1GB_FULL 0x0020 #define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 @@ -3511,6 +3571,8 @@ enum ixgbe_phy_type { ixgbe_phy_qsfp_intel, ixgbe_phy_qsfp_unknown, ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ + ixgbe_phy_sgmii, + ixgbe_phy_m88, ixgbe_phy_generic }; @@ -3567,6 +3629,14 @@ enum ixgbe_fc_mode { ixgbe_fc_default }; +/* Master/slave control */ +enum ixgbe_ms_type { + ixgbe_ms_hw_default = 0, + ixgbe_ms_force_master, + ixgbe_ms_force_slave, + ixgbe_ms_auto +}; + /* Smart Speed Settings */ #define IXGBE_SMARTSPEED_MAX_RETRIES 3 enum ixgbe_smart_speed { @@ -3626,7 +3696,8 @@ struct ixgbe_bus_info { enum ixgbe_bus_type type; u16 func; - u16 lan_id; + u8 lan_id; + u16 instance_id; }; /* Flow control parameters */ @@ -3766,6 +3837,7 @@ struct ixgbe_mac_operations { s32 (*enable_sec_rx_path)(struct ixgbe_hw *); s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); void (*release_swfw_sync)(struct ixgbe_hw *, u32); + void (*init_swfw_sync)(struct ixgbe_hw *); s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); @@ -3805,8 +3877,9 @@ struct ixgbe_mac_operations { s32 (*enable_mc)(struct ixgbe_hw *); s32 (*disable_mc)(struct ixgbe_hw *); s32 (*clear_vfta)(struct ixgbe_hw *); - s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); - s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, bool *); + s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); + s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32, + bool); s32 (*init_uta_tables)(struct ixgbe_hw *); void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); @@ -3814,6 +3887,7 @@ struct ixgbe_mac_operations { /* Flow Control */ s32 (*fc_enable)(struct ixgbe_hw *); s32 (*setup_fc)(struct ixgbe_hw *); + void (*fc_autoneg)(struct ixgbe_hw *); /* Manageability interface */ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); @@ -3941,6 +4015,8 @@ struct ixgbe_phy_info { bool reset_disable; ixgbe_autoneg_advertised autoneg_advertised; ixgbe_link_speed speeds_supported; + enum ixgbe_ms_type ms_type; + enum ixgbe_ms_type original_ms_type; enum ixgbe_smart_speed smart_speed; bool smart_speed_active; bool multispeed_fiber; @@ -4057,8 +4133,12 @@ struct ixgbe_hw { #define IXGBE_FUSES0_REV_MASK (3 << 6) #define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) +#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) #define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) #define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) +#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) +#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) #define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) #define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) #define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 
0x8B00 : 0x4B00) @@ -4072,18 +4152,29 @@ struct ixgbe_hw { #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) #define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12) +#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) #define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) +#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) #define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) #define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) #define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) +#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0) +#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10) +#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11) + +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12) +#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19) + #define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) #define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) #define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) @@ -4116,6 +4207,11 @@ struct ixgbe_hw { #define IXGBE_SB_IOSF_TARGET_KR_PHY 0 #define IXGBE_NW_MNG_IF_SEL 0x00011178 +#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1 << 1) +#define IXGBE_NW_MNG_IF_SEL_ENABLE_10_100M (1 << 23) #define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 +#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ + (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) #endif /* _IXGBE_TYPE_H_ */ diff --git a/drivers/net/ixgbe/base/ixgbe_vf.c b/drivers/net/ixgbe/base/ixgbe_vf.c index 40dc1c8c..a75074a5 100644 --- a/drivers/net/ixgbe/base/ixgbe_vf.c +++ b/drivers/net/ixgbe/base/ixgbe_vf.c @@ -362,8 +362,10 @@ s32 ixgbe_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, /* if nacked the address was rejected, use "perm_addr" */ if (!ret_val && - (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) + (msgbuf[0] == (IXGBE_VF_SET_MAC_ADDR | IXGBE_VT_MSGTYPE_NACK))) { ixgbe_get_mac_addr_vf(hw, hw->mac.addr); + return IXGBE_ERR_MBX; + } return ret_val; } @@ -422,13 +424,15 @@ s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, * @vlan: 12 bit VLAN ID * @vind: unused by VF drivers * @vlan_on: if true then set bit, else clear bit + * @vlvf_bypass: boolean flag indicating updating default pool is okay **/ -s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on) +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass) { struct ixgbe_mbx_info *mbx = &hw->mbx; u32 msgbuf[2]; s32 ret_val; - UNREFERENCED_1PARAMETER(vind); + UNREFERENCED_2PARAMETER(vind, vlvf_bypass); msgbuf[0] = IXGBE_VF_SET_VLAN; msgbuf[1] = vlan; diff --git a/drivers/net/ixgbe/base/ixgbe_vf.h b/drivers/net/ixgbe/base/ixgbe_vf.h index 411152a4..8851cb82 100644 --- a/drivers/net/ixgbe/base/ixgbe_vf.h +++ b/drivers/net/ixgbe/base/ixgbe_vf.h @@ -31,8 +31,8 @@ POSSIBILITY OF SUCH DAMAGE. 
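One detail from the ixgbe_type.h hunk above: the host interface command structures are now wrapped in #pragma pack so their in-memory layout matches the firmware wire format byte for byte. A standalone sketch of why that matters, using an invented layout (not the real ixgbe_hic_hdr fields) to show how compiler padding changes sizeof:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented layout: a byte-wide command code followed by wider fields,
     * exactly the case where the compiler would insert padding. */
    struct hic_cmd_padded {
        uint8_t  cmd;
        uint16_t address;      /* 1 padding byte may be inserted before this */
        uint32_t write_data;
    };

    #pragma pack(push, 1)
    struct hic_cmd_packed {
        uint8_t  cmd;
        uint16_t address;      /* no padding: bytes land as they do on the wire */
        uint32_t write_data;
    };
    #pragma pack(pop)

    int main(void)
    {
        /* typically prints 8 vs 7 on common ABIs */
        printf("padded=%zu packed=%zu\n",
               sizeof(struct hic_cmd_padded), sizeof(struct hic_cmd_packed));
        return 0;
    }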
***************************************************************************/ -#ifndef __IXGBE_VF_H__ -#define __IXGBE_VF_H__ +#ifndef _IXGBE_VF_H_ +#define _IXGBE_VF_H_ #define IXGBE_VF_IRQ_CLEAR_MASK 7 #define IXGBE_VF_MAX_TX_QUEUES 8 @@ -131,7 +131,8 @@ s32 ixgbevf_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); s32 ixgbe_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, u32 mc_addr_count, ixgbe_mc_addr_itr, bool clear); -s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on); +s32 ixgbe_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bool vlan_on, bool vlvf_bypass); void ixgbevf_rlpml_set_vf(struct ixgbe_hw *hw, u16 max_size); int ixgbevf_negotiate_api_version(struct ixgbe_hw *hw, int api); int ixgbevf_get_queues(struct ixgbe_hw *hw, unsigned int *num_tcs, diff --git a/drivers/net/ixgbe/base/ixgbe_x540.c b/drivers/net/ixgbe/base/ixgbe_x540.c index 9ade1b5e..31dead0d 100644 --- a/drivers/net/ixgbe/base/ixgbe_x540.c +++ b/drivers/net/ixgbe/base/ixgbe_x540.c @@ -99,6 +99,7 @@ s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; + mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; @@ -268,11 +269,14 @@ mac_reset_top: /* Add the SAN MAC address to the RAR only if it's a valid address */ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { - hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, - hw->mac.san_addr, 0, IXGBE_RAH_AV); - /* Save the SAN MAC RAR index */ hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, + hw->mac.san_addr, 0, IXGBE_RAH_AV); + + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, + IXGBE_CLEAR_VMDQ_ALL); /* Reserve the last RAR for the SAN MAC address */ hw->mac.num_rar_entries--; @@ -943,6 +947,25 @@ STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) } /** + * ixgbe_init_swfw_sync_X540 - Release hardware semaphore + * @hw: pointer to hardware structure + * + * This function reset hardware semaphore bits for a semaphore that may + * have be left locked due to a catastrophic failure. + **/ +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) +{ + /* First try to grab the semaphore but we don't need to bother + * looking to see whether we got the lock or not since we do + * the same thing regardless of whether we got the lock or not. + * We got the lock - we release it. + * We timeout trying to get the lock - we force its release. + */ + ixgbe_get_swfw_sync_semaphore(hw); + ixgbe_release_swfw_sync_semaphore(hw); +} + +/** * ixgbe_blink_led_start_X540 - Blink LED based on index. 
* @hw: pointer to hardware structure * @index: led number to blink diff --git a/drivers/net/ixgbe/base/ixgbe_x540.h b/drivers/net/ixgbe/base/ixgbe_x540.h index 42c08a82..e4baf6ff 100644 --- a/drivers/net/ixgbe/base/ixgbe_x540.h +++ b/drivers/net/ixgbe/base/ixgbe_x540.h @@ -59,6 +59,7 @@ s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); +void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); diff --git a/drivers/net/ixgbe/base/ixgbe_x550.c b/drivers/net/ixgbe/base/ixgbe_x550.c index 0bbaa55b..aa6e859f 100644 --- a/drivers/net/ixgbe/base/ixgbe_x550.c +++ b/drivers/net/ixgbe/base/ixgbe_x550.c @@ -39,8 +39,8 @@ POSSIBILITY OF SUCH DAMAGE. #include "ixgbe_phy.h" STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); -static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); -static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); /** * ixgbe_init_ops_X550 - Inits func ptrs and MAC type @@ -329,6 +329,137 @@ STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) } /** + * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @dev_type: always unused + * @phy_data: Pointer to read data from PHY register + */ +STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, + u32 dev_type, u16 *phy_data) +{ + u32 i, data, command; + UNREFERENCED_1PARAMETER(dev_type); + + /* Setup and write the read command */ + command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + + if (command & IXGBE_MSCA_MDI_COMMAND) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY read command did not complete.\n"); + return IXGBE_ERR_PHY; + } + + /* Read operation is complete. 
Get the data from MSRWD */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)data; + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @dev_type: always unused + * @phy_data: Data to write to the PHY register + */ +STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, + u32 dev_type, u16 phy_data) +{ + u32 i, command; + UNREFERENCED_1PARAMETER(dev_type); + + /* Put the data in the MDI single read and write data register*/ + IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); + + /* Setup and write the write command */ + command = (reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | + (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | + IXGBE_MSCA_MDI_COMMAND; + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + + /* Check every 10 usec to see if the access completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { + usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if (!(command & IXGBE_MSCA_MDI_COMMAND)) + break; + } + + if (command & IXGBE_MSCA_MDI_COMMAND) { + ERROR_REPORT1(IXGBE_ERROR_POLLING, + "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_identify_phy_1g - Get 1g PHY type based on device id + * @hw: pointer to hardware structure + * + * Returns error code + */ +STATIC s32 ixgbe_identify_phy_1g(struct ixgbe_hw *hw) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + u16 phy_id_high; + u16 phy_id_low; + s32 rc; + + rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (rc) + return rc; + + rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_HIGH, 0, + &phy_id_high); + if (rc) + goto rel_out; + + rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_MDIO_PHY_ID_LOW, 0, + &phy_id_low); + if (rc) + goto rel_out; + + hw->phy.id = (u32)phy_id_high << 16; + hw->phy.id |= phy_id_low & IXGBE_PHY_REVISION_MASK; + hw->phy.revision = (u32)phy_id_low & ~IXGBE_PHY_REVISION_MASK; + +rel_out: + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + return rc; +} + +/** * ixgbe_identify_phy_x550em - Get PHY type based on device id * @hw: pointer to hardware structure * @@ -338,7 +469,8 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) { switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_A_SFP: - hw->phy.phy_semaphore_mask = IXGBE_GSSR_TOKEN_SM; + hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; + hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; if (hw->bus.lan_id) hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; else @@ -364,10 +496,17 @@ STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T: - case IXGBE_DEV_ID_X550EM_A_1G_T_L: case IXGBE_DEV_ID_X550EM_A_10G_T: return ixgbe_identify_phy_generic(hw); + case IXGBE_DEV_ID_X550EM_A_1G_T: + case IXGBE_DEV_ID_X550EM_A_1G_T_L: + hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; + hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; + if (hw->bus.lan_id) + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; + else + hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; + return ixgbe_identify_phy_1g(hw); default: break; } @@ -397,7 +536,7 @@ STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, * * 
Returns an error code on error. **/ -static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, +STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); @@ -412,7 +551,7 @@ static s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, * * Returns an error code on error. **/ -static s32 +STATIC s32 ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) { @@ -428,7 +567,7 @@ ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, * * Returns an error code on error. **/ -static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, +STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); @@ -443,7 +582,7 @@ static s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, * * Returns an error code on error. **/ -static s32 +STATIC s32 ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) { @@ -512,8 +651,8 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) link->addr = IXGBE_CS4227; } if (hw->mac.type == ixgbe_mac_X550EM_a) { - mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; - mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; + mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; + mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; } @@ -527,12 +666,21 @@ s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) mac->ops.setup_fc = ixgbe_setup_fc_generic; + else if (hw->mac.type == ixgbe_mac_X550EM_a) { + mac->ops.setup_fc = ixgbe_setup_fc_x550a; + mac->ops.fc_autoneg = ixgbe_fc_autoneg_x550a; + } else mac->ops.setup_fc = ixgbe_setup_fc_X550em; - - if (hw->device_id != IXGBE_DEV_ID_X550EM_X_KR) + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + break; + default: mac->ops.setup_eee = NULL; + } /* PHY */ phy->ops.init = ixgbe_init_phy_ops_X550em; @@ -717,6 +865,116 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) } /** + * ixgbe_enable_eee_x550 - Enable EEE support + * @hw: pointer to hardware structure + */ +STATIC s32 ixgbe_enable_eee_x550(struct ixgbe_hw *hw) +{ + u16 autoneg_eee_reg; + u32 link_reg; + s32 status; + + if (hw->mac.type == ixgbe_mac_X550) { + /* Advertise EEE capability */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_eee_reg); + + autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | + IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | + IXGBE_AUTO_NEG_100BASE_EEE_ADVT); + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_eee_reg); + return IXGBE_SUCCESS; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); + if (status != IXGBE_SUCCESS) + return status; + + link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX; + + /* Don't advertise FEC capability when EEE enabled. 
*/ + link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg); + if (status != IXGBE_SUCCESS) + return status; + break; + default: + break; + } + + return IXGBE_SUCCESS; +} + +/** + * ixgbe_disable_eee_x550 - Disable EEE support + * @hw: pointer to hardware structure + */ +STATIC s32 ixgbe_disable_eee_x550(struct ixgbe_hw *hw) +{ + u16 autoneg_eee_reg; + u32 link_reg; + s32 status; + + if (hw->mac.type == ixgbe_mac_X550) { + /* Disable advertised EEE capability */ + hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_eee_reg); + + autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | + IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | + IXGBE_AUTO_NEG_100BASE_EEE_ADVT); + + hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, + IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_eee_reg); + return IXGBE_SUCCESS; + } + + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); + if (status != IXGBE_SUCCESS) + return status; + + link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | + IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX); + + /* Advertise FEC capability when EEE is disabled. */ + link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg); + if (status != IXGBE_SUCCESS) + return status; + break; + default: + break; + } + + return IXGBE_SUCCESS; +} + +/** * ixgbe_setup_eee_X550 - Enable/disable EEE support * @hw: pointer to the HW structure * @enable_eee: boolean flag to enable EEE @@ -728,10 +986,8 @@ s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) **/ s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee) { - u32 eeer; - u16 autoneg_eee_reg; - u32 link_reg; s32 status; + u32 eeer; DEBUGFUNC("ixgbe_setup_eee_X550"); @@ -740,75 +996,20 @@ s32 ixgbe_setup_eee_X550(struct ixgbe_hw *hw, bool enable_eee) if (enable_eee) { eeer |= (IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN); - if (hw->mac.type == ixgbe_mac_X550) { - /* Advertise EEE capability */ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg); - - autoneg_eee_reg |= (IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | - IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | - IXGBE_AUTO_NEG_100BASE_EEE_ADVT); - - hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg); - } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { - /* Not supported on first revision of X550EM_x. */ - if ((hw->mac.type == ixgbe_mac_X550EM_x) && - !(IXGBE_FUSES0_REV_MASK & - IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) - return IXGBE_SUCCESS; - - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); - if (status != IXGBE_SUCCESS) - return status; - - link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | - IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX; - - /* Don't advertise FEC capability when EEE enabled. 
*/ - link_reg &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; - - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg); - if (status != IXGBE_SUCCESS) - return status; - } + /* Not supported on first revision of X550EM_x. */ + if ((hw->mac.type == ixgbe_mac_X550EM_x) && + !(IXGBE_FUSES0_REV_MASK & + IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) + return IXGBE_SUCCESS; + status = ixgbe_enable_eee_x550(hw); + if (status) + return status; } else { eeer &= ~(IXGBE_EEER_TX_LPI_EN | IXGBE_EEER_RX_LPI_EN); - if (hw->mac.type == ixgbe_mac_X550) { - /* Disable advertised EEE capability */ - hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_eee_reg); - - autoneg_eee_reg &= ~(IXGBE_AUTO_NEG_10GBASE_EEE_ADVT | - IXGBE_AUTO_NEG_1000BASE_EEE_ADVT | - IXGBE_AUTO_NEG_100BASE_EEE_ADVT); - - hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_EEE_ADVT, - IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_eee_reg); - } else if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &link_reg); - if (status != IXGBE_SUCCESS) - return status; - - link_reg &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR | - IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX); - - /* Advertise FEC capability when EEE is disabled. */ - link_reg |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC; - - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, link_reg); - if (status != IXGBE_SUCCESS) - return status; - } + status = ixgbe_disable_eee_x550(hw); + if (status) + return status; } IXGBE_WRITE_REG(hw, IXGBE_EEER, eeer); @@ -1007,6 +1208,7 @@ s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; token_cmd.port_number = hw->bus.lan_id; token_cmd.command_type = FW_PHY_TOKEN_REQ; token_cmd.pad = 0; @@ -1037,6 +1239,7 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; token_cmd.hdr.cmd_or_resp.cmd_resv = 0; + token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; token_cmd.port_number = hw->bus.lan_id; token_cmd.command_type = FW_PHY_TOKEN_REL; token_cmd.pad = 0; @@ -1048,6 +1251,8 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) return status; if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) return IXGBE_SUCCESS; + + DEBUGOUT("Put PHY Token host interface command failed"); return IXGBE_ERR_FW_RESP_INVALID; } @@ -1060,23 +1265,24 @@ s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) * @data: Data to write to the register **/ s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 data) + u32 device_type, u32 data) { struct ixgbe_hic_internal_phy_req write_cmd; s32 status; UNREFERENCED_1PARAMETER(device_type); + memset(&write_cmd, 0, sizeof(write_cmd)); write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; write_cmd.port_number = hw->bus.lan_id; write_cmd.command_type = FW_INT_PHY_REQ_WRITE; - write_cmd.address = (u16)reg_addr; - write_cmd.rsv1 = 0; - write_cmd.write_data = data; - write_cmd.pad = 0; + write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr); + write_cmd.write_data = IXGBE_CPU_TO_LE32(data); status = ixgbe_host_interface_command(hw, (u32 
*)&write_cmd, - sizeof(write_cmd), IXGBE_HI_COMMAND_TIMEOUT, false); + sizeof(write_cmd), + IXGBE_HI_COMMAND_TIMEOUT, false); return status; } @@ -1090,26 +1296,29 @@ s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, * @data: Pointer to read data from the register **/ s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, - u32 device_type, u32 *data) + u32 device_type, u32 *data) { - struct ixgbe_hic_internal_phy_req read_cmd; + union { + struct ixgbe_hic_internal_phy_req cmd; + struct ixgbe_hic_internal_phy_resp rsp; + } hic; s32 status; UNREFERENCED_1PARAMETER(device_type); - read_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; - read_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; - read_cmd.port_number = hw->bus.lan_id; - read_cmd.command_type = FW_INT_PHY_REQ_READ; - read_cmd.address = (u16)reg_addr; - read_cmd.rsv1 = 0; - read_cmd.write_data = 0; - read_cmd.pad = 0; + memset(&hic, 0, sizeof(hic)); + hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; + hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; + hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; + hic.cmd.port_number = hw->bus.lan_id; + hic.cmd.command_type = FW_INT_PHY_REQ_READ; + hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr); - status = ixgbe_host_interface_command(hw, (u32 *)&read_cmd, - sizeof(read_cmd), IXGBE_HI_COMMAND_TIMEOUT, true); + status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, + sizeof(hic.cmd), + IXGBE_HI_COMMAND_TIMEOUT, true); /* Extract the register value from the response. */ - *data = ((struct ixgbe_hic_internal_phy_resp *)&read_cmd)->read_data; + *data = IXGBE_LE32_TO_CPU(hic.rsp.read_data); return status; } @@ -1286,10 +1495,18 @@ enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) break; case IXGBE_DEV_ID_X550EM_X_1G_T: case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_10G_T: + media_type = ixgbe_media_type_copper; + break; + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: + media_type = ixgbe_media_type_backplane; + hw->phy.type = ixgbe_phy_sgmii; + break; case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: - case IXGBE_DEV_ID_X550EM_A_10G_T: media_type = ixgbe_media_type_copper; + hw->phy.type = ixgbe_phy_m88; break; default: media_type = ixgbe_media_type_unknown; @@ -1382,6 +1599,57 @@ s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) } /** + * ixgbe_setup_sgmii - Set up link for sgmii + * @hw: pointer to hardware structure + */ +STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + struct ixgbe_mac_info *mac = &hw->mac; + u32 lval, sval; + s32 rc; + UNREFERENCED_2PARAMETER(speed, autoneg_wait_to_complete); + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); + if (rc) + return rc; + + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; + lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + if (rc) + return rc; + + rc = mac->ops.read_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); + if (rc) + return rc; + + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; + sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), + 
IXGBE_SB_IOSF_TARGET_KR_PHY, sval); + if (rc) + return rc; + + lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + rc = mac->ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, lval); + + return rc; +} + +/** * ixgbe_init_mac_link_ops_X550em - init mac link function pointers * @hw: pointer to hardware structure */ @@ -1391,8 +1659,8 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); - switch (hw->mac.ops.get_media_type(hw)) { - case ixgbe_media_type_fiber: + switch (hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: /* CS4227 does not support autoneg, so disable the laser control * functions for SFP+ fiber */ @@ -1400,17 +1668,28 @@ void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) mac->ops.enable_tx_laser = NULL; mac->ops.flap_tx_laser = NULL; mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; - mac->ops.setup_mac_link = ixgbe_setup_mac_link_sfp_x550em; mac->ops.set_rate_select_speed = ixgbe_set_soft_rate_select_speed; + if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) || + (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP)) + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550a; + else + mac->ops.setup_mac_link = + ixgbe_setup_mac_link_sfp_x550em; break; case ixgbe_media_type_copper: mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; mac->ops.check_link = ixgbe_check_link_t_X550em; break; + case ixgbe_media_type_backplane: + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || + hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) + mac->ops.setup_link = ixgbe_setup_sgmii; + break; default: break; - } + } } /** @@ -1447,8 +1726,19 @@ s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, else *speed = IXGBE_LINK_SPEED_10GB_FULL; } else { - *speed = IXGBE_LINK_SPEED_10GB_FULL | - IXGBE_LINK_SPEED_1GB_FULL; + switch (hw->phy.type) { + case ixgbe_phy_m88: + *speed = IXGBE_LINK_SPEED_100_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + break; + case ixgbe_phy_sgmii: + *speed = IXGBE_LINK_SPEED_1GB_FULL; + break; + default: + *speed = IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + break; + } *autoneg = true; } @@ -1644,9 +1934,9 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, s32 status; u32 reg_val; - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status) return status; @@ -1664,14 +1954,210 @@ STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, /* Restart auto-negotiation.
*/ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); return status; } /** + * ixgbe_set_master_slave_mode - Set up PHY for master/slave mode + * @hw: pointer to hardware structure + * + * Must be called while holding the PHY semaphore and token + */ +STATIC s32 ixgbe_set_master_slave_mode(struct ixgbe_hw *hw) +{ + u16 phy_data; + s32 rc; + + /* Resolve master/slave mode */ + rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0, + &phy_data); + if (rc) + return rc; + + /* load defaults for future use */ + if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_ENABLE) { + if (phy_data & IXGBE_M88E1500_1000T_CTRL_MS_VALUE) + hw->phy.original_ms_type = ixgbe_ms_force_master; + else + hw->phy.original_ms_type = ixgbe_ms_force_slave; + } else { + hw->phy.original_ms_type = ixgbe_ms_auto; + } + + switch (hw->phy.ms_type) { + case ixgbe_ms_force_master: + phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE; + phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_VALUE; + break; + case ixgbe_ms_force_slave: + phy_data |= IXGBE_M88E1500_1000T_CTRL_MS_ENABLE; + phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_VALUE; + break; + case ixgbe_ms_auto: + phy_data &= ~IXGBE_M88E1500_1000T_CTRL_MS_ENABLE; + break; + default: + break; + } + + return ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_1000T_CTRL, 0, + phy_data); +} + +/** + * ixgbe_reset_phy_m88_nolock - Reset m88 PHY without locking + * @hw: pointer to hardware structure + * + * Must be called while holding the PHY semaphore and token + */ +STATIC s32 ixgbe_reset_phy_m88_nolock(struct ixgbe_hw *hw) +{ + s32 rc; + + rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 1); + if (rc) + return rc; + + rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_FIBER_CTRL, 0, + IXGBE_M88E1500_FIBER_CTRL_RESET | + IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL | + IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB); + if (rc) + goto res_out; + + rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 18); + if (rc) + goto res_out; + + rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_GEN_CTRL, 0, + IXGBE_M88E1500_GEN_CTRL_RESET | + IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER); + if (rc) + goto res_out; + + rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0); + if (rc) + goto res_out; + + rc = ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_COPPER_CTRL, 0, + IXGBE_M88E1500_COPPER_CTRL_RESET | + IXGBE_M88E1500_COPPER_CTRL_AN_EN | + IXGBE_M88E1500_COPPER_CTRL_RESTART_AN | + IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX | + IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB); + +res_out: + ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0); + return rc; +} + +/** + * ixgbe_reset_phy_m88 - Reset m88 PHY + * @hw: pointer to hardware structure + */ +STATIC s32 ixgbe_reset_phy_m88(struct ixgbe_hw *hw) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + s32 rc; + + if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) + return IXGBE_SUCCESS; + + rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (rc) + return rc; + + rc = ixgbe_reset_phy_m88_nolock(hw); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return rc; +} + +/** + * ixgbe_setup_m88 - setup m88 PHY + * @hw: pointer to hardware structure + */ +STATIC s32 ixgbe_setup_m88(struct ixgbe_hw *hw) +{ + u32 swfw_mask = hw->phy.phy_semaphore_mask; + struct ixgbe_phy_info *phy = &hw->phy; + 
u16 phy_data; + s32 rc; + + if (phy->reset_disable || ixgbe_check_reset_blocked(hw)) + return IXGBE_SUCCESS; + + rc = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); + if (rc) + return rc; + + rc = ixgbe_read_phy_reg_mdi_22(hw, IXGBE_M88E1500_PHY_SPEC_CTRL, 0, + &phy_data); + if (rc) + goto rel_out; + + /* Enable downshift and set it to 6X */ + phy_data &= ~IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE; + phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_6X; + phy_data |= IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE; + rc = ixgbe_write_phy_reg_mdi_22(hw, + IXGBE_M88E1500_PHY_SPEC_CTRL, 0, + phy_data); + if (rc) + goto rel_out; + + ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0); + + /* Commit the changes */ + rc = ixgbe_reset_phy_m88_nolock(hw); + if (rc) { + DEBUGOUT("Error committing the PHY changes\n"); + goto rel_out; + } + + rc = ixgbe_set_master_slave_mode(hw); + + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return rc; + +rel_out: + ixgbe_write_phy_reg_mdi_22(hw, IXGBE_M88E1500_PAGE_ADDR, 0, 0); + hw->mac.ops.release_swfw_sync(hw, swfw_mask); + return rc; +} + +/** + * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register + * @hw: pointer to hardware structure + * + * Read NW_MNG_IF_SEL register and save field values, and check for valid field + * values. + **/ +STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) +{ + /* Save NW management interface connected on board. This is used + * to determine internal PHY mode. + */ + hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); + + /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set + * PHY address. This register field has only been used for X552. + */ + if (hw->mac.type == ixgbe_mac_X550EM_a && + hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { + hw->phy.addr = (hw->phy.nw_mng_if_sel & + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> + IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; + } + + return IXGBE_SUCCESS; +} + +/** * ixgbe_init_phy_ops_X550em - PHY/SFP specific init * @hw: pointer to hardware structure * * @@ -1688,14 +2174,11 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) hw->mac.ops.set_lan_id(hw); + ixgbe_read_mng_if_sel_x550em(hw); + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ixgbe_setup_mux_ctl(hw); - - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode. - */ - phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; } @@ -1722,11 +2205,6 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.write_reg = ixgbe_write_phy_reg_x550em; break; case ixgbe_phy_x550em_ext_t: - /* Save NW management interface connected on board. This is used - * to determine internal PHY mode - */ - phy->nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); - /* If internal link mode is XFI, then setup iXFI internal link, * else setup KR now.
*/ @@ -1742,6 +2220,15 @@ s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; phy->ops.reset = ixgbe_reset_phy_t_X550em; break; + case ixgbe_phy_sgmii: + phy->ops.setup_link = NULL; + break; + case ixgbe_phy_m88: + phy->ops.setup_link = ixgbe_setup_m88; + phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22; + phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22; + phy->ops.reset = ixgbe_reset_phy_m88; + break; default: break; } @@ -1752,12 +2239,14 @@ * ixgbe_set_mdio_speed - Set MDIO clock speed * @hw: pointer to hardware structure */ -static void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) +STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) { u32 hlreg0; switch (hw->device_id) { case IXGBE_DEV_ID_X550EM_X_10G_T: + case IXGBE_DEV_ID_X550EM_A_SGMII: + case IXGBE_DEV_ID_X550EM_A_SGMII_L: case IXGBE_DEV_ID_X550EM_A_1G_T: case IXGBE_DEV_ID_X550EM_A_1G_T_L: case IXGBE_DEV_ID_X550EM_A_10G_T: @@ -1933,10 +2422,13 @@ s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) * ixgbe_setup_kr_x550em - Configure the KR PHY. * @hw: pointer to hardware structure * - * Configures the integrated KR PHY. + * Configures the integrated KR PHY for X550EM_x. **/ s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) { + if (hw->mac.type != ixgbe_mac_X550EM_x) + return IXGBE_SUCCESS; + return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); } @@ -2019,6 +2511,99 @@ s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, } /** + * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP + * @hw: pointer to hardware structure + * + * Configure the integrated PHY for SFP support. + **/ +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) +{ + s32 ret_val; + u16 reg_phy_ext; + bool setup_linear = false; + u32 reg_slice, reg_phy_int, slice_offset; + + UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); + + /* Check if SFP module is supported and linear */ + ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); + + /* If no SFP module present, then return success. Return success since + * SFP not present error is not excepted in the setup MAC link flow. + */ + if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) + return IXGBE_SUCCESS; + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { + /* Configure internal PHY for native SFI */ + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_phy_int); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + if (setup_linear) { + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LINEAR; + } else { + reg_phy_int |= IXGBE_KRM_AN_CNTL_8_LIMITING; + reg_phy_int &= ~IXGBE_KRM_AN_CNTL_8_LINEAR; + } + + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_8(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* Setup XFI/SFI internal link. */ + ret_val = ixgbe_setup_ixfi_x550em(hw, &speed); + } else { + /* Configure internal PHY for KR/KX.
*/ + ixgbe_setup_kr_speed_x550em(hw, speed); + + if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) { + /* Find Address */ + DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n"); + return IXGBE_ERR_PHY_ADDR_INVALID; + } + + /* Get external PHY device id */ + ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_GLOBAL_ID_MSB, + IXGBE_MDIO_ZERO_DEV_TYPE, &reg_phy_ext); + + if (ret_val != IXGBE_SUCCESS) + return ret_val; + + /* When configuring quad port CS4223, the MAC instance is part + * of the slice offset. + */ + if (reg_phy_ext == IXGBE_CS4223_PHY_ID) + slice_offset = (hw->bus.lan_id + + (hw->bus.instance_id << 1)) << 12; + else + slice_offset = hw->bus.lan_id << 12; + + /* Configure CS4227/CS4223 LINE side to proper mode. */ + reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; + if (setup_linear) + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; + else + reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; + ret_val = hw->phy.ops.write_reg(hw, reg_slice, + IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); + } + return ret_val; +} + +/** + * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration + * @hw: pointer to hardware structure + * + * @@ -2261,57 +2846,57 @@ s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) u32 reg_val; /* Disable AN and force speed to 10G Serial. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Set near-end loopback clocks. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Set loopback enable. */ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); if (status != IXGBE_SUCCESS) return status; /* Training bypass.
*/ - status = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (status != IXGBE_SUCCESS) return status; reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; - status = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); return status; } @@ -3147,24 +3732,30 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) goto out; } - if (hw->device_id == IXGBE_DEV_ID_X550EM_X_KR) { - ret_val = ixgbe_read_iosf_sb_reg_x550(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); + switch (hw->device_id) { + case IXGBE_DEV_ID_X550EM_X_KR: + case IXGBE_DEV_ID_X550EM_A_KR: + case IXGBE_DEV_ID_X550EM_A_KR_L: + ret_val = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &reg_val); if (ret_val != IXGBE_SUCCESS) goto out; reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | - IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); if (pause) reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; if (asm_dir) reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; - ret_val = ixgbe_write_iosf_sb_reg_x550(hw, - IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), - IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); + ret_val = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); /* This device does not fully support AN. */ hw->fc.disable_fc_autoneg = true; + break; + default: + break; } out: @@ -3172,6 +3763,183 @@ out: } /** + * ixgbe_fc_autoneg_x550a - Enable flow control IEEE clause 37 + * @hw: pointer to hardware structure + * + * Enable flow control according to IEEE clause 37. + **/ +void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw) +{ + u32 link_s1, lp_an_page_low, an_cntl_1; + s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; + ixgbe_link_speed speed; + bool link_up; + + /* AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up.
+ */ + if (hw->fc.disable_fc_autoneg) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "Flow control autoneg is disabled"); + goto out; + } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); + if (!link_up) { + ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; + } + + /* Check if auto-negotiation has completed */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_S1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); + + if (status != IXGBE_SUCCESS || + (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } + + /* Read the 10g AN autoc and LP ability registers and resolve + * local flow control settings accordingly + */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } + + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; + } + + status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, + IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, + IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); + +out: + if (status == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; + hw->fc.current_mode = hw->fc.requested_mode; + } +} + +/** + * ixgbe_setup_fc_x550a - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw) +{ + s32 status = IXGBE_SUCCESS; + u32 an_cntl, link_ctrl = 0; + + DEBUGFUNC("ixgbe_setup_fc_x550em"); + + /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { + ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, + "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + return IXGBE_ERR_INVALID_LINK_SETTINGS; + } + + if (hw->fc.requested_mode == ixgbe_fc_default) + hw->fc.requested_mode = ixgbe_fc_full; + + /* Set up the 1G and 10G flow control advertisement registers so the + * HW will be able to do FC autoneg once the cable is plugged in. If + * we link at 10G, the 1G advertisement is harmless and vice versa. + */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + return status; + } + + /* The possible values of fc.requested_mode are: + * 0: Flow control is completely disabled + * 1: Rx flow control is enabled (we can receive pause frames, + * but not send pause frames). + * 2: Tx flow control is enabled (we can send pause frames but + * we do not support receiving pause frames). + * 3: Both Rx and Tx flow control (symmetric) are enabled. + * other: Invalid. + */ + switch (hw->fc.requested_mode) { + case ixgbe_fc_none: + /* Flow control completely disabled by software override. */ + an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); + break; + case ixgbe_fc_tx_pause: + /* Tx Flow control is enabled, and Rx Flow control is + * disabled by software override.
+ */ + an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; + break; + case ixgbe_fc_rx_pause: + /* Rx Flow control is enabled and Tx Flow control is + * disabled by software override. Since there really + * isn't a way to advertise that we are capable of RX + * Pause ONLY, we will advertise that we support both + * symmetric and asymmetric Rx PAUSE, as such we fall + * through to the fc_full statement. Later, we will + * disable the adapter's ability to send PAUSE frames. + */ + case ixgbe_fc_full: + /* Flow control (both Rx and Tx) is enabled by SW override. */ + an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | + IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; + break; + default: + ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, + "Flow control param set incorrectly\n"); + return IXGBE_ERR_CONFIG; + } + + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); + + /* Restart auto-negotiation. */ + status = hw->mac.ops.read_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); + + if (status != IXGBE_SUCCESS) { + DEBUGOUT("Auto-Negotiation did not complete\n"); + return status; + } + + link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; + status = hw->mac.ops.write_iosf_sb_reg(hw, + IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), + IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); + + return status; +} + +/** * ixgbe_set_mux - Set mux for port 1 access with CS4227 * @hw: pointer to hardware structure * @state: set mux if 1, clear if 0 @@ -3238,7 +4006,7 @@ void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) * * Acquires the SWFW semaphore and get the shared phy token as needed */ -static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) +STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; int retries = FW_PHY_TOKEN_RETRIES; @@ -3247,17 +4015,22 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); while (--retries) { + status = IXGBE_SUCCESS; if (hmask) status = ixgbe_acquire_swfw_sync_X540(hw, hmask); if (status) - break; + return status; if (!(mask & IXGBE_GSSR_TOKEN_SM)) - break; + return IXGBE_SUCCESS; + status = ixgbe_get_phy_token(hw); - if (status != IXGBE_ERR_TOKEN_RETRY) - break; + if (status == IXGBE_SUCCESS) + return IXGBE_SUCCESS; + if (hmask) ixgbe_release_swfw_sync_X540(hw, hmask); + if (status != IXGBE_ERR_TOKEN_RETRY) + return status; msec_delay(FW_PHY_TOKEN_DELAY); } @@ -3271,7 +4044,7 @@ static s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) * * Releases the SWFW semaphore and puts the shared phy token as needed */ -static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) +STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) { u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; @@ -3285,6 +4058,63 @@ static void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) } /** + * ixgbe_read_phy_reg_x550a - Reads specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit address of PHY register to read + * @phy_data: Pointer to read data from PHY register + * + * Reads a value from a specified PHY register using the SWFW lock and PHY + * Token. The PHY Token is needed since the MDIO is shared between two MAC + * instances.
+ **/ +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) +{ + s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_read_phy_reg_x550a"); + + if (hw->mac.ops.acquire_swfw_sync(hw, mask)) + return IXGBE_ERR_SWFW_SYNC; + + status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); + + hw->mac.ops.release_swfw_sync(hw, mask); + + return status; +} + +/** + * ixgbe_write_phy_reg_x550a - Writes specified PHY register + * @hw: pointer to hardware structure + * @reg_addr: 32 bit PHY register to write + * @device_type: 5 bit device type + * @phy_data: Data to write to the PHY register + * + * Writes a value to a specified PHY register using the SWFW lock and PHY Token. + * The PHY Token is needed since the MDIO is shared between two MAC instances. + **/ +s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) +{ + s32 status; + u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; + + DEBUGFUNC("ixgbe_write_phy_reg_x550a"); + + if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) { + status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, mask); + } else { + status = IXGBE_ERR_SWFW_SYNC; + } + + return status; +} + +/** * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt * @hw: pointer to hardware structure * diff --git a/drivers/net/ixgbe/base/ixgbe_x550.h b/drivers/net/ixgbe/base/ixgbe_x550.h index a8c0a678..27d5d02f 100644 --- a/drivers/net/ixgbe/base/ixgbe_x550.h +++ b/drivers/net/ixgbe/base/ixgbe_x550.h @@ -36,6 +36,49 @@ POSSIBILITY OF SUCH DAMAGE. #include "ixgbe_type.h" +/* More phy definitions */ +#define IXGBE_M88E1500_COPPER_CTRL 0x0/* Page 0 reg */ +#define IXGBE_M88E1500_COPPER_CTRL_RESET 0x8000 +#define IXGBE_M88E1500_COPPER_CTRL_AN_EN 0x1000 +#define IXGBE_M88E1500_COPPER_CTRL_RESTART_AN 0x0200 +#define IXGBE_M88E1500_COPPER_CTRL_FULL_DUPLEX 0x0100 +#define IXGBE_M88E1500_COPPER_CTRL_SPEED_MSB 0x0040 +#define IXGBE_M88E1500_1000T_CTRL 0x09 /* 1000Base-T Ctrl Reg */ +/* 1=Configure PHY as Master 0=Configure PHY as Slave */ +#define IXGBE_M88E1500_1000T_CTRL_MS_VALUE 0x0800 +/* 1=Master/Slave manual config value 0=Automatic Master/Slave config */ +#define IXGBE_M88E1500_1000T_CTRL_MS_ENABLE 0x1000 +#define IXGBE_M88E1500_1000T_STATUS 0x0A /* 1000Base-T Status Reg */ +#define IXGBE_M88E1500_AUTO_COPPER_SGMII 0x2 +#define IXGBE_M88E1500_AUTO_COPPER_BASEX 0x3 +#define IXGBE_M88E1500_STATUS_LINK 0x0004 /* Interface Link Bit */ +#define IXGBE_M88E1500_MAC_CTRL_1 0x10 +#define IXGBE_M88E1500_MAC_CTRL_1_MODE_MASK 0x0380 /* Mode Select */ +#define IXGBE_M88E1500_CFG_REG_1 0x0010 +#define IXGBE_M88E1500_CFG_REG_2 0x0011 +#define IXGBE_M88E1500_CFG_REG_3 0x0007 +#define IXGBE_M88E1500_MODE 0x0014 +#define IXGBE_M88E1500_PAGE_ADDR 0x16/* Page Offset reg */ +#define IXGBE_M88E1500_FIBER_CTRL 0x0/* Page 1 reg */ +#define IXGBE_M88E1500_FIBER_CTRL_RESET 0x8000 +#define IXGBE_M88E1500_FIBER_CTRL_SPEED_LSB 0x2000 +#define IXGBE_M88E1500_FIBER_CTRL_POWER_DOWN 0x0800 +#define IXGBE_M88E1500_FIBER_CTRL_DUPLEX_FULL 0x0100 +#define IXGBE_M88E1500_FIBER_CTRL_SPEED_MSB 0x0040 +#define IXGBE_M88E1500_EEE_CTRL_1 0x0/* Page 18 reg */ +#define IXGBE_M88E1500_EEE_CTRL_1_MS 0x0001/* EEE Master/Slave */ +#define IXGBE_M88E1500_GEN_CTRL 0x14/* Page 18 reg */ +#define IXGBE_M88E1500_GEN_CTRL_RESET 0x8000 +#define IXGBE_M88E1500_GEN_CTRL_SGMII_COPPER 0x0001/* Mode bits 0-2 */ + +/* M88E1500
Specific Registers */ +#define IXGBE_M88E1500_PHY_SPEC_CTRL 0x10 /* PHY Specific Ctrl Reg */ +#define IXGBE_M88E1500_PHY_SPEC_STATUS 0x11 /* PHY Specific Stat Reg */ + +#define IXGBE_M88E1500_PSCR_DOWNSHIFT_ENABLE 0x0800 +#define IXGBE_M88E1500_PSCR_DOWNSHIFT_MASK 0x7000 +#define IXGBE_M88E1500_PSCR_DOWNSHIFT_6X 0x5000 + s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); @@ -100,6 +143,15 @@ s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, bool autoneg_wait_to_complete); +s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); +s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +s32 ixgbe_setup_fc_x550a(struct ixgbe_hw *hw); +void ixgbe_fc_autoneg_x550a(struct ixgbe_hw *hw); s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed speed, diff --git a/drivers/net/ixgbe/ixgbe_82599_bypass.c b/drivers/net/ixgbe/ixgbe_82599_bypass.c index 21c42eac..de9fa5a7 100644 --- a/drivers/net/ixgbe/ixgbe_82599_bypass.c +++ b/drivers/net/ixgbe/ixgbe_82599_bypass.c @@ -297,8 +297,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw) { int rc; - if ((rc = ixgbe_init_hw(hw)) == 0 && - hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { + rc = ixgbe_init_hw(hw); + if (rc == 0 && hw->device_id == IXGBE_DEV_ID_82599_BYPASS) { hw->mac.ops.setup_link = &ixgbe_setup_mac_link_multispeed_fixed_fiber; @@ -306,8 +306,8 @@ ixgbe_bypass_init_hw(struct ixgbe_hw *hw) hw->mac.ops.get_media_type = &ixgbe_bypass_get_media_type; hw->mac.ops.disable_tx_laser = NULL; - hw->mac.ops.enable_tx_laser = NULL; - hw->mac.ops.flap_tx_laser = NULL; + hw->mac.ops.enable_tx_laser = NULL; + hw->mac.ops.flap_tx_laser = NULL; } return rc; diff --git a/drivers/net/ixgbe/ixgbe_bypass.c b/drivers/net/ixgbe/ixgbe_bypass.c index 73f608b9..70069284 100644 --- a/drivers/net/ixgbe/ixgbe_bypass.c +++ b/drivers/net/ixgbe/ixgbe_bypass.c @@ -82,7 +82,7 @@ ixgbe_bypass_set_time(struct ixgbe_adapter *adapter) BYPASS_CTL1_VALID_M | BYPASS_CTL1_OFFTRST_M; value = (sec & BYPASS_CTL1_TIME_M) | - BYPASS_CTL1_VALID | + BYPASS_CTL1_VALID | BYPASS_CTL1_OFFTRST; FUNC_PTR_OR_RET(adapter->bps.ops.bypass_set); @@ -275,8 +275,8 @@ s32 ixgbe_bypass_wd_timeout_store(struct rte_eth_dev *dev, u32 timeout) { struct ixgbe_hw *hw; - u32 status; - u32 mask; + u32 status; + u32 mask; s32 ret_val; struct ixgbe_adapter *adapter = IXGBE_DEV_TO_ADPATER(dev); diff --git a/drivers/net/ixgbe/ixgbe_bypass.h b/drivers/net/ixgbe/ixgbe_bypass.h index fcd97743..5f5c63e3 100644 --- a/drivers/net/ixgbe/ixgbe_bypass.h +++ b/drivers/net/ixgbe/ixgbe_bypass.h @@ -37,10 +37,10 @@ #ifdef RTE_NIC_BYPASS struct ixgbe_bypass_mac_ops { - s32 (*bypass_rw) (struct ixgbe_hw *hw, u32 cmd, u32 *status); - bool (*bypass_valid_rd) (u32 in_reg, u32 out_reg); - s32 (*bypass_set) (struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action); - s32 (*bypass_rd_eep) (struct ixgbe_hw *hw, u32 addr, u8 *value); + s32 (*bypass_rw)(struct ixgbe_hw *hw, u32 cmd, u32 *status); + bool (*bypass_valid_rd)(u32 in_reg, u32 out_reg); + s32 (*bypass_set)(struct ixgbe_hw *hw, u32 cmd, u32 event, u32 action); + s32 (*bypass_rd_eep)(struct ixgbe_hw *hw, u32 addr, u8 *value); }; struct 
ixgbe_bypass_info { diff --git a/drivers/net/ixgbe/ixgbe_bypass_defines.h b/drivers/net/ixgbe/ixgbe_bypass_defines.h index 22570acf..cafcb278 100644 --- a/drivers/net/ixgbe/ixgbe_bypass_defines.h +++ b/drivers/net/ixgbe/ixgbe_bypass_defines.h @@ -136,7 +136,7 @@ enum ixgbe_state_t { #define BYPASS_LOG_EVENT_SHIFT 28 #define BYPASS_LOG_CLEAR_SHIFT 24 /* bit offset */ #define IXGBE_DEV_TO_ADPATER(dev) \ - ((struct ixgbe_adapter*)(dev->data->dev_private)) + ((struct ixgbe_adapter *)(dev->data->dev_private)) /* extractions from ixgbe_phy.h */ #define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 diff --git a/drivers/net/ixgbe/ixgbe_ethdev.c b/drivers/net/ixgbe/ixgbe_ethdev.c index 3f1ebc15..0629b426 100644 --- a/drivers/net/ixgbe/ixgbe_ethdev.c +++ b/drivers/net/ixgbe/ixgbe_ethdev.c @@ -150,6 +150,7 @@ #define IXGBE_VMVIR_TAGA_ETAG_INSERT 0x08000000 #define IXGBE_VMTIR(_i) (0x00017000 + ((_i) * 4)) /* 64 of these (0-63) */ #define IXGBE_QDE_STRIP_TAG 0x00000004 +#define IXGBE_VTEICR_MASK 0x07 enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_NONE = 0, @@ -157,6 +158,9 @@ enum ixgbevf_xcast_modes { IXGBEVF_XCAST_MODE_ALLMULTI, }; +#define IXGBE_EXVET_VET_EXT_SHIFT 16 +#define IXGBE_DMATXCTL_VT_MASK 0xFFFF0000 + static int eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev); static int eth_ixgbe_dev_uninit(struct rte_eth_dev *eth_dev); static int ixgbe_dev_configure(struct rte_eth_dev *dev); @@ -174,11 +178,15 @@ static int ixgbe_dev_link_update(struct rte_eth_dev *dev, static void ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int ixgbe_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); static int ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); static void ixgbe_dev_stats_reset(struct rte_eth_dev *dev); static void ixgbe_dev_xstats_reset(struct rte_eth_dev *dev); +static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); +static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit); static int ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, uint16_t queue_id, uint8_t stat_idx, @@ -232,7 +240,7 @@ static void ixgbe_add_rar(struct rte_eth_dev *dev, struct ether_addr *mac_addr, static void ixgbe_remove_rar(struct rte_eth_dev *dev, uint32_t index); static void ixgbe_set_default_mac_addr(struct rte_eth_dev *dev, struct ether_addr *mac_addr); -static void ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config); +static void ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config); /* For Virtual Function support */ static int eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev); @@ -264,14 +272,14 @@ static void ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev); /* For Eth VMDQ APIs support */ static int ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct - ether_addr* mac_addr,uint8_t on); -static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev,uint8_t on); + ether_addr * mac_addr, uint8_t on); +static int ixgbe_uc_all_hash_table_set(struct rte_eth_dev *dev, uint8_t on); static int ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, uint16_t rx_mask, uint8_t on); -static int ixgbe_set_pool_rx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); -static int 
ixgbe_set_pool_tx(struct rte_eth_dev *dev,uint16_t pool,uint8_t on); +static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on); +static int ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on); static int ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, - uint64_t pool_mask,uint8_t vlan_on); + uint64_t pool_mask, uint8_t vlan_on); static int ixgbe_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on); @@ -361,6 +369,8 @@ static int ixgbe_timesync_read_time(struct rte_eth_dev *dev, struct timespec *timestamp); static int ixgbe_timesync_write_time(struct rte_eth_dev *dev, const struct timespec *timestamp); +static void ixgbevf_dev_interrupt_handler(struct rte_intr_handle *handle, + void *param); static int ixgbe_dev_l2_tunnel_eth_type_conf (struct rte_eth_dev *dev, struct rte_eth_l2_tunnel_conf *l2_tunnel); @@ -397,21 +407,21 @@ static int ixgbe_dev_udp_tunnel_port_del(struct rte_eth_dev *dev, last = latest; \ } -#define IXGBE_SET_HWSTRIP(h, q) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_SET_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (h)->bitmap[idx] |= 1 << bit;\ } while (0) -#define IXGBE_CLEAR_HWSTRIP(h, q) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_CLEAR_HWSTRIP(h, q) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (h)->bitmap[idx] &= ~(1 << bit);\ } while (0) -#define IXGBE_GET_HWSTRIP(h, q, r) do{\ - uint32_t idx = (q) / (sizeof ((h)->bitmap[0]) * NBBY); \ - uint32_t bit = (q) % (sizeof ((h)->bitmap[0]) * NBBY); \ +#define IXGBE_GET_HWSTRIP(h, q, r) do {\ + uint32_t idx = (q) / (sizeof((h)->bitmap[0]) * NBBY); \ + uint32_t bit = (q) % (sizeof((h)->bitmap[0]) * NBBY); \ (r) = (h)->bitmap[idx] >> bit & 1;\ } while (0) @@ -466,6 +476,7 @@ static const struct eth_dev_ops ixgbe_eth_dev_ops = { .xstats_get = ixgbe_dev_xstats_get, .stats_reset = ixgbe_dev_stats_reset, .xstats_reset = ixgbe_dev_xstats_reset, + .xstats_get_names = ixgbe_dev_xstats_get_names, .queue_stats_mapping_set = ixgbe_dev_queue_stats_mapping_set, .dev_infos_get = ixgbe_dev_info_get, .dev_supported_ptypes_get = ixgbe_dev_supported_ptypes_get, @@ -555,6 +566,7 @@ static const struct eth_dev_ops ixgbevf_eth_dev_ops = { .xstats_get = ixgbevf_dev_xstats_get, .stats_reset = ixgbevf_dev_stats_reset, .xstats_reset = ixgbevf_dev_stats_reset, + .xstats_get_names = ixgbevf_dev_xstats_get_names, .dev_close = ixgbevf_dev_close, .allmulticast_enable = ixgbevf_dev_allmulticast_enable, .allmulticast_disable = ixgbevf_dev_allmulticast_disable, @@ -685,6 +697,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_rxq_strings[] = { #define IXGBE_NB_RXQ_PRIO_STATS (sizeof(rte_ixgbe_rxq_strings) / \ sizeof(rte_ixgbe_rxq_strings[0])) +#define IXGBE_NB_RXQ_PRIO_VALUES 8 static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { {"xon_packets", offsetof(struct ixgbe_hw_stats, pxontxc)}, @@ -695,6 +708,7 @@ static const struct rte_ixgbe_xstats_name_off rte_ixgbe_txq_strings[] = { #define IXGBE_NB_TXQ_PRIO_STATS (sizeof(rte_ixgbe_txq_strings) / \ sizeof(rte_ixgbe_txq_strings[0])) +#define IXGBE_NB_TXQ_PRIO_VALUES 8 static const struct rte_ixgbe_xstats_name_off 
rte_ixgbevf_stats_strings[] = { {"rx_multicast_packets", offsetof(struct ixgbevf_hw_stats, vfmprc)}, @@ -901,8 +915,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, PMD_INIT_LOG(DEBUG, "Write 0x%x to RX IXGBE stat mapping reg:%d", stat_mappings->rqsmr[n], n); IXGBE_WRITE_REG(hw, IXGBE_RQSMR(n), stat_mappings->rqsmr[n]); - } - else { + } else { PMD_INIT_LOG(DEBUG, "Write 0x%x to TX IXGBE stat mapping reg:%d", stat_mappings->tqsm[n], n); IXGBE_WRITE_REG(hw, IXGBE_TQSM(n), stat_mappings->tqsm[n]); @@ -911,7 +924,7 @@ ixgbe_dev_queue_stats_mapping_set(struct rte_eth_dev *eth_dev, } static void -ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev) +ixgbe_restore_statistics_mapping(struct rte_eth_dev *dev) { struct ixgbe_stat_mapping_registers *stat_mappings = IXGBE_DEV_PRIVATE_TO_STAT_MAPPINGS(dev->data->dev_private); @@ -929,7 +942,7 @@ ixgbe_restore_statistics_mapping(struct rte_eth_dev * dev) } static void -ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) +ixgbe_dcb_init(struct ixgbe_hw *hw, struct ixgbe_dcb_config *dcb_config) { uint8_t i; struct ixgbe_dcb_tc_config *tc; @@ -952,7 +965,7 @@ ixgbe_dcb_init(struct ixgbe_hw *hw,struct ixgbe_dcb_config *dcb_config) tc = &dcb_config->tc_config[0]; tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; - for (i = 0; i< IXGBE_DCB_MAX_BW_GROUP; i++) { + for (i = 0; i < IXGBE_DCB_MAX_BW_GROUP; i++) { dcb_config->bw_percentage[IXGBE_DCB_TX_CONFIG][i] = 100; dcb_config->bw_percentage[IXGBE_DCB_RX_CONFIG][i] = 100; } @@ -1016,7 +1029,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); @@ -1039,17 +1052,18 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) * has already done this work. Only check we don't need a different * RX and TX function. */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { struct ixgbe_tx_queue *txq; /* TX queue function in primary, set by last queue initialized - * Tx queue may not initialized by primary process */ + * Tx queue may not initialized by primary process + */ if (eth_dev->data->tx_queues) { txq = eth_dev->data->tx_queues[eth_dev->data->nb_tx_queues-1]; ixgbe_set_tx_function(eth_dev, txq); } else { /* Use default TX function if we get here */ PMD_INIT_LOG(NOTICE, "No TX queues configured yet. " - "Using default TX function."); + "Using default TX function."); } ixgbe_set_rx_function(eth_dev); @@ -1086,7 +1100,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* Initialize DCB configuration*/ memset(dcb_config, 0, sizeof(struct ixgbe_dcb_config)); - ixgbe_dcb_init(hw,dcb_config); + ixgbe_dcb_init(hw, dcb_config); /* Get Hardware Flow Control setting */ hw->fc.requested_mode = ixgbe_fc_full; hw->fc.current_mode = ixgbe_fc_full; @@ -1127,11 +1141,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) if (diag == IXGBE_ERR_EEPROM_VERSION) { PMD_INIT_LOG(ERR, "This device is a pre-production adapter/" - "LOM. Please be aware there may be issues associated " - "with your hardware."); + "LOM. 
Please be aware there may be issues associated " + "with your hardware."); PMD_INIT_LOG(ERR, "If you are experiencing problems " - "please contact your Intel or hardware representative " - "who provided you with this hardware."); + "please contact your Intel or hardware representative " + "who provided you with this hardware."); } else if (diag == IXGBE_ERR_SFP_NOT_SUPPORTED) PMD_INIT_LOG(ERR, "Unsupported SFP+ Module"); if (diag) { @@ -1150,12 +1164,12 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing MAC addresses */ eth_dev->data->mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * - hw->mac.num_rar_entries, 0); + hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %u bytes needed to store " - "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + "Failed to allocate %u bytes needed to store " + "MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } /* Copy the permanent MAC address */ @@ -1164,11 +1178,11 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing hash filter MAC addresses */ eth_dev->data->hash_mac_addrs = rte_zmalloc("ixgbe", ETHER_ADDR_LEN * - IXGBE_VMDQ_NUM_UC_MAC, 0); + IXGBE_VMDQ_NUM_UC_MAC, 0); if (eth_dev->data->hash_mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %d bytes needed to store MAC addresses", - ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); + "Failed to allocate %d bytes needed to store MAC addresses", + ETHER_ADDR_LEN * IXGBE_VMDQ_NUM_UC_MAC); return -ENOMEM; } @@ -1198,8 +1212,8 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) (int) hw->mac.type, (int) hw->phy.type); PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", - eth_dev->data->port_id, pci_dev->id.vendor_id, - pci_dev->id.device_id); + eth_dev->data->port_id, pci_dev->id.vendor_id, + pci_dev->id.device_id); rte_intr_callback_register(&pci_dev->intr_handle, ixgbe_dev_interrupt_handler, @@ -1214,7 +1228,7 @@ eth_ixgbe_dev_init(struct rte_eth_dev *eth_dev) /* initialize 5tuple filter list */ TAILQ_INIT(&filter_info->fivetuple_list); memset(filter_info->fivetuple_mask, 0, - sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); + sizeof(uint32_t) * IXGBE_5TUPLE_ARRAY_SIZE); return 0; } @@ -1313,7 +1327,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) struct rte_pci_device *pci_dev; struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(eth_dev->data->dev_private); struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(eth_dev->data->dev_private); @@ -1327,8 +1341,9 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) /* for secondary processes, we don't initialise any further as primary * has already done this work. Only check we don't need a different - * RX function */ - if (rte_eal_process_type() != RTE_PROC_PRIMARY){ + * RX function + */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { struct ixgbe_tx_queue *txq; /* TX queue function in primary, set by last queue initialized * Tx queue may not initialized by primary process @@ -1339,7 +1354,7 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) } else { /* Use default TX function if we get here */ PMD_INIT_LOG(NOTICE, - "No TX queues configured yet. Using default TX function."); + "No TX queues configured yet. 
Using default TX function."); } ixgbe_set_rx_function(eth_dev); @@ -1398,12 +1413,12 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) /* Allocate memory for storing MAC addresses */ eth_dev->data->mac_addrs = rte_zmalloc("ixgbevf", ETHER_ADDR_LEN * - hw->mac.num_rar_entries, 0); + hw->mac.num_rar_entries, 0); if (eth_dev->data->mac_addrs == NULL) { PMD_INIT_LOG(ERR, - "Failed to allocate %u bytes needed to store " - "MAC addresses", - ETHER_ADDR_LEN * hw->mac.num_rar_entries); + "Failed to allocate %u bytes needed to store " + "MAC addresses", + ETHER_ADDR_LEN * hw->mac.num_rar_entries); return -ENOMEM; } @@ -1433,14 +1448,20 @@ eth_ixgbevf_dev_init(struct rte_eth_dev *eth_dev) /* reset the hardware with the new settings */ diag = hw->mac.ops.start_hw(hw); switch (diag) { - case 0: - break; + case 0: + break; - default: - PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); - return -EIO; + default: + PMD_INIT_LOG(ERR, "VF Initialization Failure: %d", diag); + return -EIO; } + rte_intr_callback_register(&pci_dev->intr_handle, + ixgbevf_dev_interrupt_handler, + (void *)eth_dev); + rte_intr_enable(&pci_dev->intr_handle); + ixgbevf_intr_enable(hw); + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x mac.type=%s", eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id, "ixgbe_mac_82599_vf"); @@ -1454,6 +1475,7 @@ static int eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) { struct ixgbe_hw *hw; + struct rte_pci_device *pci_dev = eth_dev->pci_dev; PMD_INIT_FUNC_TRACE(); @@ -1475,6 +1497,11 @@ eth_ixgbevf_dev_uninit(struct rte_eth_dev *eth_dev) rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; + rte_intr_disable(&pci_dev->intr_handle); + rte_intr_callback_unregister(&pci_dev->intr_handle, + ixgbevf_dev_interrupt_handler, + (void *)eth_dev); + return 0; } @@ -1537,7 +1564,7 @@ ixgbe_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vfta; uint32_t vid_idx; @@ -1575,15 +1602,47 @@ ixgbe_vlan_tpid_set(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int ret = 0; + uint32_t reg; + uint32_t qinq; + + qinq = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + qinq &= IXGBE_DMATXCTL_GDV; switch (vlan_type) { case ETH_VLAN_TYPE_INNER: - /* Only the high 16-bits is valid */ - IXGBE_WRITE_REG(hw, IXGBE_EXVET, tpid << 16); + if (qinq) { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } else { + ret = -ENOTSUP; + PMD_DRV_LOG(ERR, "Inner type is not supported" + " by single VLAN"); + } + break; + case ETH_VLAN_TYPE_OUTER: + if (qinq) { + /* Only the high 16-bits is valid */ + IXGBE_WRITE_REG(hw, IXGBE_EXVET, (uint32_t)tpid << + IXGBE_EXVET_VET_EXT_SHIFT); + } else { + reg = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + reg = (reg & (~IXGBE_VLNCTRL_VET)) | (uint32_t)tpid; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, reg); + reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg = (reg & (~IXGBE_DMATXCTL_VT_MASK)) + | ((uint32_t)tpid << IXGBE_DMATXCTL_VT_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); + } + break; default: ret = -EINVAL; - PMD_DRV_LOG(ERR, 
"Unsupported vlan type %d\n", vlan_type); + PMD_DRV_LOG(ERR, "Unsupported VLAN type %d", vlan_type); break; } @@ -1611,7 +1670,7 @@ ixgbe_vlan_hw_filter_enable(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vlnctrl; uint16_t i; @@ -1635,6 +1694,7 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) { struct ixgbe_hwstrip *hwstrip = IXGBE_DEV_PRIVATE_TO_HWSTRIP_BITMAP(dev->data->dev_private); + struct ixgbe_rx_queue *rxq; if (queue >= IXGBE_MAX_RX_QUEUE_NUM) return; @@ -1643,6 +1703,16 @@ ixgbe_vlan_hw_strip_bitmap_set(struct rte_eth_dev *dev, uint16_t queue, bool on) IXGBE_SET_HWSTRIP(hwstrip, queue); else IXGBE_CLEAR_HWSTRIP(hwstrip, queue); + + if (queue >= dev->data->nb_rx_queues) + return; + + rxq = dev->data->rx_queues[queue]; + + if (on) + rxq->vlan_flags = PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; + else + rxq->vlan_flags = PKT_RX_VLAN_PKT; } static void @@ -1659,12 +1729,12 @@ ixgbe_vlan_hw_strip_disable(struct rte_eth_dev *dev, uint16_t queue) PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl &= ~IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl &= ~IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 0); } @@ -1683,12 +1753,12 @@ ixgbe_vlan_hw_strip_enable(struct rte_eth_dev *dev, uint16_t queue) PMD_INIT_LOG(NOTICE, "82598EB not support queue level hw strip"); return; } - else { - /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ - ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); - ctrl |= IXGBE_RXDCTL_VME; - IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - } + + /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ + ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(queue)); + ctrl |= IXGBE_RXDCTL_VME; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); + /* record those setting for HW strip per queue */ ixgbe_vlan_hw_strip_bitmap_set(dev, queue, 1); } @@ -1707,8 +1777,7 @@ ixgbe_vlan_hw_strip_disable_all(struct rte_eth_dev *dev) ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ctrl &= ~IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { + } else { /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ for (i = 0; i < dev->data->nb_rx_queues; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); @@ -1735,8 +1804,7 @@ ixgbe_vlan_hw_strip_enable_all(struct rte_eth_dev *dev) ctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ctrl |= IXGBE_VLNCTRL_VME; IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, ctrl); - } - else { + } else { /* Other 10G NIC, the VLAN strip can be setup per queue in RXDCTL */ for (i = 0; i < dev->data->nb_rx_queues; i++) { ctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); @@ -1836,6 +1904,7 @@ ixgbe_vmdq_vlan_hw_filter_enable(struct rte_eth_dev *dev) IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* VLNCTRL: enable vlan filtering and allow all vlan tags through */ uint32_t vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, 
vlanctrl); } @@ -2116,7 +2185,8 @@ ixgbe_dev_start(struct rte_eth_dev *dev) ixgbe_stop_adapter(hw); /* reinitialize adapter - * this calls reset and start */ + * this calls reset and start + */ status = ixgbe_pf_reset_hw(hw); if (status != 0) return -1; @@ -2251,7 +2321,7 @@ skip_link_setup: /* resume enabled intr since hw reset */ ixgbe_enable_intr(dev); - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; ixgbe_vlan_offload_set(dev, mask); @@ -2471,8 +2541,8 @@ ixgbe_read_stats_registers(struct ixgbe_hw *hw, hw_stats->mspdc += IXGBE_READ_REG(hw, IXGBE_MSPDC); for (i = 0; i < 8; i++) { - uint32_t mp; - mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + uint32_t mp = IXGBE_READ_REG(hw, IXGBE_MPC(i)); + /* global total per queue */ hw_stats->mpc[i] += mp; /* Running comprehensive total for stats display */ @@ -2664,15 +2734,15 @@ ixgbe_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) /* Rx Errors */ stats->imissed = total_missed_rx; stats->ierrors = hw_stats->crcerrs + - hw_stats->mspdc + - hw_stats->rlec + - hw_stats->ruc + - hw_stats->roc + - hw_stats->illerrc + - hw_stats->errbc + - hw_stats->rfc + - hw_stats->fccrc + - hw_stats->fclast; + hw_stats->mspdc + + hw_stats->rlec + + hw_stats->ruc + + hw_stats->roc + + hw_stats->illerrc + + hw_stats->errbc + + hw_stats->rfc + + hw_stats->fccrc + + hw_stats->fclast; /* Tx Errors */ stats->oerrors = 0; @@ -2694,12 +2764,76 @@ ixgbe_dev_stats_reset(struct rte_eth_dev *dev) /* This function calculates the number of xstats based on the current config */ static unsigned ixgbe_xstats_calc_num(void) { - return IXGBE_NB_HW_STATS + (IXGBE_NB_RXQ_PRIO_STATS * 8) + - (IXGBE_NB_TXQ_PRIO_STATS * 8); + return IXGBE_NB_HW_STATS + + (IXGBE_NB_RXQ_PRIO_STATS * IXGBE_NB_RXQ_PRIO_VALUES) + + (IXGBE_NB_TXQ_PRIO_STATS * IXGBE_NB_TXQ_PRIO_VALUES); +} + +static int ixgbe_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, __rte_unused unsigned limit) +{ + const unsigned cnt_stats = ixgbe_xstats_calc_num(); + unsigned stat, i, count; + + if (xstats_names != NULL) { + count = 0; + + /* Note: limit >= cnt_stats checked upstream + * in rte_eth_xstats_names() + */ + + /* Extended stats from ixgbe_hw_stats */ + for (i = 0; i < IXGBE_NB_HW_STATS; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "%s", + rte_ixgbe_stats_strings[i].name); + count++; + } + + /* RX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_priority%u_%s", i, + rte_ixgbe_rxq_strings[stat].name); + count++; + } + } + + /* TX Priority Stats */ + for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_priority%u_%s", i, + rte_ixgbe_txq_strings[stat].name); + count++; + } + } + } + return cnt_stats; +} + +static int ixgbevf_dev_xstats_get_names(__rte_unused struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, unsigned limit) +{ + unsigned i; + + if (limit < IXGBEVF_NB_XSTATS && xstats_names != NULL) + return -ENOMEM; + + if (xstats_names != NULL) + for (i = 0; i < IXGBEVF_NB_XSTATS; i++) + snprintf(xstats_names[i].name, + sizeof(xstats_names[i].name), + "%s", rte_ixgbevf_stats_strings[i].name); + return IXGBEVF_NB_XSTATS; } static int 
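The xstats hunks above split name reporting (xstats_get_names) from value reporting (xstats_get), matching the ethdev API change of this release. A sketch of how an application consumes the split API, assuming the 16.x signatures (uint8_t port id) and with error handling trimmed:

    #include <inttypes.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <rte_ethdev.h>

    static void
    dump_xstats(uint8_t port_id)
    {
            int n = rte_eth_xstats_get_names(port_id, NULL, 0);
            struct rte_eth_xstat_name *names;
            struct rte_eth_xstat *values;
            int i;

            if (n <= 0)
                    return;
            names = calloc(n, sizeof(*names));
            values = calloc(n, sizeof(*values));
            if (names != NULL && values != NULL &&
                rte_eth_xstats_get_names(port_id, names, n) == n &&
                rte_eth_xstats_get(port_id, values, n) == n) {
                    /* names[i] and values[i] line up by index */
                    for (i = 0; i < n; i++)
                            printf("%s: %" PRIu64 "\n",
                                   names[i].name, values[i].value);
            }
            free(names);
            free(values);
    }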
-ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct ixgbe_hw *hw = @@ -2731,8 +2865,6 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats from ixgbe_hw_stats */ count = 0; for (i = 0; i < IXGBE_NB_HW_STATS; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), "%s", - rte_ixgbe_stats_strings[i].name); xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_stats_strings[i].offset); count++; @@ -2740,10 +2872,7 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* RX Priority Stats */ for (stat = 0; stat < IXGBE_NB_RXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_priority%u_%s", i, - rte_ixgbe_rxq_strings[stat].name); + for (i = 0; i < IXGBE_NB_RXQ_PRIO_VALUES; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_rxq_strings[stat].offset + (sizeof(uint64_t) * i)); @@ -2753,17 +2882,13 @@ ixgbe_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* TX Priority Stats */ for (stat = 0; stat < IXGBE_NB_TXQ_PRIO_STATS; stat++) { - for (i = 0; i < 8; i++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_priority%u_%s", i, - rte_ixgbe_txq_strings[stat].name); + for (i = 0; i < IXGBE_NB_TXQ_PRIO_VALUES; i++) { xstats[count].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbe_txq_strings[stat].offset + (sizeof(uint64_t) * i)); count++; } } - return count; } @@ -2786,7 +2911,7 @@ static void ixgbevf_update_stats(struct rte_eth_dev *dev) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Good Rx packet, include VF loopback */ @@ -2811,7 +2936,7 @@ ixgbevf_update_stats(struct rte_eth_dev *dev) } static int -ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) @@ -2828,8 +2953,6 @@ ixgbevf_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, /* Extended stats */ for (i = 0; i < IXGBEVF_NB_XSTATS; i++) { - snprintf(xstats[i].name, sizeof(xstats[i].name), - "%s", rte_ixgbevf_stats_strings[i].name); xstats[i].value = *(uint64_t *)(((char *)hw_stats) + rte_ixgbevf_stats_strings[i].offset); } @@ -2852,14 +2975,12 @@ ixgbevf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ibytes = hw_stats->vfgorc; stats->opackets = hw_stats->vfgptc; stats->obytes = hw_stats->vfgotc; - stats->imcasts = hw_stats->vfmprc; - /* stats->imcasts should be removed as imcasts is deprecated */ } static void ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) { - struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats*) + struct ixgbevf_hw_stats *hw_stats = (struct ixgbevf_hw_stats *) IXGBE_DEV_PRIVATE_TO_STATS(dev->data->dev_private); /* Sync HW register to the last stats */ @@ -2870,8 +2991,6 @@ ixgbevf_dev_stats_reset(struct rte_eth_dev *dev) hw_stats->vfgorc = 0; hw_stats->vfgptc = 0; hw_stats->vfgotc = 0; - hw_stats->vfmprc = 0; - } static void @@ -3356,7 +3475,7 @@ ixgbe_dev_interrupt_action(struct rte_eth_dev *dev) if (intr_enable_delay) { if 
(rte_eal_alarm_set(timeout * 1000, - ixgbe_dev_interrupt_delayed_handler, (void*)dev) < 0) + ixgbe_dev_interrupt_delayed_handler, (void *)dev) < 0) PMD_DRV_LOG(ERR, "Error setting alarm"); } else { PMD_DRV_LOG(DEBUG, "enable intr immediately"); @@ -3575,7 +3694,7 @@ ixgbe_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) * Enable flow control according to the current settings. */ static int -ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) +ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw, uint8_t tc_num) { int ret_val = 0; uint32_t mflcn_reg, fccfg_reg; @@ -3622,13 +3741,13 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_rx_pause: /* @@ -3645,20 +3764,20 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) * and the TX pause can not be disabled */ nb_rx_en = 0; - for (i =0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { reg = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); if (reg & IXGBE_FCRTH_FCEN) nb_rx_en++; } if (nb_rx_en > 1) - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_tx_pause: /* * Tx Flow control is enabled, and Rx Flow control is * disabled by software override. */ - fccfg_reg |=IXGBE_FCCFG_TFCE_PRIORITY; + fccfg_reg |= IXGBE_FCCFG_TFCE_PRIORITY; break; case ixgbe_fc_full: /* Flow control (both Rx and Tx) is enabled by SW override. */ @@ -3669,7 +3788,6 @@ ixgbe_dcb_pfc_enable_generic(struct ixgbe_hw *hw,uint8_t tc_num) PMD_DRV_LOG(DEBUG, "Flow control param set incorrectly"); ret_val = IXGBE_ERR_CONFIG; goto out; - break; } /* Set 802.3x based flow control settings. 
*/ @@ -3708,13 +3826,13 @@ out: } static int -ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev,uint8_t tc_num) +ixgbe_dcb_pfc_enable(struct rte_eth_dev *dev, uint8_t tc_num) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); int32_t ret_val = IXGBE_NOT_IMPLEMENTED; if (hw->mac.type != ixgbe_mac_82598EB) { - ret_val = ixgbe_dcb_pfc_enable_generic(hw,tc_num); + ret_val = ixgbe_dcb_pfc_enable_generic(hw, tc_num); } return ret_val; } @@ -3728,9 +3846,9 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p uint8_t tc_num; uint8_t map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; struct ixgbe_hw *hw = - IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_dcb_config *dcb_config = - IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_DCB_CFG(dev->data->dev_private); enum ixgbe_fc_mode rte_fcmode_2_ixgbe_fcmode[] = { ixgbe_fc_none, @@ -3763,7 +3881,7 @@ ixgbe_priority_flow_ctrl_set(struct rte_eth_dev *dev, struct rte_eth_pfc_conf *p hw->fc.low_water[tc_num] = pfc_conf->fc.low_water; hw->fc.high_water[tc_num] = pfc_conf->fc.high_water; - err = ixgbe_dcb_pfc_enable(dev,tc_num); + err = ixgbe_dcb_pfc_enable(dev, tc_num); /* Not negotiated is not an error case */ if ((err == IXGBE_SUCCESS) || (err == IXGBE_ERR_FC_NOT_NEGOTIATED)) @@ -3911,7 +4029,8 @@ ixgbe_dev_mtu_set(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ + * feature has not been enabled before. + */ if (!dev->data->scattered_rx && (frame_size + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) @@ -3971,7 +4090,7 @@ ixgbevf_intr_enable(struct ixgbe_hw *hw) static int ixgbevf_dev_configure(struct rte_eth_dev *dev) { - struct rte_eth_conf* conf = &dev->data->dev_conf; + struct rte_eth_conf *conf = &dev->data->dev_conf; struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)dev->data->dev_private; @@ -4033,10 +4152,10 @@ ixgbevf_dev_start(struct rte_eth_dev *dev) } /* Set vfta */ - ixgbevf_set_vfta_all(dev,1); + ixgbevf_set_vfta_all(dev, 1); /* Set HW strip */ - mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | \ + mask = ETH_VLAN_STRIP_MASK | ETH_VLAN_FILTER_MASK | ETH_VLAN_EXTEND_MASK; ixgbevf_vlan_offload_set(dev, mask); @@ -4077,6 +4196,8 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) PMD_INIT_FUNC_TRACE(); + ixgbevf_intr_disable(hw); + hw->adapter_stopped = 1; ixgbe_stop_adapter(hw); @@ -4084,7 +4205,7 @@ ixgbevf_dev_stop(struct rte_eth_dev *dev) * Clear what we set, but we still keep shadow_vfta to * restore after device starts */ - ixgbevf_set_vfta_all(dev,0); + ixgbevf_set_vfta_all(dev, 0); /* Clear stored conf */ dev->data->scattered_rx = 0; @@ -4123,18 +4244,19 @@ ixgbevf_dev_close(struct rte_eth_dev *dev) static void ixgbevf_set_vfta_all(struct rte_eth_dev *dev, bool on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); int i = 0, j = 0, vfta = 0, mask = 1; - for (i = 0; i < IXGBE_VFTA_SIZE; i++){ + for (i = 0; i < IXGBE_VFTA_SIZE; i++) { vfta = shadow_vfta->vfta[i]; if (vfta) { mask = 1; - for (j = 0; j < 32; j++){ + for (j = 0; j < 32; j++) { if (vfta & mask) - ixgbe_set_vfta(hw, (i<<5)+j, 0, on); - mask<<=1; + ixgbe_set_vfta(hw, (i<<5)+j, 0, + on, false); + mask <<= 1; } } } @@ -4146,7 +4268,7 @@ 
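Further down, ixgbevf_set_vfta_all() walks the shadow VFTA: word i of the bitmap covers VLAN ids (i << 5) through (i << 5) + 31. The same loop in isolation, as a sketch (restore_vlan_filters is an illustrative name; ixgbe_set_vfta() is shown with the extra vlvf_bypass argument this base-code update introduces):

    #include <stdbool.h>
    #include <stdint.h>

    static void
    restore_vlan_filters(struct ixgbe_hw *hw, const uint32_t *shadow_vfta,
                         bool on)
    {
            int i, j;

            for (i = 0; i < IXGBE_VFTA_SIZE; i++) {
                    uint32_t vfta = shadow_vfta[i];

                    if (vfta == 0)
                            continue;
                    for (j = 0; j < 32; j++)
                            if (vfta & (1u << j))
                                    ixgbe_set_vfta(hw, (i << 5) + j, 0,
                                                   on, false);
            }
    }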
ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - struct ixgbe_vfta * shadow_vfta = + struct ixgbe_vfta *shadow_vfta = IXGBE_DEV_PRIVATE_TO_VFTA(dev->data->dev_private); uint32_t vid_idx = 0; uint32_t vid_bit = 0; @@ -4155,7 +4277,7 @@ ixgbevf_vlan_filter_set(struct rte_eth_dev *dev, uint16_t vlan_id, int on) PMD_INIT_FUNC_TRACE(); /* vind is not used in VF driver, set to 0, check ixgbe_set_vfta_vf */ - ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on); + ret = ixgbe_set_vfta(hw, vlan_id, 0, !!on, false); if (ret) { PMD_INIT_LOG(ERR, "Unable to set VF vlan"); return ret; @@ -4191,7 +4313,7 @@ ixgbevf_vlan_strip_queue_set(struct rte_eth_dev *dev, uint16_t queue, int on) ctrl &= ~IXGBE_RXDCTL_VME; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(queue), ctrl); - ixgbe_vlan_hw_strip_bitmap_set( dev, queue, on); + ixgbe_vlan_hw_strip_bitmap_set(dev, queue, on); } static void @@ -4207,7 +4329,7 @@ ixgbevf_vlan_offload_set(struct rte_eth_dev *dev, int mask) on = !!(dev->data->dev_conf.rxmode.hw_vlan_strip); for (i = 0; i < hw->mac.max_rx_queues; i++) - ixgbevf_vlan_strip_queue_set(dev,i,on); + ixgbevf_vlan_strip_queue_set(dev, i, on); } } @@ -4227,9 +4349,10 @@ ixgbe_vmdq_mode_check(struct ixgbe_hw *hw) } static uint32_t -ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) +ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr *uc_addr) { uint32_t vector = 0; + switch (hw->mac.mc_filter_type) { case 0: /* use bits [47:36] of the address */ vector = ((uc_addr->addr_bytes[4] >> 4) | @@ -4257,8 +4380,8 @@ ixgbe_uta_vector(struct ixgbe_hw *hw, struct ether_addr* uc_addr) } static int -ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, - uint8_t on) +ixgbe_uc_hash_table_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr, + uint8_t on) { uint32_t vector; uint32_t uta_idx; @@ -4279,7 +4402,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, if (hw->mac.type < ixgbe_mac_82599EB) return -ENOTSUP; - vector = ixgbe_uta_vector(hw,mac_addr); + vector = ixgbe_uta_vector(hw, mac_addr); uta_idx = (vector >> ixgbe_uta_bit_shift) & ixgbe_uta_idx_mask; uta_shift = vector & ixgbe_uta_bit_mask; @@ -4304,7 +4427,7 @@ ixgbe_uc_hash_table_set(struct rte_eth_dev *dev,struct ether_addr* mac_addr, IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); else - IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL,hw->mac.mc_filter_type); + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); return 0; } @@ -4389,7 +4512,7 @@ ixgbe_set_pool_rx_mode(struct rte_eth_dev *dev, uint16_t pool, static int ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) { - uint32_t reg,addr; + uint32_t reg, addr; uint32_t val; const uint8_t bit1 = 0x1; @@ -4399,16 +4522,26 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) if (ixgbe_vmdq_mode_check(hw) < 0) return -ENOTSUP; - addr = IXGBE_VFRE(pool >= ETH_64_POOLS/2); + if (pool >= ETH_64_POOLS) + return -EINVAL; + + /* for pool >= 32, set bit in PFVFRE[1], otherwise PFVFRE[0] */ + if (pool >= 32) { + addr = IXGBE_VFRE(1); + val = bit1 << (pool - 32); + } else { + addr = IXGBE_VFRE(0); + val = bit1 << pool; + } + reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; if (on) reg |= val; else reg &= ~val; - IXGBE_WRITE_REG(hw, addr,reg); + IXGBE_WRITE_REG(hw, addr, reg); return 0; } @@ -4416,7 +4549,7 @@ ixgbe_set_pool_rx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) static int 
ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) { - uint32_t reg,addr; + uint32_t reg, addr; uint32_t val; const uint8_t bit1 = 0x1; @@ -4426,16 +4559,26 @@ ixgbe_set_pool_tx(struct rte_eth_dev *dev, uint16_t pool, uint8_t on) if (ixgbe_vmdq_mode_check(hw) < 0) return -ENOTSUP; - addr = IXGBE_VFTE(pool >= ETH_64_POOLS/2); + if (pool >= ETH_64_POOLS) + return -EINVAL; + + /* for pool >= 32, set bit in PFVFTE[1], otherwise PFVFTE[0] */ + if (pool >= 32) { + addr = IXGBE_VFTE(1); + val = bit1 << (pool - 32); + } else { + addr = IXGBE_VFTE(0); + val = bit1 << pool; + } + reg = IXGBE_READ_REG(hw, addr); - val = bit1 << pool; if (on) reg |= val; else reg &= ~val; - IXGBE_WRITE_REG(hw, addr,reg); + IXGBE_WRITE_REG(hw, addr, reg); return 0; } @@ -4453,7 +4596,8 @@ ixgbe_set_pool_vlan_filter(struct rte_eth_dev *dev, uint16_t vlan, return -ENOTSUP; for (pool_idx = 0; pool_idx < ETH_64_POOLS; pool_idx++) { if (pool_mask & ((uint64_t)(1ULL << pool_idx))) { - ret = hw->mac.ops.set_vfta(hw,vlan,pool_idx,vlan_on); + ret = hw->mac.ops.set_vfta(hw, vlan, pool_idx, + vlan_on, false); if (ret < 0) return ret; } @@ -4475,7 +4619,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, struct rte_eth_mirror_conf *mirror_conf, uint8_t rule_id, uint8_t on) { - uint32_t mr_ctl,vlvf; + uint32_t mr_ctl, vlvf; uint32_t mp_lsb = 0; uint32_t mv_msb = 0; uint32_t mv_lsb = 0; @@ -4488,7 +4632,7 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, const uint8_t vlan_mask_offset = 32; const uint8_t dst_pool_offset = 8; const uint8_t rule_mr_offset = 4; - const uint8_t mirror_rule_mask= 0x0F; + const uint8_t mirror_rule_mask = 0x0F; struct ixgbe_mirror_info *mr_info = (IXGBE_DEV_PRIVATE_TO_PFDATA(dev->data->dev_private)); @@ -4511,11 +4655,12 @@ ixgbe_mirror_rule_set(struct rte_eth_dev *dev, if (mirror_conf->rule_type & ETH_MIRROR_VLAN) { mirror_type |= IXGBE_MRCTL_VLME; /* Check if vlan id is valid and find conresponding VLAN ID index in VLVF */ - for (i = 0;i < IXGBE_VLVF_ENTRIES; i++) { + for (i = 0; i < IXGBE_VLVF_ENTRIES; i++) { if (mirror_conf->vlan.vlan_mask & (1ULL << i)) { /* search vlan id related pool vlan filter index */ reg_index = ixgbe_find_vlvf_slot(hw, - mirror_conf->vlan.vlan_id[i]); + mirror_conf->vlan.vlan_id[i], + false); if (reg_index < 0) return -EINVAL; vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_index)); @@ -4800,6 +4945,9 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) uint32_t q_idx; uint32_t vector_idx = IXGBE_MISC_VEC_ID; + /* Configure VF other cause ivar */ + ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); + /* won't configure msix register if no mapping is done * between intr vector and event fd. */ @@ -4814,9 +4962,6 @@ ixgbevf_configure_msix(struct rte_eth_dev *dev) ixgbevf_set_ivar_map(hw, 0, q_idx, vector_idx); intr_handle->intr_vec[q_idx] = vector_idx; } - - /* Configure VF other cause ivar */ - ixgbevf_set_ivar_map(hw, -1, 1, vector_idx); } /** @@ -5306,7 +5451,8 @@ ixgbevf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) return -EINVAL; /* refuse mtu that requires the support of scattered packets when this - * feature has not been enabled before. */ + * feature has not been enabled before. 
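Both ixgbe_set_pool_rx() and ixgbe_set_pool_tx() above now map a pool index to one of two 32-bit registers instead of shifting a bit by the full pool number, which overflowed for pools 32-63. The shared mapping as a helper, for illustration only (pool_reg_and_bit is not a driver function):

    #include <errno.h>
    #include <stdint.h>

    static int
    pool_reg_and_bit(uint16_t pool, uint32_t *reg_idx, uint32_t *bit)
    {
            if (pool >= 64)                  /* ETH_64_POOLS */
                    return -EINVAL;
            *reg_idx = (pool >= 32) ? 1 : 0; /* PFVFRE[1]/PFVFTE[1] vs [0] */
            *bit = 1u << (pool & 31);        /* bit position within the reg */
            return 0;
    }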
+ */ if (!dev->data->scattered_rx && (max_frame + 2 * IXGBE_VLAN_TAG_SIZE > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM)) @@ -7135,6 +7281,67 @@ ixgbevf_dev_allmulticast_disable(struct rte_eth_dev *dev) ixgbevf_update_xcast_mode(hw, IXGBEVF_XCAST_MODE_NONE); } +static void ixgbevf_mbx_process(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + u32 in_msg = 0; + + if (ixgbe_read_mbx(hw, &in_msg, 1, 0)) + return; + + /* PF reset VF event */ + if (in_msg == IXGBE_PF_CONTROL_MSG) + _rte_eth_dev_callback_process(dev, RTE_ETH_EVENT_INTR_RESET); +} + +static int +ixgbevf_dev_interrupt_get_status(struct rte_eth_dev *dev) +{ + uint32_t eicr; + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + ixgbevf_intr_disable(hw); + + /* read-on-clear nic registers here */ + eicr = IXGBE_READ_REG(hw, IXGBE_VTEICR); + intr->flags = 0; + + /* only one misc vector supported - mailbox */ + eicr &= IXGBE_VTEICR_MASK; + if (eicr == IXGBE_MISC_VEC_ID) + intr->flags |= IXGBE_FLAG_MAILBOX; + + return 0; +} + +static int +ixgbevf_dev_interrupt_action(struct rte_eth_dev *dev) +{ + struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); + struct ixgbe_interrupt *intr = + IXGBE_DEV_PRIVATE_TO_INTR(dev->data->dev_private); + + if (intr->flags & IXGBE_FLAG_MAILBOX) { + ixgbevf_mbx_process(dev); + intr->flags &= ~IXGBE_FLAG_MAILBOX; + } + + ixgbevf_intr_enable(hw); + + return 0; +} + +static void +ixgbevf_dev_interrupt_handler(__rte_unused struct rte_intr_handle *handle, + void *param) +{ + struct rte_eth_dev *dev = (struct rte_eth_dev *)param; + + ixgbevf_dev_interrupt_get_status(dev); + ixgbevf_dev_interrupt_action(dev); +} + static struct rte_driver rte_ixgbe_driver = { .type = PMD_PDEV, .init = rte_ixgbe_pmd_init, diff --git a/drivers/net/ixgbe/ixgbe_fdir.c b/drivers/net/ixgbe/ixgbe_fdir.c index 2e4c353a..861c7cbe 100644 --- a/drivers/net/ixgbe/ixgbe_fdir.c +++ b/drivers/net/ixgbe/ixgbe_fdir.c @@ -189,14 +189,13 @@ fdir_enable_82599(struct ixgbe_hw *hw, uint32_t fdirctrl) IXGBE_WRITE_FLUSH(hw); for (i = 0; i < IXGBE_FDIR_INIT_DONE_POLL; i++) { if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & - IXGBE_FDIRCTRL_INIT_DONE) + IXGBE_FDIRCTRL_INIT_DONE) break; msec_delay(1); } if (i >= IXGBE_FDIR_INIT_DONE_POLL) { - PMD_INIT_LOG(ERR, "Flow Director poll time exceeded " - "during enabling!"); + PMD_INIT_LOG(ERR, "Flow Director poll time exceeded during enabling!"); return -ETIMEDOUT; } return 0; @@ -282,6 +281,7 @@ static inline uint32_t reverse_fdir_bitmasks(uint16_t hi_dword, uint16_t lo_dword) { uint32_t mask = hi_dword << 16; + mask |= lo_dword; mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); @@ -810,8 +810,10 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16); /* Process bits 0 and 16 */ - if (key & 0x0001) hash_result ^= lo_hash_dword; - if (key & 0x00010000) hash_result ^= hi_hash_dword; + if (key & 0x0001) + hash_result ^= lo_hash_dword; + if (key & 0x00010000) + hash_result ^= hi_hash_dword; /* * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to @@ -822,9 +824,11 @@ ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input, /* process the remaining 30 bits in the key 2 bits at a time */ - for (i = 15; i; i-- ) { - if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i; - if (key & 
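ixgbevf_mbx_process() above raises RTE_ETH_EVENT_INTR_RESET when the PF sends a control (reset) message. A sketch of the application side that would observe that event, assuming the 16.x callback signature; the callback body and names are illustrative:

    #include <stdio.h>
    #include <rte_ethdev.h>

    static void
    vf_reset_event_cb(uint8_t port_id, enum rte_eth_event_type type, void *arg)
    {
            (void)arg;
            if (type == RTE_ETH_EVENT_INTR_RESET)
                    printf("port %u: PF requested a VF reset\n", port_id);
    }

    static int
    watch_vf_reset(uint8_t port_id)
    {
            return rte_eth_dev_callback_register(port_id,
                                                 RTE_ETH_EVENT_INTR_RESET,
                                                 vf_reset_event_cb, NULL);
    }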
(0x00010000 << i)) hash_result ^= hi_hash_dword >> i; + for (i = 15; i; i--) { + if (key & (0x0001 << i)) + hash_result ^= lo_hash_dword >> i; + if (key & (0x00010000 << i)) + hash_result ^= hi_hash_dword >> i; } return hash_result; @@ -1016,7 +1020,7 @@ fdir_add_signature_filter_82599(struct ixgbe_hw *hw, /* configure FDIRCMD register */ fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW | - IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; fdircmd |= (uint32_t)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; @@ -1080,9 +1084,9 @@ fdir_erase_filter_82599(struct ixgbe_hw *hw, uint32_t fdirhash) */ static int ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, - const struct rte_eth_fdir_filter *fdir_filter, - bool del, - bool update) + const struct rte_eth_fdir_filter *fdir_filter, + bool del, + bool update) { struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); uint32_t fdircmd_flags; @@ -1092,7 +1096,7 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, bool is_perfect = FALSE; int err; struct ixgbe_hw_fdir_info *info = - IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; if (fdir_mode == RTE_FDIR_MODE_NONE) @@ -1109,12 +1113,12 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, hw->mac.type == ixgbe_mac_X550EM_x || hw->mac.type == ixgbe_mac_X550EM_a) && (fdir_filter->input.flow_type == - RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) && + RTE_ETH_FLOW_NONFRAG_IPV4_OTHER) && (info->mask.src_port_mask != 0 || info->mask.dst_port_mask != 0)) { PMD_DRV_LOG(ERR, "By this device," - " IPv4-other is not supported without" - " L4 protocol and ports masked!"); + " IPv4-other is not supported without" + " L4 protocol and ports masked!"); return -ENOTSUP; } @@ -1132,16 +1136,16 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, if (is_perfect) { if (input.formatted.flow_type & IXGBE_ATR_L4TYPE_IPV6_MASK) { PMD_DRV_LOG(ERR, "IPv6 is not supported in" - " perfect mode!"); + " perfect mode!"); return -ENOTSUP; } fdirhash = atr_compute_perfect_hash_82599(&input, - dev->data->dev_conf.fdir_conf.pballoc); + dev->data->dev_conf.fdir_conf.pballoc); fdirhash |= fdir_filter->soft_id << - IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; + IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; } else fdirhash = atr_compute_sig_hash_82599(&input, - dev->data->dev_conf.fdir_conf.pballoc); + dev->data->dev_conf.fdir_conf.pballoc); if (del) { err = fdir_erase_filter_82599(hw, fdirhash); @@ -1159,22 +1163,22 @@ ixgbe_add_del_fdir_filter(struct rte_eth_dev *dev, fdircmd_flags |= IXGBE_FDIRCMD_DROP; } else { PMD_DRV_LOG(ERR, "Drop option is not supported in" - " signature mode."); + " signature mode."); return -EINVAL; } } else if (fdir_filter->action.behavior == RTE_ETH_FDIR_ACCEPT && - fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM) + fdir_filter->action.rx_queue < IXGBE_MAX_RX_QUEUE_NUM) queue = (uint8_t)fdir_filter->action.rx_queue; else return -EINVAL; if (is_perfect) { err = fdir_write_perfect_filter_82599(hw, &input, queue, - fdircmd_flags, fdirhash, - fdir_mode); + fdircmd_flags, fdirhash, + fdir_mode); } else { err = fdir_add_signature_filter_82599(hw, &input, queue, - fdircmd_flags, fdirhash); + fdircmd_flags, fdirhash); } if (err < 0) PMD_DRV_LOG(ERR, "Fail to add FDIR filter!"); @@ -1269,22 +1273,22 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st { struct ixgbe_hw *hw 
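The reformatted hash loop above folds each set bit of the 16-bit ATR key into the result by XOR-ing right-shifted copies of the low/high hash dwords. The loop in isolation, as a sketch (the preparation of lo/hi hash dwords and the VLAN/VM-pool folding steps are elided):

    #include <stdint.h>

    static inline uint32_t
    fold_hash(uint32_t key, uint32_t lo_hash_dword, uint32_t hi_hash_dword)
    {
            uint32_t hash_result = 0;
            int i;

            /* bits 0 and 16 of the key select the unshifted dwords */
            if (key & 0x0001)
                    hash_result ^= lo_hash_dword;
            if (key & 0x00010000)
                    hash_result ^= hi_hash_dword;

            /* the remaining bits fold in right-shifted copies */
            for (i = 15; i; i--) {
                    if (key & (0x0001u << i))
                            hash_result ^= lo_hash_dword >> i;
                    if (key & (0x00010000u << i))
                            hash_result ^= hi_hash_dword >> i;
            }
            return hash_result;
    }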
= IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); struct ixgbe_hw_fdir_info *info = - IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_FDIR_INFO(dev->data->dev_private); uint32_t reg, max_num; enum rte_fdir_mode fdir_mode = dev->data->dev_conf.fdir_conf.mode; /* Get the information from registers */ reg = IXGBE_READ_REG(hw, IXGBE_FDIRFREE); info->collision = (uint16_t)((reg & IXGBE_FDIRFREE_COLL_MASK) >> - IXGBE_FDIRFREE_COLL_SHIFT); + IXGBE_FDIRFREE_COLL_SHIFT); info->free = (uint16_t)((reg & IXGBE_FDIRFREE_FREE_MASK) >> - IXGBE_FDIRFREE_FREE_SHIFT); + IXGBE_FDIRFREE_FREE_SHIFT); reg = IXGBE_READ_REG(hw, IXGBE_FDIRLEN); info->maxhash = (uint16_t)((reg & IXGBE_FDIRLEN_MAXHASH_MASK) >> - IXGBE_FDIRLEN_MAXHASH_SHIFT); + IXGBE_FDIRLEN_MAXHASH_SHIFT); info->maxlen = (uint8_t)((reg & IXGBE_FDIRLEN_MAXLEN_MASK) >> - IXGBE_FDIRLEN_MAXLEN_SHIFT); + IXGBE_FDIRLEN_MAXLEN_SHIFT); reg = IXGBE_READ_REG(hw, IXGBE_FDIRUSTAT); info->remove += (reg & IXGBE_FDIRUSTAT_REMOVE_MASK) >> @@ -1310,10 +1314,10 @@ ixgbe_fdir_stats_get(struct rte_eth_dev *dev, struct rte_eth_fdir_stats *fdir_st reg = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); max_num = (1 << (FDIRENTRIES_NUM_SHIFT + - (reg & FDIRCTRL_PBALLOC_MASK))); + (reg & FDIRCTRL_PBALLOC_MASK))); if (fdir_mode >= RTE_FDIR_MODE_PERFECT && fdir_mode <= RTE_FDIR_MODE_PERFECT_TUNNEL) - fdir_stats->guarant_cnt = max_num - fdir_stats->free; + fdir_stats->guarant_cnt = max_num - fdir_stats->free; else if (fdir_mode == RTE_FDIR_MODE_SIGNATURE) fdir_stats->guarant_cnt = max_num * 4 - fdir_stats->free; diff --git a/drivers/net/ixgbe/ixgbe_pf.c b/drivers/net/ixgbe/ixgbe_pf.c index a2787d90..56393ff2 100644 --- a/drivers/net/ixgbe/ixgbe_pf.c +++ b/drivers/net/ixgbe/ixgbe_pf.c @@ -97,9 +97,9 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) struct ixgbe_vf_info **vfinfo = IXGBE_DEV_PRIVATE_TO_P_VFDATA(eth_dev->data->dev_private); struct ixgbe_mirror_info *mirror_info = - IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_PFDATA(eth_dev->data->dev_private); struct ixgbe_uta_info *uta_info = - IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private); + IXGBE_DEV_PRIVATE_TO_UTA(eth_dev->data->dev_private); struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(eth_dev->data->dev_private); uint16_t vf_num; @@ -108,15 +108,16 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) PMD_INIT_FUNC_TRACE(); RTE_ETH_DEV_SRIOV(eth_dev).active = 0; - if (0 == (vf_num = dev_num_vf(eth_dev))) + vf_num = dev_num_vf(eth_dev); + if (vf_num == 0) return; *vfinfo = rte_zmalloc("vf_info", sizeof(struct ixgbe_vf_info) * vf_num, 0); if (*vfinfo == NULL) rte_panic("Cannot allocate memory for private VF data\n"); - memset(mirror_info,0,sizeof(struct ixgbe_mirror_info)); - memset(uta_info,0,sizeof(struct ixgbe_uta_info)); + memset(mirror_info, 0, sizeof(struct ixgbe_mirror_info)); + memset(uta_info, 0, sizeof(struct ixgbe_uta_info)); hw->mac.mc_filter_type = 0; if (vf_num >= ETH_32_POOLS) { @@ -141,8 +142,6 @@ void ixgbe_pf_host_init(struct rte_eth_dev *eth_dev) /* set mb interrupt mask */ ixgbe_mb_intr_setup(eth_dev); - - return; } void ixgbe_pf_host_uninit(struct rte_eth_dev *eth_dev) @@ -220,7 +219,8 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev) uint32_t vlanctrl; int i; - if (0 == (vf_num = dev_num_vf(eth_dev))) + vf_num = dev_num_vf(eth_dev); + if (vf_num == 0) return -1; /* enable VMDq and set the default pool for PF */ @@ -280,19 +280,18 @@ int ixgbe_pf_host_configure(struct rte_eth_dev *eth_dev) } IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, 
gcr_ext); - IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); - /* + /* * enable vlan filtering and allow all vlan tags through */ - vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); - vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ - IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); + vlanctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); + vlanctrl |= IXGBE_VLNCTRL_VFE; /* enable vlan filters */ + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlanctrl); - /* VFTA - enable all vlan filters */ - for (i = 0; i < IXGBE_MAX_VFTA; i++) { - IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); - } + /* VFTA - enable all vlan filters */ + for (i = 0; i < IXGBE_MAX_VFTA; i++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), 0xFFFFFFFF); /* Enable MAC Anti-Spoofing */ hw->mac.ops.set_mac_anti_spoofing(hw, FALSE, vf_num); @@ -481,7 +480,7 @@ ixgbe_vf_set_mac_addr(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) int rar_entry = hw->mac.num_rar_entries - (vf + 1); uint8_t *new_mac = (uint8_t *)(&msgbuf[1]); - if (is_valid_assigned_ether_addr((struct ether_addr*)new_mac)) { + if (is_valid_assigned_ether_addr((struct ether_addr *)new_mac)) { rte_memcpy(vfinfo[vf].vf_mac_addresses, new_mac, 6); return hw->mac.ops.set_rar(hw, rar_entry, new_mac, vf, IXGBE_RAH_AV); } @@ -545,7 +544,7 @@ ixgbe_vf_set_vlan(struct rte_eth_dev *dev, uint32_t vf, uint32_t *msgbuf) vfinfo[vf].vlan_count++; else if (vfinfo[vf].vlan_count) vfinfo[vf].vlan_count--; - return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add); + return hw->mac.ops.set_vfta(hw, vid, vf, (bool)add, false); } static int @@ -678,6 +677,7 @@ ixgbe_rcv_msg_from_vf(struct rte_eth_dev *dev, uint16_t vf) /* perform VF reset */ if (msgbuf[0] == IXGBE_VF_RESET) { int ret = ixgbe_vf_reset(dev, vf, msgbuf); + vfinfo[vf].clear_to_send = true; return ret; } diff --git a/drivers/net/ixgbe/ixgbe_rxtx.c b/drivers/net/ixgbe/ixgbe_rxtx.c index 9fb38a6c..8a306b06 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.c +++ b/drivers/net/ixgbe/ixgbe_rxtx.c @@ -88,17 +88,6 @@ PKT_TX_TCP_SEG | \ PKT_TX_OUTER_IP_CKSUM) -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - - #if 1 #define RTE_PMD_USE_PREFETCH #endif @@ -352,6 +341,7 @@ ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, nb_tx = 0; while (nb_pkts) { uint16_t ret, n; + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_TX_MAX_BURST); ret = tx_xmit_pkts(tx_queue, &(tx_pkts[nb_tx]), n); nb_tx = (uint16_t)(nb_tx + ret); @@ -478,30 +468,28 @@ ixgbe_set_xmit_ctx(struct ixgbe_tx_queue *txq, */ static inline uint32_t what_advctx_update(struct ixgbe_tx_queue *txq, uint64_t flags, - union ixgbe_tx_offload tx_offload) + union ixgbe_tx_offload tx_offload) { /* If match with the current used context */ if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && - (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == - (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] - & tx_offload.data[0])) && - (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == - (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] - & tx_offload.data[1])))) { - return txq->ctx_curr; - } + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) + return txq->ctx_curr; /* What if match with the next context 
*/ txq->ctx_curr ^= 1; if (likely((txq->ctx_cache[txq->ctx_curr].flags == flags) && - (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == - (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] - & tx_offload.data[0])) && - (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == - (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] - & tx_offload.data[1])))) { - return txq->ctx_curr; - } + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[0] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[0] + & tx_offload.data[0])) && + (txq->ctx_cache[txq->ctx_curr].tx_offload.data[1] == + (txq->ctx_cache[txq->ctx_curr].tx_offload_mask.data[1] + & tx_offload.data[1])))) + return txq->ctx_curr; /* Mismatch, use the previous context */ return IXGBE_CTX_NUM; @@ -511,6 +499,7 @@ static inline uint32_t tx_desc_cksum_flags_to_olinfo(uint64_t ol_flags) { uint32_t tmp = 0; + if ((ol_flags & PKT_TX_L4_MASK) != PKT_TX_L4_NO_CKSUM) tmp |= IXGBE_ADVTXD_POPTS_TXSM; if (ol_flags & PKT_TX_IP_CKSUM) @@ -524,6 +513,7 @@ static inline uint32_t tx_desc_ol_flags_to_cmdtype(uint64_t ol_flags) { uint32_t cmdtype = 0; + if (ol_flags & PKT_TX_VLAN_PKT) cmdtype |= IXGBE_ADVTXD_DCMD_VLE; if (ol_flags & PKT_TX_TCP_SEG) @@ -561,8 +551,7 @@ ixgbe_xmit_cleanup(struct ixgbe_tx_queue *txq) /* Check to make sure the last descriptor to clean is done */ desc_to_clean_to = sw_ring[desc_to_clean_to].last_id; status = txr[desc_to_clean_to].wb.status; - if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) - { + if (!(status & rte_cpu_to_le_32(IXGBE_TXD_STAT_DD))) { PMD_TX_FREE_LOG(DEBUG, "TX descriptor %4u is not done" "(port=%d queue=%d)", @@ -920,24 +909,40 @@ end_of_tx: * RX functions * **********************************************************************/ -#define IXGBE_PACKET_TYPE_IPV4 0X01 -#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 -#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 -#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41 -#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03 -#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43 -#define IXGBE_PACKET_TYPE_IPV6 0X04 -#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14 -#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24 -#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C -#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C -#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C -#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05 -#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15 -#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25 -#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D -#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D -#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D + +#define IXGBE_PACKET_TYPE_ETHER 0X00 +#define IXGBE_PACKET_TYPE_IPV4 0X01 +#define IXGBE_PACKET_TYPE_IPV4_TCP 0X11 +#define IXGBE_PACKET_TYPE_IPV4_UDP 0X21 +#define IXGBE_PACKET_TYPE_IPV4_SCTP 0X41 +#define IXGBE_PACKET_TYPE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_IPV4_EXT_TCP 0X13 +#define IXGBE_PACKET_TYPE_IPV4_EXT_UDP 0X23 +#define IXGBE_PACKET_TYPE_IPV4_EXT_SCTP 0X43 +#define IXGBE_PACKET_TYPE_IPV6 0X04 +#define IXGBE_PACKET_TYPE_IPV6_TCP 0X14 +#define IXGBE_PACKET_TYPE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_IPV6_SCTP 0X44 +#define IXGBE_PACKET_TYPE_IPV6_EXT 0X0C +#define IXGBE_PACKET_TYPE_IPV6_EXT_TCP 0X1C +#define IXGBE_PACKET_TYPE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_IPV6_EXT_SCTP 0X4C +#define IXGBE_PACKET_TYPE_IPV4_IPV6 0X05 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_TCP 0X15 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_UDP 0X25 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP 0X45 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6 0X07 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP 0X17 +#define 
IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP 0X27 +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP 0X47 +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT 0X0D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP 0X1D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP 0X2D +#define IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP 0X4D +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT 0X0F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP 0X1F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP 0X2F +#define IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP 0X4F #define IXGBE_PACKET_TYPE_NVGRE 0X00 #define IXGBE_PACKET_TYPE_NVGRE_IPV4 0X01 @@ -945,13 +950,17 @@ end_of_tx: #define IXGBE_PACKET_TYPE_NVGRE_IPV4_UDP 0X21 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_SCTP 0X41 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT 0X03 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP 0X13 +#define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP 0X23 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_SCTP 0X43 #define IXGBE_PACKET_TYPE_NVGRE_IPV6 0X04 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_TCP 0X14 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_UDP 0X24 +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP 0X44 #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT 0X0C #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_TCP 0X1C #define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_UDP 0X2C +#define IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP 0X4C #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6 0X05 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_TCP 0X15 #define IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP 0X25 @@ -965,13 +974,17 @@ end_of_tx: #define IXGBE_PACKET_TYPE_VXLAN_IPV4_UDP 0xA1 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_SCTP 0xC1 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT 0x83 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP 0X93 +#define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP 0XA3 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_SCTP 0XC3 #define IXGBE_PACKET_TYPE_VXLAN_IPV6 0X84 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_TCP 0X94 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_UDP 0XA4 +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP 0XC4 #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT 0X8C #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_TCP 0X9C #define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_UDP 0XAC +#define IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP 0XCC #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6 0X85 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_TCP 0X95 #define IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP 0XA5 @@ -993,48 +1006,88 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) */ static const uint32_t ptype_table[IXGBE_PACKET_TYPE_MAX] __rte_cache_aligned = { + [IXGBE_PACKET_TYPE_ETHER] = RTE_PTYPE_L2_ETHER, [IXGBE_PACKET_TYPE_IPV4] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4, + [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, [IXGBE_PACKET_TYPE_IPV4_EXT] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, [IXGBE_PACKET_TYPE_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6, - [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + 
[IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_SCTP, [IXGBE_PACKET_TYPE_IPV6_EXT] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT, - [IXGBE_PACKET_TYPE_IPV4_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + RTE_PTYPE_INNER_L3_IPV6, [IXGBE_PACKET_TYPE_IPV4_IPV6_TCP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | - RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, - [IXGBE_PACKET_TYPE_IPV4_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, [IXGBE_PACKET_TYPE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, - [IXGBE_PACKET_TYPE_IPV4_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_SCTP, - [IXGBE_PACKET_TYPE_IPV4_EXT_SCTP] = RTE_PTYPE_L2_ETHER | - RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + 
RTE_PTYPE_INNER_L3_IPV6_EXT, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_IPV4_EXT_IPV6_EXT_SCTP] = + RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_IP | + RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, }; static const uint32_t @@ -1087,6 +1140,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | + RTE_PTYPE_INNER_L4_SCTP, [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4, @@ -1094,6 +1151,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_NVGRE_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | + RTE_PTYPE_INNER_L4_SCTP, [IXGBE_PACKET_TYPE_NVGRE_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | @@ -1106,6 +1167,14 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_NVGRE_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_TUNNEL_GRE | + RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | + RTE_PTYPE_INNER_L4_UDP, [IXGBE_PACKET_TYPE_VXLAN] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | @@ -1162,6 +1231,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV6 | RTE_PTYPE_INNER_L4_SCTP, [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | @@ -1170,6 +1243,10 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_UDP, + [IXGBE_PACKET_TYPE_VXLAN_IPV6_EXT_SCTP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | 
+ RTE_PTYPE_INNER_L3_IPV6_EXT | RTE_PTYPE_INNER_L4_SCTP, [IXGBE_PACKET_TYPE_VXLAN_IPV4_IPV6_EXT_UDP] = RTE_PTYPE_L2_ETHER | RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | @@ -1182,6 +1259,14 @@ ixgbe_rxd_pkt_info_to_pkt_type(uint32_t pkt_info, uint16_t ptype_mask) RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_SCTP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_TCP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_TCP, + [IXGBE_PACKET_TYPE_VXLAN_IPV4_EXT_UDP] = RTE_PTYPE_L2_ETHER | + RTE_PTYPE_L3_IPV4_EXT_UNKNOWN | RTE_PTYPE_L4_UDP | + RTE_PTYPE_TUNNEL_VXLAN | RTE_PTYPE_INNER_L2_ETHER | + RTE_PTYPE_INNER_L3_IPV4_EXT | RTE_PTYPE_INNER_L4_UDP, }; if (unlikely(pkt_info & IXGBE_RXDADV_PKTTYPE_ETQF)) @@ -1232,7 +1317,7 @@ ixgbe_rxd_pkt_info_to_pkt_flags(uint16_t pkt_info) } static inline uint64_t -rx_desc_status_to_pkt_flags(uint32_t rx_status) +rx_desc_status_to_pkt_flags(uint32_t rx_status, uint64_t vlan_flags) { uint64_t pkt_flags; @@ -1241,7 +1326,7 @@ rx_desc_status_to_pkt_flags(uint32_t rx_status) * Do not check whether L3/L4 rx checksum done by NIC or not, * That can be found from rte_eth_rxmode.hw_ip_checksum flag */ - pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? PKT_RX_VLAN_PKT : 0; + pkt_flags = (rx_status & IXGBE_RXD_STAT_VP) ? vlan_flags : 0; #ifdef RTE_LIBRTE_IEEE1588 if (rx_status & IXGBE_RXD_STAT_TMST) @@ -1298,6 +1383,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) uint32_t pkt_info[LOOK_AHEAD]; int i, j, nb_rx = 0; uint32_t status; + uint64_t vlan_flags = rxq->vlan_flags; /* get references to current descriptor and S/W ring entry */ rxdp = &rxq->rx_ring[rxq->rx_tail]; @@ -1313,8 +1399,7 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) * reference packets that are ready to be received. 
*/ for (i = 0; i < RTE_PMD_IXGBE_RX_MAX_BURST; - i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) - { + i += LOOK_AHEAD, rxdp += LOOK_AHEAD, rxep += LOOK_AHEAD) { /* Read desc statuses backwards to avoid race condition */ for (j = LOOK_AHEAD-1; j >= 0; --j) s[j] = rte_le_to_cpu_32(rxdp[j].wb.upper.status_error); @@ -1340,7 +1425,8 @@ ixgbe_rx_scan_hw_ring(struct ixgbe_rx_queue *rxq) mb->vlan_tci = rte_le_to_cpu_16(rxdp[j].wb.upper.vlan); /* convert descriptor fields to rte mbuf flags */ - pkt_flags = rx_desc_status_to_pkt_flags(s[j]); + pkt_flags = rx_desc_status_to_pkt_flags(s[j], + vlan_flags); pkt_flags |= rx_desc_error_to_pkt_flags(s[j]); pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags ((uint16_t)pkt_info[j]); @@ -1472,6 +1558,7 @@ rx_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, if (ixgbe_rx_alloc_bufs(rxq, true) != 0) { int i, j; + PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, (unsigned) rxq->queue_id); @@ -1523,6 +1610,7 @@ ixgbe_recv_pkts_bulk_alloc(void *rx_queue, struct rte_mbuf **rx_pkts, nb_rx = 0; while (nb_pkts) { uint16_t ret, n; + n = (uint16_t)RTE_MIN(nb_pkts, RTE_PMD_IXGBE_RX_MAX_BURST); ret = rx_recv_pkts(rx_queue, &rx_pkts[nb_rx], n); nb_rx = (uint16_t)(nb_rx + ret); @@ -1554,6 +1642,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_rx; uint16_t nb_hold; uint64_t pkt_flags; + uint64_t vlan_flags; nb_rx = 0; nb_hold = 0; @@ -1561,6 +1650,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, rx_id = rxq->rx_tail; rx_ring = rxq->rx_ring; sw_ring = rxq->sw_ring; + vlan_flags = rxq->vlan_flags; while (nb_rx < nb_pkts) { /* * The order of operations here is important as the DD status @@ -1608,7 +1698,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, (unsigned) rx_id, (unsigned) staterr, (unsigned) rte_le_to_cpu_16(rxd.wb.upper.length)); - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed port_id=%u " "queue_id=%u", (unsigned) rxq->port_id, @@ -1670,7 +1760,7 @@ ixgbe_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, /* Only valid if PKT_RX_VLAN_PKT set in pkt_flags */ rxm->vlan_tci = rte_le_to_cpu_16(rxd.wb.upper.vlan); - pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_flags = rx_desc_status_to_pkt_flags(staterr, vlan_flags); pkt_flags = pkt_flags | rx_desc_error_to_pkt_flags(staterr); pkt_flags = pkt_flags | ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); @@ -1763,7 +1853,7 @@ ixgbe_fill_cluster_head_buf( */ head->vlan_tci = rte_le_to_cpu_16(desc->wb.upper.vlan); pkt_info = rte_le_to_cpu_32(desc->wb.lower.lo_dword.data); - pkt_flags = rx_desc_status_to_pkt_flags(staterr); + pkt_flags = rx_desc_status_to_pkt_flags(staterr, rxq->vlan_flags); pkt_flags |= rx_desc_error_to_pkt_flags(staterr); pkt_flags |= ixgbe_rxd_pkt_info_to_pkt_flags((uint16_t)pkt_info); head->ol_flags = pkt_flags; @@ -1879,7 +1969,7 @@ next_desc: rte_le_to_cpu_16(rxd.wb.upper.length)); if (!bulk_alloc) { - nmb = rte_rxmbuf_alloc(rxq->mb_pool); + nmb = rte_mbuf_raw_alloc(rxq->mb_pool); if (nmb == NULL) { PMD_RX_LOG(DEBUG, "RX mbuf alloc failed " "port_id=%u queue_id=%u", @@ -1889,8 +1979,7 @@ next_desc: rx_mbuf_alloc_failed++; break; } - } - else if (nb_hold > rxq->rx_free_thresh) { + } else if (nb_hold > rxq->rx_free_thresh) { uint16_t next_rdt = rxq->rx_free_trigger; if (!ixgbe_rx_alloc_bufs(rxq, false)) { @@ -2151,6 +2240,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) prev = (uint16_t) 
(txq->nb_tx_desc - 1); for (i = 0; i < txq->nb_tx_desc; i++) { volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + txd->wb.status = rte_cpu_to_le_32(IXGBE_TXD_STAT_DD); txe[i].mbuf = NULL; txe[i].last_id = i; @@ -2170,7 +2260,7 @@ ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); txq->ctx_curr = 0; - memset((void*)&txq->ctx_cache, 0, + memset((void *)&txq->ctx_cache, 0, IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); } @@ -2443,6 +2533,7 @@ ixgbe_rx_queue_release_mbufs(struct ixgbe_rx_queue *rxq) if (rxq->rx_nb_avail) { for (i = 0; i < rxq->rx_nb_avail; ++i) { struct rte_mbuf *mb; + mb = rxq->rx_stage[rxq->rx_next_avail + i]; rte_pktmbuf_free_seg(mb); } @@ -2665,7 +2756,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, /* * Zero init all the descriptors in the ring. */ - memset (rz->addr, 0, RX_RING_SZ); + memset(rz->addr, 0, RX_RING_SZ); /* * Modified to setup VFRDT for Virtual Function @@ -2679,8 +2770,7 @@ ixgbe_dev_rx_queue_setup(struct rte_eth_dev *dev, IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDT(queue_idx)); rxq->rdh_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_VFRDH(queue_idx)); - } - else { + } else { rxq->rdt_reg_addr = IXGBE_PCI_REG_ADDR(hw, IXGBE_RDT(rxq->reg_idx)); rxq->rdh_reg_addr = @@ -2816,6 +2906,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_tx_queues; i++) { struct ixgbe_tx_queue *txq = dev->data->tx_queues[i]; + if (txq != NULL) { txq->ops->release_mbufs(txq); txq->ops->reset(txq); @@ -2824,6 +2915,7 @@ ixgbe_dev_clear_queues(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + if (rxq != NULL) { ixgbe_rx_queue_release_mbufs(rxq); ixgbe_reset_rx_queue(adapter, rxq); @@ -3139,6 +3231,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) } for (i = 0; i < nb_tcs; i++) { uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); /* clear 10 bits. */ rxpbsize |= (pbsize << IXGBE_RXPBSIZE_SHIFT); /* set value */ @@ -3147,14 +3240,15 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) /* zero alloc all unused TCs */ for (i = nb_tcs; i < ETH_DCB_NUM_USER_PRIORITIES; i++) { uint32_t rxpbsize = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); - rxpbsize &= (~( 0x3FF << IXGBE_RXPBSIZE_SHIFT )); + + rxpbsize &= (~(0x3FF << IXGBE_RXPBSIZE_SHIFT)); /* clear 10 bits. */ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); } /* MRQC: enable vmdq and dcb */ - mrqc = ((num_pools == ETH_16_POOLS) ? \ - IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN ); + mrqc = (num_pools == ETH_16_POOLS) ? + IXGBE_MRQC_VMDQRT8TCEN : IXGBE_MRQC_VMDQRT4TCEN; IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); /* PFVTCTL: turn on virtualisation and set the default pool */ @@ -3192,7 +3286,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) } /* VFRE: pool enabling for receive - 16 or 32 */ - IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), \ + IXGBE_WRITE_REG(hw, IXGBE_VFRE(0), num_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); /* @@ -3205,7 +3299,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ for (i = 0; i < cfg->nb_pool_maps; i++) { /* set vlan id in VF register and set the valid bit */ - IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | (cfg->pool_map[i].vlan_id & 0xFFF))); /* * Put the allowed pools in VFB reg. 
As we only have 16 or 32 @@ -3223,7 +3317,7 @@ ixgbe_vmdq_dcb_configure(struct rte_eth_dev *dev) */ static void ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) + struct ixgbe_dcb_config *dcb_config) { uint32_t reg; uint32_t q; @@ -3238,18 +3332,17 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, /* Enable DCB for Tx with 8 TCs */ if (dcb_config->num_tcs.pg_tcs == 8) { reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; - } - else { + } else { reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; } if (dcb_config->vt_mode) - reg |= IXGBE_MTQC_VT_ENA; + reg |= IXGBE_MTQC_VT_ENA; IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); /* Disable drop for all queues */ for (q = 0; q < 128; q++) IXGBE_WRITE_REG(hw, IXGBE_QDE, - (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); /* Enable the Tx desc arbiter */ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); @@ -3261,7 +3354,6 @@ ixgbe_dcb_tx_hw_config(struct ixgbe_hw *hw, reg |= IXGBE_SECTX_DCB; IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); } - return; } /** @@ -3285,25 +3377,23 @@ ixgbe_vmdq_dcb_hw_tx_config(struct rte_eth_dev *dev, vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ? 0xFFFF : 0xFFFFFFFF); /*Configure general DCB TX parameters*/ - ixgbe_dcb_tx_hw_config(hw,dcb_config); - return; + ixgbe_dcb_tx_hw_config(hw, dcb_config); } static void ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, - struct ixgbe_dcb_config *dcb_config) + struct ixgbe_dcb_config *dcb_config) { struct rte_eth_vmdq_dcb_conf *vmdq_rx_conf = &dev->data->dev_conf.rx_adv_conf.vmdq_dcb_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ - if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS ) { + if (vmdq_rx_conf->nb_queue_pools == ETH_16_POOLS) { dcb_config->num_tcs.pg_tcs = ETH_8_TCS; dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; - } - else { + } else { dcb_config->num_tcs.pg_tcs = ETH_4_TCS; dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } @@ -3318,19 +3408,18 @@ ixgbe_vmdq_dcb_rx_config(struct rte_eth_dev *dev, static void ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, - struct ixgbe_dcb_config *dcb_config) + struct ixgbe_dcb_config *dcb_config) { struct rte_eth_vmdq_dcb_tx_conf *vmdq_tx_conf = &dev->data->dev_conf.tx_adv_conf.vmdq_dcb_tx_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; /* convert rte_eth_conf.rx_adv_conf to struct ixgbe_dcb_config */ - if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS ) { + if (vmdq_tx_conf->nb_queue_pools == ETH_16_POOLS) { dcb_config->num_tcs.pg_tcs = ETH_8_TCS; dcb_config->num_tcs.pfc_tcs = ETH_8_TCS; - } - else { + } else { dcb_config->num_tcs.pg_tcs = ETH_4_TCS; dcb_config->num_tcs.pfc_tcs = ETH_4_TCS; } @@ -3342,7 +3431,6 @@ ixgbe_dcb_vt_tx_config(struct rte_eth_dev *dev, tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = (uint8_t)(1 << j); } - return; } static void @@ -3352,7 +3440,7 @@ ixgbe_dcb_rx_config(struct rte_eth_dev *dev, struct rte_eth_dcb_rx_conf *rx_conf = &dev->data->dev_conf.rx_adv_conf.dcb_rx_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; dcb_config->num_tcs.pg_tcs = (uint8_t)rx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)rx_conf->nb_tcs; @@ -3373,7 +3461,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, struct rte_eth_dcb_tx_conf *tx_conf = &dev->data->dev_conf.tx_adv_conf.dcb_tx_conf; struct ixgbe_dcb_tc_config *tc; - uint8_t i,j; + uint8_t i, j; dcb_config->num_tcs.pg_tcs = (uint8_t)tx_conf->nb_tcs; dcb_config->num_tcs.pfc_tcs = (uint8_t)tx_conf->nb_tcs; 
@@ -3394,7 +3482,7 @@ ixgbe_dcb_tx_config(struct rte_eth_dev *dev, */ static void ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, - struct ixgbe_dcb_config *dcb_config) + struct ixgbe_dcb_config *dcb_config) { uint32_t reg; uint32_t vlanctrl; @@ -3454,13 +3542,11 @@ ixgbe_dcb_rx_hw_config(struct ixgbe_hw *hw, */ reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); - - return; } static void ixgbe_dcb_hw_arbite_rx_config(struct ixgbe_hw *hw, uint16_t *refill, - uint16_t *max,uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) + uint16_t *max, uint8_t *bwg_id, uint8_t *tsa, uint8_t *map) { switch (hw->mac.type) { case ixgbe_mac_82598EB: @@ -3485,16 +3571,16 @@ ixgbe_dcb_hw_arbite_tx_config(struct ixgbe_hw *hw, uint16_t *refill, uint16_t *m { switch (hw->mac.type) { case ixgbe_mac_82598EB: - ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id,tsa); - ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id,tsa); + ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, tsa); + ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, tsa); break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: case ixgbe_mac_X550: case ixgbe_mac_X550EM_x: case ixgbe_mac_X550EM_a: - ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,tsa); - ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,tsa, map); + ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, tsa); + ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, tsa, map); break; default: break; @@ -3515,7 +3601,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, struct ixgbe_dcb_config *dcb_config) { int ret = 0; - uint8_t i,pfc_en,nb_tcs; + uint8_t i, pfc_en, nb_tcs; uint16_t pbsize, rx_buffer_size; uint8_t config_dcb_rx = 0; uint8_t config_dcb_tx = 0; @@ -3529,7 +3615,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, struct ixgbe_hw *hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - switch(dev->data->dev_conf.rxmode.mq_mode){ + switch (dev->data->dev_conf.rxmode.mq_mode) { case ETH_MQ_RX_VMDQ_DCB: dcb_config->vt_mode = true; if (hw->mac.type != ixgbe_mac_82598EB) { @@ -3560,10 +3646,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, case ETH_MQ_TX_VMDQ_DCB: dcb_config->vt_mode = true; config_dcb_tx = DCB_TX_CONFIG; - /* get DCB and VT TX configuration parameters from rte_eth_conf */ - ixgbe_dcb_vt_tx_config(dev,dcb_config); + /* get DCB and VT TX configuration parameters + * from rte_eth_conf + */ + ixgbe_dcb_vt_tx_config(dev, dcb_config); /*Configure general VMDQ and DCB TX parameters*/ - ixgbe_vmdq_dcb_hw_tx_config(dev,dcb_config); + ixgbe_vmdq_dcb_hw_tx_config(dev, dcb_config); break; case ETH_MQ_TX_DCB: @@ -3586,8 +3674,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Avoid un-configured priority mapping to TC0 */ uint8_t j = 4; uint8_t mask = 0xFF; + for (i = 0; i < ETH_DCB_NUM_USER_PRIORITIES - 4; i++) - mask = (uint8_t)(mask & (~ (1 << map[i]))); + mask = (uint8_t)(mask & (~(1 << map[i]))); for (i = 0; mask && (i < IXGBE_DCB_MAX_TRAFFIC_CLASS); i++) { if ((mask & 0x1) && (j < ETH_DCB_NUM_USER_PRIORITIES)) map[j++] = i; @@ -3623,6 +3712,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, /* Set RX buffer size */ pbsize = (uint16_t)(rx_buffer_size / nb_tcs); uint32_t rxpbsize = pbsize << IXGBE_RXPBSIZE_SHIFT; + for (i = 0; i < nb_tcs; i++) { IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpbsize); } @@ -3632,9 +3722,12 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, } } if (config_dcb_tx) { - /* Only support an equally distributed Tx packet 
buffer strategy. */ + /* Only support an equally distributed + * Tx packet buffer strategy. + */ uint32_t txpktsize = IXGBE_TXPBSIZE_MAX / nb_tcs; uint32_t txpbthresh = (txpktsize / DCB_TX_PB) - IXGBE_TXPKT_SIZE_MAX; + for (i = 0; i < nb_tcs; i++) { IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); @@ -3647,9 +3740,9 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, } /*Calculates traffic class credits*/ - ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame, + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame, IXGBE_DCB_TX_CONFIG); - ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config,max_frame, + ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_config, max_frame, IXGBE_DCB_RX_CONFIG); if (config_dcb_rx) { @@ -3659,7 +3752,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_RX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_RX_CONFIG, tsa); /* Configure PG(ETS) RX */ - ixgbe_dcb_hw_arbite_rx_config(hw,refill,max,bwgid,tsa,map); + ixgbe_dcb_hw_arbite_rx_config(hw, refill, max, bwgid, tsa, map); } if (config_dcb_tx) { @@ -3669,7 +3762,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); /* Configure PG(ETS) TX */ - ixgbe_dcb_hw_arbite_tx_config(hw,refill,max,bwgid,tsa,map); + ixgbe_dcb_hw_arbite_tx_config(hw, refill, max, bwgid, tsa, map); } /*Configure queue statistics registers*/ @@ -3683,7 +3776,7 @@ ixgbe_dcb_hw_configure(struct rte_eth_dev *dev, * If the TC count is 8,and the default high_water is 48, * the low_water is 16 as default. */ - hw->fc.high_water[i] = (pbsize * 3 ) / 4; + hw->fc.high_water[i] = (pbsize * 3) / 4; hw->fc.low_water[i] = pbsize / 4; /* Enable pfc for this TC */ tc = &dcb_config->tc_config[i]; @@ -3721,8 +3814,6 @@ void ixgbe_configure_dcb(struct rte_eth_dev *dev) /** Configure DCB hardware **/ ixgbe_dcb_hw_configure(dev, dcb_cfg); - - return; } /* @@ -3787,7 +3878,7 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) /* PFVLVF, PFVLVFB: set up filters for vlan tags as configured */ for (i = 0; i < cfg->nb_pool_maps; i++) { /* set vlan id in VF register and set the valid bit */ - IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | \ + IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), (IXGBE_VLVF_VIEN | (cfg->pool_map[i].vlan_id & IXGBE_RXD_VLAN_ID_MASK))); /* * Put the allowed pools in VFB reg. As we only have 16 or 64 @@ -3795,12 +3886,11 @@ ixgbe_vmdq_rx_hw_configure(struct rte_eth_dev *dev) * i.e. 
bits 0-31 */ if (((cfg->pool_map[i].pools >> 32) & UINT32_MAX) == 0) - IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i*2), \ + IXGBE_WRITE_REG(hw, IXGBE_VLVFB(i * 2), (cfg->pool_map[i].pools & UINT32_MAX)); else - IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i*2+1)), \ - ((cfg->pool_map[i].pools >> 32) \ - & UINT32_MAX)); + IXGBE_WRITE_REG(hw, IXGBE_VLVFB((i * 2 + 1)), + ((cfg->pool_map[i].pools >> 32) & UINT32_MAX)); } @@ -3840,7 +3930,7 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) /* Disable drop for all queues */ for (q = 0; q < IXGBE_MAX_RX_QUEUE_NUM; q++) IXGBE_WRITE_REG(hw, IXGBE_QDE, - (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); + (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); /* Enable the Tx desc arbiter */ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); @@ -3848,8 +3938,6 @@ ixgbe_vmdq_tx_hw_configure(struct ixgbe_hw *hw) IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); IXGBE_WRITE_FLUSH(hw); - - return; } static int __attribute__((cold)) @@ -3857,12 +3945,13 @@ ixgbe_alloc_rx_queue_mbufs(struct ixgbe_rx_queue *rxq) { struct ixgbe_rx_entry *rxe = rxq->sw_ring; uint64_t dma_addr; - unsigned i; + unsigned int i; /* Initialize software ring entries */ for (i = 0; i < rxq->nb_rx_desc; i++) { volatile union ixgbe_adv_rx_desc *rxd; - struct rte_mbuf *mbuf = rte_rxmbuf_alloc(rxq->mb_pool); + struct rte_mbuf *mbuf = rte_mbuf_raw_alloc(rxq->mb_pool); + if (mbuf == NULL) { PMD_INIT_LOG(ERR, "RX mbuf alloc failed queue_id=%u", (unsigned) rxq->queue_id); @@ -4253,6 +4342,7 @@ ixgbe_set_rx_function(struct rte_eth_dev *dev) for (i = 0; i < dev->data->nb_rx_queues; i++) { struct ixgbe_rx_queue *rxq = dev->data->rx_queues[i]; + rxq->rx_using_sse = rx_using_sse; } } @@ -4305,6 +4395,7 @@ ixgbe_set_rsc(struct rte_eth_dev *dev) /* RFCTL configuration */ if (rsc_capable) { uint32_t rfctl = IXGBE_READ_REG(hw, IXGBE_RFCTL); + if (rx_conf->enable_lro) /* * Since NFS packets coalescing is not supported - clear @@ -4498,6 +4589,7 @@ ixgbe_dev_rx_init(struct rte_eth_dev *dev) if (hw->mac.type == ixgbe_mac_82599EB) { /* Must setup the PSRTYPE register */ uint32_t psrtype; + psrtype = IXGBE_PSRTYPE_TCPHDR | IXGBE_PSRTYPE_UDPHDR | IXGBE_PSRTYPE_IPV4HDR | @@ -4597,7 +4689,8 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev) hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); /* Enable TX CRC (checksum offload requirement) and hw padding - * (TSO requirement) */ + * (TSO requirement) + */ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); hlreg0 |= (IXGBE_HLREG0_TXCRCEN | IXGBE_HLREG0_TXPADEN); IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); @@ -4622,26 +4715,26 @@ ixgbe_dev_tx_init(struct rte_eth_dev *dev) * bookkeeping if things aren't delivered in order. 
*/ switch (hw->mac.type) { - case ixgbe_mac_82598EB: - txctrl = IXGBE_READ_REG(hw, - IXGBE_DCA_TXCTRL(txq->reg_idx)); - txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx), - txctrl); - break; + case ixgbe_mac_82598EB: + txctrl = IXGBE_READ_REG(hw, + IXGBE_DCA_TXCTRL(txq->reg_idx)); + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(txq->reg_idx), + txctrl); + break; - case ixgbe_mac_82599EB: - case ixgbe_mac_X540: - case ixgbe_mac_X550: - case ixgbe_mac_X550EM_x: - case ixgbe_mac_X550EM_a: - default: - txctrl = IXGBE_READ_REG(hw, + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: + case ixgbe_mac_X550: + case ixgbe_mac_X550EM_x: + case ixgbe_mac_X550EM_a: + default: + txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx)); - txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; - IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx), - txctrl); - break; + txctrl &= ~IXGBE_DCA_TXCTRL_DESC_WRO_EN; + IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(txq->reg_idx), + txctrl); + break; } } @@ -4813,12 +4906,12 @@ ixgbe_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id) rxdctl &= ~IXGBE_RXDCTL_ENABLE; IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rxq->reg_idx), rxdctl); - /* Wait until RX Enable ready */ + /* Wait until RX Enable bit clear */ poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; do { rte_delay_ms(1); rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rxq->reg_idx)); - } while (--poll_ms && (rxdctl | IXGBE_RXDCTL_ENABLE)); + } while (--poll_ms && (rxdctl & IXGBE_RXDCTL_ENABLE)); if (!poll_ms) PMD_INIT_LOG(ERR, "Could not disable Rx Queue %d", rx_queue_id); @@ -4892,48 +4985,48 @@ ixgbe_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id) PMD_INIT_FUNC_TRACE(); hw = IXGBE_DEV_PRIVATE_TO_HW(dev->data->dev_private); - if (tx_queue_id < dev->data->nb_tx_queues) { - txq = dev->data->tx_queues[tx_queue_id]; + if (tx_queue_id >= dev->data->nb_tx_queues) + return -1; - /* Wait until TX queue is empty */ - if (hw->mac.type == ixgbe_mac_82599EB) { - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_us(RTE_IXGBE_WAIT_100_US); - txtdh = IXGBE_READ_REG(hw, - IXGBE_TDH(txq->reg_idx)); - txtdt = IXGBE_READ_REG(hw, - IXGBE_TDT(txq->reg_idx)); - } while (--poll_ms && (txtdh != txtdt)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Tx Queue %d is not empty " - "when stopping.", tx_queue_id); - } + txq = dev->data->tx_queues[tx_queue_id]; - txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); - txdctl &= ~IXGBE_TXDCTL_ENABLE; - IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + /* Wait until TX queue is empty */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_us(RTE_IXGBE_WAIT_100_US); + txtdh = IXGBE_READ_REG(hw, + IXGBE_TDH(txq->reg_idx)); + txtdt = IXGBE_READ_REG(hw, + IXGBE_TDT(txq->reg_idx)); + } while (--poll_ms && (txtdh != txtdt)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Tx Queue %d is not empty " + "when stopping.", tx_queue_id); + } - /* Wait until TX Enable ready */ - if (hw->mac.type == ixgbe_mac_82599EB) { - poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; - do { - rte_delay_ms(1); - txdctl = IXGBE_READ_REG(hw, + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(txq->reg_idx)); + txdctl &= ~IXGBE_TXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(txq->reg_idx), txdctl); + + /* Wait until TX Enable bit clear */ + if (hw->mac.type == ixgbe_mac_82599EB) { + poll_ms = RTE_IXGBE_REGISTER_POLL_WAIT_10_MS; + do { + rte_delay_ms(1); + txdctl = IXGBE_READ_REG(hw, 
IXGBE_TXDCTL(txq->reg_idx)); - } while (--poll_ms && (txdctl | IXGBE_TXDCTL_ENABLE)); - if (!poll_ms) - PMD_INIT_LOG(ERR, "Could not disable " - "Tx Queue %d", tx_queue_id); - } + } while (--poll_ms && (txdctl & IXGBE_TXDCTL_ENABLE)); + if (!poll_ms) + PMD_INIT_LOG(ERR, "Could not disable " + "Tx Queue %d", tx_queue_id); + } - if (txq->ops != NULL) { - txq->ops->release_mbufs(txq); - txq->ops->reset(txq); - } - dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; - } else - return -1; + if (txq->ops != NULL) { + txq->ops->release_mbufs(txq); + txq->ops->reset(txq); + } + dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED; return 0; } diff --git a/drivers/net/ixgbe/ixgbe_rxtx.h b/drivers/net/ixgbe/ixgbe_rxtx.h index 3691a19d..2608b364 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx.h +++ b/drivers/net/ixgbe/ixgbe_rxtx.h @@ -146,6 +146,8 @@ struct ixgbe_rx_queue { uint8_t crc_len; /**< 0 if CRC stripped, 4 otherwise. */ uint8_t drop_en; /**< If not 0, set SRRCTL.Drop_En. */ uint8_t rx_deferred_start; /**< not in global dev start. */ + /** flags to set in mbuf when a vlan is detected. */ + uint64_t vlan_flags; /** need to alloc dummy mbuf, for wraparound when scanning hw ring */ struct rte_mbuf fake_mbuf; /** hold packets to return to application */ diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h new file mode 100644 index 00000000..62b82013 --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_common.h @@ -0,0 +1,326 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _IXGBE_RXTX_VEC_COMMON_H_ +#define _IXGBE_RXTX_VEC_COMMON_H_ +#include <stdint.h> +#include <rte_ethdev.h> + +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" + +static inline uint16_t +reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs, + uint16_t nb_bufs, uint8_t *split_flags) +{ + struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/ + struct rte_mbuf *start = rxq->pkt_first_seg; + struct rte_mbuf *end = rxq->pkt_last_seg; + unsigned int pkt_idx, buf_idx; + + for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { + if (end != NULL) { + /* processing a split packet */ + end->next = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + + start->nb_segs++; + start->pkt_len += rx_bufs[buf_idx]->data_len; + end = end->next; + + if (!split_flags[buf_idx]) { + /* it's the last packet of the set */ + start->hash = end->hash; + start->ol_flags = end->ol_flags; + /* we need to strip crc for the whole packet */ + start->pkt_len -= rxq->crc_len; + if (end->data_len > rxq->crc_len) + end->data_len -= rxq->crc_len; + else { + /* free up last mbuf */ + struct rte_mbuf *secondlast = start; + + start->nb_segs--; + while (secondlast->next != end) + secondlast = secondlast->next; + secondlast->data_len -= (rxq->crc_len - + end->data_len); + secondlast->next = NULL; + rte_pktmbuf_free_seg(end); + } + pkts[pkt_idx++] = start; + start = end = NULL; + } + } else { + /* not processing a split packet */ + if (!split_flags[buf_idx]) { + /* not a split packet, save and skip */ + pkts[pkt_idx++] = rx_bufs[buf_idx]; + continue; + } + end = start = rx_bufs[buf_idx]; + rx_bufs[buf_idx]->data_len += rxq->crc_len; + rx_bufs[buf_idx]->pkt_len += rxq->crc_len; + } + } + + /* save the partial packet for next time */ + rxq->pkt_first_seg = start; + rxq->pkt_last_seg = end; + memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); + return pkt_idx; +} + +static inline int __attribute__((always_inline)) +ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) +{ + struct ixgbe_tx_entry_v *txep; + uint32_t status; + uint32_t n; + uint32_t i; + int nb_free = 0; + struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ]; + + /* check DD bit on threshold descriptor */ + status = txq->tx_ring[txq->tx_next_dd].wb.status; + if (!(status & IXGBE_ADVTXD_STAT_DD)) + return 0; + + n = txq->tx_rs_thresh; + + /* + * first buffer to free from S/W ring is at index + * tx_next_dd - (tx_rs_thresh-1) + */ + txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)]; + m = __rte_pktmbuf_prefree_seg(txep[0].mbuf); + if (likely(m != NULL)) { + free[0] = m; + nb_free = 1; + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (likely(m != NULL)) { + if (likely(m->pool == free[0]->pool)) + free[nb_free++] = m; + else { + rte_mempool_put_bulk(free[0]->pool, + (void *)free, nb_free); + free[0] = m; + nb_free = 1; + } + } + } + rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); + } else { + for (i = 1; i < n; i++) { + m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); + if (m != NULL) + rte_mempool_put(m->pool, m); + } + } + + /* buffers were freed, update counters */ + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); + txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); + if (txq->tx_next_dd >= txq->nb_tx_desc) + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + + return txq->tx_rs_thresh; +} + +static inline void __attribute__((always_inline)) +tx_backlog_entry(struct ixgbe_tx_entry_v *txep, + struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + 
+ for (i = 0; i < (int)nb_pkts; ++i) + txep[i].mbuf = tx_pkts[i]; +} + +static inline void +_ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) +{ + unsigned int i; + struct ixgbe_tx_entry_v *txe; + const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1); + + if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc) + return; + + /* release the used mbufs in sw_ring */ + for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1); + i != txq->tx_tail; + i = (i + 1) & max_desc) { + txe = &txq->sw_ring_v[i]; + rte_pktmbuf_free_seg(txe->mbuf); + } + txq->nb_tx_free = max_desc; + + /* reset tx_entry */ + for (i = 0; i < txq->nb_tx_desc; i++) { + txe = &txq->sw_ring_v[i]; + txe->mbuf = NULL; + } +} + +static inline void +_ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) +{ + const unsigned int mask = rxq->nb_rx_desc - 1; + unsigned int i; + + if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) + return; + + /* free all mbufs that are valid in the ring */ + for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) + rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); + rxq->rxrearm_nb = rxq->nb_rx_desc; + + /* set all entries to NULL */ + memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); +} + +static inline void +_ixgbe_tx_free_swring_vec(struct ixgbe_tx_queue *txq) +{ + if (txq == NULL) + return; + + if (txq->sw_ring != NULL) { + rte_free(txq->sw_ring_v - 1); + txq->sw_ring_v = NULL; + } +} + +static inline void +_ixgbe_reset_tx_queue_vec(struct ixgbe_tx_queue *txq) +{ + static const union ixgbe_adv_tx_desc zeroed_desc = { { 0 } }; + struct ixgbe_tx_entry_v *txe = txq->sw_ring_v; + uint16_t i; + + /* Zero out HW ring memory */ + for (i = 0; i < txq->nb_tx_desc; i++) + txq->tx_ring[i] = zeroed_desc; + + /* Initialize SW ring entries */ + for (i = 0; i < txq->nb_tx_desc; i++) { + volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; + + txd->wb.status = IXGBE_TXD_STAT_DD; + txe[i].mbuf = NULL; + } + + txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + txq->tx_tail = 0; + txq->nb_tx_used = 0; + /* + * Always allow 1 descriptor to be un-allocated to avoid + * a H/W race condition + */ + txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); + txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); + txq->ctx_curr = 0; + memset((void *)&txq->ctx_cache, 0, + IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); +} + +static inline int +ixgbe_rxq_vec_setup_default(struct ixgbe_rx_queue *rxq) +{ + uintptr_t p; + struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ + + mb_def.nb_segs = 1; + mb_def.data_off = RTE_PKTMBUF_HEADROOM; + mb_def.port = rxq->port_id; + rte_mbuf_refcnt_set(&mb_def, 1); + + /* prevent compiler reordering: rearm_data covers previous fields */ + rte_compiler_barrier(); + p = (uintptr_t)&mb_def.rearm_data; + rxq->mbuf_initializer = *(uint64_t *)p; + return 0; +} + +static inline int +ixgbe_txq_vec_setup_default(struct ixgbe_tx_queue *txq, + const struct ixgbe_txq_ops *txq_ops) +{ + if (txq->sw_ring_v == NULL) + return -1; + + /* leave the first one for overflow */ + txq->sw_ring_v = txq->sw_ring_v + 1; + txq->ops = txq_ops; + + return 0; +} + +static inline int +ixgbe_rx_vec_dev_conf_condition_check_default(struct rte_eth_dev *dev) +{ +#ifndef RTE_LIBRTE_IEEE1588 + struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; + struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; + +#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE + /* whithout rx ol_flags, no VP flag report 
*/ + if (rxmode->hw_vlan_strip != 0 || + rxmode->hw_vlan_extend != 0) + return -1; +#endif + + /* no fdir support */ + if (fconf->mode != RTE_FDIR_MODE_NONE) + return -1; + + /* + * - no csum error report support + * - no header split support + */ + if (rxmode->hw_ip_checksum == 1 || + rxmode->header_split == 1) + return -1; + + return 0; +#else + RTE_SET_USED(dev); + return -1; +#endif +} +#endif diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c new file mode 100644 index 00000000..64a329ea --- /dev/null +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_neon.c @@ -0,0 +1,560 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdint.h> +#include <rte_ethdev.h> +#include <rte_malloc.h> + +#include "ixgbe_ethdev.h" +#include "ixgbe_rxtx.h" +#include "ixgbe_rxtx_vec_common.h" + +#include <arm_neon.h> + +#pragma GCC diagnostic ignored "-Wcast-qual" + +static inline void +ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) +{ + int i; + uint16_t rx_id; + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *rxep = &rxq->sw_ring[rxq->rxrearm_start]; + struct rte_mbuf *mb0, *mb1; + uint64x2_t dma_addr0, dma_addr1; + uint64x2_t zero = vdupq_n_u64(0); + uint64_t paddr; + uint8x8_t p; + + rxdp = rxq->rx_ring + rxq->rxrearm_start; + + /* Pull 'n' more MBUFs into the software ring */ + if (unlikely(rte_mempool_get_bulk(rxq->mb_pool, + (void *)rxep, + RTE_IXGBE_RXQ_REARM_THRESH) < 0)) { + if (rxq->rxrearm_nb + RTE_IXGBE_RXQ_REARM_THRESH >= + rxq->nb_rx_desc) { + for (i = 0; i < RTE_IXGBE_DESCS_PER_LOOP; i++) { + rxep[i].mbuf = &rxq->fake_mbuf; + vst1q_u64((uint64_t *)&rxdp[i].read, + zero); + } + } + rte_eth_devices[rxq->port_id].data->rx_mbuf_alloc_failed += + RTE_IXGBE_RXQ_REARM_THRESH; + return; + } + + p = vld1_u8((uint8_t *)&rxq->mbuf_initializer); + + /* Initialize the mbufs in vector, process 2 mbufs in one loop */ + for (i = 0; i < RTE_IXGBE_RXQ_REARM_THRESH; i += 2, rxep += 2) { + mb0 = rxep[0].mbuf; + mb1 = rxep[1].mbuf; + + /* + * Flush mbuf with pkt template. + * Data to be rearmed is 6 bytes long. + * Though, RX will overwrite ol_flags that are coming next + * anyway. So overwrite whole 8 bytes with one load: + * 6 bytes of rearm_data plus first 2 bytes of ol_flags. + */ + vst1_u8((uint8_t *)&mb0->rearm_data, p); + paddr = mb0->buf_physaddr + RTE_PKTMBUF_HEADROOM; + dma_addr0 = vsetq_lane_u64(paddr, zero, 0); + /* flush desc with pa dma_addr */ + vst1q_u64((uint64_t *)&rxdp++->read, dma_addr0); + + vst1_u8((uint8_t *)&mb1->rearm_data, p); + paddr = mb1->buf_physaddr + RTE_PKTMBUF_HEADROOM; + dma_addr1 = vsetq_lane_u64(paddr, zero, 0); + vst1q_u64((uint64_t *)&rxdp++->read, dma_addr1); + } + + rxq->rxrearm_start += RTE_IXGBE_RXQ_REARM_THRESH; + if (rxq->rxrearm_start >= rxq->nb_rx_desc) + rxq->rxrearm_start = 0; + + rxq->rxrearm_nb -= RTE_IXGBE_RXQ_REARM_THRESH; + + rx_id = (uint16_t)((rxq->rxrearm_start == 0) ? + (rxq->nb_rx_desc - 1) : (rxq->rxrearm_start - 1)); + + /* Update the tail pointer on the NIC */ + IXGBE_PCI_REG_WRITE(rxq->rdt_reg_addr, rx_id); +} + +/* Handling the offload flags (olflags) field takes computation + * time when receiving packets. Therefore we provide a flag to disable + * the processing of the olflags field when they are not needed. 
This + * gives improved performance, at the cost of losing the offload info + * in the received packet + */ +#ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE + +#define VTAG_SHIFT (3) + +static inline void +desc_to_olflags_v(uint8x16x2_t sterr_tmp1, uint8x16x2_t sterr_tmp2, + uint8x16_t staterr, struct rte_mbuf **rx_pkts) +{ + uint8x16_t ptype; + uint8x16_t vtag; + + union { + uint8_t e[4]; + uint32_t word; + } vol; + + const uint8x16_t pkttype_msk = { + PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, + PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}; + + const uint8x16_t rsstype_msk = { + 0x0F, 0x0F, 0x0F, 0x0F, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00}; + + const uint8x16_t rss_flags = { + 0, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, + 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, + PKT_RX_RSS_HASH, 0, 0, 0, + 0, 0, 0, PKT_RX_FDIR}; + + ptype = vzipq_u8(sterr_tmp1.val[0], sterr_tmp2.val[0]).val[0]; + ptype = vandq_u8(ptype, rsstype_msk); + ptype = vqtbl1q_u8(rss_flags, ptype); + + vtag = vshrq_n_u8(staterr, VTAG_SHIFT); + vtag = vandq_u8(vtag, pkttype_msk); + vtag = vorrq_u8(ptype, vtag); + + vol.word = vgetq_lane_u32(vreinterpretq_u32_u8(vtag), 0); + + rx_pkts[0]->ol_flags = vol.e[0]; + rx_pkts[1]->ol_flags = vol.e[1]; + rx_pkts[2]->ol_flags = vol.e[2]; + rx_pkts[3]->ol_flags = vol.e[3]; +} +#else +#define desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, rx_pkts) +#endif + +/* + * vPMD raw receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + * - don't support ol_flags for rss and csum err + */ + +#define IXGBE_VPMD_DESC_DD_MASK 0x01010101 +#define IXGBE_VPMD_DESC_EOP_MASK 0x02020202 + +static inline uint16_t +_recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts, uint8_t *split_packet) +{ + volatile union ixgbe_adv_rx_desc *rxdp; + struct ixgbe_rx_entry *sw_ring; + uint16_t nb_pkts_recd; + int pos; + uint64_t var; + uint8x16_t shuf_msk = { + 0xFF, 0xFF, + 0xFF, 0xFF, /* skip 32 bits pkt_type */ + 12, 13, /* octet 12~13, low 16 bits pkt_len */ + 0xFF, 0xFF, /* skip high 16 bits pkt_len, zero out */ + 12, 13, /* octet 12~13, 16 bits data_len */ + 14, 15, /* octet 14~15, low 16 bits vlan_macip */ + 4, 5, 6, 7 /* octet 4~7, 32bits rss */ + }; + uint16x8_t crc_adjust = {0, 0, rxq->crc_len, 0, + rxq->crc_len, 0, 0, 0}; + + /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ + nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); + + /* nb_pkts has to be floor-aligned to RTE_IXGBE_DESCS_PER_LOOP */ + nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); + + /* Just the act of getting into the function from the application is + * going to cost about 7 cycles + */ + rxdp = rxq->rx_ring + rxq->rx_tail; + + rte_prefetch_non_temporal(rxdp); + + /* See if we need to rearm the RX queue - gives the prefetch a bit + * of time to act + */ + if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH) + ixgbe_rxq_rearm(rxq); + + /* Before we start moving massive data around, check to see if + * there is actually a packet available + */ + if (!(rxdp->wb.upper.status_error & + rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) + return 0; + + /* Cache is empty -> need to scan the buffer rings, but first move + * the next 'n' mbufs into the 
cache + */ + sw_ring = &rxq->sw_ring[rxq->rx_tail]; + + /* A. load 4 packet in one loop + * B. copy 4 mbuf point from swring to rx_pkts + * C. calc the number of DD bits among the 4 packets + * [C*. extract the end-of-packet bit, if requested] + * D. fill info. from desc to mbuf + */ + for (pos = 0, nb_pkts_recd = 0; pos < nb_pkts; + pos += RTE_IXGBE_DESCS_PER_LOOP, + rxdp += RTE_IXGBE_DESCS_PER_LOOP) { + uint64x2_t descs[RTE_IXGBE_DESCS_PER_LOOP]; + uint8x16_t pkt_mb1, pkt_mb2, pkt_mb3, pkt_mb4; + uint8x16x2_t sterr_tmp1, sterr_tmp2; + uint64x2_t mbp1, mbp2; + uint8x16_t staterr; + uint16x8_t tmp; + uint32_t stat; + + /* B.1 load 1 mbuf point */ + mbp1 = vld1q_u64((uint64_t *)&sw_ring[pos]); + + /* Read desc statuses backwards to avoid race condition */ + /* A.1 load 4 pkts desc */ + descs[3] = vld1q_u64((uint64_t *)(rxdp + 3)); + rte_rmb(); + + /* B.2 copy 2 mbuf point into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos], mbp1); + + /* B.1 load 1 mbuf point */ + mbp2 = vld1q_u64((uint64_t *)&sw_ring[pos + 2]); + + descs[2] = vld1q_u64((uint64_t *)(rxdp + 2)); + /* B.1 load 2 mbuf point */ + descs[1] = vld1q_u64((uint64_t *)(rxdp + 1)); + descs[0] = vld1q_u64((uint64_t *)(rxdp)); + + /* B.2 copy 2 mbuf point into rx_pkts */ + vst1q_u64((uint64_t *)&rx_pkts[pos + 2], mbp2); + + if (split_packet) { + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); + } + + /* D.1 pkt 3,4 convert format from desc to pktmbuf */ + pkt_mb4 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[3]), shuf_msk); + pkt_mb3 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[2]), shuf_msk); + + /* D.1 pkt 1,2 convert format from desc to pktmbuf */ + pkt_mb2 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[1]), shuf_msk); + pkt_mb1 = vqtbl1q_u8(vreinterpretq_u8_u64(descs[0]), shuf_msk); + + /* C.1 4=>2 filter staterr info only */ + sterr_tmp2 = vzipq_u8(vreinterpretq_u8_u64(descs[1]), + vreinterpretq_u8_u64(descs[3])); + /* C.1 4=>2 filter staterr info only */ + sterr_tmp1 = vzipq_u8(vreinterpretq_u8_u64(descs[0]), + vreinterpretq_u8_u64(descs[2])); + + /* C.2 get 4 pkts staterr value */ + staterr = vzipq_u8(sterr_tmp1.val[1], sterr_tmp2.val[1]).val[0]; + stat = vgetq_lane_u32(vreinterpretq_u32_u8(staterr), 0); + + /* set ol_flags with vlan packet type */ + desc_to_olflags_v(sterr_tmp1, sterr_tmp2, staterr, + &rx_pkts[pos]); + + /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb4), crc_adjust); + pkt_mb4 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb3), crc_adjust); + pkt_mb3 = vreinterpretq_u8_u16(tmp); + + /* D.3 copy final 3,4 data to rx_pkts */ + vst1q_u8((void *)&rx_pkts[pos + 3]->rx_descriptor_fields1, + pkt_mb4); + vst1q_u8((void *)&rx_pkts[pos + 2]->rx_descriptor_fields1, + pkt_mb3); + + /* D.2 pkt 1,2 set in_port/nb_seg and remove crc */ + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb2), crc_adjust); + pkt_mb2 = vreinterpretq_u8_u16(tmp); + tmp = vsubq_u16(vreinterpretq_u16_u8(pkt_mb1), crc_adjust); + pkt_mb1 = vreinterpretq_u8_u16(tmp); + + /* C* extract and record EOP bit */ + if (split_packet) { + /* and with mask to extract bits, flipping 1-0 */ + *(int *)split_packet = ~stat & IXGBE_VPMD_DESC_EOP_MASK; + + split_packet += RTE_IXGBE_DESCS_PER_LOOP; + + /* zero-out next pointers */ + rx_pkts[pos]->next = NULL; + rx_pkts[pos + 1]->next = NULL; + rx_pkts[pos + 2]->next = NULL; + rx_pkts[pos + 3]->next = NULL; + } + + rte_prefetch_non_temporal(rxdp 
+ RTE_IXGBE_DESCS_PER_LOOP); + + /* D.3 copy final 1,2 data to rx_pkts */ + vst1q_u8((uint8_t *)&rx_pkts[pos + 1]->rx_descriptor_fields1, + pkt_mb2); + vst1q_u8((uint8_t *)&rx_pkts[pos]->rx_descriptor_fields1, + pkt_mb1); + + /* C.4 calc avaialbe number of desc */ + var = __builtin_popcount(stat & IXGBE_VPMD_DESC_DD_MASK); + nb_pkts_recd += var; + if (likely(var != RTE_IXGBE_DESCS_PER_LOOP)) + break; + } + + /* Update our internal tail pointer */ + rxq->rx_tail = (uint16_t)(rxq->rx_tail + nb_pkts_recd); + rxq->rx_tail = (uint16_t)(rxq->rx_tail & (rxq->nb_rx_desc - 1)); + rxq->rxrearm_nb = (uint16_t)(rxq->rxrearm_nb + nb_pkts_recd); + + return nb_pkts_recd; +} + +/* + * vPMD receive routine, only accept(nb_pkts >= RTE_IXGBE_DESCS_PER_LOOP) + * + * Notice: + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + * - don't support ol_flags for rss and csum err + */ +uint16_t +ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); +} + +/* + * vPMD receive routine that reassembles scattered packets + * + * Notice: + * - don't support ol_flags for rss and csum err + * - nb_pkts < RTE_IXGBE_DESCS_PER_LOOP, just return no packet + * - nb_pkts > RTE_IXGBE_MAX_RX_BURST, only scan RTE_IXGBE_MAX_RX_BURST + * numbers of DD bit + * - floor align nb_pkts to a RTE_IXGBE_DESC_PER_LOOP power-of-two + */ +uint16_t +ixgbe_recv_scattered_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_rx_queue *rxq = rx_queue; + uint8_t split_flags[RTE_IXGBE_MAX_RX_BURST] = {0}; + + /* get some new buffers */ + uint16_t nb_bufs = _recv_raw_pkts_vec(rxq, rx_pkts, nb_pkts, + split_flags); + if (nb_bufs == 0) + return 0; + + /* happy day case, full burst + no packets to be joined */ + const uint64_t *split_fl64 = (uint64_t *)split_flags; + if (rxq->pkt_first_seg == NULL && + split_fl64[0] == 0 && split_fl64[1] == 0 && + split_fl64[2] == 0 && split_fl64[3] == 0) + return nb_bufs; + + /* reassemble any packets that need reassembly*/ + unsigned int i = 0; + if (rxq->pkt_first_seg == NULL) { + /* find the first split flag, and only reassemble then*/ + while (i < nb_bufs && !split_flags[i]) + i++; + if (i == nb_bufs) + return nb_bufs; + } + return i + reassemble_packets(rxq, &rx_pkts[i], nb_bufs - i, + &split_flags[i]); +} + +static inline void +vtx1(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf *pkt, uint64_t flags) +{ + uint64x2_t descriptor = { + pkt->buf_physaddr + pkt->data_off, + (uint64_t)pkt->pkt_len << 46 | flags | pkt->data_len}; + + vst1q_u64((uint64_t *)&txdp->read, descriptor); +} + +static inline void +vtx(volatile union ixgbe_adv_tx_desc *txdp, + struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t flags) +{ + int i; + + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) + vtx1(txdp, *pkt, flags); +} + +uint16_t +ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + struct ixgbe_tx_queue *txq = (struct ixgbe_tx_queue *)tx_queue; + volatile union ixgbe_adv_tx_desc *txdp; + struct ixgbe_tx_entry_v *txep; + uint16_t n, nb_commit, tx_id; + uint64_t flags = DCMD_DTYP_FLAGS; + uint64_t rs = IXGBE_ADVTXD_DCMD_RS | DCMD_DTYP_FLAGS; + int i; + + /* cross rx_thresh boundary is not allowed */ + nb_pkts = RTE_MIN(nb_pkts, txq->tx_rs_thresh); + + if (txq->nb_tx_free < txq->tx_free_thresh) + 
ixgbe_tx_free_bufs(txq); + + nb_commit = nb_pkts = (uint16_t)RTE_MIN(txq->nb_tx_free, nb_pkts); + if (unlikely(nb_pkts == 0)) + return 0; + + tx_id = txq->tx_tail; + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring_v[tx_id]; + + txq->nb_tx_free = (uint16_t)(txq->nb_tx_free - nb_pkts); + + n = (uint16_t)(txq->nb_tx_desc - tx_id); + if (nb_commit >= n) { + tx_backlog_entry(txep, tx_pkts, n); + + for (i = 0; i < n - 1; ++i, ++tx_pkts, ++txdp) + vtx1(txdp, *tx_pkts, flags); + + vtx1(txdp, *tx_pkts++, rs); + + nb_commit = (uint16_t)(nb_commit - n); + + tx_id = 0; + txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); + + /* avoid reach the end of ring */ + txdp = &txq->tx_ring[tx_id]; + txep = &txq->sw_ring_v[tx_id]; + } + + tx_backlog_entry(txep, tx_pkts, nb_commit); + + vtx(txdp, tx_pkts, nb_commit, flags); + + tx_id = (uint16_t)(tx_id + nb_commit); + if (tx_id > txq->tx_next_rs) { + txq->tx_ring[txq->tx_next_rs].read.cmd_type_len |= + rte_cpu_to_le_32(IXGBE_ADVTXD_DCMD_RS); + txq->tx_next_rs = (uint16_t)(txq->tx_next_rs + + txq->tx_rs_thresh); + } + + txq->tx_tail = tx_id; + + IXGBE_PCI_REG_WRITE(txq->tdt_reg_addr, txq->tx_tail); + + return nb_pkts; +} + +static void __attribute__((cold)) +ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) +{ + _ixgbe_tx_queue_release_mbufs_vec(txq); +} + +void __attribute__((cold)) +ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) +{ + _ixgbe_rx_queue_release_mbufs_vec(rxq); +} + +static void __attribute__((cold)) +ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) +{ + _ixgbe_tx_free_swring_vec(txq); +} + +static void __attribute__((cold)) +ixgbe_reset_tx_queue(struct ixgbe_tx_queue *txq) +{ + _ixgbe_reset_tx_queue_vec(txq); +} + +static const struct ixgbe_txq_ops vec_txq_ops = { + .release_mbufs = ixgbe_tx_queue_release_mbufs_vec, + .free_swring = ixgbe_tx_free_swring, + .reset = ixgbe_reset_tx_queue, +}; + +int __attribute__((cold)) +ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq) +{ + return ixgbe_rxq_vec_setup_default(rxq); +} + +int __attribute__((cold)) +ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq) +{ + return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops); +} + +int __attribute__((cold)) +ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) +{ + return ixgbe_rx_vec_dev_conf_condition_check_default(dev); +} diff --git a/drivers/net/ixgbe/ixgbe_rxtx_vec.c b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c index 50407043..4f95debd 100644 --- a/drivers/net/ixgbe/ixgbe_rxtx_vec.c +++ b/drivers/net/ixgbe/ixgbe_rxtx_vec_sse.c @@ -37,6 +37,7 @@ #include "ixgbe_ethdev.h" #include "ixgbe_rxtx.h" +#include "ixgbe_rxtx_vec_common.h" #include <tmmintrin.h> @@ -140,10 +141,9 @@ ixgbe_rxq_rearm(struct ixgbe_rx_queue *rxq) */ #ifdef RTE_IXGBE_RX_OLFLAGS_ENABLE -#define VTAG_SHIFT (3) - static inline void -desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) +desc_to_olflags_v(__m128i descs[4], uint8_t vlan_flags, + struct rte_mbuf **rx_pkts) { __m128i ptype0, ptype1, vtag0, vtag1; union { @@ -151,11 +151,6 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) uint64_t dword; } vol; - /* pkt type + vlan olflags mask */ - const __m128i pkttype_msk = _mm_set_epi16( - 0x0000, 0x0000, 0x0000, 0x0000, - PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT, PKT_RX_VLAN_PKT); - /* mask everything except rss type */ const __m128i rsstype_msk = _mm_set_epi16( 0x0000, 0x0000, 0x0000, 0x0000, @@ -167,6 +162,19 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 0, PKT_RX_RSS_HASH, 
PKT_RX_RSS_HASH, PKT_RX_RSS_HASH, 0); + /* mask everything except vlan present bit */ + const __m128i vlan_msk = _mm_set_epi16( + 0x0000, 0x0000, + 0x0000, 0x0000, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP, + IXGBE_RXD_STAT_VP, IXGBE_RXD_STAT_VP); + /* map vlan present (0x8) to ol_flags */ + const __m128i vlan_map = _mm_set_epi8( + 0, 0, 0, 0, + 0, 0, 0, vlan_flags, + 0, 0, 0, 0, + 0, 0, 0, 0); + ptype0 = _mm_unpacklo_epi16(descs[0], descs[1]); ptype1 = _mm_unpacklo_epi16(descs[2], descs[3]); vtag0 = _mm_unpackhi_epi16(descs[0], descs[1]); @@ -177,8 +185,8 @@ desc_to_olflags_v(__m128i descs[4], struct rte_mbuf **rx_pkts) ptype0 = _mm_shuffle_epi8(rss_flags, ptype0); vtag1 = _mm_unpacklo_epi32(vtag0, vtag1); - vtag1 = _mm_srli_epi16(vtag1, VTAG_SHIFT); - vtag1 = _mm_and_si128(vtag1, pkttype_msk); + vtag1 = _mm_and_si128(vtag1, vlan_msk); + vtag1 = _mm_shuffle_epi8(vlan_map, vtag1); vtag1 = _mm_or_si128(ptype0, vtag1); vol.dword = _mm_cvtsi128_si64(vtag1); @@ -220,6 +228,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, 0, 0 /* ignore pkt_type field */ ); __m128i dd_check, eop_check; + uint8_t vlan_flags; /* nb_pkts shall be less equal than RTE_IXGBE_MAX_RX_BURST */ nb_pkts = RTE_MIN(nb_pkts, RTE_IXGBE_MAX_RX_BURST); @@ -228,18 +237,21 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_IXGBE_DESCS_PER_LOOP); /* Just the act of getting into the function from the application is - * going to cost about 7 cycles */ + * going to cost about 7 cycles + */ rxdp = rxq->rx_ring + rxq->rx_tail; _mm_prefetch((const void *)rxdp, _MM_HINT_T0); /* See if we need to rearm the RX queue - gives the prefetch a bit - * of time to act */ + * of time to act + */ if (rxq->rxrearm_nb > RTE_IXGBE_RXQ_REARM_THRESH) ixgbe_rxq_rearm(rxq); /* Before we start moving massive data around, check to see if - * there is actually a packet available */ + * there is actually a packet available + */ if (!(rxdp->wb.upper.status_error & rte_cpu_to_le_32(IXGBE_RXDADV_STAT_DD))) return 0; @@ -262,9 +274,14 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, ); /* Cache is empty -> need to scan the buffer rings, but first move - * the next 'n' mbufs into the cache */ + * the next 'n' mbufs into the cache + */ sw_ring = &rxq->sw_ring[rxq->rx_tail]; + /* ensure these 2 flags are in the lower 8 bits */ + RTE_BUILD_BUG_ON((PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED) > UINT8_MAX); + vlan_flags = rxq->vlan_flags & UINT8_MAX; + /* A. load 4 packet in one loop * [A*. mask out 4 unused dirty field in desc] * B. 
copy 4 mbuf point from swring to rx_pkts @@ -302,10 +319,10 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, _mm_storeu_si128((__m128i *)&rx_pkts[pos+2], mbp2); if (split_packet) { - rte_prefetch0(&rx_pkts[pos]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 1]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 2]->cacheline1); - rte_prefetch0(&rx_pkts[pos + 3]->cacheline1); + rte_mbuf_prefetch_part2(rx_pkts[pos]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 1]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 2]); + rte_mbuf_prefetch_part2(rx_pkts[pos + 3]); } /* avoid compiler reorder optimization */ @@ -325,7 +342,7 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, sterr_tmp1 = _mm_unpackhi_epi32(descs[1], descs[0]); /* set ol_flags with vlan packet type */ - desc_to_olflags_v(descs, &rx_pkts[pos]); + desc_to_olflags_v(descs, vlan_flags, &rx_pkts[pos]); /* D.2 pkt 3,4 set in_port/nb_seg and remove crc */ pkt_mb4 = _mm_add_epi16(pkt_mb4, crc_adjust); @@ -359,7 +376,8 @@ _recv_raw_pkts_vec(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_pkts, /* the staterr values are not in order, as the count * count of dd bits doesn't care. However, for end of * packet tracking, we do care, so shuffle. This also - * compresses the 32-bit values to 8-bit */ + * compresses the 32-bit values to 8-bit + */ eop_bits = _mm_shuffle_epi8(eop_bits, eop_shuf_mask); /* store the resulting 32-bit value */ *(int *)split_packet = _mm_cvtsi128_si32(eop_bits); @@ -414,69 +432,6 @@ ixgbe_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, return _recv_raw_pkts_vec(rx_queue, rx_pkts, nb_pkts, NULL); } -static inline uint16_t -reassemble_packets(struct ixgbe_rx_queue *rxq, struct rte_mbuf **rx_bufs, - uint16_t nb_bufs, uint8_t *split_flags) -{ - struct rte_mbuf *pkts[nb_bufs]; /*finished pkts*/ - struct rte_mbuf *start = rxq->pkt_first_seg; - struct rte_mbuf *end = rxq->pkt_last_seg; - unsigned pkt_idx, buf_idx; - - for (buf_idx = 0, pkt_idx = 0; buf_idx < nb_bufs; buf_idx++) { - if (end != NULL) { - /* processing a split packet */ - end->next = rx_bufs[buf_idx]; - rx_bufs[buf_idx]->data_len += rxq->crc_len; - - start->nb_segs++; - start->pkt_len += rx_bufs[buf_idx]->data_len; - end = end->next; - - if (!split_flags[buf_idx]) { - /* it's the last packet of the set */ - start->hash = end->hash; - start->ol_flags = end->ol_flags; - /* we need to strip crc for the whole packet */ - start->pkt_len -= rxq->crc_len; - if (end->data_len > rxq->crc_len) - end->data_len -= rxq->crc_len; - else { - /* free up last mbuf */ - struct rte_mbuf *secondlast = start; - - start->nb_segs--; - while (secondlast->next != end) - secondlast = secondlast->next; - secondlast->data_len -= (rxq->crc_len - - end->data_len); - secondlast->next = NULL; - rte_pktmbuf_free_seg(end); - end = secondlast; - } - pkts[pkt_idx++] = start; - start = end = NULL; - } - } else { - /* not processing a split packet */ - if (!split_flags[buf_idx]) { - /* not a split packet, save and skip */ - pkts[pkt_idx++] = rx_bufs[buf_idx]; - continue; - } - end = start = rx_bufs[buf_idx]; - rx_bufs[buf_idx]->data_len += rxq->crc_len; - rx_bufs[buf_idx]->pkt_len += rxq->crc_len; - } - } - - /* save the partial packet for next time */ - rxq->pkt_first_seg = start; - rxq->pkt_last_seg = end; - memcpy(rx_bufs, pkts, pkt_idx * (sizeof(*pkts))); - return pkt_idx; -} - /* * vPMD receive routine that reassembles scattered packets * @@ -535,76 +490,11 @@ vtx(volatile union ixgbe_adv_tx_desc *txdp, struct rte_mbuf **pkt, uint16_t nb_pkts, uint64_t 
flags) { int i; + for (i = 0; i < nb_pkts; ++i, ++txdp, ++pkt) vtx1(txdp, *pkt, flags); } -static inline int __attribute__((always_inline)) -ixgbe_tx_free_bufs(struct ixgbe_tx_queue *txq) -{ - struct ixgbe_tx_entry_v *txep; - uint32_t status; - uint32_t n; - uint32_t i; - int nb_free = 0; - struct rte_mbuf *m, *free[RTE_IXGBE_TX_MAX_FREE_BUF_SZ]; - - /* check DD bit on threshold descriptor */ - status = txq->tx_ring[txq->tx_next_dd].wb.status; - if (!(status & IXGBE_ADVTXD_STAT_DD)) - return 0; - - n = txq->tx_rs_thresh; - - /* - * first buffer to free from S/W ring is at index - * tx_next_dd - (tx_rs_thresh-1) - */ - txep = &txq->sw_ring_v[txq->tx_next_dd - (n - 1)]; - m = __rte_pktmbuf_prefree_seg(txep[0].mbuf); - if (likely(m != NULL)) { - free[0] = m; - nb_free = 1; - for (i = 1; i < n; i++) { - m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); - if (likely(m != NULL)) { - if (likely(m->pool == free[0]->pool)) - free[nb_free++] = m; - else { - rte_mempool_put_bulk(free[0]->pool, - (void *)free, nb_free); - free[0] = m; - nb_free = 1; - } - } - } - rte_mempool_put_bulk(free[0]->pool, (void **)free, nb_free); - } else { - for (i = 1; i < n; i++) { - m = __rte_pktmbuf_prefree_seg(txep[i].mbuf); - if (m != NULL) - rte_mempool_put(m->pool, m); - } - } - - /* buffers were freed, update counters */ - txq->nb_tx_free = (uint16_t)(txq->nb_tx_free + txq->tx_rs_thresh); - txq->tx_next_dd = (uint16_t)(txq->tx_next_dd + txq->tx_rs_thresh); - if (txq->tx_next_dd >= txq->nb_tx_desc) - txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); - - return txq->tx_rs_thresh; -} - -static inline void __attribute__((always_inline)) -tx_backlog_entry(struct ixgbe_tx_entry_v *txep, - struct rte_mbuf **tx_pkts, uint16_t nb_pkts) -{ - int i; - for (i = 0; i < (int)nb_pkts; ++i) - txep[i].mbuf = tx_pkts[i]; -} - uint16_t ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) @@ -675,91 +565,25 @@ ixgbe_xmit_pkts_vec(void *tx_queue, struct rte_mbuf **tx_pkts, static void __attribute__((cold)) ixgbe_tx_queue_release_mbufs_vec(struct ixgbe_tx_queue *txq) { - unsigned i; - struct ixgbe_tx_entry_v *txe; - const uint16_t max_desc = (uint16_t)(txq->nb_tx_desc - 1); - - if (txq->sw_ring == NULL || txq->nb_tx_free == max_desc) - return; - - /* release the used mbufs in sw_ring */ - for (i = txq->tx_next_dd - (txq->tx_rs_thresh - 1); - i != txq->tx_tail; - i = (i + 1) & max_desc) { - txe = &txq->sw_ring_v[i]; - rte_pktmbuf_free_seg(txe->mbuf); - } - txq->nb_tx_free = max_desc; - - /* reset tx_entry */ - for (i = 0; i < txq->nb_tx_desc; i++) { - txe = &txq->sw_ring_v[i]; - txe->mbuf = NULL; - } + _ixgbe_tx_queue_release_mbufs_vec(txq); } void __attribute__((cold)) ixgbe_rx_queue_release_mbufs_vec(struct ixgbe_rx_queue *rxq) { - const unsigned mask = rxq->nb_rx_desc - 1; - unsigned i; - - if (rxq->sw_ring == NULL || rxq->rxrearm_nb >= rxq->nb_rx_desc) - return; - - /* free all mbufs that are valid in the ring */ - for (i = rxq->rx_tail; i != rxq->rxrearm_start; i = (i + 1) & mask) - rte_pktmbuf_free_seg(rxq->sw_ring[i].mbuf); - rxq->rxrearm_nb = rxq->nb_rx_desc; - - /* set all entries to NULL */ - memset(rxq->sw_ring, 0, sizeof(rxq->sw_ring[0]) * rxq->nb_rx_desc); + _ixgbe_rx_queue_release_mbufs_vec(rxq); } static void __attribute__((cold)) ixgbe_tx_free_swring(struct ixgbe_tx_queue *txq) { - if (txq == NULL) - return; - - if (txq->sw_ring != NULL) { - rte_free(txq->sw_ring_v - 1); - txq->sw_ring_v = NULL; - } + _ixgbe_tx_free_swring_vec(txq); } static void __attribute__((cold)) ixgbe_reset_tx_queue(struct 
ixgbe_tx_queue *txq) { - static const union ixgbe_adv_tx_desc zeroed_desc = {{0}}; - struct ixgbe_tx_entry_v *txe = txq->sw_ring_v; - uint16_t i; - - /* Zero out HW ring memory */ - for (i = 0; i < txq->nb_tx_desc; i++) - txq->tx_ring[i] = zeroed_desc; - - /* Initialize SW ring entries */ - for (i = 0; i < txq->nb_tx_desc; i++) { - volatile union ixgbe_adv_tx_desc *txd = &txq->tx_ring[i]; - txd->wb.status = IXGBE_TXD_STAT_DD; - txe[i].mbuf = NULL; - } - - txq->tx_next_dd = (uint16_t)(txq->tx_rs_thresh - 1); - txq->tx_next_rs = (uint16_t)(txq->tx_rs_thresh - 1); - - txq->tx_tail = 0; - txq->nb_tx_used = 0; - /* - * Always allow 1 descriptor to be un-allocated to avoid - * a H/W race condition - */ - txq->last_desc_cleaned = (uint16_t)(txq->nb_tx_desc - 1); - txq->nb_tx_free = (uint16_t)(txq->nb_tx_desc - 1); - txq->ctx_curr = 0; - memset((void *)&txq->ctx_cache, 0, - IXGBE_CTX_NUM * sizeof(struct ixgbe_advctx_info)); + _ixgbe_reset_tx_queue_vec(txq); } static const struct ixgbe_txq_ops vec_txq_ops = { @@ -771,63 +595,17 @@ static const struct ixgbe_txq_ops vec_txq_ops = { int __attribute__((cold)) ixgbe_rxq_vec_setup(struct ixgbe_rx_queue *rxq) { - uintptr_t p; - struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ - - mb_def.nb_segs = 1; - mb_def.data_off = RTE_PKTMBUF_HEADROOM; - mb_def.port = rxq->port_id; - rte_mbuf_refcnt_set(&mb_def, 1); - - /* prevent compiler reordering: rearm_data covers previous fields */ - rte_compiler_barrier(); - p = (uintptr_t)&mb_def.rearm_data; - rxq->mbuf_initializer = *(uint64_t *)p; - return 0; + return ixgbe_rxq_vec_setup_default(rxq); } int __attribute__((cold)) ixgbe_txq_vec_setup(struct ixgbe_tx_queue *txq) { - if (txq->sw_ring_v == NULL) - return -1; - - /* leave the first one for overflow */ - txq->sw_ring_v = txq->sw_ring_v + 1; - txq->ops = &vec_txq_ops; - - return 0; + return ixgbe_txq_vec_setup_default(txq, &vec_txq_ops); } int __attribute__((cold)) ixgbe_rx_vec_dev_conf_condition_check(struct rte_eth_dev *dev) { -#ifndef RTE_LIBRTE_IEEE1588 - struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; - struct rte_fdir_conf *fconf = &dev->data->dev_conf.fdir_conf; - -#ifndef RTE_IXGBE_RX_OLFLAGS_ENABLE - /* whithout rx ol_flags, no VP flag report */ - if (rxmode->hw_vlan_strip != 0 || - rxmode->hw_vlan_extend != 0) - return -1; -#endif - - /* no fdir support */ - if (fconf->mode != RTE_FDIR_MODE_NONE) - return -1; - - /* - * - no csum error report support - * - no header split support - */ - if (rxmode->hw_ip_checksum == 1 || - rxmode->header_split == 1) - return -1; - - return 0; -#else - RTE_SET_USED(dev); - return -1; -#endif + return ixgbe_rx_vec_dev_conf_condition_check_default(dev); } diff --git a/drivers/net/mlx4/Makefile b/drivers/net/mlx4/Makefile index d2f56927..efed953e 100644 --- a/drivers/net/mlx4/Makefile +++ b/drivers/net/mlx4/Makefile @@ -48,6 +48,8 @@ CFLAGS += -O3 CFLAGS += -std=gnu99 -Wall -Wextra CFLAGS += -g CFLAGS += -I. 
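The ixgbe vector TX code added above fills the descriptor ring in at most two spans: when a burst would run past the last descriptor it writes entries up to the end of the ring, requests a completion report (RS) on the descriptor that closes that first span, wraps the index back to zero and continues with the remainder, so the inner vtx()/vtx1() stores never need a per-descriptor wrap check. The sketch below illustrates only that two-span split over a hypothetical software ring; the struct, names and logic here are illustrative and leave out the hardware descriptors, flag words and tx_rs_thresh bookkeeping the real driver maintains.

#include <stdint.h>
#include <string.h>

/* Hypothetical, stripped-down TX queue; the real ixgbe queue also holds
 * the hardware descriptor ring and its RS-threshold state. */
struct toy_txq {
        void **bufs;       /* software ring of buffer pointers */
        uint16_t size;     /* number of slots in the ring */
        uint16_t tail;     /* next slot to fill */
        uint16_t nb_free;  /* free slots, kept below size */
};

/* Enqueue up to nb_pkts buffers, splitting the copy where the ring wraps. */
uint16_t
toy_tx_burst(struct toy_txq *q, void **pkts, uint16_t nb_pkts)
{
        uint16_t tail = q->tail;
        uint16_t n = (uint16_t)(q->size - tail);  /* slots before the wrap */
        uint16_t total;

        if (nb_pkts > q->nb_free)
                nb_pkts = q->nb_free;
        if (nb_pkts == 0)
                return 0;
        total = nb_pkts;
        q->nb_free = (uint16_t)(q->nb_free - nb_pkts);

        if (nb_pkts >= n) {
                /* First span: fill up to and including the last slot. */
                memcpy(&q->bufs[tail], pkts, n * sizeof(*pkts));
                pkts += n;
                nb_pkts = (uint16_t)(nb_pkts - n);
                tail = 0;  /* continue from the start of the ring */
        }
        /* Second (or only) span. */
        memcpy(&q->bufs[tail], pkts, nb_pkts * sizeof(*pkts));
        q->tail = (uint16_t)(tail + nb_pkts);
        return total;
}

In the driver itself the descriptor that ends each span carries the RS flag and tx_next_rs advances in tx_rs_thresh steps, which is what later lets ixgbe_tx_free_bufs() reclaim completed buffers in bulk.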
+CFLAGS += -D_BSD_SOURCE +CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) LDLIBS += -libverbs @@ -98,7 +100,9 @@ ifndef V AUTOCONF_OUTPUT := >/dev/null endif -mlx4_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh +mlx4_autoconf.h.new: FORCE + +mlx4_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ RSS_SUPPORT \ @@ -118,9 +122,16 @@ mlx4_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh enum IBV_EXP_QP_BURST_CREATE_DISABLE_ETH_LOOPBACK \ $(AUTOCONF_OUTPUT) +# Create mlx4_autoconf.h or update it in case it differs from the new one. + +mlx4_autoconf.h: mlx4_autoconf.h.new + $Q [ -f '$@' ] && \ + cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \ + mv '$<' '$@' + mlx4.o: mlx4_autoconf.h clean_mlx4: FORCE - $Q rm -f -- mlx4_autoconf.h + $Q rm -f -- mlx4_autoconf.h mlx4_autoconf.h.new clean: clean_mlx4 diff --git a/drivers/net/mlx4/mlx4.c b/drivers/net/mlx4/mlx4.c index 4f21dbe2..f8ed42b8 100644 --- a/drivers/net/mlx4/mlx4.c +++ b/drivers/net/mlx4/mlx4.c @@ -54,7 +54,6 @@ #include <sys/ioctl.h> #include <sys/socket.h> #include <netinet/in.h> -#include <linux/if.h> #include <linux/ethtool.h> #include <linux/sockios.h> #include <fcntl.h> @@ -197,7 +196,6 @@ struct rxq { unsigned int sp:1; /* Use scattered RX elements. */ unsigned int csum:1; /* Enable checksum offloading. */ unsigned int csum_l2tun:1; /* Same for L2 tunnels. */ - uint32_t mb_len; /* Length of a mp-issued mbuf. */ struct mlx4_rxq_stats stats; /* RX queue counters. */ unsigned int socket; /* CPU socket ID for allocations. */ struct ibv_exp_res_domain *rd; /* Resource Domain. */ @@ -659,7 +657,15 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu) static int priv_set_mtu(struct priv *priv, uint16_t mtu) { - return priv_set_sysfs_ulong(priv, "mtu", mtu); + uint16_t new_mtu; + + if (priv_set_sysfs_ulong(priv, "mtu", mtu) || + priv_get_mtu(priv, &new_mtu)) + return -1; + if (new_mtu == mtu) + return 0; + errno = EINVAL; + return -1; } /** @@ -998,7 +1004,7 @@ txq_alloc_elts(struct txq *txq, unsigned int elts_n) } mr_linear = ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear), - (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE)); + IBV_ACCESS_LOCAL_WRITE); if (mr_linear == NULL) { ERROR("%p: unable to configure MR, ibv_reg_mr() failed", (void *)txq); @@ -1198,8 +1204,71 @@ txq_complete(struct txq *txq) return 0; } +struct mlx4_check_mempool_data { + int ret; + char *start; + char *end; +}; + +/* Called by mlx4_check_mempool() when iterating the memory chunks. */ +static void mlx4_check_mempool_cb(struct rte_mempool *mp, + void *opaque, struct rte_mempool_memhdr *memhdr, + unsigned mem_idx) +{ + struct mlx4_check_mempool_data *data = opaque; + + (void)mp; + (void)mem_idx; + + /* It already failed, skip the next chunks. */ + if (data->ret != 0) + return; + /* It is the first chunk. */ + if (data->start == NULL && data->end == NULL) { + data->start = memhdr->addr; + data->end = data->start + memhdr->len; + return; + } + if (data->end == memhdr->addr) { + data->end += memhdr->len; + return; + } + if (data->start == (char *)memhdr->addr + memhdr->len) { + data->start -= memhdr->len; + return; + } + /* Error, mempool is not virtually contigous. */ + data->ret = -1; +} + +/** + * Check if a mempool can be used: it must be virtually contiguous. + * + * @param[in] mp + * Pointer to memory pool. 
+ * @param[out] start + * Pointer to the start address of the mempool virtual memory area + * @param[out] end + * Pointer to the end address of the mempool virtual memory area + * + * @return + * 0 on success (mempool is virtually contiguous), -1 on error. + */ +static int mlx4_check_mempool(struct rte_mempool *mp, uintptr_t *start, + uintptr_t *end) +{ + struct mlx4_check_mempool_data data; + + memset(&data, 0, sizeof(data)); + rte_mempool_mem_iter(mp, mlx4_check_mempool_cb, &data); + *start = (uintptr_t)data.start; + *end = (uintptr_t)data.end; + + return data.ret; +} + /* For best performance, this function should not be inlined. */ -static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, const struct rte_mempool *) +static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, struct rte_mempool *) __attribute__((noinline)); /** @@ -1214,15 +1283,21 @@ static struct ibv_mr *mlx4_mp2mr(struct ibv_pd *, const struct rte_mempool *) * Memory region pointer, NULL in case of error. */ static struct ibv_mr * -mlx4_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp) +mlx4_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp) { const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - uintptr_t start = mp->elt_va_start; - uintptr_t end = mp->elt_va_end; + uintptr_t start; + uintptr_t end; unsigned int i; + if (mlx4_check_mempool(mp, &start, &end) != 0) { + ERROR("mempool %p: not virtually contiguous", + (void *)mp); + return NULL; + } + DEBUG("mempool %p area start=%p end=%p size=%zu", - (const void *)mp, (void *)start, (void *)end, + (void *)mp, (void *)start, (void *)end, (size_t)(end - start)); /* Round start and end to page boundary if found in memory segments. */ for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { @@ -1236,12 +1311,12 @@ mlx4_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp) end = RTE_ALIGN_CEIL(end, align); } DEBUG("mempool %p using start=%p end=%p size=%zu for MR", - (const void *)mp, (void *)start, (void *)end, + (void *)mp, (void *)start, (void *)end, (size_t)(end - start)); return ibv_reg_mr(pd, (void *)start, end - start, - IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); + IBV_ACCESS_LOCAL_WRITE); } /** @@ -1276,7 +1351,7 @@ txq_mb2mp(struct rte_mbuf *buf) * mr->lkey on success, (uint32_t)-1 on failure. */ static uint32_t -txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) +txq_mp2mr(struct txq *txq, struct rte_mempool *mp) { unsigned int i; struct ibv_mr *mr; @@ -1294,7 +1369,7 @@ txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) } /* Add a new entry, register MR first. */ DEBUG("%p: discovered new memory pool \"%s\" (%p)", - (void *)txq, mp->name, (const void *)mp); + (void *)txq, mp->name, (void *)mp); mr = mlx4_mp2mr(txq->priv->pd, mp); if (unlikely(mr == NULL)) { DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", @@ -1315,12 +1390,11 @@ txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) txq->mp2mr[i].mr = mr; txq->mp2mr[i].lkey = mr->lkey; DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, - (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey); + (void *)txq, mp->name, (void *)mp, txq->mp2mr[i].lkey); return txq->mp2mr[i].lkey; } struct txq_mp2mr_mbuf_check_data { - const struct rte_mempool *mp; int ret; }; @@ -1328,34 +1402,26 @@ struct txq_mp2mr_mbuf_check_data { * Callback function for rte_mempool_obj_iter() to check whether a given * mempool object looks like a mbuf. * - * @param[in, out] arg - * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer - * and return value. 
- * @param[in] start - * Object start address. - * @param[in] end - * Object end address. + * @param[in] mp + * The mempool pointer + * @param[in] arg + * Context data (struct txq_mp2mr_mbuf_check_data). Contains the + * return value. + * @param[in] obj + * Object address. * @param index - * Unused. - * - * @return - * Nonzero value when object is not a mbuf. + * Object index, unused. */ static void -txq_mp2mr_mbuf_check(void *arg, void *start, void *end, - uint32_t index __rte_unused) +txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, + uint32_t index __rte_unused) { struct txq_mp2mr_mbuf_check_data *data = arg; - struct rte_mbuf *buf = - (void *)((uintptr_t)start + data->mp->header_size); + struct rte_mbuf *buf = obj; - (void)index; /* Check whether mbuf structure fits element size and whether mempool * pointer is valid. */ - if (((uintptr_t)end >= (uintptr_t)(buf + 1)) && - (buf->pool == data->mp)) - data->ret = 0; - else + if (sizeof(*buf) > mp->elt_size || buf->pool != mp) data->ret = -1; } @@ -1369,28 +1435,16 @@ txq_mp2mr_mbuf_check(void *arg, void *start, void *end, * Pointer to TX queue structure. */ static void -txq_mp2mr_iter(const struct rte_mempool *mp, void *arg) +txq_mp2mr_iter(struct rte_mempool *mp, void *arg) { struct txq *txq = arg; struct txq_mp2mr_mbuf_check_data data = { - .mp = mp, - .ret = -1, + .ret = 0, }; - /* Discard empty mempools. */ - if (mp->size == 0) - return; /* Register mempool only if the first element looks like a mbuf. */ - rte_mempool_obj_iter((void *)mp->elt_va_start, - 1, - mp->header_size + mp->elt_size + mp->trailer_size, - 1, - mp->elt_pa, - mp->pg_num, - mp->pg_shift, - txq_mp2mr_mbuf_check, - &data); - if (data.ret) + if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || + data.ret == -1) return; txq_mp2mr(txq, mp); } @@ -3075,7 +3129,7 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * cacheline while allocating rep. */ rte_prefetch0(seg); - rep = __rte_mbuf_raw_alloc(rxq->mp); + rep = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(rep == NULL)) { /* * Unable to allocate a replacement mbuf, @@ -3104,7 +3158,6 @@ mlx4_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) rep->ol_flags = -1; #endif assert(rep->buf_len == seg->buf_len); - assert(rep->buf_len == rxq->mb_len); /* Reconfigure sge to use rep instead of seg. */ assert(sge->lkey == rxq->mr->lkey); sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom); @@ -3235,8 +3288,8 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) * Fetch initial bytes of packet descriptor into a * cacheline while allocating rep. 
*/ - rte_prefetch0(seg); - rte_prefetch0(&seg->cacheline1); + rte_mbuf_prefetch_part1(seg); + rte_mbuf_prefetch_part2(seg); ret = rxq->if_cq->poll_length_flags(rxq->cq, NULL, NULL, &flags); if (unlikely(ret < 0)) { @@ -3274,7 +3327,7 @@ mlx4_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) if (ret == 0) break; len = ret; - rep = __rte_mbuf_raw_alloc(rxq->mp); + rep = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(rep == NULL)) { /* * Unable to allocate a replacement mbuf, @@ -3525,6 +3578,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) unsigned int i, k; struct ibv_exp_qp_attr mod; struct ibv_recv_wr *bad_wr; + unsigned int mb_len; int err; int parent = (rxq == &priv->rxq_parent); @@ -3533,6 +3587,7 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) (void *)dev, (void *)rxq); return EINVAL; } + mb_len = rte_pktmbuf_data_room_size(rxq->mp); DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq); /* Number of descriptors and mbufs currently allocated. */ desc_n = (tmpl.elts_n * (tmpl.sp ? MLX4_PMD_SGE_WR_N : 1)); @@ -3547,9 +3602,10 @@ rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) rxq->csum_l2tun = tmpl.csum_l2tun; } /* Enable scattered packets support for this queue if necessary. */ + assert(mb_len >= RTE_PKTMBUF_HEADROOM); if ((dev->data->dev_conf.rxmode.jumbo_frame) && (dev->data->dev_conf.rxmode.max_rx_pkt_len > - (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { + (mb_len - RTE_PKTMBUF_HEADROOM))) { tmpl.sp = 1; desc_n /= MLX4_PMD_SGE_WR_N; } else @@ -3740,7 +3796,7 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, } attr; enum ibv_exp_query_intf_status status; struct ibv_recv_wr *bad_wr; - struct rte_mbuf *buf; + unsigned int mb_len; int ret = 0; int parent = (rxq == &priv->rxq_parent); @@ -3756,31 +3812,22 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, desc = 1; goto skip_mr; } + mb_len = rte_pktmbuf_data_room_size(mp); if ((desc == 0) || (desc % MLX4_PMD_SGE_WR_N)) { ERROR("%p: invalid number of RX descriptors (must be a" " multiple of %d)", (void *)dev, MLX4_PMD_SGE_WR_N); return EINVAL; } - /* Get mbuf length. */ - buf = rte_pktmbuf_alloc(mp); - if (buf == NULL) { - ERROR("%p: unable to allocate mbuf", (void *)dev); - return ENOMEM; - } - tmpl.mb_len = buf->buf_len; - assert((rte_pktmbuf_headroom(buf) + - rte_pktmbuf_tailroom(buf)) == tmpl.mb_len); - assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM); - rte_pktmbuf_free(buf); /* Toggle RX checksum offload if hardware supports it. */ if (priv->hw_csum) tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; if (priv->hw_csum_l2tun) tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; /* Enable scattered packets support for this queue if necessary. */ + assert(mb_len >= RTE_PKTMBUF_HEADROOM); if ((dev->data->dev_conf.rxmode.jumbo_frame) && (dev->data->dev_conf.rxmode.max_rx_pkt_len > - (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { + (mb_len - RTE_PKTMBUF_HEADROOM))) { tmpl.sp = 1; desc /= MLX4_PMD_SGE_WR_N; } @@ -4723,7 +4770,7 @@ mlx4_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete) memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); - ifr.ifr_data = &edata; + ifr.ifr_data = (void *)&edata; if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", strerror(errno)); @@ -4817,6 +4864,7 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) /* Reconfigure each RX queue. 
*/ for (i = 0; (i != priv->rxqs_n); ++i) { struct rxq *rxq = (*priv->rxqs)[i]; + unsigned int mb_len; unsigned int max_frame_len; int sp; @@ -4826,7 +4874,9 @@ mlx4_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * toggle scattered support (sp) if necessary. */ max_frame_len = (priv->mtu + ETHER_HDR_LEN + (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN)); - sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM)); + mb_len = rte_pktmbuf_data_room_size(rxq->mp); + assert(mb_len >= RTE_PKTMBUF_HEADROOM); + sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM)); /* Provide new values to rxq_setup(). */ dev->data->dev_conf.rxmode.jumbo_frame = sp; dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len; @@ -4882,7 +4932,7 @@ mlx4_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) if (mlx4_is_secondary()) return -E_RTE_SECONDARY; - ifr.ifr_data = &ethpause; + ifr.ifr_data = (void *)&ethpause; priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; @@ -4932,7 +4982,7 @@ mlx4_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) if (mlx4_is_secondary()) return -E_RTE_SECONDARY; - ifr.ifr_data = &ethpause; + ifr.ifr_data = (void *)&ethpause; ethpause.autoneg = fc_conf->autoneg; if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || (fc_conf->mode & RTE_FC_RX_PAUSE)) @@ -5358,7 +5408,7 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) rte_eal_alarm_cancel(mlx4_dev_link_status_handler, dev); priv->pending_alarm = 0; priv->intr_handle.fd = 0; - priv->intr_handle.type = 0; + priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; } /** @@ -5757,22 +5807,16 @@ error: static const struct rte_pci_id mlx4_pci_id_map[] = { { - .vendor_id = PCI_VENDOR_ID_MELLANOX, - .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX3) }, { - .vendor_id = PCI_VENDOR_ID_MELLANOX, - .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX3PRO) }, { - .vendor_id = PCI_VENDOR_ID_MELLANOX, - .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX3VF, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX3VF) }, { .vendor_id = 0 diff --git a/drivers/net/mlx5/Makefile b/drivers/net/mlx5/Makefile index 92bfa070..f6d39388 100644 --- a/drivers/net/mlx5/Makefile +++ b/drivers/net/mlx5/Makefile @@ -47,18 +47,22 @@ SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_vlan.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_stats.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_rss.c SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_fdir.c +SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += mlx5_mr.c # Dependencies. DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mbuf DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_mempool +DEPDIRS-$(CONFIG_RTE_LIBRTE_MLX5_PMD) += lib/librte_kvargs # Basic CFLAGS. CFLAGS += -O3 CFLAGS += -std=gnu99 -Wall -Wextra CFLAGS += -g CFLAGS += -I.
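Both priv_set_mtu() changes in this series (mlx4 above, and the identical one in mlx5_ethdev.c further down) stop trusting the bare sysfs write: the new MTU is written, read back, and the call fails with EINVAL when the kernel silently adjusted the value. A self-contained sketch of that write-then-verify pattern follows; the fake sysfs helpers and the 9000-byte clamp are invented for the example and are not part of the driver.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Fake sysfs "mtu" attribute: this pretend kernel clamps values to 9000. */
static unsigned long fake_sysfs_mtu = 1500;

static int sysfs_write_mtu(unsigned long val)
{
        fake_sysfs_mtu = (val > 9000) ? 9000 : val;
        return 0;
}

static int sysfs_read_mtu(unsigned long *val)
{
        *val = fake_sysfs_mtu;
        return 0;
}

/* Write the MTU, read it back and fail if the kernel adjusted it. */
static int set_mtu_verified(uint16_t mtu)
{
        unsigned long readback;

        if (sysfs_write_mtu(mtu) || sysfs_read_mtu(&readback))
                return -1;        /* write or read-back failed outright */
        if (readback == mtu)
                return 0;         /* the requested value stuck */
        errno = EINVAL;           /* silently clamped: report an error */
        return -1;
}

int main(void)
{
        printf("set 1500 -> %d\n", set_mtu_verified(1500)); /* 0 */
        printf("set 9600 -> %d\n", set_mtu_verified(9600)); /* -1 (clamped) */
        return 0;
}

mlx5 then uses the verified MTU to decide whether its RX queues only need a buffer rehash or a full SGE reconfiguration, as the mlx5_dev_set_mtu() hunk below shows.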
+CFLAGS += -D_BSD_SOURCE +CFLAGS += -D_DEFAULT_SOURCE CFLAGS += -D_XOPEN_SOURCE=600 CFLAGS += $(WERROR_FLAGS) CFLAGS += -Wno-strict-prototypes @@ -83,14 +87,6 @@ else CFLAGS += -DNDEBUG -UPEDANTIC endif -ifdef CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N -CFLAGS += -DMLX5_PMD_SGE_WR_N=$(CONFIG_RTE_LIBRTE_MLX5_SGE_WR_N) -endif - -ifdef CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE -CFLAGS += -DMLX5_PMD_MAX_INLINE=$(CONFIG_RTE_LIBRTE_MLX5_MAX_INLINE) -endif - ifdef CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE CFLAGS += -DMLX5_PMD_TX_MP_CACHE=$(CONFIG_RTE_LIBRTE_MLX5_TX_MP_CACHE) endif @@ -106,50 +102,31 @@ ifndef V AUTOCONF_OUTPUT := >/dev/null endif -mlx5_autoconf.h: $(RTE_SDK)/scripts/auto-config-h.sh +mlx5_autoconf.h.new: FORCE + +mlx5_autoconf.h.new: $(RTE_SDK)/scripts/auto-config-h.sh $Q $(RM) -f -- '$@' $Q sh -- '$<' '$@' \ - HAVE_EXP_QUERY_DEVICE \ - infiniband/verbs.h \ - type 'struct ibv_exp_device_attr' $(AUTOCONF_OUTPUT) - $Q sh -- '$<' '$@' \ - HAVE_FLOW_SPEC_IPV6 \ - infiniband/verbs.h \ - type 'struct ibv_exp_flow_spec_ipv6' $(AUTOCONF_OUTPUT) - $Q sh -- '$<' '$@' \ - HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \ - infiniband/verbs.h \ - enum IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR \ - $(AUTOCONF_OUTPUT) - $Q sh -- '$<' '$@' \ - HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS \ - infiniband/verbs.h \ - enum IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS \ + HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE \ + infiniband/verbs_exp.h \ + enum IBV_EXP_CQ_COMPRESSED_CQE \ $(AUTOCONF_OUTPUT) $Q sh -- '$<' '$@' \ - HAVE_EXP_CQ_RX_TCP_PACKET \ - infiniband/verbs.h \ - enum IBV_EXP_CQ_RX_TCP_PACKET \ - $(AUTOCONF_OUTPUT) - $Q sh -- '$<' '$@' \ - HAVE_VERBS_FCS \ - infiniband/verbs.h \ - enum IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS \ - $(AUTOCONF_OUTPUT) - $Q sh -- '$<' '$@' \ - HAVE_VERBS_RX_END_PADDING \ - infiniband/verbs.h \ - enum IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING \ - $(AUTOCONF_OUTPUT) - $Q sh -- '$<' '$@' \ - HAVE_VERBS_VLAN_INSERTION \ - infiniband/verbs.h \ - enum IBV_EXP_RECEIVE_WQ_CVLAN_INSERTION \ + HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE \ + infiniband/mlx5_hw.h \ + enum MLX5_ETH_VLAN_INLINE_HEADER_SIZE \ $(AUTOCONF_OUTPUT) +# Create mlx5_autoconf.h or update it in case it differs from the new one. + +mlx5_autoconf.h: mlx5_autoconf.h.new + $Q [ -f '$@' ] && \ + cmp '$<' '$@' $(AUTOCONF_OUTPUT) || \ + mv '$<' '$@' + $(SRCS-$(CONFIG_RTE_LIBRTE_MLX5_PMD):.c=.o): mlx5_autoconf.h clean_mlx5: FORCE - $Q rm -f -- mlx5_autoconf.h + $Q rm -f -- mlx5_autoconf.h mlx5_autoconf.h.new clean: clean_mlx5 diff --git a/drivers/net/mlx5/mlx5.c b/drivers/net/mlx5/mlx5.c index 041cfc33..5aa4adc6 100644 --- a/drivers/net/mlx5/mlx5.c +++ b/drivers/net/mlx5/mlx5.c @@ -37,6 +37,7 @@ #include <assert.h> #include <stdint.h> #include <stdlib.h> +#include <errno.h> #include <net/if.h> /* Verbs header. */ @@ -57,6 +58,7 @@ #include <rte_ethdev.h> #include <rte_pci.h> #include <rte_common.h> +#include <rte_kvargs.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-pedantic" #endif @@ -67,6 +69,21 @@ #include "mlx5_autoconf.h" #include "mlx5_defs.h" +/* Device parameter to enable RX completion queue compression. */ +#define MLX5_RXQ_CQE_COMP_EN "rxq_cqe_comp_en" + +/* Device parameter to configure inline send. */ +#define MLX5_TXQ_INLINE "txq_inline" + +/* + * Device parameter to configure the number of TX queues threshold for + * enabling inline send. + */ +#define MLX5_TXQS_MIN_INLINE "txqs_min_inline" + +/* Device parameter to enable multi-packet send WQEs. 
*/ +#define MLX5_TXQ_MPW_EN "txq_mpw_en" + /** * Retrieve integer value from environment variable. * @@ -98,7 +115,6 @@ static void mlx5_dev_close(struct rte_eth_dev *dev) { struct priv *priv = mlx5_get_priv(dev); - void *tmp; unsigned int i; priv_lock(priv); @@ -122,12 +138,15 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_rx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->rxqs_n); ++i) { - tmp = (*priv->rxqs)[i]; - if (tmp == NULL) + struct rxq *rxq = (*priv->rxqs)[i]; + struct rxq_ctrl *rxq_ctrl; + + if (rxq == NULL) continue; + rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); (*priv->rxqs)[i] = NULL; - rxq_cleanup(tmp); - rte_free(tmp); + rxq_cleanup(rxq_ctrl); + rte_free(rxq_ctrl); } priv->rxqs_n = 0; priv->rxqs = NULL; @@ -136,12 +155,15 @@ mlx5_dev_close(struct rte_eth_dev *dev) /* XXX race condition if mlx5_tx_burst() is still running. */ usleep(1000); for (i = 0; (i != priv->txqs_n); ++i) { - tmp = (*priv->txqs)[i]; - if (tmp == NULL) + struct txq *txq = (*priv->txqs)[i]; + struct txq_ctrl *txq_ctrl; + + if (txq == NULL) continue; + txq_ctrl = container_of(txq, struct txq_ctrl, txq); (*priv->txqs)[i] = NULL; - txq_cleanup(tmp); - rte_free(tmp); + txq_cleanup(txq_ctrl); + rte_free(txq_ctrl); } priv->txqs_n = 0; priv->txqs = NULL; @@ -190,17 +212,13 @@ static const struct eth_dev_ops mlx5_dev_ops = { .mac_addr_add = mlx5_mac_addr_add, .mac_addr_set = mlx5_mac_addr_set, .mtu_set = mlx5_dev_set_mtu, -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS .vlan_strip_queue_set = mlx5_vlan_strip_queue_set, .vlan_offload_set = mlx5_vlan_offload_set, -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ .reta_update = mlx5_dev_rss_reta_update, .reta_query = mlx5_dev_rss_reta_query, .rss_hash_update = mlx5_rss_hash_update, .rss_hash_conf_get = mlx5_rss_hash_conf_get, -#ifdef MLX5_FDIR_SUPPORT .filter_ctrl = mlx5_dev_filter_ctrl, -#endif /* MLX5_FDIR_SUPPORT */ }; static struct { @@ -236,6 +254,90 @@ mlx5_dev_idx(struct rte_pci_addr *pci_addr) return ret; } +/** + * Verify and store value for device argument. + * + * @param[in] key + * Key argument to verify. + * @param[in] val + * Value associated with key. + * @param opaque + * User data. + * + * @return + * 0 on success, negative errno value on failure. + */ +static int +mlx5_args_check(const char *key, const char *val, void *opaque) +{ + struct priv *priv = opaque; + unsigned long tmp; + + errno = 0; + tmp = strtoul(val, NULL, 0); + if (errno) { + WARN("%s: \"%s\" is not a valid integer", key, val); + return errno; + } + if (strcmp(MLX5_RXQ_CQE_COMP_EN, key) == 0) { + priv->cqe_comp = !!tmp; + } else if (strcmp(MLX5_TXQ_INLINE, key) == 0) { + priv->txq_inline = tmp; + } else if (strcmp(MLX5_TXQS_MIN_INLINE, key) == 0) { + priv->txqs_inline = tmp; + } else if (strcmp(MLX5_TXQ_MPW_EN, key) == 0) { + priv->mps = !!tmp; + } else { + WARN("%s: unknown parameter", key); + return -EINVAL; + } + return 0; +} + +/** + * Parse device parameters. + * + * @param priv + * Pointer to private structure. + * @param devargs + * Device arguments structure. + * + * @return + * 0 on success, errno value on failure. + */ +static int +mlx5_args(struct priv *priv, struct rte_devargs *devargs) +{ + const char **params = (const char *[]){ + MLX5_RXQ_CQE_COMP_EN, + MLX5_TXQ_INLINE, + MLX5_TXQS_MIN_INLINE, + MLX5_TXQ_MPW_EN, + NULL, + }; + struct rte_kvargs *kvlist; + int ret = 0; + int i; + + if (devargs == NULL) + return 0; + /* Following UGLY cast is done to pass checkpatch. 
*/ + kvlist = rte_kvargs_parse(devargs->args, params); + if (kvlist == NULL) + return 0; + /* Process parameters. */ + for (i = 0; (params[i] != NULL); ++i) { + if (rte_kvargs_count(kvlist, params[i])) { + ret = rte_kvargs_process(kvlist, params[i], + mlx5_args_check, priv); + if (ret != 0) + return ret; + } + } + rte_kvargs_free(kvlist); + return 0; +} + static struct eth_driver mlx5_driver; /** @@ -260,7 +362,7 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) int err = 0; struct ibv_context *attr_ctx = NULL; struct ibv_device_attr device_attr; - unsigned int vf; + unsigned int sriov; unsigned int mps; int idx; int i; @@ -303,17 +405,17 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) (pci_dev->addr.devid != pci_addr.devid) || (pci_dev->addr.function != pci_addr.function)) continue; - vf = ((pci_dev->id.device_id == + sriov = ((pci_dev->id.device_id == PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) || (pci_dev->id.device_id == PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF)); /* Multi-packet send is only supported by ConnectX-4 Lx PF. */ mps = (pci_dev->id.device_id == PCI_DEVICE_ID_MELLANOX_CONNECTX4LX); - INFO("PCI information matches, using device \"%s\" (VF: %s," - " MPS: %s)", + INFO("PCI information matches, using device \"%s\"" + " (SR-IOV: %s, MPS: %s)", list[i]->name, - vf ? "true" : "false", + sriov ? "true" : "false", mps ? "true" : "false"); attr_ctx = ibv_open_device(list[i]); err = errno; @@ -347,23 +449,16 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) struct ibv_pd *pd = NULL; struct priv *priv = NULL; struct rte_eth_dev *eth_dev; -#ifdef HAVE_EXP_QUERY_DEVICE struct ibv_exp_device_attr exp_device_attr; -#endif /* HAVE_EXP_QUERY_DEVICE */ struct ether_addr mac; + uint16_t num_vfs = 0; -#ifdef HAVE_EXP_QUERY_DEVICE exp_device_attr.comp_mask = IBV_EXP_DEVICE_ATTR_EXP_CAP_FLAGS | IBV_EXP_DEVICE_ATTR_RX_HASH | -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS IBV_EXP_DEVICE_ATTR_VLAN_OFFLOADS | -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ -#ifdef HAVE_VERBS_RX_END_PADDING IBV_EXP_DEVICE_ATTR_RX_PAD_END_ALIGN | -#endif /* HAVE_VERBS_RX_END_PADDING */ 0; -#endif /* HAVE_EXP_QUERY_DEVICE */ DEBUG("using port %u (%08" PRIx32 ")", port, test); @@ -414,7 +509,14 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) priv->port = port; priv->pd = pd; priv->mtu = ETHER_MTU; -#ifdef HAVE_EXP_QUERY_DEVICE + priv->mps = mps; /* Enable MPW by default if supported. */ + priv->cqe_comp = 1; /* Enable compression by default. */ + err = mlx5_args(priv, pci_dev->devargs); + if (err) { + ERROR("failed to process device arguments: %s", + strerror(err)); + goto port_error; + } if (ibv_exp_query_device(ctx, &exp_device_attr)) { ERROR("ibv_exp_query_device() failed"); goto port_error; @@ -440,32 +542,28 @@ mlx5_pci_devinit(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; DEBUG("maximum RX indirection table size is %u", priv->ind_table_max_size); -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS priv->hw_vlan_strip = !!(exp_device_attr.wq_vlan_offloads_cap & IBV_EXP_RECEIVE_WQ_CVLAN_STRIP); -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ DEBUG("VLAN stripping is %ssupported", (priv->hw_vlan_strip ? "" : "not ")); -#ifdef HAVE_VERBS_FCS priv->hw_fcs_strip = !!(exp_device_attr.exp_device_cap_flags & IBV_EXP_DEVICE_SCATTER_FCS); -#endif /* HAVE_VERBS_FCS */ DEBUG("FCS stripping configuration is %ssupported", (priv->hw_fcs_strip ? 
"" : "not ")); -#ifdef HAVE_VERBS_RX_END_PADDING priv->hw_padding = !!exp_device_attr.rx_pad_end_addr_align; -#endif /* HAVE_VERBS_RX_END_PADDING */ DEBUG("hardware RX end alignment padding is %ssupported", (priv->hw_padding ? "" : "not ")); -#else /* HAVE_EXP_QUERY_DEVICE */ - priv->ind_table_max_size = RSS_INDIRECTION_TABLE_SIZE; -#endif /* HAVE_EXP_QUERY_DEVICE */ - - priv->vf = vf; - priv->mps = mps; + priv_get_num_vfs(priv, &num_vfs); + priv->sriov = (num_vfs || sriov); + if (priv->mps && !mps) { + ERROR("multi-packet send not supported on this device" + " (" MLX5_TXQ_MPW_EN ")"); + err = ENOTSUP; + goto port_error; + } /* Allocate and register default RSS hash keys. */ priv->rss_conf = rte_calloc(__func__, hash_rxq_init_n, sizeof((*priv->rss_conf)[0]), 0); @@ -608,28 +706,20 @@ error: static const struct rte_pci_id mlx5_pci_id_map[] = { { - .vendor_id = PCI_VENDOR_ID_MELLANOX, - .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4) }, { - .vendor_id = PCI_VENDOR_ID_MELLANOX, - .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4VF, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4VF) }, { - .vendor_id = PCI_VENDOR_ID_MELLANOX, - .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LX, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4LX) }, { - .vendor_id = PCI_VENDOR_ID_MELLANOX, - .device_id = PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID + RTE_PCI_DEVICE(PCI_VENDOR_ID_MELLANOX, + PCI_DEVICE_ID_MELLANOX_CONNECTX4LXVF) }, { .vendor_id = 0 diff --git a/drivers/net/mlx5/mlx5.h b/drivers/net/mlx5/mlx5.h index 24876625..3a866098 100644 --- a/drivers/net/mlx5/mlx5.h +++ b/drivers/net/mlx5/mlx5.h @@ -39,7 +39,6 @@ #include <limits.h> #include <net/if.h> #include <netinet/in.h> -#include <linux/if.h> /* Verbs header. */ /* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ @@ -69,6 +68,11 @@ #include "mlx5_autoconf.h" #include "mlx5_defs.h" +#if !defined(HAVE_VERBS_IBV_EXP_CQ_COMPRESSED_CQE) || \ + !defined(HAVE_VERBS_MLX5_ETH_VLAN_INLINE_HEADER_SIZE) +#error Mellanox OFED >= 3.3 is required, please refer to the documentation. +#endif + enum { PCI_VENDOR_ID_MELLANOX = 0x15b3, }; @@ -105,9 +109,12 @@ struct priv { unsigned int hw_vlan_strip:1; /* VLAN stripping is supported. */ unsigned int hw_fcs_strip:1; /* FCS stripping is supported. */ unsigned int hw_padding:1; /* End alignment padding is supported. */ - unsigned int vf:1; /* This is a VF device. */ + unsigned int sriov:1; /* This is a VF or PF with VF devices. */ unsigned int mps:1; /* Whether multi-packet send is supported. */ + unsigned int cqe_comp:1; /* Whether CQE compression is enabled. */ unsigned int pending_alarm:1; /* An alarm is pending. */ + unsigned int txq_inline; /* Maximum packet size for inlining. */ + unsigned int txqs_inline; /* Queue number threshold for inlining. */ /* RX/TX queues. */ unsigned int rxqs_n; /* RX queues array size. */ unsigned int txqs_n; /* TX queues array size. 
*/ @@ -173,6 +180,7 @@ struct priv *mlx5_get_priv(struct rte_eth_dev *dev); int mlx5_is_secondary(void); int priv_get_ifname(const struct priv *, char (*)[IF_NAMESIZE]); int priv_ifreq(const struct priv *, int req, struct ifreq *); +int priv_get_num_vfs(struct priv *, uint16_t *); int priv_get_mtu(struct priv *, uint16_t *); int priv_set_flags(struct priv *, unsigned int, unsigned int); int mlx5_dev_configure(struct rte_eth_dev *); @@ -191,6 +199,8 @@ void priv_dev_interrupt_handler_install(struct priv *, struct rte_eth_dev *); int mlx5_set_link_down(struct rte_eth_dev *dev); int mlx5_set_link_up(struct rte_eth_dev *dev); struct priv *mlx5_secondary_data_setup(struct priv *priv); +void priv_select_tx_function(struct priv *); +void priv_select_rx_function(struct priv *); /* mlx5_mac.c */ diff --git a/drivers/net/mlx5/mlx5_defs.h b/drivers/net/mlx5/mlx5_defs.h index 09207d9c..cc2a6f3e 100644 --- a/drivers/net/mlx5/mlx5_defs.h +++ b/drivers/net/mlx5/mlx5_defs.h @@ -48,22 +48,15 @@ /* Maximum number of special flows. */ #define MLX5_MAX_SPECIAL_FLOWS 4 -/* Request send completion once in every 64 sends, might be less. */ -#define MLX5_PMD_TX_PER_COMP_REQ 64 +/* + * Request TX completion every time descriptors reach this threshold since + * the previous request. Must be a power of two for performance reasons. + */ +#define MLX5_TX_COMP_THRESH 32 /* RSS Indirection table size. */ #define RSS_INDIRECTION_TABLE_SIZE 256 -/* Maximum number of Scatter/Gather Elements per Work Request. */ -#ifndef MLX5_PMD_SGE_WR_N -#define MLX5_PMD_SGE_WR_N 4 -#endif - -/* Maximum size for inline data. */ -#ifndef MLX5_PMD_MAX_INLINE -#define MLX5_PMD_MAX_INLINE 0 -#endif - /* * Maximum number of cached Memory Pools (MPs) per TX queue. Each RTE MP * from which buffers are to be transmitted will have to be mapped by this @@ -86,13 +79,4 @@ /* Alarm timeout. */ #define MLX5_ALARM_TIMEOUT_US 100000 -/* - * Extended flow priorities necessary to support flow director are available - * since MLNX_OFED 3.2. Considering this version adds support for VLAN - * offloads as well, their availability means flow director can be used. - */ -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS -#define MLX5_FDIR_SUPPORT 1 -#endif - #endif /* RTE_PMD_MLX5_DEFS_H_ */ diff --git a/drivers/net/mlx5/mlx5_ethdev.c b/drivers/net/mlx5/mlx5_ethdev.c index 36b369e7..0e7ed019 100644 --- a/drivers/net/mlx5/mlx5_ethdev.c +++ b/drivers/net/mlx5/mlx5_ethdev.c @@ -44,7 +44,6 @@ #include <sys/ioctl.h> #include <sys/socket.h> #include <netinet/in.h> -#include <linux/if.h> #include <linux/ethtool.h> #include <linux/sockios.h> #include <fcntl.h> @@ -363,6 +362,38 @@ priv_ifreq(const struct priv *priv, int req, struct ifreq *ifr) } /** + * Return the number of active VFs for the current device. + * + * @param[in] priv + * Pointer to private structure. + * @param[out] num_vfs + * Number of active VFs. + * + * @return + * 0 on success, -1 on failure and errno is set. + */ +int +priv_get_num_vfs(struct priv *priv, uint16_t *num_vfs) +{ + /* The sysfs entry name depends on the operating system. */ + const char **name = (const char *[]){ + "device/sriov_numvfs", + "device/mlx5_num_vfs", + NULL, + }; + int ret; + + do { + unsigned long ulong_num_vfs; + + ret = priv_get_sysfs_ulong(priv, *name, &ulong_num_vfs); + if (!ret) + *num_vfs = ulong_num_vfs; + } while (*(++name) && ret); + return ret; +} + +/** * Get device MTU. 
* * @param priv @@ -398,7 +429,15 @@ priv_get_mtu(struct priv *priv, uint16_t *mtu) static int priv_set_mtu(struct priv *priv, uint16_t mtu) { - return priv_set_sysfs_ulong(priv, "mtu", mtu); + uint16_t new_mtu; + + if (priv_set_sysfs_ulong(priv, "mtu", mtu) || + priv_get_mtu(priv, &new_mtu)) + return -1; + if (new_mtu == mtu) + return 0; + errno = EINVAL; + return -1; } /** @@ -545,7 +584,8 @@ mlx5_dev_infos_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *info) DEV_RX_OFFLOAD_UDP_CKSUM | DEV_RX_OFFLOAD_TCP_CKSUM) : 0); - info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; + if (!priv->mps) + info->tx_offload_capa = DEV_TX_OFFLOAD_VLAN_INSERT; if (priv->hw_csum) info->tx_offload_capa |= (DEV_TX_OFFLOAD_IPV4_CKSUM | @@ -584,8 +624,7 @@ mlx5_dev_supported_ptypes_get(struct rte_eth_dev *dev) }; - if (dev->rx_pkt_burst == mlx5_rx_burst || - dev->rx_pkt_burst == mlx5_rx_burst_sp) + if (dev->rx_pkt_burst == mlx5_rx_burst) return ptypes; return NULL; } @@ -617,7 +656,7 @@ mlx5_link_update_unlocked(struct rte_eth_dev *dev, int wait_to_complete) memset(&dev_link, 0, sizeof(dev_link)); dev_link.link_status = ((ifr.ifr_flags & IFF_UP) && (ifr.ifr_flags & IFF_RUNNING)); - ifr.ifr_data = &edata; + ifr.ifr_data = (void *)&edata; if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { WARN("ioctl(SIOCETHTOOL, ETHTOOL_GSET) failed: %s", strerror(errno)); @@ -686,6 +725,9 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) unsigned int i; uint16_t (*rx_func)(void *, struct rte_mbuf **, uint16_t) = mlx5_rx_burst; + unsigned int max_frame_len; + int rehash; + int restart = priv->started; if (mlx5_is_secondary()) return -E_RTE_SECONDARY; @@ -699,7 +741,6 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) goto out; } else DEBUG("adapter port %u MTU set to %u", priv->port, mtu); - priv->mtu = mtu; /* Temporarily replace RX handler with a fake one, assuming it has not * been copied elsewhere. */ dev->rx_pkt_burst = removed_rx_burst; @@ -707,33 +748,94 @@ mlx5_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) * removed_rx_burst() instead. */ rte_wmb(); usleep(1000); + /* MTU does not include header and CRC. */ + max_frame_len = ETHER_HDR_LEN + mtu + ETHER_CRC_LEN; + /* Check if at least one queue is going to need a SGE update. */ + for (i = 0; i != priv->rxqs_n; ++i) { + struct rxq *rxq = (*priv->rxqs)[i]; + unsigned int mb_len; + unsigned int size = RTE_PKTMBUF_HEADROOM + max_frame_len; + unsigned int sges_n; + + if (rxq == NULL) + continue; + mb_len = rte_pktmbuf_data_room_size(rxq->mp); + assert(mb_len >= RTE_PKTMBUF_HEADROOM); + /* + * Determine the number of SGEs needed for a full packet + * and round it to the next power of two. + */ + sges_n = log2above((size / mb_len) + !!(size % mb_len)); + if (sges_n != rxq->sges_n) + break; + } + /* + * If all queues have the right number of SGEs, a simple rehash + * of their buffers is enough, otherwise SGE information can only + * be updated in a queue by recreating it. All resources that depend + * on queues (flows, indirection tables) must be recreated as well in + * that case. + */ + rehash = (i == priv->rxqs_n); + if (!rehash) { + /* Clean up everything as with mlx5_dev_stop(). */ + priv_special_flow_disable_all(priv); + priv_mac_addrs_disable(priv); + priv_destroy_hash_rxqs(priv); + priv_fdir_disable(priv); + priv_dev_interrupt_handler_uninstall(priv, dev); + } +recover: /* Reconfigure each RX queue. 
*/ for (i = 0; (i != priv->rxqs_n); ++i) { struct rxq *rxq = (*priv->rxqs)[i]; - unsigned int max_frame_len; + struct rxq_ctrl *rxq_ctrl = + container_of(rxq, struct rxq_ctrl, rxq); int sp; + unsigned int mb_len; + unsigned int tmp; if (rxq == NULL) continue; - /* Calculate new maximum frame length according to MTU and - * toggle scattered support (sp) if necessary. */ - max_frame_len = (priv->mtu + ETHER_HDR_LEN + - (ETHER_MAX_VLAN_FRAME_LEN - ETHER_MAX_LEN)); - sp = (max_frame_len > (rxq->mb_len - RTE_PKTMBUF_HEADROOM)); + mb_len = rte_pktmbuf_data_room_size(rxq->mp); + assert(mb_len >= RTE_PKTMBUF_HEADROOM); + /* Toggle scattered support (sp) if necessary. */ + sp = (max_frame_len > (mb_len - RTE_PKTMBUF_HEADROOM)); /* Provide new values to rxq_setup(). */ dev->data->dev_conf.rxmode.jumbo_frame = sp; dev->data->dev_conf.rxmode.max_rx_pkt_len = max_frame_len; - ret = rxq_rehash(dev, rxq); - if (ret) { - /* Force SP RX if that queue requires it and abort. */ - if (rxq->sp) - rx_func = mlx5_rx_burst_sp; - break; + if (rehash) + ret = rxq_rehash(dev, rxq_ctrl); + else + ret = rxq_ctrl_setup(dev, rxq_ctrl, rxq->elts_n, + rxq_ctrl->socket, NULL, rxq->mp); + if (!ret) + continue; + /* Attempt to roll back in case of error. */ + tmp = (mb_len << rxq->sges_n) - RTE_PKTMBUF_HEADROOM; + if (max_frame_len != tmp) { + max_frame_len = tmp; + goto recover; } - /* Scattered burst function takes priority. */ - if (rxq->sp) - rx_func = mlx5_rx_burst_sp; + /* Double fault, disable RX. */ + break; + } + /* + * Use a safe RX burst function in case of error, otherwise mimic + * mlx5_dev_start(). + */ + if (ret) { + ERROR("unable to reconfigure RX queues, RX disabled"); + rx_func = removed_rx_burst; + } else if (restart && + !rehash && + !priv_create_hash_rxqs(priv) && + !priv_rehash_flows(priv)) { + if (dev->data->dev_conf.fdir_conf.mode == RTE_FDIR_MODE_NONE) + priv_fdir_enable(priv); + priv_dev_interrupt_handler_install(priv, dev); } + priv->mtu = mtu; /* Burst functions can now be called again. */ rte_wmb(); dev->rx_pkt_burst = rx_func; @@ -767,7 +869,7 @@ mlx5_dev_get_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) if (mlx5_is_secondary()) return -E_RTE_SECONDARY; - ifr.ifr_data = &ethpause; + ifr.ifr_data = (void *)&ethpause; priv_lock(priv); if (priv_ifreq(priv, SIOCETHTOOL, &ifr)) { ret = errno; @@ -818,7 +920,7 @@ mlx5_dev_set_flow_ctrl(struct rte_eth_dev *dev, struct rte_eth_fc_conf *fc_conf) if (mlx5_is_secondary()) return -E_RTE_SECONDARY; - ifr.ifr_data = &ethpause; + ifr.ifr_data = (void *)&ethpause; ethpause.autoneg = fc_conf->autoneg; if (((fc_conf->mode & RTE_FC_FULL) == RTE_FC_FULL) || (fc_conf->mode & RTE_FC_RX_PAUSE)) @@ -1012,7 +1114,7 @@ priv_dev_interrupt_handler_uninstall(struct priv *priv, struct rte_eth_dev *dev) rte_eal_alarm_cancel(mlx5_dev_link_status_handler, dev); priv->pending_alarm = 0; priv->intr_handle.fd = 0; - priv->intr_handle.type = 0; + priv->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN; } /** @@ -1061,23 +1163,13 @@ priv_set_link(struct priv *priv, int up) { struct rte_eth_dev *dev = priv->dev; int err; - unsigned int i; if (up) { err = priv_set_flags(priv, ~IFF_UP, IFF_UP); if (err) return err; - for (i = 0; i < priv->rxqs_n; i++) - if ((*priv->rxqs)[i]->sp) - break; - /* Check if an sp queue exists. - * Note: Some old frames might be received.
- */ - if (i == priv->rxqs_n) - dev->rx_pkt_burst = mlx5_rx_burst; - else - dev->rx_pkt_burst = mlx5_rx_burst_sp; - dev->tx_pkt_burst = mlx5_tx_burst; + priv_select_tx_function(priv); + priv_select_rx_function(priv); } else { err = priv_set_flags(priv, ~IFF_UP, ~IFF_UP); if (err) @@ -1209,34 +1301,40 @@ mlx5_secondary_data_setup(struct priv *priv) /* TX queues. */ for (i = 0; i != nb_tx_queues; ++i) { struct txq *primary_txq = (*sd->primary_priv->txqs)[i]; - struct txq *txq; + struct txq_ctrl *primary_txq_ctrl; + struct txq_ctrl *txq_ctrl; if (primary_txq == NULL) continue; - txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, - primary_txq->socket); - if (txq != NULL) { - if (txq_setup(priv->dev, - txq, - primary_txq->elts_n * MLX5_PMD_SGE_WR_N, - primary_txq->socket, - NULL) == 0) { - txq->stats.idx = primary_txq->stats.idx; - tx_queues[i] = txq; + primary_txq_ctrl = container_of(primary_txq, + struct txq_ctrl, txq); + txq_ctrl = rte_calloc_socket("TXQ", 1, sizeof(*txq_ctrl), 0, + primary_txq_ctrl->socket); + if (txq_ctrl != NULL) { + if (txq_ctrl_setup(priv->dev, + primary_txq_ctrl, + primary_txq->elts_n, + primary_txq_ctrl->socket, + NULL) == 0) { + txq_ctrl->txq.stats.idx = + primary_txq->stats.idx; + tx_queues[i] = &txq_ctrl->txq; continue; } - rte_free(txq); + rte_free(txq_ctrl); } while (i) { - txq = tx_queues[--i]; - txq_cleanup(txq); - rte_free(txq); + txq_ctrl = tx_queues[--i]; + txq_cleanup(txq_ctrl); + rte_free(txq_ctrl); } goto error; } /* RX queues. */ for (i = 0; i != nb_rx_queues; ++i) { - struct rxq *primary_rxq = (*sd->primary_priv->rxqs)[i]; + struct rxq_ctrl *primary_rxq = + container_of((*sd->primary_priv->rxqs)[i], + struct rxq_ctrl, rxq); if (primary_rxq == NULL) continue; @@ -1263,13 +1361,11 @@ mlx5_secondary_data_setup(struct priv *priv) rte_mb(); priv->dev->data = &sd->data; rte_mb(); - priv->dev->tx_pkt_burst = mlx5_tx_burst; - priv->dev->rx_pkt_burst = removed_rx_burst; + priv_select_tx_function(priv); + priv_select_rx_function(priv); priv_unlock(priv); end: /* More sanity checks. */ - assert(priv->dev->tx_pkt_burst == mlx5_tx_burst); - assert(priv->dev->rx_pkt_burst == removed_rx_burst); assert(priv->dev->data == &sd->data); rte_spinlock_unlock(&sd->lock); return priv; @@ -1280,3 +1376,42 @@ error: rte_spinlock_unlock(&sd->lock); return NULL; } + +/** + * Configure the TX function to use. + * + * @param priv + * Pointer to private structure. + */ +void +priv_select_tx_function(struct priv *priv) +{ + priv->dev->tx_pkt_burst = mlx5_tx_burst; + /* Display warning for unsupported configurations. */ + if (priv->sriov && priv->mps) + WARN("multi-packet send WQE cannot be used on a SR-IOV setup"); + /* Select appropriate TX function. */ + if ((priv->sriov == 0) && priv->mps && priv->txq_inline) { + priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw_inline; + DEBUG("selected MPW inline TX function"); + } else if ((priv->sriov == 0) && priv->mps) { + priv->dev->tx_pkt_burst = mlx5_tx_burst_mpw; + DEBUG("selected MPW TX function"); + } else if (priv->txq_inline && (priv->txqs_n >= priv->txqs_inline)) { + priv->dev->tx_pkt_burst = mlx5_tx_burst_inline; + DEBUG("selected inline TX function (%u >= %u queues)", + priv->txqs_n, priv->txqs_inline); + } +} + +/** + * Configure the RX function to use. + * + * @param priv + * Pointer to private structure. 
+ */ +void +priv_select_rx_function(struct priv *priv) +{ + priv->dev->rx_pkt_burst = mlx5_rx_burst; +} diff --git a/drivers/net/mlx5/mlx5_fdir.c b/drivers/net/mlx5/mlx5_fdir.c index 63e43ad9..73eb00ec 100644 --- a/drivers/net/mlx5/mlx5_fdir.c +++ b/drivers/net/mlx5/mlx5_fdir.c @@ -122,7 +122,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter, case RTE_ETH_FLOW_NONFRAG_IPV4_OTHER: desc->type = HASH_RXQ_IPV4; break; -#ifdef HAVE_FLOW_SPEC_IPV6 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: desc->type = HASH_RXQ_UDPV6; break; @@ -132,7 +131,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter, case RTE_ETH_FLOW_NONFRAG_IPV6_OTHER: desc->type = HASH_RXQ_IPV6; break; -#endif /* HAVE_FLOW_SPEC_IPV6 */ default: break; } @@ -147,7 +145,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter, desc->src_ip[0] = fdir_filter->input.flow.ip4_flow.src_ip; desc->dst_ip[0] = fdir_filter->input.flow.ip4_flow.dst_ip; break; -#ifdef HAVE_FLOW_SPEC_IPV6 case RTE_ETH_FLOW_NONFRAG_IPV6_UDP: case RTE_ETH_FLOW_NONFRAG_IPV6_TCP: desc->src_port = fdir_filter->input.flow.udp6_flow.src_port; @@ -161,7 +158,6 @@ fdir_filter_to_flow_desc(const struct rte_eth_fdir_filter *fdir_filter, fdir_filter->input.flow.ipv6_flow.dst_ip, sizeof(desc->dst_ip)); break; -#endif /* HAVE_FLOW_SPEC_IPV6 */ default: break; } @@ -211,7 +207,6 @@ priv_fdir_overlap(const struct priv *priv, (desc2->dst_ip[0] & mask->ipv4_mask.dst_ip))) return 0; break; -#ifdef HAVE_FLOW_SPEC_IPV6 case HASH_RXQ_IPV6: case HASH_RXQ_UDPV6: case HASH_RXQ_TCPV6: @@ -222,7 +217,6 @@ priv_fdir_overlap(const struct priv *priv, (desc2->dst_ip[i] & mask->ipv6_mask.dst_ip[i]))) return 0; break; -#endif /* HAVE_FLOW_SPEC_IPV6 */ default: break; } @@ -258,9 +252,7 @@ priv_fdir_flow_add(struct priv *priv, uintptr_t spec_offset = (uintptr_t)&data->spec; struct ibv_exp_flow_spec_eth *spec_eth; struct ibv_exp_flow_spec_ipv4 *spec_ipv4; -#ifdef HAVE_FLOW_SPEC_IPV6 struct ibv_exp_flow_spec_ipv6 *spec_ipv6; -#endif /* HAVE_FLOW_SPEC_IPV6 */ struct ibv_exp_flow_spec_tcp_udp *spec_tcp_udp; struct mlx5_fdir_filter *iter_fdir_filter; unsigned int i; @@ -334,7 +326,6 @@ priv_fdir_flow_add(struct priv *priv, spec_offset += spec_ipv4->size; break; -#ifdef HAVE_FLOW_SPEC_IPV6 case HASH_RXQ_IPV6: case HASH_RXQ_UDPV6: case HASH_RXQ_TCPV6: @@ -368,7 +359,6 @@ priv_fdir_flow_add(struct priv *priv, spec_offset += spec_ipv6->size; break; -#endif /* HAVE_FLOW_SPEC_IPV6 */ default: ERROR("invalid flow attribute type"); return EINVAL; @@ -424,7 +414,9 @@ create_flow: static struct fdir_queue * priv_get_fdir_queue(struct priv *priv, uint16_t idx) { - struct fdir_queue *fdir_queue = &(*priv->rxqs)[idx]->fdir_queue; + struct rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[idx], struct rxq_ctrl, rxq); + struct fdir_queue *fdir_queue = &rxq_ctrl->fdir_queue; struct ibv_exp_rwq_ind_table *ind_table = NULL; struct ibv_qp *qp = NULL; struct ibv_exp_rwq_ind_table_init_attr ind_init_attr; @@ -439,7 +431,7 @@ priv_get_fdir_queue(struct priv *priv, uint16_t idx) ind_init_attr = (struct ibv_exp_rwq_ind_table_init_attr){ .pd = priv->pd, .log_ind_tbl_size = 0, - .ind_tbl = &((*priv->rxqs)[idx]->wq), + .ind_tbl = &rxq_ctrl->wq, .comp_mask = 0, }; @@ -629,8 +621,10 @@ priv_fdir_disable(struct priv *priv) /* Run on every RX queue to destroy related flow director QP and * indirection table. 
*/ for (i = 0; (i != priv->rxqs_n); i++) { - fdir_queue = &(*priv->rxqs)[i]->fdir_queue; + struct rxq_ctrl *rxq_ctrl = + container_of((*priv->rxqs)[i], struct rxq_ctrl, rxq); + fdir_queue = &rxq_ctrl->fdir_queue; if (fdir_queue->qp != NULL) { claim_zero(ibv_destroy_qp(fdir_queue->qp)); fdir_queue->qp = NULL; diff --git a/drivers/net/mlx5/mlx5_mac.c b/drivers/net/mlx5/mlx5_mac.c index c9cea485..f6b27bb8 100644 --- a/drivers/net/mlx5/mlx5_mac.c +++ b/drivers/net/mlx5/mlx5_mac.c @@ -38,7 +38,6 @@ #include <inttypes.h> #include <errno.h> #include <netinet/in.h> -#include <linux/if.h> #include <sys/ioctl.h> #include <arpa/inet.h> diff --git a/drivers/net/mlx5/mlx5_mr.c b/drivers/net/mlx5/mlx5_mr.c new file mode 100644 index 00000000..67dfefa8 --- /dev/null +++ b/drivers/net/mlx5/mlx5_mr.c @@ -0,0 +1,283 @@ +/*- + * BSD LICENSE + * + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/verbs.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* DPDK headers don't like -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <rte_mempool.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +#include "mlx5.h" +#include "mlx5_rxtx.h" + +struct mlx5_check_mempool_data { + int ret; + char *start; + char *end; +}; + +/* Called by mlx5_check_mempool() when iterating the memory chunks. */ +static void +mlx5_check_mempool_cb(struct rte_mempool *mp, + void *opaque, struct rte_mempool_memhdr *memhdr, + unsigned int mem_idx) +{ + struct mlx5_check_mempool_data *data = opaque; + + (void)mp; + (void)mem_idx; + + /* It already failed, skip the next chunks. */ + if (data->ret != 0) + return; + /* It is the first chunk. 
*/ + if (data->start == NULL && data->end == NULL) { + data->start = memhdr->addr; + data->end = data->start + memhdr->len; + return; + } + if (data->end == memhdr->addr) { + data->end += memhdr->len; + return; + } + if (data->start == (char *)memhdr->addr + memhdr->len) { + data->start -= memhdr->len; + return; + } + /* Error, mempool is not virtually contiguous. */ + data->ret = -1; +} + +/** + * Check if a mempool can be used: it must be virtually contiguous. + * + * @param[in] mp + * Pointer to memory pool. + * @param[out] start + * Pointer to the start address of the mempool virtual memory area + * @param[out] end + * Pointer to the end address of the mempool virtual memory area + * + * @return + * 0 on success (mempool is virtually contiguous), -1 on error. + */ +static int mlx5_check_mempool(struct rte_mempool *mp, uintptr_t *start, + uintptr_t *end) +{ + struct mlx5_check_mempool_data data; + + memset(&data, 0, sizeof(data)); + rte_mempool_mem_iter(mp, mlx5_check_mempool_cb, &data); + *start = (uintptr_t)data.start; + *end = (uintptr_t)data.end; + + return data.ret; +} + +/** + * Register mempool as a memory region. + * + * @param pd + * Pointer to protection domain. + * @param mp + * Pointer to memory pool. + * + * @return + * Memory region pointer, NULL in case of error. + */ +struct ibv_mr * +mlx5_mp2mr(struct ibv_pd *pd, struct rte_mempool *mp) +{ + const struct rte_memseg *ms = rte_eal_get_physmem_layout(); + uintptr_t start; + uintptr_t end; + unsigned int i; + + if (mlx5_check_mempool(mp, &start, &end) != 0) { + ERROR("mempool %p: not virtually contiguous", + (void *)mp); + return NULL; + } + + DEBUG("mempool %p area start=%p end=%p size=%zu", + (void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + /* Round start and end to page boundary if found in memory segments. */ + for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { + uintptr_t addr = (uintptr_t)ms[i].addr; + size_t len = ms[i].len; + unsigned int align = ms[i].hugepage_sz; + + if ((start > addr) && (start < addr + len)) + start = RTE_ALIGN_FLOOR(start, align); + if ((end > addr) && (end < addr + len)) + end = RTE_ALIGN_CEIL(end, align); + } + DEBUG("mempool %p using start=%p end=%p size=%zu for MR", + (void *)mp, (void *)start, (void *)end, + (size_t)(end - start)); + return ibv_reg_mr(pd, + (void *)start, + end - start, + IBV_ACCESS_LOCAL_WRITE); +} + +/** + * Register a Memory Region (MR) <-> Memory Pool (MP) association in + * txq->mp2mr[]. If mp2mr[] is full, remove an entry first. + * + * This function should only be called by txq_mp2mr(). + * + * @param txq + * Pointer to TX queue structure. + * @param[in] mp + * Memory Pool for which a Memory Region lkey must be returned. + * @param idx + * Index of the next available entry. + * + * @return + * mr->lkey on success, (uint32_t)-1 on failure. + */ +uint32_t +txq_mp2mr_reg(struct txq *txq, struct rte_mempool *mp, unsigned int idx) +{ + struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq); + struct ibv_mr *mr; + + /* Add a new entry, register MR first. */ + DEBUG("%p: discovered new memory pool \"%s\" (%p)", + (void *)txq_ctrl, mp->name, (void *)mp); + mr = mlx5_mp2mr(txq_ctrl->priv->pd, mp); + if (unlikely(mr == NULL)) { + DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", + (void *)txq_ctrl); + return (uint32_t)-1; + } + if (unlikely(idx == RTE_DIM(txq_ctrl->txq.mp2mr))) { + /* Table is full, remove oldest entry. 
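The chunk callback above only accepts mempools whose memory forms one contiguous virtual area. A standalone sketch of that accumulation rule, with a plain array standing in for the rte_mempool chunk list (hypothetical data, not from the driver):

#include <stddef.h>
#include <stdio.h>

/* Hypothetical memory chunks: base address and length. */
struct chunk { char *addr; size_t len; };

/* Same accumulation rule as mlx5_check_mempool_cb(): grow [start, end)
 * only when a chunk is adjacent to the current area, otherwise fail. */
static int
check_contiguous(const struct chunk *c, unsigned int n, char **start, char **end)
{
	unsigned int i;

	*start = NULL;
	*end = NULL;
	for (i = 0; i < n; ++i) {
		if (*start == NULL && *end == NULL) {
			*start = c[i].addr;
			*end = c[i].addr + c[i].len;
		} else if (*end == c[i].addr) {
			*end += c[i].len;	/* chunk extends the area upward */
		} else if (*start == c[i].addr + c[i].len) {
			*start -= c[i].len;	/* chunk extends it downward */
		} else {
			return -1;		/* not virtually contiguous */
		}
	}
	return 0;
}

int main(void)
{
	static char area[8192];
	struct chunk chunks[2] = { { area, 4096 }, { area + 4096, 4096 } };
	char *start;
	char *end;

	printf("%d\n", check_contiguous(chunks, 2, &start, &end)); /* prints 0 */
	return 0;
}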
*/ + DEBUG("%p: MR <-> MP table full, dropping oldest entry.", + (void *)txq_ctrl); + --idx; + claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[0].mr)); + memmove(&txq_ctrl->txq.mp2mr[0], &txq_ctrl->txq.mp2mr[1], + (sizeof(txq_ctrl->txq.mp2mr) - + sizeof(txq_ctrl->txq.mp2mr[0]))); + } + /* Store the new entry. */ + txq_ctrl->txq.mp2mr[idx].mp = mp; + txq_ctrl->txq.mp2mr[idx].mr = mr; + txq_ctrl->txq.mp2mr[idx].lkey = htonl(mr->lkey); + DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, + (void *)txq_ctrl, mp->name, (void *)mp, + txq_ctrl->txq.mp2mr[idx].lkey); + return txq_ctrl->txq.mp2mr[idx].lkey; +} + +struct txq_mp2mr_mbuf_check_data { + int ret; +}; + +/** + * Callback function for rte_mempool_obj_iter() to check whether a given + * mempool object looks like a mbuf. + * + * @param[in] mp + * The mempool pointer + * @param[in] arg + * Context data (struct txq_mp2mr_mbuf_check_data). Contains the + * return value. + * @param[in] obj + * Object address. + * @param index + * Object index, unused. + */ +static void +txq_mp2mr_mbuf_check(struct rte_mempool *mp, void *arg, void *obj, + uint32_t index __rte_unused) +{ + struct txq_mp2mr_mbuf_check_data *data = arg; + struct rte_mbuf *buf = obj; + + /* + * Check whether mbuf structure fits element size and whether mempool + * pointer is valid. + */ + if (sizeof(*buf) > mp->elt_size || buf->pool != mp) + data->ret = -1; +} + +/** + * Iterator function for rte_mempool_walk() to register existing mempools and + * fill the MP to MR cache of a TX queue. + * + * @param[in] mp + * Memory Pool to register. + * @param *arg + * Pointer to TX queue structure. + */ +void +txq_mp2mr_iter(struct rte_mempool *mp, void *arg) +{ + struct txq_ctrl *txq_ctrl = arg; + struct txq_mp2mr_mbuf_check_data data = { + .ret = 0, + }; + unsigned int i; + + /* Register mempool only if the first element looks like a mbuf. */ + if (rte_mempool_obj_iter(mp, txq_mp2mr_mbuf_check, &data) == 0 || + data.ret == -1) + return; + for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) { + if (unlikely(txq_ctrl->txq.mp2mr[i].mp == NULL)) { + /* Unknown MP, add a new MR for it. */ + break; + } + if (txq_ctrl->txq.mp2mr[i].mp == mp) + return; + } + txq_mp2mr_reg(&txq_ctrl->txq, mp, i); +} diff --git a/drivers/net/mlx5/mlx5_prm.h b/drivers/net/mlx5/mlx5_prm.h new file mode 100644 index 00000000..5db219b3 --- /dev/null +++ b/drivers/net/mlx5/mlx5_prm.h @@ -0,0 +1,163 @@ +/*- + * BSD LICENSE + * + * Copyright 2016 6WIND S.A. + * Copyright 2016 Mellanox. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of 6WIND S.A. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef RTE_PMD_MLX5_PRM_H_ +#define RTE_PMD_MLX5_PRM_H_ + +/* Verbs header. */ +/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */ +#ifdef PEDANTIC +#pragma GCC diagnostic ignored "-pedantic" +#endif +#include <infiniband/mlx5_hw.h> +#ifdef PEDANTIC +#pragma GCC diagnostic error "-pedantic" +#endif + +/* Get CQE owner bit. */ +#define MLX5_CQE_OWNER(op_own) ((op_own) & MLX5_CQE_OWNER_MASK) + +/* Get CQE format. */ +#define MLX5_CQE_FORMAT(op_own) (((op_own) & MLX5E_CQE_FORMAT_MASK) >> 2) + +/* Get CQE opcode. */ +#define MLX5_CQE_OPCODE(op_own) (((op_own) & 0xf0) >> 4) + +/* Get CQE solicited event. */ +#define MLX5_CQE_SE(op_own) (((op_own) >> 1) & 1) + +/* Invalidate a CQE. */ +#define MLX5_CQE_INVALIDATE (MLX5_CQE_INVALID << 4) + +/* CQE value to inform that VLAN is stripped. */ +#define MLX5_CQE_VLAN_STRIPPED 0x1 + +/* Maximum number of packets a multi-packet WQE can handle. */ +#define MLX5_MPW_DSEG_MAX 5 + +/* Room for inline data in regular work queue element. */ +#define MLX5_WQE64_INL_DATA 12 + +/* Room for inline data in multi-packet WQE. */ +#define MLX5_MWQE64_INL_DATA 28 + +/* Subset of struct mlx5_wqe_eth_seg. */ +struct mlx5_wqe_eth_seg_small { + uint32_t rsvd0; + uint8_t cs_flags; + uint8_t rsvd1; + uint16_t mss; + uint32_t rsvd2; + uint16_t inline_hdr_sz; +}; + +/* Regular WQE. */ +struct mlx5_wqe_regular { + union { + struct mlx5_wqe_ctrl_seg ctrl; + uint32_t data[4]; + } ctrl; + struct mlx5_wqe_eth_seg eseg; + struct mlx5_wqe_data_seg dseg; +} __rte_aligned(64); + +/* Inline WQE. */ +struct mlx5_wqe_inl { + union { + struct mlx5_wqe_ctrl_seg ctrl; + uint32_t data[4]; + } ctrl; + struct mlx5_wqe_eth_seg eseg; + uint32_t byte_cnt; + uint8_t data[MLX5_WQE64_INL_DATA]; +} __rte_aligned(64); + +/* Multi-packet WQE. */ +struct mlx5_wqe_mpw { + union { + struct mlx5_wqe_ctrl_seg ctrl; + uint32_t data[4]; + } ctrl; + struct mlx5_wqe_eth_seg_small eseg; + struct mlx5_wqe_data_seg dseg[2]; +} __rte_aligned(64); + +/* Multi-packet WQE with inline. */ +struct mlx5_wqe_mpw_inl { + union { + struct mlx5_wqe_ctrl_seg ctrl; + uint32_t data[4]; + } ctrl; + struct mlx5_wqe_eth_seg_small eseg; + uint32_t byte_cnt; + uint8_t data[MLX5_MWQE64_INL_DATA]; +} __rte_aligned(64); + +/* Union of all WQE types. */ +union mlx5_wqe { + struct mlx5_wqe_regular wqe; + struct mlx5_wqe_inl inl; + struct mlx5_wqe_mpw mpw; + struct mlx5_wqe_mpw_inl mpw_inl; + uint8_t data[64]; +}; + +/* MPW session status. */ +enum mlx5_mpw_state { + MLX5_MPW_STATE_OPENED, + MLX5_MPW_INL_STATE_OPENED, + MLX5_MPW_STATE_CLOSED, +}; + +/* MPW session descriptor. 
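The op_own decoding macros below slice a single byte into owner, solicited-event, format and opcode fields. A small standalone example; the literal mask values are assumptions mirroring those macros, while the authoritative definitions live in infiniband/mlx5_hw.h:

#include <stdint.h>
#include <stdio.h>

/* Assumed layout of the CQE op_own byte: owner bit 0, solicited event
 * bit 1, format bits 2-3, opcode bits 4-7. The driver's macros use the
 * masks from infiniband/mlx5_hw.h instead of these literal values. */
#define CQE_OWNER(op_own)	((op_own) & 0x1)
#define CQE_SE(op_own)		(((op_own) >> 1) & 1)
#define CQE_FORMAT(op_own)	(((op_own) & 0xc) >> 2)
#define CQE_OPCODE(op_own)	(((op_own) & 0xf0) >> 4)

int main(void)
{
	uint8_t op_own = 0x25; /* arbitrary sample byte */

	/* 0x25 -> opcode 2, format 1, se 0, owner 1. */
	printf("opcode=%u format=%u se=%u owner=%u\n",
	       CQE_OPCODE(op_own), CQE_FORMAT(op_own),
	       CQE_SE(op_own), CQE_OWNER(op_own));
	return 0;
}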
*/ +struct mlx5_mpw { + enum mlx5_mpw_state state; + unsigned int pkts_n; + unsigned int len; + unsigned int total_len; + volatile union mlx5_wqe *wqe; + union { + volatile struct mlx5_wqe_data_seg *dseg[MLX5_MPW_DSEG_MAX]; + volatile uint8_t *raw; + } data; +}; + +/* CQ element structure - should be equal to the cache line size */ +struct mlx5_cqe { +#if (RTE_CACHE_LINE_SIZE == 128) + uint8_t padding[64]; +#endif + struct mlx5_cqe64 cqe64; +}; + +#endif /* RTE_PMD_MLX5_PRM_H_ */ diff --git a/drivers/net/mlx5/mlx5_rxmode.c b/drivers/net/mlx5/mlx5_rxmode.c index 3a55f633..8b585554 100644 --- a/drivers/net/mlx5/mlx5_rxmode.c +++ b/drivers/net/mlx5/mlx5_rxmode.c @@ -67,11 +67,9 @@ static const struct special_flow_init special_flow_init[] = { 1 << HASH_RXQ_TCPV4 | 1 << HASH_RXQ_UDPV4 | 1 << HASH_RXQ_IPV4 | -#ifdef HAVE_FLOW_SPEC_IPV6 1 << HASH_RXQ_TCPV6 | 1 << HASH_RXQ_UDPV6 | 1 << HASH_RXQ_IPV6 | -#endif /* HAVE_FLOW_SPEC_IPV6 */ 1 << HASH_RXQ_ETH | 0, .per_vlan = 0, @@ -82,10 +80,8 @@ static const struct special_flow_init special_flow_init[] = { .hash_types = 1 << HASH_RXQ_UDPV4 | 1 << HASH_RXQ_IPV4 | -#ifdef HAVE_FLOW_SPEC_IPV6 1 << HASH_RXQ_UDPV6 | 1 << HASH_RXQ_IPV6 | -#endif /* HAVE_FLOW_SPEC_IPV6 */ 1 << HASH_RXQ_ETH | 0, .per_vlan = 0, @@ -96,15 +92,12 @@ static const struct special_flow_init special_flow_init[] = { .hash_types = 1 << HASH_RXQ_UDPV4 | 1 << HASH_RXQ_IPV4 | -#ifdef HAVE_FLOW_SPEC_IPV6 1 << HASH_RXQ_UDPV6 | 1 << HASH_RXQ_IPV6 | -#endif /* HAVE_FLOW_SPEC_IPV6 */ 1 << HASH_RXQ_ETH | 0, .per_vlan = 1, }, -#ifdef HAVE_FLOW_SPEC_IPV6 [HASH_RXQ_FLOW_TYPE_IPV6MULTI] = { .dst_mac_val = "\x33\x33\x00\x00\x00\x00", .dst_mac_mask = "\xff\xff\x00\x00\x00\x00", @@ -115,7 +108,6 @@ static const struct special_flow_init special_flow_init[] = { 0, .per_vlan = 1, }, -#endif /* HAVE_FLOW_SPEC_IPV6 */ }; /** @@ -355,7 +347,9 @@ priv_special_flow_enable_all(struct priv *priv) { enum hash_rxq_flow_type flow_type; - for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type) { + for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC; + flow_type != HASH_RXQ_FLOW_TYPE_MAC; + ++flow_type) { int ret; ret = priv_special_flow_enable(priv, flow_type); @@ -380,7 +374,9 @@ priv_special_flow_disable_all(struct priv *priv) { enum hash_rxq_flow_type flow_type; - for (flow_type = 0; flow_type != HASH_RXQ_FLOW_TYPE_MAC; ++flow_type) + for (flow_type = HASH_RXQ_FLOW_TYPE_PROMISC; + flow_type != HASH_RXQ_FLOW_TYPE_MAC; + ++flow_type) priv_special_flow_disable(priv, flow_type); } diff --git a/drivers/net/mlx5/mlx5_rxq.c b/drivers/net/mlx5/mlx5_rxq.c index cbb017bb..29c137cd 100644 --- a/drivers/net/mlx5/mlx5_rxq.c +++ b/drivers/net/mlx5/mlx5_rxq.c @@ -43,6 +43,8 @@ #pragma GCC diagnostic ignored "-pedantic" #endif #include <infiniband/verbs.h> +#include <infiniband/arch.h> +#include <infiniband/mlx5_hw.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-pedantic" #endif @@ -105,7 +107,6 @@ const struct hash_rxq_init hash_rxq_init[] = { }, .underlayer = &hash_rxq_init[HASH_RXQ_ETH], }, -#ifdef HAVE_FLOW_SPEC_IPV6 [HASH_RXQ_TCPV6] = { .hash_fields = (IBV_EXP_RX_HASH_SRC_IPV6 | IBV_EXP_RX_HASH_DST_IPV6 | @@ -144,7 +145,6 @@ const struct hash_rxq_init hash_rxq_init[] = { }, .underlayer = &hash_rxq_init[HASH_RXQ_ETH], }, -#endif /* HAVE_FLOW_SPEC_IPV6 */ [HASH_RXQ_ETH] = { .hash_fields = 0, .dpdk_rss_hf = 0, @@ -168,17 +168,11 @@ static const struct ind_table_init ind_table_init[] = { 1 << HASH_RXQ_TCPV4 | 1 << HASH_RXQ_UDPV4 | 1 << HASH_RXQ_IPV4 | -#ifdef HAVE_FLOW_SPEC_IPV6 1 << HASH_RXQ_TCPV6 | 1 << HASH_RXQ_UDPV6 
| 1 << HASH_RXQ_IPV6 | -#endif /* HAVE_FLOW_SPEC_IPV6 */ 0, -#ifdef HAVE_FLOW_SPEC_IPV6 .hash_types_n = 6, -#else /* HAVE_FLOW_SPEC_IPV6 */ - .hash_types_n = 3, -#endif /* HAVE_FLOW_SPEC_IPV6 */ }, { .max_size = 1, @@ -243,12 +237,8 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr, init = &hash_rxq_init[type]; *flow_attr = (struct ibv_exp_flow_attr){ .type = IBV_EXP_FLOW_ATTR_NORMAL, -#ifdef MLX5_FDIR_SUPPORT /* Priorities < 3 are reserved for flow director. */ .priority = init->flow_priority + 3, -#else /* MLX5_FDIR_SUPPORT */ - .priority = init->flow_priority, -#endif /* MLX5_FDIR_SUPPORT */ .num_of_specs = 0, .port = priv->port, .flags = 0, @@ -279,7 +269,7 @@ priv_flow_attr(struct priv *priv, struct ibv_exp_flow_attr *flow_attr, static enum hash_rxq_type hash_rxq_type_from_pos(const struct ind_table_init *table, unsigned int pos) { - enum hash_rxq_type type = 0; + enum hash_rxq_type type = HASH_RXQ_TCPV4; assert(pos < table->hash_types_n); do { @@ -385,8 +375,13 @@ priv_create_hash_rxqs(struct priv *priv) DEBUG("indirection table extended to assume %u WQs", priv->reta_idx_n); } - for (i = 0; (i != priv->reta_idx_n); ++i) - wqs[i] = (*priv->rxqs)[(*priv->reta_idx)[i]]->wq; + for (i = 0; (i != priv->reta_idx_n); ++i) { + struct rxq_ctrl *rxq_ctrl; + + rxq_ctrl = container_of((*priv->rxqs)[(*priv->reta_idx)[i]], + struct rxq_ctrl, rxq); + wqs[i] = rxq_ctrl->wq; + } /* Get number of hash RX queues to configure. */ for (i = 0, hash_rxqs_n = 0; (i != ind_tables_n); ++i) hash_rxqs_n += ind_table_init[i].hash_types_n; @@ -589,9 +584,7 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type) case HASH_RXQ_FLOW_TYPE_ALLMULTI: return !!priv->allmulti_req; case HASH_RXQ_FLOW_TYPE_BROADCAST: -#ifdef HAVE_FLOW_SPEC_IPV6 case HASH_RXQ_FLOW_TYPE_IPV6MULTI: -#endif /* HAVE_FLOW_SPEC_IPV6 */ /* If allmulti is enabled, broadcast and ipv6multi * are unnecessary. */ return !priv->allmulti_req; @@ -616,9 +609,11 @@ priv_allow_flow_type(struct priv *priv, enum hash_rxq_flow_type type) int priv_rehash_flows(struct priv *priv) { - unsigned int i; + enum hash_rxq_flow_type i; - for (i = 0; (i != RTE_DIM((*priv->hash_rxqs)[0].special_flow)); ++i) + for (i = HASH_RXQ_FLOW_TYPE_PROMISC; + i != RTE_DIM((*priv->hash_rxqs)[0].special_flow); + ++i) if (!priv_allow_flow_type(priv, i)) { priv_special_flow_disable(priv, i); } else { @@ -634,148 +629,9 @@ priv_rehash_flows(struct priv *priv) } /** - * Allocate RX queue elements with scattered packets support. - * - * @param rxq - * Pointer to RX queue structure. - * @param elts_n - * Number of elements to allocate. - * @param[in] pool - * If not NULL, fetch buffers from this array instead of allocating them - * with rte_pktmbuf_alloc(). - * - * @return - * 0 on success, errno value on failure. - */ -static int -rxq_alloc_elts_sp(struct rxq *rxq, unsigned int elts_n, - struct rte_mbuf **pool) -{ - unsigned int i; - struct rxq_elt_sp (*elts)[elts_n] = - rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, - rxq->socket); - int ret = 0; - - if (elts == NULL) { - ERROR("%p: can't allocate packets array", (void *)rxq); - ret = ENOMEM; - goto error; - } - /* For each WR (packet). */ - for (i = 0; (i != elts_n); ++i) { - unsigned int j; - struct rxq_elt_sp *elt = &(*elts)[i]; - struct ibv_sge (*sges)[RTE_DIM(elt->sges)] = &elt->sges; - - /* These two arrays must have the same size. */ - assert(RTE_DIM(elt->sges) == RTE_DIM(elt->bufs)); - /* For each SGE (segment). 
*/ - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - struct ibv_sge *sge = &(*sges)[j]; - struct rte_mbuf *buf; - - if (pool != NULL) { - buf = *(pool++); - assert(buf != NULL); - rte_pktmbuf_reset(buf); - } else - buf = rte_pktmbuf_alloc(rxq->mp); - if (buf == NULL) { - assert(pool == NULL); - ERROR("%p: empty mbuf pool", (void *)rxq); - ret = ENOMEM; - goto error; - } - elt->bufs[j] = buf; - /* Headroom is reserved by rte_pktmbuf_alloc(). */ - assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); - /* Buffer is supposed to be empty. */ - assert(rte_pktmbuf_data_len(buf) == 0); - assert(rte_pktmbuf_pkt_len(buf) == 0); - /* sge->addr must be able to store a pointer. */ - assert(sizeof(sge->addr) >= sizeof(uintptr_t)); - if (j == 0) { - /* The first SGE keeps its headroom. */ - sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); - sge->length = (buf->buf_len - - RTE_PKTMBUF_HEADROOM); - } else { - /* Subsequent SGEs lose theirs. */ - assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); - SET_DATA_OFF(buf, 0); - sge->addr = (uintptr_t)buf->buf_addr; - sge->length = buf->buf_len; - } - sge->lkey = rxq->mr->lkey; - /* Redundant check for tailroom. */ - assert(sge->length == rte_pktmbuf_tailroom(buf)); - } - } - DEBUG("%p: allocated and configured %u WRs (%zu segments)", - (void *)rxq, elts_n, (elts_n * RTE_DIM((*elts)[0].sges))); - rxq->elts_n = elts_n; - rxq->elts_head = 0; - rxq->elts.sp = elts; - assert(ret == 0); - return 0; -error: - if (elts != NULL) { - assert(pool == NULL); - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - unsigned int j; - struct rxq_elt_sp *elt = &(*elts)[i]; - - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - struct rte_mbuf *buf = elt->bufs[j]; - - if (buf != NULL) - rte_pktmbuf_free_seg(buf); - } - } - rte_free(elts); - } - DEBUG("%p: failed, freed everything", (void *)rxq); - assert(ret > 0); - return ret; -} - -/** - * Free RX queue elements with scattered packets support. - * - * @param rxq - * Pointer to RX queue structure. - */ -static void -rxq_free_elts_sp(struct rxq *rxq) -{ - unsigned int i; - unsigned int elts_n = rxq->elts_n; - struct rxq_elt_sp (*elts)[elts_n] = rxq->elts.sp; - - DEBUG("%p: freeing WRs", (void *)rxq); - rxq->elts_n = 0; - rxq->elts.sp = NULL; - if (elts == NULL) - return; - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - unsigned int j; - struct rxq_elt_sp *elt = &(*elts)[i]; - - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - struct rte_mbuf *buf = elt->bufs[j]; - - if (buf != NULL) - rte_pktmbuf_free_seg(buf); - } - } - rte_free(elts); -} - -/** * Allocate RX queue elements. * - * @param rxq + * @param rxq_ctrl * Pointer to RX queue structure. * @param elts_n * Number of elements to allocate. @@ -787,73 +643,67 @@ rxq_free_elts_sp(struct rxq *rxq) * 0 on success, errno value on failure. */ static int -rxq_alloc_elts(struct rxq *rxq, unsigned int elts_n, struct rte_mbuf **pool) +rxq_alloc_elts(struct rxq_ctrl *rxq_ctrl, unsigned int elts_n, + struct rte_mbuf *(*pool)[]) { + const unsigned int sges_n = 1 << rxq_ctrl->rxq.sges_n; unsigned int i; - struct rxq_elt (*elts)[elts_n] = - rte_calloc_socket("RXQ elements", 1, sizeof(*elts), 0, - rxq->socket); int ret = 0; - if (elts == NULL) { - ERROR("%p: can't allocate packets array", (void *)rxq); - ret = ENOMEM; - goto error; - } - /* For each WR (packet). */ + /* Iterate on segments. 
*/ for (i = 0; (i != elts_n); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct ibv_sge *sge = &(*elts)[i].sge; struct rte_mbuf *buf; + volatile struct mlx5_wqe_data_seg *scat = + &(*rxq_ctrl->rxq.wqes)[i]; if (pool != NULL) { - buf = *(pool++); + buf = (*pool)[i]; assert(buf != NULL); rte_pktmbuf_reset(buf); + rte_pktmbuf_refcnt_update(buf, 1); } else - buf = rte_pktmbuf_alloc(rxq->mp); + buf = rte_pktmbuf_alloc(rxq_ctrl->rxq.mp); if (buf == NULL) { assert(pool == NULL); - ERROR("%p: empty mbuf pool", (void *)rxq); + ERROR("%p: empty mbuf pool", (void *)rxq_ctrl); ret = ENOMEM; goto error; } - elt->buf = buf; /* Headroom is reserved by rte_pktmbuf_alloc(). */ assert(DATA_OFF(buf) == RTE_PKTMBUF_HEADROOM); /* Buffer is supposed to be empty. */ assert(rte_pktmbuf_data_len(buf) == 0); assert(rte_pktmbuf_pkt_len(buf) == 0); - /* sge->addr must be able to store a pointer. */ - assert(sizeof(sge->addr) >= sizeof(uintptr_t)); - /* SGE keeps its headroom. */ - sge->addr = (uintptr_t) - ((uint8_t *)buf->buf_addr + RTE_PKTMBUF_HEADROOM); - sge->length = (buf->buf_len - RTE_PKTMBUF_HEADROOM); - sge->lkey = rxq->mr->lkey; - /* Redundant check for tailroom. */ - assert(sge->length == rte_pktmbuf_tailroom(buf)); + assert(!buf->next); + /* Only the first segment keeps headroom. */ + if (i % sges_n) + SET_DATA_OFF(buf, 0); + PORT(buf) = rxq_ctrl->rxq.port_id; + DATA_LEN(buf) = rte_pktmbuf_tailroom(buf); + PKT_LEN(buf) = DATA_LEN(buf); + NB_SEGS(buf) = 1; + /* scat->addr must be able to store a pointer. */ + assert(sizeof(scat->addr) >= sizeof(uintptr_t)); + *scat = (struct mlx5_wqe_data_seg){ + .addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)), + .byte_count = htonl(DATA_LEN(buf)), + .lkey = htonl(rxq_ctrl->mr->lkey), + }; + (*rxq_ctrl->rxq.elts)[i] = buf; } - DEBUG("%p: allocated and configured %u single-segment WRs", - (void *)rxq, elts_n); - rxq->elts_n = elts_n; - rxq->elts_head = 0; - rxq->elts.no_sp = elts; + DEBUG("%p: allocated and configured %u segments (max %u packets)", + (void *)rxq_ctrl, elts_n, elts_n / (1 << rxq_ctrl->rxq.sges_n)); assert(ret == 0); return 0; error: - if (elts != NULL) { - assert(pool == NULL); - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct rte_mbuf *buf = elt->buf; - - if (buf != NULL) - rte_pktmbuf_free_seg(buf); - } - rte_free(elts); + assert(pool == NULL); + elts_n = i; + for (i = 0; (i != elts_n); ++i) { + if ((*rxq_ctrl->rxq.elts)[i] != NULL) + rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); + (*rxq_ctrl->rxq.elts)[i] = NULL; } - DEBUG("%p: failed, freed everything", (void *)rxq); + DEBUG("%p: failed, freed everything", (void *)rxq_ctrl); assert(ret > 0); return ret; } @@ -861,29 +711,23 @@ error: /** * Free RX queue elements. * - * @param rxq + * @param rxq_ctrl * Pointer to RX queue structure. 
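Each receive ring entry above is described to the hardware by a data segment holding a big-endian length, lkey and buffer address. A rough standalone sketch of that conversion; struct data_seg, fill_data_seg() and htobe64() (a glibc helper standing in for htonll() from infiniband/arch.h) are illustrative stand-ins, not driver API:

#include <arpa/inet.h>	/* htonl(), ntohl() */
#include <endian.h>	/* htobe64(), glibc */
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the hardware data segment. */
struct data_seg {
	uint32_t byte_count;
	uint32_t lkey;
	uint64_t addr;
};

/* Fill one scatter entry from an mbuf-like buffer, mirroring the
 * receive ring initialization above. */
static void
fill_data_seg(struct data_seg *scat, uintptr_t addr, uint32_t len, uint32_t lkey)
{
	scat->addr = htobe64((uint64_t)addr);
	scat->byte_count = htonl(len);
	scat->lkey = htonl(lkey);
}

int main(void)
{
	static char buf[2048];
	struct data_seg seg;

	fill_data_seg(&seg, (uintptr_t)buf, sizeof(buf), 0x1234);
	printf("byte_count=0x%08x\n", ntohl(seg.byte_count)); /* 0x00000800 */
	return 0;
}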
*/ static void -rxq_free_elts(struct rxq *rxq) +rxq_free_elts(struct rxq_ctrl *rxq_ctrl) { unsigned int i; - unsigned int elts_n = rxq->elts_n; - struct rxq_elt (*elts)[elts_n] = rxq->elts.no_sp; - DEBUG("%p: freeing WRs", (void *)rxq); - rxq->elts_n = 0; - rxq->elts.no_sp = NULL; - if (elts == NULL) + DEBUG("%p: freeing WRs", (void *)rxq_ctrl); + if (rxq_ctrl->rxq.elts == NULL) return; - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct rte_mbuf *buf = elt->buf; - if (buf != NULL) - rte_pktmbuf_free_seg(buf); + for (i = 0; (i != rxq_ctrl->rxq.elts_n); ++i) { + if ((*rxq_ctrl->rxq.elts)[i] != NULL) + rte_pktmbuf_free_seg((*rxq_ctrl->rxq.elts)[i]); + (*rxq_ctrl->rxq.elts)[i] = NULL; } - rte_free(elts); } /** @@ -891,65 +735,60 @@ rxq_free_elts(struct rxq *rxq) * * Destroy objects, free allocated memory and reset the structure for reuse. * - * @param rxq + * @param rxq_ctrl * Pointer to RX queue structure. */ void -rxq_cleanup(struct rxq *rxq) +rxq_cleanup(struct rxq_ctrl *rxq_ctrl) { struct ibv_exp_release_intf_params params; - DEBUG("cleaning up %p", (void *)rxq); - if (rxq->sp) - rxq_free_elts_sp(rxq); - else - rxq_free_elts(rxq); - rxq->poll = NULL; - rxq->recv = NULL; - if (rxq->if_wq != NULL) { - assert(rxq->priv != NULL); - assert(rxq->priv->ctx != NULL); - assert(rxq->wq != NULL); + DEBUG("cleaning up %p", (void *)rxq_ctrl); + rxq_free_elts(rxq_ctrl); + if (rxq_ctrl->if_wq != NULL) { + assert(rxq_ctrl->priv != NULL); + assert(rxq_ctrl->priv->ctx != NULL); + assert(rxq_ctrl->wq != NULL); params = (struct ibv_exp_release_intf_params){ .comp_mask = 0, }; - claim_zero(ibv_exp_release_intf(rxq->priv->ctx, - rxq->if_wq, + claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx, + rxq_ctrl->if_wq, ¶ms)); } - if (rxq->if_cq != NULL) { - assert(rxq->priv != NULL); - assert(rxq->priv->ctx != NULL); - assert(rxq->cq != NULL); + if (rxq_ctrl->if_cq != NULL) { + assert(rxq_ctrl->priv != NULL); + assert(rxq_ctrl->priv->ctx != NULL); + assert(rxq_ctrl->cq != NULL); params = (struct ibv_exp_release_intf_params){ .comp_mask = 0, }; - claim_zero(ibv_exp_release_intf(rxq->priv->ctx, - rxq->if_cq, + claim_zero(ibv_exp_release_intf(rxq_ctrl->priv->ctx, + rxq_ctrl->if_cq, ¶ms)); } - if (rxq->wq != NULL) - claim_zero(ibv_exp_destroy_wq(rxq->wq)); - if (rxq->cq != NULL) - claim_zero(ibv_destroy_cq(rxq->cq)); - if (rxq->rd != NULL) { + if (rxq_ctrl->wq != NULL) + claim_zero(ibv_exp_destroy_wq(rxq_ctrl->wq)); + if (rxq_ctrl->cq != NULL) + claim_zero(ibv_destroy_cq(rxq_ctrl->cq)); + if (rxq_ctrl->rd != NULL) { struct ibv_exp_destroy_res_domain_attr attr = { .comp_mask = 0, }; - assert(rxq->priv != NULL); - assert(rxq->priv->ctx != NULL); - claim_zero(ibv_exp_destroy_res_domain(rxq->priv->ctx, - rxq->rd, + assert(rxq_ctrl->priv != NULL); + assert(rxq_ctrl->priv->ctx != NULL); + claim_zero(ibv_exp_destroy_res_domain(rxq_ctrl->priv->ctx, + rxq_ctrl->rd, &attr)); } - if (rxq->mr != NULL) - claim_zero(ibv_dereg_mr(rxq->mr)); - memset(rxq, 0, sizeof(*rxq)); + if (rxq_ctrl->mr != NULL) + claim_zero(ibv_dereg_mr(rxq_ctrl->mr)); + memset(rxq_ctrl, 0, sizeof(*rxq_ctrl)); } /** - * Reconfigure a RX queue with new parameters. + * Reconfigure RX queue buffers. * * rxq_rehash() does not allocate mbufs, which, if not done from the right * thread (such as a control thread), may corrupt the pool. @@ -957,173 +796,109 @@ rxq_cleanup(struct rxq *rxq) * * @param dev * Pointer to Ethernet device structure. - * @param rxq + * @param rxq_ctrl * RX queue pointer. 
* * @return * 0 on success, errno value on failure. */ int -rxq_rehash(struct rte_eth_dev *dev, struct rxq *rxq) +rxq_rehash(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl) { - struct priv *priv = rxq->priv; - struct rxq tmpl = *rxq; - unsigned int mbuf_n; - unsigned int desc_n; - struct rte_mbuf **pool; - unsigned int i, k; + unsigned int elts_n = rxq_ctrl->rxq.elts_n; + unsigned int i; struct ibv_exp_wq_attr mod; int err; - DEBUG("%p: rehashing queue %p", (void *)dev, (void *)rxq); - /* Number of descriptors and mbufs currently allocated. */ - desc_n = (tmpl.elts_n * (tmpl.sp ? MLX5_PMD_SGE_WR_N : 1)); - mbuf_n = desc_n; - /* Toggle RX checksum offload if hardware supports it. */ - if (priv->hw_csum) { - tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - rxq->csum = tmpl.csum; - } - if (priv->hw_csum_l2tun) { - tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - rxq->csum_l2tun = tmpl.csum_l2tun; - } - /* Enable scattered packets support for this queue if necessary. */ - if ((dev->data->dev_conf.rxmode.jumbo_frame) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len > - (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { - tmpl.sp = 1; - desc_n /= MLX5_PMD_SGE_WR_N; - } else - tmpl.sp = 0; - DEBUG("%p: %s scattered packets support (%u WRs)", - (void *)dev, (tmpl.sp ? "enabling" : "disabling"), desc_n); - /* If scatter mode is the same as before, nothing to do. */ - if (tmpl.sp == rxq->sp) { - DEBUG("%p: nothing to do", (void *)dev); - return 0; - } + DEBUG("%p: rehashing queue %p with %u SGE(s) per packet", + (void *)dev, (void *)rxq_ctrl, 1 << rxq_ctrl->rxq.sges_n); + assert(!(elts_n % (1 << rxq_ctrl->rxq.sges_n))); /* From now on, any failure will render the queue unusable. * Reinitialize WQ. */ mod = (struct ibv_exp_wq_attr){ .attr_mask = IBV_EXP_WQ_ATTR_STATE, .wq_state = IBV_EXP_WQS_RESET, }; - err = ibv_exp_modify_wq(tmpl.wq, &mod); + err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod); if (err) { ERROR("%p: cannot reset WQ: %s", (void *)dev, strerror(err)); assert(err > 0); return err; } - /* Allocate pool. */ - pool = rte_malloc(__func__, (mbuf_n * sizeof(*pool)), 0); - if (pool == NULL) { - ERROR("%p: cannot allocate memory", (void *)dev); - return ENOBUFS; - } /* Snatch mbufs from original queue. */ - k = 0; - if (rxq->sp) { - struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt_sp *elt = &(*elts)[i]; - unsigned int j; - - for (j = 0; (j != RTE_DIM(elt->bufs)); ++j) { - assert(elt->bufs[j] != NULL); - pool[k++] = elt->bufs[j]; - } - } - } else { - struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - struct rxq_elt *elt = &(*elts)[i]; - struct rte_mbuf *buf = elt->buf; + claim_zero(rxq_alloc_elts(rxq_ctrl, elts_n, rxq_ctrl->rxq.elts)); + for (i = 0; i != elts_n; ++i) { + struct rte_mbuf *buf = (*rxq_ctrl->rxq.elts)[i]; - pool[k++] = buf; - } + assert(rte_mbuf_refcnt_read(buf) == 2); + rte_pktmbuf_free_seg(buf); } - assert(k == mbuf_n); - tmpl.elts_n = 0; - tmpl.elts.sp = NULL; - assert((void *)&tmpl.elts.sp == (void *)&tmpl.elts.no_sp); - err = ((tmpl.sp) ? - rxq_alloc_elts_sp(&tmpl, desc_n, pool) : - rxq_alloc_elts(&tmpl, desc_n, pool)); - if (err) { - ERROR("%p: cannot reallocate WRs, aborting", (void *)dev); - rte_free(pool); - assert(err > 0); - return err; - } - assert(tmpl.elts_n == desc_n); - assert(tmpl.elts.sp != NULL); - rte_free(pool); - /* Clean up original data. 
*/ - rxq->elts_n = 0; - rte_free(rxq->elts.sp); - rxq->elts.sp = NULL; /* Change queue state to ready. */ mod = (struct ibv_exp_wq_attr){ .attr_mask = IBV_EXP_WQ_ATTR_STATE, .wq_state = IBV_EXP_WQS_RDY, }; - err = ibv_exp_modify_wq(tmpl.wq, &mod); + err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod); if (err) { ERROR("%p: WQ state to IBV_EXP_WQS_RDY failed: %s", (void *)dev, strerror(err)); goto error; } - /* Post SGEs. */ - assert(tmpl.if_wq != NULL); - if (tmpl.sp) { - struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - err = tmpl.if_wq->recv_sg_list - (tmpl.wq, - (*elts)[i].sges, - RTE_DIM((*elts)[i].sges)); - if (err) - break; - } - } else { - struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - err = tmpl.if_wq->recv_burst( - tmpl.wq, - &(*elts)[i].sge, - 1); - if (err) - break; - } - } - if (err) { - ERROR("%p: failed to post SGEs with error %d", - (void *)dev, err); - /* Set err because it does not contain a valid errno value. */ - err = EIO; - goto error; - } - if (tmpl.sp) - tmpl.recv = tmpl.if_wq->recv_sg_list; - else - tmpl.recv = tmpl.if_wq->recv_burst; + /* Update doorbell counter. */ + rxq_ctrl->rxq.rq_ci = elts_n >> rxq_ctrl->rxq.sges_n; + rte_wmb(); + *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci); error: - *rxq = tmpl; assert(err >= 0); return err; } /** + * Initialize RX queue. + * + * @param tmpl + * Pointer to RX queue control template. + * + * @return + * 0 on success, errno value on failure. + */ +static inline int +rxq_setup(struct rxq_ctrl *tmpl) +{ + struct ibv_cq *ibcq = tmpl->cq; + struct mlx5_cq *cq = to_mxxx(cq, cq); + struct mlx5_rwq *rwq = container_of(tmpl->wq, struct mlx5_rwq, wq); + struct rte_mbuf *(*elts)[tmpl->rxq.elts_n] = + rte_calloc_socket("RXQ", 1, sizeof(*elts), 0, tmpl->socket); + + if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) { + ERROR("Wrong MLX5_CQE_SIZE environment variable value: " + "it should be set to %u", RTE_CACHE_LINE_SIZE); + return EINVAL; + } + if (elts == NULL) + return ENOMEM; + tmpl->rxq.rq_db = rwq->rq.db; + tmpl->rxq.cqe_n = ibcq->cqe + 1; + tmpl->rxq.cq_ci = 0; + tmpl->rxq.rq_ci = 0; + tmpl->rxq.cq_db = cq->dbrec; + tmpl->rxq.wqes = + (volatile struct mlx5_wqe_data_seg (*)[]) + (uintptr_t)rwq->rq.buff; + tmpl->rxq.cqes = + (volatile struct mlx5_cqe (*)[]) + (uintptr_t)cq->active_buf->buf; + tmpl->rxq.elts = elts; + return 0; +} + +/** * Configure a RX queue. * * @param dev * Pointer to Ethernet device structure. - * @param rxq + * @param rxq_ctrl * Pointer to RX queue structure. * @param desc * Number of descriptors to configure in queue. @@ -1138,15 +913,18 @@ error: * 0 on success, errno value on failure. 
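The doorbell update at the end of rxq_rehash() counts WQEs rather than mbufs, since one WQE covers 2^sges_n ring elements. A tiny worked example with hypothetical numbers:

#include <stdio.h>

int main(void)
{
	/* Hypothetical ring: 512 elements, 4 SGEs per packet (sges_n = 2),
	 * as when scattering jumbo frames across several mbufs. */
	unsigned int elts_n = 512;
	unsigned int sges_n = 2;
	/* One receive WQE consumes 1 << sges_n elements, so the doorbell
	 * counter (rq_ci) is expressed in WQEs, not in mbufs. */
	unsigned int rq_ci = elts_n >> sges_n;

	printf("rq_ci = %u\n", rq_ci); /* 128 WQEs posted */
	return 0;
}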
*/ int -rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, - unsigned int socket, const struct rte_eth_rxconf *conf, - struct rte_mempool *mp) +rxq_ctrl_setup(struct rte_eth_dev *dev, struct rxq_ctrl *rxq_ctrl, + uint16_t desc, unsigned int socket, + const struct rte_eth_rxconf *conf, struct rte_mempool *mp) { struct priv *priv = dev->data->dev_private; - struct rxq tmpl = { + struct rxq_ctrl tmpl = { .priv = priv, - .mp = mp, - .socket = socket + .socket = socket, + .rxq = { + .elts_n = desc, + .mp = mp, + }, }; struct ibv_exp_wq_attr mod; union { @@ -1154,44 +932,59 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, struct ibv_exp_cq_init_attr cq; struct ibv_exp_res_domain_init_attr rd; struct ibv_exp_wq_init_attr wq; + struct ibv_exp_cq_attr cq_attr; } attr; enum ibv_exp_query_intf_status status; - struct rte_mbuf *buf; + unsigned int mb_len = rte_pktmbuf_data_room_size(mp); + unsigned int cqe_n = desc - 1; + struct rte_mbuf *(*elts)[desc] = NULL; int ret = 0; - unsigned int i; - unsigned int cq_size = desc; (void)conf; /* Thresholds configuration (ignored). */ - if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) { - ERROR("%p: invalid number of RX descriptors (must be a" - " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N); - return EINVAL; + /* Enable scattered packets support for this queue if necessary. */ + assert(mb_len >= RTE_PKTMBUF_HEADROOM); + if ((dev->data->dev_conf.rxmode.jumbo_frame) && + (dev->data->dev_conf.rxmode.max_rx_pkt_len > + (mb_len - RTE_PKTMBUF_HEADROOM))) { + unsigned int size = + RTE_PKTMBUF_HEADROOM + + dev->data->dev_conf.rxmode.max_rx_pkt_len; + unsigned int sges_n; + + /* + * Determine the number of SGEs needed for a full packet + * and round it to the next power of two. + */ + sges_n = log2above((size / mb_len) + !!(size % mb_len)); + tmpl.rxq.sges_n = sges_n; + /* Make sure rxq.sges_n did not overflow. */ + size = mb_len * (1 << tmpl.rxq.sges_n); + size -= RTE_PKTMBUF_HEADROOM; + if (size < dev->data->dev_conf.rxmode.max_rx_pkt_len) { + ERROR("%p: too many SGEs (%u) needed to handle" + " requested maximum packet size %u", + (void *)dev, + 1 << sges_n, + dev->data->dev_conf.rxmode.max_rx_pkt_len); + return EOVERFLOW; + } } - /* Get mbuf length. */ - buf = rte_pktmbuf_alloc(mp); - if (buf == NULL) { - ERROR("%p: unable to allocate mbuf", (void *)dev); - return ENOMEM; + DEBUG("%p: maximum number of segments per packet: %u", + (void *)dev, 1 << tmpl.rxq.sges_n); + if (desc % (1 << tmpl.rxq.sges_n)) { + ERROR("%p: number of RX queue descriptors (%u) is not a" + " multiple of SGEs per packet (%u)", + (void *)dev, + desc, + 1 << tmpl.rxq.sges_n); + return EINVAL; } - tmpl.mb_len = buf->buf_len; - assert((rte_pktmbuf_headroom(buf) + - rte_pktmbuf_tailroom(buf)) == tmpl.mb_len); - assert(rte_pktmbuf_headroom(buf) == RTE_PKTMBUF_HEADROOM); - rte_pktmbuf_free(buf); /* Toggle RX checksum offload if hardware supports it. */ if (priv->hw_csum) - tmpl.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; + tmpl.rxq.csum = !!dev->data->dev_conf.rxmode.hw_ip_checksum; if (priv->hw_csum_l2tun) - tmpl.csum_l2tun = !!dev->data->dev_conf.rxmode.hw_ip_checksum; - /* Enable scattered packets support for this queue if necessary. */ - if ((dev->data->dev_conf.rxmode.jumbo_frame) && - (dev->data->dev_conf.rxmode.max_rx_pkt_len > - (tmpl.mb_len - RTE_PKTMBUF_HEADROOM))) { - tmpl.sp = 1; - desc /= MLX5_PMD_SGE_WR_N; - } - DEBUG("%p: %s scattered packets support (%u WRs)", - (void *)dev, (tmpl.sp ? 
"enabling" : "disabling"), desc); + tmpl.rxq.csum_l2tun = + !!dev->data->dev_conf.rxmode.hw_ip_checksum; /* Use the entire RX mempool as the memory region. */ tmpl.mr = mlx5_mp2mr(priv->pd, mp); if (tmpl.mr == NULL) { @@ -1217,7 +1010,12 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, .res_domain = tmpl.rd, }; - tmpl.cq = ibv_exp_create_cq(priv->ctx, cq_size, NULL, NULL, 0, + if (priv->cqe_comp) { + attr.cq.comp_mask |= IBV_EXP_CQ_INIT_ATTR_FLAGS; + attr.cq.flags |= IBV_EXP_CQ_COMPRESSED_CQE; + cqe_n = (desc * 2) - 1; /* Double the number of CQEs. */ + } + tmpl.cq = ibv_exp_create_cq(priv->ctx, cqe_n, NULL, NULL, 0, &attr.cq); if (tmpl.cq == NULL) { ret = ENOMEM; @@ -1230,64 +1028,51 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, DEBUG("priv->device_attr.max_sge is %d", priv->device_attr.max_sge); /* Configure VLAN stripping. */ - tmpl.vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip; + tmpl.rxq.vlan_strip = (priv->hw_vlan_strip && + !!dev->data->dev_conf.rxmode.hw_vlan_strip); attr.wq = (struct ibv_exp_wq_init_attr){ .wq_context = NULL, /* Could be useful in the future. */ .wq_type = IBV_EXP_WQT_RQ, /* Max number of outstanding WRs. */ - .max_recv_wr = ((priv->device_attr.max_qp_wr < (int)cq_size) ? - priv->device_attr.max_qp_wr : - (int)cq_size), + .max_recv_wr = desc >> tmpl.rxq.sges_n, /* Max number of scatter/gather elements in a WR. */ - .max_recv_sge = ((priv->device_attr.max_sge < - MLX5_PMD_SGE_WR_N) ? - priv->device_attr.max_sge : - MLX5_PMD_SGE_WR_N), + .max_recv_sge = 1 << tmpl.rxq.sges_n, .pd = priv->pd, .cq = tmpl.cq, .comp_mask = IBV_EXP_CREATE_WQ_RES_DOMAIN | -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS IBV_EXP_CREATE_WQ_VLAN_OFFLOADS | -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ 0, .res_domain = tmpl.rd, -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS - .vlan_offloads = (tmpl.vlan_strip ? + .vlan_offloads = (tmpl.rxq.vlan_strip ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0), -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ }; - -#ifdef HAVE_VERBS_FCS /* By default, FCS (CRC) is stripped by hardware. */ if (dev->data->dev_conf.rxmode.hw_strip_crc) { - tmpl.crc_present = 0; + tmpl.rxq.crc_present = 0; } else if (priv->hw_fcs_strip) { /* Ask HW/Verbs to leave CRC in place when supported. */ attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_SCATTER_FCS; attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS; - tmpl.crc_present = 1; + tmpl.rxq.crc_present = 1; } else { WARN("%p: CRC stripping has been disabled but will still" " be performed by hardware, make sure MLNX_OFED and" " firmware are up to date", (void *)dev); - tmpl.crc_present = 0; + tmpl.rxq.crc_present = 0; } DEBUG("%p: CRC stripping is %s, %u bytes will be subtracted from" " incoming frames to hide it", (void *)dev, - tmpl.crc_present ? "disabled" : "enabled", - tmpl.crc_present << 2); -#endif /* HAVE_VERBS_FCS */ - -#ifdef HAVE_VERBS_RX_END_PADDING + tmpl.rxq.crc_present ? "disabled" : "enabled", + tmpl.rxq.crc_present << 2); if (!mlx5_getenv_int("MLX5_PMD_ENABLE_PADDING")) ; /* Nothing else to do. 
*/ else if (priv->hw_padding) { INFO("%p: enabling packet padding on queue %p", - (void *)dev, (void *)rxq); + (void *)dev, (void *)rxq_ctrl); attr.wq.flags |= IBV_EXP_CREATE_WQ_FLAG_RX_END_PADDING; attr.wq.comp_mask |= IBV_EXP_CREATE_WQ_FLAGS; } else @@ -1295,7 +1080,6 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, " supported, make sure MLNX_OFED and firmware are" " up to date", (void *)dev); -#endif /* HAVE_VERBS_RX_END_PADDING */ tmpl.wq = ibv_exp_create_wq(priv->ctx, &attr.wq); if (tmpl.wq == NULL) { @@ -1304,23 +1088,25 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, (void *)dev, strerror(ret)); goto error; } - if (tmpl.sp) - ret = rxq_alloc_elts_sp(&tmpl, desc, NULL); - else - ret = rxq_alloc_elts(&tmpl, desc, NULL); - if (ret) { - ERROR("%p: RXQ allocation failed: %s", - (void *)dev, strerror(ret)); + /* + * Make sure number of WRs*SGEs match expectations since a queue + * cannot allocate more than "desc" buffers. + */ + if (((int)attr.wq.max_recv_wr != (desc >> tmpl.rxq.sges_n)) || + ((int)attr.wq.max_recv_sge != (1 << tmpl.rxq.sges_n))) { + ERROR("%p: requested %u*%u but got %u*%u WRs*SGEs", + (void *)dev, + (desc >> tmpl.rxq.sges_n), (1 << tmpl.rxq.sges_n), + attr.wq.max_recv_wr, attr.wq.max_recv_sge); + ret = EINVAL; goto error; } /* Save port ID. */ - tmpl.port_id = dev->data->port_id; - DEBUG("%p: RTE port ID: %u", (void *)rxq, tmpl.port_id); + tmpl.rxq.port_id = dev->data->port_id; + DEBUG("%p: RTE port ID: %u", (void *)rxq_ctrl, tmpl.rxq.port_id); attr.params = (struct ibv_exp_query_intf_params){ .intf_scope = IBV_EXP_INTF_GLOBAL, -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS .intf_version = 1, -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ .intf = IBV_EXP_INTF_CQ, .obj = tmpl.cq, }; @@ -1352,56 +1138,47 @@ rxq_setup(struct rte_eth_dev *dev, struct rxq *rxq, uint16_t desc, (void *)dev, strerror(ret)); goto error; } - /* Post SGEs. */ - if (tmpl.sp) { - struct rxq_elt_sp (*elts)[tmpl.elts_n] = tmpl.elts.sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - ret = tmpl.if_wq->recv_sg_list - (tmpl.wq, - (*elts)[i].sges, - RTE_DIM((*elts)[i].sges)); - if (ret) - break; - } - } else { - struct rxq_elt (*elts)[tmpl.elts_n] = tmpl.elts.no_sp; - - for (i = 0; (i != RTE_DIM(*elts)); ++i) { - ret = tmpl.if_wq->recv_burst( - tmpl.wq, - &(*elts)[i].sge, - 1); - if (ret) - break; - } + ret = rxq_setup(&tmpl); + if (ret) { + ERROR("%p: cannot initialize RX queue structure: %s", + (void *)dev, strerror(ret)); + goto error; } + /* Reuse buffers from original queue if possible. */ + if (rxq_ctrl->rxq.elts_n) { + assert(rxq_ctrl->rxq.elts_n == desc); + assert(rxq_ctrl->rxq.elts != tmpl.rxq.elts); + ret = rxq_alloc_elts(&tmpl, desc, rxq_ctrl->rxq.elts); + } else + ret = rxq_alloc_elts(&tmpl, desc, NULL); if (ret) { - ERROR("%p: failed to post SGEs with error %d", - (void *)dev, ret); - /* Set ret because it does not contain a valid errno value. */ - ret = EIO; + ERROR("%p: RXQ allocation failed: %s", + (void *)dev, strerror(ret)); goto error; } /* Clean up rxq in case we're reinitializing it. */ - DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq); - rxq_cleanup(rxq); - *rxq = tmpl; - DEBUG("%p: rxq updated with %p", (void *)rxq, (void *)&tmpl); + DEBUG("%p: cleaning-up old rxq just in case", (void *)rxq_ctrl); + rxq_cleanup(rxq_ctrl); + /* Move mbuf pointers to dedicated storage area in RX queue. 
*/ + elts = (void *)(rxq_ctrl + 1); + rte_memcpy(elts, tmpl.rxq.elts, sizeof(*elts)); +#ifndef NDEBUG + memset(tmpl.rxq.elts, 0x55, sizeof(*elts)); +#endif + rte_free(tmpl.rxq.elts); + tmpl.rxq.elts = elts; + *rxq_ctrl = tmpl; + /* Update doorbell counter. */ + rxq_ctrl->rxq.rq_ci = desc >> rxq_ctrl->rxq.sges_n; + rte_wmb(); + *rxq_ctrl->rxq.rq_db = htonl(rxq_ctrl->rxq.rq_ci); + DEBUG("%p: rxq updated with %p", (void *)rxq_ctrl, (void *)&tmpl); assert(ret == 0); - /* Assign function in queue. */ -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS - rxq->poll = rxq->if_cq->poll_length_flags_cvlan; -#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ - rxq->poll = rxq->if_cq->poll_length_flags; -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ - if (rxq->sp) - rxq->recv = rxq->if_wq->recv_sg_list; - else - rxq->recv = rxq->if_wq->recv_burst; return 0; error: + elts = tmpl.rxq.elts; rxq_cleanup(&tmpl); + rte_free(elts); assert(ret > 0); return ret; } @@ -1432,12 +1209,19 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, { struct priv *priv = dev->data->dev_private; struct rxq *rxq = (*priv->rxqs)[idx]; + struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); int ret; if (mlx5_is_secondary()) return -E_RTE_SECONDARY; priv_lock(priv); + if (!rte_is_power_of_2(desc)) { + desc = 1 << log2above(desc); + WARN("%p: increased number of descriptors in RX queue %u" + " to the next power of two (%d)", + (void *)dev, idx, desc); + } DEBUG("%p: configuring queue %u for %u descriptors", (void *)dev, idx, desc); if (idx >= priv->rxqs_n) { @@ -1454,29 +1238,28 @@ mlx5_rx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, return -EEXIST; } (*priv->rxqs)[idx] = NULL; - rxq_cleanup(rxq); + rxq_cleanup(rxq_ctrl); } else { - rxq = rte_calloc_socket("RXQ", 1, sizeof(*rxq), 0, socket); - if (rxq == NULL) { + rxq_ctrl = rte_calloc_socket("RXQ", 1, sizeof(*rxq_ctrl) + + desc * sizeof(struct rte_mbuf *), + 0, socket); + if (rxq_ctrl == NULL) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); priv_unlock(priv); return -ENOMEM; } } - ret = rxq_setup(dev, rxq, desc, socket, conf, mp); + ret = rxq_ctrl_setup(dev, rxq_ctrl, desc, socket, conf, mp); if (ret) - rte_free(rxq); + rte_free(rxq_ctrl); else { - rxq->stats.idx = idx; + rxq_ctrl->rxq.stats.idx = idx; DEBUG("%p: adding RX queue %p to list", - (void *)dev, (void *)rxq); - (*priv->rxqs)[idx] = rxq; + (void *)dev, (void *)rxq_ctrl); + (*priv->rxqs)[idx] = &rxq_ctrl->rxq; /* Update receive callback. 
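mlx5_rx_queue_setup() allocates the mbuf pointer array in the same block as the control structure and points rxq.elts just past it. A minimal sketch of that single-allocation layout with simplified stand-in types:

#include <stdio.h>
#include <stdlib.h>

/* Simplified stand-ins: the control structure is followed, in the same
 * allocation, by the array of element pointers. */
struct elt;				/* opaque stand-in for struct rte_mbuf */
struct ctrl {
	unsigned int elts_n;
	struct elt **elts;
};

static struct ctrl *
ctrl_alloc(unsigned int desc)
{
	/* One allocation: header plus trailing pointer array. */
	struct ctrl *c = calloc(1, sizeof(*c) + desc * sizeof(struct elt *));

	if (c == NULL)
		return NULL;
	c->elts_n = desc;
	c->elts = (struct elt **)(c + 1);	/* storage right after the header */
	return c;
}

int main(void)
{
	struct ctrl *c = ctrl_alloc(256);

	if (c != NULL) {
		printf("header %p, elts %p\n", (void *)c, (void *)c->elts);
		free(c);
	}
	return 0;
}

A single allocation also means one rte_free() on teardown and keeps the pointer array on the same socket as the queue it belongs to.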
*/ - if (rxq->sp) - dev->rx_pkt_burst = mlx5_rx_burst_sp; - else - dev->rx_pkt_burst = mlx5_rx_burst; + dev->rx_pkt_burst = mlx5_rx_burst; } priv_unlock(priv); return -ret; @@ -1492,6 +1275,7 @@ void mlx5_rx_queue_release(void *dpdk_rxq) { struct rxq *rxq = (struct rxq *)dpdk_rxq; + struct rxq_ctrl *rxq_ctrl; struct priv *priv; unsigned int i; @@ -1500,17 +1284,18 @@ mlx5_rx_queue_release(void *dpdk_rxq) if (rxq == NULL) return; - priv = rxq->priv; + rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); + priv = rxq_ctrl->priv; priv_lock(priv); for (i = 0; (i != priv->rxqs_n); ++i) if ((*priv->rxqs)[i] == rxq) { DEBUG("%p: removing RX queue %p from list", - (void *)priv->dev, (void *)rxq); + (void *)priv->dev, (void *)rxq_ctrl); (*priv->rxqs)[i] = NULL; break; } - rxq_cleanup(rxq); - rte_free(rxq); + rxq_cleanup(rxq_ctrl); + rte_free(rxq_ctrl); priv_unlock(priv); } @@ -1535,7 +1320,8 @@ mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) { struct rxq *rxq = dpdk_rxq; - struct priv *priv = mlx5_secondary_data_setup(rxq->priv); + struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); + struct priv *priv = mlx5_secondary_data_setup(rxq_ctrl->priv); struct priv *primary_priv; unsigned int index; diff --git a/drivers/net/mlx5/mlx5_rxtx.c b/drivers/net/mlx5/mlx5_rxtx.c index 9d1380a0..0c352f3f 100644 --- a/drivers/net/mlx5/mlx5_rxtx.c +++ b/drivers/net/mlx5/mlx5_rxtx.c @@ -42,6 +42,8 @@ #pragma GCC diagnostic ignored "-pedantic" #endif #include <infiniband/verbs.h> +#include <infiniband/mlx5_hw.h> +#include <infiniband/arch.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-pedantic" #endif @@ -55,7 +57,7 @@ #include <rte_prefetch.h> #include <rte_common.h> #include <rte_branch_prediction.h> -#include <rte_memory.h> +#include <rte_ether.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-pedantic" #endif @@ -65,125 +67,161 @@ #include "mlx5_rxtx.h" #include "mlx5_autoconf.h" #include "mlx5_defs.h" +#include "mlx5_prm.h" + +#ifndef NDEBUG /** - * Manage TX completions. - * - * When sending a burst, mlx5_tx_burst() posts several WRs. - * To improve performance, a completion event is only required once every - * MLX5_PMD_TX_PER_COMP_REQ sends. Doing so discards completion information - * for other WRs, but this information would not be used anyway. + * Verify or set magic value in CQE. * - * @param txq - * Pointer to TX queue structure. + * @param cqe + * Pointer to CQE. * * @return - * 0 on success, -1 on failure. + * 0 the first time. */ -static int -txq_complete(struct txq *txq) +static inline int +check_cqe64_seen(volatile struct mlx5_cqe64 *cqe) { - unsigned int elts_comp = txq->elts_comp; - unsigned int elts_tail = txq->elts_tail; - unsigned int elts_free = txq->elts_tail; - const unsigned int elts_n = txq->elts_n; - int wcs_n; + static const uint8_t magic[] = "seen"; + volatile uint8_t (*buf)[sizeof(cqe->rsvd40)] = &cqe->rsvd40; + int ret = 1; + unsigned int i; - if (unlikely(elts_comp == 0)) - return 0; -#ifdef DEBUG_SEND - DEBUG("%p: processing %u work requests completions", - (void *)txq, elts_comp); -#endif - wcs_n = txq->poll_cnt(txq->cq, elts_comp); - if (unlikely(wcs_n == 0)) - return 0; - if (unlikely(wcs_n < 0)) { - DEBUG("%p: ibv_poll_cq() failed (wcs_n=%d)", - (void *)txq, wcs_n); - return -1; - } - elts_comp -= wcs_n; - assert(elts_comp <= txq->elts_comp); - /* - * Assume WC status is successful as nothing can be done about it - * anyway. 
- */ - elts_tail += wcs_n * txq->elts_comp_cd_init; - if (elts_tail >= elts_n) - elts_tail -= elts_n; + for (i = 0; i < sizeof(magic) && i < sizeof(*buf); ++i) + if (!ret || (*buf)[i] != magic[i]) { + ret = 0; + (*buf)[i] = magic[i]; + } + return ret; +} - while (elts_free != elts_tail) { - struct txq_elt *elt = &(*txq->elts)[elts_free]; - unsigned int elts_free_next = - (((elts_free + 1) == elts_n) ? 0 : elts_free + 1); - struct rte_mbuf *tmp = elt->buf; - struct txq_elt *elt_next = &(*txq->elts)[elts_free_next]; +#endif /* NDEBUG */ + +static inline int +check_cqe64(volatile struct mlx5_cqe64 *cqe, + unsigned int cqes_n, const uint16_t ci) + __attribute__((always_inline)); + +/** + * Check whether CQE is valid. + * + * @param cqe + * Pointer to CQE. + * @param cqes_n + * Size of completion queue. + * @param ci + * Consumer index. + * + * @return + * 0 on success, 1 on failure. + */ +static inline int +check_cqe64(volatile struct mlx5_cqe64 *cqe, + unsigned int cqes_n, const uint16_t ci) +{ + uint16_t idx = ci & cqes_n; + uint8_t op_own = cqe->op_own; + uint8_t op_owner = MLX5_CQE_OWNER(op_own); + uint8_t op_code = MLX5_CQE_OPCODE(op_own); + if (unlikely((op_owner != (!!(idx))) || (op_code == MLX5_CQE_INVALID))) + return 1; /* No CQE. */ #ifndef NDEBUG - /* Poisoning. */ - memset(elt, 0x66, sizeof(*elt)); -#endif - RTE_MBUF_PREFETCH_TO_FREE(elt_next->buf); - /* Faster than rte_pktmbuf_free(). */ - do { - struct rte_mbuf *next = NEXT(tmp); + if ((op_code == MLX5_CQE_RESP_ERR) || + (op_code == MLX5_CQE_REQ_ERR)) { + volatile struct mlx5_err_cqe *err_cqe = (volatile void *)cqe; + uint8_t syndrome = err_cqe->syndrome; - rte_pktmbuf_free_seg(tmp); - tmp = next; - } while (tmp != NULL); - elts_free = elts_free_next; + if ((syndrome == MLX5_CQE_SYNDROME_LOCAL_LENGTH_ERR) || + (syndrome == MLX5_CQE_SYNDROME_REMOTE_ABORTED_ERR)) + return 0; + if (!check_cqe64_seen(cqe)) + ERROR("unexpected CQE error %u (0x%02x)" + " syndrome 0x%02x", + op_code, op_code, syndrome); + return 1; + } else if ((op_code != MLX5_CQE_RESP_SEND) && + (op_code != MLX5_CQE_REQ)) { + if (!check_cqe64_seen(cqe)) + ERROR("unexpected CQE opcode %u (0x%02x)", + op_code, op_code); + return 1; } - - txq->elts_tail = elts_tail; - txq->elts_comp = elts_comp; +#endif /* NDEBUG */ return 0; } -/* For best performance, this function should not be inlined. */ -struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *) - __attribute__((noinline)); - /** - * Register mempool as a memory region. + * Manage TX completions. * - * @param pd - * Pointer to protection domain. - * @param mp - * Pointer to memory pool. + * When sending a burst, mlx5_tx_burst() posts several WRs. * - * @return - * Memory region pointer, NULL in case of error. + * @param txq + * Pointer to TX queue structure. 
*/ -struct ibv_mr * -mlx5_mp2mr(struct ibv_pd *pd, const struct rte_mempool *mp) +static void +txq_complete(struct txq *txq) { - const struct rte_memseg *ms = rte_eal_get_physmem_layout(); - uintptr_t start = mp->elt_va_start; - uintptr_t end = mp->elt_va_end; - unsigned int i; + const unsigned int elts_n = txq->elts_n; + const unsigned int cqe_n = txq->cqe_n; + const unsigned int cqe_cnt = cqe_n - 1; + uint16_t elts_free = txq->elts_tail; + uint16_t elts_tail; + uint16_t cq_ci = txq->cq_ci; + volatile struct mlx5_cqe64 *cqe = NULL; + volatile union mlx5_wqe *wqe; + + do { + volatile struct mlx5_cqe64 *tmp; - DEBUG("mempool %p area start=%p end=%p size=%zu", - (const void *)mp, (void *)start, (void *)end, - (size_t)(end - start)); - /* Round start and end to page boundary if found in memory segments. */ - for (i = 0; (i < RTE_MAX_MEMSEG) && (ms[i].addr != NULL); ++i) { - uintptr_t addr = (uintptr_t)ms[i].addr; - size_t len = ms[i].len; - unsigned int align = ms[i].hugepage_sz; - - if ((start > addr) && (start < addr + len)) - start = RTE_ALIGN_FLOOR(start, align); - if ((end > addr) && (end < addr + len)) - end = RTE_ALIGN_CEIL(end, align); + tmp = &(*txq->cqes)[cq_ci & cqe_cnt].cqe64; + if (check_cqe64(tmp, cqe_n, cq_ci)) + break; + cqe = tmp; +#ifndef NDEBUG + if (MLX5_CQE_FORMAT(cqe->op_own) == MLX5_COMPRESSED) { + if (!check_cqe64_seen(cqe)) + ERROR("unexpected compressed CQE, TX stopped"); + return; + } + if ((MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_RESP_ERR) || + (MLX5_CQE_OPCODE(cqe->op_own) == MLX5_CQE_REQ_ERR)) { + if (!check_cqe64_seen(cqe)) + ERROR("unexpected error CQE, TX stopped"); + return; + } +#endif /* NDEBUG */ + ++cq_ci; + } while (1); + if (unlikely(cqe == NULL)) + return; + wqe = &(*txq->wqes)[htons(cqe->wqe_counter) & (txq->wqe_n - 1)]; + elts_tail = wqe->wqe.ctrl.data[3]; + assert(elts_tail < txq->wqe_n); + /* Free buffers. */ + while (elts_free != elts_tail) { + struct rte_mbuf *elt = (*txq->elts)[elts_free]; + unsigned int elts_free_next = + (elts_free + 1) & (elts_n - 1); + struct rte_mbuf *elt_next = (*txq->elts)[elts_free_next]; + +#ifndef NDEBUG + /* Poisoning. */ + memset(&(*txq->elts)[elts_free], + 0x66, + sizeof((*txq->elts)[elts_free])); +#endif + RTE_MBUF_PREFETCH_TO_FREE(elt_next); + /* Only one segment needs to be freed. */ + rte_pktmbuf_free_seg(elt); + elts_free = elts_free_next; } - DEBUG("mempool %p using start=%p end=%p size=%zu for MR", - (const void *)mp, (void *)start, (void *)end, - (size_t)(end - start)); - return ibv_reg_mr(pd, - (void *)start, - end - start, - IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE); + txq->cq_ci = cq_ci; + txq->elts_tail = elts_tail; + /* Update the consumer index. */ + rte_wmb(); + *txq->cq_db = htonl(cq_ci); } /** @@ -204,6 +242,10 @@ txq_mb2mp(struct rte_mbuf *buf) return buf->pool; } +static inline uint32_t +txq_mp2mr(struct txq *txq, struct rte_mempool *mp) + __attribute__((always_inline)); + /** * Get Memory Region (MR) <-> Memory Pool (MP) association from txq->mp2mr[]. * Add MP to txq->mp2mr[] if it's not registered yet. If mp2mr[] is full, @@ -217,11 +259,11 @@ txq_mb2mp(struct rte_mbuf *buf) * @return * mr->lkey on success, (uint32_t)-1 on failure. 
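The reworked txq_complete() walks the element ring from the old tail to the tail reported by the completion, masking indices with the power-of-two ring size. A small standalone sketch of that walk; drain() and the sample indices are hypothetical:

#include <stdio.h>

/* Release every element between the old tail and the tail reported by
 * the completion, wrapping indices with a power-of-two mask. */
static unsigned int
drain(unsigned int elts_free, unsigned int elts_tail, unsigned int elts_n)
{
	unsigned int freed = 0;

	while (elts_free != elts_tail) {
		/* rte_pktmbuf_free_seg((*txq->elts)[elts_free]) in the driver. */
		++freed;
		elts_free = (elts_free + 1) & (elts_n - 1);
	}
	return freed;
}

int main(void)
{
	/* 256-entry ring, tail wrapped past index 0. */
	printf("%u\n", drain(250, 10, 256)); /* prints 16 */
	return 0;
}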
*/ -static uint32_t -txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) +static inline uint32_t +txq_mp2mr(struct txq *txq, struct rte_mempool *mp) { unsigned int i; - struct ibv_mr *mr; + uint32_t lkey = (uint32_t)-1; for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { if (unlikely(txq->mp2mr[i].mp == NULL)) { @@ -230,295 +272,681 @@ txq_mp2mr(struct txq *txq, const struct rte_mempool *mp) } if (txq->mp2mr[i].mp == mp) { assert(txq->mp2mr[i].lkey != (uint32_t)-1); - assert(txq->mp2mr[i].mr->lkey == txq->mp2mr[i].lkey); - return txq->mp2mr[i].lkey; + assert(htonl(txq->mp2mr[i].mr->lkey) == + txq->mp2mr[i].lkey); + lkey = txq->mp2mr[i].lkey; + break; } } - /* Add a new entry, register MR first. */ - DEBUG("%p: discovered new memory pool \"%s\" (%p)", - (void *)txq, mp->name, (const void *)mp); - mr = mlx5_mp2mr(txq->priv->pd, mp); - if (unlikely(mr == NULL)) { - DEBUG("%p: unable to configure MR, ibv_reg_mr() failed.", - (void *)txq); - return (uint32_t)-1; - } - if (unlikely(i == RTE_DIM(txq->mp2mr))) { - /* Table is full, remove oldest entry. */ - DEBUG("%p: MR <-> MP table full, dropping oldest entry.", - (void *)txq); - --i; - claim_zero(ibv_dereg_mr(txq->mp2mr[0].mr)); - memmove(&txq->mp2mr[0], &txq->mp2mr[1], - (sizeof(txq->mp2mr) - sizeof(txq->mp2mr[0]))); - } - /* Store the new entry. */ - txq->mp2mr[i].mp = mp; - txq->mp2mr[i].mr = mr; - txq->mp2mr[i].lkey = mr->lkey; - DEBUG("%p: new MR lkey for MP \"%s\" (%p): 0x%08" PRIu32, - (void *)txq, mp->name, (const void *)mp, txq->mp2mr[i].lkey); - return txq->mp2mr[i].lkey; + if (unlikely(lkey == (uint32_t)-1)) + lkey = txq_mp2mr_reg(txq, mp, i); + return lkey; } -struct txq_mp2mr_mbuf_check_data { - const struct rte_mempool *mp; - int ret; -}; - /** - * Callback function for rte_mempool_obj_iter() to check whether a given - * mempool object looks like a mbuf. - * - * @param[in, out] arg - * Context data (struct txq_mp2mr_mbuf_check_data). Contains mempool pointer - * and return value. - * @param[in] start - * Object start address. - * @param[in] end - * Object end address. - * @param index - * Unused. + * Write a regular WQE. * - * @return - * Nonzero value when object is not a mbuf. + * @param txq + * Pointer to TX queue structure. + * @param wqe + * Pointer to the WQE to fill. + * @param addr + * Buffer data address. + * @param length + * Packet length. + * @param lkey + * Memory region lkey. */ -static void -txq_mp2mr_mbuf_check(void *arg, void *start, void *end, - uint32_t index __rte_unused) +static inline void +mlx5_wqe_write(struct txq *txq, volatile union mlx5_wqe *wqe, + uintptr_t addr, uint32_t length, uint32_t lkey) { - struct txq_mp2mr_mbuf_check_data *data = arg; - struct rte_mbuf *buf = - (void *)((uintptr_t)start + data->mp->header_size); - - (void)index; - /* Check whether mbuf structure fits element size and whether mempool - * pointer is valid. */ - if (((uintptr_t)end >= (uintptr_t)(buf + 1)) && - (buf->pool == data->mp)) - data->ret = 0; - else - data->ret = -1; + wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); + wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4); + wqe->wqe.ctrl.data[2] = 0; + wqe->wqe.ctrl.data[3] = 0; + wqe->inl.eseg.rsvd0 = 0; + wqe->inl.eseg.rsvd1 = 0; + wqe->inl.eseg.mss = 0; + wqe->inl.eseg.rsvd2 = 0; + wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE); + /* Copy the first 16 bytes into inline header. 
*/ + rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start, + (uint8_t *)(uintptr_t)addr, + MLX5_ETH_INLINE_HEADER_SIZE); + addr += MLX5_ETH_INLINE_HEADER_SIZE; + length -= MLX5_ETH_INLINE_HEADER_SIZE; + /* Store remaining data in data segment. */ + wqe->wqe.dseg.byte_count = htonl(length); + wqe->wqe.dseg.lkey = lkey; + wqe->wqe.dseg.addr = htonll(addr); + /* Increment consumer index. */ + ++txq->wqe_ci; } /** - * Iterator function for rte_mempool_walk() to register existing mempools and - * fill the MP to MR cache of a TX queue. + * Write a regular WQE with VLAN. * - * @param[in] mp - * Memory Pool to register. - * @param *arg + * @param txq * Pointer to TX queue structure. + * @param wqe + * Pointer to the WQE to fill. + * @param addr + * Buffer data address. + * @param length + * Packet length. + * @param lkey + * Memory region lkey. + * @param vlan_tci + * VLAN field to insert in packet. */ -void -txq_mp2mr_iter(const struct rte_mempool *mp, void *arg) +static inline void +mlx5_wqe_write_vlan(struct txq *txq, volatile union mlx5_wqe *wqe, + uintptr_t addr, uint32_t length, uint32_t lkey, + uint16_t vlan_tci) { - struct txq *txq = arg; - struct txq_mp2mr_mbuf_check_data data = { - .mp = mp, - .ret = -1, - }; + uint32_t vlan = htonl(0x81000000 | vlan_tci); - /* Discard empty mempools. */ - if (mp->size == 0) - return; - /* Register mempool only if the first element looks like a mbuf. */ - rte_mempool_obj_iter((void *)mp->elt_va_start, - 1, - mp->header_size + mp->elt_size + mp->trailer_size, - 1, - mp->elt_pa, - mp->pg_num, - mp->pg_shift, - txq_mp2mr_mbuf_check, - &data); - if (data.ret) - return; - txq_mp2mr(txq, mp); + wqe->wqe.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); + wqe->wqe.ctrl.data[1] = htonl((txq->qp_num_8s) | 4); + wqe->wqe.ctrl.data[2] = 0; + wqe->wqe.ctrl.data[3] = 0; + wqe->inl.eseg.rsvd0 = 0; + wqe->inl.eseg.rsvd1 = 0; + wqe->inl.eseg.mss = 0; + wqe->inl.eseg.rsvd2 = 0; + wqe->wqe.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE); + /* + * Copy 12 bytes of source & destination MAC address. + * Copy 4 bytes of VLAN. + * Copy 2 bytes of Ether type. + */ + rte_memcpy((uint8_t *)(uintptr_t)wqe->wqe.eseg.inline_hdr_start, + (uint8_t *)(uintptr_t)addr, 12); + rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 12), + &vlan, sizeof(vlan)); + rte_memcpy((uint8_t *)((uintptr_t)wqe->wqe.eseg.inline_hdr_start + 16), + (uint8_t *)((uintptr_t)addr + 12), 2); + addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); + length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); + /* Store remaining data in data segment. */ + wqe->wqe.dseg.byte_count = htonl(length); + wqe->wqe.dseg.lkey = lkey; + wqe->wqe.dseg.addr = htonll(addr); + /* Increment consumer index. */ + ++txq->wqe_ci; } /** - * Insert VLAN using mbuf headroom space. + * Write a inline WQE. * - * @param buf - * Buffer for VLAN insertion. + * @param txq + * Pointer to TX queue structure. + * @param wqe + * Pointer to the WQE to fill. + * @param addr + * Buffer data address. + * @param length + * Packet length. + * @param lkey + * Memory region lkey. + */ +static inline void +mlx5_wqe_write_inline(struct txq *txq, volatile union mlx5_wqe *wqe, + uintptr_t addr, uint32_t length) +{ + uint32_t size; + uint16_t wqe_cnt = txq->wqe_n - 1; + uint16_t wqe_ci = txq->wqe_ci + 1; + + /* Copy the first 16 bytes into inline header. 
*/ + rte_memcpy((void *)(uintptr_t)wqe->inl.eseg.inline_hdr_start, + (void *)(uintptr_t)addr, + MLX5_ETH_INLINE_HEADER_SIZE); + addr += MLX5_ETH_INLINE_HEADER_SIZE; + length -= MLX5_ETH_INLINE_HEADER_SIZE; + size = 3 + ((4 + length + 15) / 16); + wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG); + rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0], + (void *)addr, MLX5_WQE64_INL_DATA); + addr += MLX5_WQE64_INL_DATA; + length -= MLX5_WQE64_INL_DATA; + while (length) { + volatile union mlx5_wqe *wqe_next = + &(*txq->wqes)[wqe_ci & wqe_cnt]; + uint32_t copy_bytes = (length > sizeof(*wqe)) ? + sizeof(*wqe) : + length; + + rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0], + (uint8_t *)addr); + addr += copy_bytes; + length -= copy_bytes; + ++wqe_ci; + } + assert(size < 64); + wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); + wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size); + wqe->inl.ctrl.data[2] = 0; + wqe->inl.ctrl.data[3] = 0; + wqe->inl.eseg.rsvd0 = 0; + wqe->inl.eseg.rsvd1 = 0; + wqe->inl.eseg.mss = 0; + wqe->inl.eseg.rsvd2 = 0; + wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_INLINE_HEADER_SIZE); + /* Increment consumer index. */ + txq->wqe_ci = wqe_ci; +} + +/** + * Write a inline WQE with VLAN. * - * @return - * 0 on success, errno value on failure. + * @param txq + * Pointer to TX queue structure. + * @param wqe + * Pointer to the WQE to fill. + * @param addr + * Buffer data address. + * @param length + * Packet length. + * @param lkey + * Memory region lkey. + * @param vlan_tci + * VLAN field to insert in packet. */ -static inline int -insert_vlan_sw(struct rte_mbuf *buf) +static inline void +mlx5_wqe_write_inline_vlan(struct txq *txq, volatile union mlx5_wqe *wqe, + uintptr_t addr, uint32_t length, uint16_t vlan_tci) { - uintptr_t addr; - uint32_t vlan; - uint16_t head_room_len = rte_pktmbuf_headroom(buf); + uint32_t size; + uint32_t wqe_cnt = txq->wqe_n - 1; + uint16_t wqe_ci = txq->wqe_ci + 1; + uint32_t vlan = htonl(0x81000000 | vlan_tci); - if (head_room_len < 4) - return EINVAL; + /* + * Copy 12 bytes of source & destination MAC address. + * Copy 4 bytes of VLAN. + * Copy 2 bytes of Ether type. + */ + rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start, + (uint8_t *)addr, 12); + rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 12, + &vlan, sizeof(vlan)); + rte_memcpy((uint8_t *)(uintptr_t)wqe->inl.eseg.inline_hdr_start + 16, + ((uint8_t *)addr + 12), 2); + addr += MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); + length -= MLX5_ETH_VLAN_INLINE_HEADER_SIZE - sizeof(vlan); + size = (sizeof(wqe->inl.ctrl.ctrl) + + sizeof(wqe->inl.eseg) + + sizeof(wqe->inl.byte_cnt) + + length + 15) / 16; + wqe->inl.byte_cnt = htonl(length | MLX5_INLINE_SEG); + rte_memcpy((void *)(uintptr_t)&wqe->inl.data[0], + (void *)addr, MLX5_WQE64_INL_DATA); + addr += MLX5_WQE64_INL_DATA; + length -= MLX5_WQE64_INL_DATA; + while (length) { + volatile union mlx5_wqe *wqe_next = + &(*txq->wqes)[wqe_ci & wqe_cnt]; + uint32_t copy_bytes = (length > sizeof(*wqe)) ? 
+ sizeof(*wqe) : + length; - addr = rte_pktmbuf_mtod(buf, uintptr_t); - vlan = htonl(0x81000000 | buf->vlan_tci); - memmove((void *)(addr - 4), (void *)addr, 12); - memcpy((void *)(addr + 8), &vlan, sizeof(vlan)); + rte_mov64((uint8_t *)(uintptr_t)&wqe_next->data[0], + (uint8_t *)addr); + addr += copy_bytes; + length -= copy_bytes; + ++wqe_ci; + } + assert(size < 64); + wqe->inl.ctrl.data[0] = htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND); + wqe->inl.ctrl.data[1] = htonl(txq->qp_num_8s | size); + wqe->inl.ctrl.data[2] = 0; + wqe->inl.ctrl.data[3] = 0; + wqe->inl.eseg.rsvd0 = 0; + wqe->inl.eseg.rsvd1 = 0; + wqe->inl.eseg.mss = 0; + wqe->inl.eseg.rsvd2 = 0; + wqe->inl.eseg.inline_hdr_sz = htons(MLX5_ETH_VLAN_INLINE_HEADER_SIZE); + /* Increment consumer index. */ + txq->wqe_ci = wqe_ci; +} + +/** + * Ring TX queue doorbell. + * + * @param txq + * Pointer to TX queue structure. + */ +static inline void +mlx5_tx_dbrec(struct txq *txq) +{ + uint8_t *dst = (uint8_t *)((uintptr_t)txq->bf_reg + txq->bf_offset); + uint32_t data[4] = { + htonl((txq->wqe_ci << 8) | MLX5_OPCODE_SEND), + htonl(txq->qp_num_8s), + 0, + 0, + }; + rte_wmb(); + *txq->qp_db = htonl(txq->wqe_ci); + /* Ensure ordering between DB record and BF copy. */ + rte_wmb(); + rte_mov16(dst, (uint8_t *)data); + txq->bf_offset ^= txq->bf_buf_size; +} - SET_DATA_OFF(buf, head_room_len - 4); - DATA_LEN(buf) += 4; +/** + * Prefetch a CQE. + * + * @param txq + * Pointer to TX queue structure. + * @param cqe_ci + * CQE consumer index. + */ +static inline void +tx_prefetch_cqe(struct txq *txq, uint16_t ci) +{ + volatile struct mlx5_cqe64 *cqe; - return 0; + cqe = &(*txq->cqes)[ci & (txq->cqe_n - 1)].cqe64; + rte_prefetch0(cqe); } -#if MLX5_PMD_SGE_WR_N > 1 +/** + * Prefetch a WQE. + * + * @param txq + * Pointer to TX queue structure. + * @param wqe_ci + * WQE consumer index. + */ +static inline void +tx_prefetch_wqe(struct txq *txq, uint16_t ci) +{ + volatile union mlx5_wqe *wqe; + + wqe = &(*txq->wqes)[ci & (txq->wqe_n - 1)]; + rte_prefetch0(wqe); +} /** - * Copy scattered mbuf contents to a single linear buffer. + * DPDK callback for TX. * - * @param[out] linear - * Linear output buffer. - * @param[in] buf - * Scattered input buffer. + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. * * @return - * Number of bytes copied to the output buffer or 0 if not large enough. + * Number of packets successfully transmitted (<= pkts_n). */ -static unsigned int -linearize_mbuf(linear_t *linear, struct rte_mbuf *buf) +uint16_t +mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) { - unsigned int size = 0; - unsigned int offset; + struct txq *txq = (struct txq *)dpdk_txq; + uint16_t elts_head = txq->elts_head; + const unsigned int elts_n = txq->elts_n; + unsigned int i = 0; + unsigned int j = 0; + unsigned int max; + unsigned int comp; + volatile union mlx5_wqe *wqe = NULL; + if (unlikely(!pkts_n)) + return 0; + /* Prefetch first packet cacheline. */ + tx_prefetch_cqe(txq, txq->cq_ci); + tx_prefetch_cqe(txq, txq->cq_ci + 1); + rte_prefetch0(*pkts); + /* Start processing. 
*/ + txq_complete(txq); + max = (elts_n - (elts_head - txq->elts_tail)); + if (max > elts_n) + max -= elts_n; do { - unsigned int len = DATA_LEN(buf); + struct rte_mbuf *buf = *(pkts++); + unsigned int elts_head_next; + uintptr_t addr; + uint32_t length; + uint32_t lkey; + unsigned int segs_n = buf->nb_segs; + volatile struct mlx5_wqe_data_seg *dseg; + unsigned int ds = sizeof(*wqe) / 16; - offset = size; - size += len; - if (unlikely(size > sizeof(*linear))) - return 0; - memcpy(&(*linear)[offset], - rte_pktmbuf_mtod(buf, uint8_t *), - len); - buf = NEXT(buf); - } while (buf != NULL); - return size; + /* + * Make sure there is enough room to store this packet and + * that one ring entry remains unused. + */ + assert(segs_n); + if (max < segs_n + 1) + break; + max -= segs_n; + --pkts_n; + elts_head_next = (elts_head + 1) & (elts_n - 1); + wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)]; + dseg = &wqe->wqe.dseg; + rte_prefetch0(wqe); + if (pkts_n) + rte_prefetch0(*pkts); + /* Retrieve buffer information. */ + addr = rte_pktmbuf_mtod(buf, uintptr_t); + length = DATA_LEN(buf); + /* Update element. */ + (*txq->elts)[elts_head] = buf; + /* Prefetch next buffer data. */ + if (pkts_n) + rte_prefetch0(rte_pktmbuf_mtod(*pkts, + volatile void *)); + /* Retrieve Memory Region key for this memory pool. */ + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + if (buf->ol_flags & PKT_TX_VLAN_PKT) + mlx5_wqe_write_vlan(txq, wqe, addr, length, lkey, + buf->vlan_tci); + else + mlx5_wqe_write(txq, wqe, addr, length, lkey); + /* Should we enable HW CKSUM offload */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { + wqe->wqe.eseg.cs_flags = + MLX5_ETH_WQE_L3_CSUM | + MLX5_ETH_WQE_L4_CSUM; + } else { + wqe->wqe.eseg.cs_flags = 0; + } + while (--segs_n) { + /* + * Spill on next WQE when the current one does not have + * enough room left. Size of WQE must a be a multiple + * of data segment size. + */ + assert(!(sizeof(*wqe) % sizeof(*dseg))); + if (!(ds % (sizeof(*wqe) / 16))) + dseg = (volatile void *) + &(*txq->wqes)[txq->wqe_ci++ & + (txq->wqe_n - 1)]; + else + ++dseg; + ++ds; + buf = buf->next; + assert(buf); + /* Store segment information. */ + dseg->byte_count = htonl(DATA_LEN(buf)); + dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + (*txq->elts)[elts_head_next] = buf; + elts_head_next = (elts_head_next + 1) & (elts_n - 1); +#ifdef MLX5_PMD_SOFT_COUNTERS + length += DATA_LEN(buf); +#endif + ++j; + } + /* Update DS field in WQE. */ + wqe->wqe.ctrl.data[1] &= htonl(0xffffffc0); + wqe->wqe.ctrl.data[1] |= htonl(ds & 0x3f); + elts_head = elts_head_next; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent bytes counter. */ + txq->stats.obytes += length; +#endif + elts_head = elts_head_next; + ++i; + } while (pkts_n); + /* Take a shortcut if nothing must be sent. */ + if (unlikely(i == 0)) + return 0; + /* Check whether completion threshold has been reached. */ + comp = txq->elts_comp + i + j; + if (comp >= MLX5_TX_COMP_THRESH) { + /* Request completion on last WQE. */ + wqe->wqe.ctrl.data[2] = htonl(8); + /* Save elts_head in unused "immediate" field of WQE. */ + wqe->wqe.ctrl.data[3] = elts_head; + txq->elts_comp = 0; + } else { + txq->elts_comp = comp; + } +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent packets counter. */ + txq->stats.opackets += i; +#endif + /* Ring QP doorbell. */ + mlx5_tx_dbrec(txq); + txq->elts_head = elts_head; + return i; } /** - * Handle scattered buffers for mlx5_tx_burst(). 
+ * DPDK callback for TX with inline support. * - * @param txq - * TX queue structure. - * @param segs - * Number of segments in buf. - * @param elt - * TX queue element to fill. - * @param[in] buf - * Buffer to process. - * @param elts_head - * Index of the linear buffer to use if necessary (normally txq->elts_head). - * @param[out] sges - * Array filled with SGEs on success. + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. * * @return - * A structure containing the processed packet size in bytes and the - * number of SGEs. Both fields are set to (unsigned int)-1 in case of - * failure. + * Number of packets successfully transmitted (<= pkts_n). */ -static struct tx_burst_sg_ret { - unsigned int length; - unsigned int num; -} -tx_burst_sg(struct txq *txq, unsigned int segs, struct txq_elt *elt, - struct rte_mbuf *buf, unsigned int elts_head, - struct ibv_sge (*sges)[MLX5_PMD_SGE_WR_N]) +uint16_t +mlx5_tx_burst_inline(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) { - unsigned int sent_size = 0; - unsigned int j; - int linearize = 0; - - /* When there are too many segments, extra segments are - * linearized in the last SGE. */ - if (unlikely(segs > RTE_DIM(*sges))) { - segs = (RTE_DIM(*sges) - 1); - linearize = 1; - } - /* Update element. */ - elt->buf = buf; - /* Register segments as SGEs. */ - for (j = 0; (j != segs); ++j) { - struct ibv_sge *sge = &(*sges)[j]; + struct txq *txq = (struct txq *)dpdk_txq; + uint16_t elts_head = txq->elts_head; + const unsigned int elts_n = txq->elts_n; + unsigned int i = 0; + unsigned int j = 0; + unsigned int max; + unsigned int comp; + volatile union mlx5_wqe *wqe = NULL; + unsigned int max_inline = txq->max_inline; + + if (unlikely(!pkts_n)) + return 0; + /* Prefetch first packet cacheline. */ + tx_prefetch_cqe(txq, txq->cq_ci); + tx_prefetch_cqe(txq, txq->cq_ci + 1); + rte_prefetch0(*pkts); + /* Start processing. */ + txq_complete(txq); + max = (elts_n - (elts_head - txq->elts_tail)); + if (max > elts_n) + max -= elts_n; + do { + struct rte_mbuf *buf = *(pkts++); + unsigned int elts_head_next; + uintptr_t addr; + uint32_t length; uint32_t lkey; + unsigned int segs_n = buf->nb_segs; + volatile struct mlx5_wqe_data_seg *dseg; + unsigned int ds = sizeof(*wqe) / 16; - /* Retrieve Memory Region key for this memory pool. */ - lkey = txq_mp2mr(txq, txq_mb2mp(buf)); - if (unlikely(lkey == (uint32_t)-1)) { - /* MR does not exist. */ - DEBUG("%p: unable to get MP <-> MR association", - (void *)txq); - /* Clean up TX element. */ - elt->buf = NULL; - goto stop; + /* + * Make sure there is enough room to store this packet and + * that one ring entry remains unused. + */ + assert(segs_n); + if (max < segs_n + 1) + break; + max -= segs_n; + --pkts_n; + elts_head_next = (elts_head + 1) & (elts_n - 1); + wqe = &(*txq->wqes)[txq->wqe_ci & (txq->wqe_n - 1)]; + dseg = &wqe->wqe.dseg; + tx_prefetch_wqe(txq, txq->wqe_ci); + tx_prefetch_wqe(txq, txq->wqe_ci + 1); + if (pkts_n) + rte_prefetch0(*pkts); + /* Should we enable HW CKSUM offload */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { + wqe->inl.eseg.cs_flags = + MLX5_ETH_WQE_L3_CSUM | + MLX5_ETH_WQE_L4_CSUM; + } else { + wqe->inl.eseg.cs_flags = 0; } - /* Update SGE. 
*/ - sge->addr = rte_pktmbuf_mtod(buf, uintptr_t); - if (txq->priv->vf) - rte_prefetch0((volatile void *) - (uintptr_t)sge->addr); - sge->length = DATA_LEN(buf); - sge->lkey = lkey; - sent_size += sge->length; - buf = NEXT(buf); - } - /* If buf is not NULL here and is not going to be linearized, - * nb_segs is not valid. */ - assert(j == segs); - assert((buf == NULL) || (linearize)); - /* Linearize extra segments. */ - if (linearize) { - struct ibv_sge *sge = &(*sges)[segs]; - linear_t *linear = &(*txq->elts_linear)[elts_head]; - unsigned int size = linearize_mbuf(linear, buf); - - assert(segs == (RTE_DIM(*sges) - 1)); - if (size == 0) { - /* Invalid packet. */ - DEBUG("%p: packet too large to be linearized.", - (void *)txq); - /* Clean up TX element. */ - elt->buf = NULL; - goto stop; + /* Retrieve buffer information. */ + addr = rte_pktmbuf_mtod(buf, uintptr_t); + length = DATA_LEN(buf); + /* Update element. */ + (*txq->elts)[elts_head] = buf; + /* Prefetch next buffer data. */ + if (pkts_n) + rte_prefetch0(rte_pktmbuf_mtod(*pkts, + volatile void *)); + if ((length <= max_inline) && (segs_n == 1)) { + if (buf->ol_flags & PKT_TX_VLAN_PKT) + mlx5_wqe_write_inline_vlan(txq, wqe, + addr, length, + buf->vlan_tci); + else + mlx5_wqe_write_inline(txq, wqe, addr, length); + goto skip_segs; + } else { + /* Retrieve Memory Region key for this memory pool. */ + lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + if (buf->ol_flags & PKT_TX_VLAN_PKT) + mlx5_wqe_write_vlan(txq, wqe, addr, length, + lkey, buf->vlan_tci); + else + mlx5_wqe_write(txq, wqe, addr, length, lkey); } - /* If MLX5_PMD_SGE_WR_N is 1, free mbuf immediately. */ - if (RTE_DIM(*sges) == 1) { - do { - struct rte_mbuf *next = NEXT(buf); - - rte_pktmbuf_free_seg(buf); - buf = next; - } while (buf != NULL); - elt->buf = NULL; + while (--segs_n) { + /* + * Spill on next WQE when the current one does not have + * enough room left. Size of WQE must a be a multiple + * of data segment size. + */ + assert(!(sizeof(*wqe) % sizeof(*dseg))); + if (!(ds % (sizeof(*wqe) / 16))) + dseg = (volatile void *) + &(*txq->wqes)[txq->wqe_ci++ & + (txq->wqe_n - 1)]; + else + ++dseg; + ++ds; + buf = buf->next; + assert(buf); + /* Store segment information. */ + dseg->byte_count = htonl(DATA_LEN(buf)); + dseg->lkey = txq_mp2mr(txq, txq_mb2mp(buf)); + dseg->addr = htonll(rte_pktmbuf_mtod(buf, uintptr_t)); + (*txq->elts)[elts_head_next] = buf; + elts_head_next = (elts_head_next + 1) & (elts_n - 1); +#ifdef MLX5_PMD_SOFT_COUNTERS + length += DATA_LEN(buf); +#endif + ++j; } - /* Update SGE. */ - sge->addr = (uintptr_t)&(*linear)[0]; - sge->length = size; - sge->lkey = txq->mr_linear->lkey; - sent_size += size; - /* Include last segment. */ - segs++; + /* Update DS field in WQE. */ + wqe->inl.ctrl.data[1] &= htonl(0xffffffc0); + wqe->inl.ctrl.data[1] |= htonl(ds & 0x3f); +skip_segs: + elts_head = elts_head_next; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent bytes counter. */ + txq->stats.obytes += length; +#endif + ++i; + } while (pkts_n); + /* Take a shortcut if nothing must be sent. */ + if (unlikely(i == 0)) + return 0; + /* Check whether completion threshold has been reached. */ + comp = txq->elts_comp + i + j; + if (comp >= MLX5_TX_COMP_THRESH) { + /* Request completion on last WQE. */ + wqe->inl.ctrl.data[2] = htonl(8); + /* Save elts_head in unused "immediate" field of WQE. 
*/ + wqe->inl.ctrl.data[3] = elts_head; + txq->elts_comp = 0; + } else { + txq->elts_comp = comp; } - return (struct tx_burst_sg_ret){ - .length = sent_size, - .num = segs, - }; -stop: - return (struct tx_burst_sg_ret){ - .length = -1, - .num = -1, - }; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent packets counter. */ + txq->stats.opackets += i; +#endif + /* Ring QP doorbell. */ + mlx5_tx_dbrec(txq); + txq->elts_head = elts_head; + return i; } -#endif /* MLX5_PMD_SGE_WR_N > 1 */ +/** + * Open a MPW session. + * + * @param txq + * Pointer to TX queue structure. + * @param mpw + * Pointer to MPW session structure. + * @param length + * Packet length. + */ +static inline void +mlx5_mpw_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) +{ + uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1); + volatile struct mlx5_wqe_data_seg (*dseg)[MLX5_MPW_DSEG_MAX] = + (volatile struct mlx5_wqe_data_seg (*)[]) + (uintptr_t)&(*txq->wqes)[(idx + 1) & (txq->wqe_n - 1)]; + + mpw->state = MLX5_MPW_STATE_OPENED; + mpw->pkts_n = 0; + mpw->len = length; + mpw->total_len = 0; + mpw->wqe = &(*txq->wqes)[idx]; + mpw->wqe->mpw.eseg.mss = htons(length); + mpw->wqe->mpw.eseg.inline_hdr_sz = 0; + mpw->wqe->mpw.eseg.rsvd0 = 0; + mpw->wqe->mpw.eseg.rsvd1 = 0; + mpw->wqe->mpw.eseg.rsvd2 = 0; + mpw->wqe->mpw.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_LSO_MPW); + mpw->wqe->mpw.ctrl.data[2] = 0; + mpw->wqe->mpw.ctrl.data[3] = 0; + mpw->data.dseg[0] = &mpw->wqe->mpw.dseg[0]; + mpw->data.dseg[1] = &mpw->wqe->mpw.dseg[1]; + mpw->data.dseg[2] = &(*dseg)[0]; + mpw->data.dseg[3] = &(*dseg)[1]; + mpw->data.dseg[4] = &(*dseg)[2]; +} /** - * DPDK callback for TX. + * Close a MPW session. + * + * @param txq + * Pointer to TX queue structure. + * @param mpw + * Pointer to MPW session structure. + */ +static inline void +mlx5_mpw_close(struct txq *txq, struct mlx5_mpw *mpw) +{ + unsigned int num = mpw->pkts_n; + + /* + * Store size in multiple of 16 bytes. Control and Ethernet segments + * count as 2. + */ + mpw->wqe->mpw.ctrl.data[1] = htonl(txq->qp_num_8s | (2 + num)); + mpw->state = MLX5_MPW_STATE_CLOSED; + if (num < 3) + ++txq->wqe_ci; + else + txq->wqe_ci += 2; + tx_prefetch_wqe(txq, txq->wqe_ci); + tx_prefetch_wqe(txq, txq->wqe_ci + 1); +} + +/** + * DPDK callback for TX with MPW support. * * @param dpdk_txq * Generic pointer to TX queue structure. @@ -531,224 +959,399 @@ stop: * Number of packets successfully transmitted (<= pkts_n). */ uint16_t -mlx5_tx_burst(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_tx_burst_mpw(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) { struct txq *txq = (struct txq *)dpdk_txq; - unsigned int elts_head = txq->elts_head; + uint16_t elts_head = txq->elts_head; const unsigned int elts_n = txq->elts_n; - unsigned int elts_comp_cd = txq->elts_comp_cd; - unsigned int elts_comp = 0; - unsigned int i; + unsigned int i = 0; + unsigned int j = 0; unsigned int max; - int err; - struct rte_mbuf *buf = pkts[0]; + unsigned int comp; + struct mlx5_mpw mpw = { + .state = MLX5_MPW_STATE_CLOSED, + }; - assert(elts_comp_cd != 0); + if (unlikely(!pkts_n)) + return 0; /* Prefetch first packet cacheline. */ - rte_prefetch0(buf); + tx_prefetch_cqe(txq, txq->cq_ci); + tx_prefetch_wqe(txq, txq->wqe_ci); + tx_prefetch_wqe(txq, txq->wqe_ci + 1); + /* Start processing. 
*/ txq_complete(txq); max = (elts_n - (elts_head - txq->elts_tail)); if (max > elts_n) max -= elts_n; - assert(max >= 1); - assert(max <= elts_n); - /* Always leave one free entry in the ring. */ - --max; - if (max == 0) + do { + struct rte_mbuf *buf = *(pkts++); + unsigned int elts_head_next; + uint32_t length; + unsigned int segs_n = buf->nb_segs; + uint32_t cs_flags = 0; + + /* + * Make sure there is enough room to store this packet and + * that one ring entry remains unused. + */ + assert(segs_n); + if (max < segs_n + 1) + break; + /* Do not bother with large packets MPW cannot handle. */ + if (segs_n > MLX5_MPW_DSEG_MAX) + break; + max -= segs_n; + --pkts_n; + /* Should we enable HW CKSUM offload */ + if (buf->ol_flags & + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) + cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; + /* Retrieve packet information. */ + length = PKT_LEN(buf); + assert(length); + /* Start new session if packet differs. */ + if ((mpw.state == MLX5_MPW_STATE_OPENED) && + ((mpw.len != length) || + (segs_n != 1) || + (mpw.wqe->mpw.eseg.cs_flags != cs_flags))) + mlx5_mpw_close(txq, &mpw); + if (mpw.state == MLX5_MPW_STATE_CLOSED) { + mlx5_mpw_new(txq, &mpw, length); + mpw.wqe->mpw.eseg.cs_flags = cs_flags; + } + /* Multi-segment packets must be alone in their MPW. */ + assert((segs_n == 1) || (mpw.pkts_n == 0)); +#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) + length = 0; +#endif + do { + volatile struct mlx5_wqe_data_seg *dseg; + uintptr_t addr; + + elts_head_next = (elts_head + 1) & (elts_n - 1); + assert(buf); + (*txq->elts)[elts_head] = buf; + dseg = mpw.data.dseg[mpw.pkts_n]; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + *dseg = (struct mlx5_wqe_data_seg){ + .byte_count = htonl(DATA_LEN(buf)), + .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), + .addr = htonll(addr), + }; + elts_head = elts_head_next; +#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) + length += DATA_LEN(buf); +#endif + buf = buf->next; + ++mpw.pkts_n; + ++j; + } while (--segs_n); + assert(length == mpw.len); + if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) + mlx5_mpw_close(txq, &mpw); + elts_head = elts_head_next; +#ifdef MLX5_PMD_SOFT_COUNTERS + /* Increment sent bytes counter. */ + txq->stats.obytes += length; +#endif + ++i; + } while (pkts_n); + /* Take a shortcut if nothing must be sent. */ + if (unlikely(i == 0)) return 0; - if (max > pkts_n) - max = pkts_n; - for (i = 0; (i != max); ++i) { - struct rte_mbuf *buf_next = pkts[i + 1]; - unsigned int elts_head_next = - (((elts_head + 1) == elts_n) ? 0 : elts_head + 1); - struct txq_elt *elt = &(*txq->elts)[elts_head]; - unsigned int segs = NB_SEGS(buf); + /* Check whether completion threshold has been reached. */ + /* "j" includes both packets and segments. */ + comp = txq->elts_comp + j; + if (comp >= MLX5_TX_COMP_THRESH) { + volatile union mlx5_wqe *wqe = mpw.wqe; + + /* Request completion on last WQE. */ + wqe->mpw.ctrl.data[2] = htonl(8); + /* Save elts_head in unused "immediate" field of WQE. */ + wqe->mpw.ctrl.data[3] = elts_head; + txq->elts_comp = 0; + } else { + txq->elts_comp = comp; + } #ifdef MLX5_PMD_SOFT_COUNTERS - unsigned int sent_size = 0; + /* Increment sent packets counter. */ + txq->stats.opackets += i; #endif - uint32_t send_flags = 0; -#ifdef HAVE_VERBS_VLAN_INSERTION - int insert_vlan = 0; -#endif /* HAVE_VERBS_VLAN_INSERTION */ - - if (i + 1 < max) - rte_prefetch0(buf_next); - /* Request TX completion. 
*/ - if (unlikely(--elts_comp_cd == 0)) { - elts_comp_cd = txq->elts_comp_cd_init; - ++elts_comp; - send_flags |= IBV_EXP_QP_BURST_SIGNALED; - } + /* Ring QP doorbell. */ + if (mpw.state == MLX5_MPW_STATE_OPENED) + mlx5_mpw_close(txq, &mpw); + mlx5_tx_dbrec(txq); + txq->elts_head = elts_head; + return i; +} + +/** + * Open a MPW inline session. + * + * @param txq + * Pointer to TX queue structure. + * @param mpw + * Pointer to MPW session structure. + * @param length + * Packet length. + */ +static inline void +mlx5_mpw_inline_new(struct txq *txq, struct mlx5_mpw *mpw, uint32_t length) +{ + uint16_t idx = txq->wqe_ci & (txq->wqe_n - 1); + + mpw->state = MLX5_MPW_INL_STATE_OPENED; + mpw->pkts_n = 0; + mpw->len = length; + mpw->total_len = 0; + mpw->wqe = &(*txq->wqes)[idx]; + mpw->wqe->mpw_inl.ctrl.data[0] = htonl((MLX5_OPC_MOD_MPW << 24) | + (txq->wqe_ci << 8) | + MLX5_OPCODE_LSO_MPW); + mpw->wqe->mpw_inl.ctrl.data[2] = 0; + mpw->wqe->mpw_inl.ctrl.data[3] = 0; + mpw->wqe->mpw_inl.eseg.mss = htons(length); + mpw->wqe->mpw_inl.eseg.inline_hdr_sz = 0; + mpw->wqe->mpw_inl.eseg.cs_flags = 0; + mpw->wqe->mpw_inl.eseg.rsvd0 = 0; + mpw->wqe->mpw_inl.eseg.rsvd1 = 0; + mpw->wqe->mpw_inl.eseg.rsvd2 = 0; + mpw->data.raw = &mpw->wqe->mpw_inl.data[0]; +} + +/** + * Close a MPW inline session. + * + * @param txq + * Pointer to TX queue structure. + * @param mpw + * Pointer to MPW session structure. + */ +static inline void +mlx5_mpw_inline_close(struct txq *txq, struct mlx5_mpw *mpw) +{ + unsigned int size; + + size = sizeof(*mpw->wqe) - MLX5_MWQE64_INL_DATA + mpw->total_len; + /* + * Store size in multiple of 16 bytes. Control and Ethernet segments + * count as 2. + */ + mpw->wqe->mpw_inl.ctrl.data[1] = + htonl(txq->qp_num_8s | ((size + 15) / 16)); + mpw->state = MLX5_MPW_STATE_CLOSED; + mpw->wqe->mpw_inl.byte_cnt = htonl(mpw->total_len | MLX5_INLINE_SEG); + txq->wqe_ci += (size + (sizeof(*mpw->wqe) - 1)) / sizeof(*mpw->wqe); +} + +/** + * DPDK callback for TX with MPW inline support. + * + * @param dpdk_txq + * Generic pointer to TX queue structure. + * @param[in] pkts + * Packets to transmit. + * @param pkts_n + * Number of packets in array. + * + * @return + * Number of packets successfully transmitted (<= pkts_n). + */ +uint16_t +mlx5_tx_burst_mpw_inline(void *dpdk_txq, struct rte_mbuf **pkts, + uint16_t pkts_n) +{ + struct txq *txq = (struct txq *)dpdk_txq; + uint16_t elts_head = txq->elts_head; + const unsigned int elts_n = txq->elts_n; + unsigned int i = 0; + unsigned int j = 0; + unsigned int max; + unsigned int comp; + unsigned int inline_room = txq->max_inline; + struct mlx5_mpw mpw = { + .state = MLX5_MPW_STATE_CLOSED, + }; + + if (unlikely(!pkts_n)) + return 0; + /* Prefetch first packet cacheline. */ + tx_prefetch_cqe(txq, txq->cq_ci); + tx_prefetch_wqe(txq, txq->wqe_ci); + tx_prefetch_wqe(txq, txq->wqe_ci + 1); + /* Start processing. */ + txq_complete(txq); + max = (elts_n - (elts_head - txq->elts_tail)); + if (max > elts_n) + max -= elts_n; + do { + struct rte_mbuf *buf = *(pkts++); + unsigned int elts_head_next; + uintptr_t addr; + uint32_t length; + unsigned int segs_n = buf->nb_segs; + uint32_t cs_flags = 0; + + /* + * Make sure there is enough room to store this packet and + * that one ring entry remains unused. + */ + assert(segs_n); + if (max < segs_n + 1) + break; + /* Do not bother with large packets MPW cannot handle. 
*/ + if (segs_n > MLX5_MPW_DSEG_MAX) + break; + max -= segs_n; + --pkts_n; /* Should we enable HW CKSUM offload */ if (buf->ol_flags & - (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { - send_flags |= IBV_EXP_QP_BURST_IP_CSUM; - /* HW does not support checksum offloads at arbitrary - * offsets but automatically recognizes the packet - * type. For inner L3/L4 checksums, only VXLAN (UDP) - * tunnels are currently supported. */ - if (RTE_ETH_IS_TUNNEL_PKT(buf->packet_type)) - send_flags |= IBV_EXP_QP_BURST_TUNNEL; + (PKT_TX_IP_CKSUM | PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) + cs_flags = MLX5_ETH_WQE_L3_CSUM | MLX5_ETH_WQE_L4_CSUM; + /* Retrieve packet information. */ + length = PKT_LEN(buf); + /* Start new session if packet differs. */ + if (mpw.state == MLX5_MPW_STATE_OPENED) { + if ((mpw.len != length) || + (segs_n != 1) || + (mpw.wqe->mpw.eseg.cs_flags != cs_flags)) + mlx5_mpw_close(txq, &mpw); + } else if (mpw.state == MLX5_MPW_INL_STATE_OPENED) { + if ((mpw.len != length) || + (segs_n != 1) || + (length > inline_room) || + (mpw.wqe->mpw_inl.eseg.cs_flags != cs_flags)) { + mlx5_mpw_inline_close(txq, &mpw); + inline_room = txq->max_inline; + } } - if (buf->ol_flags & PKT_TX_VLAN_PKT) { -#ifdef HAVE_VERBS_VLAN_INSERTION - if (!txq->priv->mps) - insert_vlan = 1; - else -#endif /* HAVE_VERBS_VLAN_INSERTION */ - { - err = insert_vlan_sw(buf); - if (unlikely(err)) - goto stop; + if (mpw.state == MLX5_MPW_STATE_CLOSED) { + if ((segs_n != 1) || + (length > inline_room)) { + mlx5_mpw_new(txq, &mpw, length); + mpw.wqe->mpw.eseg.cs_flags = cs_flags; + } else { + mlx5_mpw_inline_new(txq, &mpw, length); + mpw.wqe->mpw_inl.eseg.cs_flags = cs_flags; } } - if (likely(segs == 1)) { - uintptr_t addr; - uint32_t length; - uint32_t lkey; - uintptr_t buf_next_addr; + /* Multi-segment packets must be alone in their MPW. */ + assert((segs_n == 1) || (mpw.pkts_n == 0)); + if (mpw.state == MLX5_MPW_STATE_OPENED) { + assert(inline_room == txq->max_inline); +#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) + length = 0; +#endif + do { + volatile struct mlx5_wqe_data_seg *dseg; + + elts_head_next = + (elts_head + 1) & (elts_n - 1); + assert(buf); + (*txq->elts)[elts_head] = buf; + dseg = mpw.data.dseg[mpw.pkts_n]; + addr = rte_pktmbuf_mtod(buf, uintptr_t); + *dseg = (struct mlx5_wqe_data_seg){ + .byte_count = htonl(DATA_LEN(buf)), + .lkey = txq_mp2mr(txq, txq_mb2mp(buf)), + .addr = htonll(addr), + }; + elts_head = elts_head_next; +#if defined(MLX5_PMD_SOFT_COUNTERS) || !defined(NDEBUG) + length += DATA_LEN(buf); +#endif + buf = buf->next; + ++mpw.pkts_n; + ++j; + } while (--segs_n); + assert(length == mpw.len); + if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) + mlx5_mpw_close(txq, &mpw); + } else { + unsigned int max; - /* Retrieve buffer information. */ + assert(mpw.state == MLX5_MPW_INL_STATE_OPENED); + assert(length <= inline_room); + assert(length == DATA_LEN(buf)); + elts_head_next = (elts_head + 1) & (elts_n - 1); addr = rte_pktmbuf_mtod(buf, uintptr_t); - length = DATA_LEN(buf); - /* Update element. */ - elt->buf = buf; - if (txq->priv->vf) - rte_prefetch0((volatile void *) - (uintptr_t)addr); - /* Prefetch next buffer data. */ - if (i + 1 < max) { - buf_next_addr = - rte_pktmbuf_mtod(buf_next, uintptr_t); - rte_prefetch0((volatile void *) - (uintptr_t)buf_next_addr); + (*txq->elts)[elts_head] = buf; + /* Maximum number of bytes before wrapping. 
*/ + max = ((uintptr_t)&(*txq->wqes)[txq->wqe_n] - + (uintptr_t)mpw.data.raw); + if (length > max) { + rte_memcpy((void *)(uintptr_t)mpw.data.raw, + (void *)addr, + max); + mpw.data.raw = + (volatile void *)&(*txq->wqes)[0]; + rte_memcpy((void *)(uintptr_t)mpw.data.raw, + (void *)(addr + max), + length - max); + mpw.data.raw += length - max; + } else { + rte_memcpy((void *)(uintptr_t)mpw.data.raw, + (void *)addr, + length); + mpw.data.raw += length; } - /* Put packet into send queue. */ -#if MLX5_PMD_MAX_INLINE > 0 - if (length <= txq->max_inline) { -#ifdef HAVE_VERBS_VLAN_INSERTION - if (insert_vlan) - err = txq->send_pending_inline_vlan - (txq->qp, - (void *)addr, - length, - send_flags, - &buf->vlan_tci); - else -#endif /* HAVE_VERBS_VLAN_INSERTION */ - err = txq->send_pending_inline - (txq->qp, - (void *)addr, - length, - send_flags); - } else -#endif - { - /* Retrieve Memory Region key for this - * memory pool. */ - lkey = txq_mp2mr(txq, txq_mb2mp(buf)); - if (unlikely(lkey == (uint32_t)-1)) { - /* MR does not exist. */ - DEBUG("%p: unable to get MP <-> MR" - " association", (void *)txq); - /* Clean up TX element. */ - elt->buf = NULL; - goto stop; - } -#ifdef HAVE_VERBS_VLAN_INSERTION - if (insert_vlan) - err = txq->send_pending_vlan - (txq->qp, - addr, - length, - lkey, - send_flags, - &buf->vlan_tci); - else -#endif /* HAVE_VERBS_VLAN_INSERTION */ - err = txq->send_pending - (txq->qp, - addr, - length, - lkey, - send_flags); + if ((uintptr_t)mpw.data.raw == + (uintptr_t)&(*txq->wqes)[txq->wqe_n]) + mpw.data.raw = + (volatile void *)&(*txq->wqes)[0]; + ++mpw.pkts_n; + ++j; + if (mpw.pkts_n == MLX5_MPW_DSEG_MAX) { + mlx5_mpw_inline_close(txq, &mpw); + inline_room = txq->max_inline; + } else { + inline_room -= length; } - if (unlikely(err)) - goto stop; -#ifdef MLX5_PMD_SOFT_COUNTERS - sent_size += length; -#endif - } else { -#if MLX5_PMD_SGE_WR_N > 1 - struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; - struct tx_burst_sg_ret ret; - - ret = tx_burst_sg(txq, segs, elt, buf, elts_head, - &sges); - if (ret.length == (unsigned int)-1) - goto stop; - /* Put SG list into send queue. */ -#ifdef HAVE_VERBS_VLAN_INSERTION - if (insert_vlan) - err = txq->send_pending_sg_list_vlan - (txq->qp, - sges, - ret.num, - send_flags, - &buf->vlan_tci); - else -#endif /* HAVE_VERBS_VLAN_INSERTION */ - err = txq->send_pending_sg_list - (txq->qp, - sges, - ret.num, - send_flags); - if (unlikely(err)) - goto stop; -#ifdef MLX5_PMD_SOFT_COUNTERS - sent_size += ret.length; -#endif -#else /* MLX5_PMD_SGE_WR_N > 1 */ - DEBUG("%p: TX scattered buffers support not" - " compiled in", (void *)txq); - goto stop; -#endif /* MLX5_PMD_SGE_WR_N > 1 */ } + mpw.total_len += length; elts_head = elts_head_next; - buf = buf_next; #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment sent bytes counter. */ - txq->stats.obytes += sent_size; + txq->stats.obytes += length; #endif - } -stop: + ++i; + } while (pkts_n); /* Take a shortcut if nothing must be sent. */ if (unlikely(i == 0)) return 0; + /* Check whether completion threshold has been reached. */ + /* "j" includes both packets and segments. */ + comp = txq->elts_comp + j; + if (comp >= MLX5_TX_COMP_THRESH) { + volatile union mlx5_wqe *wqe = mpw.wqe; + + /* Request completion on last WQE. */ + wqe->mpw_inl.ctrl.data[2] = htonl(8); + /* Save elts_head in unused "immediate" field of WQE. */ + wqe->mpw_inl.ctrl.data[3] = elts_head; + txq->elts_comp = 0; + } else { + txq->elts_comp = comp; + } #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment sent packets counter. 
*/ txq->stats.opackets += i; #endif /* Ring QP doorbell. */ - err = txq->send_flush(txq->qp); - if (unlikely(err)) { - /* A nonzero value is not supposed to be returned. - * Nothing can be done about it. */ - DEBUG("%p: send_flush() failed with error %d", - (void *)txq, err); - } + if (mpw.state == MLX5_MPW_INL_STATE_OPENED) + mlx5_mpw_inline_close(txq, &mpw); + else if (mpw.state == MLX5_MPW_STATE_OPENED) + mlx5_mpw_close(txq, &mpw); + mlx5_tx_dbrec(txq); txq->elts_head = elts_head; - txq->elts_comp += elts_comp; - txq->elts_comp_cd = elts_comp_cd; return i; } /** * Translate RX completion flags to packet type. * - * @param flags - * RX completion flags returned by poll_length_flags(). + * @param[in] cqe + * Pointer to CQE. * * @note: fix mlx5_dev_supported_ptypes_get() if any change here. * @@ -756,11 +1359,13 @@ stop: * Packet type for struct rte_mbuf. */ static inline uint32_t -rxq_cq_to_pkt_type(uint32_t flags) +rxq_cq_to_pkt_type(volatile struct mlx5_cqe64 *cqe) { uint32_t pkt_type; + uint8_t flags = cqe->l4_hdr_type_etc; + uint8_t info = cqe->rsvd0[0]; - if (flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) + if (info & IBV_EXP_CQ_RX_TUNNEL_PACKET) pkt_type = TRANSPOSE(flags, IBV_EXP_CQ_RX_OUTER_IPV4_PACKET, @@ -777,66 +1382,157 @@ rxq_cq_to_pkt_type(uint32_t flags) else pkt_type = TRANSPOSE(flags, - IBV_EXP_CQ_RX_IPV4_PACKET, - RTE_PTYPE_L3_IPV4) | + MLX5_CQE_L3_HDR_TYPE_IPV6, + RTE_PTYPE_L3_IPV6) | TRANSPOSE(flags, - IBV_EXP_CQ_RX_IPV6_PACKET, - RTE_PTYPE_L3_IPV6); + MLX5_CQE_L3_HDR_TYPE_IPV4, + RTE_PTYPE_L3_IPV4); return pkt_type; } /** + * Get size of the next packet for a given CQE. For compressed CQEs, the + * consumer index is updated only once all packets of the current one have + * been processed. + * + * @param rxq + * Pointer to RX queue. + * @param cqe + * CQE to process. + * + * @return + * Packet size in bytes (0 if there is none), -1 in case of completion + * with error. + */ +static inline int +mlx5_rx_poll_len(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe, + uint16_t cqe_cnt) +{ + struct rxq_zip *zip = &rxq->zip; + uint16_t cqe_n = cqe_cnt + 1; + int len = 0; + + /* Process compressed data in the CQE and mini arrays. */ + if (zip->ai) { + volatile struct mlx5_mini_cqe8 (*mc)[8] = + (volatile struct mlx5_mini_cqe8 (*)[8]) + (uintptr_t)(&(*rxq->cqes)[zip->ca & cqe_cnt].cqe64); + + len = ntohl((*mc)[zip->ai & 7].byte_cnt); + if ((++zip->ai & 7) == 0) { + /* + * Increment consumer index to skip the number of + * CQEs consumed. Hardware leaves holes in the CQ + * ring for software use. + */ + zip->ca = zip->na; + zip->na += 8; + } + if (unlikely(rxq->zip.ai == rxq->zip.cqe_cnt)) { + uint16_t idx = rxq->cq_ci; + uint16_t end = zip->cq_ci; + + while (idx != end) { + (*rxq->cqes)[idx & cqe_cnt].cqe64.op_own = + MLX5_CQE_INVALIDATE; + ++idx; + } + rxq->cq_ci = zip->cq_ci; + zip->ai = 0; + } + /* No compressed data, get next CQE and verify if it is compressed. */ + } else { + int ret; + int8_t op_own; + + ret = check_cqe64(cqe, cqe_n, rxq->cq_ci); + if (unlikely(ret == 1)) + return 0; + ++rxq->cq_ci; + op_own = cqe->op_own; + if (MLX5_CQE_FORMAT(op_own) == MLX5_COMPRESSED) { + volatile struct mlx5_mini_cqe8 (*mc)[8] = + (volatile struct mlx5_mini_cqe8 (*)[8]) + (uintptr_t)(&(*rxq->cqes)[rxq->cq_ci & + cqe_cnt].cqe64); + + /* Fix endianness. */ + zip->cqe_cnt = ntohl(cqe->byte_cnt); + /* + * Current mini array position is the one returned by + * check_cqe64(). 
+ * + * If completion comprises several mini arrays, as a + * special case the second one is located 7 CQEs after + * the initial CQE instead of 8 for subsequent ones. + */ + zip->ca = rxq->cq_ci & cqe_cnt; + zip->na = zip->ca + 7; + /* Compute the next non compressed CQE. */ + --rxq->cq_ci; + zip->cq_ci = rxq->cq_ci + zip->cqe_cnt; + /* Get packet size to return. */ + len = ntohl((*mc)[0].byte_cnt); + zip->ai = 1; + } else { + len = ntohl(cqe->byte_cnt); + } + /* Error while receiving packet. */ + if (unlikely(MLX5_CQE_OPCODE(op_own) == MLX5_CQE_RESP_ERR)) + return -1; + } + return len; +} + +/** * Translate RX completion flags to offload flags. * * @param[in] rxq * Pointer to RX queue structure. - * @param flags - * RX completion flags returned by poll_length_flags(). + * @param[in] cqe + * Pointer to CQE. * * @return * Offload flags (ol_flags) for struct rte_mbuf. */ static inline uint32_t -rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags) +rxq_cq_to_ol_flags(struct rxq *rxq, volatile struct mlx5_cqe64 *cqe) { uint32_t ol_flags = 0; + uint8_t l3_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L3_HDR_TYPE_MASK; + uint8_t l4_hdr = (cqe->l4_hdr_type_etc) & MLX5_CQE_L4_HDR_TYPE_MASK; + uint8_t info = cqe->rsvd0[0]; - if (rxq->csum) { - /* Set IP checksum flag only for IPv4/IPv6 packets. */ - if (flags & - (IBV_EXP_CQ_RX_IPV4_PACKET | IBV_EXP_CQ_RX_IPV6_PACKET)) - ol_flags |= - TRANSPOSE(~flags, - IBV_EXP_CQ_RX_IP_CSUM_OK, - PKT_RX_IP_CKSUM_BAD); -#ifdef HAVE_EXP_CQ_RX_TCP_PACKET - /* Set L4 checksum flag only for TCP/UDP packets. */ - if (flags & - (IBV_EXP_CQ_RX_TCP_PACKET | IBV_EXP_CQ_RX_UDP_PACKET)) -#endif /* HAVE_EXP_CQ_RX_TCP_PACKET */ - ol_flags |= - TRANSPOSE(~flags, - IBV_EXP_CQ_RX_TCP_UDP_CSUM_OK, - PKT_RX_L4_CKSUM_BAD); - } + if ((l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV4) || + (l3_hdr == MLX5_CQE_L3_HDR_TYPE_IPV6)) + ol_flags |= + (!(cqe->hds_ip_ext & MLX5_CQE_L3_OK) * + PKT_RX_IP_CKSUM_BAD); + if ((l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP) || + (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_EMP_ACK) || + (l4_hdr == MLX5_CQE_L4_HDR_TYPE_TCP_ACK) || + (l4_hdr == MLX5_CQE_L4_HDR_TYPE_UDP)) + ol_flags |= + (!(cqe->hds_ip_ext & MLX5_CQE_L4_OK) * + PKT_RX_L4_CKSUM_BAD); /* * PKT_RX_IP_CKSUM_BAD and PKT_RX_L4_CKSUM_BAD are used in place * of PKT_RX_EIP_CKSUM_BAD because the latter is not functional * (its value is 0). */ - if ((flags & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) + if ((info & IBV_EXP_CQ_RX_TUNNEL_PACKET) && (rxq->csum_l2tun)) ol_flags |= - TRANSPOSE(~flags, + TRANSPOSE(~cqe->l4_hdr_type_etc, IBV_EXP_CQ_RX_OUTER_IP_CSUM_OK, PKT_RX_IP_CKSUM_BAD) | - TRANSPOSE(~flags, + TRANSPOSE(~cqe->l4_hdr_type_etc, IBV_EXP_CQ_RX_OUTER_TCP_UDP_CSUM_OK, PKT_RX_L4_CKSUM_BAD); return ol_flags; } /** - * DPDK callback for RX with scattered packets support. + * DPDK callback for RX. * * @param dpdk_rxq * Generic pointer to RX queue structure. @@ -849,353 +1545,127 @@ rxq_cq_to_ol_flags(const struct rxq *rxq, uint32_t flags) * Number of packets successfully received (<= pkts_n). 
*/ uint16_t -mlx5_rx_burst_sp(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) +mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) { - struct rxq *rxq = (struct rxq *)dpdk_rxq; - struct rxq_elt_sp (*elts)[rxq->elts_n] = rxq->elts.sp; - const unsigned int elts_n = rxq->elts_n; - unsigned int elts_head = rxq->elts_head; - unsigned int i; - unsigned int pkts_ret = 0; - int ret; + struct rxq *rxq = dpdk_rxq; + const unsigned int wqe_cnt = rxq->elts_n - 1; + const unsigned int cqe_cnt = rxq->cqe_n - 1; + const unsigned int sges_n = rxq->sges_n; + struct rte_mbuf *pkt = NULL; + struct rte_mbuf *seg = NULL; + volatile struct mlx5_cqe64 *cqe = + &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64; + unsigned int i = 0; + unsigned int rq_ci = rxq->rq_ci << sges_n; + int len; - if (unlikely(!rxq->sp)) - return mlx5_rx_burst(dpdk_rxq, pkts, pkts_n); - if (unlikely(elts == NULL)) /* See RTE_DEV_CMD_SET_MTU. */ - return 0; - for (i = 0; (i != pkts_n); ++i) { - struct rxq_elt_sp *elt = &(*elts)[elts_head]; - unsigned int len; - unsigned int pkt_buf_len; - struct rte_mbuf *pkt_buf = NULL; /* Buffer returned in pkts. */ - struct rte_mbuf **pkt_buf_next = &pkt_buf; - unsigned int seg_headroom = RTE_PKTMBUF_HEADROOM; - unsigned int j = 0; - uint32_t flags; - uint16_t vlan_tci; - - /* Sanity checks. */ - assert(elts_head < rxq->elts_n); - assert(rxq->elts_head < rxq->elts_n); - ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); - if (unlikely(ret < 0)) { - struct ibv_wc wc; - int wcs_n; - - DEBUG("rxq=%p, poll_length() failed (ret=%d)", - (void *)rxq, ret); - /* ibv_poll_cq() must be used in case of failure. */ - wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); - if (unlikely(wcs_n == 0)) - break; - if (unlikely(wcs_n < 0)) { - DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", - (void *)rxq, wcs_n); + while (pkts_n) { + unsigned int idx = rq_ci & wqe_cnt; + volatile struct mlx5_wqe_data_seg *wqe = &(*rxq->wqes)[idx]; + struct rte_mbuf *rep = (*rxq->elts)[idx]; + + if (pkt) + NEXT(seg) = rep; + seg = rep; + rte_prefetch0(seg); + rte_prefetch0(cqe); + rte_prefetch0(wqe); + rep = rte_mbuf_raw_alloc(rxq->mp); + if (unlikely(rep == NULL)) { + while (pkt) { + seg = NEXT(pkt); + rte_mbuf_refcnt_set(pkt, 0); + __rte_mbuf_raw_free(pkt); + pkt = seg; + } + ++rxq->stats.rx_nombuf; + break; + } + if (!pkt) { + cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt].cqe64; + len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt); + if (len == 0) { + rte_mbuf_refcnt_set(rep, 0); + __rte_mbuf_raw_free(rep); break; } - assert(wcs_n == 1); - if (unlikely(wc.status != IBV_WC_SUCCESS)) { - /* Whatever, just repost the offending WR. */ - DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" - " completion status (%d): %s", - (void *)rxq, wc.wr_id, wc.status, - ibv_wc_status_str(wc.status)); -#ifdef MLX5_PMD_SOFT_COUNTERS - /* Increment dropped packets counter. */ + if (unlikely(len == -1)) { + /* RX error, packet is likely too large. */ + rte_mbuf_refcnt_set(rep, 0); + __rte_mbuf_raw_free(rep); ++rxq->stats.idropped; -#endif - goto repost; + goto skip; } - ret = wc.byte_len; - } - if (ret == 0) - break; - assert(ret >= (rxq->crc_present << 2)); - len = ret - (rxq->crc_present << 2); - pkt_buf_len = len; - /* - * Replace spent segments with new ones, concatenate and - * return them as pkt_buf. 
- */ - while (1) { - struct ibv_sge *sge = &elt->sges[j]; - struct rte_mbuf *seg = elt->bufs[j]; - struct rte_mbuf *rep; - unsigned int seg_tailroom; - - assert(seg != NULL); - /* - * Fetch initial bytes of packet descriptor into a - * cacheline while allocating rep. - */ - rte_prefetch0(seg); - rep = __rte_mbuf_raw_alloc(rxq->mp); - if (unlikely(rep == NULL)) { - /* - * Unable to allocate a replacement mbuf, - * repost WR. - */ - DEBUG("rxq=%p: can't allocate a new mbuf", - (void *)rxq); - if (pkt_buf != NULL) { - *pkt_buf_next = NULL; - rte_pktmbuf_free(pkt_buf); + pkt = seg; + assert(len >= (rxq->crc_present << 2)); + /* Update packet information. */ + if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip | + rxq->crc_present) { + if (rxq->csum) { + pkt->packet_type = + rxq_cq_to_pkt_type(cqe); + pkt->ol_flags = + rxq_cq_to_ol_flags(rxq, cqe); } - /* Increment out of memory counters. */ - ++rxq->stats.rx_nombuf; - ++rxq->priv->dev->data->rx_mbuf_alloc_failed; - goto repost; - } -#ifndef NDEBUG - /* Poison user-modifiable fields in rep. */ - NEXT(rep) = (void *)((uintptr_t)-1); - SET_DATA_OFF(rep, 0xdead); - DATA_LEN(rep) = 0xd00d; - PKT_LEN(rep) = 0xdeadd00d; - NB_SEGS(rep) = 0x2a; - PORT(rep) = 0x2a; - rep->ol_flags = -1; -#endif - assert(rep->buf_len == seg->buf_len); - assert(rep->buf_len == rxq->mb_len); - /* Reconfigure sge to use rep instead of seg. */ - assert(sge->lkey == rxq->mr->lkey); - sge->addr = ((uintptr_t)rep->buf_addr + seg_headroom); - elt->bufs[j] = rep; - ++j; - /* Update pkt_buf if it's the first segment, or link - * seg to the previous one and update pkt_buf_next. */ - *pkt_buf_next = seg; - pkt_buf_next = &NEXT(seg); - /* Update seg information. */ - seg_tailroom = (seg->buf_len - seg_headroom); - assert(sge->length == seg_tailroom); - SET_DATA_OFF(seg, seg_headroom); - if (likely(len <= seg_tailroom)) { - /* Last segment. */ - DATA_LEN(seg) = len; - PKT_LEN(seg) = len; - /* Sanity check. */ - assert(rte_pktmbuf_headroom(seg) == - seg_headroom); - assert(rte_pktmbuf_tailroom(seg) == - (seg_tailroom - len)); - break; - } - DATA_LEN(seg) = seg_tailroom; - PKT_LEN(seg) = seg_tailroom; - /* Sanity check. */ - assert(rte_pktmbuf_headroom(seg) == seg_headroom); - assert(rte_pktmbuf_tailroom(seg) == 0); - /* Fix len and clear headroom for next segments. */ - len -= seg_tailroom; - seg_headroom = 0; - } - /* Update head and tail segments. */ - *pkt_buf_next = NULL; - assert(pkt_buf != NULL); - assert(j != 0); - NB_SEGS(pkt_buf) = j; - PORT(pkt_buf) = rxq->port_id; - PKT_LEN(pkt_buf) = pkt_buf_len; - if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { - pkt_buf->packet_type = rxq_cq_to_pkt_type(flags); - pkt_buf->ol_flags = rxq_cq_to_ol_flags(rxq, flags); -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS - if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { - pkt_buf->ol_flags |= PKT_RX_VLAN_PKT; - pkt_buf->vlan_tci = vlan_tci; + if (cqe->l4_hdr_type_etc & + MLX5_CQE_VLAN_STRIPPED) { + pkt->ol_flags |= PKT_RX_VLAN_PKT | + PKT_RX_VLAN_STRIPPED; + pkt->vlan_tci = ntohs(cqe->vlan_info); + } + if (rxq->crc_present) + len -= ETHER_CRC_LEN; } -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + PKT_LEN(pkt) = len; } - - /* Return packet. */ - *(pkts++) = pkt_buf; - ++pkts_ret; -#ifdef MLX5_PMD_SOFT_COUNTERS - /* Increment bytes counter. */ - rxq->stats.ibytes += pkt_buf_len; -#endif -repost: - ret = rxq->recv(rxq->wq, elt->sges, RTE_DIM(elt->sges)); - if (unlikely(ret)) { - /* Inability to repost WRs is fatal. 
*/ - DEBUG("%p: recv_sg_list(): failed (ret=%d)", - (void *)rxq->priv, - ret); - abort(); - } - if (++elts_head >= elts_n) - elts_head = 0; - continue; - } - if (unlikely(i == 0)) - return 0; - rxq->elts_head = elts_head; -#ifdef MLX5_PMD_SOFT_COUNTERS - /* Increment packets counter. */ - rxq->stats.ipackets += pkts_ret; -#endif - return pkts_ret; -} - -/** - * DPDK callback for RX. - * - * The following function is the same as mlx5_rx_burst_sp(), except it doesn't - * manage scattered packets. Improves performance when MRU is lower than the - * size of the first segment. - * - * @param dpdk_rxq - * Generic pointer to RX queue structure. - * @param[out] pkts - * Array to store received packets. - * @param pkts_n - * Maximum number of packets in array. - * - * @return - * Number of packets successfully received (<= pkts_n). - */ -uint16_t -mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) -{ - struct rxq *rxq = (struct rxq *)dpdk_rxq; - struct rxq_elt (*elts)[rxq->elts_n] = rxq->elts.no_sp; - const unsigned int elts_n = rxq->elts_n; - unsigned int elts_head = rxq->elts_head; - struct ibv_sge sges[pkts_n]; - unsigned int i; - unsigned int pkts_ret = 0; - int ret; - - if (unlikely(rxq->sp)) - return mlx5_rx_burst_sp(dpdk_rxq, pkts, pkts_n); - for (i = 0; (i != pkts_n); ++i) { - struct rxq_elt *elt = &(*elts)[elts_head]; - unsigned int len; - struct rte_mbuf *seg = elt->buf; - struct rte_mbuf *rep; - uint32_t flags; - uint16_t vlan_tci; - - /* Sanity checks. */ - assert(seg != NULL); - assert(elts_head < rxq->elts_n); - assert(rxq->elts_head < rxq->elts_n); + DATA_LEN(rep) = DATA_LEN(seg); + PKT_LEN(rep) = PKT_LEN(seg); + SET_DATA_OFF(rep, DATA_OFF(seg)); + NB_SEGS(rep) = NB_SEGS(seg); + PORT(rep) = PORT(seg); + NEXT(rep) = NULL; + (*rxq->elts)[idx] = rep; /* - * Fetch initial bytes of packet descriptor into a - * cacheline while allocating rep. + * Fill NIC descriptor with the new buffer. The lkey and size + * of the buffers are already known, only the buffer address + * changes. */ - rte_prefetch0(seg); - rte_prefetch0(&seg->cacheline1); - ret = rxq->poll(rxq->cq, NULL, NULL, &flags, &vlan_tci); - if (unlikely(ret < 0)) { - struct ibv_wc wc; - int wcs_n; - - DEBUG("rxq=%p, poll_length() failed (ret=%d)", - (void *)rxq, ret); - /* ibv_poll_cq() must be used in case of failure. */ - wcs_n = ibv_poll_cq(rxq->cq, 1, &wc); - if (unlikely(wcs_n == 0)) - break; - if (unlikely(wcs_n < 0)) { - DEBUG("rxq=%p, ibv_poll_cq() failed (wcs_n=%d)", - (void *)rxq, wcs_n); - break; - } - assert(wcs_n == 1); - if (unlikely(wc.status != IBV_WC_SUCCESS)) { - /* Whatever, just repost the offending WR. */ - DEBUG("rxq=%p, wr_id=%" PRIu64 ": bad work" - " completion status (%d): %s", - (void *)rxq, wc.wr_id, wc.status, - ibv_wc_status_str(wc.status)); -#ifdef MLX5_PMD_SOFT_COUNTERS - /* Increment dropped packets counter. */ - ++rxq->stats.idropped; -#endif - /* Add SGE to array for repost. */ - sges[i] = elt->sge; - goto repost; - } - ret = wc.byte_len; - } - if (ret == 0) - break; - assert(ret >= (rxq->crc_present << 2)); - len = ret - (rxq->crc_present << 2); - rep = __rte_mbuf_raw_alloc(rxq->mp); - if (unlikely(rep == NULL)) { - /* - * Unable to allocate a replacement mbuf, - * repost WR. - */ - DEBUG("rxq=%p: can't allocate a new mbuf", - (void *)rxq); - /* Increment out of memory counters. 
*/ - ++rxq->stats.rx_nombuf; - ++rxq->priv->dev->data->rx_mbuf_alloc_failed; - goto repost; + wqe->addr = htonll(rte_pktmbuf_mtod(rep, uintptr_t)); + if (len > DATA_LEN(seg)) { + len -= DATA_LEN(seg); + ++NB_SEGS(pkt); + ++rq_ci; + continue; } - - /* Reconfigure sge to use rep instead of seg. */ - elt->sge.addr = (uintptr_t)rep->buf_addr + RTE_PKTMBUF_HEADROOM; - assert(elt->sge.lkey == rxq->mr->lkey); - elt->buf = rep; - - /* Add SGE to array for repost. */ - sges[i] = elt->sge; - - /* Update seg information. */ - SET_DATA_OFF(seg, RTE_PKTMBUF_HEADROOM); - NB_SEGS(seg) = 1; - PORT(seg) = rxq->port_id; - NEXT(seg) = NULL; - PKT_LEN(seg) = len; DATA_LEN(seg) = len; - if (rxq->csum | rxq->csum_l2tun | rxq->vlan_strip) { - seg->packet_type = rxq_cq_to_pkt_type(flags); - seg->ol_flags = rxq_cq_to_ol_flags(rxq, flags); -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS - if (flags & IBV_EXP_CQ_RX_CVLAN_STRIPPED_V1) { - seg->ol_flags |= PKT_RX_VLAN_PKT; - seg->vlan_tci = vlan_tci; - } -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ - } - /* Return packet. */ - *(pkts++) = seg; - ++pkts_ret; #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment bytes counter. */ - rxq->stats.ibytes += len; + rxq->stats.ibytes += PKT_LEN(pkt); #endif -repost: - if (++elts_head >= elts_n) - elts_head = 0; - continue; + /* Return packet. */ + *(pkts++) = pkt; + pkt = NULL; + --pkts_n; + ++i; +skip: + /* Align consumer index to the next stride. */ + rq_ci >>= sges_n; + ++rq_ci; + rq_ci <<= sges_n; } - if (unlikely(i == 0)) + if (unlikely((i == 0) && ((rq_ci >> sges_n) == rxq->rq_ci))) return 0; - /* Repost WRs. */ -#ifdef DEBUG_RECV - DEBUG("%p: reposting %u WRs", (void *)rxq, i); -#endif - ret = rxq->recv(rxq->wq, sges, i); - if (unlikely(ret)) { - /* Inability to repost WRs is fatal. */ - DEBUG("%p: recv_burst(): failed (ret=%d)", - (void *)rxq->priv, - ret); - abort(); - } - rxq->elts_head = elts_head; + /* Update the consumer index. */ + rxq->rq_ci = rq_ci >> sges_n; + rte_wmb(); + *rxq->cq_db = htonl(rxq->cq_ci); + rte_wmb(); + *rxq->rq_db = htonl(rxq->rq_ci); #ifdef MLX5_PMD_SOFT_COUNTERS /* Increment packets counter. */ - rxq->stats.ipackets += pkts_ret; + rxq->stats.ipackets += i; #endif - return pkts_ret; + return i; } /** diff --git a/drivers/net/mlx5/mlx5_rxtx.h b/drivers/net/mlx5/mlx5_rxtx.h index 0e2b607d..f6e2cbac 100644 --- a/drivers/net/mlx5/mlx5_rxtx.h +++ b/drivers/net/mlx5/mlx5_rxtx.h @@ -43,6 +43,7 @@ #pragma GCC diagnostic ignored "-pedantic" #endif #include <infiniband/verbs.h> +#include <infiniband/mlx5_hw.h> #ifdef PEDANTIC #pragma GCC diagnostic error "-pedantic" #endif @@ -61,6 +62,7 @@ #include "mlx5.h" #include "mlx5_autoconf.h" #include "mlx5_defs.h" +#include "mlx5_prm.h" struct mlx5_rxq_stats { unsigned int idx; /**< Mapping index. */ @@ -81,18 +83,6 @@ struct mlx5_txq_stats { uint64_t odropped; /**< Total of packets not sent when TX ring full. */ }; -/* RX element (scattered packets). */ -struct rxq_elt_sp { - struct ibv_sge sges[MLX5_PMD_SGE_WR_N]; /* Scatter/Gather Elements. */ - struct rte_mbuf *bufs[MLX5_PMD_SGE_WR_N]; /* SGEs buffers. */ -}; - -/* RX element. */ -struct rxq_elt { - struct ibv_sge sge; /* Scatter/Gather Element. */ - struct rte_mbuf *buf; /* SGE buffer. */ -}; - /* Flow director queue structure. */ struct fdir_queue { struct ibv_qp *qp; /* Associated RX QP. */ @@ -101,38 +91,49 @@ struct fdir_queue { struct priv; +/* Compressed CQE context. */ +struct rxq_zip { + uint16_t ai; /* Array index. */ + uint16_t ca; /* Current array index. */ + uint16_t na; /* Next array index. 
*/ + uint16_t cq_ci; /* The next CQE. */ + uint32_t cqe_cnt; /* Number of CQEs. */ +}; + /* RX queue descriptor. */ struct rxq { - struct priv *priv; /* Back pointer to private data. */ - struct rte_mempool *mp; /* Memory Pool for allocations. */ - struct ibv_cq *cq; /* Completion Queue. */ - struct ibv_exp_wq *wq; /* Work Queue. */ - int32_t (*poll)(); /* Verbs poll function. */ - int32_t (*recv)(); /* Verbs receive function. */ - unsigned int port_id; /* Port ID for incoming packets. */ - unsigned int elts_n; /* (*elts)[] length. */ - unsigned int elts_head; /* Current index in (*elts)[]. */ - unsigned int sp:1; /* Use scattered RX elements. */ unsigned int csum:1; /* Enable checksum offloading. */ unsigned int csum_l2tun:1; /* Same for L2 tunnels. */ unsigned int vlan_strip:1; /* Enable VLAN stripping. */ unsigned int crc_present:1; /* CRC must be subtracted. */ - union { - struct rxq_elt_sp (*sp)[]; /* Scattered RX elements. */ - struct rxq_elt (*no_sp)[]; /* RX elements. */ - } elts; - uint32_t mb_len; /* Length of a mp-issued mbuf. */ - unsigned int socket; /* CPU socket ID for allocations. */ - struct mlx5_rxq_stats stats; /* RX queue counters. */ + unsigned int sges_n:2; /* Log 2 of SGEs (max buffers per packet). */ + uint16_t rq_ci; + uint16_t cq_ci; + uint16_t elts_n; + uint16_t cqe_n; /* Number of CQ elements. */ + uint16_t port_id; + volatile struct mlx5_wqe_data_seg(*wqes)[]; + volatile struct mlx5_cqe(*cqes)[]; + struct rxq_zip zip; /* Compressed context. */ + volatile uint32_t *rq_db; + volatile uint32_t *cq_db; + struct rte_mbuf *(*elts)[]; + struct rte_mempool *mp; + struct mlx5_rxq_stats stats; +} __rte_cache_aligned; + +/* RX queue control descriptor. */ +struct rxq_ctrl { + struct priv *priv; /* Back pointer to private data. */ + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_exp_wq *wq; /* Work Queue. */ struct ibv_exp_res_domain *rd; /* Resource Domain. */ struct fdir_queue fdir_queue; /* Flow director queue. */ struct ibv_mr *mr; /* Memory Region (for mp). */ struct ibv_exp_wq_family *if_wq; /* WQ burst interface. */ -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS struct ibv_exp_cq_family_v1 *if_cq; /* CQ interface. */ -#else /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ - struct ibv_exp_cq_family *if_cq; /* CQ interface. */ -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ + unsigned int socket; /* CPU socket ID for allocations. */ + struct rxq rxq; /* Data path structure. */ }; /* Hash RX queue types. */ @@ -140,11 +141,9 @@ enum hash_rxq_type { HASH_RXQ_TCPV4, HASH_RXQ_UDPV4, HASH_RXQ_IPV4, -#ifdef HAVE_FLOW_SPEC_IPV6 HASH_RXQ_TCPV6, HASH_RXQ_UDPV6, HASH_RXQ_IPV6, -#endif /* HAVE_FLOW_SPEC_IPV6 */ HASH_RXQ_ETH, }; @@ -175,9 +174,7 @@ struct hash_rxq_init { } hdr; struct ibv_exp_flow_spec_tcp_udp tcp_udp; struct ibv_exp_flow_spec_ipv4 ipv4; -#ifdef HAVE_FLOW_SPEC_IPV6 struct ibv_exp_flow_spec_ipv6 ipv6; -#endif /* HAVE_FLOW_SPEC_IPV6 */ struct ibv_exp_flow_spec_eth eth; } flow_spec; /* Flow specification template. */ const struct hash_rxq_init *underlayer; /* Pointer to underlayer. */ @@ -232,74 +229,50 @@ struct hash_rxq { struct ibv_qp *qp; /* Hash RX QP. */ enum hash_rxq_type type; /* Hash RX queue type. */ /* MAC flow steering rules, one per VLAN ID. */ - struct ibv_exp_flow *mac_flow[MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS]; + struct ibv_exp_flow *mac_flow + [MLX5_MAX_MAC_ADDRESSES][MLX5_MAX_VLAN_IDS]; struct ibv_exp_flow *special_flow [MLX5_MAX_SPECIAL_FLOWS][MLX5_MAX_VLAN_IDS]; }; -/* TX element. 
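
The reworked RX path above consumes the receive ring in strides of 2^sges_n work-queue entries (sges_n is the "Log 2 of SGEs" bit-field in struct rxq), and once a packet has been delivered or skipped the consumer index jumps to the first entry of the next stride. A minimal arithmetic sketch of that alignment step in mlx5_rx_burst(); the helper name is illustrative, not from the tree:

static inline unsigned int
rq_ci_next_stride(unsigned int rq_ci, unsigned int sges_n)
{
        rq_ci >>= sges_n;        /* stride holding the current WQE */
        ++rq_ci;                 /* move to the next stride */
        return rq_ci << sges_n;  /* back to a WQE index */
}
/* e.g. with sges_n = 2 (4 SGEs per stride), indexes 4..7 all advance to 8. */
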
*/ -struct txq_elt { - struct rte_mbuf *buf; -}; - -/* Linear buffer type. It is used when transmitting buffers with too many - * segments that do not fit the hardware queue (see max_send_sge). - * Extra segments are copied (linearized) in such buffers, replacing the - * last SGE during TX. - * The size is arbitrary but large enough to hold a jumbo frame with - * 8 segments considering mbuf.buf_len is about 2048 bytes. */ -typedef uint8_t linear_t[16384]; - /* TX queue descriptor. */ struct txq { - struct priv *priv; /* Back pointer to private data. */ - int32_t (*poll_cnt)(struct ibv_cq *cq, uint32_t max); - int (*send_pending)(); -#ifdef HAVE_VERBS_VLAN_INSERTION - int (*send_pending_vlan)(); -#endif -#if MLX5_PMD_MAX_INLINE > 0 - int (*send_pending_inline)(); -#ifdef HAVE_VERBS_VLAN_INSERTION - int (*send_pending_inline_vlan)(); -#endif -#endif -#if MLX5_PMD_SGE_WR_N > 1 - int (*send_pending_sg_list)(); -#ifdef HAVE_VERBS_VLAN_INSERTION - int (*send_pending_sg_list_vlan)(); -#endif -#endif - int (*send_flush)(struct ibv_qp *qp); - struct ibv_cq *cq; /* Completion Queue. */ - struct ibv_qp *qp; /* Queue Pair. */ - struct txq_elt (*elts)[]; /* TX elements. */ -#if MLX5_PMD_MAX_INLINE > 0 - uint32_t max_inline; /* Max inline send size <= MLX5_PMD_MAX_INLINE. */ -#endif - unsigned int elts_n; /* (*elts)[] length. */ - unsigned int elts_head; /* Current index in (*elts)[]. */ - unsigned int elts_tail; /* First element awaiting completion. */ - unsigned int elts_comp; /* Number of completion requests. */ - unsigned int elts_comp_cd; /* Countdown for next completion request. */ - unsigned int elts_comp_cd_init; /* Initial value for countdown. */ + uint16_t elts_head; /* Current index in (*elts)[]. */ + uint16_t elts_tail; /* First element awaiting completion. */ + uint16_t elts_comp; /* Counter since last completion request. */ + uint16_t elts_n; /* (*elts)[] length. */ + uint16_t cq_ci; /* Consumer index for completion queue. */ + uint16_t cqe_n; /* Number of CQ elements. */ + uint16_t wqe_ci; /* Consumer index for work queue. */ + uint16_t wqe_n; /* Number of WQ elements. */ + uint16_t bf_offset; /* Blueflame offset. */ + uint16_t bf_buf_size; /* Blueflame size. */ + uint16_t max_inline; /* Maximum size to inline in a WQE. */ + uint32_t qp_num_8s; /* QP number shifted by 8. */ + volatile struct mlx5_cqe (*cqes)[]; /* Completion queue. */ + volatile union mlx5_wqe (*wqes)[]; /* Work queue. */ + volatile uint32_t *qp_db; /* Work queue doorbell. */ + volatile uint32_t *cq_db; /* Completion queue doorbell. */ + volatile void *bf_reg; /* Blueflame register. */ struct { const struct rte_mempool *mp; /* Cached Memory Pool. */ struct ibv_mr *mr; /* Memory Region (for mp). */ - uint32_t lkey; /* mr->lkey */ + uint32_t lkey; /* htonl(mr->lkey) */ } mp2mr[MLX5_PMD_TX_MP_CACHE]; /* MP to MR translation table. */ + struct rte_mbuf *(*elts)[]; /* TX elements. */ struct mlx5_txq_stats stats; /* TX queue counters. */ - /* Elements used only for init part are here. */ - linear_t (*elts_linear)[]; /* Linearized buffers. */ - struct ibv_mr *mr_linear; /* Memory Region for linearized buffers. */ -#ifdef HAVE_VERBS_VLAN_INSERTION - struct ibv_exp_qp_burst_family_v1 *if_qp; /* QP burst interface. */ -#else +} __rte_cache_aligned; + +/* TX queue control descriptor. */ +struct txq_ctrl { + struct priv *priv; /* Back pointer to private data. */ + struct ibv_cq *cq; /* Completion Queue. */ + struct ibv_qp *qp; /* Queue Pair. */ struct ibv_exp_qp_burst_family *if_qp; /* QP burst interface. 
*/ -#endif struct ibv_exp_cq_family *if_cq; /* CQ interface. */ struct ibv_exp_res_domain *rd; /* Resource Domain. */ unsigned int socket; /* CPU socket ID for allocations. */ + struct txq txq; /* Data path structure. */ }; /* mlx5_rxq.c */ @@ -316,37 +289,40 @@ int priv_create_hash_rxqs(struct priv *); void priv_destroy_hash_rxqs(struct priv *); int priv_allow_flow_type(struct priv *, enum hash_rxq_flow_type); int priv_rehash_flows(struct priv *); -void rxq_cleanup(struct rxq *); -int rxq_rehash(struct rte_eth_dev *, struct rxq *); -int rxq_setup(struct rte_eth_dev *, struct rxq *, uint16_t, unsigned int, - const struct rte_eth_rxconf *, struct rte_mempool *); +void rxq_cleanup(struct rxq_ctrl *); +int rxq_rehash(struct rte_eth_dev *, struct rxq_ctrl *); +int rxq_ctrl_setup(struct rte_eth_dev *, struct rxq_ctrl *, uint16_t, + unsigned int, const struct rte_eth_rxconf *, + struct rte_mempool *); int mlx5_rx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, const struct rte_eth_rxconf *, struct rte_mempool *); void mlx5_rx_queue_release(void *); -uint16_t mlx5_rx_burst_secondary_setup(void *dpdk_rxq, struct rte_mbuf **pkts, - uint16_t pkts_n); - +uint16_t mlx5_rx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t); /* mlx5_txq.c */ -void txq_cleanup(struct txq *); -int txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, - unsigned int socket, const struct rte_eth_txconf *conf); - +void txq_cleanup(struct txq_ctrl *); +int txq_ctrl_setup(struct rte_eth_dev *, struct txq_ctrl *, uint16_t, + unsigned int, const struct rte_eth_txconf *); int mlx5_tx_queue_setup(struct rte_eth_dev *, uint16_t, uint16_t, unsigned int, const struct rte_eth_txconf *); void mlx5_tx_queue_release(void *); -uint16_t mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts, - uint16_t pkts_n); +uint16_t mlx5_tx_burst_secondary_setup(void *, struct rte_mbuf **, uint16_t); /* mlx5_rxtx.c */ -struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, const struct rte_mempool *); -void txq_mp2mr_iter(const struct rte_mempool *, void *); uint16_t mlx5_tx_burst(void *, struct rte_mbuf **, uint16_t); -uint16_t mlx5_rx_burst_sp(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_inline(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_mpw(void *, struct rte_mbuf **, uint16_t); +uint16_t mlx5_tx_burst_mpw_inline(void *, struct rte_mbuf **, uint16_t); uint16_t mlx5_rx_burst(void *, struct rte_mbuf **, uint16_t); uint16_t removed_tx_burst(void *, struct rte_mbuf **, uint16_t); uint16_t removed_rx_burst(void *, struct rte_mbuf **, uint16_t); +/* mlx5_mr.c */ + +struct ibv_mr *mlx5_mp2mr(struct ibv_pd *, struct rte_mempool *); +void txq_mp2mr_iter(struct rte_mempool *, void *); +uint32_t txq_mp2mr_reg(struct txq *, struct rte_mempool *, unsigned int); + #endif /* RTE_PMD_MLX5_RXTX_H_ */ diff --git a/drivers/net/mlx5/mlx5_txq.c b/drivers/net/mlx5/mlx5_txq.c index 31ce53ad..6fe61c4a 100644 --- a/drivers/net/mlx5/mlx5_txq.c +++ b/drivers/net/mlx5/mlx5_txq.c @@ -60,6 +60,7 @@ #endif #include "mlx5_utils.h" +#include "mlx5_defs.h" #include "mlx5.h" #include "mlx5_rxtx.h" #include "mlx5_autoconf.h" @@ -68,118 +69,62 @@ /** * Allocate TX queue elements. * - * @param txq + * @param txq_ctrl * Pointer to TX queue structure. * @param elts_n * Number of elements to allocate. - * - * @return - * 0 on success, errno value on failure. 
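
Each TX queue now carries a small mempool-to-memory-region cache (the mp2mr[] table in struct txq, storing lkey as htonl(mr->lkey)), with registration handled by the txq_mp2mr_reg() helper declared above for mlx5_mr.c. A hedged sketch of how such a cache is typically consulted on the send path; the function name and exact control flow are the editor's simplification, not the driver's code:

static inline uint32_t
lookup_tx_lkey(struct txq *txq, struct rte_mempool *mp)
{
        unsigned int i;

        for (i = 0; i != RTE_DIM(txq->mp2mr); ++i) {
                if (txq->mp2mr[i].mp == NULL)
                        break;                     /* first free slot: miss */
                if (txq->mp2mr[i].mp == mp)
                        return txq->mp2mr[i].lkey; /* already htonl()ed */
        }
        /* Miss: register the mempool and cache it in slot i. */
        return txq_mp2mr_reg(txq, mp, i);
}
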
*/ -static int -txq_alloc_elts(struct txq *txq, unsigned int elts_n) +static void +txq_alloc_elts(struct txq_ctrl *txq_ctrl, unsigned int elts_n) { unsigned int i; - struct txq_elt (*elts)[elts_n] = - rte_calloc_socket("TXQ", 1, sizeof(*elts), 0, txq->socket); - linear_t (*elts_linear)[elts_n] = - rte_calloc_socket("TXQ", 1, sizeof(*elts_linear), 0, - txq->socket); - struct ibv_mr *mr_linear = NULL; - int ret = 0; - if ((elts == NULL) || (elts_linear == NULL)) { - ERROR("%p: can't allocate packets array", (void *)txq); - ret = ENOMEM; - goto error; - } - mr_linear = - ibv_reg_mr(txq->priv->pd, elts_linear, sizeof(*elts_linear), - (IBV_ACCESS_LOCAL_WRITE | IBV_ACCESS_REMOTE_WRITE)); - if (mr_linear == NULL) { - ERROR("%p: unable to configure MR, ibv_reg_mr() failed", - (void *)txq); - ret = EINVAL; - goto error; - } - for (i = 0; (i != elts_n); ++i) { - struct txq_elt *elt = &(*elts)[i]; + for (i = 0; (i != elts_n); ++i) + (*txq_ctrl->txq.elts)[i] = NULL; + for (i = 0; (i != txq_ctrl->txq.wqe_n); ++i) { + volatile union mlx5_wqe *wqe = &(*txq_ctrl->txq.wqes)[i]; - elt->buf = NULL; + memset((void *)(uintptr_t)wqe, 0x0, sizeof(*wqe)); } - DEBUG("%p: allocated and configured %u WRs", (void *)txq, elts_n); - txq->elts_n = elts_n; - txq->elts = elts; - txq->elts_head = 0; - txq->elts_tail = 0; - txq->elts_comp = 0; - /* Request send completion every MLX5_PMD_TX_PER_COMP_REQ packets or - * at least 4 times per ring. */ - txq->elts_comp_cd_init = - ((MLX5_PMD_TX_PER_COMP_REQ < (elts_n / 4)) ? - MLX5_PMD_TX_PER_COMP_REQ : (elts_n / 4)); - txq->elts_comp_cd = txq->elts_comp_cd_init; - txq->elts_linear = elts_linear; - txq->mr_linear = mr_linear; - assert(ret == 0); - return 0; -error: - if (mr_linear != NULL) - claim_zero(ibv_dereg_mr(mr_linear)); - - rte_free(elts_linear); - rte_free(elts); - - DEBUG("%p: failed, freed everything", (void *)txq); - assert(ret > 0); - return ret; + DEBUG("%p: allocated and configured %u WRs", (void *)txq_ctrl, elts_n); + txq_ctrl->txq.elts_head = 0; + txq_ctrl->txq.elts_tail = 0; + txq_ctrl->txq.elts_comp = 0; } /** * Free TX queue elements. * - * @param txq + * @param txq_ctrl * Pointer to TX queue structure. */ static void -txq_free_elts(struct txq *txq) +txq_free_elts(struct txq_ctrl *txq_ctrl) { - unsigned int elts_n = txq->elts_n; - unsigned int elts_head = txq->elts_head; - unsigned int elts_tail = txq->elts_tail; - struct txq_elt (*elts)[elts_n] = txq->elts; - linear_t (*elts_linear)[elts_n] = txq->elts_linear; - struct ibv_mr *mr_linear = txq->mr_linear; + unsigned int elts_n = txq_ctrl->txq.elts_n; + unsigned int elts_head = txq_ctrl->txq.elts_head; + unsigned int elts_tail = txq_ctrl->txq.elts_tail; + struct rte_mbuf *(*elts)[elts_n] = txq_ctrl->txq.elts; - DEBUG("%p: freeing WRs", (void *)txq); - txq->elts_n = 0; - txq->elts_head = 0; - txq->elts_tail = 0; - txq->elts_comp = 0; - txq->elts_comp_cd = 0; - txq->elts_comp_cd_init = 0; - txq->elts = NULL; - txq->elts_linear = NULL; - txq->mr_linear = NULL; - if (mr_linear != NULL) - claim_zero(ibv_dereg_mr(mr_linear)); + DEBUG("%p: freeing WRs", (void *)txq_ctrl); + txq_ctrl->txq.elts_head = 0; + txq_ctrl->txq.elts_tail = 0; + txq_ctrl->txq.elts_comp = 0; - rte_free(elts_linear); - if (elts == NULL) - return; while (elts_tail != elts_head) { - struct txq_elt *elt = &(*elts)[elts_tail]; + struct rte_mbuf *elt = (*elts)[elts_tail]; - assert(elt->buf != NULL); - rte_pktmbuf_free(elt->buf); + assert(elt != NULL); + rte_pktmbuf_free(elt); #ifndef NDEBUG /* Poisoning. 
*/ - memset(elt, 0x77, sizeof(*elt)); + memset(&(*elts)[elts_tail], + 0x77, + sizeof((*elts)[elts_tail])); #endif if (++elts_tail == elts_n) elts_tail = 0; } - rte_free(elts); } /** @@ -187,66 +132,104 @@ txq_free_elts(struct txq *txq) * * Destroy objects, free allocated memory and reset the structure for reuse. * - * @param txq + * @param txq_ctrl * Pointer to TX queue structure. */ void -txq_cleanup(struct txq *txq) +txq_cleanup(struct txq_ctrl *txq_ctrl) { struct ibv_exp_release_intf_params params; size_t i; - DEBUG("cleaning up %p", (void *)txq); - txq_free_elts(txq); - txq->poll_cnt = NULL; -#if MLX5_PMD_MAX_INLINE > 0 - txq->send_pending_inline = NULL; -#endif - txq->send_flush = NULL; - if (txq->if_qp != NULL) { - assert(txq->priv != NULL); - assert(txq->priv->ctx != NULL); - assert(txq->qp != NULL); + DEBUG("cleaning up %p", (void *)txq_ctrl); + txq_free_elts(txq_ctrl); + if (txq_ctrl->if_qp != NULL) { + assert(txq_ctrl->priv != NULL); + assert(txq_ctrl->priv->ctx != NULL); + assert(txq_ctrl->qp != NULL); params = (struct ibv_exp_release_intf_params){ .comp_mask = 0, }; - claim_zero(ibv_exp_release_intf(txq->priv->ctx, - txq->if_qp, + claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx, + txq_ctrl->if_qp, ¶ms)); } - if (txq->if_cq != NULL) { - assert(txq->priv != NULL); - assert(txq->priv->ctx != NULL); - assert(txq->cq != NULL); + if (txq_ctrl->if_cq != NULL) { + assert(txq_ctrl->priv != NULL); + assert(txq_ctrl->priv->ctx != NULL); + assert(txq_ctrl->cq != NULL); params = (struct ibv_exp_release_intf_params){ .comp_mask = 0, }; - claim_zero(ibv_exp_release_intf(txq->priv->ctx, - txq->if_cq, + claim_zero(ibv_exp_release_intf(txq_ctrl->priv->ctx, + txq_ctrl->if_cq, ¶ms)); } - if (txq->qp != NULL) - claim_zero(ibv_destroy_qp(txq->qp)); - if (txq->cq != NULL) - claim_zero(ibv_destroy_cq(txq->cq)); - if (txq->rd != NULL) { + if (txq_ctrl->qp != NULL) + claim_zero(ibv_destroy_qp(txq_ctrl->qp)); + if (txq_ctrl->cq != NULL) + claim_zero(ibv_destroy_cq(txq_ctrl->cq)); + if (txq_ctrl->rd != NULL) { struct ibv_exp_destroy_res_domain_attr attr = { .comp_mask = 0, }; - assert(txq->priv != NULL); - assert(txq->priv->ctx != NULL); - claim_zero(ibv_exp_destroy_res_domain(txq->priv->ctx, - txq->rd, + assert(txq_ctrl->priv != NULL); + assert(txq_ctrl->priv->ctx != NULL); + claim_zero(ibv_exp_destroy_res_domain(txq_ctrl->priv->ctx, + txq_ctrl->rd, &attr)); } - for (i = 0; (i != RTE_DIM(txq->mp2mr)); ++i) { - if (txq->mp2mr[i].mp == NULL) + for (i = 0; (i != RTE_DIM(txq_ctrl->txq.mp2mr)); ++i) { + if (txq_ctrl->txq.mp2mr[i].mp == NULL) break; - assert(txq->mp2mr[i].mr != NULL); - claim_zero(ibv_dereg_mr(txq->mp2mr[i].mr)); + assert(txq_ctrl->txq.mp2mr[i].mr != NULL); + claim_zero(ibv_dereg_mr(txq_ctrl->txq.mp2mr[i].mr)); + } + memset(txq_ctrl, 0, sizeof(*txq_ctrl)); +} + +/** + * Initialize TX queue. + * + * @param tmpl + * Pointer to TX queue control template. + * @param txq_ctrl + * Pointer to TX queue control. + * + * @return + * 0 on success, errno value on failure. 
+ */ +static inline int +txq_setup(struct txq_ctrl *tmpl, struct txq_ctrl *txq_ctrl) +{ + struct mlx5_qp *qp = to_mqp(tmpl->qp); + struct ibv_cq *ibcq = tmpl->cq; + struct mlx5_cq *cq = to_mxxx(cq, cq); + + if (cq->cqe_sz != RTE_CACHE_LINE_SIZE) { + ERROR("Wrong MLX5_CQE_SIZE environment variable value: " + "it should be set to %u", RTE_CACHE_LINE_SIZE); + return EINVAL; } - memset(txq, 0, sizeof(*txq)); + tmpl->txq.cqe_n = ibcq->cqe + 1; + tmpl->txq.qp_num_8s = qp->ctrl_seg.qp_num << 8; + tmpl->txq.wqes = + (volatile union mlx5_wqe (*)[]) + (uintptr_t)qp->gen_data.sqstart; + tmpl->txq.wqe_n = qp->sq.wqe_cnt; + tmpl->txq.qp_db = &qp->gen_data.db[MLX5_SND_DBR]; + tmpl->txq.bf_reg = qp->gen_data.bf->reg; + tmpl->txq.bf_offset = qp->gen_data.bf->offset; + tmpl->txq.bf_buf_size = qp->gen_data.bf->buf_size; + tmpl->txq.cq_db = cq->dbrec; + tmpl->txq.cqes = + (volatile struct mlx5_cqe (*)[]) + (uintptr_t)cq->active_buf->buf; + tmpl->txq.elts = + (struct rte_mbuf *(*)[tmpl->txq.elts_n]) + ((uintptr_t)txq_ctrl + sizeof(*txq_ctrl)); + return 0; } /** @@ -254,7 +237,7 @@ txq_cleanup(struct txq *txq) * * @param dev * Pointer to Ethernet device structure. - * @param txq + * @param txq_ctrl * Pointer to TX queue structure. * @param desc * Number of descriptors to configure in queue. @@ -267,13 +250,14 @@ txq_cleanup(struct txq *txq) * 0 on success, errno value on failure. */ int -txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, - unsigned int socket, const struct rte_eth_txconf *conf) +txq_ctrl_setup(struct rte_eth_dev *dev, struct txq_ctrl *txq_ctrl, + uint16_t desc, unsigned int socket, + const struct rte_eth_txconf *conf) { struct priv *priv = mlx5_get_priv(dev); - struct txq tmpl = { + struct txq_ctrl tmpl = { .priv = priv, - .socket = socket + .socket = socket, }; union { struct ibv_exp_query_intf_params params; @@ -281,17 +265,19 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, struct ibv_exp_res_domain_init_attr rd; struct ibv_exp_cq_init_attr cq; struct ibv_exp_qp_attr mod; + struct ibv_exp_cq_attr cq_attr; } attr; enum ibv_exp_query_intf_status status; int ret = 0; - (void)conf; /* Thresholds configuration (ignored). */ - if ((desc == 0) || (desc % MLX5_PMD_SGE_WR_N)) { - ERROR("%p: invalid number of TX descriptors (must be a" - " multiple of %d)", (void *)dev, MLX5_PMD_SGE_WR_N); - return EINVAL; + if (mlx5_getenv_int("MLX5_ENABLE_CQE_COMPRESSION")) { + ret = ENOTSUP; + ERROR("MLX5_ENABLE_CQE_COMPRESSION must never be set"); + goto error; } - desc /= MLX5_PMD_SGE_WR_N; + (void)conf; /* Thresholds configuration (ignored). */ + assert(desc > MLX5_TX_COMP_THRESH); + tmpl.txq.elts_n = desc; /* MRs will be registered in mp2mr[] later. */ attr.rd = (struct ibv_exp_res_domain_init_attr){ .comp_mask = (IBV_EXP_RES_DOMAIN_THREAD_MODEL | @@ -310,7 +296,10 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, .comp_mask = IBV_EXP_CQ_INIT_ATTR_RES_DOMAIN, .res_domain = tmpl.rd, }; - tmpl.cq = ibv_exp_create_cq(priv->ctx, desc, NULL, NULL, 0, &attr.cq); + tmpl.cq = ibv_exp_create_cq(priv->ctx, + (((desc / MLX5_TX_COMP_THRESH) - 1) ? + ((desc / MLX5_TX_COMP_THRESH) - 1) : 1), + NULL, NULL, 0, &attr.cq); if (tmpl.cq == NULL) { ret = ENOMEM; ERROR("%p: CQ creation failure: %s", @@ -331,14 +320,14 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, .max_send_wr = ((priv->device_attr.max_qp_wr < desc) ? priv->device_attr.max_qp_wr : desc), - /* Max number of scatter/gather elements in a WR. 
*/ - .max_send_sge = ((priv->device_attr.max_sge < - MLX5_PMD_SGE_WR_N) ? - priv->device_attr.max_sge : - MLX5_PMD_SGE_WR_N), -#if MLX5_PMD_MAX_INLINE > 0 - .max_inline_data = MLX5_PMD_MAX_INLINE, -#endif + /* + * Max number of scatter/gather elements in a WR, + * must be 1 to prevent libmlx5 from trying to affect + * too much memory. TX gather is not impacted by the + * priv->device_attr.max_sge limit and will still work + * properly. + */ + .max_send_sge = 1, }, .qp_type = IBV_QPT_RAW_PACKET, /* Do *NOT* enable this, completions events are managed per @@ -349,6 +338,10 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, .comp_mask = (IBV_EXP_QP_INIT_ATTR_PD | IBV_EXP_QP_INIT_ATTR_RES_DOMAIN), }; + if (priv->txq_inline && priv->txqs_n >= priv->txqs_inline) { + tmpl.txq.max_inline = priv->txq_inline; + attr.init.cap.max_inline_data = tmpl.txq.max_inline; + } tmpl.qp = ibv_exp_create_qp(priv->ctx, &attr.init); if (tmpl.qp == NULL) { ret = (errno ? errno : EINVAL); @@ -356,10 +349,11 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, (void *)dev, strerror(ret)); goto error; } -#if MLX5_PMD_MAX_INLINE > 0 - /* ibv_create_qp() updates this value. */ - tmpl.max_inline = attr.init.cap.max_inline_data; -#endif + DEBUG("TX queue capabilities: max_send_wr=%u, max_send_sge=%u," + " max_inline_data=%u", + attr.init.cap.max_send_wr, + attr.init.cap.max_send_sge, + attr.init.cap.max_inline_data); attr.mod = (struct ibv_exp_qp_attr){ /* Move the QP to this state. */ .qp_state = IBV_QPS_INIT, @@ -373,12 +367,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, (void *)dev, strerror(ret)); goto error; } - ret = txq_alloc_elts(&tmpl, desc); + ret = txq_setup(&tmpl, txq_ctrl); if (ret) { - ERROR("%p: TXQ allocation failed: %s", + ERROR("%p: cannot initialize TX queue structure: %s", (void *)dev, strerror(ret)); goto error; } + txq_alloc_elts(&tmpl, desc); attr.mod = (struct ibv_exp_qp_attr){ .qp_state = IBV_QPS_RTR }; @@ -410,17 +405,13 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, attr.params = (struct ibv_exp_query_intf_params){ .intf_scope = IBV_EXP_INTF_GLOBAL, .intf = IBV_EXP_INTF_QP_BURST, - .obj = tmpl.qp, -#ifdef HAVE_VERBS_VLAN_INSERTION .intf_version = 1, -#endif -#ifdef HAVE_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR + .obj = tmpl.qp, /* Enable multi-packet send if supported. */ .family_flags = - (priv->mps ? + ((priv->mps && !priv->sriov) ? IBV_EXP_QP_BURST_CREATE_ENABLE_MULTI_PACKET_SEND_WR : 0), -#endif }; tmpl.if_qp = ibv_exp_query_intf(priv->ctx, &attr.params, &status); if (tmpl.if_qp == NULL) { @@ -430,30 +421,12 @@ txq_setup(struct rte_eth_dev *dev, struct txq *txq, uint16_t desc, goto error; } /* Clean up txq in case we're reinitializing it. 
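
The completion queue created above is sized to roughly desc / MLX5_TX_COMP_THRESH entries rather than desc, which fits a scheme where a completion is requested at most once every MLX5_TX_COMP_THRESH packets (the elts_comp counter in struct txq suggests as much); that reading is the editor's inference from the names, not something the diff states. A sketch mirroring the sizing expression:

/* desc is asserted to exceed comp_thresh earlier, so the division is >= 1. */
static unsigned int
tx_cq_entries(unsigned int desc, unsigned int comp_thresh)
{
        unsigned int n = (desc / comp_thresh) - 1;

        return n ? n : 1;        /* never request a zero-entry CQ */
}
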
*/ - DEBUG("%p: cleaning-up old txq just in case", (void *)txq); - txq_cleanup(txq); - *txq = tmpl; - txq->poll_cnt = txq->if_cq->poll_cnt; -#if MLX5_PMD_MAX_INLINE > 0 - txq->send_pending_inline = txq->if_qp->send_pending_inline; -#ifdef HAVE_VERBS_VLAN_INSERTION - txq->send_pending_inline_vlan = txq->if_qp->send_pending_inline_vlan; -#endif -#endif -#if MLX5_PMD_SGE_WR_N > 1 - txq->send_pending_sg_list = txq->if_qp->send_pending_sg_list; -#ifdef HAVE_VERBS_VLAN_INSERTION - txq->send_pending_sg_list_vlan = txq->if_qp->send_pending_sg_list_vlan; -#endif -#endif - txq->send_pending = txq->if_qp->send_pending; -#ifdef HAVE_VERBS_VLAN_INSERTION - txq->send_pending_vlan = txq->if_qp->send_pending_vlan; -#endif - txq->send_flush = txq->if_qp->send_flush; - DEBUG("%p: txq updated with %p", (void *)txq, (void *)&tmpl); + DEBUG("%p: cleaning-up old txq just in case", (void *)txq_ctrl); + txq_cleanup(txq_ctrl); + *txq_ctrl = tmpl; + DEBUG("%p: txq updated with %p", (void *)txq_ctrl, (void *)&tmpl); /* Pre-register known mempools. */ - rte_mempool_walk(txq_mp2mr_iter, txq); + rte_mempool_walk(txq_mp2mr_iter, txq_ctrl); assert(ret == 0); return 0; error: @@ -485,12 +458,26 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, { struct priv *priv = dev->data->dev_private; struct txq *txq = (*priv->txqs)[idx]; + struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq); int ret; if (mlx5_is_secondary()) return -E_RTE_SECONDARY; priv_lock(priv); + if (desc <= MLX5_TX_COMP_THRESH) { + WARN("%p: number of descriptors requested for TX queue %u" + " must be higher than MLX5_TX_COMP_THRESH, using" + " %u instead of %u", + (void *)dev, idx, MLX5_TX_COMP_THRESH + 1, desc); + desc = MLX5_TX_COMP_THRESH + 1; + } + if (!rte_is_power_of_2(desc)) { + desc = 1 << log2above(desc); + WARN("%p: increased number of descriptors in TX queue %u" + " to the next power of two (%d)", + (void *)dev, idx, desc); + } DEBUG("%p: configuring queue %u for %u descriptors", (void *)dev, idx, desc); if (idx >= priv->txqs_n) { @@ -507,26 +494,30 @@ mlx5_tx_queue_setup(struct rte_eth_dev *dev, uint16_t idx, uint16_t desc, return -EEXIST; } (*priv->txqs)[idx] = NULL; - txq_cleanup(txq); + txq_cleanup(txq_ctrl); } else { - txq = rte_calloc_socket("TXQ", 1, sizeof(*txq), 0, socket); - if (txq == NULL) { + txq_ctrl = + rte_calloc_socket("TXQ", 1, + sizeof(*txq_ctrl) + + desc * sizeof(struct rte_mbuf *), + 0, socket); + if (txq_ctrl == NULL) { ERROR("%p: unable to allocate queue index %u", (void *)dev, idx); priv_unlock(priv); return -ENOMEM; } } - ret = txq_setup(dev, txq, desc, socket, conf); + ret = txq_ctrl_setup(dev, txq_ctrl, desc, socket, conf); if (ret) - rte_free(txq); + rte_free(txq_ctrl); else { - txq->stats.idx = idx; + txq_ctrl->txq.stats.idx = idx; DEBUG("%p: adding TX queue %p to list", - (void *)dev, (void *)txq); - (*priv->txqs)[idx] = txq; + (void *)dev, (void *)txq_ctrl); + (*priv->txqs)[idx] = &txq_ctrl->txq; /* Update send callback. 
*/ - dev->tx_pkt_burst = mlx5_tx_burst; + priv_select_tx_function(priv); } priv_unlock(priv); return -ret; @@ -542,6 +533,7 @@ void mlx5_tx_queue_release(void *dpdk_txq) { struct txq *txq = (struct txq *)dpdk_txq; + struct txq_ctrl *txq_ctrl; struct priv *priv; unsigned int i; @@ -550,17 +542,18 @@ mlx5_tx_queue_release(void *dpdk_txq) if (txq == NULL) return; - priv = txq->priv; + txq_ctrl = container_of(txq, struct txq_ctrl, txq); + priv = txq_ctrl->priv; priv_lock(priv); for (i = 0; (i != priv->txqs_n); ++i) if ((*priv->txqs)[i] == txq) { DEBUG("%p: removing TX queue %p from list", - (void *)priv->dev, (void *)txq); + (void *)priv->dev, (void *)txq_ctrl); (*priv->txqs)[i] = NULL; break; } - txq_cleanup(txq); - rte_free(txq); + txq_cleanup(txq_ctrl); + rte_free(txq_ctrl); priv_unlock(priv); } @@ -585,7 +578,8 @@ mlx5_tx_burst_secondary_setup(void *dpdk_txq, struct rte_mbuf **pkts, uint16_t pkts_n) { struct txq *txq = dpdk_txq; - struct priv *priv = mlx5_secondary_data_setup(txq->priv); + struct txq_ctrl *txq_ctrl = container_of(txq, struct txq_ctrl, txq); + struct priv *priv = mlx5_secondary_data_setup(txq_ctrl->priv); struct priv *primary_priv; unsigned int index; diff --git a/drivers/net/mlx5/mlx5_vlan.c b/drivers/net/mlx5/mlx5_vlan.c index ea7af1e4..4719e697 100644 --- a/drivers/net/mlx5/mlx5_vlan.c +++ b/drivers/net/mlx5/mlx5_vlan.c @@ -144,7 +144,7 @@ static void priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) { struct rxq *rxq = (*priv->rxqs)[idx]; -#ifdef HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS + struct rxq_ctrl *rxq_ctrl = container_of(rxq, struct rxq_ctrl, rxq); struct ibv_exp_wq_attr mod; uint16_t vlan_offloads = (on ? IBV_EXP_RECEIVE_WQ_CVLAN_STRIP : 0) | @@ -158,15 +158,13 @@ priv_vlan_strip_queue_set(struct priv *priv, uint16_t idx, int on) .vlan_offloads = vlan_offloads, }; - err = ibv_exp_modify_wq(rxq->wq, &mod); + err = ibv_exp_modify_wq(rxq_ctrl->wq, &mod); if (err) { ERROR("%p: failed to modified stripping mode: %s", (void *)priv, strerror(err)); return; } -#endif /* HAVE_EXP_DEVICE_ATTR_VLAN_OFFLOADS */ - /* Update related bits in RX queue. 
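
The release, secondary-process and VLAN paths above receive only the data-path structure (struct txq or struct rxq) through the device callbacks and recover the enclosing control structure with container_of(). A self-contained illustration of that pattern using simplified stand-in types; the field layout here is the editor's, reduced to what the pointer arithmetic needs:

#include <stddef.h>
#include <stdint.h>

struct txq { uint16_t elts_n; /* data-path fields only */ };
struct txq_ctrl { void *priv; struct txq txq; /* embedded data path */ };

static struct txq_ctrl *
txq_to_ctrl(struct txq *txq)
{
        /* Same arithmetic as container_of(txq, struct txq_ctrl, txq). */
        return (struct txq_ctrl *)((char *)txq - offsetof(struct txq_ctrl, txq));
}
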
*/ rxq->vlan_strip = !!on; } @@ -218,7 +216,7 @@ mlx5_vlan_offload_set(struct rte_eth_dev *dev, int mask) unsigned int i; if (mask & ETH_VLAN_STRIP_MASK) { - int hw_vlan_strip = dev->data->dev_conf.rxmode.hw_vlan_strip; + int hw_vlan_strip = !!dev->data->dev_conf.rxmode.hw_vlan_strip; if (!priv->hw_vlan_strip) { ERROR("VLAN stripping is not supported"); diff --git a/drivers/net/mpipe/Makefile b/drivers/net/mpipe/Makefile index 46f046d5..846e2e07 100644 --- a/drivers/net/mpipe/Makefile +++ b/drivers/net/mpipe/Makefile @@ -42,6 +42,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += mpipe_tilegx.c DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_mempool lib/librte_mbuf -DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_net lib/librte_malloc +DEPDIRS-$(CONFIG_RTE_LIBRTE_MPIPE_PMD) += lib/librte_net include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/mpipe/mpipe_tilegx.c b/drivers/net/mpipe/mpipe_tilegx.c index adcbc19e..26e14248 100644 --- a/drivers/net/mpipe/mpipe_tilegx.c +++ b/drivers/net/mpipe/mpipe_tilegx.c @@ -516,7 +516,7 @@ mpipe_recv_fill_stack(struct mpipe_dev_priv *priv, int count) int i; for (i = 0; i < count; i++) { - mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool); + mbuf = rte_mbuf_raw_alloc(priv->rx_mpool); if (!mbuf) break; mpipe_recv_push(priv, mbuf); @@ -1452,7 +1452,7 @@ mpipe_do_recv(struct mpipe_rx_queue *rx_queue, struct rte_mbuf **rx_pkts, MPIPE_BUF_DEBT_THRESHOLD) mpipe_local.mbuf_push_debt[in_port]++; else { - mbuf = __rte_mbuf_raw_alloc(priv->rx_mpool); + mbuf = rte_mbuf_raw_alloc(priv->rx_mpool); if (unlikely(!mbuf)) { nb_nomem++; gxio_mpipe_iqueue_drop(iqueue, idesc); diff --git a/drivers/net/nfp/Makefile b/drivers/net/nfp/Makefile index 1dddd1fd..4cadd131 100644 --- a/drivers/net/nfp/Makefile +++ b/drivers/net/nfp/Makefile @@ -53,6 +53,6 @@ SRCS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += nfp_net.c # this lib depends upon: DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_mempool lib/librte_mbuf -DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_net lib/librte_malloc +DEPDIRS-$(CONFIG_RTE_LIBRTE_NFP_PMD) += lib/librte_net include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/nfp/nfp_net.c b/drivers/net/nfp/nfp_net.c index bcf5fa99..6afd49b1 100644 --- a/drivers/net/nfp/nfp_net.c +++ b/drivers/net/nfp/nfp_net.c @@ -54,6 +54,7 @@ #include <rte_version.h> #include <rte_string_fns.h> #include <rte_alarm.h> +#include <rte_spinlock.h> #include "nfp_net_pmd.h" #include "nfp_net_logs.h" @@ -322,7 +323,7 @@ nfp_net_tx_queue_release_mbufs(struct nfp_net_txq *txq) for (i = 0; i < txq->tx_count; i++) { if (txq->txbufs[i].mbuf) { - rte_pktmbuf_free_seg(txq->txbufs[i].mbuf); + rte_pktmbuf_free(txq->txbufs[i].mbuf); txq->txbufs[i].mbuf = NULL; } } @@ -407,6 +408,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) PMD_DRV_LOG(DEBUG, "nfp_net_reconfig: ctrl=%08x update=%08x\n", ctrl, update); + rte_spinlock_lock(&hw->reconfig_lock); + nn_cfg_writel(hw, NFP_NET_CFG_CTRL, ctrl); nn_cfg_writel(hw, NFP_NET_CFG_UPDATE, update); @@ -414,6 +417,8 @@ nfp_net_reconfig(struct nfp_net_hw *hw, uint32_t ctrl, uint32_t update) err = __nfp_net_reconfig(hw, update); + rte_spinlock_unlock(&hw->reconfig_lock); + if (!err) return 0; @@ -902,11 +907,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) nfp_dev_stats.obytes -= hw->eth_stats_base.obytes; - nfp_dev_stats.imcasts = - nn_cfg_readq(hw, 
NFP_NET_CFG_STATS_RX_MC_FRAMES); - - nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts; - /* reading general device stats */ nfp_dev_stats.ierrors = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); @@ -918,12 +918,6 @@ nfp_net_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) nfp_dev_stats.oerrors -= hw->eth_stats_base.oerrors; - /* Multicast frames received */ - nfp_dev_stats.imcasts = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES); - - nfp_dev_stats.imcasts -= hw->eth_stats_base.imcasts; - /* RX ring mbuf allocation failures */ nfp_dev_stats.rx_nombuf = dev->data->rx_mbuf_alloc_failed; @@ -985,9 +979,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev) hw->eth_stats_base.obytes = nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_OCTETS); - hw->eth_stats_base.imcasts = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES); - /* reading general device stats */ hw->eth_stats_base.ierrors = nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_ERRORS); @@ -995,10 +986,6 @@ nfp_net_stats_reset(struct rte_eth_dev *dev) hw->eth_stats_base.oerrors = nn_cfg_readq(hw, NFP_NET_CFG_STATS_TX_ERRORS); - /* Multicast frames received */ - hw->eth_stats_base.imcasts = - nn_cfg_readq(hw, NFP_NET_CFG_STATS_RX_MC_FRAMES); - /* RX ring mbuf allocation failures */ dev->data->rx_mbuf_alloc_failed = 0; @@ -1813,7 +1800,7 @@ nfp_net_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if ((rxds->rxd.flags & PCIE_DESC_RX_VLAN) && (hw->ctrl & NFP_NET_CFG_CTRL_RXVLAN)) { mb->vlan_tci = rte_cpu_to_le_32(rxds->rxd.vlan); - mb->ol_flags |= PKT_RX_VLAN_PKT; + mb->ol_flags |= PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED; } /* Adding the mbuff to the mbuff array passed by the app */ @@ -1989,11 +1976,16 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) */ pkt_size = pkt->pkt_len; - while (pkt_size) { - /* Releasing mbuf which was prefetched above */ - if (*lmbuf) - rte_pktmbuf_free_seg(*lmbuf); + /* Releasing mbuf which was prefetched above */ + if (*lmbuf) + rte_pktmbuf_free(*lmbuf); + /* + * Linking mbuf with descriptor for being released + * next time descriptor is used + */ + *lmbuf = pkt; + while (pkt_size) { dma_size = pkt->data_len; dma_addr = rte_mbuf_data_dma_addr(pkt); PMD_TX_LOG(DEBUG, "Working with mbuf at dma address:" @@ -2007,12 +1999,6 @@ nfp_net_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) ASSERT(free_descs > 0); free_descs--; - /* - * Linking mbuf with descriptor for being released - * next time descriptor is used - */ - *lmbuf = pkt; - txq->wr_p++; txq->tail++; if (unlikely(txq->tail == txq->tx_count)) /* wrapping?*/ @@ -2417,6 +2403,9 @@ nfp_net_init(struct rte_eth_dev *eth_dev) PMD_INIT_LOG(INFO, "max_rx_queues: %u, max_tx_queues: %u\n", hw->max_rx_queues, hw->max_tx_queues); + /* Initializing spinlock for reconfigs */ + rte_spinlock_init(&hw->reconfig_lock); + /* Allocating memory for mac addr */ eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); if (eth_dev->data->mac_addrs == NULL) { @@ -2457,16 +2446,12 @@ nfp_net_init(struct rte_eth_dev *eth_dev) static struct rte_pci_id pci_id_nfp_net_map[] = { { - .vendor_id = PCI_VENDOR_ID_NETRONOME, - .device_id = PCI_DEVICE_ID_NFP6000_PF_NIC, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID, + RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, + PCI_DEVICE_ID_NFP6000_PF_NIC) }, { - .vendor_id = PCI_VENDOR_ID_NETRONOME, - .device_id = PCI_DEVICE_ID_NFP6000_VF_NIC, - .subsystem_vendor_id = PCI_ANY_ID, - .subsystem_device_id = PCI_ANY_ID, + 
RTE_PCI_DEVICE(PCI_VENDOR_ID_NETRONOME, + PCI_DEVICE_ID_NFP6000_VF_NIC) }, { .vendor_id = 0, @@ -2477,7 +2462,8 @@ static struct eth_driver rte_nfp_net_pmd = { { .name = "rte_nfp_net_pmd", .id_table = pci_id_nfp_net_map, - .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC | + RTE_PCI_DRV_DETACHABLE, }, .eth_dev_init = nfp_net_init, .dev_private_size = sizeof(struct nfp_net_adapter), diff --git a/drivers/net/nfp/nfp_net_pmd.h b/drivers/net/nfp/nfp_net_pmd.h index 232ce5ca..c1809720 100644 --- a/drivers/net/nfp/nfp_net_pmd.h +++ b/drivers/net/nfp/nfp_net_pmd.h @@ -406,6 +406,7 @@ struct nfp_net_hw { int stride_tx; uint8_t *qcp_cfg; + rte_spinlock_t reconfig_lock; uint32_t max_tx_queues; uint32_t max_rx_queues; diff --git a/drivers/net/null/Makefile b/drivers/net/null/Makefile index 22023891..0c909c6f 100644 --- a/drivers/net/null/Makefile +++ b/drivers/net/null/Makefile @@ -54,7 +54,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += rte_eth_null.c SYMLINK-y-include += rte_eth_null.h # this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_mempool DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_NULL) += lib/librte_kvargs diff --git a/drivers/net/null/rte_eth_null.c b/drivers/net/null/rte_eth_null.c index 5e8e203c..ab440f3b 100644 --- a/drivers/net/null/rte_eth_null.c +++ b/drivers/net/null/rte_eth_null.c @@ -69,6 +69,7 @@ struct null_queue { struct pmd_internals { unsigned packet_size; unsigned packet_copy; + uint8_t port_id; struct null_queue rx_null_queues[RTE_MAX_QUEUES_PER_PORT]; struct null_queue tx_null_queues[RTE_MAX_QUEUES_PER_PORT]; @@ -114,6 +115,7 @@ eth_null_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) bufs[i]->pkt_len = packet_size; bufs[i]->nb_segs = 1; bufs[i]->next = NULL; + bufs[i]->port = h->internals->port_id; } rte_atomic64_add(&(h->rx_pkts), i); @@ -142,6 +144,7 @@ eth_null_copy_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) bufs[i]->pkt_len = packet_size; bufs[i]->nb_segs = 1; bufs[i]->next = NULL; + bufs[i]->port = h->internals->port_id; } rte_atomic64_add(&(h->rx_pkts), i); @@ -529,6 +532,7 @@ eth_dev_null_create(const char *name, internals->packet_size = packet_size; internals->packet_copy = packet_copy; + internals->port_id = eth_dev->data->port_id; internals->flow_type_rss_offloads = ETH_RSS_PROTO_MASK; internals->reta_size = RTE_DIM(internals->reta_conf) * RTE_RETA_GROUP_SIZE; diff --git a/drivers/net/pcap/Makefile b/drivers/net/pcap/Makefile index b41d8a27..89ac4024 100644 --- a/drivers/net/pcap/Makefile +++ b/drivers/net/pcap/Makefile @@ -56,7 +56,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += rte_eth_pcap.c SYMLINK-y-include += # this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_mempool DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_PCAP) += lib/librte_kvargs diff --git a/drivers/net/pcap/rte_eth_pcap.c b/drivers/net/pcap/rte_eth_pcap.c index c98e2341..c86f17b6 100644 --- a/drivers/net/pcap/rte_eth_pcap.c +++ b/drivers/net/pcap/rte_eth_pcap.c @@ -978,8 +978,8 @@ rte_pmd_pcap_devinit(const char *name, const char *params) unsigned numa_node, using_dumpers = 0; int ret; struct rte_kvargs *kvlist; - struct rx_pcaps pcaps; - struct 
tx_pcaps dumpers; + struct rx_pcaps pcaps = {0}; + struct tx_pcaps dumpers = {0}; RTE_LOG(INFO, PMD, "Initializing pmd_pcap for %s\n", name); diff --git a/drivers/net/qede/LICENSE.qede_pmd b/drivers/net/qede/LICENSE.qede_pmd new file mode 100644 index 00000000..c7cbdccc --- /dev/null +++ b/drivers/net/qede/LICENSE.qede_pmd @@ -0,0 +1,28 @@ +/* + * BSD LICENSE + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of QLogic Corporation nor the name of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written consent. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS' + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF + * THE POSSIBILITY OF SUCH DAMAGE. + */ diff --git a/drivers/net/qede/Makefile b/drivers/net/qede/Makefile new file mode 100644 index 00000000..fe449aa9 --- /dev/null +++ b/drivers/net/qede/Makefile @@ -0,0 +1,106 @@ +# Copyright (c) 2016 QLogic Corporation. +# All rights reserved. +# www.qlogic.com +# +# See LICENSE.qede_pmd for copyright and licensing details. 
+ +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_qede.a + +CFLAGS += -O3 +CFLAGS += $(WERROR_FLAGS) + +LDLIBS += -lz + +EXPORT_MAP := rte_pmd_qede_version.map + +LIBABIVER := 1 + +# +# OS +# +OS_TYPE := $(shell uname -s) + +# +# CFLAGS +# +CFLAGS_BASE_DRIVER = -Wno-unused-parameter +CFLAGS_BASE_DRIVER += -Wno-sign-compare +CFLAGS_BASE_DRIVER += -Wno-missing-prototypes +CFLAGS_BASE_DRIVER += -Wno-cast-qual +CFLAGS_BASE_DRIVER += -Wno-unused-function +CFLAGS_BASE_DRIVER += -Wno-unused-variable +CFLAGS_BASE_DRIVER += -Wno-strict-aliasing +CFLAGS_BASE_DRIVER += -Wno-missing-prototypes + +ifneq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) +CFLAGS_BASE_DRIVER += -Wno-unused-value +CFLAGS_BASE_DRIVER += -Wno-format-nonliteral +ifeq ($(OS_TYPE),Linux) +ifeq ($(shell clang -Wno-shift-negative-value -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0) +CFLAGS_BASE_DRIVER += -Wno-shift-negative-value +endif +endif +endif + +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_BASE_DRIVER += -Wno-unused-but-set-variable +CFLAGS_BASE_DRIVER += -Wno-missing-declarations +CFLAGS_BASE_DRIVER += -Wno-maybe-uninitialized +CFLAGS_BASE_DRIVER += -Wno-strict-prototypes +ifeq ($(shell test $(GCC_VERSION) -ge 60 && echo 1), 1) +CFLAGS_BASE_DRIVER += -Wno-shift-negative-value +endif +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) +CFLAGS_BASE_DRIVER += -Wno-format-extra-args +CFLAGS_BASE_DRIVER += -Wno-visibility +CFLAGS_BASE_DRIVER += -Wno-empty-body +CFLAGS_BASE_DRIVER += -Wno-invalid-source-encoding +CFLAGS_BASE_DRIVER += -Wno-sometimes-uninitialized +ifeq ($(shell clang -Wno-pointer-bool-conversion -Werror -E - < /dev/null > /dev/null 2>&1; echo $$?),0) +CFLAGS_BASE_DRIVER += -Wno-pointer-bool-conversion +endif +else +CFLAGS_BASE_DRIVER += -wd188 #188: enumerated type mixed with another type +endif + +# +# Add extra flags for base ecore driver files +# to disable warnings in them +# +# +BASE_DRIVER_OBJS=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(BASE_DRIVER_OBJS), $(eval CFLAGS+=$(CFLAGS_BASE_DRIVER))) + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_hw.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_cxt.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_l2.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sp_commands.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_fw_funcs.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_spq.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_init_ops.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_mcp.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_int.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_dcbx.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/bcm_osal.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_sriov.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += base/ecore_vf.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_ethdev.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_eth_if.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_main.c +SRCS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += qede_rxtx.c + +# dependent libs: +DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_mempool lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_QEDE_PMD) += lib/librte_net + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/qede/base/bcm_osal.c b/drivers/net/qede/base/bcm_osal.c new file mode 100644 index 00000000..16029b58 --- /dev/null +++ 
b/drivers/net/qede/base/bcm_osal.c @@ -0,0 +1,202 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include <zlib.h> + +#include <rte_memzone.h> +#include <rte_errno.h> + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_hw.h" +#include "ecore_iov_api.h" +#include "ecore_mcp_api.h" +#include "ecore_l2_api.h" + + +unsigned long qede_log2_align(unsigned long n) +{ + unsigned long ret = n ? 1 : 0; + unsigned long _n = n >> 1; + + while (_n) { + _n >>= 1; + ret <<= 1; + } + + if (ret < n) + ret <<= 1; + + return ret; +} + +u32 qede_osal_log2(u32 val) +{ + u32 log = 0; + + while (val >>= 1) + log++; + + return log; +} + +inline void qede_set_bit(u32 nr, unsigned long *addr) +{ + __sync_fetch_and_or(addr, (1UL << nr)); +} + +inline void qede_clr_bit(u32 nr, unsigned long *addr) +{ + __sync_fetch_and_and(addr, ~(1UL << nr)); +} + +inline bool qede_test_bit(u32 nr, unsigned long *addr) +{ + bool res; + + rte_mb(); + res = ((*addr) & (1UL << nr)) != 0; + rte_mb(); + return res; +} + +static inline u32 qede_ffz(unsigned long word) +{ + unsigned long first_zero; + + first_zero = __builtin_ffsl(~word); + return first_zero ? (first_zero - 1) : OSAL_BITS_PER_UL; +} + +inline u32 qede_find_first_zero_bit(unsigned long *addr, u32 limit) +{ + u32 i; + u32 nwords = 0; + OSAL_BUILD_BUG_ON(!limit); + nwords = (limit - 1) / OSAL_BITS_PER_UL + 1; + for (i = 0; i < nwords; i++) + if (~(addr[i] != 0)) + break; + return (i == nwords) ? limit : i * OSAL_BITS_PER_UL + qede_ffz(addr[i]); +} + +void qede_vf_fill_driver_data(struct ecore_hwfn *hwfn, + __rte_unused struct vf_pf_resc_request *resc_req, + struct ecore_vf_acquire_sw_info *vf_sw_info) +{ + vf_sw_info->os_type = VFPF_ACQUIRE_OS_LINUX_USERSPACE; + vf_sw_info->override_fw_version = 1; +} + +void *osal_dma_alloc_coherent(struct ecore_dev *p_dev, + dma_addr_t *phys, size_t size) +{ + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t core_id = rte_lcore_id(); + unsigned int socket_id; + + OSAL_MEM_ZERO(mz_name, sizeof(*mz_name)); + snprintf(mz_name, sizeof(mz_name) - 1, "%lx", + (unsigned long)rte_get_timer_cycles()); + if (core_id == (unsigned int)LCORE_ID_ANY) + core_id = 0; + socket_id = rte_lcore_to_socket_id(core_id); + mz = rte_memzone_reserve_aligned(mz_name, size, + socket_id, 0, RTE_CACHE_LINE_SIZE); + if (!mz) { + DP_ERR(p_dev, "Unable to allocate DMA memory " + "of size %zu bytes - %s\n", + size, rte_strerror(rte_errno)); + *phys = 0; + return OSAL_NULL; + } + *phys = mz->phys_addr; + DP_VERBOSE(p_dev, ECORE_MSG_PROBE, + "size=%zu phys=0x%" PRIx64 " virt=%p on socket=%u\n", + mz->len, mz->phys_addr, mz->addr, socket_id); + return mz->addr; +} + +void *osal_dma_alloc_coherent_aligned(struct ecore_dev *p_dev, + dma_addr_t *phys, size_t size, int align) +{ + const struct rte_memzone *mz; + char mz_name[RTE_MEMZONE_NAMESIZE]; + uint32_t core_id = rte_lcore_id(); + unsigned int socket_id; + + OSAL_MEM_ZERO(mz_name, sizeof(*mz_name)); + snprintf(mz_name, sizeof(mz_name) - 1, "%lx", + (unsigned long)rte_get_timer_cycles()); + if (core_id == (unsigned int)LCORE_ID_ANY) + core_id = 0; + socket_id = rte_lcore_to_socket_id(core_id); + mz = rte_memzone_reserve_aligned(mz_name, size, socket_id, 0, align); + if (!mz) { + DP_ERR(p_dev, "Unable to allocate DMA memory " + "of size %zu bytes - %s\n", + size, rte_strerror(rte_errno)); + *phys = 0; + return OSAL_NULL; + } + *phys = mz->phys_addr; + DP_VERBOSE(p_dev, 
ECORE_MSG_PROBE, + "aligned memory size=%zu phys=0x%" PRIx64 " virt=%p core=%d\n", + mz->len, mz->phys_addr, mz->addr, core_id); + return mz->addr; +} + +u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len, + u8 *input_buf, u32 max_size, u8 *unzip_buf) +{ + int rc; + + p_hwfn->stream->next_in = input_buf; + p_hwfn->stream->avail_in = input_len; + p_hwfn->stream->next_out = unzip_buf; + p_hwfn->stream->avail_out = max_size; + + rc = inflateInit2(p_hwfn->stream, MAX_WBITS); + + if (rc != Z_OK) { + DP_ERR(p_hwfn, + "zlib init failed, rc = %d\n", rc); + return 0; + } + + rc = inflate(p_hwfn->stream, Z_FINISH); + inflateEnd(p_hwfn->stream); + + if (rc != Z_OK && rc != Z_STREAM_END) { + DP_ERR(p_hwfn, + "FW unzip error: %s, rc=%d\n", p_hwfn->stream->msg, + rc); + return 0; + } + + return p_hwfn->stream->total_out / 4; +} + +void +qede_get_mcp_proto_stats(struct ecore_dev *edev, + enum ecore_mcp_protocol_type type, + union ecore_mcp_protocol_stats *stats) +{ + struct ecore_eth_stats lan_stats; + + if (type == ECORE_MCP_LAN_STATS) { + ecore_get_vport_stats(edev, &lan_stats); + stats->lan_stats.ucast_rx_pkts = lan_stats.rx_ucast_pkts; + stats->lan_stats.ucast_tx_pkts = lan_stats.tx_ucast_pkts; + stats->lan_stats.fcs_err = -1; + } else { + DP_INFO(edev, "Statistics request type %d not supported\n", + type); + } +} diff --git a/drivers/net/qede/base/bcm_osal.h b/drivers/net/qede/base/bcm_osal.h new file mode 100644 index 00000000..3e2aeb03 --- /dev/null +++ b/drivers/net/qede/base/bcm_osal.h @@ -0,0 +1,403 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __BCM_OSAL_H +#define __BCM_OSAL_H + +#include <rte_byteorder.h> +#include <rte_spinlock.h> +#include <rte_malloc.h> +#include <rte_atomic.h> +#include <rte_memcpy.h> +#include <rte_log.h> +#include <rte_cycles.h> +#include <rte_debug.h> +#include <rte_ether.h> + +/* Forward declaration */ +struct ecore_dev; +struct ecore_hwfn; +struct ecore_vf_acquire_sw_info; +struct vf_pf_resc_request; +enum ecore_mcp_protocol_type; +union ecore_mcp_protocol_stats; + +void qed_link_update(struct ecore_hwfn *hwfn); + +#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN +#undef __BIG_ENDIAN +#ifndef __LITTLE_ENDIAN +#define __LITTLE_ENDIAN +#endif +#else +#undef __LITTLE_ENDIAN +#ifndef __BIG_ENDIAN +#define __BIG_ENDIAN +#endif +#endif + +/* Memory Types */ +typedef uint8_t u8; +typedef uint16_t u16; +typedef uint32_t u32; +typedef uint64_t u64; + +typedef int16_t s16; +typedef int32_t s32; + +typedef u16 __le16; +typedef u32 __le32; +typedef u32 OSAL_BE32; + +#define osal_uintptr_t uintptr_t + +typedef phys_addr_t dma_addr_t; + +typedef rte_spinlock_t osal_spinlock_t; + +typedef void *osal_dpc_t; + +typedef size_t osal_size_t; + +typedef intptr_t osal_int_ptr_t; + +typedef int bool; +#define true 1 +#define false 0 + +#define nothing do {} while (0) + +/* Delays */ + +#define DELAY(x) rte_delay_us(x) +#define usec_delay(x) DELAY(x) +#define msec_delay(x) DELAY(1000 * (x)) +#define OSAL_UDELAY(time) usec_delay(time) +#define OSAL_MSLEEP(time) msec_delay(time) + +/* Memory allocations and deallocations */ + +#define OSAL_NULL ((void *)0) +#define OSAL_ALLOC(dev, GFP, size) rte_malloc("qede", size, 0) +#define OSAL_ZALLOC(dev, GFP, size) rte_zmalloc("qede", size, 0) +#define OSAL_CALLOC(dev, GFP, num, size) rte_calloc("qede", num, size, 0) +#define OSAL_VALLOC(dev, size) rte_malloc("qede", size, 0) +#define OSAL_FREE(dev, memory) rte_free((void 
*)memory) +#define OSAL_VFREE(dev, memory) OSAL_FREE(dev, memory) +#define OSAL_MEM_ZERO(mem, size) bzero(mem, size) +#define OSAL_MEMCPY(dst, src, size) rte_memcpy(dst, src, size) +#define OSAL_MEMCMP(s1, s2, size) memcmp(s1, s2, size) +#define OSAL_MEMSET(dst, val, length) \ + memset(dst, val, length) + +void *osal_dma_alloc_coherent(struct ecore_dev *, dma_addr_t *, size_t); + +void *osal_dma_alloc_coherent_aligned(struct ecore_dev *, dma_addr_t *, + size_t, int); + +#define OSAL_DMA_ALLOC_COHERENT(dev, phys, size) \ + osal_dma_alloc_coherent(dev, phys, size) + +#define OSAL_DMA_ALLOC_COHERENT_ALIGNED(dev, phys, size, align) \ + osal_dma_alloc_coherent_aligned(dev, phys, size, align) + +/* TODO: */ +#define OSAL_DMA_FREE_COHERENT(dev, virt, phys, size) nothing + +/* HW reads/writes */ + +#define DIRECT_REG_RD(_dev, _reg_addr) \ + (*((volatile u32 *) (_reg_addr))) + +#define REG_RD(_p_hwfn, _reg_offset) \ + DIRECT_REG_RD(_p_hwfn, \ + ((u8 *)(uintptr_t)(_p_hwfn->regview) + (_reg_offset))) + +#define DIRECT_REG_WR16(_reg_addr, _val) \ + (*((volatile u16 *)(_reg_addr)) = _val) + +#define DIRECT_REG_WR(_dev, _reg_addr, _val) \ + (*((volatile u32 *)(_reg_addr)) = _val) + +#define REG_WR(_p_hwfn, _reg_offset, _val) \ + DIRECT_REG_WR(NULL, \ + ((u8 *)((uintptr_t)(_p_hwfn->regview)) + (_reg_offset)), (u32)_val) + +#define REG_WR16(_p_hwfn, _reg_offset, _val) \ + DIRECT_REG_WR16(((u8 *)(uintptr_t)(_p_hwfn->regview) + \ + (_reg_offset)), (u16)_val) + +#define DOORBELL(_p_hwfn, _db_addr, _val) \ + DIRECT_REG_WR(_p_hwfn, \ + ((u8 *)(uintptr_t)(_p_hwfn->doorbells) + (_db_addr)), (u32)_val) + +/* Mutexes */ + +typedef pthread_mutex_t osal_mutex_t; +#define OSAL_MUTEX_RELEASE(lock) pthread_mutex_unlock(lock) +#define OSAL_MUTEX_INIT(lock) pthread_mutex_init(lock, NULL) +#define OSAL_MUTEX_ACQUIRE(lock) pthread_mutex_lock(lock) +#define OSAL_MUTEX_ALLOC(hwfn, lock) nothing +#define OSAL_MUTEX_DEALLOC(lock) nothing + +/* Spinlocks */ + +#define OSAL_SPIN_LOCK_INIT(lock) rte_spinlock_init(lock) +#define OSAL_SPIN_LOCK(lock) rte_spinlock_lock(lock) +#define OSAL_SPIN_UNLOCK(lock) rte_spinlock_unlock(lock) +#define OSAL_SPIN_LOCK_IRQSAVE(lock, flags) nothing +#define OSAL_SPIN_UNLOCK_IRQSAVE(lock, flags) nothing +#define OSAL_SPIN_LOCK_ALLOC(hwfn, lock) nothing +#define OSAL_SPIN_LOCK_DEALLOC(lock) nothing + +/* DPC */ + +#define OSAL_DPC_ALLOC(hwfn) OSAL_ALLOC(hwfn, GFP, sizeof(osal_dpc_t)) +#define OSAL_DPC_INIT(dpc, hwfn) nothing +#define OSAL_POLL_MODE_DPC(hwfn) nothing + +/* Lists */ + +#define OSAL_LIST_SPLICE_INIT(new_list, list) nothing +#define OSAL_LIST_SPLICE_TAIL_INIT(new_list, list) nothing + +typedef struct _osal_list_entry_t { + struct _osal_list_entry_t *next, *prev; +} osal_list_entry_t; + +typedef struct osal_list_t { + osal_list_entry_t *head, *tail; + unsigned long cnt; +} osal_list_t; + +#define OSAL_LIST_INIT(list) \ + do { \ + (list)->head = NULL; \ + (list)->tail = NULL; \ + (list)->cnt = 0; \ + } while (0) + +#define OSAL_LIST_PUSH_HEAD(entry, list) \ + do { \ + (entry)->prev = (osal_list_entry_t *)0; \ + (entry)->next = (list)->head; \ + if ((list)->tail == (osal_list_entry_t *)0) { \ + (list)->tail = (entry); \ + } else { \ + (list)->head->prev = (entry); \ + } \ + (list)->head = (entry); \ + (list)->cnt++; \ + } while (0) + +#define OSAL_LIST_PUSH_TAIL(entry, list) \ + do { \ + (entry)->next = (osal_list_entry_t *)0; \ + (entry)->prev = (list)->tail; \ + if ((list)->tail) { \ + (list)->tail->next = (entry); \ + } else { \ + (list)->head = (entry); \ + } \ + (list)->tail = 
(entry); \ + (list)->cnt++; \ + } while (0) + +#define OSAL_LIST_FIRST_ENTRY(list, type, field) \ + (type *)((list)->head) + +#define OSAL_LIST_REMOVE_ENTRY(entry, list) \ + do { \ + if ((list)->head == (entry)) { \ + if ((list)->head) { \ + (list)->head = (list)->head->next; \ + if ((list)->head) { \ + (list)->head->prev = (osal_list_entry_t *)0;\ + } else { \ + (list)->tail = (osal_list_entry_t *)0; \ + } \ + (list)->cnt--; \ + } \ + } else if ((list)->tail == (entry)) { \ + if ((list)->tail) { \ + (list)->tail = (list)->tail->prev; \ + if ((list)->tail) { \ + (list)->tail->next = (osal_list_entry_t *)0;\ + } else { \ + (list)->head = (osal_list_entry_t *)0; \ + } \ + (list)->cnt--; \ + } \ + } else { \ + (entry)->prev->next = (entry)->next; \ + (entry)->next->prev = (entry)->prev; \ + (list)->cnt--; \ + } \ + } while (0) + +#define OSAL_LIST_IS_EMPTY(list) \ + ((list)->cnt == 0) + +#define OSAL_LIST_NEXT(entry, field, type) \ + (type *)((&((entry)->field))->next) + +/* TODO: Check field, type order */ + +#define OSAL_LIST_FOR_EACH_ENTRY(entry, list, field, type) \ + for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field); \ + entry; \ + entry = OSAL_LIST_NEXT(entry, field, type)) + +#define OSAL_LIST_FOR_EACH_ENTRY_SAFE(entry, tmp_entry, list, field, type) \ + for (entry = OSAL_LIST_FIRST_ENTRY(list, type, field), \ + tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL; \ + entry != NULL; \ + entry = (type *)tmp_entry, \ + tmp_entry = (entry) ? OSAL_LIST_NEXT(entry, field, type) : NULL) + +/* TODO: OSAL_LIST_INSERT_ENTRY_AFTER */ +#define OSAL_LIST_INSERT_ENTRY_AFTER(new_entry, entry, list) \ + OSAL_LIST_PUSH_HEAD(new_entry, list) + +/* PCI config space */ + +#define OSAL_PCI_READ_CONFIG_BYTE(dev, address, dst) nothing +#define OSAL_PCI_READ_CONFIG_WORD(dev, address, dst) nothing +#define OSAL_PCI_READ_CONFIG_DWORD(dev, address, dst) nothing +#define OSAL_PCI_FIND_EXT_CAPABILITY(dev, pcie_id) 0 +#define OSAL_PCI_FIND_CAPABILITY(dev, pcie_id) 0 +#define OSAL_PCI_WRITE_CONFIG_WORD(dev, address, val) nothing +#define OSAL_BAR_SIZE(dev, bar_id) 0 + +/* Barriers */ + +#define OSAL_MMIOWB(dev) rte_wmb() +#define OSAL_BARRIER(dev) rte_compiler_barrier() +#define OSAL_SMP_RMB(dev) rte_rmb() +#define OSAL_SMP_WMB(dev) rte_wmb() +#define OSAL_RMB(dev) rte_rmb() +#define OSAL_WMB(dev) rte_wmb() +#define OSAL_DMA_SYNC(dev, addr, length, is_post) nothing + +#define OSAL_BITS_PER_BYTE (8) +#define OSAL_BITS_PER_UL (sizeof(unsigned long) * OSAL_BITS_PER_BYTE) +#define OSAL_BITS_PER_UL_MASK (OSAL_BITS_PER_UL - 1) + +/* Bitops */ +void qede_set_bit(u32, unsigned long *); +#define OSAL_SET_BIT(bit, bitmap) \ + qede_set_bit(bit, bitmap) + +void qede_clr_bit(u32, unsigned long *); +#define OSAL_CLEAR_BIT(bit, bitmap) \ + qede_clr_bit(bit, bitmap) + +bool qede_test_bit(u32, unsigned long *); +#define OSAL_TEST_BIT(bit, bitmap) \ + qede_test_bit(bit, bitmap) + +u32 qede_find_first_zero_bit(unsigned long *, u32); +#define OSAL_FIND_FIRST_ZERO_BIT(bitmap, length) \ + qede_find_first_zero_bit(bitmap, length) + +#define OSAL_BUILD_BUG_ON(cond) nothing +#define ETH_ALEN ETHER_ADDR_LEN + +#define OSAL_LINK_UPDATE(hwfn) qed_link_update(hwfn) +#define OSAL_DCBX_AEN(hwfn, mib_type) nothing + +/* SR-IOV channel */ + +#define OSAL_VF_FLR_UPDATE(hwfn) nothing +#define OSAL_VF_SEND_MSG2PF(dev, done, msg, reply_addr, msg_size, reply_size) 0 +#define OSAL_VF_CQE_COMPLETION(_dev_p, _cqe, _protocol) (0) +#define OSAL_PF_VF_MSG(hwfn, vfid) 0 +#define OSAL_IOV_CHK_UCAST(hwfn, vfid, params) 0 +#define 
OSAL_IOV_POST_START_VPORT(hwfn, vf, vport_id, opaque_fid) nothing +#define OSAL_IOV_VF_ACQUIRE(hwfn, vfid) 0 +#define OSAL_IOV_VF_CLEANUP(hwfn, vfid) nothing +#define OSAL_IOV_VF_VPORT_UPDATE(hwfn, vfid, p_params, p_mask) 0 +#define OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(_dev_p, _resc_resp) 0 +#define OSAL_IOV_GET_OS_TYPE() 0 + +u32 qede_unzip_data(struct ecore_hwfn *p_hwfn, u32 input_len, + u8 *input_buf, u32 max_size, u8 *unzip_buf); +void qede_vf_fill_driver_data(struct ecore_hwfn *, struct vf_pf_resc_request *, + struct ecore_vf_acquire_sw_info *); +#define OSAL_VF_FILL_ACQUIRE_RESC_REQ(_dev_p, _resc_req, _os_info) \ + qede_vf_fill_driver_data(_dev_p, _resc_req, _os_info) + +#define OSAL_UNZIP_DATA(p_hwfn, input_len, buf, max_size, unzip_buf) \ + qede_unzip_data(p_hwfn, input_len, buf, max_size, unzip_buf) + +/* TODO: */ +#define OSAL_SCHEDULE_RECOVERY_HANDLER(hwfn) nothing +#define OSAL_HW_ERROR_OCCURRED(hwfn, err_type) nothing + +#define OSAL_NVM_IS_ACCESS_ENABLED(hwfn) (1) +#define OSAL_NUM_ACTIVE_CPU() 0 + +/* Utility functions */ + +#define RTE_DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) +#define DIV_ROUND_UP(size, to_what) RTE_DIV_ROUND_UP(size, to_what) +#define RTE_ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) +#define ROUNDUP(value, to_what) RTE_ROUNDUP((value), (to_what)) + +unsigned long qede_log2_align(unsigned long n); +#define OSAL_ROUNDUP_POW_OF_TWO(val) \ + qede_log2_align(val) + +u32 qede_osal_log2(u32); +#define OSAL_LOG2(val) \ + qede_osal_log2(val) + +#define PRINT(format, ...) printf +#define PRINT_ERR(format, ...) PRINT + +#define OFFSETOF(str, field) __builtin_offsetof(str, field) +#define OSAL_ASSERT(is_assert) assert(is_assert) +#define OSAL_BEFORE_PF_START(file, engine) nothing +#define OSAL_AFTER_PF_STOP(file, engine) nothing + +/* Endian macros */ +#define OSAL_CPU_TO_BE32(val) rte_cpu_to_be_32(val) +#define OSAL_BE32_TO_CPU(val) rte_be_to_cpu_32(val) +#define OSAL_CPU_TO_LE32(val) rte_cpu_to_le_32(val) +#define OSAL_CPU_TO_LE16(val) rte_cpu_to_le_16(val) +#define OSAL_LE32_TO_CPU(val) rte_le_to_cpu_32(val) +#define OSAL_LE16_TO_CPU(val) rte_le_to_cpu_16(val) +#define OSAL_CPU_TO_BE64(val) rte_cpu_to_be_64(val) + +#define OSAL_ARRAY_SIZE(arr) RTE_DIM(arr) +#define OSAL_SPRINTF(name, pattern, ...) \ + sprintf(name, pattern, ##__VA_ARGS__) +#define OSAL_STRLEN(string) strlen(string) +#define OSAL_STRCPY(dst, string) strcpy(dst, string) +#define OSAL_STRNCPY(dst, string, len) strncpy(dst, string, len) +#define OSAL_STRCMP(str1, str2) strcmp(str1, str2) + +#define OSAL_INLINE inline +#define OSAL_REG_ADDR(_p_hwfn, _offset) \ + (void *)((u8 *)(uintptr_t)(_p_hwfn->regview) + (_offset)) +#define OSAL_PAGE_SIZE 4096 +#define OSAL_IOMEM volatile +#define OSAL_UNLIKELY(x) __builtin_expect(!!(x), 0) +#define OSAL_MIN_T(type, __min1, __min2) \ + ((type)(__min1) < (type)(__min2) ? (type)(__min1) : (type)(__min2)) +#define OSAL_MAX_T(type, __max1, __max2) \ + ((type)(__max1) > (type)(__max2) ? (type)(__max1) : (type)(__max2)) + +void qede_get_mcp_proto_stats(struct ecore_dev *, enum ecore_mcp_protocol_type, + union ecore_mcp_protocol_stats *); +#define OSAL_GET_PROTOCOL_STATS(dev, type, stats) \ + qede_get_mcp_proto_stats(dev, type, stats) + +#define OSAL_SLOWPATH_IRQ_REQ(p_hwfn) (0) + +#endif /* __BCM_OSAL_H */ diff --git a/drivers/net/qede/base/common_hsi.h b/drivers/net/qede/base/common_hsi.h new file mode 100644 index 00000000..295a41f9 --- /dev/null +++ b/drivers/net/qede/base/common_hsi.h @@ -0,0 +1,714 @@ +/* + * Copyright (c) 2016 QLogic Corporation. 
+ * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __COMMON_HSI__ +#define __COMMON_HSI__ + +#define CORE_SPQE_PAGE_SIZE_BYTES 4096 + +#define FW_MAJOR_VERSION 8 +#define FW_MINOR_VERSION 7 +#define FW_REVISION_VERSION 7 +#define FW_ENGINEERING_VERSION 0 + +/***********************/ +/* COMMON HW CONSTANTS */ +/***********************/ + +/* PCI functions */ +#define MAX_NUM_PORTS_K2 (4) +#define MAX_NUM_PORTS_BB (2) +#define MAX_NUM_PORTS (MAX_NUM_PORTS_K2) + +#define MAX_NUM_PFS_K2 (16) +#define MAX_NUM_PFS_BB (8) +#define MAX_NUM_PFS (MAX_NUM_PFS_K2) +#define MAX_NUM_OF_PFS_IN_CHIP (16) /* On both engines */ + +#define MAX_NUM_VFS_K2 (192) +#define MAX_NUM_VFS_BB (120) +#define MAX_NUM_VFS (MAX_NUM_VFS_K2) + +#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB) +#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB) +#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS) + +#define MAX_NUM_VPORTS_K2 (208) +#define MAX_NUM_VPORTS_BB (160) +#define MAX_NUM_VPORTS (MAX_NUM_VPORTS_K2) + +#define MAX_NUM_L2_QUEUES_K2 (320) +#define MAX_NUM_L2_QUEUES_BB (256) +#define MAX_NUM_L2_QUEUES (MAX_NUM_L2_QUEUES_K2) + +/* Traffic classes in network-facing blocks (PBF, BTB, NIG, BRB, PRS and QM) */ +#define NUM_PHYS_TCS_4PORT_K2 (4) +#define NUM_OF_PHYS_TCS (8) + +#define NUM_TCS_4PORT_K2 (NUM_PHYS_TCS_4PORT_K2 + 1) +#define NUM_OF_TCS (NUM_OF_PHYS_TCS + 1) + +#define LB_TC (NUM_OF_PHYS_TCS) + +/* Num of possible traffic priority values */ +#define NUM_OF_PRIO (8) + +#define MAX_NUM_VOQS_K2 (NUM_TCS_4PORT_K2 * MAX_NUM_PORTS_K2) +#define MAX_NUM_VOQS_BB (NUM_OF_TCS * MAX_NUM_PORTS_BB) +#define MAX_NUM_VOQS (MAX_NUM_VOQS_K2) +#define MAX_PHYS_VOQS (NUM_OF_PHYS_TCS * MAX_NUM_PORTS_BB) + +/* CIDs */ +#define NUM_OF_CONNECTION_TYPES (8) +#define NUM_OF_LCIDS (320) +#define NUM_OF_LTIDS (320) + +/*****************/ +/* CDU CONSTANTS */ +/*****************/ + +#define CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT (17) +#define CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK (0x1ffff) + +/*****************/ +/* DQ CONSTANTS */ +/*****************/ + +/* DEMS */ +#define DQ_DEMS_LEGACY 0 + +/* XCM agg val selection */ +#define DQ_XCM_AGG_VAL_SEL_WORD2 0 +#define DQ_XCM_AGG_VAL_SEL_WORD3 1 +#define DQ_XCM_AGG_VAL_SEL_WORD4 2 +#define DQ_XCM_AGG_VAL_SEL_WORD5 3 +#define DQ_XCM_AGG_VAL_SEL_REG3 4 +#define DQ_XCM_AGG_VAL_SEL_REG4 5 +#define DQ_XCM_AGG_VAL_SEL_REG5 6 +#define DQ_XCM_AGG_VAL_SEL_REG6 7 + +/* XCM agg val selection */ +#define DQ_XCM_ETH_EDPM_NUM_BDS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD2 +#define DQ_XCM_ETH_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_CORE_TX_BD_CONS_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD3 +#define DQ_XCM_ETH_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_TX_BD_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_CORE_SPQ_PROD_CMD \ + DQ_XCM_AGG_VAL_SEL_WORD4 +#define DQ_XCM_ETH_GO_TO_BD_CONS_CMD DQ_XCM_AGG_VAL_SEL_WORD5 + +/* XCM agg counter flag selection */ +#define DQ_XCM_AGG_FLG_SHIFT_BIT14 0 +#define DQ_XCM_AGG_FLG_SHIFT_BIT15 1 +#define DQ_XCM_AGG_FLG_SHIFT_CF12 2 +#define DQ_XCM_AGG_FLG_SHIFT_CF13 3 +#define DQ_XCM_AGG_FLG_SHIFT_CF18 4 +#define DQ_XCM_AGG_FLG_SHIFT_CF19 5 +#define DQ_XCM_AGG_FLG_SHIFT_CF22 6 +#define DQ_XCM_AGG_FLG_SHIFT_CF23 7 + +/* XCM agg counter flag selection */ +#define DQ_XCM_ETH_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define DQ_XCM_CORE_DQ_CF_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF18) +#define 
DQ_XCM_ETH_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_CORE_TERMINATE_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF19) +#define DQ_XCM_ETH_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_CORE_SLOW_PATH_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF22) +#define DQ_XCM_ETH_TPH_EN_CMD (1 << \ + DQ_XCM_AGG_FLG_SHIFT_CF23) + +/*****************/ +/* QM CONSTANTS */ +/*****************/ + +/* number of TX queues in the QM */ +#define MAX_QM_TX_QUEUES_K2 512 +#define MAX_QM_TX_QUEUES_BB 448 +#define MAX_QM_TX_QUEUES MAX_QM_TX_QUEUES_K2 + +/* number of Other queues in the QM */ +#define MAX_QM_OTHER_QUEUES_BB 64 +#define MAX_QM_OTHER_QUEUES_K2 128 +#define MAX_QM_OTHER_QUEUES MAX_QM_OTHER_QUEUES_K2 + +/* number of queues in a PF queue group */ +#define QM_PF_QUEUE_GROUP_SIZE 8 + +/* base number of Tx PQs in the CM PQ representation. + * should be used when storing PQ IDs in CM PQ registers and context + */ +#define CM_TX_PQ_BASE 0x200 + +/* QM registers data */ +#define QM_LINE_CRD_REG_WIDTH 16 +#define QM_LINE_CRD_REG_SIGN_BIT (1 << (QM_LINE_CRD_REG_WIDTH - 1)) +#define QM_BYTE_CRD_REG_WIDTH 24 +#define QM_BYTE_CRD_REG_SIGN_BIT (1 << (QM_BYTE_CRD_REG_WIDTH - 1)) +#define QM_WFQ_CRD_REG_WIDTH 32 +#define QM_WFQ_CRD_REG_SIGN_BIT (1 << (QM_WFQ_CRD_REG_WIDTH - 1)) +#define QM_RL_CRD_REG_WIDTH 32 +#define QM_RL_CRD_REG_SIGN_BIT (1 << (QM_RL_CRD_REG_WIDTH - 1)) + +/*****************/ +/* CAU CONSTANTS */ +/*****************/ + +#define CAU_FSM_ETH_RX 0 +#define CAU_FSM_ETH_TX 1 + +/* Number of Protocol Indices per Status Block */ +#define PIS_PER_SB 12 + +#define CAU_HC_STOPPED_STATE 3 +#define CAU_HC_DISABLE_STATE 4 +#define CAU_HC_ENABLE_STATE 0 + +/*****************/ +/* IGU CONSTANTS */ +/*****************/ + +#define MAX_SB_PER_PATH_K2 (368) +#define MAX_SB_PER_PATH_BB (288) +#define MAX_TOT_SB_PER_PATH \ + MAX_SB_PER_PATH_K2 + +#define MAX_SB_PER_PF_MIMD 129 +#define MAX_SB_PER_PF_SIMD 64 +#define MAX_SB_PER_VF 64 + +/* Memory addresses on the BAR for the IGU Sub Block */ +#define IGU_MEM_BASE 0x0000 + +#define IGU_MEM_MSIX_BASE 0x0000 +#define IGU_MEM_MSIX_UPPER 0x0101 +#define IGU_MEM_MSIX_RESERVED_UPPER 0x01ff + +#define IGU_MEM_PBA_MSIX_BASE 0x0200 +#define IGU_MEM_PBA_MSIX_UPPER 0x0202 +#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff + +#define IGU_CMD_INT_ACK_BASE 0x0400 +#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff + +#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0 +#define IGU_CMD_ATTN_BIT_SET_UPPER 0x05f1 +#define IGU_CMD_ATTN_BIT_CLR_UPPER 0x05f2 + +#define IGU_REG_SISR_MDPC_WMASK_UPPER 0x05f3 +#define IGU_REG_SISR_MDPC_WMASK_LSB_UPPER 0x05f4 +#define IGU_REG_SISR_MDPC_WMASK_MSB_UPPER 0x05f5 +#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6 + +#define IGU_CMD_PROD_UPD_BASE 0x0600 +#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\ + MAX_TOT_SB_PER_PATH - \ + 1) +#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff + +/*****************/ +/* PXP CONSTANTS */ +/*****************/ + +/* PTT and GTT */ +#define PXP_NUM_PF_WINDOWS 12 +#define PXP_PER_PF_ENTRY_SIZE 8 +#define PXP_NUM_GLOBAL_WINDOWS 243 +#define PXP_GLOBAL_ENTRY_SIZE 4 +#define PXP_ADMIN_WINDOW_ALLOWED_LENGTH 4 +#define PXP_PF_WINDOW_ADMIN_START 0 +#define PXP_PF_WINDOW_ADMIN_LENGTH 0x1000 +#define PXP_PF_WINDOW_ADMIN_END (PXP_PF_WINDOW_ADMIN_START + \ + PXP_PF_WINDOW_ADMIN_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_PER_PF_START 0 +#define PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH (PXP_NUM_PF_WINDOWS * \ + PXP_PER_PF_ENTRY_SIZE) 
+#define PXP_PF_WINDOW_ADMIN_PER_PF_END (PXP_PF_WINDOW_ADMIN_PER_PF_START + \ + PXP_PF_WINDOW_ADMIN_PER_PF_LENGTH - 1) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_START 0x200 +#define PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH (PXP_NUM_GLOBAL_WINDOWS * \ + PXP_GLOBAL_ENTRY_SIZE) +#define PXP_PF_WINDOW_ADMIN_GLOBAL_END \ + (PXP_PF_WINDOW_ADMIN_GLOBAL_START + \ + PXP_PF_WINDOW_ADMIN_GLOBAL_LENGTH - 1) +#define PXP_PF_GLOBAL_PRETEND_ADDR 0x1f0 +#define PXP_PF_ME_OPAQUE_MASK_ADDR 0xf4 +#define PXP_PF_ME_OPAQUE_ADDR 0x1f8 +#define PXP_PF_ME_CONCRETE_ADDR 0x1fc + +#define PXP_EXTERNAL_BAR_PF_WINDOW_START 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_NUM PXP_NUM_PF_WINDOWS +#define PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_PF_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_PF_WINDOW_END \ + (PXP_EXTERNAL_BAR_PF_WINDOW_START + \ + PXP_EXTERNAL_BAR_PF_WINDOW_LENGTH - 1) + +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START \ + (PXP_EXTERNAL_BAR_PF_WINDOW_END + 1) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM PXP_NUM_GLOBAL_WINDOWS +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE 0x1000 +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_NUM * \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_SINGLE_SIZE) +#define PXP_EXTERNAL_BAR_GLOBAL_WINDOW_END \ + (PXP_EXTERNAL_BAR_GLOBAL_WINDOW_START + \ + PXP_EXTERNAL_BAR_GLOBAL_WINDOW_LENGTH - 1) + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +/* ILT Records */ +#define PXP_NUM_ILT_RECORDS_BB 7600 +#define PXP_NUM_ILT_RECORDS_K2 11000 +#define MAX_NUM_ILT_RECORDS MAX(PXP_NUM_ILT_RECORDS_BB, PXP_NUM_ILT_RECORDS_K2) + +/******************/ +/* PBF CONSTANTS */ +/******************/ + +/* Number of PBF command queue lines. Each line is 32B. */ +#define PBF_MAX_CMD_LINES 3328 + +/* Number of BTB blocks. Each block is 256B. 
*/ +#define BTB_MAX_BLOCKS 1440 + +/*****************/ +/* PRS CONSTANTS */ +/*****************/ + +/* Async data KCQ CQE */ +struct async_data { + __le32 cid; + __le16 itid; + u8 error_code; + u8 fw_debug_param; +}; + +struct regpair { + __le32 lo /* low word for reg-pair */; + __le32 hi /* high word for reg-pair */; +}; + +struct vf_pf_channel_eqe_data { + struct regpair msg_addr /* VF-PF message address */; +}; + +struct iscsi_eqe_data { + __le32 cid /* Context ID of the connection */; + __le16 conn_id + /* Task Id of the task (for error that happened on a a task) */; + u8 error_code; + u8 reserved0; +}; + +/* + * Event Ring malicious VF data + */ +struct malicious_vf_eqe_data { + u8 vf_id /* Malicious VF ID */; /* WARNING:CAMELCASE */ + u8 err_id /* Malicious VF error */; + __le16 reserved[3]; +}; + +/* + * Event Ring initial cleanup data + */ +struct initial_cleanup_eqe_data { + u8 vf_id /* VF ID */; /* WARNING:CAMELCASE */ + u8 reserved[7]; +}; + + +union event_ring_data { + u8 bytes[8] /* Byte Array */; + struct vf_pf_channel_eqe_data vf_pf_channel /* VF-PF Channel data */; + struct iscsi_eqe_data iscsi_info /* Dedicated fields to iscsi data */; + struct regpair roce_handle /* WARNING:CAMELCASE */ + /* Dedicated field for RoCE affiliated asynchronous error */; + struct malicious_vf_eqe_data malicious_vf /* Malicious VF data */; + struct initial_cleanup_eqe_data vf_init_cleanup + /* VF Initial Cleanup data */; +}; +/* Event Ring Entry */ +struct event_ring_entry { + u8 protocol_id; + u8 opcode; + __le16 reserved0; + __le16 echo; + u8 fw_return_code; + u8 flags; +#define EVENT_RING_ENTRY_ASYNC_MASK 0x1 +#define EVENT_RING_ENTRY_ASYNC_SHIFT 0 +#define EVENT_RING_ENTRY_RESERVED1_MASK 0x7F +#define EVENT_RING_ENTRY_RESERVED1_SHIFT 1 + union event_ring_data data; +}; + +/* Multi function mode */ +enum mf_mode { + SF, + MF_OVLAN, + MF_NPAR, + MAX_MF_MODE +}; + +/* Per-protocol connection types */ +enum protocol_type { + PROTOCOLID_ISCSI /* iSCSI */, + PROTOCOLID_FCOE /* FCoE */, + PROTOCOLID_ROCE /* RoCE */, + PROTOCOLID_CORE /* Core (light L2, slow path core) */, + PROTOCOLID_ETH /* Ethernet */, + PROTOCOLID_IWARP /* iWARP */, + PROTOCOLID_TOE /* TOE */, + PROTOCOLID_PREROCE /* Pre (tapeout) RoCE */, + PROTOCOLID_COMMON /* ProtocolCommon */, + PROTOCOLID_TCP /* TCP */, + MAX_PROTOCOL_TYPE +}; + +/* status block structure */ +struct cau_pi_entry { + u32 prod; +#define CAU_PI_ENTRY_PROD_VAL_MASK 0xFFFF +#define CAU_PI_ENTRY_PROD_VAL_SHIFT 0 +#define CAU_PI_ENTRY_PI_TIMESET_MASK 0x7F +#define CAU_PI_ENTRY_PI_TIMESET_SHIFT 16 +#define CAU_PI_ENTRY_FSM_SEL_MASK 0x1 +#define CAU_PI_ENTRY_FSM_SEL_SHIFT 23 +#define CAU_PI_ENTRY_RESERVED_MASK 0xFF +#define CAU_PI_ENTRY_RESERVED_SHIFT 24 +}; + +/* status block structure */ +struct cau_sb_entry { + u32 data; +#define CAU_SB_ENTRY_SB_PROD_MASK 0xFFFFFF +#define CAU_SB_ENTRY_SB_PROD_SHIFT 0 +#define CAU_SB_ENTRY_STATE0_MASK 0xF +#define CAU_SB_ENTRY_STATE0_SHIFT 24 +#define CAU_SB_ENTRY_STATE1_MASK 0xF +#define CAU_SB_ENTRY_STATE1_SHIFT 28 + u32 params; +#define CAU_SB_ENTRY_SB_TIMESET0_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET0_SHIFT 0 +#define CAU_SB_ENTRY_SB_TIMESET1_MASK 0x7F +#define CAU_SB_ENTRY_SB_TIMESET1_SHIFT 7 +#define CAU_SB_ENTRY_TIMER_RES0_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES0_SHIFT 14 +#define CAU_SB_ENTRY_TIMER_RES1_MASK 0x3 +#define CAU_SB_ENTRY_TIMER_RES1_SHIFT 16 +#define CAU_SB_ENTRY_VF_NUMBER_MASK 0xFF +#define CAU_SB_ENTRY_VF_NUMBER_SHIFT 18 +#define CAU_SB_ENTRY_VF_VALID_MASK 0x1 +#define CAU_SB_ENTRY_VF_VALID_SHIFT 26 
+#define CAU_SB_ENTRY_PF_NUMBER_MASK 0xF +#define CAU_SB_ENTRY_PF_NUMBER_SHIFT 27 +#define CAU_SB_ENTRY_TPH_MASK 0x1 +#define CAU_SB_ENTRY_TPH_SHIFT 31 +}; + +/* core doorbell data */ +struct core_db_data { + u8 params; +#define CORE_DB_DATA_DEST_MASK 0x3 +#define CORE_DB_DATA_DEST_SHIFT 0 +#define CORE_DB_DATA_AGG_CMD_MASK 0x3 +#define CORE_DB_DATA_AGG_CMD_SHIFT 2 +#define CORE_DB_DATA_BYPASS_EN_MASK 0x1 +#define CORE_DB_DATA_BYPASS_EN_SHIFT 4 +#define CORE_DB_DATA_RESERVED_MASK 0x1 +#define CORE_DB_DATA_RESERVED_SHIFT 5 +#define CORE_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define CORE_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 spq_prod; +}; + +/* Enum of doorbell aggregative command selection */ +enum db_agg_cmd_sel { + DB_AGG_CMD_NOP, + DB_AGG_CMD_SET, + DB_AGG_CMD_ADD, + DB_AGG_CMD_MAX, + MAX_DB_AGG_CMD_SEL +}; + +/* Enum of doorbell destination */ +enum db_dest { + DB_DEST_XCM, + DB_DEST_UCM, + DB_DEST_TCM, + DB_NUM_DESTINATIONS, + MAX_DB_DEST +}; + +/* Structure for doorbell address, in legacy mode */ +struct db_legacy_addr { + __le32 addr; +#define DB_LEGACY_ADDR_RESERVED0_MASK 0x3 +#define DB_LEGACY_ADDR_RESERVED0_SHIFT 0 +#define DB_LEGACY_ADDR_DEMS_MASK 0x7 +#define DB_LEGACY_ADDR_DEMS_SHIFT 2 +#define DB_LEGACY_ADDR_ICID_MASK 0x7FFFFFF +#define DB_LEGACY_ADDR_ICID_SHIFT 5 +}; + +/* Igu interrupt command */ +enum igu_int_cmd { + IGU_INT_ENABLE = 0, + IGU_INT_DISABLE = 1, + IGU_INT_NOP = 2, + IGU_INT_NOP2 = 3, + MAX_IGU_INT_CMD +}; + +/* IGU producer or consumer update command */ +struct igu_prod_cons_update { + u32 sb_id_and_flags; +#define IGU_PROD_CONS_UPDATE_SB_INDEX_MASK 0xFFFFFF +#define IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT 0 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT 24 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT 25 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT 27 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_TIMER_MASK_SHIFT 28 +#define IGU_PROD_CONS_UPDATE_RESERVED0_MASK 0x3 +#define IGU_PROD_CONS_UPDATE_RESERVED0_SHIFT 29 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_MASK 0x1 +#define IGU_PROD_CONS_UPDATE_COMMAND_TYPE_SHIFT 31 + u32 reserved1; +}; + +/* Igu segments access for default status block only */ +enum igu_seg_access { + IGU_SEG_ACCESS_REG = 0, + IGU_SEG_ACCESS_ATTN = 1, + MAX_IGU_SEG_ACCESS +}; + +struct parsing_and_err_flags { + __le16 flags; +#define PARSING_AND_ERR_FLAGS_L3TYPE_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT 0 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_MASK 0x3 +#define PARSING_AND_ERR_FLAGS_L4PROTOCOL_SHIFT 2 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPV4FRAG_SHIFT 4 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT 5 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT 6 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESYNCPKT_SHIFT 7 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TIMESTAMPRECORDED_SHIFT 8 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT 9 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT 10 +#define PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK 0x1 +#define 
PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT 11 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT 12 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT 13 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT 14 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK 0x1 +#define PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT 15 +}; + +/* Concrete Function ID. */ +struct pxp_concrete_fid { + __le16 fid; +#define PXP_CONCRETE_FID_PFID_MASK 0xF +#define PXP_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_CONCRETE_FID_PORT_MASK 0x3 +#define PXP_CONCRETE_FID_PORT_SHIFT 4 +#define PXP_CONCRETE_FID_PATH_MASK 0x1 +#define PXP_CONCRETE_FID_PATH_SHIFT 6 +#define PXP_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_CONCRETE_FID_VFID_SHIFT 8 +}; + +struct pxp_pretend_concrete_fid { + __le16 fid; +#define PXP_PRETEND_CONCRETE_FID_PFID_MASK 0xF +#define PXP_PRETEND_CONCRETE_FID_PFID_SHIFT 0 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_MASK 0x7 +#define PXP_PRETEND_CONCRETE_FID_RESERVED_SHIFT 4 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_MASK 0x1 +#define PXP_PRETEND_CONCRETE_FID_VFVALID_SHIFT 7 +#define PXP_PRETEND_CONCRETE_FID_VFID_MASK 0xFF +#define PXP_PRETEND_CONCRETE_FID_VFID_SHIFT 8 +}; + +union pxp_pretend_fid { + struct pxp_pretend_concrete_fid concrete_fid; + __le16 opaque_fid; +}; + +/* Pxp Pretend Command Register. */ +struct pxp_pretend_cmd { + union pxp_pretend_fid fid; + __le16 control; +#define PXP_PRETEND_CMD_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PATH_SHIFT 0 +#define PXP_PRETEND_CMD_USE_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_USE_PORT_SHIFT 1 +#define PXP_PRETEND_CMD_PORT_MASK 0x3 +#define PXP_PRETEND_CMD_PORT_SHIFT 2 +#define PXP_PRETEND_CMD_RESERVED0_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED0_SHIFT 4 +#define PXP_PRETEND_CMD_RESERVED1_MASK 0xF +#define PXP_PRETEND_CMD_RESERVED1_SHIFT 8 +#define PXP_PRETEND_CMD_PRETEND_PATH_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PATH_SHIFT 12 +#define PXP_PRETEND_CMD_PRETEND_PORT_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_PORT_SHIFT 13 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_MASK 0x1 +#define PXP_PRETEND_CMD_PRETEND_FUNCTION_SHIFT 14 +#define PXP_PRETEND_CMD_IS_CONCRETE_MASK 0x1 +#define PXP_PRETEND_CMD_IS_CONCRETE_SHIFT 15 +}; + +/* PTT Record in PXP Admin Window. 
*/ +struct pxp_ptt_entry { + __le32 offset; +#define PXP_PTT_ENTRY_OFFSET_MASK 0x7FFFFF +#define PXP_PTT_ENTRY_OFFSET_SHIFT 0 +#define PXP_PTT_ENTRY_RESERVED0_MASK 0x1FF +#define PXP_PTT_ENTRY_RESERVED0_SHIFT 23 + struct pxp_pretend_cmd pretend; +}; + +/* RSS hash type */ +enum rss_hash_type { + RSS_HASH_TYPE_DEFAULT = 0, + RSS_HASH_TYPE_IPV4 = 1, + RSS_HASH_TYPE_TCP_IPV4 = 2, + RSS_HASH_TYPE_IPV6 = 3, + RSS_HASH_TYPE_TCP_IPV6 = 4, + RSS_HASH_TYPE_UDP_IPV4 = 5, + RSS_HASH_TYPE_UDP_IPV6 = 6, + MAX_RSS_HASH_TYPE +}; + +/* status block structure */ +struct status_block { + __le16 pi_array[PIS_PER_SB]; + __le32 sb_num; +#define STATUS_BLOCK_SB_NUM_MASK 0x1FF +#define STATUS_BLOCK_SB_NUM_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD_MASK 0x7F +#define STATUS_BLOCK_ZERO_PAD_SHIFT 9 +#define STATUS_BLOCK_ZERO_PAD2_MASK 0xFFFF +#define STATUS_BLOCK_ZERO_PAD2_SHIFT 16 + __le32 prod_index; +#define STATUS_BLOCK_PROD_INDEX_MASK 0xFFFFFF +#define STATUS_BLOCK_PROD_INDEX_SHIFT 0 +#define STATUS_BLOCK_ZERO_PAD3_MASK 0xFF +#define STATUS_BLOCK_ZERO_PAD3_SHIFT 24 +}; + +/* @DPDK */ +#define X_FINAL_CLEANUP_AGG_INT 1 +#define SDM_COMP_TYPE_AGG_INT 2 +#define MAX_NUM_LL2_RX_QUEUES 32 +#define QM_PQ_ELEMENT_SIZE 4 +#define PXP_VF_BAR0_START_IGU 0 +#define EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE 3 + +#define TSTORM_QZONE_SIZE 8 +#define MSTORM_QZONE_SIZE 16 +#define USTORM_QZONE_SIZE 8 +#define XSTORM_QZONE_SIZE 0 +#define YSTORM_QZONE_SIZE 8 +#define PSTORM_QZONE_SIZE 0 + +/* VF BAR */ +#define PXP_VF_BAR0 0 + +#define PXP_VF_BAR0_START_GRC 0x3E00 +#define PXP_VF_BAR0_GRC_LENGTH 0x200 +#define PXP_VF_BAR0_END_GRC \ +(PXP_VF_BAR0_START_GRC + PXP_VF_BAR0_GRC_LENGTH - 1) + +#define PXP_VF_BAR0_START_IGU 0 +#define PXP_VF_BAR0_IGU_LENGTH 0x3000 +#define PXP_VF_BAR0_END_IGU \ +(PXP_VF_BAR0_START_IGU + PXP_VF_BAR0_IGU_LENGTH - 1) + +#define PXP_VF_BAR0_START_DQ 0x3000 +#define PXP_VF_BAR0_DQ_LENGTH 0x200 +#define PXP_VF_BAR0_DQ_OPAQUE_OFFSET 0 +#define PXP_VF_BAR0_ME_OPAQUE_ADDRESS \ +(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_OPAQUE_OFFSET) +#define PXP_VF_BAR0_ME_CONCRETE_ADDRESS \ +(PXP_VF_BAR0_ME_OPAQUE_ADDRESS + 4) +#define PXP_VF_BAR0_END_DQ \ +(PXP_VF_BAR0_START_DQ + PXP_VF_BAR0_DQ_LENGTH - 1) + +#define PXP_VF_BAR0_START_TSDM_ZONE_B 0x3200 +#define PXP_VF_BAR0_SDM_LENGTH_ZONE_B 0x200 +#define PXP_VF_BAR0_END_TSDM_ZONE_B \ +(PXP_VF_BAR0_START_TSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_MSDM_ZONE_B 0x3400 +#define PXP_VF_BAR0_END_MSDM_ZONE_B \ +(PXP_VF_BAR0_START_MSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_USDM_ZONE_B 0x3600 +#define PXP_VF_BAR0_END_USDM_ZONE_B \ +(PXP_VF_BAR0_START_USDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_XSDM_ZONE_B 0x3800 +#define PXP_VF_BAR0_END_XSDM_ZONE_B \ +(PXP_VF_BAR0_START_XSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_YSDM_ZONE_B 0x3a00 +#define PXP_VF_BAR0_END_YSDM_ZONE_B \ +(PXP_VF_BAR0_START_YSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_PSDM_ZONE_B 0x3c00 +#define PXP_VF_BAR0_END_PSDM_ZONE_B \ +(PXP_VF_BAR0_START_PSDM_ZONE_B + PXP_VF_BAR0_SDM_LENGTH_ZONE_B - 1) + +#define PXP_VF_BAR0_START_SDM_ZONE_A 0x4000 +#define PXP_VF_BAR0_END_SDM_ZONE_A 0x10000 + +#define PXP_VF_BAR0_GRC_WINDOW_LENGTH 32 + +#define PXP_ILT_PAGE_SIZE_NUM_BITS_MIN 12 +#define PXP_ILT_BLOCK_FACTOR_MULTIPLIER 1024 + +#endif /* __COMMON_HSI__ */ diff --git a/drivers/net/qede/base/ecore.h b/drivers/net/qede/base/ecore.h new file mode 100644 index 
00000000..d682a785 --- /dev/null +++ b/drivers/net/qede/base/ecore.h @@ -0,0 +1,754 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_H +#define __ECORE_H + +#include "ecore_hsi_common.h" +#include "ecore_hsi_tools.h" +#include "ecore_proto_if.h" +#include "mcp_public.h" + +#define MAX_HWFNS_PER_DEVICE (4) +#define NAME_SIZE 64 /* @DPDK */ +#define VER_SIZE 16 +/* @DPDK ARRAY_DECL */ +#define ECORE_WFQ_UNIT 100 +#include "../qede_logs.h" /* @DPDK */ + +/* Constants */ +#define ECORE_WID_SIZE (1024) + +/* Configurable */ +#define ECORE_PF_DEMS_SIZE (4) + +/* cau states */ +enum ecore_coalescing_mode { + ECORE_COAL_MODE_DISABLE, + ECORE_COAL_MODE_ENABLE +}; + +enum ecore_nvm_cmd { + ECORE_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, + ECORE_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA, + ECORE_NVM_READ_NVRAM = DRV_MSG_CODE_NVM_READ_NVRAM, + ECORE_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM, + ECORE_NVM_DEL_FILE = DRV_MSG_CODE_NVM_DEL_FILE, + ECORE_NVM_SET_SECURE_MODE = DRV_MSG_CODE_SET_SECURE_MODE, + ECORE_PHY_RAW_READ = DRV_MSG_CODE_PHY_RAW_READ, + ECORE_PHY_RAW_WRITE = DRV_MSG_CODE_PHY_RAW_WRITE, + ECORE_PHY_CORE_READ = DRV_MSG_CODE_PHY_CORE_READ, + ECORE_PHY_CORE_WRITE = DRV_MSG_CODE_PHY_CORE_WRITE, + ECORE_GET_MCP_NVM_RESP = 0xFFFFFF00 +}; + +#ifndef LINUX_REMOVE +#if !defined(CONFIG_ECORE_L2) +#define CONFIG_ECORE_L2 +#define CONFIG_ECORE_SRIOV +#endif +#endif + +/* helpers */ +#ifndef __EXTRACT__LINUX__ +#define MASK_FIELD(_name, _value) \ + ((_value) &= (_name##_MASK)) + +#define FIELD_VALUE(_name, _value) \ + ((_value & _name##_MASK) << _name##_SHIFT) + +#define SET_FIELD(value, name, flag) \ +do { \ + (value) &= ~(name##_MASK << name##_SHIFT); \ + (value) |= (((u64)flag) << (name##_SHIFT)); \ +} while (0) + +#define GET_FIELD(value, name) \ + (((value) >> (name##_SHIFT)) & name##_MASK) +#endif + +static OSAL_INLINE u32 DB_ADDR(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + (cid * ECORE_PF_DEMS_SIZE); + + return db_addr; +} + +/* @DPDK: This is a backport from latest ecore for TSS fix */ +static OSAL_INLINE u32 DB_ADDR_VF(u32 cid, u32 DEMS) +{ + u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) | + FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid); + + return db_addr; +} + +#define ALIGNED_TYPE_SIZE(type_name, p_hwfn) \ + ((sizeof(type_name) + (u32)(1 << (p_hwfn->p_dev->cache_shift)) - 1) & \ + ~((1 << (p_hwfn->p_dev->cache_shift)) - 1)) + +#ifndef U64_HI +#define U64_HI(val) ((u32)(((u64)(val)) >> 32)) +#endif + +#ifndef U64_LO +#define U64_LO(val) ((u32)(((u64)(val)) & 0xffffffff)) +#endif + +#ifndef __EXTRACT__LINUX__ +enum DP_LEVEL { + ECORE_LEVEL_VERBOSE = 0x0, + ECORE_LEVEL_INFO = 0x1, + ECORE_LEVEL_NOTICE = 0x2, + ECORE_LEVEL_ERR = 0x3, +}; + +#define ECORE_LOG_LEVEL_SHIFT (30) +#define ECORE_LOG_VERBOSE_MASK (0x3fffffff) +#define ECORE_LOG_INFO_MASK (0x40000000) +#define ECORE_LOG_NOTICE_MASK (0x80000000) + +enum DP_MODULE { +#ifndef LINUX_REMOVE + ECORE_MSG_DRV = 0x0001, + ECORE_MSG_PROBE = 0x0002, + ECORE_MSG_LINK = 0x0004, + ECORE_MSG_TIMER = 0x0008, + ECORE_MSG_IFDOWN = 0x0010, + ECORE_MSG_IFUP = 0x0020, + ECORE_MSG_RX_ERR = 0x0040, + ECORE_MSG_TX_ERR = 0x0080, + ECORE_MSG_TX_QUEUED = 0x0100, + ECORE_MSG_INTR = 0x0200, + ECORE_MSG_TX_DONE = 0x0400, + ECORE_MSG_RX_STATUS = 0x0800, + ECORE_MSG_PKTDATA = 0x1000, + ECORE_MSG_HW = 0x2000, + ECORE_MSG_WOL = 0x4000, +#endif + ECORE_MSG_SPQ = 0x10000, + 
ECORE_MSG_STATS = 0x20000, + ECORE_MSG_DCB = 0x40000, + ECORE_MSG_IOV = 0x80000, + ECORE_MSG_SP = 0x100000, + ECORE_MSG_STORAGE = 0x200000, + ECORE_MSG_CXT = 0x800000, + ECORE_MSG_ILT = 0x2000000, + ECORE_MSG_DEBUG = 0x8000000, + /* to be added...up to 0x8000000 */ +}; +#endif + +#define for_each_hwfn(p_dev, i) for (i = 0; i < p_dev->num_hwfns; i++) + +#define D_TRINE(val, cond1, cond2, true1, true2, def) \ + (val == (cond1) ? true1 : \ + (val == (cond2) ? true2 : def)) + +/* forward */ +struct ecore_ptt_pool; +struct ecore_spq; +struct ecore_sb_info; +struct ecore_sb_attn_info; +struct ecore_cxt_mngr; +struct ecore_dma_mem; +struct ecore_sb_sp_info; +struct ecore_igu_info; +struct ecore_mcp_info; +struct ecore_dcbx_info; + +struct ecore_rt_data { + u32 *init_val; + bool *b_valid; +}; + +enum ecore_tunn_mode { + ECORE_MODE_L2GENEVE_TUNN, + ECORE_MODE_IPGENEVE_TUNN, + ECORE_MODE_L2GRE_TUNN, + ECORE_MODE_IPGRE_TUNN, + ECORE_MODE_VXLAN_TUNN, +}; + +enum ecore_tunn_clss { + ECORE_TUNN_CLSS_MAC_VLAN, + ECORE_TUNN_CLSS_MAC_VNI, + ECORE_TUNN_CLSS_INNER_MAC_VLAN, + ECORE_TUNN_CLSS_INNER_MAC_VNI, + MAX_ECORE_TUNN_CLSS, +}; + +struct ecore_tunn_start_params { + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + +struct ecore_tunn_update_params { + unsigned long tunn_mode_update_mask; + unsigned long tunn_mode; + u16 vxlan_udp_port; + u16 geneve_udp_port; + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 update_vxlan_udp_port; + u8 update_geneve_udp_port; + u8 tunn_clss_vxlan; + u8 tunn_clss_l2geneve; + u8 tunn_clss_ipgeneve; + u8 tunn_clss_l2gre; + u8 tunn_clss_ipgre; +}; + +struct ecore_hw_sriov_info { + /* standard SRIOV capability fields, mostly for debugging */ + int pos; /* capability position */ + int nres; /* number of resources */ + u32 cap; /* SR-IOV Capabilities */ + u16 ctrl; /* SR-IOV Control */ + u16 total_vfs; /* total VFs associated with the PF */ + u16 num_vfs; /* number of vfs that have been started */ + u64 active_vfs[3]; /* bitfield of active vfs */ +#define ECORE_IS_VF_ACTIVE(_p_dev, _rel_vf_id) \ + (!!(_p_dev->sriov_info.active_vfs[_rel_vf_id / 64] & \ + (1ULL << (_rel_vf_id % 64)))) + u16 initial_vfs; /* initial VFs associated with the PF */ + u16 nr_virtfn; /* number of VFs available */ + u16 offset; /* first VF Routing ID offset */ + u16 stride; /* following VF stride */ + u16 vf_device_id; /* VF device id */ + u32 pgsz; /* page size for BAR alignment */ + u8 link; /* Function Dependency Link */ + + bool b_hw_channel; /* Whether PF uses the HW-channel */ +}; + +/* The PCI personality is not quite synonymous to protocol ID: + * 1. All personalities need CORE connections + * 2. The Ethernet personality may support also the RoCE protocol + */ +enum ecore_pci_personality { + ECORE_PCI_ETH, + ECORE_PCI_DEFAULT /* default in shmem */ +}; + +/* All VFs are symmetric, all counters are PF + all VFs */ +struct ecore_qm_iids { + u32 cids; + u32 vf_cids; + u32 tids; +}; + +#define MAX_PF_PER_PORT 8 + +/*@@@TBD MK RESC: need to remove and use MCP interface instead */ +/* HW / FW resources, output of features supported below, most information + * is received from MFW. 
+ */ +enum ECORE_RESOURCES { + ECORE_SB, + ECORE_L2_QUEUE, + ECORE_VPORT, + ECORE_RSS_ENG, + ECORE_PQ, + ECORE_RL, + ECORE_MAC, + ECORE_VLAN, + ECORE_ILT, + ECORE_CMDQS_CQS, + ECORE_MAX_RESC, +}; + +/* Features that require resources, given as input to the resource management + * algorithm, the output are the resources above + */ +enum ECORE_FEATURE { + ECORE_PF_L2_QUE, + ECORE_PF_TC, + ECORE_VF, + ECORE_EXTRA_VF_QUE, + ECORE_VMQ, + ECORE_MAX_FEATURES, +}; + +enum ECORE_PORT_MODE { + ECORE_PORT_MODE_DE_2X40G, + ECORE_PORT_MODE_DE_2X50G, + ECORE_PORT_MODE_DE_1X100G, + ECORE_PORT_MODE_DE_4X10G_F, + ECORE_PORT_MODE_DE_4X10G_E, + ECORE_PORT_MODE_DE_4X20G, + ECORE_PORT_MODE_DE_1X40G, + ECORE_PORT_MODE_DE_2X25G, + ECORE_PORT_MODE_DE_1X25G +}; + +enum ecore_dev_cap { + ECORE_DEV_CAP_ETH, +}; + +#ifndef __EXTRACT__LINUX__ +enum ecore_hw_err_type { + ECORE_HW_ERR_FAN_FAIL, + ECORE_HW_ERR_MFW_RESP_FAIL, + ECORE_HW_ERR_HW_ATTN, + ECORE_HW_ERR_DMAE_FAIL, + ECORE_HW_ERR_RAMROD_FAIL, + ECORE_HW_ERR_FW_ASSERT, +}; +#endif + +struct ecore_hw_info { + /* PCI personality */ + enum ecore_pci_personality personality; + + /* Resource Allocation scheme results */ + u32 resc_start[ECORE_MAX_RESC]; + u32 resc_num[ECORE_MAX_RESC]; + u32 feat_num[ECORE_MAX_FEATURES]; + +#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc]) +#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc]) +#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \ + RESC_NUM(_p_hwfn, resc)) +#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc]) + + u8 num_tc; + u8 ooo_tc; + u8 offload_tc; + u8 non_offload_tc; + + u32 concrete_fid; + u16 opaque_fid; + u16 ovlan; + u32 part_num[4]; + + unsigned char hw_mac_addr[ETH_ALEN]; + + struct ecore_igu_info *p_igu_info; + /* Sriov */ + u32 first_vf_in_pf; + u8 max_chains_per_vf; + + u32 port_mode; + u32 hw_mode; + unsigned long device_capabilities; +}; + +struct ecore_hw_cid_data { + u32 cid; + bool b_cid_allocated; + u8 vfid; /* 1-based; 0 signals this is for a PF */ + + /* Additional identifiers */ + u16 opaque_fid; + u8 vport_id; +}; + +/* maximun size of read/write commands (HW limit) */ +#define DMAE_MAX_RW_SIZE 0x2000 + +struct ecore_dmae_info { + /* Mutex for synchronizing access to functions */ + osal_mutex_t mutex; + + u8 channel; + + dma_addr_t completion_word_phys_addr; + + /* The memory location where the DMAE writes the completion + * value when an operation is finished on this context. 
+ */ + u32 *p_completion_word; + + dma_addr_t intermediate_buffer_phys_addr; + + /* An intermediate buffer for DMAE operations that use virtual + * addresses - data is DMA'd to/from this buffer and then + * memcpy'd to/from the virtual address + */ + u32 *p_intermediate_buffer; + + dma_addr_t dmae_cmd_phys_addr; + struct dmae_cmd *p_dmae_cmd; +}; + +struct ecore_wfq_data { + u32 default_min_speed; /* When wfq feature is not configured */ + u32 min_speed; /* when feature is configured for any 1 vport */ + bool configured; +}; + +struct ecore_qm_info { + struct init_qm_pq_params *qm_pq_params; + struct init_qm_vport_params *qm_vport_params; + struct init_qm_port_params *qm_port_params; + u16 start_pq; + u8 start_vport; + u8 pure_lb_pq; + u8 offload_pq; + u8 pure_ack_pq; + u8 ooo_pq; + u8 vf_queues_offset; + u16 num_pqs; + u16 num_vf_pqs; + u8 num_vports; + u8 max_phys_tcs_per_port; + bool pf_rl_en; + bool pf_wfq_en; + bool vport_rl_en; + bool vport_wfq_en; + u8 pf_wfq; + u32 pf_rl; + struct ecore_wfq_data *wfq_data; +}; + +struct storm_stats { + u32 address; + u32 len; +}; + +#define CONFIG_ECORE_BINARY_FW +#define CONFIG_ECORE_ZIPPED_FW + +struct ecore_fw_data { +#ifdef CONFIG_ECORE_BINARY_FW + struct fw_ver_info *fw_ver_info; +#endif + const u8 *modes_tree_buf; + union init_op *init_ops; + const u32 *arr_data; + u32 init_ops_size; +}; + +struct ecore_hwfn { + struct ecore_dev *p_dev; + u8 my_id; /* ID inside the PF */ +#define IS_LEAD_HWFN(edev) (!((edev)->my_id)) + u8 rel_pf_id; /* Relative to engine */ + u8 abs_pf_id; +#define ECORE_PATH_ID(_p_hwfn) \ + (ECORE_IS_K2((_p_hwfn)->p_dev) ? 0 : ((_p_hwfn)->abs_pf_id & 1)) + u8 port_id; + bool b_active; + + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + void *dp_ctx; + + bool first_on_engine; + bool hw_init_done; + + u8 num_funcs_on_engine; + + /* BAR access */ + void OSAL_IOMEM *regview; + void OSAL_IOMEM *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PTT pool */ + struct ecore_ptt_pool *p_ptt_pool; + + /* HW info */ + struct ecore_hw_info hw_info; + + /* rt_array (for init-tool) */ + struct ecore_rt_data rt_data; + + /* SPQ */ + struct ecore_spq *p_spq; + + /* EQ */ + struct ecore_eq *p_eq; + + /* Consolidate Q */ + struct ecore_consq *p_consq; + + /* Slow-Path definitions */ + osal_dpc_t sp_dpc; + bool b_sp_dpc_enabled; + + struct ecore_ptt *p_main_ptt; + struct ecore_ptt *p_dpc_ptt; + + struct ecore_sb_sp_info *p_sp_sb; + struct ecore_sb_attn_info *p_sb_attn; + + /* Protocol related */ + struct ecore_ooo_info *p_ooo_info; + struct ecore_pf_params pf_params; + + /* Array of sb_info of all status blocks */ + struct ecore_sb_info *sbs_info[MAX_SB_PER_PF_MIMD]; + u16 num_sbs; + + struct ecore_cxt_mngr *p_cxt_mngr; + + /* Flag indicating whether interrupts are enabled or not */ + bool b_int_enabled; + bool b_int_requested; + + /* True if the driver requests for the link */ + bool b_drv_link_init; + + struct ecore_vf_iov *vf_iov_info; + struct ecore_pf_iov *pf_iov_info; + struct ecore_mcp_info *mcp_info; + struct ecore_dcbx_info *p_dcbx_info; + + struct ecore_hw_cid_data *p_tx_cids; + struct ecore_hw_cid_data *p_rx_cids; + + struct ecore_dmae_info dmae_info; + + /* QM init */ + struct ecore_qm_info qm_info; + + /* Buffer for unzipping firmware data */ +#ifdef CONFIG_ECORE_ZIPPED_FW + void *unzip_buf; +#endif + + struct dbg_tools_data dbg_info; + + struct z_stream_s *stream; + + /* PWM region specific data */ + u32 dpi_size; + u32 dpi_count; + u32 dpi_start_offset; /* this is used to + * calculate th + * doorbell address 
+ */ +}; + +#ifndef __EXTRACT__LINUX__ +enum ecore_mf_mode { + ECORE_MF_DEFAULT, + ECORE_MF_OVLAN, + ECORE_MF_NPAR, +}; +#endif + +struct ecore_dev { + u32 dp_module; + u8 dp_level; + char name[NAME_SIZE]; + void *dp_ctx; + + u8 type; +#define ECORE_DEV_TYPE_BB (0 << 0) +#define ECORE_DEV_TYPE_AH (1 << 0) +/* Translate type/revision combo into the proper conditions */ +#define ECORE_IS_BB(dev) ((dev)->type == ECORE_DEV_TYPE_BB) +#define ECORE_IS_BB_A0(dev) (ECORE_IS_BB(dev) && \ + CHIP_REV_IS_A0(dev)) +#define ECORE_IS_BB_B0(dev) (ECORE_IS_BB(dev) && \ + CHIP_REV_IS_B0(dev)) +#define ECORE_IS_AH(dev) ((dev)->type == ECORE_DEV_TYPE_AH) +#define ECORE_IS_K2(dev) ECORE_IS_AH(dev) +#define ECORE_GET_TYPE(dev) (ECORE_IS_BB_A0(dev) ? CHIP_BB_A0 : \ + ECORE_IS_BB_B0(dev) ? CHIP_BB_B0 : CHIP_K2) + + u16 vendor_id; + u16 device_id; + + u16 chip_num; +#define CHIP_NUM_MASK 0xffff +#define CHIP_NUM_SHIFT 16 + + u16 chip_rev; +#define CHIP_REV_MASK 0xf +#define CHIP_REV_SHIFT 12 +#ifndef ASIC_ONLY +#define CHIP_REV_IS_TEDIBEAR(_p_dev) ((_p_dev)->chip_rev == 0x5) +#define CHIP_REV_IS_EMUL_A0(_p_dev) ((_p_dev)->chip_rev == 0xe) +#define CHIP_REV_IS_EMUL_B0(_p_dev) ((_p_dev)->chip_rev == 0xc) +#define CHIP_REV_IS_EMUL(_p_dev) (CHIP_REV_IS_EMUL_A0(_p_dev) || \ + CHIP_REV_IS_EMUL_B0(_p_dev)) +#define CHIP_REV_IS_FPGA_A0(_p_dev) ((_p_dev)->chip_rev == 0xf) +#define CHIP_REV_IS_FPGA_B0(_p_dev) ((_p_dev)->chip_rev == 0xd) +#define CHIP_REV_IS_FPGA(_p_dev) (CHIP_REV_IS_FPGA_A0(_p_dev) || \ + CHIP_REV_IS_FPGA_B0(_p_dev)) +#define CHIP_REV_IS_SLOW(_p_dev) \ + (CHIP_REV_IS_EMUL(_p_dev) || CHIP_REV_IS_FPGA(_p_dev)) +#define CHIP_REV_IS_A0(_p_dev) \ + (CHIP_REV_IS_EMUL_A0(_p_dev) || \ + CHIP_REV_IS_FPGA_A0(_p_dev) || \ + !(_p_dev)->chip_rev) +#define CHIP_REV_IS_B0(_p_dev) \ + (CHIP_REV_IS_EMUL_B0(_p_dev) || \ + CHIP_REV_IS_FPGA_B0(_p_dev) || \ + (_p_dev)->chip_rev == 1) +#define CHIP_REV_IS_ASIC(_p_dev) (!CHIP_REV_IS_SLOW(_p_dev)) +#else +#define CHIP_REV_IS_A0(_p_dev) (!(_p_dev)->chip_rev) +#define CHIP_REV_IS_B0(_p_dev) ((_p_dev)->chip_rev == 1) +#endif + + u16 chip_metal; +#define CHIP_METAL_MASK 0xff +#define CHIP_METAL_SHIFT 4 + + u16 chip_bond_id; +#define CHIP_BOND_ID_MASK 0xf +#define CHIP_BOND_ID_SHIFT 0 + + u8 num_engines; + u8 num_ports_in_engines; + u8 num_funcs_in_port; + + u8 path_id; + enum ecore_mf_mode mf_mode; +#define IS_MF_DEFAULT(_p_hwfn) \ + (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_DEFAULT) +#define IS_MF_SI(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_NPAR) +#define IS_MF_SD(_p_hwfn) (((_p_hwfn)->p_dev)->mf_mode == ECORE_MF_OVLAN) + + int pcie_width; + int pcie_speed; + u8 ver_str[VER_SIZE]; + /* Add MF related configuration */ + u8 mcp_rev; + u8 boot_mode; + + u8 wol; + + u32 int_mode; + enum ecore_coalescing_mode int_coalescing_mode; + u8 rx_coalesce_usecs; + u8 tx_coalesce_usecs; + + /* Start Bar offset of first hwfn */ + void OSAL_IOMEM *regview; + void OSAL_IOMEM *doorbells; + u64 db_phys_addr; + unsigned long db_size; + + /* PCI */ + u8 cache_shift; + + /* Init */ + const struct iro *iro_arr; +#define IRO (p_hwfn->p_dev->iro_arr) + + /* HW functions */ + u8 num_hwfns; + struct ecore_hwfn hwfns[MAX_HWFNS_PER_DEVICE]; + + /* SRIOV */ + struct ecore_hw_sriov_info sriov_info; + unsigned long tunn_mode; +#define IS_ECORE_SRIOV(edev) (!!((edev)->sriov_info.total_vfs)) + bool b_is_vf; + + u32 drv_type; + + struct ecore_eth_stats *reset_stats; + struct ecore_fw_data *fw_data; + + u32 mcp_nvm_resp; + + /* Recovery */ + bool recov_in_prog; + +#ifndef ASIC_ONLY + bool b_is_emul_full; +#endif 
+ + void *firmware; + + u64 fw_len; + +}; + +#define NUM_OF_VFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_VFS_BB \ + : MAX_NUM_VFS_K2) +#define NUM_OF_L2_QUEUES(dev) (ECORE_IS_BB(dev) ? MAX_NUM_L2_QUEUES_BB \ + : MAX_NUM_L2_QUEUES_K2) +#define NUM_OF_PORTS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PORTS_BB \ + : MAX_NUM_PORTS_K2) +#define NUM_OF_SBS(dev) (ECORE_IS_BB(dev) ? MAX_SB_PER_PATH_BB \ + : MAX_SB_PER_PATH_K2) +#define NUM_OF_ENG_PFS(dev) (ECORE_IS_BB(dev) ? MAX_NUM_PFS_BB \ + : MAX_NUM_PFS_K2) + +#define ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn) ( \ + (ECORE_IS_BB_A0(p_hwfn->p_dev)) && \ + (ECORE_PATH_ID(p_hwfn) == 1) && \ + ((p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X40G) || \ + (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X50G) || \ + (p_hwfn->hw_info.port_mode == ECORE_PORT_MODE_DE_2X25G))) + +/** + * @brief ecore_concrete_to_sw_fid - get the sw function id from + * the concrete value. + * + * @param concrete_fid + * + * @return OSAL_INLINE u8 + */ +static OSAL_INLINE u8 ecore_concrete_to_sw_fid(struct ecore_dev *p_dev, + u32 concrete_fid) +{ + u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID); + u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID); + u8 vf_valid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID); + u8 sw_fid; + + if (vf_valid) + sw_fid = vfid + MAX_NUM_PFS; + else + sw_fid = pfid; + + return sw_fid; +} + +#define PURE_LB_TC 8 +#define OOO_LB_TC 9 + +static OSAL_INLINE u16 ecore_sriov_get_next_vf(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + u16 i; + + for (i = rel_vf_id; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) + if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, i)) + return i; + + return p_hwfn->p_dev->sriov_info.total_vfs; +} + +int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate); +void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, + u32 min_pf_rate); + +int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw); +int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw); +void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); +int ecore_device_num_engines(struct ecore_dev *p_dev); +int ecore_device_num_ports(struct ecore_dev *p_dev); + +#define ecore_for_each_vf(_p_hwfn, _i) \ + for (_i = ecore_sriov_get_next_vf(_p_hwfn, 0); \ + _i < _p_hwfn->p_dev->sriov_info.total_vfs; \ + _i = ecore_sriov_get_next_vf(_p_hwfn, _i + 1)) + +#define ECORE_LEADING_HWFN(dev) (&dev->hwfns[0]) + +#endif /* __ECORE_H */ diff --git a/drivers/net/qede/base/ecore_attn_values.h b/drivers/net/qede/base/ecore_attn_values.h new file mode 100644 index 00000000..d8951bcc --- /dev/null +++ b/drivers/net/qede/base/ecore_attn_values.h @@ -0,0 +1,13287 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ATTN_VALUES_H__ +#define __ATTN_VALUES_H__ + +#ifndef __PREVENT_INT_ATTN__ + +/* HW Attention register */ +struct attn_hw_reg { + u16 reg_idx; /* Index of this register in its block */ + u16 num_of_bits; /* number of valid attention bits */ + const u16 *bit_attn_idx; /* attention index per valid bit */ + u32 sts_addr; /* Address of the STS register */ + u32 sts_clr_addr; /* Address of the STS_CLR register */ + u32 sts_wr_addr; /* Address of the STS_WR register */ + u32 mask_addr; /* Address of the MASK register */ +}; + +/* HW block attention registers */ +struct attn_hw_regs { + u16 num_of_int_regs; /* Number of interrupt regs */ + u16 num_of_prty_regs; /* Number of parity regs */ + struct attn_hw_reg **int_regs; /* interrupt regs */ + struct attn_hw_reg **prty_regs; /* parity regs */ +}; + +/* HW block attention registers */ +struct attn_hw_block { + const char *name; /* Block name */ + const char **int_desc; /* Array of interrupt attention descriptions */ + const char **prty_desc; /* Array of parity attention descriptions */ + struct attn_hw_regs chip_regs[3]; /* attention regs per chip.*/ +}; + +#ifdef ATTN_DESC +static const char *grc_int_attn_desc[5] = { + "grc_address_error", + "grc_timeout_event", + "grc_global_reserved_address", + "grc_path_isolation_error", + "grc_trace_fifo_valid_data", +}; +#else +#define grc_int_attn_desc OSAL_NULL +#endif + +static const u16 grc_int0_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg grc_int0_bb_a0 = { + 0, 4, grc_int0_bb_a0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184 +}; + +static struct attn_hw_reg *grc_int_bb_a0_regs[1] = { + &grc_int0_bb_a0, +}; + +static const u16 grc_int0_bb_b0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg grc_int0_bb_b0 = { + 0, 4, grc_int0_bb_b0_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184 +}; + +static struct attn_hw_reg *grc_int_bb_b0_regs[1] = { + &grc_int0_bb_b0, +}; + +static const u16 grc_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg grc_int0_k2 = { + 0, 5, grc_int0_k2_attn_idx, 0x50180, 0x5018c, 0x50188, 0x50184 +}; + +static struct attn_hw_reg *grc_int_k2_regs[1] = { + &grc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *grc_prty_attn_desc[3] = { + "grc_mem003_i_mem_prty", + "grc_mem002_i_mem_prty", + "grc_mem001_i_mem_prty", +}; +#else +#define grc_prty_attn_desc OSAL_NULL +#endif + +static const u16 grc_prty1_bb_a0_attn_idx[2] = { + 1, 2, +}; + +static struct attn_hw_reg grc_prty1_bb_a0 = { + 0, 2, grc_prty1_bb_a0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204 +}; + +static struct attn_hw_reg *grc_prty_bb_a0_regs[1] = { + &grc_prty1_bb_a0, +}; + +static const u16 grc_prty1_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg grc_prty1_bb_b0 = { + 0, 2, grc_prty1_bb_b0_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204 +}; + +static struct attn_hw_reg *grc_prty_bb_b0_regs[1] = { + &grc_prty1_bb_b0, +}; + +static const u16 grc_prty1_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg grc_prty1_k2 = { + 0, 2, grc_prty1_k2_attn_idx, 0x50200, 0x5020c, 0x50208, 0x50204 +}; + +static struct attn_hw_reg *grc_prty_k2_regs[1] = { + &grc_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *miscs_int_attn_desc[14] = { + "miscs_address_error", + "miscs_generic_sw", + "miscs_cnig_interrupt", + "miscs_opte_dorq_fifo_err_eng1", + "miscs_opte_dorq_fifo_err_eng0", + "miscs_opte_dbg_fifo_err_eng1", + "miscs_opte_dbg_fifo_err_eng0", + "miscs_opte_btb_if1_fifo_err_eng1", + "miscs_opte_btb_if1_fifo_err_eng0", + 
"miscs_opte_btb_if0_fifo_err_eng1", + "miscs_opte_btb_if0_fifo_err_eng0", + "miscs_opte_btb_sop_fifo_err_eng1", + "miscs_opte_btb_sop_fifo_err_eng0", + "miscs_opte_storm_fifo_err_eng0", +}; +#else +#define miscs_int_attn_desc OSAL_NULL +#endif + +static const u16 miscs_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg miscs_int0_bb_a0 = { + 0, 2, miscs_int0_bb_a0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184 +}; + +static const u16 miscs_int1_bb_a0_attn_idx[11] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg miscs_int1_bb_a0 = { + 1, 11, miscs_int1_bb_a0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194 +}; + +static struct attn_hw_reg *miscs_int_bb_a0_regs[2] = { + &miscs_int0_bb_a0, &miscs_int1_bb_a0, +}; + +static const u16 miscs_int0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg miscs_int0_bb_b0 = { + 0, 3, miscs_int0_bb_b0_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184 +}; + +static const u16 miscs_int1_bb_b0_attn_idx[11] = { + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg miscs_int1_bb_b0 = { + 1, 11, miscs_int1_bb_b0_attn_idx, 0x9190, 0x919c, 0x9198, 0x9194 +}; + +static struct attn_hw_reg *miscs_int_bb_b0_regs[2] = { + &miscs_int0_bb_b0, &miscs_int1_bb_b0, +}; + +static const u16 miscs_int0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg miscs_int0_k2 = { + 0, 3, miscs_int0_k2_attn_idx, 0x9180, 0x918c, 0x9188, 0x9184 +}; + +static struct attn_hw_reg *miscs_int_k2_regs[1] = { + &miscs_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *miscs_prty_attn_desc[1] = { + "miscs_cnig_parity", +}; +#else +#define miscs_prty_attn_desc OSAL_NULL +#endif + +static const u16 miscs_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg miscs_prty0_bb_b0 = { + 0, 1, miscs_prty0_bb_b0_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4 +}; + +static struct attn_hw_reg *miscs_prty_bb_b0_regs[1] = { + &miscs_prty0_bb_b0, +}; + +static const u16 miscs_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg miscs_prty0_k2 = { + 0, 1, miscs_prty0_k2_attn_idx, 0x91a0, 0x91ac, 0x91a8, 0x91a4 +}; + +static struct attn_hw_reg *miscs_prty_k2_regs[1] = { + &miscs_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *misc_int_attn_desc[1] = { + "misc_address_error", +}; +#else +#define misc_int_attn_desc OSAL_NULL +#endif + +static const u16 misc_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg misc_int0_bb_a0 = { + 0, 1, misc_int0_bb_a0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184 +}; + +static struct attn_hw_reg *misc_int_bb_a0_regs[1] = { + &misc_int0_bb_a0, +}; + +static const u16 misc_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg misc_int0_bb_b0 = { + 0, 1, misc_int0_bb_b0_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184 +}; + +static struct attn_hw_reg *misc_int_bb_b0_regs[1] = { + &misc_int0_bb_b0, +}; + +static const u16 misc_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg misc_int0_k2 = { + 0, 1, misc_int0_k2_attn_idx, 0x8180, 0x818c, 0x8188, 0x8184 +}; + +static struct attn_hw_reg *misc_int_k2_regs[1] = { + &misc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pglue_b_int_attn_desc[24] = { + "pglue_b_address_error", + "pglue_b_incorrect_rcv_behavior", + "pglue_b_was_error_attn", + "pglue_b_vf_length_violation_attn", + "pglue_b_vf_grc_space_violation_attn", + "pglue_b_tcpl_error_attn", + "pglue_b_tcpl_in_two_rcbs_attn", + "pglue_b_cssnoop_fifo_overflow", + "pglue_b_tcpl_translation_size_different", + "pglue_b_pcie_rx_l0s_timeout", + "pglue_b_master_zlr_attn", + 
"pglue_b_admin_window_violation_attn", + "pglue_b_out_of_range_function_in_pretend", + "pglue_b_illegal_address", + "pglue_b_pgl_cpl_err", + "pglue_b_pgl_txw_of", + "pglue_b_pgl_cpl_aft", + "pglue_b_pgl_cpl_of", + "pglue_b_pgl_cpl_ecrc", + "pglue_b_pgl_pcie_attn", + "pglue_b_pgl_read_blocked", + "pglue_b_pgl_write_blocked", + "pglue_b_vf_ilt_err", + "pglue_b_rxobffexception_attn", +}; +#else +#define pglue_b_int_attn_desc OSAL_NULL +#endif + +static const u16 pglue_b_int0_bb_a0_attn_idx[23] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, +}; + +static struct attn_hw_reg pglue_b_int0_bb_a0 = { + 0, 23, pglue_b_int0_bb_a0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, + 0x2a8184 +}; + +static struct attn_hw_reg *pglue_b_int_bb_a0_regs[1] = { + &pglue_b_int0_bb_a0, +}; + +static const u16 pglue_b_int0_bb_b0_attn_idx[23] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, +}; + +static struct attn_hw_reg pglue_b_int0_bb_b0 = { + 0, 23, pglue_b_int0_bb_b0_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, + 0x2a8184 +}; + +static struct attn_hw_reg *pglue_b_int_bb_b0_regs[1] = { + &pglue_b_int0_bb_b0, +}; + +static const u16 pglue_b_int0_k2_attn_idx[24] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, +}; + +static struct attn_hw_reg pglue_b_int0_k2 = { + 0, 24, pglue_b_int0_k2_attn_idx, 0x2a8180, 0x2a818c, 0x2a8188, 0x2a8184 +}; + +static struct attn_hw_reg *pglue_b_int_k2_regs[1] = { + &pglue_b_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pglue_b_prty_attn_desc[35] = { + "pglue_b_datapath_registers", + "pglue_b_mem027_i_mem_prty", + "pglue_b_mem007_i_mem_prty", + "pglue_b_mem009_i_mem_prty", + "pglue_b_mem010_i_mem_prty", + "pglue_b_mem008_i_mem_prty", + "pglue_b_mem022_i_mem_prty", + "pglue_b_mem023_i_mem_prty", + "pglue_b_mem024_i_mem_prty", + "pglue_b_mem025_i_mem_prty", + "pglue_b_mem004_i_mem_prty", + "pglue_b_mem005_i_mem_prty", + "pglue_b_mem011_i_mem_prty", + "pglue_b_mem016_i_mem_prty", + "pglue_b_mem017_i_mem_prty", + "pglue_b_mem012_i_mem_prty", + "pglue_b_mem013_i_mem_prty", + "pglue_b_mem014_i_mem_prty", + "pglue_b_mem015_i_mem_prty", + "pglue_b_mem018_i_mem_prty", + "pglue_b_mem020_i_mem_prty", + "pglue_b_mem021_i_mem_prty", + "pglue_b_mem019_i_mem_prty", + "pglue_b_mem026_i_mem_prty", + "pglue_b_mem006_i_mem_prty", + "pglue_b_mem003_i_mem_prty", + "pglue_b_mem002_i_mem_prty_0", + "pglue_b_mem002_i_mem_prty_1", + "pglue_b_mem002_i_mem_prty_2", + "pglue_b_mem002_i_mem_prty_3", + "pglue_b_mem002_i_mem_prty_4", + "pglue_b_mem002_i_mem_prty_5", + "pglue_b_mem002_i_mem_prty_6", + "pglue_b_mem002_i_mem_prty_7", + "pglue_b_mem001_i_mem_prty", +}; +#else +#define pglue_b_prty_attn_desc OSAL_NULL +#endif + +static const u16 pglue_b_prty1_bb_a0_attn_idx[22] = { + 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, +}; + +static struct attn_hw_reg pglue_b_prty1_bb_a0 = { + 0, 22, pglue_b_prty1_bb_a0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208, + 0x2a8204 +}; + +static struct attn_hw_reg *pglue_b_prty_bb_a0_regs[1] = { + &pglue_b_prty1_bb_a0, +}; + +static const u16 pglue_b_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglue_b_prty0_bb_b0 = { + 0, 1, pglue_b_prty0_bb_b0_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, + 0x2a8194 +}; + +static const u16 pglue_b_prty1_bb_b0_attn_idx[22] = { + 2, 3, 4, 5, 10, 11, 12, 15, 16, 17, 18, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, +}; + +static struct attn_hw_reg pglue_b_prty1_bb_b0 = { + 
1, 22, pglue_b_prty1_bb_b0_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208, + 0x2a8204 +}; + +static struct attn_hw_reg *pglue_b_prty_bb_b0_regs[2] = { + &pglue_b_prty0_bb_b0, &pglue_b_prty1_bb_b0, +}; + +static const u16 pglue_b_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglue_b_prty0_k2 = { + 0, 1, pglue_b_prty0_k2_attn_idx, 0x2a8190, 0x2a819c, 0x2a8198, 0x2a8194 +}; + +static const u16 pglue_b_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pglue_b_prty1_k2 = { + 1, 31, pglue_b_prty1_k2_attn_idx, 0x2a8200, 0x2a820c, 0x2a8208, + 0x2a8204 +}; + +static const u16 pglue_b_prty2_k2_attn_idx[3] = { + 32, 33, 34, +}; + +static struct attn_hw_reg pglue_b_prty2_k2 = { + 2, 3, pglue_b_prty2_k2_attn_idx, 0x2a8210, 0x2a821c, 0x2a8218, 0x2a8214 +}; + +static struct attn_hw_reg *pglue_b_prty_k2_regs[3] = { + &pglue_b_prty0_k2, &pglue_b_prty1_k2, &pglue_b_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *cnig_int_attn_desc[10] = { + "cnig_address_error", + "cnig_tx_illegal_sop_port0", + "cnig_tx_illegal_sop_port1", + "cnig_tx_illegal_sop_port2", + "cnig_tx_illegal_sop_port3", + "cnig_tdm_lane_0_bandwidth_exceed", + "cnig_tdm_lane_1_bandwidth_exceed", + "cnig_pmeg_intr", + "cnig_pmfc_intr", + "cnig_fifo_error", +}; +#else +#define cnig_int_attn_desc OSAL_NULL +#endif + +static const u16 cnig_int0_bb_a0_attn_idx[4] = { + 0, 7, 8, 9, +}; + +static struct attn_hw_reg cnig_int0_bb_a0 = { + 0, 4, cnig_int0_bb_a0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec +}; + +static struct attn_hw_reg *cnig_int_bb_a0_regs[1] = { + &cnig_int0_bb_a0, +}; + +static const u16 cnig_int0_bb_b0_attn_idx[6] = { + 0, 1, 3, 7, 8, 9, +}; + +static struct attn_hw_reg cnig_int0_bb_b0 = { + 0, 6, cnig_int0_bb_b0_attn_idx, 0x2182e8, 0x2182f4, 0x2182f0, 0x2182ec +}; + +static struct attn_hw_reg *cnig_int_bb_b0_regs[1] = { + &cnig_int0_bb_b0, +}; + +static const u16 cnig_int0_k2_attn_idx[7] = { + 0, 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg cnig_int0_k2 = { + 0, 7, cnig_int0_k2_attn_idx, 0x218218, 0x218224, 0x218220, 0x21821c +}; + +static struct attn_hw_reg *cnig_int_k2_regs[1] = { + &cnig_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *cnig_prty_attn_desc[3] = { + "cnig_unused_0", + "cnig_datapath_tx", + "cnig_datapath_rx", +}; +#else +#define cnig_prty_attn_desc OSAL_NULL +#endif + +static const u16 cnig_prty0_bb_b0_attn_idx[2] = { + 1, 2, +}; + +static struct attn_hw_reg cnig_prty0_bb_b0 = { + 0, 2, cnig_prty0_bb_b0_attn_idx, 0x218348, 0x218354, 0x218350, 0x21834c +}; + +static struct attn_hw_reg *cnig_prty_bb_b0_regs[1] = { + &cnig_prty0_bb_b0, +}; + +static const u16 cnig_prty0_k2_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg cnig_prty0_k2 = { + 0, 1, cnig_prty0_k2_attn_idx, 0x21822c, 0x218238, 0x218234, 0x218230 +}; + +static struct attn_hw_reg *cnig_prty_k2_regs[1] = { + &cnig_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *cpmu_int_attn_desc[1] = { + "cpmu_address_error", +}; +#else +#define cpmu_int_attn_desc OSAL_NULL +#endif + +static const u16 cpmu_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg cpmu_int0_bb_a0 = { + 0, 1, cpmu_int0_bb_a0_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4 +}; + +static struct attn_hw_reg *cpmu_int_bb_a0_regs[1] = { + &cpmu_int0_bb_a0, +}; + +static const u16 cpmu_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg cpmu_int0_bb_b0 = { + 0, 1, cpmu_int0_bb_b0_attn_idx, 0x303e0, 0x303ec, 
0x303e8, 0x303e4 +}; + +static struct attn_hw_reg *cpmu_int_bb_b0_regs[1] = { + &cpmu_int0_bb_b0, +}; + +static const u16 cpmu_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg cpmu_int0_k2 = { + 0, 1, cpmu_int0_k2_attn_idx, 0x303e0, 0x303ec, 0x303e8, 0x303e4 +}; + +static struct attn_hw_reg *cpmu_int_k2_regs[1] = { + &cpmu_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ncsi_int_attn_desc[1] = { + "ncsi_address_error", +}; +#else +#define ncsi_int_attn_desc OSAL_NULL +#endif + +static const u16 ncsi_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_int0_bb_a0 = { + 0, 1, ncsi_int0_bb_a0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0 +}; + +static struct attn_hw_reg *ncsi_int_bb_a0_regs[1] = { + &ncsi_int0_bb_a0, +}; + +static const u16 ncsi_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_int0_bb_b0 = { + 0, 1, ncsi_int0_bb_b0_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0 +}; + +static struct attn_hw_reg *ncsi_int_bb_b0_regs[1] = { + &ncsi_int0_bb_b0, +}; + +static const u16 ncsi_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_int0_k2 = { + 0, 1, ncsi_int0_k2_attn_idx, 0x404cc, 0x404d8, 0x404d4, 0x404d0 +}; + +static struct attn_hw_reg *ncsi_int_k2_regs[1] = { + &ncsi_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ncsi_prty_attn_desc[1] = { + "ncsi_mem002_i_mem_prty", +}; +#else +#define ncsi_prty_attn_desc OSAL_NULL +#endif + +static const u16 ncsi_prty1_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_prty1_bb_a0 = { + 0, 1, ncsi_prty1_bb_a0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004 +}; + +static struct attn_hw_reg *ncsi_prty_bb_a0_regs[1] = { + &ncsi_prty1_bb_a0, +}; + +static const u16 ncsi_prty1_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_prty1_bb_b0 = { + 0, 1, ncsi_prty1_bb_b0_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004 +}; + +static struct attn_hw_reg *ncsi_prty_bb_b0_regs[1] = { + &ncsi_prty1_bb_b0, +}; + +static const u16 ncsi_prty1_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ncsi_prty1_k2 = { + 0, 1, ncsi_prty1_k2_attn_idx, 0x40000, 0x4000c, 0x40008, 0x40004 +}; + +static struct attn_hw_reg *ncsi_prty_k2_regs[1] = { + &ncsi_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *opte_prty_attn_desc[12] = { + "opte_mem009_i_mem_prty", + "opte_mem010_i_mem_prty", + "opte_mem005_i_mem_prty", + "opte_mem006_i_mem_prty", + "opte_mem007_i_mem_prty", + "opte_mem008_i_mem_prty", + "opte_mem001_i_mem_prty", + "opte_mem002_i_mem_prty", + "opte_mem003_i_mem_prty", + "opte_mem004_i_mem_prty", + "opte_mem011_i_mem_prty", + "opte_datapath_parity_error", +}; +#else +#define opte_prty_attn_desc OSAL_NULL +#endif + +static const u16 opte_prty1_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg opte_prty1_bb_a0 = { + 0, 11, opte_prty1_bb_a0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004 +}; + +static struct attn_hw_reg *opte_prty_bb_a0_regs[1] = { + &opte_prty1_bb_a0, +}; + +static const u16 opte_prty1_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg opte_prty1_bb_b0 = { + 0, 11, opte_prty1_bb_b0_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004 +}; + +static const u16 opte_prty0_bb_b0_attn_idx[1] = { + 11, +}; + +static struct attn_hw_reg opte_prty0_bb_b0 = { + 1, 1, opte_prty0_bb_b0_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c +}; + +static struct attn_hw_reg *opte_prty_bb_b0_regs[2] = { + &opte_prty1_bb_b0, &opte_prty0_bb_b0, +}; + +static const u16 opte_prty1_k2_attn_idx[11] = { + 0, 1, 
2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg opte_prty1_k2 = { + 0, 11, opte_prty1_k2_attn_idx, 0x53000, 0x5300c, 0x53008, 0x53004 +}; + +static const u16 opte_prty0_k2_attn_idx[1] = { + 11, +}; + +static struct attn_hw_reg opte_prty0_k2 = { + 1, 1, opte_prty0_k2_attn_idx, 0x53208, 0x53214, 0x53210, 0x5320c +}; + +static struct attn_hw_reg *opte_prty_k2_regs[2] = { + &opte_prty1_k2, &opte_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *bmb_int_attn_desc[297] = { + "bmb_address_error", + "bmb_rc_pkt0_rls_error", + "bmb_unused_0", + "bmb_rc_pkt0_protocol_error", + "bmb_rc_pkt1_rls_error", + "bmb_unused_1", + "bmb_rc_pkt1_protocol_error", + "bmb_rc_pkt2_rls_error", + "bmb_unused_2", + "bmb_rc_pkt2_protocol_error", + "bmb_rc_pkt3_rls_error", + "bmb_unused_3", + "bmb_rc_pkt3_protocol_error", + "bmb_rc_sop_req_tc_port_error", + "bmb_unused_4", + "bmb_wc0_protocol_error", + "bmb_wc1_protocol_error", + "bmb_wc2_protocol_error", + "bmb_wc3_protocol_error", + "bmb_unused_5", + "bmb_ll_blk_error", + "bmb_unused_6", + "bmb_mac0_fc_cnt_error", + "bmb_ll_arb_calc_error", + "bmb_wc0_inp_fifo_error", + "bmb_wc0_sop_fifo_error", + "bmb_wc0_len_fifo_error", + "bmb_wc0_queue_fifo_error", + "bmb_wc0_free_point_fifo_error", + "bmb_wc0_next_point_fifo_error", + "bmb_wc0_strt_fifo_error", + "bmb_wc0_second_dscr_fifo_error", + "bmb_wc0_pkt_avail_fifo_error", + "bmb_wc0_cos_cnt_fifo_error", + "bmb_wc0_notify_fifo_error", + "bmb_wc0_ll_req_fifo_error", + "bmb_wc0_ll_pa_cnt_error", + "bmb_wc0_bb_pa_cnt_error", + "bmb_wc1_inp_fifo_error", + "bmb_wc1_sop_fifo_error", + "bmb_wc1_queue_fifo_error", + "bmb_wc1_free_point_fifo_error", + "bmb_wc1_next_point_fifo_error", + "bmb_wc1_strt_fifo_error", + "bmb_wc1_second_dscr_fifo_error", + "bmb_wc1_pkt_avail_fifo_error", + "bmb_wc1_cos_cnt_fifo_error", + "bmb_wc1_notify_fifo_error", + "bmb_wc1_ll_req_fifo_error", + "bmb_wc1_ll_pa_cnt_error", + "bmb_wc1_bb_pa_cnt_error", + "bmb_wc2_inp_fifo_error", + "bmb_wc2_sop_fifo_error", + "bmb_wc2_queue_fifo_error", + "bmb_wc2_free_point_fifo_error", + "bmb_wc2_next_point_fifo_error", + "bmb_wc2_strt_fifo_error", + "bmb_wc2_second_dscr_fifo_error", + "bmb_wc2_pkt_avail_fifo_error", + "bmb_wc2_cos_cnt_fifo_error", + "bmb_wc2_notify_fifo_error", + "bmb_wc2_ll_req_fifo_error", + "bmb_wc2_ll_pa_cnt_error", + "bmb_wc2_bb_pa_cnt_error", + "bmb_wc3_inp_fifo_error", + "bmb_wc3_sop_fifo_error", + "bmb_wc3_queue_fifo_error", + "bmb_wc3_free_point_fifo_error", + "bmb_wc3_next_point_fifo_error", + "bmb_wc3_strt_fifo_error", + "bmb_wc3_second_dscr_fifo_error", + "bmb_wc3_pkt_avail_fifo_error", + "bmb_wc3_cos_cnt_fifo_error", + "bmb_wc3_notify_fifo_error", + "bmb_wc3_ll_req_fifo_error", + "bmb_wc3_ll_pa_cnt_error", + "bmb_wc3_bb_pa_cnt_error", + "bmb_rc_pkt0_side_fifo_error", + "bmb_rc_pkt0_req_fifo_error", + "bmb_rc_pkt0_blk_fifo_error", + "bmb_rc_pkt0_rls_left_fifo_error", + "bmb_rc_pkt0_strt_ptr_fifo_error", + "bmb_rc_pkt0_second_ptr_fifo_error", + "bmb_rc_pkt0_rsp_fifo_error", + "bmb_rc_pkt0_dscr_fifo_error", + "bmb_rc_pkt1_side_fifo_error", + "bmb_rc_pkt1_req_fifo_error", + "bmb_rc_pkt1_blk_fifo_error", + "bmb_rc_pkt1_rls_left_fifo_error", + "bmb_rc_pkt1_strt_ptr_fifo_error", + "bmb_rc_pkt1_second_ptr_fifo_error", + "bmb_rc_pkt1_rsp_fifo_error", + "bmb_rc_pkt1_dscr_fifo_error", + "bmb_rc_pkt2_side_fifo_error", + "bmb_rc_pkt2_req_fifo_error", + "bmb_rc_pkt2_blk_fifo_error", + "bmb_rc_pkt2_rls_left_fifo_error", + "bmb_rc_pkt2_strt_ptr_fifo_error", + "bmb_rc_pkt2_second_ptr_fifo_error", + "bmb_rc_pkt2_rsp_fifo_error", + 
"bmb_rc_pkt2_dscr_fifo_error", + "bmb_rc_pkt3_side_fifo_error", + "bmb_rc_pkt3_req_fifo_error", + "bmb_rc_pkt3_blk_fifo_error", + "bmb_rc_pkt3_rls_left_fifo_error", + "bmb_rc_pkt3_strt_ptr_fifo_error", + "bmb_rc_pkt3_second_ptr_fifo_error", + "bmb_rc_pkt3_rsp_fifo_error", + "bmb_rc_pkt3_dscr_fifo_error", + "bmb_rc_sop_strt_fifo_error", + "bmb_rc_sop_req_fifo_error", + "bmb_rc_sop_dscr_fifo_error", + "bmb_rc_sop_queue_fifo_error", + "bmb_ll_arb_rls_fifo_error", + "bmb_ll_arb_prefetch_fifo_error", + "bmb_rc_pkt0_rls_fifo_error", + "bmb_rc_pkt1_rls_fifo_error", + "bmb_rc_pkt2_rls_fifo_error", + "bmb_rc_pkt3_rls_fifo_error", + "bmb_rc_pkt4_rls_fifo_error", + "bmb_rc_pkt5_rls_fifo_error", + "bmb_rc_pkt6_rls_fifo_error", + "bmb_rc_pkt7_rls_fifo_error", + "bmb_rc_pkt8_rls_fifo_error", + "bmb_rc_pkt9_rls_fifo_error", + "bmb_rc_pkt4_rls_error", + "bmb_rc_pkt4_protocol_error", + "bmb_rc_pkt4_side_fifo_error", + "bmb_rc_pkt4_req_fifo_error", + "bmb_rc_pkt4_blk_fifo_error", + "bmb_rc_pkt4_rls_left_fifo_error", + "bmb_rc_pkt4_strt_ptr_fifo_error", + "bmb_rc_pkt4_second_ptr_fifo_error", + "bmb_rc_pkt4_rsp_fifo_error", + "bmb_rc_pkt4_dscr_fifo_error", + "bmb_rc_pkt5_rls_error", + "bmb_rc_pkt5_protocol_error", + "bmb_rc_pkt5_side_fifo_error", + "bmb_rc_pkt5_req_fifo_error", + "bmb_rc_pkt5_blk_fifo_error", + "bmb_rc_pkt5_rls_left_fifo_error", + "bmb_rc_pkt5_strt_ptr_fifo_error", + "bmb_rc_pkt5_second_ptr_fifo_error", + "bmb_rc_pkt5_rsp_fifo_error", + "bmb_rc_pkt5_dscr_fifo_error", + "bmb_rc_pkt6_rls_error", + "bmb_rc_pkt6_protocol_error", + "bmb_rc_pkt6_side_fifo_error", + "bmb_rc_pkt6_req_fifo_error", + "bmb_rc_pkt6_blk_fifo_error", + "bmb_rc_pkt6_rls_left_fifo_error", + "bmb_rc_pkt6_strt_ptr_fifo_error", + "bmb_rc_pkt6_second_ptr_fifo_error", + "bmb_rc_pkt6_rsp_fifo_error", + "bmb_rc_pkt6_dscr_fifo_error", + "bmb_rc_pkt7_rls_error", + "bmb_rc_pkt7_protocol_error", + "bmb_rc_pkt7_side_fifo_error", + "bmb_rc_pkt7_req_fifo_error", + "bmb_rc_pkt7_blk_fifo_error", + "bmb_rc_pkt7_rls_left_fifo_error", + "bmb_rc_pkt7_strt_ptr_fifo_error", + "bmb_rc_pkt7_second_ptr_fifo_error", + "bmb_rc_pkt7_rsp_fifo_error", + "bmb_packet_available_sync_fifo_push_error", + "bmb_rc_pkt8_rls_error", + "bmb_rc_pkt8_protocol_error", + "bmb_rc_pkt8_side_fifo_error", + "bmb_rc_pkt8_req_fifo_error", + "bmb_rc_pkt8_blk_fifo_error", + "bmb_rc_pkt8_rls_left_fifo_error", + "bmb_rc_pkt8_strt_ptr_fifo_error", + "bmb_rc_pkt8_second_ptr_fifo_error", + "bmb_rc_pkt8_rsp_fifo_error", + "bmb_rc_pkt8_dscr_fifo_error", + "bmb_rc_pkt9_rls_error", + "bmb_rc_pkt9_protocol_error", + "bmb_rc_pkt9_side_fifo_error", + "bmb_rc_pkt9_req_fifo_error", + "bmb_rc_pkt9_blk_fifo_error", + "bmb_rc_pkt9_rls_left_fifo_error", + "bmb_rc_pkt9_strt_ptr_fifo_error", + "bmb_rc_pkt9_second_ptr_fifo_error", + "bmb_rc_pkt9_rsp_fifo_error", + "bmb_rc_pkt9_dscr_fifo_error", + "bmb_wc4_protocol_error", + "bmb_wc5_protocol_error", + "bmb_wc6_protocol_error", + "bmb_wc7_protocol_error", + "bmb_wc8_protocol_error", + "bmb_wc9_protocol_error", + "bmb_wc4_inp_fifo_error", + "bmb_wc4_sop_fifo_error", + "bmb_wc4_queue_fifo_error", + "bmb_wc4_free_point_fifo_error", + "bmb_wc4_next_point_fifo_error", + "bmb_wc4_strt_fifo_error", + "bmb_wc4_second_dscr_fifo_error", + "bmb_wc4_pkt_avail_fifo_error", + "bmb_wc4_cos_cnt_fifo_error", + "bmb_wc4_notify_fifo_error", + "bmb_wc4_ll_req_fifo_error", + "bmb_wc4_ll_pa_cnt_error", + "bmb_wc4_bb_pa_cnt_error", + "bmb_wc5_inp_fifo_error", + "bmb_wc5_sop_fifo_error", + "bmb_wc5_queue_fifo_error", + "bmb_wc5_free_point_fifo_error", + 
"bmb_wc5_next_point_fifo_error", + "bmb_wc5_strt_fifo_error", + "bmb_wc5_second_dscr_fifo_error", + "bmb_wc5_pkt_avail_fifo_error", + "bmb_wc5_cos_cnt_fifo_error", + "bmb_wc5_notify_fifo_error", + "bmb_wc5_ll_req_fifo_error", + "bmb_wc5_ll_pa_cnt_error", + "bmb_wc5_bb_pa_cnt_error", + "bmb_wc6_inp_fifo_error", + "bmb_wc6_sop_fifo_error", + "bmb_wc6_queue_fifo_error", + "bmb_wc6_free_point_fifo_error", + "bmb_wc6_next_point_fifo_error", + "bmb_wc6_strt_fifo_error", + "bmb_wc6_second_dscr_fifo_error", + "bmb_wc6_pkt_avail_fifo_error", + "bmb_wc6_cos_cnt_fifo_error", + "bmb_wc6_notify_fifo_error", + "bmb_wc6_ll_req_fifo_error", + "bmb_wc6_ll_pa_cnt_error", + "bmb_wc6_bb_pa_cnt_error", + "bmb_wc7_inp_fifo_error", + "bmb_wc7_sop_fifo_error", + "bmb_wc7_queue_fifo_error", + "bmb_wc7_free_point_fifo_error", + "bmb_wc7_next_point_fifo_error", + "bmb_wc7_strt_fifo_error", + "bmb_wc7_second_dscr_fifo_error", + "bmb_wc7_pkt_avail_fifo_error", + "bmb_wc7_cos_cnt_fifo_error", + "bmb_wc7_notify_fifo_error", + "bmb_wc7_ll_req_fifo_error", + "bmb_wc7_ll_pa_cnt_error", + "bmb_wc7_bb_pa_cnt_error", + "bmb_wc8_inp_fifo_error", + "bmb_wc8_sop_fifo_error", + "bmb_wc8_queue_fifo_error", + "bmb_wc8_free_point_fifo_error", + "bmb_wc8_next_point_fifo_error", + "bmb_wc8_strt_fifo_error", + "bmb_wc8_second_dscr_fifo_error", + "bmb_wc8_pkt_avail_fifo_error", + "bmb_wc8_cos_cnt_fifo_error", + "bmb_wc8_notify_fifo_error", + "bmb_wc8_ll_req_fifo_error", + "bmb_wc8_ll_pa_cnt_error", + "bmb_wc8_bb_pa_cnt_error", + "bmb_wc9_inp_fifo_error", + "bmb_wc9_sop_fifo_error", + "bmb_wc9_queue_fifo_error", + "bmb_wc9_free_point_fifo_error", + "bmb_wc9_next_point_fifo_error", + "bmb_wc9_strt_fifo_error", + "bmb_wc9_second_dscr_fifo_error", + "bmb_wc9_pkt_avail_fifo_error", + "bmb_wc9_cos_cnt_fifo_error", + "bmb_wc9_notify_fifo_error", + "bmb_wc9_ll_req_fifo_error", + "bmb_wc9_ll_pa_cnt_error", + "bmb_wc9_bb_pa_cnt_error", + "bmb_rc9_sop_rc_out_sync_fifo_error", + "bmb_rc9_sop_out_sync_fifo_push_error", + "bmb_rc0_sop_pend_fifo_error", + "bmb_rc1_sop_pend_fifo_error", + "bmb_rc2_sop_pend_fifo_error", + "bmb_rc3_sop_pend_fifo_error", + "bmb_rc4_sop_pend_fifo_error", + "bmb_rc5_sop_pend_fifo_error", + "bmb_rc6_sop_pend_fifo_error", + "bmb_rc7_sop_pend_fifo_error", + "bmb_rc0_dscr_pend_fifo_error", + "bmb_rc1_dscr_pend_fifo_error", + "bmb_rc2_dscr_pend_fifo_error", + "bmb_rc3_dscr_pend_fifo_error", + "bmb_rc4_dscr_pend_fifo_error", + "bmb_rc5_dscr_pend_fifo_error", + "bmb_rc6_dscr_pend_fifo_error", + "bmb_rc7_dscr_pend_fifo_error", + "bmb_rc8_sop_inp_sync_fifo_push_error", + "bmb_rc9_sop_inp_sync_fifo_push_error", + "bmb_rc8_sop_out_sync_fifo_push_error", + "bmb_rc_gnt_pend_fifo_error", + "bmb_rc8_out_sync_fifo_push_error", + "bmb_rc9_out_sync_fifo_push_error", + "bmb_wc8_sync_fifo_push_error", + "bmb_wc9_sync_fifo_push_error", + "bmb_rc8_sop_rc_out_sync_fifo_error", + "bmb_rc_pkt7_dscr_fifo_error", +}; +#else +#define bmb_int_attn_desc OSAL_NULL +#endif + +static const u16 bmb_int0_bb_a0_attn_idx[16] = { + 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22, +}; + +static struct attn_hw_reg bmb_int0_bb_a0 = { + 0, 16, bmb_int0_bb_a0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4 +}; + +static const u16 bmb_int1_bb_a0_attn_idx[28] = { + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_int1_bb_a0 = { + 1, 28, bmb_int1_bb_a0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc +}; + +static const u16 bmb_int2_bb_a0_attn_idx[26] = { + 
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, +}; + +static struct attn_hw_reg bmb_int2_bb_a0 = { + 2, 26, bmb_int2_bb_a0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4 +}; + +static const u16 bmb_int3_bb_a0_attn_idx[31] = { + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, +}; + +static struct attn_hw_reg bmb_int3_bb_a0 = { + 3, 31, bmb_int3_bb_a0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c +}; + +static const u16 bmb_int4_bb_a0_attn_idx[27] = { + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, +}; + +static struct attn_hw_reg bmb_int4_bb_a0 = { + 4, 27, bmb_int4_bb_a0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124 +}; + +static const u16 bmb_int5_bb_a0_attn_idx[29] = { + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, +}; + +static struct attn_hw_reg bmb_int5_bb_a0 = { + 5, 29, bmb_int5_bb_a0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c +}; + +static const u16 bmb_int6_bb_a0_attn_idx[30] = { + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, +}; + +static struct attn_hw_reg bmb_int6_bb_a0 = { + 6, 30, bmb_int6_bb_a0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154 +}; + +static const u16 bmb_int7_bb_a0_attn_idx[32] = { + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, + 225, +}; + +static struct attn_hw_reg bmb_int7_bb_a0 = { + 7, 32, bmb_int7_bb_a0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c +}; + +static const u16 bmb_int8_bb_a0_attn_idx[32] = { + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, + 257, +}; + +static struct attn_hw_reg bmb_int8_bb_a0 = { + 8, 32, bmb_int8_bb_a0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188 +}; + +static const u16 bmb_int9_bb_a0_attn_idx[32] = { + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, + 289, +}; + +static struct attn_hw_reg bmb_int9_bb_a0 = { + 9, 32, bmb_int9_bb_a0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0 +}; + +static const u16 bmb_int10_bb_a0_attn_idx[3] = { + 290, 291, 292, +}; + +static struct attn_hw_reg bmb_int10_bb_a0 = { + 10, 3, bmb_int10_bb_a0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8 +}; + +static const u16 bmb_int11_bb_a0_attn_idx[4] = { + 293, 294, 295, 296, +}; + +static struct attn_hw_reg bmb_int11_bb_a0 = { + 11, 4, bmb_int11_bb_a0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0 +}; + +static struct attn_hw_reg *bmb_int_bb_a0_regs[12] = { + &bmb_int0_bb_a0, &bmb_int1_bb_a0, &bmb_int2_bb_a0, &bmb_int3_bb_a0, + &bmb_int4_bb_a0, &bmb_int5_bb_a0, &bmb_int6_bb_a0, &bmb_int7_bb_a0, + &bmb_int8_bb_a0, &bmb_int9_bb_a0, + &bmb_int10_bb_a0, &bmb_int11_bb_a0, +}; + +static const u16 bmb_int0_bb_b0_attn_idx[16] = { + 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22, +}; + +static struct attn_hw_reg bmb_int0_bb_b0 = { + 0, 16, bmb_int0_bb_b0_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4 +}; + +static const u16 
bmb_int1_bb_b0_attn_idx[28] = { + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_int1_bb_b0 = { + 1, 28, bmb_int1_bb_b0_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc +}; + +static const u16 bmb_int2_bb_b0_attn_idx[26] = { + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, +}; + +static struct attn_hw_reg bmb_int2_bb_b0 = { + 2, 26, bmb_int2_bb_b0_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4 +}; + +static const u16 bmb_int3_bb_b0_attn_idx[31] = { + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, +}; + +static struct attn_hw_reg bmb_int3_bb_b0 = { + 3, 31, bmb_int3_bb_b0_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c +}; + +static const u16 bmb_int4_bb_b0_attn_idx[27] = { + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, +}; + +static struct attn_hw_reg bmb_int4_bb_b0 = { + 4, 27, bmb_int4_bb_b0_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124 +}; + +static const u16 bmb_int5_bb_b0_attn_idx[29] = { + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, +}; + +static struct attn_hw_reg bmb_int5_bb_b0 = { + 5, 29, bmb_int5_bb_b0_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c +}; + +static const u16 bmb_int6_bb_b0_attn_idx[30] = { + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, +}; + +static struct attn_hw_reg bmb_int6_bb_b0 = { + 6, 30, bmb_int6_bb_b0_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154 +}; + +static const u16 bmb_int7_bb_b0_attn_idx[32] = { + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, + 225, +}; + +static struct attn_hw_reg bmb_int7_bb_b0 = { + 7, 32, bmb_int7_bb_b0_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c +}; + +static const u16 bmb_int8_bb_b0_attn_idx[32] = { + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, + 257, +}; + +static struct attn_hw_reg bmb_int8_bb_b0 = { + 8, 32, bmb_int8_bb_b0_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188 +}; + +static const u16 bmb_int9_bb_b0_attn_idx[32] = { + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, + 289, +}; + +static struct attn_hw_reg bmb_int9_bb_b0 = { + 9, 32, bmb_int9_bb_b0_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0 +}; + +static const u16 bmb_int10_bb_b0_attn_idx[3] = { + 290, 291, 292, +}; + +static struct attn_hw_reg bmb_int10_bb_b0 = { + 10, 3, bmb_int10_bb_b0_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8 +}; + +static const u16 bmb_int11_bb_b0_attn_idx[4] = { + 293, 294, 295, 296, +}; + +static struct attn_hw_reg bmb_int11_bb_b0 = { + 11, 4, bmb_int11_bb_b0_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0 +}; + +static struct attn_hw_reg *bmb_int_bb_b0_regs[12] = { + &bmb_int0_bb_b0, &bmb_int1_bb_b0, &bmb_int2_bb_b0, &bmb_int3_bb_b0, + &bmb_int4_bb_b0, &bmb_int5_bb_b0, &bmb_int6_bb_b0, &bmb_int7_bb_b0, + &bmb_int8_bb_b0, 
&bmb_int9_bb_b0, + &bmb_int10_bb_b0, &bmb_int11_bb_b0, +}; + +static const u16 bmb_int0_k2_attn_idx[16] = { + 0, 1, 3, 4, 6, 7, 9, 10, 12, 13, 15, 16, 17, 18, 20, 22, +}; + +static struct attn_hw_reg bmb_int0_k2 = { + 0, 16, bmb_int0_k2_attn_idx, 0x5400c0, 0x5400cc, 0x5400c8, 0x5400c4 +}; + +static const u16 bmb_int1_k2_attn_idx[28] = { + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, + 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_int1_k2 = { + 1, 28, bmb_int1_k2_attn_idx, 0x5400d8, 0x5400e4, 0x5400e0, 0x5400dc +}; + +static const u16 bmb_int2_k2_attn_idx[26] = { + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, + 69, 70, 71, 72, 73, 74, 75, 76, +}; + +static struct attn_hw_reg bmb_int2_k2 = { + 2, 26, bmb_int2_k2_attn_idx, 0x5400f0, 0x5400fc, 0x5400f8, 0x5400f4 +}; + +static const u16 bmb_int3_k2_attn_idx[31] = { + 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, + 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, +}; + +static struct attn_hw_reg bmb_int3_k2 = { + 3, 31, bmb_int3_k2_attn_idx, 0x540108, 0x540114, 0x540110, 0x54010c +}; + +static const u16 bmb_int4_k2_attn_idx[27] = { + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, +}; + +static struct attn_hw_reg bmb_int4_k2 = { + 4, 27, bmb_int4_k2_attn_idx, 0x540120, 0x54012c, 0x540128, 0x540124 +}; + +static const u16 bmb_int5_k2_attn_idx[29] = { + 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, + 149, + 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, +}; + +static struct attn_hw_reg bmb_int5_k2 = { + 5, 29, bmb_int5_k2_attn_idx, 0x540138, 0x540144, 0x540140, 0x54013c +}; + +static const u16 bmb_int6_k2_attn_idx[30] = { + 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, + 178, + 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, + 193, +}; + +static struct attn_hw_reg bmb_int6_k2 = { + 6, 30, bmb_int6_k2_attn_idx, 0x540150, 0x54015c, 0x540158, 0x540154 +}; + +static const u16 bmb_int7_k2_attn_idx[32] = { + 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, 205, 206, 207, + 208, + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, + 223, 224, + 225, +}; + +static struct attn_hw_reg bmb_int7_k2 = { + 7, 32, bmb_int7_k2_attn_idx, 0x540168, 0x540174, 0x540170, 0x54016c +}; + +static const u16 bmb_int8_k2_attn_idx[32] = { + 226, 227, 228, 229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, + 240, + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, + 255, 256, + 257, +}; + +static struct attn_hw_reg bmb_int8_k2 = { + 8, 32, bmb_int8_k2_attn_idx, 0x540184, 0x540190, 0x54018c, 0x540188 +}; + +static const u16 bmb_int9_k2_attn_idx[32] = { + 258, 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, + 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288, + 289, +}; + +static struct attn_hw_reg bmb_int9_k2 = { + 9, 32, bmb_int9_k2_attn_idx, 0x54019c, 0x5401a8, 0x5401a4, 0x5401a0 +}; + +static const u16 bmb_int10_k2_attn_idx[3] = { + 290, 291, 292, +}; + +static struct attn_hw_reg bmb_int10_k2 = { + 10, 3, bmb_int10_k2_attn_idx, 0x5401b4, 0x5401c0, 0x5401bc, 0x5401b8 +}; + +static const u16 bmb_int11_k2_attn_idx[4] = { + 293, 294, 295, 296, +}; + +static struct attn_hw_reg bmb_int11_k2 = { + 11, 4, bmb_int11_k2_attn_idx, 0x5401cc, 0x5401d8, 0x5401d4, 0x5401d0 +}; + +static 
struct attn_hw_reg *bmb_int_k2_regs[12] = { + &bmb_int0_k2, &bmb_int1_k2, &bmb_int2_k2, &bmb_int3_k2, &bmb_int4_k2, + &bmb_int5_k2, &bmb_int6_k2, &bmb_int7_k2, &bmb_int8_k2, &bmb_int9_k2, + &bmb_int10_k2, &bmb_int11_k2, +}; + +#ifdef ATTN_DESC +static const char *bmb_prty_attn_desc[61] = { + "bmb_ll_bank0_mem_prty", + "bmb_ll_bank1_mem_prty", + "bmb_ll_bank2_mem_prty", + "bmb_ll_bank3_mem_prty", + "bmb_datapath_registers", + "bmb_mem001_i_ecc_rf_int", + "bmb_mem008_i_ecc_rf_int", + "bmb_mem009_i_ecc_rf_int", + "bmb_mem010_i_ecc_rf_int", + "bmb_mem011_i_ecc_rf_int", + "bmb_mem012_i_ecc_rf_int", + "bmb_mem013_i_ecc_rf_int", + "bmb_mem014_i_ecc_rf_int", + "bmb_mem015_i_ecc_rf_int", + "bmb_mem016_i_ecc_rf_int", + "bmb_mem002_i_ecc_rf_int", + "bmb_mem003_i_ecc_rf_int", + "bmb_mem004_i_ecc_rf_int", + "bmb_mem005_i_ecc_rf_int", + "bmb_mem006_i_ecc_rf_int", + "bmb_mem007_i_ecc_rf_int", + "bmb_mem059_i_mem_prty", + "bmb_mem060_i_mem_prty", + "bmb_mem037_i_mem_prty", + "bmb_mem038_i_mem_prty", + "bmb_mem039_i_mem_prty", + "bmb_mem040_i_mem_prty", + "bmb_mem041_i_mem_prty", + "bmb_mem042_i_mem_prty", + "bmb_mem043_i_mem_prty", + "bmb_mem044_i_mem_prty", + "bmb_mem045_i_mem_prty", + "bmb_mem046_i_mem_prty", + "bmb_mem047_i_mem_prty", + "bmb_mem048_i_mem_prty", + "bmb_mem049_i_mem_prty", + "bmb_mem050_i_mem_prty", + "bmb_mem051_i_mem_prty", + "bmb_mem052_i_mem_prty", + "bmb_mem053_i_mem_prty", + "bmb_mem054_i_mem_prty", + "bmb_mem055_i_mem_prty", + "bmb_mem056_i_mem_prty", + "bmb_mem057_i_mem_prty", + "bmb_mem058_i_mem_prty", + "bmb_mem033_i_mem_prty", + "bmb_mem034_i_mem_prty", + "bmb_mem035_i_mem_prty", + "bmb_mem036_i_mem_prty", + "bmb_mem021_i_mem_prty", + "bmb_mem022_i_mem_prty", + "bmb_mem023_i_mem_prty", + "bmb_mem024_i_mem_prty", + "bmb_mem025_i_mem_prty", + "bmb_mem026_i_mem_prty", + "bmb_mem027_i_mem_prty", + "bmb_mem028_i_mem_prty", + "bmb_mem029_i_mem_prty", + "bmb_mem030_i_mem_prty", + "bmb_mem031_i_mem_prty", + "bmb_mem032_i_mem_prty", +}; +#else +#define bmb_prty_attn_desc OSAL_NULL +#endif + +static const u16 bmb_prty1_bb_a0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg bmb_prty1_bb_a0 = { + 0, 31, bmb_prty1_bb_a0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404 +}; + +static const u16 bmb_prty2_bb_a0_attn_idx[25] = { + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, + 54, 55, 56, 57, 58, 59, 60, +}; + +static struct attn_hw_reg bmb_prty2_bb_a0 = { + 1, 25, bmb_prty2_bb_a0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414 +}; + +static struct attn_hw_reg *bmb_prty_bb_a0_regs[2] = { + &bmb_prty1_bb_a0, &bmb_prty2_bb_a0, +}; + +static const u16 bmb_prty0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg bmb_prty0_bb_b0 = { + 0, 5, bmb_prty0_bb_b0_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0 +}; + +static const u16 bmb_prty1_bb_b0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg bmb_prty1_bb_b0 = { + 1, 31, bmb_prty1_bb_b0_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404 +}; + +static const u16 bmb_prty2_bb_b0_attn_idx[15] = { + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_prty2_bb_b0 = { + 2, 15, bmb_prty2_bb_b0_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414 +}; + +static struct attn_hw_reg *bmb_prty_bb_b0_regs[3] = { + &bmb_prty0_bb_b0, 
&bmb_prty1_bb_b0, &bmb_prty2_bb_b0, +}; + +static const u16 bmb_prty0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg bmb_prty0_k2 = { + 0, 5, bmb_prty0_k2_attn_idx, 0x5401dc, 0x5401e8, 0x5401e4, 0x5401e0 +}; + +static const u16 bmb_prty1_k2_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg bmb_prty1_k2 = { + 1, 31, bmb_prty1_k2_attn_idx, 0x540400, 0x54040c, 0x540408, 0x540404 +}; + +static const u16 bmb_prty2_k2_attn_idx[15] = { + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, +}; + +static struct attn_hw_reg bmb_prty2_k2 = { + 2, 15, bmb_prty2_k2_attn_idx, 0x540410, 0x54041c, 0x540418, 0x540414 +}; + +static struct attn_hw_reg *bmb_prty_k2_regs[3] = { + &bmb_prty0_k2, &bmb_prty1_k2, &bmb_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *pcie_int_attn_desc[17] = { + "pcie_address_error", + "pcie_link_down_detect", + "pcie_link_up_detect", + "pcie_cfg_link_eq_req_int", + "pcie_pcie_bandwidth_change_detect", + "pcie_early_hot_reset_detect", + "pcie_hot_reset_detect", + "pcie_l1_entry_detect", + "pcie_l1_exit_detect", + "pcie_ltssm_state_match_detect", + "pcie_fc_timeout_detect", + "pcie_pme_turnoff_message_detect", + "pcie_cfg_send_cor_err", + "pcie_cfg_send_nf_err", + "pcie_cfg_send_f_err", + "pcie_qoverflow_detect", + "pcie_vdm_detect", +}; +#else +#define pcie_int_attn_desc OSAL_NULL +#endif + +static const u16 pcie_int0_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg pcie_int0_k2 = { + 0, 17, pcie_int0_k2_attn_idx, 0x547a0, 0x547ac, 0x547a8, 0x547a4 +}; + +static struct attn_hw_reg *pcie_int_k2_regs[1] = { + &pcie_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pcie_prty_attn_desc[24] = { + "pcie_mem003_i_ecc_rf_int", + "pcie_mem004_i_ecc_rf_int", + "pcie_mem008_i_mem_prty", + "pcie_mem007_i_mem_prty", + "pcie_mem005_i_mem_prty", + "pcie_mem006_i_mem_prty", + "pcie_mem001_i_mem_prty", + "pcie_mem002_i_mem_prty", + "pcie_mem001_i_ecc_rf_int", + "pcie_mem005_i_ecc_rf_int", + "pcie_mem010_i_ecc_rf_int", + "pcie_mem009_i_ecc_rf_int", + "pcie_mem007_i_ecc_rf_int", + "pcie_mem004_i_mem_prty_0", + "pcie_mem004_i_mem_prty_1", + "pcie_mem004_i_mem_prty_2", + "pcie_mem004_i_mem_prty_3", + "pcie_mem011_i_mem_prty_1", + "pcie_mem011_i_mem_prty_2", + "pcie_mem012_i_mem_prty_1", + "pcie_mem012_i_mem_prty_2", + "pcie_app_parity_errs_0", + "pcie_app_parity_errs_1", + "pcie_app_parity_errs_2", +}; +#else +#define pcie_prty_attn_desc OSAL_NULL +#endif + +static const u16 pcie_prty1_bb_a0_attn_idx[17] = { + 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, +}; + +static struct attn_hw_reg pcie_prty1_bb_a0 = { + 0, 17, pcie_prty1_bb_a0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004 +}; + +static struct attn_hw_reg *pcie_prty_bb_a0_regs[1] = { + &pcie_prty1_bb_a0, +}; + +static const u16 pcie_prty1_bb_b0_attn_idx[17] = { + 0, 2, 5, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, +}; + +static struct attn_hw_reg pcie_prty1_bb_b0 = { + 0, 17, pcie_prty1_bb_b0_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004 +}; + +static struct attn_hw_reg *pcie_prty_bb_b0_regs[1] = { + &pcie_prty1_bb_b0, +}; + +static const u16 pcie_prty1_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg pcie_prty1_k2 = { + 0, 8, pcie_prty1_k2_attn_idx, 0x54000, 0x5400c, 0x54008, 0x54004 +}; + +static const u16 pcie_prty0_k2_attn_idx[3] = { + 21, 22, 23, +}; + +static struct 
attn_hw_reg pcie_prty0_k2 = { + 1, 3, pcie_prty0_k2_attn_idx, 0x547b0, 0x547bc, 0x547b8, 0x547b4 +}; + +static struct attn_hw_reg *pcie_prty_k2_regs[2] = { + &pcie_prty1_k2, &pcie_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *mcp2_prty_attn_desc[13] = { + "mcp2_rom_parity", + "mcp2_mem001_i_ecc_rf_int", + "mcp2_mem006_i_ecc_0_rf_int", + "mcp2_mem006_i_ecc_1_rf_int", + "mcp2_mem006_i_ecc_2_rf_int", + "mcp2_mem006_i_ecc_3_rf_int", + "mcp2_mem007_i_ecc_rf_int", + "mcp2_mem004_i_mem_prty", + "mcp2_mem003_i_mem_prty", + "mcp2_mem002_i_mem_prty", + "mcp2_mem009_i_mem_prty", + "mcp2_mem008_i_mem_prty", + "mcp2_mem005_i_mem_prty", +}; +#else +#define mcp2_prty_attn_desc OSAL_NULL +#endif + +static const u16 mcp2_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg mcp2_prty0_bb_a0 = { + 0, 1, mcp2_prty0_bb_a0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044 +}; + +static const u16 mcp2_prty1_bb_a0_attn_idx[12] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg mcp2_prty1_bb_a0 = { + 1, 12, mcp2_prty1_bb_a0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208 +}; + +static struct attn_hw_reg *mcp2_prty_bb_a0_regs[2] = { + &mcp2_prty0_bb_a0, &mcp2_prty1_bb_a0, +}; + +static const u16 mcp2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg mcp2_prty0_bb_b0 = { + 0, 1, mcp2_prty0_bb_b0_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044 +}; + +static const u16 mcp2_prty1_bb_b0_attn_idx[12] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg mcp2_prty1_bb_b0 = { + 1, 12, mcp2_prty1_bb_b0_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208 +}; + +static struct attn_hw_reg *mcp2_prty_bb_b0_regs[2] = { + &mcp2_prty0_bb_b0, &mcp2_prty1_bb_b0, +}; + +static const u16 mcp2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg mcp2_prty0_k2 = { + 0, 1, mcp2_prty0_k2_attn_idx, 0x52040, 0x5204c, 0x52048, 0x52044 +}; + +static const u16 mcp2_prty1_k2_attn_idx[12] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg mcp2_prty1_k2 = { + 1, 12, mcp2_prty1_k2_attn_idx, 0x52204, 0x52210, 0x5220c, 0x52208 +}; + +static struct attn_hw_reg *mcp2_prty_k2_regs[2] = { + &mcp2_prty0_k2, &mcp2_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst_int_attn_desc[18] = { + "pswhst_address_error", + "pswhst_hst_src_fifo1_err", + "pswhst_hst_src_fifo2_err", + "pswhst_hst_src_fifo3_err", + "pswhst_hst_src_fifo4_err", + "pswhst_hst_src_fifo5_err", + "pswhst_hst_hdr_sync_fifo_err", + "pswhst_hst_data_sync_fifo_err", + "pswhst_hst_cpl_sync_fifo_err", + "pswhst_hst_vf_disabled_access", + "pswhst_hst_permission_violation", + "pswhst_hst_incorrect_access", + "pswhst_hst_src_fifo6_err", + "pswhst_hst_src_fifo7_err", + "pswhst_hst_src_fifo8_err", + "pswhst_hst_src_fifo9_err", + "pswhst_hst_source_credit_violation", + "pswhst_hst_timeout", +}; +#else +#define pswhst_int_attn_desc OSAL_NULL +#endif + +static const u16 pswhst_int0_bb_a0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_int0_bb_a0 = { + 0, 18, pswhst_int0_bb_a0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, + 0x2a0184 +}; + +static struct attn_hw_reg *pswhst_int_bb_a0_regs[1] = { + &pswhst_int0_bb_a0, +}; + +static const u16 pswhst_int0_bb_b0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_int0_bb_b0 = { + 0, 18, pswhst_int0_bb_b0_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, + 0x2a0184 +}; + +static struct attn_hw_reg *pswhst_int_bb_b0_regs[1] 
= { + &pswhst_int0_bb_b0, +}; + +static const u16 pswhst_int0_k2_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_int0_k2 = { + 0, 18, pswhst_int0_k2_attn_idx, 0x2a0180, 0x2a018c, 0x2a0188, 0x2a0184 +}; + +static struct attn_hw_reg *pswhst_int_k2_regs[1] = { + &pswhst_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst_prty_attn_desc[18] = { + "pswhst_datapath_registers", + "pswhst_mem006_i_mem_prty", + "pswhst_mem007_i_mem_prty", + "pswhst_mem005_i_mem_prty", + "pswhst_mem002_i_mem_prty", + "pswhst_mem003_i_mem_prty", + "pswhst_mem001_i_mem_prty", + "pswhst_mem008_i_mem_prty", + "pswhst_mem004_i_mem_prty", + "pswhst_mem009_i_mem_prty", + "pswhst_mem010_i_mem_prty", + "pswhst_mem016_i_mem_prty", + "pswhst_mem012_i_mem_prty", + "pswhst_mem013_i_mem_prty", + "pswhst_mem014_i_mem_prty", + "pswhst_mem015_i_mem_prty", + "pswhst_mem011_i_mem_prty", + "pswhst_mem017_i_mem_prty", +}; +#else +#define pswhst_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswhst_prty1_bb_a0_attn_idx[17] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_prty1_bb_a0 = { + 0, 17, pswhst_prty1_bb_a0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, + 0x2a0204 +}; + +static struct attn_hw_reg *pswhst_prty_bb_a0_regs[1] = { + &pswhst_prty1_bb_a0, +}; + +static const u16 pswhst_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst_prty0_bb_b0 = { + 0, 1, pswhst_prty0_bb_b0_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, + 0x2a0194 +}; + +static const u16 pswhst_prty1_bb_b0_attn_idx[17] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_prty1_bb_b0 = { + 1, 17, pswhst_prty1_bb_b0_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, + 0x2a0204 +}; + +static struct attn_hw_reg *pswhst_prty_bb_b0_regs[2] = { + &pswhst_prty0_bb_b0, &pswhst_prty1_bb_b0, +}; + +static const u16 pswhst_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst_prty0_k2 = { + 0, 1, pswhst_prty0_k2_attn_idx, 0x2a0190, 0x2a019c, 0x2a0198, 0x2a0194 +}; + +static const u16 pswhst_prty1_k2_attn_idx[17] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pswhst_prty1_k2 = { + 1, 17, pswhst_prty1_k2_attn_idx, 0x2a0200, 0x2a020c, 0x2a0208, 0x2a0204 +}; + +static struct attn_hw_reg *pswhst_prty_k2_regs[2] = { + &pswhst_prty0_k2, &pswhst_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst2_int_attn_desc[5] = { + "pswhst2_address_error", + "pswhst2_hst_header_fifo_err", + "pswhst2_hst_data_fifo_err", + "pswhst2_hst_cpl_fifo_err", + "pswhst2_hst_ireq_fifo_err", +}; +#else +#define pswhst2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswhst2_int0_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswhst2_int0_bb_a0 = { + 0, 5, pswhst2_int0_bb_a0_attn_idx, 0x29e180, 0x29e18c, 0x29e188, + 0x29e184 +}; + +static struct attn_hw_reg *pswhst2_int_bb_a0_regs[1] = { + &pswhst2_int0_bb_a0, +}; + +static const u16 pswhst2_int0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswhst2_int0_bb_b0 = { + 0, 5, pswhst2_int0_bb_b0_attn_idx, 0x29e180, 0x29e18c, 0x29e188, + 0x29e184 +}; + +static struct attn_hw_reg *pswhst2_int_bb_b0_regs[1] = { + &pswhst2_int0_bb_b0, +}; + +static const u16 pswhst2_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswhst2_int0_k2 = { + 0, 5, pswhst2_int0_k2_attn_idx, 0x29e180, 0x29e18c, 0x29e188, 0x29e184 +}; + +static struct 
attn_hw_reg *pswhst2_int_k2_regs[1] = { + &pswhst2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswhst2_prty_attn_desc[1] = { + "pswhst2_datapath_registers", +}; +#else +#define pswhst2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswhst2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst2_prty0_bb_b0 = { + 0, 1, pswhst2_prty0_bb_b0_attn_idx, 0x29e190, 0x29e19c, 0x29e198, + 0x29e194 +}; + +static struct attn_hw_reg *pswhst2_prty_bb_b0_regs[1] = { + &pswhst2_prty0_bb_b0, +}; + +static const u16 pswhst2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswhst2_prty0_k2 = { + 0, 1, pswhst2_prty0_k2_attn_idx, 0x29e190, 0x29e19c, 0x29e198, 0x29e194 +}; + +static struct attn_hw_reg *pswhst2_prty_k2_regs[1] = { + &pswhst2_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd_int_attn_desc[3] = { + "pswrd_address_error", + "pswrd_pop_error", + "pswrd_pop_pbf_error", +}; +#else +#define pswrd_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrd_int0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg pswrd_int0_bb_a0 = { + 0, 3, pswrd_int0_bb_a0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184 +}; + +static struct attn_hw_reg *pswrd_int_bb_a0_regs[1] = { + &pswrd_int0_bb_a0, +}; + +static const u16 pswrd_int0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg pswrd_int0_bb_b0 = { + 0, 3, pswrd_int0_bb_b0_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184 +}; + +static struct attn_hw_reg *pswrd_int_bb_b0_regs[1] = { + &pswrd_int0_bb_b0, +}; + +static const u16 pswrd_int0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg pswrd_int0_k2 = { + 0, 3, pswrd_int0_k2_attn_idx, 0x29c180, 0x29c18c, 0x29c188, 0x29c184 +}; + +static struct attn_hw_reg *pswrd_int_k2_regs[1] = { + &pswrd_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd_prty_attn_desc[1] = { + "pswrd_datapath_registers", +}; +#else +#define pswrd_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrd_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrd_prty0_bb_b0 = { + 0, 1, pswrd_prty0_bb_b0_attn_idx, 0x29c190, 0x29c19c, 0x29c198, + 0x29c194 +}; + +static struct attn_hw_reg *pswrd_prty_bb_b0_regs[1] = { + &pswrd_prty0_bb_b0, +}; + +static const u16 pswrd_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrd_prty0_k2 = { + 0, 1, pswrd_prty0_k2_attn_idx, 0x29c190, 0x29c19c, 0x29c198, 0x29c194 +}; + +static struct attn_hw_reg *pswrd_prty_k2_regs[1] = { + &pswrd_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd2_int_attn_desc[5] = { + "pswrd2_address_error", + "pswrd2_sr_fifo_error", + "pswrd2_blk_fifo_error", + "pswrd2_push_error", + "pswrd2_push_pbf_error", +}; +#else +#define pswrd2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrd2_int0_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswrd2_int0_bb_a0 = { + 0, 5, pswrd2_int0_bb_a0_attn_idx, 0x29d180, 0x29d18c, 0x29d188, + 0x29d184 +}; + +static struct attn_hw_reg *pswrd2_int_bb_a0_regs[1] = { + &pswrd2_int0_bb_a0, +}; + +static const u16 pswrd2_int0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswrd2_int0_bb_b0 = { + 0, 5, pswrd2_int0_bb_b0_attn_idx, 0x29d180, 0x29d18c, 0x29d188, + 0x29d184 +}; + +static struct attn_hw_reg *pswrd2_int_bb_b0_regs[1] = { + &pswrd2_int0_bb_b0, +}; + +static const u16 pswrd2_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pswrd2_int0_k2 = { + 0, 5, pswrd2_int0_k2_attn_idx, 0x29d180, 0x29d18c, 0x29d188, 0x29d184 +}; + +static 
struct attn_hw_reg *pswrd2_int_k2_regs[1] = { + &pswrd2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrd2_prty_attn_desc[36] = { + "pswrd2_datapath_registers", + "pswrd2_mem017_i_ecc_rf_int", + "pswrd2_mem018_i_ecc_rf_int", + "pswrd2_mem019_i_ecc_rf_int", + "pswrd2_mem020_i_ecc_rf_int", + "pswrd2_mem021_i_ecc_rf_int", + "pswrd2_mem022_i_ecc_rf_int", + "pswrd2_mem023_i_ecc_rf_int", + "pswrd2_mem024_i_ecc_rf_int", + "pswrd2_mem025_i_ecc_rf_int", + "pswrd2_mem015_i_ecc_rf_int", + "pswrd2_mem034_i_mem_prty", + "pswrd2_mem032_i_mem_prty", + "pswrd2_mem028_i_mem_prty", + "pswrd2_mem033_i_mem_prty", + "pswrd2_mem030_i_mem_prty", + "pswrd2_mem029_i_mem_prty", + "pswrd2_mem031_i_mem_prty", + "pswrd2_mem027_i_mem_prty", + "pswrd2_mem026_i_mem_prty", + "pswrd2_mem001_i_mem_prty", + "pswrd2_mem007_i_mem_prty", + "pswrd2_mem008_i_mem_prty", + "pswrd2_mem009_i_mem_prty", + "pswrd2_mem010_i_mem_prty", + "pswrd2_mem011_i_mem_prty", + "pswrd2_mem012_i_mem_prty", + "pswrd2_mem013_i_mem_prty", + "pswrd2_mem014_i_mem_prty", + "pswrd2_mem002_i_mem_prty", + "pswrd2_mem003_i_mem_prty", + "pswrd2_mem004_i_mem_prty", + "pswrd2_mem005_i_mem_prty", + "pswrd2_mem006_i_mem_prty", + "pswrd2_mem016_i_mem_prty", + "pswrd2_mem015_i_mem_prty", +}; +#else +#define pswrd2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrd2_prty1_bb_a0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, + 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, +}; + +static struct attn_hw_reg pswrd2_prty1_bb_a0 = { + 0, 31, pswrd2_prty1_bb_a0_attn_idx, 0x29d200, 0x29d20c, 0x29d208, + 0x29d204 +}; + +static const u16 pswrd2_prty2_bb_a0_attn_idx[3] = { + 33, 34, 35, +}; + +static struct attn_hw_reg pswrd2_prty2_bb_a0 = { + 1, 3, pswrd2_prty2_bb_a0_attn_idx, 0x29d210, 0x29d21c, 0x29d218, + 0x29d214 +}; + +static struct attn_hw_reg *pswrd2_prty_bb_a0_regs[2] = { + &pswrd2_prty1_bb_a0, &pswrd2_prty2_bb_a0, +}; + +static const u16 pswrd2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrd2_prty0_bb_b0 = { + 0, 1, pswrd2_prty0_bb_b0_attn_idx, 0x29d190, 0x29d19c, 0x29d198, + 0x29d194 +}; + +static const u16 pswrd2_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswrd2_prty1_bb_b0 = { + 1, 31, pswrd2_prty1_bb_b0_attn_idx, 0x29d200, 0x29d20c, 0x29d208, + 0x29d204 +}; + +static const u16 pswrd2_prty2_bb_b0_attn_idx[3] = { + 32, 33, 34, +}; + +static struct attn_hw_reg pswrd2_prty2_bb_b0 = { + 2, 3, pswrd2_prty2_bb_b0_attn_idx, 0x29d210, 0x29d21c, 0x29d218, + 0x29d214 +}; + +static struct attn_hw_reg *pswrd2_prty_bb_b0_regs[3] = { + &pswrd2_prty0_bb_b0, &pswrd2_prty1_bb_b0, &pswrd2_prty2_bb_b0, +}; + +static const u16 pswrd2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrd2_prty0_k2 = { + 0, 1, pswrd2_prty0_k2_attn_idx, 0x29d190, 0x29d19c, 0x29d198, 0x29d194 +}; + +static const u16 pswrd2_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswrd2_prty1_k2 = { + 1, 31, pswrd2_prty1_k2_attn_idx, 0x29d200, 0x29d20c, 0x29d208, 0x29d204 +}; + +static const u16 pswrd2_prty2_k2_attn_idx[3] = { + 32, 33, 34, +}; + +static struct attn_hw_reg pswrd2_prty2_k2 = { + 2, 3, pswrd2_prty2_k2_attn_idx, 0x29d210, 0x29d21c, 0x29d218, 0x29d214 +}; + +static struct attn_hw_reg *pswrd2_prty_k2_regs[3] = { + &pswrd2_prty0_k2, 
&pswrd2_prty1_k2, &pswrd2_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *pswwr_int_attn_desc[16] = { + "pswwr_address_error", + "pswwr_src_fifo_overflow", + "pswwr_qm_fifo_overflow", + "pswwr_tm_fifo_overflow", + "pswwr_usdm_fifo_overflow", + "pswwr_usdmdp_fifo_overflow", + "pswwr_xsdm_fifo_overflow", + "pswwr_tsdm_fifo_overflow", + "pswwr_cduwr_fifo_overflow", + "pswwr_dbg_fifo_overflow", + "pswwr_dmae_fifo_overflow", + "pswwr_hc_fifo_overflow", + "pswwr_msdm_fifo_overflow", + "pswwr_ysdm_fifo_overflow", + "pswwr_psdm_fifo_overflow", + "pswwr_m2p_fifo_overflow", +}; +#else +#define pswwr_int_attn_desc OSAL_NULL +#endif + +static const u16 pswwr_int0_bb_a0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pswwr_int0_bb_a0 = { + 0, 16, pswwr_int0_bb_a0_attn_idx, 0x29a180, 0x29a18c, 0x29a188, + 0x29a184 +}; + +static struct attn_hw_reg *pswwr_int_bb_a0_regs[1] = { + &pswwr_int0_bb_a0, +}; + +static const u16 pswwr_int0_bb_b0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pswwr_int0_bb_b0 = { + 0, 16, pswwr_int0_bb_b0_attn_idx, 0x29a180, 0x29a18c, 0x29a188, + 0x29a184 +}; + +static struct attn_hw_reg *pswwr_int_bb_b0_regs[1] = { + &pswwr_int0_bb_b0, +}; + +static const u16 pswwr_int0_k2_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pswwr_int0_k2 = { + 0, 16, pswwr_int0_k2_attn_idx, 0x29a180, 0x29a18c, 0x29a188, 0x29a184 +}; + +static struct attn_hw_reg *pswwr_int_k2_regs[1] = { + &pswwr_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswwr_prty_attn_desc[1] = { + "pswwr_datapath_registers", +}; +#else +#define pswwr_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswwr_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr_prty0_bb_b0 = { + 0, 1, pswwr_prty0_bb_b0_attn_idx, 0x29a190, 0x29a19c, 0x29a198, + 0x29a194 +}; + +static struct attn_hw_reg *pswwr_prty_bb_b0_regs[1] = { + &pswwr_prty0_bb_b0, +}; + +static const u16 pswwr_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr_prty0_k2 = { + 0, 1, pswwr_prty0_k2_attn_idx, 0x29a190, 0x29a19c, 0x29a198, 0x29a194 +}; + +static struct attn_hw_reg *pswwr_prty_k2_regs[1] = { + &pswwr_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswwr2_int_attn_desc[19] = { + "pswwr2_address_error", + "pswwr2_pglue_eop_error", + "pswwr2_pglue_lsr_error", + "pswwr2_tm_underflow", + "pswwr2_qm_underflow", + "pswwr2_src_underflow", + "pswwr2_usdm_underflow", + "pswwr2_tsdm_underflow", + "pswwr2_xsdm_underflow", + "pswwr2_usdmdp_underflow", + "pswwr2_cdu_underflow", + "pswwr2_dbg_underflow", + "pswwr2_dmae_underflow", + "pswwr2_hc_underflow", + "pswwr2_msdm_underflow", + "pswwr2_ysdm_underflow", + "pswwr2_psdm_underflow", + "pswwr2_m2p_underflow", + "pswwr2_pglue_eop_error_in_line", +}; +#else +#define pswwr2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswwr2_int0_bb_a0_attn_idx[19] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pswwr2_int0_bb_a0 = { + 0, 19, pswwr2_int0_bb_a0_attn_idx, 0x29b180, 0x29b18c, 0x29b188, + 0x29b184 +}; + +static struct attn_hw_reg *pswwr2_int_bb_a0_regs[1] = { + &pswwr2_int0_bb_a0, +}; + +static const u16 pswwr2_int0_bb_b0_attn_idx[19] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pswwr2_int0_bb_b0 = { + 0, 19, pswwr2_int0_bb_b0_attn_idx, 0x29b180, 0x29b18c, 0x29b188, + 0x29b184 +}; + 
+static struct attn_hw_reg *pswwr2_int_bb_b0_regs[1] = { + &pswwr2_int0_bb_b0, +}; + +static const u16 pswwr2_int0_k2_attn_idx[19] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pswwr2_int0_k2 = { + 0, 19, pswwr2_int0_k2_attn_idx, 0x29b180, 0x29b18c, 0x29b188, 0x29b184 +}; + +static struct attn_hw_reg *pswwr2_int_k2_regs[1] = { + &pswwr2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswwr2_prty_attn_desc[114] = { + "pswwr2_datapath_registers", + "pswwr2_mem008_i_ecc_rf_int", + "pswwr2_mem001_i_mem_prty", + "pswwr2_mem014_i_mem_prty_0", + "pswwr2_mem014_i_mem_prty_1", + "pswwr2_mem014_i_mem_prty_2", + "pswwr2_mem014_i_mem_prty_3", + "pswwr2_mem014_i_mem_prty_4", + "pswwr2_mem014_i_mem_prty_5", + "pswwr2_mem014_i_mem_prty_6", + "pswwr2_mem014_i_mem_prty_7", + "pswwr2_mem014_i_mem_prty_8", + "pswwr2_mem016_i_mem_prty_0", + "pswwr2_mem016_i_mem_prty_1", + "pswwr2_mem016_i_mem_prty_2", + "pswwr2_mem016_i_mem_prty_3", + "pswwr2_mem016_i_mem_prty_4", + "pswwr2_mem016_i_mem_prty_5", + "pswwr2_mem016_i_mem_prty_6", + "pswwr2_mem016_i_mem_prty_7", + "pswwr2_mem016_i_mem_prty_8", + "pswwr2_mem007_i_mem_prty_0", + "pswwr2_mem007_i_mem_prty_1", + "pswwr2_mem007_i_mem_prty_2", + "pswwr2_mem007_i_mem_prty_3", + "pswwr2_mem007_i_mem_prty_4", + "pswwr2_mem007_i_mem_prty_5", + "pswwr2_mem007_i_mem_prty_6", + "pswwr2_mem007_i_mem_prty_7", + "pswwr2_mem007_i_mem_prty_8", + "pswwr2_mem017_i_mem_prty_0", + "pswwr2_mem017_i_mem_prty_1", + "pswwr2_mem017_i_mem_prty_2", + "pswwr2_mem017_i_mem_prty_3", + "pswwr2_mem017_i_mem_prty_4", + "pswwr2_mem017_i_mem_prty_5", + "pswwr2_mem017_i_mem_prty_6", + "pswwr2_mem017_i_mem_prty_7", + "pswwr2_mem017_i_mem_prty_8", + "pswwr2_mem009_i_mem_prty_0", + "pswwr2_mem009_i_mem_prty_1", + "pswwr2_mem009_i_mem_prty_2", + "pswwr2_mem009_i_mem_prty_3", + "pswwr2_mem009_i_mem_prty_4", + "pswwr2_mem009_i_mem_prty_5", + "pswwr2_mem009_i_mem_prty_6", + "pswwr2_mem009_i_mem_prty_7", + "pswwr2_mem009_i_mem_prty_8", + "pswwr2_mem013_i_mem_prty_0", + "pswwr2_mem013_i_mem_prty_1", + "pswwr2_mem013_i_mem_prty_2", + "pswwr2_mem013_i_mem_prty_3", + "pswwr2_mem013_i_mem_prty_4", + "pswwr2_mem013_i_mem_prty_5", + "pswwr2_mem013_i_mem_prty_6", + "pswwr2_mem013_i_mem_prty_7", + "pswwr2_mem013_i_mem_prty_8", + "pswwr2_mem006_i_mem_prty_0", + "pswwr2_mem006_i_mem_prty_1", + "pswwr2_mem006_i_mem_prty_2", + "pswwr2_mem006_i_mem_prty_3", + "pswwr2_mem006_i_mem_prty_4", + "pswwr2_mem006_i_mem_prty_5", + "pswwr2_mem006_i_mem_prty_6", + "pswwr2_mem006_i_mem_prty_7", + "pswwr2_mem006_i_mem_prty_8", + "pswwr2_mem010_i_mem_prty_0", + "pswwr2_mem010_i_mem_prty_1", + "pswwr2_mem010_i_mem_prty_2", + "pswwr2_mem010_i_mem_prty_3", + "pswwr2_mem010_i_mem_prty_4", + "pswwr2_mem010_i_mem_prty_5", + "pswwr2_mem010_i_mem_prty_6", + "pswwr2_mem010_i_mem_prty_7", + "pswwr2_mem010_i_mem_prty_8", + "pswwr2_mem012_i_mem_prty", + "pswwr2_mem011_i_mem_prty_0", + "pswwr2_mem011_i_mem_prty_1", + "pswwr2_mem011_i_mem_prty_2", + "pswwr2_mem011_i_mem_prty_3", + "pswwr2_mem011_i_mem_prty_4", + "pswwr2_mem011_i_mem_prty_5", + "pswwr2_mem011_i_mem_prty_6", + "pswwr2_mem011_i_mem_prty_7", + "pswwr2_mem011_i_mem_prty_8", + "pswwr2_mem004_i_mem_prty_0", + "pswwr2_mem004_i_mem_prty_1", + "pswwr2_mem004_i_mem_prty_2", + "pswwr2_mem004_i_mem_prty_3", + "pswwr2_mem004_i_mem_prty_4", + "pswwr2_mem004_i_mem_prty_5", + "pswwr2_mem004_i_mem_prty_6", + "pswwr2_mem004_i_mem_prty_7", + "pswwr2_mem004_i_mem_prty_8", + "pswwr2_mem015_i_mem_prty_0", + "pswwr2_mem015_i_mem_prty_1", + 
"pswwr2_mem015_i_mem_prty_2", + "pswwr2_mem005_i_mem_prty_0", + "pswwr2_mem005_i_mem_prty_1", + "pswwr2_mem005_i_mem_prty_2", + "pswwr2_mem005_i_mem_prty_3", + "pswwr2_mem005_i_mem_prty_4", + "pswwr2_mem005_i_mem_prty_5", + "pswwr2_mem005_i_mem_prty_6", + "pswwr2_mem005_i_mem_prty_7", + "pswwr2_mem005_i_mem_prty_8", + "pswwr2_mem002_i_mem_prty_0", + "pswwr2_mem002_i_mem_prty_1", + "pswwr2_mem002_i_mem_prty_2", + "pswwr2_mem002_i_mem_prty_3", + "pswwr2_mem002_i_mem_prty_4", + "pswwr2_mem003_i_mem_prty_0", + "pswwr2_mem003_i_mem_prty_1", + "pswwr2_mem003_i_mem_prty_2", +}; +#else +#define pswwr2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswwr2_prty1_bb_a0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswwr2_prty1_bb_a0 = { + 0, 31, pswwr2_prty1_bb_a0_attn_idx, 0x29b200, 0x29b20c, 0x29b208, + 0x29b204 +}; + +static const u16 pswwr2_prty2_bb_a0_attn_idx[31] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, +}; + +static struct attn_hw_reg pswwr2_prty2_bb_a0 = { + 1, 31, pswwr2_prty2_bb_a0_attn_idx, 0x29b210, 0x29b21c, 0x29b218, + 0x29b214 +}; + +static const u16 pswwr2_prty3_bb_a0_attn_idx[31] = { + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +}; + +static struct attn_hw_reg pswwr2_prty3_bb_a0 = { + 2, 31, pswwr2_prty3_bb_a0_attn_idx, 0x29b220, 0x29b22c, 0x29b228, + 0x29b224 +}; + +static const u16 pswwr2_prty4_bb_a0_attn_idx[20] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, + 110, 111, 112, 113, +}; + +static struct attn_hw_reg pswwr2_prty4_bb_a0 = { + 3, 20, pswwr2_prty4_bb_a0_attn_idx, 0x29b230, 0x29b23c, 0x29b238, + 0x29b234 +}; + +static struct attn_hw_reg *pswwr2_prty_bb_a0_regs[4] = { + &pswwr2_prty1_bb_a0, &pswwr2_prty2_bb_a0, &pswwr2_prty3_bb_a0, + &pswwr2_prty4_bb_a0, +}; + +static const u16 pswwr2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr2_prty0_bb_b0 = { + 0, 1, pswwr2_prty0_bb_b0_attn_idx, 0x29b190, 0x29b19c, 0x29b198, + 0x29b194 +}; + +static const u16 pswwr2_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswwr2_prty1_bb_b0 = { + 1, 31, pswwr2_prty1_bb_b0_attn_idx, 0x29b200, 0x29b20c, 0x29b208, + 0x29b204 +}; + +static const u16 pswwr2_prty2_bb_b0_attn_idx[31] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, +}; + +static struct attn_hw_reg pswwr2_prty2_bb_b0 = { + 2, 31, pswwr2_prty2_bb_b0_attn_idx, 0x29b210, 0x29b21c, 0x29b218, + 0x29b214 +}; + +static const u16 pswwr2_prty3_bb_b0_attn_idx[31] = { + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +}; + +static struct attn_hw_reg pswwr2_prty3_bb_b0 = { + 3, 31, pswwr2_prty3_bb_b0_attn_idx, 0x29b220, 0x29b22c, 0x29b228, + 0x29b224 +}; + +static const u16 pswwr2_prty4_bb_b0_attn_idx[20] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, + 110, 111, 112, 113, +}; + +static struct attn_hw_reg pswwr2_prty4_bb_b0 = { + 4, 20, pswwr2_prty4_bb_b0_attn_idx, 0x29b230, 0x29b23c, 0x29b238, + 0x29b234 +}; + +static struct attn_hw_reg *pswwr2_prty_bb_b0_regs[5] 
= { + &pswwr2_prty0_bb_b0, &pswwr2_prty1_bb_b0, &pswwr2_prty2_bb_b0, + &pswwr2_prty3_bb_b0, &pswwr2_prty4_bb_b0, +}; + +static const u16 pswwr2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswwr2_prty0_k2 = { + 0, 1, pswwr2_prty0_k2_attn_idx, 0x29b190, 0x29b19c, 0x29b198, 0x29b194 +}; + +static const u16 pswwr2_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pswwr2_prty1_k2 = { + 1, 31, pswwr2_prty1_k2_attn_idx, 0x29b200, 0x29b20c, 0x29b208, 0x29b204 +}; + +static const u16 pswwr2_prty2_k2_attn_idx[31] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, +}; + +static struct attn_hw_reg pswwr2_prty2_k2 = { + 2, 31, pswwr2_prty2_k2_attn_idx, 0x29b210, 0x29b21c, 0x29b218, 0x29b214 +}; + +static const u16 pswwr2_prty3_k2_attn_idx[31] = { + 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, +}; + +static struct attn_hw_reg pswwr2_prty3_k2 = { + 3, 31, pswwr2_prty3_k2_attn_idx, 0x29b220, 0x29b22c, 0x29b228, 0x29b224 +}; + +static const u16 pswwr2_prty4_k2_attn_idx[20] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, + 109, + 110, 111, 112, 113, +}; + +static struct attn_hw_reg pswwr2_prty4_k2 = { + 4, 20, pswwr2_prty4_k2_attn_idx, 0x29b230, 0x29b23c, 0x29b238, 0x29b234 +}; + +static struct attn_hw_reg *pswwr2_prty_k2_regs[5] = { + &pswwr2_prty0_k2, &pswwr2_prty1_k2, &pswwr2_prty2_k2, &pswwr2_prty3_k2, + &pswwr2_prty4_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq_int_attn_desc[21] = { + "pswrq_address_error", + "pswrq_pbf_fifo_overflow", + "pswrq_src_fifo_overflow", + "pswrq_qm_fifo_overflow", + "pswrq_tm_fifo_overflow", + "pswrq_usdm_fifo_overflow", + "pswrq_m2p_fifo_overflow", + "pswrq_xsdm_fifo_overflow", + "pswrq_tsdm_fifo_overflow", + "pswrq_ptu_fifo_overflow", + "pswrq_cduwr_fifo_overflow", + "pswrq_cdurd_fifo_overflow", + "pswrq_dmae_fifo_overflow", + "pswrq_hc_fifo_overflow", + "pswrq_dbg_fifo_overflow", + "pswrq_msdm_fifo_overflow", + "pswrq_ysdm_fifo_overflow", + "pswrq_psdm_fifo_overflow", + "pswrq_prm_fifo_overflow", + "pswrq_muld_fifo_overflow", + "pswrq_xyld_fifo_overflow", +}; +#else +#define pswrq_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrq_int0_bb_a0_attn_idx[21] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, +}; + +static struct attn_hw_reg pswrq_int0_bb_a0 = { + 0, 21, pswrq_int0_bb_a0_attn_idx, 0x280180, 0x28018c, 0x280188, + 0x280184 +}; + +static struct attn_hw_reg *pswrq_int_bb_a0_regs[1] = { + &pswrq_int0_bb_a0, +}; + +static const u16 pswrq_int0_bb_b0_attn_idx[21] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, +}; + +static struct attn_hw_reg pswrq_int0_bb_b0 = { + 0, 21, pswrq_int0_bb_b0_attn_idx, 0x280180, 0x28018c, 0x280188, + 0x280184 +}; + +static struct attn_hw_reg *pswrq_int_bb_b0_regs[1] = { + &pswrq_int0_bb_b0, +}; + +static const u16 pswrq_int0_k2_attn_idx[21] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, +}; + +static struct attn_hw_reg pswrq_int0_k2 = { + 0, 21, pswrq_int0_k2_attn_idx, 0x280180, 0x28018c, 0x280188, 0x280184 +}; + +static struct attn_hw_reg *pswrq_int_k2_regs[1] = { + &pswrq_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq_prty_attn_desc[1] = { + "pswrq_pxp_busip_parity", +}; +#else 
+#define pswrq_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrq_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrq_prty0_bb_b0 = { + 0, 1, pswrq_prty0_bb_b0_attn_idx, 0x280190, 0x28019c, 0x280198, + 0x280194 +}; + +static struct attn_hw_reg *pswrq_prty_bb_b0_regs[1] = { + &pswrq_prty0_bb_b0, +}; + +static const u16 pswrq_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pswrq_prty0_k2 = { + 0, 1, pswrq_prty0_k2_attn_idx, 0x280190, 0x28019c, 0x280198, 0x280194 +}; + +static struct attn_hw_reg *pswrq_prty_k2_regs[1] = { + &pswrq_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq2_int_attn_desc[15] = { + "pswrq2_address_error", + "pswrq2_l2p_fifo_overflow", + "pswrq2_wdfifo_overflow", + "pswrq2_phyaddr_fifo_of", + "pswrq2_l2p_violation_1", + "pswrq2_l2p_violation_2", + "pswrq2_free_list_empty", + "pswrq2_elt_addr", + "pswrq2_l2p_vf_err", + "pswrq2_core_wdone_overflow", + "pswrq2_treq_fifo_underflow", + "pswrq2_treq_fifo_overflow", + "pswrq2_icpl_fifo_underflow", + "pswrq2_icpl_fifo_overflow", + "pswrq2_back2back_atc_response", +}; +#else +#define pswrq2_int_attn_desc OSAL_NULL +#endif + +static const u16 pswrq2_int0_bb_a0_attn_idx[15] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg pswrq2_int0_bb_a0 = { + 0, 15, pswrq2_int0_bb_a0_attn_idx, 0x240180, 0x24018c, 0x240188, + 0x240184 +}; + +static struct attn_hw_reg *pswrq2_int_bb_a0_regs[1] = { + &pswrq2_int0_bb_a0, +}; + +static const u16 pswrq2_int0_bb_b0_attn_idx[15] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg pswrq2_int0_bb_b0 = { + 0, 15, pswrq2_int0_bb_b0_attn_idx, 0x240180, 0x24018c, 0x240188, + 0x240184 +}; + +static struct attn_hw_reg *pswrq2_int_bb_b0_regs[1] = { + &pswrq2_int0_bb_b0, +}; + +static const u16 pswrq2_int0_k2_attn_idx[15] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg pswrq2_int0_k2 = { + 0, 15, pswrq2_int0_k2_attn_idx, 0x240180, 0x24018c, 0x240188, 0x240184 +}; + +static struct attn_hw_reg *pswrq2_int_k2_regs[1] = { + &pswrq2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pswrq2_prty_attn_desc[11] = { + "pswrq2_mem004_i_ecc_rf_int", + "pswrq2_mem005_i_ecc_rf_int", + "pswrq2_mem001_i_ecc_rf_int", + "pswrq2_mem006_i_mem_prty", + "pswrq2_mem008_i_mem_prty", + "pswrq2_mem009_i_mem_prty", + "pswrq2_mem003_i_mem_prty", + "pswrq2_mem002_i_mem_prty", + "pswrq2_mem010_i_mem_prty", + "pswrq2_mem007_i_mem_prty", + "pswrq2_mem005_i_mem_prty", +}; +#else +#define pswrq2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pswrq2_prty1_bb_a0_attn_idx[9] = { + 0, 2, 3, 4, 5, 6, 7, 9, 10, +}; + +static struct attn_hw_reg pswrq2_prty1_bb_a0 = { + 0, 9, pswrq2_prty1_bb_a0_attn_idx, 0x240200, 0x24020c, 0x240208, + 0x240204 +}; + +static struct attn_hw_reg *pswrq2_prty_bb_a0_regs[1] = { + &pswrq2_prty1_bb_a0, +}; + +static const u16 pswrq2_prty1_bb_b0_attn_idx[9] = { + 0, 2, 3, 4, 5, 6, 7, 9, 10, +}; + +static struct attn_hw_reg pswrq2_prty1_bb_b0 = { + 0, 9, pswrq2_prty1_bb_b0_attn_idx, 0x240200, 0x24020c, 0x240208, + 0x240204 +}; + +static struct attn_hw_reg *pswrq2_prty_bb_b0_regs[1] = { + &pswrq2_prty1_bb_b0, +}; + +static const u16 pswrq2_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg pswrq2_prty1_k2 = { + 0, 10, pswrq2_prty1_k2_attn_idx, 0x240200, 0x24020c, 0x240208, 0x240204 +}; + +static struct attn_hw_reg *pswrq2_prty_k2_regs[1] = { + &pswrq2_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char 
*pglcs_int_attn_desc[2] = { + "pglcs_address_error", + "pglcs_rasdp_error", +}; +#else +#define pglcs_int_attn_desc OSAL_NULL +#endif + +static const u16 pglcs_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglcs_int0_bb_a0 = { + 0, 1, pglcs_int0_bb_a0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04 +}; + +static struct attn_hw_reg *pglcs_int_bb_a0_regs[1] = { + &pglcs_int0_bb_a0, +}; + +static const u16 pglcs_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pglcs_int0_bb_b0 = { + 0, 1, pglcs_int0_bb_b0_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04 +}; + +static struct attn_hw_reg *pglcs_int_bb_b0_regs[1] = { + &pglcs_int0_bb_b0, +}; + +static const u16 pglcs_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg pglcs_int0_k2 = { + 0, 2, pglcs_int0_k2_attn_idx, 0x1d00, 0x1d0c, 0x1d08, 0x1d04 +}; + +static struct attn_hw_reg *pglcs_int_k2_regs[1] = { + &pglcs_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dmae_int_attn_desc[2] = { + "dmae_address_error", + "dmae_pci_rd_buf_err", +}; +#else +#define dmae_int_attn_desc OSAL_NULL +#endif + +static const u16 dmae_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg dmae_int0_bb_a0 = { + 0, 2, dmae_int0_bb_a0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184 +}; + +static struct attn_hw_reg *dmae_int_bb_a0_regs[1] = { + &dmae_int0_bb_a0, +}; + +static const u16 dmae_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg dmae_int0_bb_b0 = { + 0, 2, dmae_int0_bb_b0_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184 +}; + +static struct attn_hw_reg *dmae_int_bb_b0_regs[1] = { + &dmae_int0_bb_b0, +}; + +static const u16 dmae_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg dmae_int0_k2 = { + 0, 2, dmae_int0_k2_attn_idx, 0xc180, 0xc18c, 0xc188, 0xc184 +}; + +static struct attn_hw_reg *dmae_int_k2_regs[1] = { + &dmae_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dmae_prty_attn_desc[3] = { + "dmae_mem002_i_mem_prty", + "dmae_mem001_i_mem_prty", + "dmae_mem003_i_mem_prty", +}; +#else +#define dmae_prty_attn_desc OSAL_NULL +#endif + +static const u16 dmae_prty1_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg dmae_prty1_bb_a0 = { + 0, 3, dmae_prty1_bb_a0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204 +}; + +static struct attn_hw_reg *dmae_prty_bb_a0_regs[1] = { + &dmae_prty1_bb_a0, +}; + +static const u16 dmae_prty1_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg dmae_prty1_bb_b0 = { + 0, 3, dmae_prty1_bb_b0_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204 +}; + +static struct attn_hw_reg *dmae_prty_bb_b0_regs[1] = { + &dmae_prty1_bb_b0, +}; + +static const u16 dmae_prty1_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg dmae_prty1_k2 = { + 0, 3, dmae_prty1_k2_attn_idx, 0xc200, 0xc20c, 0xc208, 0xc204 +}; + +static struct attn_hw_reg *dmae_prty_k2_regs[1] = { + &dmae_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ptu_int_attn_desc[8] = { + "ptu_address_error", + "ptu_atc_tcpl_to_not_pend", + "ptu_atc_gpa_multiple_hits", + "ptu_atc_rcpl_to_empty_cnt", + "ptu_atc_tcpl_error", + "ptu_atc_inv_halt", + "ptu_atc_reuse_transpend", + "ptu_atc_ireq_less_than_stu", +}; +#else +#define ptu_int_attn_desc OSAL_NULL +#endif + +static const u16 ptu_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg ptu_int0_bb_a0 = { + 0, 8, ptu_int0_bb_a0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184 +}; + +static struct attn_hw_reg *ptu_int_bb_a0_regs[1] = { + &ptu_int0_bb_a0, +}; + +static const u16 ptu_int0_bb_b0_attn_idx[8] = { + 0, 
1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg ptu_int0_bb_b0 = { + 0, 8, ptu_int0_bb_b0_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184 +}; + +static struct attn_hw_reg *ptu_int_bb_b0_regs[1] = { + &ptu_int0_bb_b0, +}; + +static const u16 ptu_int0_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg ptu_int0_k2 = { + 0, 8, ptu_int0_k2_attn_idx, 0x560180, 0x56018c, 0x560188, 0x560184 +}; + +static struct attn_hw_reg *ptu_int_k2_regs[1] = { + &ptu_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ptu_prty_attn_desc[18] = { + "ptu_mem017_i_ecc_rf_int", + "ptu_mem018_i_mem_prty", + "ptu_mem006_i_mem_prty", + "ptu_mem001_i_mem_prty", + "ptu_mem002_i_mem_prty", + "ptu_mem003_i_mem_prty", + "ptu_mem004_i_mem_prty", + "ptu_mem005_i_mem_prty", + "ptu_mem009_i_mem_prty", + "ptu_mem010_i_mem_prty", + "ptu_mem016_i_mem_prty", + "ptu_mem007_i_mem_prty", + "ptu_mem015_i_mem_prty", + "ptu_mem013_i_mem_prty", + "ptu_mem012_i_mem_prty", + "ptu_mem014_i_mem_prty", + "ptu_mem011_i_mem_prty", + "ptu_mem008_i_mem_prty", +}; +#else +#define ptu_prty_attn_desc OSAL_NULL +#endif + +static const u16 ptu_prty1_bb_a0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg ptu_prty1_bb_a0 = { + 0, 18, ptu_prty1_bb_a0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204 +}; + +static struct attn_hw_reg *ptu_prty_bb_a0_regs[1] = { + &ptu_prty1_bb_a0, +}; + +static const u16 ptu_prty1_bb_b0_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg ptu_prty1_bb_b0 = { + 0, 18, ptu_prty1_bb_b0_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204 +}; + +static struct attn_hw_reg *ptu_prty_bb_b0_regs[1] = { + &ptu_prty1_bb_b0, +}; + +static const u16 ptu_prty1_k2_attn_idx[18] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg ptu_prty1_k2 = { + 0, 18, ptu_prty1_k2_attn_idx, 0x560200, 0x56020c, 0x560208, 0x560204 +}; + +static struct attn_hw_reg *ptu_prty_k2_regs[1] = { + &ptu_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *tcm_int_attn_desc[41] = { + "tcm_address_error", + "tcm_is_storm_ovfl_err", + "tcm_is_storm_under_err", + "tcm_is_tsdm_ovfl_err", + "tcm_is_tsdm_under_err", + "tcm_is_msem_ovfl_err", + "tcm_is_msem_under_err", + "tcm_is_ysem_ovfl_err", + "tcm_is_ysem_under_err", + "tcm_is_dorq_ovfl_err", + "tcm_is_dorq_under_err", + "tcm_is_pbf_ovfl_err", + "tcm_is_pbf_under_err", + "tcm_is_prs_ovfl_err", + "tcm_is_prs_under_err", + "tcm_is_tm_ovfl_err", + "tcm_is_tm_under_err", + "tcm_is_qm_p_ovfl_err", + "tcm_is_qm_p_under_err", + "tcm_is_qm_s_ovfl_err", + "tcm_is_qm_s_under_err", + "tcm_is_grc_ovfl_err0", + "tcm_is_grc_under_err0", + "tcm_is_grc_ovfl_err1", + "tcm_is_grc_under_err1", + "tcm_is_grc_ovfl_err2", + "tcm_is_grc_under_err2", + "tcm_is_grc_ovfl_err3", + "tcm_is_grc_under_err3", + "tcm_in_prcs_tbl_ovfl", + "tcm_agg_con_data_buf_ovfl", + "tcm_agg_con_cmd_buf_ovfl", + "tcm_sm_con_data_buf_ovfl", + "tcm_sm_con_cmd_buf_ovfl", + "tcm_agg_task_data_buf_ovfl", + "tcm_agg_task_cmd_buf_ovfl", + "tcm_sm_task_data_buf_ovfl", + "tcm_sm_task_cmd_buf_ovfl", + "tcm_fi_desc_input_violate", + "tcm_se_desc_input_violate", + "tcm_qmreg_more4", +}; +#else +#define tcm_int_attn_desc OSAL_NULL +#endif + +static const u16 tcm_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tcm_int0_bb_a0 = { + 0, 8, tcm_int0_bb_a0_attn_idx, 0x1180180, 0x118018c, 0x1180188, + 0x1180184 +}; + +static const u16 
tcm_int1_bb_a0_attn_idx[32] = { + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_int1_bb_a0 = { + 1, 32, tcm_int1_bb_a0_attn_idx, 0x1180190, 0x118019c, 0x1180198, + 0x1180194 +}; + +static const u16 tcm_int2_bb_a0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg tcm_int2_bb_a0 = { + 2, 1, tcm_int2_bb_a0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, + 0x11801a4 +}; + +static struct attn_hw_reg *tcm_int_bb_a0_regs[3] = { + &tcm_int0_bb_a0, &tcm_int1_bb_a0, &tcm_int2_bb_a0, +}; + +static const u16 tcm_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tcm_int0_bb_b0 = { + 0, 8, tcm_int0_bb_b0_attn_idx, 0x1180180, 0x118018c, 0x1180188, + 0x1180184 +}; + +static const u16 tcm_int1_bb_b0_attn_idx[32] = { + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_int1_bb_b0 = { + 1, 32, tcm_int1_bb_b0_attn_idx, 0x1180190, 0x118019c, 0x1180198, + 0x1180194 +}; + +static const u16 tcm_int2_bb_b0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg tcm_int2_bb_b0 = { + 2, 1, tcm_int2_bb_b0_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, + 0x11801a4 +}; + +static struct attn_hw_reg *tcm_int_bb_b0_regs[3] = { + &tcm_int0_bb_b0, &tcm_int1_bb_b0, &tcm_int2_bb_b0, +}; + +static const u16 tcm_int0_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tcm_int0_k2 = { + 0, 8, tcm_int0_k2_attn_idx, 0x1180180, 0x118018c, 0x1180188, 0x1180184 +}; + +static const u16 tcm_int1_k2_attn_idx[32] = { + 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, + 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_int1_k2 = { + 1, 32, tcm_int1_k2_attn_idx, 0x1180190, 0x118019c, 0x1180198, 0x1180194 +}; + +static const u16 tcm_int2_k2_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg tcm_int2_k2 = { + 2, 1, tcm_int2_k2_attn_idx, 0x11801a0, 0x11801ac, 0x11801a8, 0x11801a4 +}; + +static struct attn_hw_reg *tcm_int_k2_regs[3] = { + &tcm_int0_k2, &tcm_int1_k2, &tcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *tcm_prty_attn_desc[51] = { + "tcm_mem026_i_ecc_rf_int", + "tcm_mem003_i_ecc_0_rf_int", + "tcm_mem003_i_ecc_1_rf_int", + "tcm_mem022_i_ecc_0_rf_int", + "tcm_mem022_i_ecc_1_rf_int", + "tcm_mem005_i_ecc_0_rf_int", + "tcm_mem005_i_ecc_1_rf_int", + "tcm_mem024_i_ecc_0_rf_int", + "tcm_mem024_i_ecc_1_rf_int", + "tcm_mem018_i_mem_prty", + "tcm_mem019_i_mem_prty", + "tcm_mem015_i_mem_prty", + "tcm_mem016_i_mem_prty", + "tcm_mem017_i_mem_prty", + "tcm_mem010_i_mem_prty", + "tcm_mem020_i_mem_prty", + "tcm_mem011_i_mem_prty", + "tcm_mem012_i_mem_prty", + "tcm_mem013_i_mem_prty", + "tcm_mem014_i_mem_prty", + "tcm_mem029_i_mem_prty", + "tcm_mem028_i_mem_prty", + "tcm_mem027_i_mem_prty", + "tcm_mem004_i_mem_prty", + "tcm_mem023_i_mem_prty", + "tcm_mem006_i_mem_prty", + "tcm_mem025_i_mem_prty", + "tcm_mem021_i_mem_prty", + "tcm_mem007_i_mem_prty_0", + "tcm_mem007_i_mem_prty_1", + "tcm_mem008_i_mem_prty", + "tcm_mem025_i_ecc_rf_int", + "tcm_mem021_i_ecc_0_rf_int", + "tcm_mem021_i_ecc_1_rf_int", + "tcm_mem023_i_ecc_0_rf_int", + "tcm_mem023_i_ecc_1_rf_int", + "tcm_mem026_i_mem_prty", + "tcm_mem022_i_mem_prty", + "tcm_mem024_i_mem_prty", + "tcm_mem009_i_mem_prty", + "tcm_mem024_i_ecc_rf_int", + "tcm_mem001_i_ecc_0_rf_int", + "tcm_mem001_i_ecc_1_rf_int", + "tcm_mem019_i_ecc_0_rf_int", + 
"tcm_mem019_i_ecc_1_rf_int", + "tcm_mem022_i_ecc_rf_int", + "tcm_mem002_i_mem_prty", + "tcm_mem005_i_mem_prty_0", + "tcm_mem005_i_mem_prty_1", + "tcm_mem001_i_mem_prty", + "tcm_mem007_i_mem_prty", +}; +#else +#define tcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 tcm_prty1_bb_a0_attn_idx[31] = { + 1, 2, 9, 11, 12, 13, 14, 15, 16, 17, 18, 19, 22, 23, 24, 25, 26, 30, 32, + 33, 36, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg tcm_prty1_bb_a0 = { + 0, 31, tcm_prty1_bb_a0_attn_idx, 0x1180200, 0x118020c, 0x1180208, + 0x1180204 +}; + +static const u16 tcm_prty2_bb_a0_attn_idx[3] = { + 50, 21, 20, +}; + +static struct attn_hw_reg tcm_prty2_bb_a0 = { + 1, 3, tcm_prty2_bb_a0_attn_idx, 0x1180210, 0x118021c, 0x1180218, + 0x1180214 +}; + +static struct attn_hw_reg *tcm_prty_bb_a0_regs[2] = { + &tcm_prty1_bb_a0, &tcm_prty2_bb_a0, +}; + +static const u16 tcm_prty1_bb_b0_attn_idx[31] = { + 1, 2, 5, 6, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, + 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg tcm_prty1_bb_b0 = { + 0, 31, tcm_prty1_bb_b0_attn_idx, 0x1180200, 0x118020c, 0x1180208, + 0x1180204 +}; + +static const u16 tcm_prty2_bb_b0_attn_idx[2] = { + 49, 46, +}; + +static struct attn_hw_reg tcm_prty2_bb_b0 = { + 1, 2, tcm_prty2_bb_b0_attn_idx, 0x1180210, 0x118021c, 0x1180218, + 0x1180214 +}; + +static struct attn_hw_reg *tcm_prty_bb_b0_regs[2] = { + &tcm_prty1_bb_b0, &tcm_prty2_bb_b0, +}; + +static const u16 tcm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg tcm_prty1_k2 = { + 0, 31, tcm_prty1_k2_attn_idx, 0x1180200, 0x118020c, 0x1180208, + 0x1180204 +}; + +static const u16 tcm_prty2_k2_attn_idx[3] = { + 39, 49, 46, +}; + +static struct attn_hw_reg tcm_prty2_k2 = { + 1, 3, tcm_prty2_k2_attn_idx, 0x1180210, 0x118021c, 0x1180218, 0x1180214 +}; + +static struct attn_hw_reg *tcm_prty_k2_regs[2] = { + &tcm_prty1_k2, &tcm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *mcm_int_attn_desc[41] = { + "mcm_address_error", + "mcm_is_storm_ovfl_err", + "mcm_is_storm_under_err", + "mcm_is_msdm_ovfl_err", + "mcm_is_msdm_under_err", + "mcm_is_ysdm_ovfl_err", + "mcm_is_ysdm_under_err", + "mcm_is_usdm_ovfl_err", + "mcm_is_usdm_under_err", + "mcm_is_tmld_ovfl_err", + "mcm_is_tmld_under_err", + "mcm_is_usem_ovfl_err", + "mcm_is_usem_under_err", + "mcm_is_ysem_ovfl_err", + "mcm_is_ysem_under_err", + "mcm_is_pbf_ovfl_err", + "mcm_is_pbf_under_err", + "mcm_is_qm_p_ovfl_err", + "mcm_is_qm_p_under_err", + "mcm_is_qm_s_ovfl_err", + "mcm_is_qm_s_under_err", + "mcm_is_grc_ovfl_err0", + "mcm_is_grc_under_err0", + "mcm_is_grc_ovfl_err1", + "mcm_is_grc_under_err1", + "mcm_is_grc_ovfl_err2", + "mcm_is_grc_under_err2", + "mcm_is_grc_ovfl_err3", + "mcm_is_grc_under_err3", + "mcm_in_prcs_tbl_ovfl", + "mcm_agg_con_data_buf_ovfl", + "mcm_agg_con_cmd_buf_ovfl", + "mcm_sm_con_data_buf_ovfl", + "mcm_sm_con_cmd_buf_ovfl", + "mcm_agg_task_data_buf_ovfl", + "mcm_agg_task_cmd_buf_ovfl", + "mcm_sm_task_data_buf_ovfl", + "mcm_sm_task_cmd_buf_ovfl", + "mcm_fi_desc_input_violate", + "mcm_se_desc_input_violate", + "mcm_qmreg_more4", +}; +#else +#define mcm_int_attn_desc OSAL_NULL +#endif + +static const u16 mcm_int0_bb_a0_attn_idx[14] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg mcm_int0_bb_a0 = { + 0, 14, mcm_int0_bb_a0_attn_idx, 0x1200180, 0x120018c, 0x1200188, + 0x1200184 +}; + +static const 
u16 mcm_int1_bb_a0_attn_idx[26] = { + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg mcm_int1_bb_a0 = { + 1, 26, mcm_int1_bb_a0_attn_idx, 0x1200190, 0x120019c, 0x1200198, + 0x1200194 +}; + +static const u16 mcm_int2_bb_a0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg mcm_int2_bb_a0 = { + 2, 1, mcm_int2_bb_a0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, + 0x12001a4 +}; + +static struct attn_hw_reg *mcm_int_bb_a0_regs[3] = { + &mcm_int0_bb_a0, &mcm_int1_bb_a0, &mcm_int2_bb_a0, +}; + +static const u16 mcm_int0_bb_b0_attn_idx[14] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg mcm_int0_bb_b0 = { + 0, 14, mcm_int0_bb_b0_attn_idx, 0x1200180, 0x120018c, 0x1200188, + 0x1200184 +}; + +static const u16 mcm_int1_bb_b0_attn_idx[26] = { + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg mcm_int1_bb_b0 = { + 1, 26, mcm_int1_bb_b0_attn_idx, 0x1200190, 0x120019c, 0x1200198, + 0x1200194 +}; + +static const u16 mcm_int2_bb_b0_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg mcm_int2_bb_b0 = { + 2, 1, mcm_int2_bb_b0_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, + 0x12001a4 +}; + +static struct attn_hw_reg *mcm_int_bb_b0_regs[3] = { + &mcm_int0_bb_b0, &mcm_int1_bb_b0, &mcm_int2_bb_b0, +}; + +static const u16 mcm_int0_k2_attn_idx[14] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg mcm_int0_k2 = { + 0, 14, mcm_int0_k2_attn_idx, 0x1200180, 0x120018c, 0x1200188, 0x1200184 +}; + +static const u16 mcm_int1_k2_attn_idx[26] = { + 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, + 32, 33, 34, 35, 36, 37, 38, 39, +}; + +static struct attn_hw_reg mcm_int1_k2 = { + 1, 26, mcm_int1_k2_attn_idx, 0x1200190, 0x120019c, 0x1200198, 0x1200194 +}; + +static const u16 mcm_int2_k2_attn_idx[1] = { + 40, +}; + +static struct attn_hw_reg mcm_int2_k2 = { + 2, 1, mcm_int2_k2_attn_idx, 0x12001a0, 0x12001ac, 0x12001a8, 0x12001a4 +}; + +static struct attn_hw_reg *mcm_int_k2_regs[3] = { + &mcm_int0_k2, &mcm_int1_k2, &mcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *mcm_prty_attn_desc[46] = { + "mcm_mem028_i_ecc_rf_int", + "mcm_mem003_i_ecc_rf_int", + "mcm_mem023_i_ecc_0_rf_int", + "mcm_mem023_i_ecc_1_rf_int", + "mcm_mem005_i_ecc_0_rf_int", + "mcm_mem005_i_ecc_1_rf_int", + "mcm_mem025_i_ecc_0_rf_int", + "mcm_mem025_i_ecc_1_rf_int", + "mcm_mem026_i_ecc_rf_int", + "mcm_mem017_i_mem_prty", + "mcm_mem019_i_mem_prty", + "mcm_mem016_i_mem_prty", + "mcm_mem015_i_mem_prty", + "mcm_mem020_i_mem_prty", + "mcm_mem021_i_mem_prty", + "mcm_mem018_i_mem_prty", + "mcm_mem011_i_mem_prty", + "mcm_mem012_i_mem_prty", + "mcm_mem013_i_mem_prty", + "mcm_mem014_i_mem_prty", + "mcm_mem031_i_mem_prty", + "mcm_mem030_i_mem_prty", + "mcm_mem029_i_mem_prty", + "mcm_mem004_i_mem_prty", + "mcm_mem024_i_mem_prty", + "mcm_mem006_i_mem_prty", + "mcm_mem027_i_mem_prty", + "mcm_mem022_i_mem_prty", + "mcm_mem007_i_mem_prty_0", + "mcm_mem007_i_mem_prty_1", + "mcm_mem008_i_mem_prty", + "mcm_mem001_i_ecc_rf_int", + "mcm_mem021_i_ecc_0_rf_int", + "mcm_mem021_i_ecc_1_rf_int", + "mcm_mem003_i_ecc_0_rf_int", + "mcm_mem003_i_ecc_1_rf_int", + "mcm_mem024_i_ecc_rf_int", + "mcm_mem009_i_mem_prty", + "mcm_mem010_i_mem_prty", + "mcm_mem028_i_mem_prty", + "mcm_mem002_i_mem_prty", + "mcm_mem025_i_mem_prty", + "mcm_mem005_i_mem_prty_0", + "mcm_mem005_i_mem_prty_1", + "mcm_mem001_i_mem_prty", + 
"mcm_mem007_i_mem_prty", +}; +#else +#define mcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 mcm_prty1_bb_a0_attn_idx[31] = { + 2, 3, 8, 9, 10, 11, 12, 13, 15, 16, 17, 18, 19, 22, 23, 25, 26, 27, 31, + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg mcm_prty1_bb_a0 = { + 0, 31, mcm_prty1_bb_a0_attn_idx, 0x1200200, 0x120020c, 0x1200208, + 0x1200204 +}; + +static const u16 mcm_prty2_bb_a0_attn_idx[4] = { + 45, 30, 21, 20, +}; + +static struct attn_hw_reg mcm_prty2_bb_a0 = { + 1, 4, mcm_prty2_bb_a0_attn_idx, 0x1200210, 0x120021c, 0x1200218, + 0x1200214 +}; + +static struct attn_hw_reg *mcm_prty_bb_a0_regs[2] = { + &mcm_prty1_bb_a0, &mcm_prty2_bb_a0, +}; + +static const u16 mcm_prty1_bb_b0_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg mcm_prty1_bb_b0 = { + 0, 31, mcm_prty1_bb_b0_attn_idx, 0x1200200, 0x120020c, 0x1200208, + 0x1200204 +}; + +static const u16 mcm_prty2_bb_b0_attn_idx[4] = { + 37, 38, 44, 40, +}; + +static struct attn_hw_reg mcm_prty2_bb_b0 = { + 1, 4, mcm_prty2_bb_b0_attn_idx, 0x1200210, 0x120021c, 0x1200218, + 0x1200214 +}; + +static struct attn_hw_reg *mcm_prty_bb_b0_regs[2] = { + &mcm_prty1_bb_b0, &mcm_prty2_bb_b0, +}; + +static const u16 mcm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg mcm_prty1_k2 = { + 0, 31, mcm_prty1_k2_attn_idx, 0x1200200, 0x120020c, 0x1200208, + 0x1200204 +}; + +static const u16 mcm_prty2_k2_attn_idx[4] = { + 37, 38, 44, 40, +}; + +static struct attn_hw_reg mcm_prty2_k2 = { + 1, 4, mcm_prty2_k2_attn_idx, 0x1200210, 0x120021c, 0x1200218, 0x1200214 +}; + +static struct attn_hw_reg *mcm_prty_k2_regs[2] = { + &mcm_prty1_k2, &mcm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *ucm_int_attn_desc[47] = { + "ucm_address_error", + "ucm_is_storm_ovfl_err", + "ucm_is_storm_under_err", + "ucm_is_xsdm_ovfl_err", + "ucm_is_xsdm_under_err", + "ucm_is_ysdm_ovfl_err", + "ucm_is_ysdm_under_err", + "ucm_is_usdm_ovfl_err", + "ucm_is_usdm_under_err", + "ucm_is_rdif_ovfl_err", + "ucm_is_rdif_under_err", + "ucm_is_tdif_ovfl_err", + "ucm_is_tdif_under_err", + "ucm_is_muld_ovfl_err", + "ucm_is_muld_under_err", + "ucm_is_yuld_ovfl_err", + "ucm_is_yuld_under_err", + "ucm_is_dorq_ovfl_err", + "ucm_is_dorq_under_err", + "ucm_is_pbf_ovfl_err", + "ucm_is_pbf_under_err", + "ucm_is_tm_ovfl_err", + "ucm_is_tm_under_err", + "ucm_is_qm_p_ovfl_err", + "ucm_is_qm_p_under_err", + "ucm_is_qm_s_ovfl_err", + "ucm_is_qm_s_under_err", + "ucm_is_grc_ovfl_err0", + "ucm_is_grc_under_err0", + "ucm_is_grc_ovfl_err1", + "ucm_is_grc_under_err1", + "ucm_is_grc_ovfl_err2", + "ucm_is_grc_under_err2", + "ucm_is_grc_ovfl_err3", + "ucm_is_grc_under_err3", + "ucm_in_prcs_tbl_ovfl", + "ucm_agg_con_data_buf_ovfl", + "ucm_agg_con_cmd_buf_ovfl", + "ucm_sm_con_data_buf_ovfl", + "ucm_sm_con_cmd_buf_ovfl", + "ucm_agg_task_data_buf_ovfl", + "ucm_agg_task_cmd_buf_ovfl", + "ucm_sm_task_data_buf_ovfl", + "ucm_sm_task_cmd_buf_ovfl", + "ucm_fi_desc_input_violate", + "ucm_se_desc_input_violate", + "ucm_qmreg_more4", +}; +#else +#define ucm_int_attn_desc OSAL_NULL +#endif + +static const u16 ucm_int0_bb_a0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ucm_int0_bb_a0 = { + 0, 17, ucm_int0_bb_a0_attn_idx, 0x1280180, 0x128018c, 0x1280188, + 0x1280184 +}; + +static 
const u16 ucm_int1_bb_a0_attn_idx[29] = { + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, +}; + +static struct attn_hw_reg ucm_int1_bb_a0 = { + 1, 29, ucm_int1_bb_a0_attn_idx, 0x1280190, 0x128019c, 0x1280198, + 0x1280194 +}; + +static const u16 ucm_int2_bb_a0_attn_idx[1] = { + 46, +}; + +static struct attn_hw_reg ucm_int2_bb_a0 = { + 2, 1, ucm_int2_bb_a0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, + 0x12801a4 +}; + +static struct attn_hw_reg *ucm_int_bb_a0_regs[3] = { + &ucm_int0_bb_a0, &ucm_int1_bb_a0, &ucm_int2_bb_a0, +}; + +static const u16 ucm_int0_bb_b0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ucm_int0_bb_b0 = { + 0, 17, ucm_int0_bb_b0_attn_idx, 0x1280180, 0x128018c, 0x1280188, + 0x1280184 +}; + +static const u16 ucm_int1_bb_b0_attn_idx[29] = { + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, +}; + +static struct attn_hw_reg ucm_int1_bb_b0 = { + 1, 29, ucm_int1_bb_b0_attn_idx, 0x1280190, 0x128019c, 0x1280198, + 0x1280194 +}; + +static const u16 ucm_int2_bb_b0_attn_idx[1] = { + 46, +}; + +static struct attn_hw_reg ucm_int2_bb_b0 = { + 2, 1, ucm_int2_bb_b0_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, + 0x12801a4 +}; + +static struct attn_hw_reg *ucm_int_bb_b0_regs[3] = { + &ucm_int0_bb_b0, &ucm_int1_bb_b0, &ucm_int2_bb_b0, +}; + +static const u16 ucm_int0_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ucm_int0_k2 = { + 0, 17, ucm_int0_k2_attn_idx, 0x1280180, 0x128018c, 0x1280188, 0x1280184 +}; + +static const u16 ucm_int1_k2_attn_idx[29] = { + 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, + 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, +}; + +static struct attn_hw_reg ucm_int1_k2 = { + 1, 29, ucm_int1_k2_attn_idx, 0x1280190, 0x128019c, 0x1280198, 0x1280194 +}; + +static const u16 ucm_int2_k2_attn_idx[1] = { + 46, +}; + +static struct attn_hw_reg ucm_int2_k2 = { + 2, 1, ucm_int2_k2_attn_idx, 0x12801a0, 0x12801ac, 0x12801a8, 0x12801a4 +}; + +static struct attn_hw_reg *ucm_int_k2_regs[3] = { + &ucm_int0_k2, &ucm_int1_k2, &ucm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *ucm_prty_attn_desc[54] = { + "ucm_mem030_i_ecc_rf_int", + "ucm_mem005_i_ecc_0_rf_int", + "ucm_mem005_i_ecc_1_rf_int", + "ucm_mem024_i_ecc_0_rf_int", + "ucm_mem024_i_ecc_1_rf_int", + "ucm_mem025_i_ecc_rf_int", + "ucm_mem007_i_ecc_0_rf_int", + "ucm_mem007_i_ecc_1_rf_int", + "ucm_mem008_i_ecc_rf_int", + "ucm_mem027_i_ecc_0_rf_int", + "ucm_mem027_i_ecc_1_rf_int", + "ucm_mem028_i_ecc_rf_int", + "ucm_mem020_i_mem_prty", + "ucm_mem021_i_mem_prty", + "ucm_mem019_i_mem_prty", + "ucm_mem013_i_mem_prty", + "ucm_mem018_i_mem_prty", + "ucm_mem022_i_mem_prty", + "ucm_mem014_i_mem_prty", + "ucm_mem015_i_mem_prty", + "ucm_mem016_i_mem_prty", + "ucm_mem017_i_mem_prty", + "ucm_mem033_i_mem_prty", + "ucm_mem032_i_mem_prty", + "ucm_mem031_i_mem_prty", + "ucm_mem006_i_mem_prty", + "ucm_mem026_i_mem_prty", + "ucm_mem009_i_mem_prty", + "ucm_mem029_i_mem_prty", + "ucm_mem023_i_mem_prty", + "ucm_mem010_i_mem_prty_0", + "ucm_mem003_i_ecc_0_rf_int", + "ucm_mem003_i_ecc_1_rf_int", + "ucm_mem022_i_ecc_0_rf_int", + "ucm_mem022_i_ecc_1_rf_int", + "ucm_mem023_i_ecc_rf_int", + "ucm_mem006_i_ecc_rf_int", + "ucm_mem025_i_ecc_0_rf_int", + "ucm_mem025_i_ecc_1_rf_int", + "ucm_mem026_i_ecc_rf_int", + "ucm_mem011_i_mem_prty", + "ucm_mem012_i_mem_prty", + 
"ucm_mem030_i_mem_prty", + "ucm_mem004_i_mem_prty", + "ucm_mem024_i_mem_prty", + "ucm_mem007_i_mem_prty", + "ucm_mem027_i_mem_prty", + "ucm_mem008_i_mem_prty_0", + "ucm_mem010_i_mem_prty_1", + "ucm_mem003_i_mem_prty", + "ucm_mem001_i_mem_prty", + "ucm_mem002_i_mem_prty", + "ucm_mem008_i_mem_prty_1", + "ucm_mem010_i_mem_prty", +}; +#else +#define ucm_prty_attn_desc OSAL_NULL +#endif + +static const u16 ucm_prty1_bb_a0_attn_idx[31] = { + 1, 2, 11, 12, 13, 14, 15, 16, 18, 19, 20, 21, 24, 28, 31, 32, 33, 34, + 35, + 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, +}; + +static struct attn_hw_reg ucm_prty1_bb_a0 = { + 0, 31, ucm_prty1_bb_a0_attn_idx, 0x1280200, 0x128020c, 0x1280208, + 0x1280204 +}; + +static const u16 ucm_prty2_bb_a0_attn_idx[7] = { + 50, 51, 52, 27, 53, 23, 22, +}; + +static struct attn_hw_reg ucm_prty2_bb_a0 = { + 1, 7, ucm_prty2_bb_a0_attn_idx, 0x1280210, 0x128021c, 0x1280218, + 0x1280214 +}; + +static struct attn_hw_reg *ucm_prty_bb_a0_regs[2] = { + &ucm_prty1_bb_a0, &ucm_prty2_bb_a0, +}; + +static const u16 ucm_prty1_bb_b0_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg ucm_prty1_bb_b0 = { + 0, 31, ucm_prty1_bb_b0_attn_idx, 0x1280200, 0x128020c, 0x1280208, + 0x1280204 +}; + +static const u16 ucm_prty2_bb_b0_attn_idx[7] = { + 48, 40, 41, 49, 43, 50, 51, +}; + +static struct attn_hw_reg ucm_prty2_bb_b0 = { + 1, 7, ucm_prty2_bb_b0_attn_idx, 0x1280210, 0x128021c, 0x1280218, + 0x1280214 +}; + +static struct attn_hw_reg *ucm_prty_bb_b0_regs[2] = { + &ucm_prty1_bb_b0, &ucm_prty2_bb_b0, +}; + +static const u16 ucm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg ucm_prty1_k2 = { + 0, 31, ucm_prty1_k2_attn_idx, 0x1280200, 0x128020c, 0x1280208, + 0x1280204 +}; + +static const u16 ucm_prty2_k2_attn_idx[7] = { + 48, 40, 41, 49, 43, 50, 51, +}; + +static struct attn_hw_reg ucm_prty2_k2 = { + 1, 7, ucm_prty2_k2_attn_idx, 0x1280210, 0x128021c, 0x1280218, 0x1280214 +}; + +static struct attn_hw_reg *ucm_prty_k2_regs[2] = { + &ucm_prty1_k2, &ucm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *xcm_int_attn_desc[49] = { + "xcm_address_error", + "xcm_is_storm_ovfl_err", + "xcm_is_storm_under_err", + "xcm_is_msdm_ovfl_err", + "xcm_is_msdm_under_err", + "xcm_is_xsdm_ovfl_err", + "xcm_is_xsdm_under_err", + "xcm_is_ysdm_ovfl_err", + "xcm_is_ysdm_under_err", + "xcm_is_usdm_ovfl_err", + "xcm_is_usdm_under_err", + "xcm_is_msem_ovfl_err", + "xcm_is_msem_under_err", + "xcm_is_usem_ovfl_err", + "xcm_is_usem_under_err", + "xcm_is_ysem_ovfl_err", + "xcm_is_ysem_under_err", + "xcm_is_dorq_ovfl_err", + "xcm_is_dorq_under_err", + "xcm_is_pbf_ovfl_err", + "xcm_is_pbf_under_err", + "xcm_is_tm_ovfl_err", + "xcm_is_tm_under_err", + "xcm_is_qm_p_ovfl_err", + "xcm_is_qm_p_under_err", + "xcm_is_qm_s_ovfl_err", + "xcm_is_qm_s_under_err", + "xcm_is_grc_ovfl_err0", + "xcm_is_grc_under_err0", + "xcm_is_grc_ovfl_err1", + "xcm_is_grc_under_err1", + "xcm_is_grc_ovfl_err2", + "xcm_is_grc_under_err2", + "xcm_is_grc_ovfl_err3", + "xcm_is_grc_under_err3", + "xcm_in_prcs_tbl_ovfl", + "xcm_agg_con_data_buf_ovfl", + "xcm_agg_con_cmd_buf_ovfl", + "xcm_sm_con_data_buf_ovfl", + "xcm_sm_con_cmd_buf_ovfl", + "xcm_fi_desc_input_violate", + "xcm_qm_act_st_cnt_msg_prcs_under", + "xcm_qm_act_st_cnt_msg_prcs_ovfl", + "xcm_qm_act_st_cnt_ext_ld_under", + "xcm_qm_act_st_cnt_ext_ld_ovfl", 
+ "xcm_qm_act_st_cnt_rbc_under", + "xcm_qm_act_st_cnt_rbc_ovfl", + "xcm_qm_act_st_cnt_drop_under", + "xcm_qm_act_st_cnt_illeg_pqnum", +}; +#else +#define xcm_int_attn_desc OSAL_NULL +#endif + +static const u16 xcm_int0_bb_a0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg xcm_int0_bb_a0 = { + 0, 16, xcm_int0_bb_a0_attn_idx, 0x1000180, 0x100018c, 0x1000188, + 0x1000184 +}; + +static const u16 xcm_int1_bb_a0_attn_idx[25] = { + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg xcm_int1_bb_a0 = { + 1, 25, xcm_int1_bb_a0_attn_idx, 0x1000190, 0x100019c, 0x1000198, + 0x1000194 +}; + +static const u16 xcm_int2_bb_a0_attn_idx[8] = { + 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_int2_bb_a0 = { + 2, 8, xcm_int2_bb_a0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, + 0x10001a4 +}; + +static struct attn_hw_reg *xcm_int_bb_a0_regs[3] = { + &xcm_int0_bb_a0, &xcm_int1_bb_a0, &xcm_int2_bb_a0, +}; + +static const u16 xcm_int0_bb_b0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg xcm_int0_bb_b0 = { + 0, 16, xcm_int0_bb_b0_attn_idx, 0x1000180, 0x100018c, 0x1000188, + 0x1000184 +}; + +static const u16 xcm_int1_bb_b0_attn_idx[25] = { + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg xcm_int1_bb_b0 = { + 1, 25, xcm_int1_bb_b0_attn_idx, 0x1000190, 0x100019c, 0x1000198, + 0x1000194 +}; + +static const u16 xcm_int2_bb_b0_attn_idx[8] = { + 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_int2_bb_b0 = { + 2, 8, xcm_int2_bb_b0_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, + 0x10001a4 +}; + +static struct attn_hw_reg *xcm_int_bb_b0_regs[3] = { + &xcm_int0_bb_b0, &xcm_int1_bb_b0, &xcm_int2_bb_b0, +}; + +static const u16 xcm_int0_k2_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg xcm_int0_k2 = { + 0, 16, xcm_int0_k2_attn_idx, 0x1000180, 0x100018c, 0x1000188, 0x1000184 +}; + +static const u16 xcm_int1_k2_attn_idx[25] = { + 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, + 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg xcm_int1_k2 = { + 1, 25, xcm_int1_k2_attn_idx, 0x1000190, 0x100019c, 0x1000198, 0x1000194 +}; + +static const u16 xcm_int2_k2_attn_idx[8] = { + 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_int2_k2 = { + 2, 8, xcm_int2_k2_attn_idx, 0x10001a0, 0x10001ac, 0x10001a8, 0x10001a4 +}; + +static struct attn_hw_reg *xcm_int_k2_regs[3] = { + &xcm_int0_k2, &xcm_int1_k2, &xcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *xcm_prty_attn_desc[59] = { + "xcm_mem036_i_ecc_rf_int", + "xcm_mem003_i_ecc_0_rf_int", + "xcm_mem003_i_ecc_1_rf_int", + "xcm_mem003_i_ecc_2_rf_int", + "xcm_mem003_i_ecc_3_rf_int", + "xcm_mem004_i_ecc_rf_int", + "xcm_mem033_i_ecc_0_rf_int", + "xcm_mem033_i_ecc_1_rf_int", + "xcm_mem034_i_ecc_rf_int", + "xcm_mem026_i_mem_prty", + "xcm_mem025_i_mem_prty", + "xcm_mem022_i_mem_prty", + "xcm_mem029_i_mem_prty", + "xcm_mem023_i_mem_prty", + "xcm_mem028_i_mem_prty", + "xcm_mem030_i_mem_prty", + "xcm_mem017_i_mem_prty", + "xcm_mem024_i_mem_prty", + "xcm_mem027_i_mem_prty", + "xcm_mem018_i_mem_prty", + "xcm_mem019_i_mem_prty", + "xcm_mem020_i_mem_prty", + "xcm_mem021_i_mem_prty", + "xcm_mem039_i_mem_prty", + "xcm_mem038_i_mem_prty", + "xcm_mem037_i_mem_prty", + 
"xcm_mem005_i_mem_prty", + "xcm_mem035_i_mem_prty", + "xcm_mem031_i_mem_prty", + "xcm_mem006_i_mem_prty", + "xcm_mem015_i_mem_prty", + "xcm_mem035_i_ecc_rf_int", + "xcm_mem032_i_ecc_0_rf_int", + "xcm_mem032_i_ecc_1_rf_int", + "xcm_mem033_i_ecc_rf_int", + "xcm_mem036_i_mem_prty", + "xcm_mem034_i_mem_prty", + "xcm_mem016_i_mem_prty", + "xcm_mem002_i_ecc_0_rf_int", + "xcm_mem002_i_ecc_1_rf_int", + "xcm_mem002_i_ecc_2_rf_int", + "xcm_mem002_i_ecc_3_rf_int", + "xcm_mem003_i_ecc_rf_int", + "xcm_mem031_i_ecc_0_rf_int", + "xcm_mem031_i_ecc_1_rf_int", + "xcm_mem032_i_ecc_rf_int", + "xcm_mem004_i_mem_prty", + "xcm_mem033_i_mem_prty", + "xcm_mem014_i_mem_prty", + "xcm_mem032_i_mem_prty", + "xcm_mem007_i_mem_prty", + "xcm_mem008_i_mem_prty", + "xcm_mem009_i_mem_prty", + "xcm_mem010_i_mem_prty", + "xcm_mem011_i_mem_prty", + "xcm_mem012_i_mem_prty", + "xcm_mem013_i_mem_prty", + "xcm_mem001_i_mem_prty", + "xcm_mem002_i_mem_prty", +}; +#else +#define xcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 xcm_prty1_bb_a0_attn_idx[31] = { + 8, 9, 10, 11, 12, 13, 14, 16, 17, 18, 19, 20, 21, 22, 25, 26, 27, 30, + 35, + 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg xcm_prty1_bb_a0 = { + 0, 31, xcm_prty1_bb_a0_attn_idx, 0x1000200, 0x100020c, 0x1000208, + 0x1000204 +}; + +static const u16 xcm_prty2_bb_a0_attn_idx[11] = { + 50, 51, 52, 53, 54, 55, 56, 57, 15, 29, 24, +}; + +static struct attn_hw_reg xcm_prty2_bb_a0 = { + 1, 11, xcm_prty2_bb_a0_attn_idx, 0x1000210, 0x100021c, 0x1000218, + 0x1000214 +}; + +static struct attn_hw_reg *xcm_prty_bb_a0_regs[2] = { + &xcm_prty1_bb_a0, &xcm_prty2_bb_a0, +}; + +static const u16 xcm_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 4, 5, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, + 24, + 25, 26, 29, 30, 31, 32, 33, 34, 35, 36, 37, +}; + +static struct attn_hw_reg xcm_prty1_bb_b0 = { + 0, 31, xcm_prty1_bb_b0_attn_idx, 0x1000200, 0x100020c, 0x1000208, + 0x1000204 +}; + +static const u16 xcm_prty2_bb_b0_attn_idx[11] = { + 50, 51, 52, 53, 54, 55, 56, 48, 57, 58, 28, +}; + +static struct attn_hw_reg xcm_prty2_bb_b0 = { + 1, 11, xcm_prty2_bb_b0_attn_idx, 0x1000210, 0x100021c, 0x1000218, + 0x1000214 +}; + +static struct attn_hw_reg *xcm_prty_bb_b0_regs[2] = { + &xcm_prty1_bb_b0, &xcm_prty2_bb_b0, +}; + +static const u16 xcm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg xcm_prty1_k2 = { + 0, 31, xcm_prty1_k2_attn_idx, 0x1000200, 0x100020c, 0x1000208, + 0x1000204 +}; + +static const u16 xcm_prty2_k2_attn_idx[12] = { + 37, 49, 50, 51, 52, 53, 54, 55, 56, 48, 57, 58, +}; + +static struct attn_hw_reg xcm_prty2_k2 = { + 1, 12, xcm_prty2_k2_attn_idx, 0x1000210, 0x100021c, 0x1000218, + 0x1000214 +}; + +static struct attn_hw_reg *xcm_prty_k2_regs[2] = { + &xcm_prty1_k2, &xcm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *ycm_int_attn_desc[37] = { + "ycm_address_error", + "ycm_is_storm_ovfl_err", + "ycm_is_storm_under_err", + "ycm_is_msdm_ovfl_err", + "ycm_is_msdm_under_err", + "ycm_is_ysdm_ovfl_err", + "ycm_is_ysdm_under_err", + "ycm_is_xyld_ovfl_err", + "ycm_is_xyld_under_err", + "ycm_is_msem_ovfl_err", + "ycm_is_msem_under_err", + "ycm_is_usem_ovfl_err", + "ycm_is_usem_under_err", + "ycm_is_pbf_ovfl_err", + "ycm_is_pbf_under_err", + "ycm_is_qm_p_ovfl_err", + "ycm_is_qm_p_under_err", + "ycm_is_qm_s_ovfl_err", + "ycm_is_qm_s_under_err", + "ycm_is_grc_ovfl_err0", + "ycm_is_grc_under_err0", + "ycm_is_grc_ovfl_err1", 
+ "ycm_is_grc_under_err1", + "ycm_is_grc_ovfl_err2", + "ycm_is_grc_under_err2", + "ycm_is_grc_ovfl_err3", + "ycm_is_grc_under_err3", + "ycm_in_prcs_tbl_ovfl", + "ycm_sm_con_data_buf_ovfl", + "ycm_sm_con_cmd_buf_ovfl", + "ycm_agg_task_data_buf_ovfl", + "ycm_agg_task_cmd_buf_ovfl", + "ycm_sm_task_data_buf_ovfl", + "ycm_sm_task_cmd_buf_ovfl", + "ycm_fi_desc_input_violate", + "ycm_se_desc_input_violate", + "ycm_qmreg_more4", +}; +#else +#define ycm_int_attn_desc OSAL_NULL +#endif + +static const u16 ycm_int0_bb_a0_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg ycm_int0_bb_a0 = { + 0, 13, ycm_int0_bb_a0_attn_idx, 0x1080180, 0x108018c, 0x1080188, + 0x1080184 +}; + +static const u16 ycm_int1_bb_a0_attn_idx[23] = { + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg ycm_int1_bb_a0 = { + 1, 23, ycm_int1_bb_a0_attn_idx, 0x1080190, 0x108019c, 0x1080198, + 0x1080194 +}; + +static const u16 ycm_int2_bb_a0_attn_idx[1] = { + 36, +}; + +static struct attn_hw_reg ycm_int2_bb_a0 = { + 2, 1, ycm_int2_bb_a0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, + 0x10801a4 +}; + +static struct attn_hw_reg *ycm_int_bb_a0_regs[3] = { + &ycm_int0_bb_a0, &ycm_int1_bb_a0, &ycm_int2_bb_a0, +}; + +static const u16 ycm_int0_bb_b0_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg ycm_int0_bb_b0 = { + 0, 13, ycm_int0_bb_b0_attn_idx, 0x1080180, 0x108018c, 0x1080188, + 0x1080184 +}; + +static const u16 ycm_int1_bb_b0_attn_idx[23] = { + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg ycm_int1_bb_b0 = { + 1, 23, ycm_int1_bb_b0_attn_idx, 0x1080190, 0x108019c, 0x1080198, + 0x1080194 +}; + +static const u16 ycm_int2_bb_b0_attn_idx[1] = { + 36, +}; + +static struct attn_hw_reg ycm_int2_bb_b0 = { + 2, 1, ycm_int2_bb_b0_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, + 0x10801a4 +}; + +static struct attn_hw_reg *ycm_int_bb_b0_regs[3] = { + &ycm_int0_bb_b0, &ycm_int1_bb_b0, &ycm_int2_bb_b0, +}; + +static const u16 ycm_int0_k2_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg ycm_int0_k2 = { + 0, 13, ycm_int0_k2_attn_idx, 0x1080180, 0x108018c, 0x1080188, 0x1080184 +}; + +static const u16 ycm_int1_k2_attn_idx[23] = { + 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, + 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg ycm_int1_k2 = { + 1, 23, ycm_int1_k2_attn_idx, 0x1080190, 0x108019c, 0x1080198, 0x1080194 +}; + +static const u16 ycm_int2_k2_attn_idx[1] = { + 36, +}; + +static struct attn_hw_reg ycm_int2_k2 = { + 2, 1, ycm_int2_k2_attn_idx, 0x10801a0, 0x10801ac, 0x10801a8, 0x10801a4 +}; + +static struct attn_hw_reg *ycm_int_k2_regs[3] = { + &ycm_int0_k2, &ycm_int1_k2, &ycm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *ycm_prty_attn_desc[44] = { + "ycm_mem027_i_ecc_rf_int", + "ycm_mem003_i_ecc_0_rf_int", + "ycm_mem003_i_ecc_1_rf_int", + "ycm_mem022_i_ecc_0_rf_int", + "ycm_mem022_i_ecc_1_rf_int", + "ycm_mem023_i_ecc_rf_int", + "ycm_mem005_i_ecc_0_rf_int", + "ycm_mem005_i_ecc_1_rf_int", + "ycm_mem025_i_ecc_0_rf_int", + "ycm_mem025_i_ecc_1_rf_int", + "ycm_mem018_i_mem_prty", + "ycm_mem020_i_mem_prty", + "ycm_mem017_i_mem_prty", + "ycm_mem016_i_mem_prty", + "ycm_mem019_i_mem_prty", + "ycm_mem015_i_mem_prty", + "ycm_mem011_i_mem_prty", + "ycm_mem012_i_mem_prty", + "ycm_mem013_i_mem_prty", + "ycm_mem014_i_mem_prty", + 
"ycm_mem030_i_mem_prty", + "ycm_mem029_i_mem_prty", + "ycm_mem028_i_mem_prty", + "ycm_mem004_i_mem_prty", + "ycm_mem024_i_mem_prty", + "ycm_mem006_i_mem_prty", + "ycm_mem026_i_mem_prty", + "ycm_mem021_i_mem_prty", + "ycm_mem007_i_mem_prty_0", + "ycm_mem007_i_mem_prty_1", + "ycm_mem008_i_mem_prty", + "ycm_mem026_i_ecc_rf_int", + "ycm_mem021_i_ecc_0_rf_int", + "ycm_mem021_i_ecc_1_rf_int", + "ycm_mem022_i_ecc_rf_int", + "ycm_mem024_i_ecc_0_rf_int", + "ycm_mem024_i_ecc_1_rf_int", + "ycm_mem027_i_mem_prty", + "ycm_mem023_i_mem_prty", + "ycm_mem025_i_mem_prty", + "ycm_mem009_i_mem_prty", + "ycm_mem010_i_mem_prty", + "ycm_mem001_i_mem_prty", + "ycm_mem002_i_mem_prty", +}; +#else +#define ycm_prty_attn_desc OSAL_NULL +#endif + +static const u16 ycm_prty1_bb_a0_attn_idx[31] = { + 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg ycm_prty1_bb_a0 = { + 0, 31, ycm_prty1_bb_a0_attn_idx, 0x1080200, 0x108020c, 0x1080208, + 0x1080204 +}; + +static const u16 ycm_prty2_bb_a0_attn_idx[3] = { + 41, 42, 43, +}; + +static struct attn_hw_reg ycm_prty2_bb_a0 = { + 1, 3, ycm_prty2_bb_a0_attn_idx, 0x1080210, 0x108021c, 0x1080218, + 0x1080214 +}; + +static struct attn_hw_reg *ycm_prty_bb_a0_regs[2] = { + &ycm_prty1_bb_a0, &ycm_prty2_bb_a0, +}; + +static const u16 ycm_prty1_bb_b0_attn_idx[31] = { + 1, 2, 6, 7, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 21, 22, 23, 25, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, +}; + +static struct attn_hw_reg ycm_prty1_bb_b0 = { + 0, 31, ycm_prty1_bb_b0_attn_idx, 0x1080200, 0x108020c, 0x1080208, + 0x1080204 +}; + +static const u16 ycm_prty2_bb_b0_attn_idx[3] = { + 41, 42, 43, +}; + +static struct attn_hw_reg ycm_prty2_bb_b0 = { + 1, 3, ycm_prty2_bb_b0_attn_idx, 0x1080210, 0x108021c, 0x1080218, + 0x1080214 +}; + +static struct attn_hw_reg *ycm_prty_bb_b0_regs[2] = { + &ycm_prty1_bb_b0, &ycm_prty2_bb_b0, +}; + +static const u16 ycm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg ycm_prty1_k2 = { + 0, 31, ycm_prty1_k2_attn_idx, 0x1080200, 0x108020c, 0x1080208, + 0x1080204 +}; + +static const u16 ycm_prty2_k2_attn_idx[4] = { + 40, 41, 42, 43, +}; + +static struct attn_hw_reg ycm_prty2_k2 = { + 1, 4, ycm_prty2_k2_attn_idx, 0x1080210, 0x108021c, 0x1080218, 0x1080214 +}; + +static struct attn_hw_reg *ycm_prty_k2_regs[2] = { + &ycm_prty1_k2, &ycm_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *pcm_int_attn_desc[20] = { + "pcm_address_error", + "pcm_is_storm_ovfl_err", + "pcm_is_storm_under_err", + "pcm_is_psdm_ovfl_err", + "pcm_is_psdm_under_err", + "pcm_is_pbf_ovfl_err", + "pcm_is_pbf_under_err", + "pcm_is_grc_ovfl_err0", + "pcm_is_grc_under_err0", + "pcm_is_grc_ovfl_err1", + "pcm_is_grc_under_err1", + "pcm_is_grc_ovfl_err2", + "pcm_is_grc_under_err2", + "pcm_is_grc_ovfl_err3", + "pcm_is_grc_under_err3", + "pcm_in_prcs_tbl_ovfl", + "pcm_sm_con_data_buf_ovfl", + "pcm_sm_con_cmd_buf_ovfl", + "pcm_fi_desc_input_violate", + "pcm_qmreg_more4", +}; +#else +#define pcm_int_attn_desc OSAL_NULL +#endif + +static const u16 pcm_int0_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pcm_int0_bb_a0 = { + 0, 5, pcm_int0_bb_a0_attn_idx, 0x1100180, 0x110018c, 0x1100188, + 0x1100184 +}; + +static const u16 pcm_int1_bb_a0_attn_idx[14] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pcm_int1_bb_a0 = { 
+ 1, 14, pcm_int1_bb_a0_attn_idx, 0x1100190, 0x110019c, 0x1100198, + 0x1100194 +}; + +static const u16 pcm_int2_bb_a0_attn_idx[1] = { + 19, +}; + +static struct attn_hw_reg pcm_int2_bb_a0 = { + 2, 1, pcm_int2_bb_a0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, + 0x11001a4 +}; + +static struct attn_hw_reg *pcm_int_bb_a0_regs[3] = { + &pcm_int0_bb_a0, &pcm_int1_bb_a0, &pcm_int2_bb_a0, +}; + +static const u16 pcm_int0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pcm_int0_bb_b0 = { + 0, 5, pcm_int0_bb_b0_attn_idx, 0x1100180, 0x110018c, 0x1100188, + 0x1100184 +}; + +static const u16 pcm_int1_bb_b0_attn_idx[14] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pcm_int1_bb_b0 = { + 1, 14, pcm_int1_bb_b0_attn_idx, 0x1100190, 0x110019c, 0x1100198, + 0x1100194 +}; + +static const u16 pcm_int2_bb_b0_attn_idx[1] = { + 19, +}; + +static struct attn_hw_reg pcm_int2_bb_b0 = { + 2, 1, pcm_int2_bb_b0_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, + 0x11001a4 +}; + +static struct attn_hw_reg *pcm_int_bb_b0_regs[3] = { + &pcm_int0_bb_b0, &pcm_int1_bb_b0, &pcm_int2_bb_b0, +}; + +static const u16 pcm_int0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg pcm_int0_k2 = { + 0, 5, pcm_int0_k2_attn_idx, 0x1100180, 0x110018c, 0x1100188, 0x1100184 +}; + +static const u16 pcm_int1_k2_attn_idx[14] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, +}; + +static struct attn_hw_reg pcm_int1_k2 = { + 1, 14, pcm_int1_k2_attn_idx, 0x1100190, 0x110019c, 0x1100198, 0x1100194 +}; + +static const u16 pcm_int2_k2_attn_idx[1] = { + 19, +}; + +static struct attn_hw_reg pcm_int2_k2 = { + 2, 1, pcm_int2_k2_attn_idx, 0x11001a0, 0x11001ac, 0x11001a8, 0x11001a4 +}; + +static struct attn_hw_reg *pcm_int_k2_regs[3] = { + &pcm_int0_k2, &pcm_int1_k2, &pcm_int2_k2, +}; + +#ifdef ATTN_DESC +static const char *pcm_prty_attn_desc[18] = { + "pcm_mem012_i_ecc_rf_int", + "pcm_mem010_i_ecc_0_rf_int", + "pcm_mem010_i_ecc_1_rf_int", + "pcm_mem008_i_mem_prty", + "pcm_mem007_i_mem_prty", + "pcm_mem006_i_mem_prty", + "pcm_mem002_i_mem_prty", + "pcm_mem003_i_mem_prty", + "pcm_mem004_i_mem_prty", + "pcm_mem005_i_mem_prty", + "pcm_mem011_i_mem_prty", + "pcm_mem001_i_mem_prty", + "pcm_mem011_i_ecc_rf_int", + "pcm_mem009_i_ecc_0_rf_int", + "pcm_mem009_i_ecc_1_rf_int", + "pcm_mem010_i_mem_prty", + "pcm_mem013_i_mem_prty", + "pcm_mem012_i_mem_prty", +}; +#else +#define pcm_prty_attn_desc OSAL_NULL +#endif + +static const u16 pcm_prty1_bb_a0_attn_idx[14] = { + 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg pcm_prty1_bb_a0 = { + 0, 14, pcm_prty1_bb_a0_attn_idx, 0x1100200, 0x110020c, 0x1100208, + 0x1100204 +}; + +static struct attn_hw_reg *pcm_prty_bb_a0_regs[1] = { + &pcm_prty1_bb_a0, +}; + +static const u16 pcm_prty1_bb_b0_attn_idx[11] = { + 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg pcm_prty1_bb_b0 = { + 0, 11, pcm_prty1_bb_b0_attn_idx, 0x1100200, 0x110020c, 0x1100208, + 0x1100204 +}; + +static struct attn_hw_reg *pcm_prty_bb_b0_regs[1] = { + &pcm_prty1_bb_b0, +}; + +static const u16 pcm_prty1_k2_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg pcm_prty1_k2 = { + 0, 12, pcm_prty1_k2_attn_idx, 0x1100200, 0x110020c, 0x1100208, + 0x1100204 +}; + +static struct attn_hw_reg *pcm_prty_k2_regs[1] = { + &pcm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *qm_int_attn_desc[22] = { + "qm_address_error", + "qm_ovf_err_tx", + "qm_ovf_err_other", + "qm_pf_usg_cnt_err", + 
"qm_vf_usg_cnt_err", + "qm_voq_crd_inc_err", + "qm_voq_crd_dec_err", + "qm_byte_crd_inc_err", + "qm_byte_crd_dec_err", + "qm_err_incdec_rlglblcrd", + "qm_err_incdec_rlpfcrd", + "qm_err_incdec_wfqpfcrd", + "qm_err_incdec_wfqvpcrd", + "qm_err_incdec_voqlinecrd", + "qm_err_incdec_voqbytecrd", + "qm_fifos_error", + "qm_qm_rl_dc_exp_pf_controller_pop_error", + "qm_qm_rl_dc_exp_pf_controller_push_error", + "qm_qm_rl_dc_rf_req_controller_pop_error", + "qm_qm_rl_dc_rf_req_controller_push_error", + "qm_qm_rl_dc_rf_res_controller_pop_error", + "qm_qm_rl_dc_rf_res_controller_push_error", +}; +#else +#define qm_int_attn_desc OSAL_NULL +#endif + +static const u16 qm_int0_bb_a0_attn_idx[16] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg qm_int0_bb_a0 = { + 0, 16, qm_int0_bb_a0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184 +}; + +static struct attn_hw_reg *qm_int_bb_a0_regs[1] = { + &qm_int0_bb_a0, +}; + +static const u16 qm_int0_bb_b0_attn_idx[22] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, +}; + +static struct attn_hw_reg qm_int0_bb_b0 = { + 0, 22, qm_int0_bb_b0_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184 +}; + +static struct attn_hw_reg *qm_int_bb_b0_regs[1] = { + &qm_int0_bb_b0, +}; + +static const u16 qm_int0_k2_attn_idx[22] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, +}; + +static struct attn_hw_reg qm_int0_k2 = { + 0, 22, qm_int0_k2_attn_idx, 0x2f0180, 0x2f018c, 0x2f0188, 0x2f0184 +}; + +static struct attn_hw_reg *qm_int_k2_regs[1] = { + &qm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *qm_prty_attn_desc[109] = { + "qm_xcm_wrc_fifo", + "qm_ucm_wrc_fifo", + "qm_tcm_wrc_fifo", + "qm_ccm_wrc_fifo", + "qm_bigramhigh", + "qm_bigramlow", + "qm_base_address", + "qm_wrbuff", + "qm_bigramhigh_ext_a", + "qm_bigramlow_ext_a", + "qm_base_address_ext_a", + "qm_mem006_i_ecc_0_rf_int", + "qm_mem006_i_ecc_1_rf_int", + "qm_mem005_i_ecc_0_rf_int", + "qm_mem005_i_ecc_1_rf_int", + "qm_mem012_i_ecc_rf_int", + "qm_mem037_i_mem_prty", + "qm_mem036_i_mem_prty", + "qm_mem039_i_mem_prty", + "qm_mem038_i_mem_prty", + "qm_mem040_i_mem_prty", + "qm_mem042_i_mem_prty", + "qm_mem041_i_mem_prty", + "qm_mem056_i_mem_prty", + "qm_mem055_i_mem_prty", + "qm_mem053_i_mem_prty", + "qm_mem054_i_mem_prty", + "qm_mem057_i_mem_prty", + "qm_mem058_i_mem_prty", + "qm_mem062_i_mem_prty", + "qm_mem061_i_mem_prty", + "qm_mem059_i_mem_prty", + "qm_mem060_i_mem_prty", + "qm_mem063_i_mem_prty", + "qm_mem064_i_mem_prty", + "qm_mem033_i_mem_prty", + "qm_mem032_i_mem_prty", + "qm_mem030_i_mem_prty", + "qm_mem031_i_mem_prty", + "qm_mem034_i_mem_prty", + "qm_mem035_i_mem_prty", + "qm_mem051_i_mem_prty", + "qm_mem042_i_ecc_0_rf_int", + "qm_mem042_i_ecc_1_rf_int", + "qm_mem041_i_ecc_0_rf_int", + "qm_mem041_i_ecc_1_rf_int", + "qm_mem048_i_ecc_rf_int", + "qm_mem009_i_mem_prty", + "qm_mem008_i_mem_prty", + "qm_mem011_i_mem_prty", + "qm_mem010_i_mem_prty", + "qm_mem012_i_mem_prty", + "qm_mem014_i_mem_prty", + "qm_mem013_i_mem_prty", + "qm_mem028_i_mem_prty", + "qm_mem027_i_mem_prty", + "qm_mem025_i_mem_prty", + "qm_mem026_i_mem_prty", + "qm_mem029_i_mem_prty", + "qm_mem005_i_mem_prty", + "qm_mem004_i_mem_prty", + "qm_mem002_i_mem_prty", + "qm_mem003_i_mem_prty", + "qm_mem006_i_mem_prty", + "qm_mem007_i_mem_prty", + "qm_mem023_i_mem_prty", + "qm_mem047_i_mem_prty", + "qm_mem049_i_mem_prty", + "qm_mem048_i_mem_prty", + "qm_mem052_i_mem_prty", + "qm_mem050_i_mem_prty", + "qm_mem045_i_mem_prty", + 
"qm_mem046_i_mem_prty", + "qm_mem043_i_mem_prty", + "qm_mem044_i_mem_prty", + "qm_mem017_i_mem_prty", + "qm_mem016_i_mem_prty", + "qm_mem021_i_mem_prty", + "qm_mem024_i_mem_prty", + "qm_mem019_i_mem_prty", + "qm_mem018_i_mem_prty", + "qm_mem015_i_mem_prty", + "qm_mem022_i_mem_prty", + "qm_mem020_i_mem_prty", + "qm_mem007_i_mem_prty_0", + "qm_mem007_i_mem_prty_1", + "qm_mem007_i_mem_prty_2", + "qm_mem001_i_mem_prty", + "qm_mem043_i_mem_prty_0", + "qm_mem043_i_mem_prty_1", + "qm_mem043_i_mem_prty_2", + "qm_mem007_i_mem_prty_3", + "qm_mem007_i_mem_prty_4", + "qm_mem007_i_mem_prty_5", + "qm_mem007_i_mem_prty_6", + "qm_mem007_i_mem_prty_7", + "qm_mem007_i_mem_prty_8", + "qm_mem007_i_mem_prty_9", + "qm_mem007_i_mem_prty_10", + "qm_mem007_i_mem_prty_11", + "qm_mem007_i_mem_prty_12", + "qm_mem007_i_mem_prty_13", + "qm_mem007_i_mem_prty_14", + "qm_mem007_i_mem_prty_15", + "qm_mem043_i_mem_prty_3", + "qm_mem043_i_mem_prty_4", + "qm_mem043_i_mem_prty_5", + "qm_mem043_i_mem_prty_6", + "qm_mem043_i_mem_prty_7", +}; +#else +#define qm_prty_attn_desc OSAL_NULL +#endif + +static const u16 qm_prty0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg qm_prty0_bb_a0 = { + 0, 11, qm_prty0_bb_a0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194 +}; + +static const u16 qm_prty1_bb_a0_attn_idx[31] = { + 17, 35, 36, 37, 38, 39, 40, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, +}; + +static struct attn_hw_reg qm_prty1_bb_a0 = { + 1, 31, qm_prty1_bb_a0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204 +}; + +static const u16 qm_prty2_bb_a0_attn_idx[31] = { + 66, 67, 69, 70, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 87, 20, 18, 25, + 27, 32, 24, 26, 41, 31, 29, 28, 30, 23, 88, 89, 90, +}; + +static struct attn_hw_reg qm_prty2_bb_a0 = { + 2, 31, qm_prty2_bb_a0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214 +}; + +static const u16 qm_prty3_bb_a0_attn_idx[11] = { + 104, 105, 106, 107, 108, 33, 16, 34, 19, 72, 71, +}; + +static struct attn_hw_reg qm_prty3_bb_a0 = { + 3, 11, qm_prty3_bb_a0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224 +}; + +static struct attn_hw_reg *qm_prty_bb_a0_regs[4] = { + &qm_prty0_bb_a0, &qm_prty1_bb_a0, &qm_prty2_bb_a0, &qm_prty3_bb_a0, +}; + +static const u16 qm_prty0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg qm_prty0_bb_b0 = { + 0, 11, qm_prty0_bb_b0_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194 +}; + +static const u16 qm_prty1_bb_b0_attn_idx[31] = { + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg qm_prty1_bb_b0 = { + 1, 31, qm_prty1_bb_b0_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204 +}; + +static const u16 qm_prty2_bb_b0_attn_idx[31] = { + 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78, + 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86, +}; + +static struct attn_hw_reg qm_prty2_bb_b0 = { + 2, 31, qm_prty2_bb_b0_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214 +}; + +static const u16 qm_prty3_bb_b0_attn_idx[11] = { + 91, 92, 93, 94, 95, 55, 87, 54, 61, 50, 47, +}; + +static struct attn_hw_reg qm_prty3_bb_b0 = { + 3, 11, qm_prty3_bb_b0_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224 +}; + +static struct attn_hw_reg *qm_prty_bb_b0_regs[4] = { + &qm_prty0_bb_b0, &qm_prty1_bb_b0, &qm_prty2_bb_b0, &qm_prty3_bb_b0, +}; + +static const u16 qm_prty0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + 
+static struct attn_hw_reg qm_prty0_k2 = { + 0, 11, qm_prty0_k2_attn_idx, 0x2f0190, 0x2f019c, 0x2f0198, 0x2f0194 +}; + +static const u16 qm_prty1_k2_attn_idx[31] = { + 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, + 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg qm_prty1_k2 = { + 1, 31, qm_prty1_k2_attn_idx, 0x2f0200, 0x2f020c, 0x2f0208, 0x2f0204 +}; + +static const u16 qm_prty2_k2_attn_idx[31] = { + 66, 67, 68, 69, 70, 71, 72, 73, 74, 58, 60, 62, 49, 75, 76, 53, 77, 78, + 79, 80, 81, 52, 65, 57, 82, 56, 83, 48, 84, 85, 86, +}; + +static struct attn_hw_reg qm_prty2_k2 = { + 2, 31, qm_prty2_k2_attn_idx, 0x2f0210, 0x2f021c, 0x2f0218, 0x2f0214 +}; + +static const u16 qm_prty3_k2_attn_idx[19] = { + 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 55, 87, 54, 61, + 50, 47, +}; + +static struct attn_hw_reg qm_prty3_k2 = { + 3, 19, qm_prty3_k2_attn_idx, 0x2f0220, 0x2f022c, 0x2f0228, 0x2f0224 +}; + +static struct attn_hw_reg *qm_prty_k2_regs[4] = { + &qm_prty0_k2, &qm_prty1_k2, &qm_prty2_k2, &qm_prty3_k2, +}; + +#ifdef ATTN_DESC +static const char *tm_int_attn_desc[43] = { + "tm_address_error", + "tm_pxp_read_data_fifo_ov", + "tm_pxp_read_data_fifo_un", + "tm_pxp_read_ctrl_fifo_ov", + "tm_pxp_read_ctrl_fifo_un", + "tm_cfc_load_command_fifo_ov", + "tm_cfc_load_command_fifo_un", + "tm_cfc_load_echo_fifo_ov", + "tm_cfc_load_echo_fifo_un", + "tm_client_out_fifo_ov", + "tm_client_out_fifo_un", + "tm_ac_command_fifo_ov", + "tm_ac_command_fifo_un", + "tm_client_in_pbf_fifo_ov", + "tm_client_in_pbf_fifo_un", + "tm_client_in_ucm_fifo_ov", + "tm_client_in_ucm_fifo_un", + "tm_client_in_tcm_fifo_ov", + "tm_client_in_tcm_fifo_un", + "tm_client_in_xcm_fifo_ov", + "tm_client_in_xcm_fifo_un", + "tm_expiration_cmd_fifo_ov", + "tm_expiration_cmd_fifo_un", + "tm_stop_all_lc_invalid", + "tm_command_lc_invalid_0", + "tm_command_lc_invalid_1", + "tm_init_command_lc_valid", + "tm_stop_all_exp_lc_valid", + "tm_command_cid_invalid_0", + "tm_reserved_command", + "tm_command_cid_invalid_1", + "tm_cload_res_loaderr_conn", + "tm_cload_res_loadcancel_conn", + "tm_cload_res_validerr_conn", + "tm_context_rd_last", + "tm_context_wr_last", + "tm_pxp_rd_data_eop_bvalid", + "tm_pend_conn_scan", + "tm_pend_task_scan", + "tm_pxp_rd_data_eop_error", + "tm_cload_res_loaderr_task", + "tm_cload_res_loadcancel_task", + "tm_cload_res_validerr_task", +}; +#else +#define tm_int_attn_desc OSAL_NULL +#endif + +static const u16 tm_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tm_int0_bb_a0 = { + 0, 32, tm_int0_bb_a0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184 +}; + +static const u16 tm_int1_bb_a0_attn_idx[11] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, +}; + +static struct attn_hw_reg tm_int1_bb_a0 = { + 1, 11, tm_int1_bb_a0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194 +}; + +static struct attn_hw_reg *tm_int_bb_a0_regs[2] = { + &tm_int0_bb_a0, &tm_int1_bb_a0, +}; + +static const u16 tm_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tm_int0_bb_b0 = { + 0, 32, tm_int0_bb_b0_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184 +}; + +static const u16 tm_int1_bb_b0_attn_idx[11] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, +}; + +static struct attn_hw_reg tm_int1_bb_b0 = { + 1, 11, 
tm_int1_bb_b0_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194 +}; + +static struct attn_hw_reg *tm_int_bb_b0_regs[2] = { + &tm_int0_bb_b0, &tm_int1_bb_b0, +}; + +static const u16 tm_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tm_int0_k2 = { + 0, 32, tm_int0_k2_attn_idx, 0x2c0180, 0x2c018c, 0x2c0188, 0x2c0184 +}; + +static const u16 tm_int1_k2_attn_idx[11] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, +}; + +static struct attn_hw_reg tm_int1_k2 = { + 1, 11, tm_int1_k2_attn_idx, 0x2c0190, 0x2c019c, 0x2c0198, 0x2c0194 +}; + +static struct attn_hw_reg *tm_int_k2_regs[2] = { + &tm_int0_k2, &tm_int1_k2, +}; + +#ifdef ATTN_DESC +static const char *tm_prty_attn_desc[17] = { + "tm_mem012_i_ecc_0_rf_int", + "tm_mem012_i_ecc_1_rf_int", + "tm_mem003_i_ecc_rf_int", + "tm_mem016_i_mem_prty", + "tm_mem007_i_mem_prty", + "tm_mem010_i_mem_prty", + "tm_mem008_i_mem_prty", + "tm_mem009_i_mem_prty", + "tm_mem013_i_mem_prty", + "tm_mem015_i_mem_prty", + "tm_mem014_i_mem_prty", + "tm_mem004_i_mem_prty", + "tm_mem005_i_mem_prty", + "tm_mem006_i_mem_prty", + "tm_mem011_i_mem_prty", + "tm_mem001_i_mem_prty", + "tm_mem002_i_mem_prty", +}; +#else +#define tm_prty_attn_desc OSAL_NULL +#endif + +static const u16 tm_prty1_bb_a0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg tm_prty1_bb_a0 = { + 0, 17, tm_prty1_bb_a0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204 +}; + +static struct attn_hw_reg *tm_prty_bb_a0_regs[1] = { + &tm_prty1_bb_a0, +}; + +static const u16 tm_prty1_bb_b0_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg tm_prty1_bb_b0 = { + 0, 17, tm_prty1_bb_b0_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204 +}; + +static struct attn_hw_reg *tm_prty_bb_b0_regs[1] = { + &tm_prty1_bb_b0, +}; + +static const u16 tm_prty1_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg tm_prty1_k2 = { + 0, 17, tm_prty1_k2_attn_idx, 0x2c0200, 0x2c020c, 0x2c0208, 0x2c0204 +}; + +static struct attn_hw_reg *tm_prty_k2_regs[1] = { + &tm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *dorq_int_attn_desc[9] = { + "dorq_address_error", + "dorq_db_drop", + "dorq_dorq_fifo_ovfl_err", + "dorq_dorq_fifo_afull", + "dorq_cfc_byp_validation_err", + "dorq_cfc_ld_resp_err", + "dorq_xcm_done_cnt_err", + "dorq_cfc_ld_req_fifo_ovfl_err", + "dorq_cfc_ld_req_fifo_under_err", +}; +#else +#define dorq_int_attn_desc OSAL_NULL +#endif + +static const u16 dorq_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg dorq_int0_bb_a0 = { + 0, 9, dorq_int0_bb_a0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184 +}; + +static struct attn_hw_reg *dorq_int_bb_a0_regs[1] = { + &dorq_int0_bb_a0, +}; + +static const u16 dorq_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg dorq_int0_bb_b0 = { + 0, 9, dorq_int0_bb_b0_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184 +}; + +static struct attn_hw_reg *dorq_int_bb_b0_regs[1] = { + &dorq_int0_bb_b0, +}; + +static const u16 dorq_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg dorq_int0_k2 = { + 0, 9, dorq_int0_k2_attn_idx, 0x100180, 0x10018c, 0x100188, 0x100184 +}; + +static struct attn_hw_reg *dorq_int_k2_regs[1] = { + &dorq_int0_k2, +}; + +#ifdef ATTN_DESC +static const char 
*dorq_prty_attn_desc[7] = { + "dorq_datapath_registers", + "dorq_mem002_i_ecc_rf_int", + "dorq_mem001_i_mem_prty", + "dorq_mem003_i_mem_prty", + "dorq_mem004_i_mem_prty", + "dorq_mem005_i_mem_prty", + "dorq_mem006_i_mem_prty", +}; +#else +#define dorq_prty_attn_desc OSAL_NULL +#endif + +static const u16 dorq_prty1_bb_a0_attn_idx[6] = { + 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg dorq_prty1_bb_a0 = { + 0, 6, dorq_prty1_bb_a0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204 +}; + +static struct attn_hw_reg *dorq_prty_bb_a0_regs[1] = { + &dorq_prty1_bb_a0, +}; + +static const u16 dorq_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dorq_prty0_bb_b0 = { + 0, 1, dorq_prty0_bb_b0_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194 +}; + +static const u16 dorq_prty1_bb_b0_attn_idx[6] = { + 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg dorq_prty1_bb_b0 = { + 1, 6, dorq_prty1_bb_b0_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204 +}; + +static struct attn_hw_reg *dorq_prty_bb_b0_regs[2] = { + &dorq_prty0_bb_b0, &dorq_prty1_bb_b0, +}; + +static const u16 dorq_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dorq_prty0_k2 = { + 0, 1, dorq_prty0_k2_attn_idx, 0x100190, 0x10019c, 0x100198, 0x100194 +}; + +static const u16 dorq_prty1_k2_attn_idx[6] = { + 1, 2, 3, 4, 5, 6, +}; + +static struct attn_hw_reg dorq_prty1_k2 = { + 1, 6, dorq_prty1_k2_attn_idx, 0x100200, 0x10020c, 0x100208, 0x100204 +}; + +static struct attn_hw_reg *dorq_prty_k2_regs[2] = { + &dorq_prty0_k2, &dorq_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *brb_int_attn_desc[237] = { + "brb_address_error", + "brb_rc_pkt0_rls_error", + "brb_rc_pkt0_1st_error", + "brb_rc_pkt0_len_error", + "brb_rc_pkt0_middle_error", + "brb_rc_pkt0_protocol_error", + "brb_rc_pkt1_rls_error", + "brb_rc_pkt1_1st_error", + "brb_rc_pkt1_len_error", + "brb_rc_pkt1_middle_error", + "brb_rc_pkt1_protocol_error", + "brb_rc_pkt2_rls_error", + "brb_rc_pkt2_1st_error", + "brb_rc_pkt2_len_error", + "brb_rc_pkt2_middle_error", + "brb_rc_pkt2_protocol_error", + "brb_rc_pkt3_rls_error", + "brb_rc_pkt3_1st_error", + "brb_rc_pkt3_len_error", + "brb_rc_pkt3_middle_error", + "brb_rc_pkt3_protocol_error", + "brb_rc_sop_req_tc_port_error", + "brb_uncomplient_lossless_error", + "brb_wc0_protocol_error", + "brb_wc1_protocol_error", + "brb_wc2_protocol_error", + "brb_wc3_protocol_error", + "brb_ll_arb_prefetch_sop_error", + "brb_ll_blk_error", + "brb_packet_counter_error", + "brb_byte_counter_error", + "brb_mac0_fc_cnt_error", + "brb_mac1_fc_cnt_error", + "brb_ll_arb_calc_error", + "brb_unused_0", + "brb_wc0_inp_fifo_error", + "brb_wc0_sop_fifo_error", + "brb_unused_1", + "brb_wc0_eop_fifo_error", + "brb_wc0_queue_fifo_error", + "brb_wc0_free_point_fifo_error", + "brb_wc0_next_point_fifo_error", + "brb_wc0_strt_fifo_error", + "brb_wc0_second_dscr_fifo_error", + "brb_wc0_pkt_avail_fifo_error", + "brb_wc0_cos_cnt_fifo_error", + "brb_wc0_notify_fifo_error", + "brb_wc0_ll_req_fifo_error", + "brb_wc0_ll_pa_cnt_error", + "brb_wc0_bb_pa_cnt_error", + "brb_wc1_inp_fifo_error", + "brb_wc1_sop_fifo_error", + "brb_wc1_eop_fifo_error", + "brb_wc1_queue_fifo_error", + "brb_wc1_free_point_fifo_error", + "brb_wc1_next_point_fifo_error", + "brb_wc1_strt_fifo_error", + "brb_wc1_second_dscr_fifo_error", + "brb_wc1_pkt_avail_fifo_error", + "brb_wc1_cos_cnt_fifo_error", + "brb_wc1_notify_fifo_error", + "brb_wc1_ll_req_fifo_error", + "brb_wc1_ll_pa_cnt_error", + "brb_wc1_bb_pa_cnt_error", + "brb_wc2_inp_fifo_error", + "brb_wc2_sop_fifo_error", + 
"brb_wc2_eop_fifo_error", + "brb_wc2_queue_fifo_error", + "brb_wc2_free_point_fifo_error", + "brb_wc2_next_point_fifo_error", + "brb_wc2_strt_fifo_error", + "brb_wc2_second_dscr_fifo_error", + "brb_wc2_pkt_avail_fifo_error", + "brb_wc2_cos_cnt_fifo_error", + "brb_wc2_notify_fifo_error", + "brb_wc2_ll_req_fifo_error", + "brb_wc2_ll_pa_cnt_error", + "brb_wc2_bb_pa_cnt_error", + "brb_wc3_inp_fifo_error", + "brb_wc3_sop_fifo_error", + "brb_wc3_eop_fifo_error", + "brb_wc3_queue_fifo_error", + "brb_wc3_free_point_fifo_error", + "brb_wc3_next_point_fifo_error", + "brb_wc3_strt_fifo_error", + "brb_wc3_second_dscr_fifo_error", + "brb_wc3_pkt_avail_fifo_error", + "brb_wc3_cos_cnt_fifo_error", + "brb_wc3_notify_fifo_error", + "brb_wc3_ll_req_fifo_error", + "brb_wc3_ll_pa_cnt_error", + "brb_wc3_bb_pa_cnt_error", + "brb_rc_pkt0_side_fifo_error", + "brb_rc_pkt0_req_fifo_error", + "brb_rc_pkt0_blk_fifo_error", + "brb_rc_pkt0_rls_left_fifo_error", + "brb_rc_pkt0_strt_ptr_fifo_error", + "brb_rc_pkt0_second_ptr_fifo_error", + "brb_rc_pkt0_rsp_fifo_error", + "brb_rc_pkt0_dscr_fifo_error", + "brb_rc_pkt1_side_fifo_error", + "brb_rc_pkt1_req_fifo_error", + "brb_rc_pkt1_blk_fifo_error", + "brb_rc_pkt1_rls_left_fifo_error", + "brb_rc_pkt1_strt_ptr_fifo_error", + "brb_rc_pkt1_second_ptr_fifo_error", + "brb_rc_pkt1_rsp_fifo_error", + "brb_rc_pkt1_dscr_fifo_error", + "brb_rc_pkt2_side_fifo_error", + "brb_rc_pkt2_req_fifo_error", + "brb_rc_pkt2_blk_fifo_error", + "brb_rc_pkt2_rls_left_fifo_error", + "brb_rc_pkt2_strt_ptr_fifo_error", + "brb_rc_pkt2_second_ptr_fifo_error", + "brb_rc_pkt2_rsp_fifo_error", + "brb_rc_pkt2_dscr_fifo_error", + "brb_rc_pkt3_side_fifo_error", + "brb_rc_pkt3_req_fifo_error", + "brb_rc_pkt3_blk_fifo_error", + "brb_rc_pkt3_rls_left_fifo_error", + "brb_rc_pkt3_strt_ptr_fifo_error", + "brb_rc_pkt3_second_ptr_fifo_error", + "brb_rc_pkt3_rsp_fifo_error", + "brb_rc_pkt3_dscr_fifo_error", + "brb_rc_sop_strt_fifo_error", + "brb_rc_sop_req_fifo_error", + "brb_rc_sop_dscr_fifo_error", + "brb_rc_sop_queue_fifo_error", + "brb_rc0_eop_error", + "brb_rc1_eop_error", + "brb_ll_arb_rls_fifo_error", + "brb_ll_arb_prefetch_fifo_error", + "brb_rc_pkt0_rls_fifo_error", + "brb_rc_pkt1_rls_fifo_error", + "brb_rc_pkt2_rls_fifo_error", + "brb_rc_pkt3_rls_fifo_error", + "brb_rc_pkt4_rls_fifo_error", + "brb_rc_pkt4_rls_error", + "brb_rc_pkt4_1st_error", + "brb_rc_pkt4_len_error", + "brb_rc_pkt4_middle_error", + "brb_rc_pkt4_protocol_error", + "brb_rc_pkt4_side_fifo_error", + "brb_rc_pkt4_req_fifo_error", + "brb_rc_pkt4_blk_fifo_error", + "brb_rc_pkt4_rls_left_fifo_error", + "brb_rc_pkt4_strt_ptr_fifo_error", + "brb_rc_pkt4_second_ptr_fifo_error", + "brb_rc_pkt4_rsp_fifo_error", + "brb_rc_pkt4_dscr_fifo_error", + "brb_rc_pkt5_rls_error", + "brb_packet_available_sync_fifo_push_error", + "brb_wc4_protocol_error", + "brb_wc5_protocol_error", + "brb_wc6_protocol_error", + "brb_wc7_protocol_error", + "brb_wc4_inp_fifo_error", + "brb_wc4_sop_fifo_error", + "brb_wc4_queue_fifo_error", + "brb_wc4_free_point_fifo_error", + "brb_wc4_next_point_fifo_error", + "brb_wc4_strt_fifo_error", + "brb_wc4_second_dscr_fifo_error", + "brb_wc4_pkt_avail_fifo_error", + "brb_wc4_cos_cnt_fifo_error", + "brb_wc4_notify_fifo_error", + "brb_wc4_ll_req_fifo_error", + "brb_wc4_ll_pa_cnt_error", + "brb_wc4_bb_pa_cnt_error", + "brb_wc5_inp_fifo_error", + "brb_wc5_sop_fifo_error", + "brb_wc5_queue_fifo_error", + "brb_wc5_free_point_fifo_error", + "brb_wc5_next_point_fifo_error", + "brb_wc5_strt_fifo_error", + "brb_wc5_second_dscr_fifo_error", + 
"brb_wc5_pkt_avail_fifo_error", + "brb_wc5_cos_cnt_fifo_error", + "brb_wc5_notify_fifo_error", + "brb_wc5_ll_req_fifo_error", + "brb_wc5_ll_pa_cnt_error", + "brb_wc5_bb_pa_cnt_error", + "brb_wc6_inp_fifo_error", + "brb_wc6_sop_fifo_error", + "brb_wc6_queue_fifo_error", + "brb_wc6_free_point_fifo_error", + "brb_wc6_next_point_fifo_error", + "brb_wc6_strt_fifo_error", + "brb_wc6_second_dscr_fifo_error", + "brb_wc6_pkt_avail_fifo_error", + "brb_wc6_cos_cnt_fifo_error", + "brb_wc6_notify_fifo_error", + "brb_wc6_ll_req_fifo_error", + "brb_wc6_ll_pa_cnt_error", + "brb_wc6_bb_pa_cnt_error", + "brb_wc7_inp_fifo_error", + "brb_wc7_sop_fifo_error", + "brb_wc7_queue_fifo_error", + "brb_wc7_free_point_fifo_error", + "brb_wc7_next_point_fifo_error", + "brb_wc7_strt_fifo_error", + "brb_wc7_second_dscr_fifo_error", + "brb_wc7_pkt_avail_fifo_error", + "brb_wc7_cos_cnt_fifo_error", + "brb_wc7_notify_fifo_error", + "brb_wc7_ll_req_fifo_error", + "brb_wc7_ll_pa_cnt_error", + "brb_wc7_bb_pa_cnt_error", + "brb_wc9_queue_fifo_error", + "brb_rc_sop_inp_sync_fifo_push_error", + "brb_rc0_inp_sync_fifo_push_error", + "brb_rc1_inp_sync_fifo_push_error", + "brb_rc2_inp_sync_fifo_push_error", + "brb_rc3_inp_sync_fifo_push_error", + "brb_rc0_out_sync_fifo_push_error", + "brb_rc1_out_sync_fifo_push_error", + "brb_rc2_out_sync_fifo_push_error", + "brb_rc3_out_sync_fifo_push_error", + "brb_rc4_out_sync_fifo_push_error", + "brb_unused_2", + "brb_rc0_eop_inp_sync_fifo_push_error", + "brb_rc1_eop_inp_sync_fifo_push_error", + "brb_rc2_eop_inp_sync_fifo_push_error", + "brb_rc3_eop_inp_sync_fifo_push_error", + "brb_rc0_eop_out_sync_fifo_push_error", + "brb_rc1_eop_out_sync_fifo_push_error", + "brb_rc2_eop_out_sync_fifo_push_error", + "brb_rc3_eop_out_sync_fifo_push_error", + "brb_unused_3", + "brb_rc2_eop_error", + "brb_rc3_eop_error", + "brb_mac2_fc_cnt_error", + "brb_mac3_fc_cnt_error", + "brb_wc4_eop_fifo_error", + "brb_wc5_eop_fifo_error", + "brb_wc6_eop_fifo_error", + "brb_wc7_eop_fifo_error", +}; +#else +#define brb_int_attn_desc OSAL_NULL +#endif + +static const u16 brb_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg brb_int0_bb_a0 = { + 0, 32, brb_int0_bb_a0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4 +}; + +static const u16 brb_int1_bb_a0_attn_idx[30] = { + 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +}; + +static struct attn_hw_reg brb_int1_bb_a0 = { + 1, 30, brb_int1_bb_a0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc +}; + +static const u16 brb_int2_bb_a0_attn_idx[28] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, +}; + +static struct attn_hw_reg brb_int2_bb_a0 = { + 2, 28, brb_int2_bb_a0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4 +}; + +static const u16 brb_int3_bb_a0_attn_idx[31] = { + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, +}; + +static struct attn_hw_reg brb_int3_bb_a0 = { + 3, 31, brb_int3_bb_a0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c +}; + +static const u16 brb_int4_bb_a0_attn_idx[27] = { + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, +}; + +static struct attn_hw_reg brb_int4_bb_a0 = { + 4, 27, 
brb_int4_bb_a0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124 +}; + +static const u16 brb_int5_bb_a0_attn_idx[1] = { + 150, +}; + +static struct attn_hw_reg brb_int5_bb_a0 = { + 5, 1, brb_int5_bb_a0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c +}; + +static const u16 brb_int6_bb_a0_attn_idx[8] = { + 151, 152, 153, 154, 155, 156, 157, 158, +}; + +static struct attn_hw_reg brb_int6_bb_a0 = { + 6, 8, brb_int6_bb_a0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154 +}; + +static const u16 brb_int7_bb_a0_attn_idx[32] = { + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, + 190, +}; + +static struct attn_hw_reg brb_int7_bb_a0 = { + 7, 32, brb_int7_bb_a0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c +}; + +static const u16 brb_int8_bb_a0_attn_idx[17] = { + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, + 206, 207, +}; + +static struct attn_hw_reg brb_int8_bb_a0 = { + 8, 17, brb_int8_bb_a0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188 +}; + +static const u16 brb_int9_bb_a0_attn_idx[1] = { + 208, +}; + +static struct attn_hw_reg brb_int9_bb_a0 = { + 9, 1, brb_int9_bb_a0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0 +}; + +static const u16 brb_int10_bb_a0_attn_idx[14] = { + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225, +}; + +static struct attn_hw_reg brb_int10_bb_a0 = { + 10, 14, brb_int10_bb_a0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, + 0x3401b8 +}; + +static const u16 brb_int11_bb_a0_attn_idx[8] = { + 229, 230, 231, 232, 233, 234, 235, 236, +}; + +static struct attn_hw_reg brb_int11_bb_a0 = { + 11, 8, brb_int11_bb_a0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0 +}; + +static struct attn_hw_reg *brb_int_bb_a0_regs[12] = { + &brb_int0_bb_a0, &brb_int1_bb_a0, &brb_int2_bb_a0, &brb_int3_bb_a0, + &brb_int4_bb_a0, &brb_int5_bb_a0, &brb_int6_bb_a0, &brb_int7_bb_a0, + &brb_int8_bb_a0, &brb_int9_bb_a0, + &brb_int10_bb_a0, &brb_int11_bb_a0, +}; + +static const u16 brb_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg brb_int0_bb_b0 = { + 0, 32, brb_int0_bb_b0_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4 +}; + +static const u16 brb_int1_bb_b0_attn_idx[30] = { + 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +}; + +static struct attn_hw_reg brb_int1_bb_b0 = { + 1, 30, brb_int1_bb_b0_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc +}; + +static const u16 brb_int2_bb_b0_attn_idx[28] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, +}; + +static struct attn_hw_reg brb_int2_bb_b0 = { + 2, 28, brb_int2_bb_b0_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4 +}; + +static const u16 brb_int3_bb_b0_attn_idx[31] = { + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, +}; + +static struct attn_hw_reg brb_int3_bb_b0 = { + 3, 31, brb_int3_bb_b0_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c +}; + +static const u16 brb_int4_bb_b0_attn_idx[27] = { + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, +}; + +static struct attn_hw_reg brb_int4_bb_b0 = { + 4, 27, 
brb_int4_bb_b0_attn_idx, 0x340120, 0x34012c, 0x340128, 0x340124 +}; + +static const u16 brb_int5_bb_b0_attn_idx[1] = { + 150, +}; + +static struct attn_hw_reg brb_int5_bb_b0 = { + 5, 1, brb_int5_bb_b0_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c +}; + +static const u16 brb_int6_bb_b0_attn_idx[8] = { + 151, 152, 153, 154, 155, 156, 157, 158, +}; + +static struct attn_hw_reg brb_int6_bb_b0 = { + 6, 8, brb_int6_bb_b0_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154 +}; + +static const u16 brb_int7_bb_b0_attn_idx[32] = { + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, + 190, +}; + +static struct attn_hw_reg brb_int7_bb_b0 = { + 7, 32, brb_int7_bb_b0_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c +}; + +static const u16 brb_int8_bb_b0_attn_idx[17] = { + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, + 206, 207, +}; + +static struct attn_hw_reg brb_int8_bb_b0 = { + 8, 17, brb_int8_bb_b0_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188 +}; + +static const u16 brb_int9_bb_b0_attn_idx[1] = { + 208, +}; + +static struct attn_hw_reg brb_int9_bb_b0 = { + 9, 1, brb_int9_bb_b0_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0 +}; + +static const u16 brb_int10_bb_b0_attn_idx[14] = { + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 224, 225, +}; + +static struct attn_hw_reg brb_int10_bb_b0 = { + 10, 14, brb_int10_bb_b0_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, + 0x3401b8 +}; + +static const u16 brb_int11_bb_b0_attn_idx[8] = { + 229, 230, 231, 232, 233, 234, 235, 236, +}; + +static struct attn_hw_reg brb_int11_bb_b0 = { + 11, 8, brb_int11_bb_b0_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0 +}; + +static struct attn_hw_reg *brb_int_bb_b0_regs[12] = { + &brb_int0_bb_b0, &brb_int1_bb_b0, &brb_int2_bb_b0, &brb_int3_bb_b0, + &brb_int4_bb_b0, &brb_int5_bb_b0, &brb_int6_bb_b0, &brb_int7_bb_b0, + &brb_int8_bb_b0, &brb_int9_bb_b0, + &brb_int10_bb_b0, &brb_int11_bb_b0, +}; + +static const u16 brb_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg brb_int0_k2 = { + 0, 32, brb_int0_k2_attn_idx, 0x3400c0, 0x3400cc, 0x3400c8, 0x3400c4 +}; + +static const u16 brb_int1_k2_attn_idx[30] = { + 32, 33, 35, 36, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, +}; + +static struct attn_hw_reg brb_int1_k2 = { + 1, 30, brb_int1_k2_attn_idx, 0x3400d8, 0x3400e4, 0x3400e0, 0x3400dc +}; + +static const u16 brb_int2_k2_attn_idx[28] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, +}; + +static struct attn_hw_reg brb_int2_k2 = { + 2, 28, brb_int2_k2_attn_idx, 0x3400f0, 0x3400fc, 0x3400f8, 0x3400f4 +}; + +static const u16 brb_int3_k2_attn_idx[31] = { + 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, + 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, + 122, +}; + +static struct attn_hw_reg brb_int3_k2 = { + 3, 31, brb_int3_k2_attn_idx, 0x340108, 0x340114, 0x340110, 0x34010c +}; + +static const u16 brb_int4_k2_attn_idx[27] = { + 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, + 137, + 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, +}; + +static struct attn_hw_reg brb_int4_k2 = { + 4, 27, brb_int4_k2_attn_idx, 0x340120, 0x34012c, 0x340128, 
0x340124 +}; + +static const u16 brb_int5_k2_attn_idx[1] = { + 150, +}; + +static struct attn_hw_reg brb_int5_k2 = { + 5, 1, brb_int5_k2_attn_idx, 0x340138, 0x340144, 0x340140, 0x34013c +}; + +static const u16 brb_int6_k2_attn_idx[8] = { + 151, 152, 153, 154, 155, 156, 157, 158, +}; + +static struct attn_hw_reg brb_int6_k2 = { + 6, 8, brb_int6_k2_attn_idx, 0x340150, 0x34015c, 0x340158, 0x340154 +}; + +static const u16 brb_int7_k2_attn_idx[32] = { + 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, + 173, + 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, + 188, 189, + 190, +}; + +static struct attn_hw_reg brb_int7_k2 = { + 7, 32, brb_int7_k2_attn_idx, 0x340168, 0x340174, 0x340170, 0x34016c +}; + +static const u16 brb_int8_k2_attn_idx[17] = { + 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203, 204, + 205, + 206, 207, +}; + +static struct attn_hw_reg brb_int8_k2 = { + 8, 17, brb_int8_k2_attn_idx, 0x340184, 0x340190, 0x34018c, 0x340188 +}; + +static const u16 brb_int9_k2_attn_idx[1] = { + 208, +}; + +static struct attn_hw_reg brb_int9_k2 = { + 9, 1, brb_int9_k2_attn_idx, 0x34019c, 0x3401a8, 0x3401a4, 0x3401a0 +}; + +static const u16 brb_int10_k2_attn_idx[18] = { + 209, 210, 211, 212, 213, 214, 215, 216, 217, 218, 220, 221, 222, 223, + 224, + 225, 226, 227, +}; + +static struct attn_hw_reg brb_int10_k2 = { + 10, 18, brb_int10_k2_attn_idx, 0x3401b4, 0x3401c0, 0x3401bc, 0x3401b8 +}; + +static const u16 brb_int11_k2_attn_idx[8] = { + 229, 230, 231, 232, 233, 234, 235, 236, +}; + +static struct attn_hw_reg brb_int11_k2 = { + 11, 8, brb_int11_k2_attn_idx, 0x3401cc, 0x3401d8, 0x3401d4, 0x3401d0 +}; + +static struct attn_hw_reg *brb_int_k2_regs[12] = { + &brb_int0_k2, &brb_int1_k2, &brb_int2_k2, &brb_int3_k2, &brb_int4_k2, + &brb_int5_k2, &brb_int6_k2, &brb_int7_k2, &brb_int8_k2, &brb_int9_k2, + &brb_int10_k2, &brb_int11_k2, +}; + +#ifdef ATTN_DESC +static const char *brb_prty_attn_desc[75] = { + "brb_ll_bank0_mem_prty", + "brb_ll_bank1_mem_prty", + "brb_ll_bank2_mem_prty", + "brb_ll_bank3_mem_prty", + "brb_datapath_registers", + "brb_mem001_i_ecc_rf_int", + "brb_mem008_i_ecc_rf_int", + "brb_mem009_i_ecc_rf_int", + "brb_mem010_i_ecc_rf_int", + "brb_mem011_i_ecc_rf_int", + "brb_mem012_i_ecc_rf_int", + "brb_mem013_i_ecc_rf_int", + "brb_mem014_i_ecc_rf_int", + "brb_mem015_i_ecc_rf_int", + "brb_mem016_i_ecc_rf_int", + "brb_mem002_i_ecc_rf_int", + "brb_mem003_i_ecc_rf_int", + "brb_mem004_i_ecc_rf_int", + "brb_mem005_i_ecc_rf_int", + "brb_mem006_i_ecc_rf_int", + "brb_mem007_i_ecc_rf_int", + "brb_mem070_i_mem_prty", + "brb_mem069_i_mem_prty", + "brb_mem053_i_mem_prty", + "brb_mem054_i_mem_prty", + "brb_mem055_i_mem_prty", + "brb_mem056_i_mem_prty", + "brb_mem057_i_mem_prty", + "brb_mem058_i_mem_prty", + "brb_mem059_i_mem_prty", + "brb_mem060_i_mem_prty", + "brb_mem061_i_mem_prty", + "brb_mem062_i_mem_prty", + "brb_mem063_i_mem_prty", + "brb_mem064_i_mem_prty", + "brb_mem065_i_mem_prty", + "brb_mem045_i_mem_prty", + "brb_mem046_i_mem_prty", + "brb_mem047_i_mem_prty", + "brb_mem048_i_mem_prty", + "brb_mem049_i_mem_prty", + "brb_mem050_i_mem_prty", + "brb_mem051_i_mem_prty", + "brb_mem052_i_mem_prty", + "brb_mem041_i_mem_prty", + "brb_mem042_i_mem_prty", + "brb_mem043_i_mem_prty", + "brb_mem044_i_mem_prty", + "brb_mem040_i_mem_prty", + "brb_mem035_i_mem_prty", + "brb_mem066_i_mem_prty", + "brb_mem067_i_mem_prty", + "brb_mem068_i_mem_prty", + "brb_mem030_i_mem_prty", + "brb_mem031_i_mem_prty", + "brb_mem032_i_mem_prty", + "brb_mem033_i_mem_prty", + 
"brb_mem037_i_mem_prty", + "brb_mem038_i_mem_prty", + "brb_mem034_i_mem_prty", + "brb_mem036_i_mem_prty", + "brb_mem017_i_mem_prty", + "brb_mem018_i_mem_prty", + "brb_mem019_i_mem_prty", + "brb_mem020_i_mem_prty", + "brb_mem021_i_mem_prty", + "brb_mem022_i_mem_prty", + "brb_mem023_i_mem_prty", + "brb_mem024_i_mem_prty", + "brb_mem029_i_mem_prty", + "brb_mem026_i_mem_prty", + "brb_mem027_i_mem_prty", + "brb_mem028_i_mem_prty", + "brb_mem025_i_mem_prty", + "brb_mem039_i_mem_prty", +}; +#else +#define brb_prty_attn_desc OSAL_NULL +#endif + +static const u16 brb_prty1_bb_a0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36, + 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 49, +}; + +static struct attn_hw_reg brb_prty1_bb_a0 = { + 0, 31, brb_prty1_bb_a0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404 +}; + +static const u16 brb_prty2_bb_a0_attn_idx[19] = { + 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 69, 70, 71, 72, 73, 74, + 48, +}; + +static struct attn_hw_reg brb_prty2_bb_a0 = { + 1, 19, brb_prty2_bb_a0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414 +}; + +static struct attn_hw_reg *brb_prty_bb_a0_regs[2] = { + &brb_prty1_bb_a0, &brb_prty2_bb_a0, +}; + +static const u16 brb_prty0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg brb_prty0_bb_b0 = { + 0, 5, brb_prty0_bb_b0_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0 +}; + +static const u16 brb_prty1_bb_b0_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 23, 24, 36, + 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, +}; + +static struct attn_hw_reg brb_prty1_bb_b0 = { + 1, 31, brb_prty1_bb_b0_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404 +}; + +static const u16 brb_prty2_bb_b0_attn_idx[14] = { + 53, 54, 55, 56, 59, 61, 62, 63, 64, 69, 70, 71, 72, 73, +}; + +static struct attn_hw_reg brb_prty2_bb_b0 = { + 2, 14, brb_prty2_bb_b0_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414 +}; + +static struct attn_hw_reg *brb_prty_bb_b0_regs[3] = { + &brb_prty0_bb_b0, &brb_prty1_bb_b0, &brb_prty2_bb_b0, +}; + +static const u16 brb_prty0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg brb_prty0_k2 = { + 0, 5, brb_prty0_k2_attn_idx, 0x3401dc, 0x3401e8, 0x3401e4, 0x3401e0 +}; + +static const u16 brb_prty1_k2_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg brb_prty1_k2 = { + 1, 31, brb_prty1_k2_attn_idx, 0x340400, 0x34040c, 0x340408, 0x340404 +}; + +static const u16 brb_prty2_k2_attn_idx[30] = { + 50, 51, 52, 36, 37, 38, 39, 40, 41, 42, 43, 47, 53, 54, 55, 56, 57, 58, + 59, 49, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, +}; + +static struct attn_hw_reg brb_prty2_k2 = { + 2, 30, brb_prty2_k2_attn_idx, 0x340410, 0x34041c, 0x340418, 0x340414 +}; + +static struct attn_hw_reg *brb_prty_k2_regs[3] = { + &brb_prty0_k2, &brb_prty1_k2, &brb_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *src_int_attn_desc[1] = { + "src_address_error", +}; +#else +#define src_int_attn_desc OSAL_NULL +#endif + +static const u16 src_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg src_int0_bb_a0 = { + 0, 1, src_int0_bb_a0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4 +}; + +static struct attn_hw_reg *src_int_bb_a0_regs[1] = { + &src_int0_bb_a0, +}; + +static const u16 src_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg src_int0_bb_b0 = { + 0, 1, src_int0_bb_b0_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 
0x2381e4 +}; + +static struct attn_hw_reg *src_int_bb_b0_regs[1] = { + &src_int0_bb_b0, +}; + +static const u16 src_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg src_int0_k2 = { + 0, 1, src_int0_k2_attn_idx, 0x2381d8, 0x2381dc, 0x2381e0, 0x2381e4 +}; + +static struct attn_hw_reg *src_int_k2_regs[1] = { + &src_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *prs_int_attn_desc[2] = { + "prs_address_error", + "prs_lcid_validation_err", +}; +#else +#define prs_int_attn_desc OSAL_NULL +#endif + +static const u16 prs_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_int0_bb_a0 = { + 0, 2, prs_int0_bb_a0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044 +}; + +static struct attn_hw_reg *prs_int_bb_a0_regs[1] = { + &prs_int0_bb_a0, +}; + +static const u16 prs_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_int0_bb_b0 = { + 0, 2, prs_int0_bb_b0_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044 +}; + +static struct attn_hw_reg *prs_int_bb_b0_regs[1] = { + &prs_int0_bb_b0, +}; + +static const u16 prs_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_int0_k2 = { + 0, 2, prs_int0_k2_attn_idx, 0x1f0040, 0x1f004c, 0x1f0048, 0x1f0044 +}; + +static struct attn_hw_reg *prs_int_k2_regs[1] = { + &prs_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *prs_prty_attn_desc[75] = { + "prs_cam_parity", + "prs_gft_cam_parity", + "prs_mem011_i_ecc_rf_int", + "prs_mem012_i_ecc_rf_int", + "prs_mem016_i_ecc_rf_int", + "prs_mem017_i_ecc_rf_int", + "prs_mem021_i_ecc_rf_int", + "prs_mem022_i_ecc_rf_int", + "prs_mem026_i_ecc_rf_int", + "prs_mem027_i_ecc_rf_int", + "prs_mem064_i_mem_prty", + "prs_mem044_i_mem_prty", + "prs_mem043_i_mem_prty", + "prs_mem037_i_mem_prty", + "prs_mem033_i_mem_prty", + "prs_mem034_i_mem_prty", + "prs_mem035_i_mem_prty", + "prs_mem036_i_mem_prty", + "prs_mem029_i_mem_prty", + "prs_mem030_i_mem_prty", + "prs_mem031_i_mem_prty", + "prs_mem032_i_mem_prty", + "prs_mem007_i_mem_prty", + "prs_mem028_i_mem_prty", + "prs_mem039_i_mem_prty", + "prs_mem040_i_mem_prty", + "prs_mem058_i_mem_prty", + "prs_mem059_i_mem_prty", + "prs_mem041_i_mem_prty", + "prs_mem042_i_mem_prty", + "prs_mem060_i_mem_prty", + "prs_mem061_i_mem_prty", + "prs_mem009_i_mem_prty", + "prs_mem009_i_ecc_rf_int", + "prs_mem010_i_ecc_rf_int", + "prs_mem014_i_ecc_rf_int", + "prs_mem015_i_ecc_rf_int", + "prs_mem026_i_mem_prty", + "prs_mem025_i_mem_prty", + "prs_mem021_i_mem_prty", + "prs_mem019_i_mem_prty", + "prs_mem020_i_mem_prty", + "prs_mem017_i_mem_prty", + "prs_mem018_i_mem_prty", + "prs_mem005_i_mem_prty", + "prs_mem016_i_mem_prty", + "prs_mem023_i_mem_prty", + "prs_mem024_i_mem_prty", + "prs_mem008_i_mem_prty", + "prs_mem012_i_mem_prty", + "prs_mem013_i_mem_prty", + "prs_mem006_i_mem_prty", + "prs_mem011_i_mem_prty", + "prs_mem003_i_mem_prty", + "prs_mem004_i_mem_prty", + "prs_mem027_i_mem_prty", + "prs_mem010_i_mem_prty", + "prs_mem014_i_mem_prty", + "prs_mem015_i_mem_prty", + "prs_mem054_i_mem_prty", + "prs_mem055_i_mem_prty", + "prs_mem056_i_mem_prty", + "prs_mem057_i_mem_prty", + "prs_mem046_i_mem_prty", + "prs_mem047_i_mem_prty", + "prs_mem048_i_mem_prty", + "prs_mem049_i_mem_prty", + "prs_mem050_i_mem_prty", + "prs_mem051_i_mem_prty", + "prs_mem052_i_mem_prty", + "prs_mem053_i_mem_prty", + "prs_mem062_i_mem_prty", + "prs_mem045_i_mem_prty", + "prs_mem002_i_mem_prty", + "prs_mem001_i_mem_prty", +}; +#else +#define prs_prty_attn_desc OSAL_NULL +#endif + +static const u16 prs_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg 
prs_prty0_bb_a0 = { + 0, 1, prs_prty0_bb_a0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054 +}; + +static const u16 prs_prty1_bb_a0_attn_idx[31] = { + 13, 14, 15, 16, 18, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, + 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, +}; + +static struct attn_hw_reg prs_prty1_bb_a0 = { + 1, 31, prs_prty1_bb_a0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208 +}; + +static const u16 prs_prty2_bb_a0_attn_idx[5] = { + 73, 74, 20, 17, 19, +}; + +static struct attn_hw_reg prs_prty2_bb_a0 = { + 2, 5, prs_prty2_bb_a0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218 +}; + +static struct attn_hw_reg *prs_prty_bb_a0_regs[3] = { + &prs_prty0_bb_a0, &prs_prty1_bb_a0, &prs_prty2_bb_a0, +}; + +static const u16 prs_prty0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_prty0_bb_b0 = { + 0, 2, prs_prty0_bb_b0_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054 +}; + +static const u16 prs_prty1_bb_b0_attn_idx[31] = { + 13, 14, 15, 16, 18, 19, 21, 22, 23, 33, 34, 35, 36, 37, 38, 39, 40, 41, + 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, +}; + +static struct attn_hw_reg prs_prty1_bb_b0 = { + 1, 31, prs_prty1_bb_b0_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208 +}; + +static const u16 prs_prty2_bb_b0_attn_idx[5] = { + 73, 74, 20, 17, 55, +}; + +static struct attn_hw_reg prs_prty2_bb_b0 = { + 2, 5, prs_prty2_bb_b0_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218 +}; + +static struct attn_hw_reg *prs_prty_bb_b0_regs[3] = { + &prs_prty0_bb_b0, &prs_prty1_bb_b0, &prs_prty2_bb_b0, +}; + +static const u16 prs_prty0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg prs_prty0_k2 = { + 0, 2, prs_prty0_k2_attn_idx, 0x1f0050, 0x1f005c, 0x1f0058, 0x1f0054 +}; + +static const u16 prs_prty1_k2_attn_idx[31] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, +}; + +static struct attn_hw_reg prs_prty1_k2 = { + 1, 31, prs_prty1_k2_attn_idx, 0x1f0204, 0x1f0210, 0x1f020c, 0x1f0208 +}; + +static const u16 prs_prty2_k2_attn_idx[31] = { + 56, 57, 58, 40, 41, 47, 38, 48, 50, 43, 46, 59, 60, 61, 62, 53, 54, 44, + 51, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, +}; + +static struct attn_hw_reg prs_prty2_k2 = { + 2, 31, prs_prty2_k2_attn_idx, 0x1f0214, 0x1f0220, 0x1f021c, 0x1f0218 +}; + +static struct attn_hw_reg *prs_prty_k2_regs[3] = { + &prs_prty0_k2, &prs_prty1_k2, &prs_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *tsdm_int_attn_desc[28] = { + "tsdm_address_error", + "tsdm_inp_queue_error", + "tsdm_delay_fifo_error", + "tsdm_async_host_error", + "tsdm_prm_fifo_error", + "tsdm_ccfc_load_pend_error", + "tsdm_tcfc_load_pend_error", + "tsdm_dst_int_ram_wait_error", + "tsdm_dst_pas_buf_wait_error", + "tsdm_dst_pxp_immed_error", + "tsdm_dst_pxp_dst_pend_error", + "tsdm_dst_brb_src_pend_error", + "tsdm_dst_brb_src_addr_error", + "tsdm_rsp_brb_pend_error", + "tsdm_rsp_int_ram_pend_error", + "tsdm_rsp_brb_rd_data_error", + "tsdm_rsp_int_ram_rd_data_error", + "tsdm_rsp_pxp_rd_data_error", + "tsdm_cm_delay_error", + "tsdm_sh_delay_error", + "tsdm_cmpl_pend_error", + "tsdm_cprm_pend_error", + "tsdm_timer_addr_error", + "tsdm_timer_pend_error", + "tsdm_dorq_dpm_error", + "tsdm_dst_pxp_done_error", + "tsdm_xcm_rmt_buffer_error", + "tsdm_ycm_rmt_buffer_error", +}; +#else +#define tsdm_int_attn_desc OSAL_NULL +#endif + +static const u16 tsdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct 
attn_hw_reg tsdm_int0_bb_a0 = { + 0, 26, tsdm_int0_bb_a0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044 +}; + +static struct attn_hw_reg *tsdm_int_bb_a0_regs[1] = { + &tsdm_int0_bb_a0, +}; + +static const u16 tsdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg tsdm_int0_bb_b0 = { + 0, 26, tsdm_int0_bb_b0_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044 +}; + +static struct attn_hw_reg *tsdm_int_bb_b0_regs[1] = { + &tsdm_int0_bb_b0, +}; + +static const u16 tsdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg tsdm_int0_k2 = { + 0, 28, tsdm_int0_k2_attn_idx, 0xfb0040, 0xfb004c, 0xfb0048, 0xfb0044 +}; + +static struct attn_hw_reg *tsdm_int_k2_regs[1] = { + &tsdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tsdm_prty_attn_desc[10] = { + "tsdm_mem009_i_mem_prty", + "tsdm_mem008_i_mem_prty", + "tsdm_mem007_i_mem_prty", + "tsdm_mem006_i_mem_prty", + "tsdm_mem005_i_mem_prty", + "tsdm_mem002_i_mem_prty", + "tsdm_mem010_i_mem_prty", + "tsdm_mem001_i_mem_prty", + "tsdm_mem003_i_mem_prty", + "tsdm_mem004_i_mem_prty", +}; +#else +#define tsdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 tsdm_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tsdm_prty1_bb_a0 = { + 0, 10, tsdm_prty1_bb_a0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, + 0xfb0204 +}; + +static struct attn_hw_reg *tsdm_prty_bb_a0_regs[1] = { + &tsdm_prty1_bb_a0, +}; + +static const u16 tsdm_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tsdm_prty1_bb_b0 = { + 0, 10, tsdm_prty1_bb_b0_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, + 0xfb0204 +}; + +static struct attn_hw_reg *tsdm_prty_bb_b0_regs[1] = { + &tsdm_prty1_bb_b0, +}; + +static const u16 tsdm_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tsdm_prty1_k2 = { + 0, 10, tsdm_prty1_k2_attn_idx, 0xfb0200, 0xfb020c, 0xfb0208, 0xfb0204 +}; + +static struct attn_hw_reg *tsdm_prty_k2_regs[1] = { + &tsdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *msdm_int_attn_desc[28] = { + "msdm_address_error", + "msdm_inp_queue_error", + "msdm_delay_fifo_error", + "msdm_async_host_error", + "msdm_prm_fifo_error", + "msdm_ccfc_load_pend_error", + "msdm_tcfc_load_pend_error", + "msdm_dst_int_ram_wait_error", + "msdm_dst_pas_buf_wait_error", + "msdm_dst_pxp_immed_error", + "msdm_dst_pxp_dst_pend_error", + "msdm_dst_brb_src_pend_error", + "msdm_dst_brb_src_addr_error", + "msdm_rsp_brb_pend_error", + "msdm_rsp_int_ram_pend_error", + "msdm_rsp_brb_rd_data_error", + "msdm_rsp_int_ram_rd_data_error", + "msdm_rsp_pxp_rd_data_error", + "msdm_cm_delay_error", + "msdm_sh_delay_error", + "msdm_cmpl_pend_error", + "msdm_cprm_pend_error", + "msdm_timer_addr_error", + "msdm_timer_pend_error", + "msdm_dorq_dpm_error", + "msdm_dst_pxp_done_error", + "msdm_xcm_rmt_buffer_error", + "msdm_ycm_rmt_buffer_error", +}; +#else +#define msdm_int_attn_desc OSAL_NULL +#endif + +static const u16 msdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg msdm_int0_bb_a0 = { + 0, 26, msdm_int0_bb_a0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044 +}; + +static struct attn_hw_reg *msdm_int_bb_a0_regs[1] = { + &msdm_int0_bb_a0, +}; + +static const u16 
msdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg msdm_int0_bb_b0 = { + 0, 26, msdm_int0_bb_b0_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044 +}; + +static struct attn_hw_reg *msdm_int_bb_b0_regs[1] = { + &msdm_int0_bb_b0, +}; + +static const u16 msdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg msdm_int0_k2 = { + 0, 28, msdm_int0_k2_attn_idx, 0xfc0040, 0xfc004c, 0xfc0048, 0xfc0044 +}; + +static struct attn_hw_reg *msdm_int_k2_regs[1] = { + &msdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *msdm_prty_attn_desc[11] = { + "msdm_mem009_i_mem_prty", + "msdm_mem008_i_mem_prty", + "msdm_mem007_i_mem_prty", + "msdm_mem006_i_mem_prty", + "msdm_mem005_i_mem_prty", + "msdm_mem002_i_mem_prty", + "msdm_mem011_i_mem_prty", + "msdm_mem001_i_mem_prty", + "msdm_mem003_i_mem_prty", + "msdm_mem004_i_mem_prty", + "msdm_mem010_i_mem_prty", +}; +#else +#define msdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 msdm_prty1_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg msdm_prty1_bb_a0 = { + 0, 11, msdm_prty1_bb_a0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, + 0xfc0204 +}; + +static struct attn_hw_reg *msdm_prty_bb_a0_regs[1] = { + &msdm_prty1_bb_a0, +}; + +static const u16 msdm_prty1_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg msdm_prty1_bb_b0 = { + 0, 11, msdm_prty1_bb_b0_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, + 0xfc0204 +}; + +static struct attn_hw_reg *msdm_prty_bb_b0_regs[1] = { + &msdm_prty1_bb_b0, +}; + +static const u16 msdm_prty1_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg msdm_prty1_k2 = { + 0, 11, msdm_prty1_k2_attn_idx, 0xfc0200, 0xfc020c, 0xfc0208, 0xfc0204 +}; + +static struct attn_hw_reg *msdm_prty_k2_regs[1] = { + &msdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *usdm_int_attn_desc[28] = { + "usdm_address_error", + "usdm_inp_queue_error", + "usdm_delay_fifo_error", + "usdm_async_host_error", + "usdm_prm_fifo_error", + "usdm_ccfc_load_pend_error", + "usdm_tcfc_load_pend_error", + "usdm_dst_int_ram_wait_error", + "usdm_dst_pas_buf_wait_error", + "usdm_dst_pxp_immed_error", + "usdm_dst_pxp_dst_pend_error", + "usdm_dst_brb_src_pend_error", + "usdm_dst_brb_src_addr_error", + "usdm_rsp_brb_pend_error", + "usdm_rsp_int_ram_pend_error", + "usdm_rsp_brb_rd_data_error", + "usdm_rsp_int_ram_rd_data_error", + "usdm_rsp_pxp_rd_data_error", + "usdm_cm_delay_error", + "usdm_sh_delay_error", + "usdm_cmpl_pend_error", + "usdm_cprm_pend_error", + "usdm_timer_addr_error", + "usdm_timer_pend_error", + "usdm_dorq_dpm_error", + "usdm_dst_pxp_done_error", + "usdm_xcm_rmt_buffer_error", + "usdm_ycm_rmt_buffer_error", +}; +#else +#define usdm_int_attn_desc OSAL_NULL +#endif + +static const u16 usdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg usdm_int0_bb_a0 = { + 0, 26, usdm_int0_bb_a0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044 +}; + +static struct attn_hw_reg *usdm_int_bb_a0_regs[1] = { + &usdm_int0_bb_a0, +}; + +static const u16 usdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg usdm_int0_bb_b0 
= { + 0, 26, usdm_int0_bb_b0_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044 +}; + +static struct attn_hw_reg *usdm_int_bb_b0_regs[1] = { + &usdm_int0_bb_b0, +}; + +static const u16 usdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg usdm_int0_k2 = { + 0, 28, usdm_int0_k2_attn_idx, 0xfd0040, 0xfd004c, 0xfd0048, 0xfd0044 +}; + +static struct attn_hw_reg *usdm_int_k2_regs[1] = { + &usdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *usdm_prty_attn_desc[10] = { + "usdm_mem008_i_mem_prty", + "usdm_mem007_i_mem_prty", + "usdm_mem006_i_mem_prty", + "usdm_mem005_i_mem_prty", + "usdm_mem002_i_mem_prty", + "usdm_mem010_i_mem_prty", + "usdm_mem001_i_mem_prty", + "usdm_mem003_i_mem_prty", + "usdm_mem004_i_mem_prty", + "usdm_mem009_i_mem_prty", +}; +#else +#define usdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 usdm_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg usdm_prty1_bb_a0 = { + 0, 10, usdm_prty1_bb_a0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, + 0xfd0204 +}; + +static struct attn_hw_reg *usdm_prty_bb_a0_regs[1] = { + &usdm_prty1_bb_a0, +}; + +static const u16 usdm_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg usdm_prty1_bb_b0 = { + 0, 10, usdm_prty1_bb_b0_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, + 0xfd0204 +}; + +static struct attn_hw_reg *usdm_prty_bb_b0_regs[1] = { + &usdm_prty1_bb_b0, +}; + +static const u16 usdm_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg usdm_prty1_k2 = { + 0, 10, usdm_prty1_k2_attn_idx, 0xfd0200, 0xfd020c, 0xfd0208, 0xfd0204 +}; + +static struct attn_hw_reg *usdm_prty_k2_regs[1] = { + &usdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *xsdm_int_attn_desc[28] = { + "xsdm_address_error", + "xsdm_inp_queue_error", + "xsdm_delay_fifo_error", + "xsdm_async_host_error", + "xsdm_prm_fifo_error", + "xsdm_ccfc_load_pend_error", + "xsdm_tcfc_load_pend_error", + "xsdm_dst_int_ram_wait_error", + "xsdm_dst_pas_buf_wait_error", + "xsdm_dst_pxp_immed_error", + "xsdm_dst_pxp_dst_pend_error", + "xsdm_dst_brb_src_pend_error", + "xsdm_dst_brb_src_addr_error", + "xsdm_rsp_brb_pend_error", + "xsdm_rsp_int_ram_pend_error", + "xsdm_rsp_brb_rd_data_error", + "xsdm_rsp_int_ram_rd_data_error", + "xsdm_rsp_pxp_rd_data_error", + "xsdm_cm_delay_error", + "xsdm_sh_delay_error", + "xsdm_cmpl_pend_error", + "xsdm_cprm_pend_error", + "xsdm_timer_addr_error", + "xsdm_timer_pend_error", + "xsdm_dorq_dpm_error", + "xsdm_dst_pxp_done_error", + "xsdm_xcm_rmt_buffer_error", + "xsdm_ycm_rmt_buffer_error", +}; +#else +#define xsdm_int_attn_desc OSAL_NULL +#endif + +static const u16 xsdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg xsdm_int0_bb_a0 = { + 0, 26, xsdm_int0_bb_a0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044 +}; + +static struct attn_hw_reg *xsdm_int_bb_a0_regs[1] = { + &xsdm_int0_bb_a0, +}; + +static const u16 xsdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg xsdm_int0_bb_b0 = { + 0, 26, xsdm_int0_bb_b0_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044 +}; + +static struct attn_hw_reg *xsdm_int_bb_b0_regs[1] = { + &xsdm_int0_bb_b0, +}; + +static const u16 xsdm_int0_k2_attn_idx[28] = { + 0, 1, 
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg xsdm_int0_k2 = { + 0, 28, xsdm_int0_k2_attn_idx, 0xf80040, 0xf8004c, 0xf80048, 0xf80044 +}; + +static struct attn_hw_reg *xsdm_int_k2_regs[1] = { + &xsdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *xsdm_prty_attn_desc[10] = { + "xsdm_mem009_i_mem_prty", + "xsdm_mem008_i_mem_prty", + "xsdm_mem007_i_mem_prty", + "xsdm_mem006_i_mem_prty", + "xsdm_mem003_i_mem_prty", + "xsdm_mem010_i_mem_prty", + "xsdm_mem002_i_mem_prty", + "xsdm_mem004_i_mem_prty", + "xsdm_mem005_i_mem_prty", + "xsdm_mem001_i_mem_prty", +}; +#else +#define xsdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 xsdm_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsdm_prty1_bb_a0 = { + 0, 10, xsdm_prty1_bb_a0_attn_idx, 0xf80200, 0xf8020c, 0xf80208, + 0xf80204 +}; + +static struct attn_hw_reg *xsdm_prty_bb_a0_regs[1] = { + &xsdm_prty1_bb_a0, +}; + +static const u16 xsdm_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsdm_prty1_bb_b0 = { + 0, 10, xsdm_prty1_bb_b0_attn_idx, 0xf80200, 0xf8020c, 0xf80208, + 0xf80204 +}; + +static struct attn_hw_reg *xsdm_prty_bb_b0_regs[1] = { + &xsdm_prty1_bb_b0, +}; + +static const u16 xsdm_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsdm_prty1_k2 = { + 0, 10, xsdm_prty1_k2_attn_idx, 0xf80200, 0xf8020c, 0xf80208, 0xf80204 +}; + +static struct attn_hw_reg *xsdm_prty_k2_regs[1] = { + &xsdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ysdm_int_attn_desc[28] = { + "ysdm_address_error", + "ysdm_inp_queue_error", + "ysdm_delay_fifo_error", + "ysdm_async_host_error", + "ysdm_prm_fifo_error", + "ysdm_ccfc_load_pend_error", + "ysdm_tcfc_load_pend_error", + "ysdm_dst_int_ram_wait_error", + "ysdm_dst_pas_buf_wait_error", + "ysdm_dst_pxp_immed_error", + "ysdm_dst_pxp_dst_pend_error", + "ysdm_dst_brb_src_pend_error", + "ysdm_dst_brb_src_addr_error", + "ysdm_rsp_brb_pend_error", + "ysdm_rsp_int_ram_pend_error", + "ysdm_rsp_brb_rd_data_error", + "ysdm_rsp_int_ram_rd_data_error", + "ysdm_rsp_pxp_rd_data_error", + "ysdm_cm_delay_error", + "ysdm_sh_delay_error", + "ysdm_cmpl_pend_error", + "ysdm_cprm_pend_error", + "ysdm_timer_addr_error", + "ysdm_timer_pend_error", + "ysdm_dorq_dpm_error", + "ysdm_dst_pxp_done_error", + "ysdm_xcm_rmt_buffer_error", + "ysdm_ycm_rmt_buffer_error", +}; +#else +#define ysdm_int_attn_desc OSAL_NULL +#endif + +static const u16 ysdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg ysdm_int0_bb_a0 = { + 0, 26, ysdm_int0_bb_a0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044 +}; + +static struct attn_hw_reg *ysdm_int_bb_a0_regs[1] = { + &ysdm_int0_bb_a0, +}; + +static const u16 ysdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg ysdm_int0_bb_b0 = { + 0, 26, ysdm_int0_bb_b0_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044 +}; + +static struct attn_hw_reg *ysdm_int_bb_b0_regs[1] = { + &ysdm_int0_bb_b0, +}; + +static const u16 ysdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg ysdm_int0_k2 = { + 0, 28, ysdm_int0_k2_attn_idx, 0xf90040, 0xf9004c, 0xf90048, 0xf90044 
+}; + +static struct attn_hw_reg *ysdm_int_k2_regs[1] = { + &ysdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ysdm_prty_attn_desc[9] = { + "ysdm_mem008_i_mem_prty", + "ysdm_mem007_i_mem_prty", + "ysdm_mem006_i_mem_prty", + "ysdm_mem005_i_mem_prty", + "ysdm_mem002_i_mem_prty", + "ysdm_mem009_i_mem_prty", + "ysdm_mem001_i_mem_prty", + "ysdm_mem003_i_mem_prty", + "ysdm_mem004_i_mem_prty", +}; +#else +#define ysdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 ysdm_prty1_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg ysdm_prty1_bb_a0 = { + 0, 9, ysdm_prty1_bb_a0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204 +}; + +static struct attn_hw_reg *ysdm_prty_bb_a0_regs[1] = { + &ysdm_prty1_bb_a0, +}; + +static const u16 ysdm_prty1_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg ysdm_prty1_bb_b0 = { + 0, 9, ysdm_prty1_bb_b0_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204 +}; + +static struct attn_hw_reg *ysdm_prty_bb_b0_regs[1] = { + &ysdm_prty1_bb_b0, +}; + +static const u16 ysdm_prty1_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg ysdm_prty1_k2 = { + 0, 9, ysdm_prty1_k2_attn_idx, 0xf90200, 0xf9020c, 0xf90208, 0xf90204 +}; + +static struct attn_hw_reg *ysdm_prty_k2_regs[1] = { + &ysdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *psdm_int_attn_desc[28] = { + "psdm_address_error", + "psdm_inp_queue_error", + "psdm_delay_fifo_error", + "psdm_async_host_error", + "psdm_prm_fifo_error", + "psdm_ccfc_load_pend_error", + "psdm_tcfc_load_pend_error", + "psdm_dst_int_ram_wait_error", + "psdm_dst_pas_buf_wait_error", + "psdm_dst_pxp_immed_error", + "psdm_dst_pxp_dst_pend_error", + "psdm_dst_brb_src_pend_error", + "psdm_dst_brb_src_addr_error", + "psdm_rsp_brb_pend_error", + "psdm_rsp_int_ram_pend_error", + "psdm_rsp_brb_rd_data_error", + "psdm_rsp_int_ram_rd_data_error", + "psdm_rsp_pxp_rd_data_error", + "psdm_cm_delay_error", + "psdm_sh_delay_error", + "psdm_cmpl_pend_error", + "psdm_cprm_pend_error", + "psdm_timer_addr_error", + "psdm_timer_pend_error", + "psdm_dorq_dpm_error", + "psdm_dst_pxp_done_error", + "psdm_xcm_rmt_buffer_error", + "psdm_ycm_rmt_buffer_error", +}; +#else +#define psdm_int_attn_desc OSAL_NULL +#endif + +static const u16 psdm_int0_bb_a0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg psdm_int0_bb_a0 = { + 0, 26, psdm_int0_bb_a0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044 +}; + +static struct attn_hw_reg *psdm_int_bb_a0_regs[1] = { + &psdm_int0_bb_a0, +}; + +static const u16 psdm_int0_bb_b0_attn_idx[26] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, +}; + +static struct attn_hw_reg psdm_int0_bb_b0 = { + 0, 26, psdm_int0_bb_b0_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044 +}; + +static struct attn_hw_reg *psdm_int_bb_b0_regs[1] = { + &psdm_int0_bb_b0, +}; + +static const u16 psdm_int0_k2_attn_idx[28] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, +}; + +static struct attn_hw_reg psdm_int0_k2 = { + 0, 28, psdm_int0_k2_attn_idx, 0xfa0040, 0xfa004c, 0xfa0048, 0xfa0044 +}; + +static struct attn_hw_reg *psdm_int_k2_regs[1] = { + &psdm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *psdm_prty_attn_desc[9] = { + "psdm_mem008_i_mem_prty", + "psdm_mem007_i_mem_prty", + "psdm_mem006_i_mem_prty", + "psdm_mem005_i_mem_prty", + 
"psdm_mem002_i_mem_prty", + "psdm_mem009_i_mem_prty", + "psdm_mem001_i_mem_prty", + "psdm_mem003_i_mem_prty", + "psdm_mem004_i_mem_prty", +}; +#else +#define psdm_prty_attn_desc OSAL_NULL +#endif + +static const u16 psdm_prty1_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psdm_prty1_bb_a0 = { + 0, 9, psdm_prty1_bb_a0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204 +}; + +static struct attn_hw_reg *psdm_prty_bb_a0_regs[1] = { + &psdm_prty1_bb_a0, +}; + +static const u16 psdm_prty1_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psdm_prty1_bb_b0 = { + 0, 9, psdm_prty1_bb_b0_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204 +}; + +static struct attn_hw_reg *psdm_prty_bb_b0_regs[1] = { + &psdm_prty1_bb_b0, +}; + +static const u16 psdm_prty1_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psdm_prty1_k2 = { + 0, 9, psdm_prty1_k2_attn_idx, 0xfa0200, 0xfa020c, 0xfa0208, 0xfa0204 +}; + +static struct attn_hw_reg *psdm_prty_k2_regs[1] = { + &psdm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *tsem_int_attn_desc[46] = { + "tsem_address_error", + "tsem_fic_last_error", + "tsem_fic_length_error", + "tsem_fic_fifo_error", + "tsem_pas_buf_fifo_error", + "tsem_sync_fin_pop_error", + "tsem_sync_dra_wr_push_error", + "tsem_sync_dra_wr_pop_error", + "tsem_sync_dra_rd_push_error", + "tsem_sync_dra_rd_pop_error", + "tsem_sync_fin_push_error", + "tsem_sem_fast_address_error", + "tsem_cam_lsb_inp_fifo", + "tsem_cam_msb_inp_fifo", + "tsem_cam_out_fifo", + "tsem_fin_fifo", + "tsem_thread_fifo_error", + "tsem_thread_overrun", + "tsem_sync_ext_store_push_error", + "tsem_sync_ext_store_pop_error", + "tsem_sync_ext_load_push_error", + "tsem_sync_ext_load_pop_error", + "tsem_sync_ram_rd_push_error", + "tsem_sync_ram_rd_pop_error", + "tsem_sync_ram_wr_pop_error", + "tsem_sync_ram_wr_push_error", + "tsem_sync_dbg_push_error", + "tsem_sync_dbg_pop_error", + "tsem_dbg_fifo_error", + "tsem_cam_msb2_inp_fifo", + "tsem_vfc_interrupt", + "tsem_vfc_out_fifo_error", + "tsem_storm_stack_uf_attn", + "tsem_storm_stack_of_attn", + "tsem_storm_runtime_error", + "tsem_ext_load_pend_wr_error", + "tsem_thread_rls_orun_error", + "tsem_thread_rls_aloc_error", + "tsem_thread_rls_vld_error", + "tsem_ext_thread_oor_error", + "tsem_ord_id_fifo_error", + "tsem_invld_foc_error", + "tsem_ext_ld_len_error", + "tsem_thrd_ord_fifo_error", + "tsem_invld_thrd_ord_error", + "tsem_fast_memory_address_error", +}; +#else +#define tsem_int_attn_desc OSAL_NULL +#endif + +static const u16 tsem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tsem_int0_bb_a0 = { + 0, 32, tsem_int0_bb_a0_attn_idx, 0x1700040, 0x170004c, 0x1700048, + 0x1700044 +}; + +static const u16 tsem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg tsem_int1_bb_a0 = { + 1, 13, tsem_int1_bb_a0_attn_idx, 0x1700050, 0x170005c, 0x1700058, + 0x1700054 +}; + +static const u16 tsem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg tsem_fast_memory_int0_bb_a0 = { + 2, 1, tsem_fast_memory_int0_bb_a0_attn_idx, 0x1740040, 0x174004c, + 0x1740048, 0x1740044 +}; + +static struct attn_hw_reg *tsem_int_bb_a0_regs[3] = { + &tsem_int0_bb_a0, &tsem_int1_bb_a0, &tsem_fast_memory_int0_bb_a0, +}; + +static const u16 tsem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tsem_int0_bb_b0 = { + 0, 32, tsem_int0_bb_b0_attn_idx, 0x1700040, 0x170004c, 0x1700048, + 0x1700044 +}; + +static const u16 tsem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg tsem_int1_bb_b0 = { + 1, 13, tsem_int1_bb_b0_attn_idx, 0x1700050, 0x170005c, 0x1700058, + 0x1700054 +}; + +static const u16 tsem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg tsem_fast_memory_int0_bb_b0 = { + 2, 1, tsem_fast_memory_int0_bb_b0_attn_idx, 0x1740040, 0x174004c, + 0x1740048, 0x1740044 +}; + +static struct attn_hw_reg *tsem_int_bb_b0_regs[3] = { + &tsem_int0_bb_b0, &tsem_int1_bb_b0, &tsem_fast_memory_int0_bb_b0, +}; + +static const u16 tsem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg tsem_int0_k2 = { + 0, 32, tsem_int0_k2_attn_idx, 0x1700040, 0x170004c, 0x1700048, + 0x1700044 +}; + +static const u16 tsem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg tsem_int1_k2 = { + 1, 13, tsem_int1_k2_attn_idx, 0x1700050, 0x170005c, 0x1700058, + 0x1700054 +}; + +static const u16 tsem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg tsem_fast_memory_int0_k2 = { + 2, 1, tsem_fast_memory_int0_k2_attn_idx, 0x1740040, 0x174004c, + 0x1740048, + 0x1740044 +}; + +static struct attn_hw_reg *tsem_int_k2_regs[3] = { + &tsem_int0_k2, &tsem_int1_k2, &tsem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tsem_prty_attn_desc[23] = { + "tsem_vfc_rbc_parity_error", + "tsem_storm_rf_parity_error", + "tsem_reg_gen_parity_error", + "tsem_mem005_i_ecc_0_rf_int", + "tsem_mem005_i_ecc_1_rf_int", + "tsem_mem004_i_mem_prty", + "tsem_mem002_i_mem_prty", + "tsem_mem003_i_mem_prty", + "tsem_mem001_i_mem_prty", + "tsem_fast_memory_mem024_i_mem_prty", + "tsem_fast_memory_mem023_i_mem_prty", + "tsem_fast_memory_mem022_i_mem_prty", + "tsem_fast_memory_mem021_i_mem_prty", + "tsem_fast_memory_mem020_i_mem_prty", + "tsem_fast_memory_mem019_i_mem_prty", + "tsem_fast_memory_mem018_i_mem_prty", + "tsem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "tsem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "tsem_fast_memory_vfc_config_mem006_i_mem_prty", + "tsem_fast_memory_vfc_config_mem001_i_mem_prty", + "tsem_fast_memory_vfc_config_mem004_i_mem_prty", + "tsem_fast_memory_vfc_config_mem003_i_mem_prty", + "tsem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define tsem_prty_attn_desc OSAL_NULL +#endif + +static const u16 tsem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg tsem_prty0_bb_a0 = { + 0, 3, tsem_prty0_bb_a0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0, + 0x17000cc +}; + +static const u16 tsem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tsem_prty1_bb_a0 = { + 1, 6, tsem_prty1_bb_a0_attn_idx, 0x1700200, 0x170020c, 0x1700208, + 0x1700204 +}; + +static const u16 tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_a0 = { + 2, 6, tsem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x174a200, + 0x174a20c, 0x174a208, 0x174a204 +}; + +static struct attn_hw_reg *tsem_prty_bb_a0_regs[3] = { + &tsem_prty0_bb_a0, &tsem_prty1_bb_a0, + 
&tsem_fast_memory_vfc_config_prty1_bb_a0, +}; + +static const u16 tsem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg tsem_prty0_bb_b0 = { + 0, 3, tsem_prty0_bb_b0_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0, + 0x17000cc +}; + +static const u16 tsem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tsem_prty1_bb_b0 = { + 1, 6, tsem_prty1_bb_b0_attn_idx, 0x1700200, 0x170020c, 0x1700208, + 0x1700204 +}; + +static const u16 tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, tsem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x174a200, + 0x174a20c, 0x174a208, 0x174a204 +}; + +static struct attn_hw_reg *tsem_prty_bb_b0_regs[3] = { + &tsem_prty0_bb_b0, &tsem_prty1_bb_b0, + &tsem_fast_memory_vfc_config_prty1_bb_b0, +}; + +static const u16 tsem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg tsem_prty0_k2 = { + 0, 3, tsem_prty0_k2_attn_idx, 0x17000c8, 0x17000d4, 0x17000d0, + 0x17000cc +}; + +static const u16 tsem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tsem_prty1_k2 = { + 1, 6, tsem_prty1_k2_attn_idx, 0x1700200, 0x170020c, 0x1700208, + 0x1700204 +}; + +static const u16 tsem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg tsem_fast_memory_prty1_k2 = { + 2, 7, tsem_fast_memory_prty1_k2_attn_idx, 0x1740200, 0x174020c, + 0x1740208, + 0x1740204 +}; + +static const u16 tsem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = { + 16, 17, 18, 19, 20, 21, +}; + +static struct attn_hw_reg tsem_fast_memory_vfc_config_prty1_k2 = { + 3, 6, tsem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x174a200, + 0x174a20c, + 0x174a208, 0x174a204 +}; + +static struct attn_hw_reg *tsem_prty_k2_regs[4] = { + &tsem_prty0_k2, &tsem_prty1_k2, &tsem_fast_memory_prty1_k2, + &tsem_fast_memory_vfc_config_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *msem_int_attn_desc[46] = { + "msem_address_error", + "msem_fic_last_error", + "msem_fic_length_error", + "msem_fic_fifo_error", + "msem_pas_buf_fifo_error", + "msem_sync_fin_pop_error", + "msem_sync_dra_wr_push_error", + "msem_sync_dra_wr_pop_error", + "msem_sync_dra_rd_push_error", + "msem_sync_dra_rd_pop_error", + "msem_sync_fin_push_error", + "msem_sem_fast_address_error", + "msem_cam_lsb_inp_fifo", + "msem_cam_msb_inp_fifo", + "msem_cam_out_fifo", + "msem_fin_fifo", + "msem_thread_fifo_error", + "msem_thread_overrun", + "msem_sync_ext_store_push_error", + "msem_sync_ext_store_pop_error", + "msem_sync_ext_load_push_error", + "msem_sync_ext_load_pop_error", + "msem_sync_ram_rd_push_error", + "msem_sync_ram_rd_pop_error", + "msem_sync_ram_wr_pop_error", + "msem_sync_ram_wr_push_error", + "msem_sync_dbg_push_error", + "msem_sync_dbg_pop_error", + "msem_dbg_fifo_error", + "msem_cam_msb2_inp_fifo", + "msem_vfc_interrupt", + "msem_vfc_out_fifo_error", + "msem_storm_stack_uf_attn", + "msem_storm_stack_of_attn", + "msem_storm_runtime_error", + "msem_ext_load_pend_wr_error", + "msem_thread_rls_orun_error", + "msem_thread_rls_aloc_error", + "msem_thread_rls_vld_error", + "msem_ext_thread_oor_error", + "msem_ord_id_fifo_error", + "msem_invld_foc_error", + "msem_ext_ld_len_error", + "msem_thrd_ord_fifo_error", + "msem_invld_thrd_ord_error", + "msem_fast_memory_address_error", +}; +#else +#define msem_int_attn_desc OSAL_NULL +#endif + +static const u16 msem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 
11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg msem_int0_bb_a0 = { + 0, 32, msem_int0_bb_a0_attn_idx, 0x1800040, 0x180004c, 0x1800048, + 0x1800044 +}; + +static const u16 msem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg msem_int1_bb_a0 = { + 1, 13, msem_int1_bb_a0_attn_idx, 0x1800050, 0x180005c, 0x1800058, + 0x1800054 +}; + +static const u16 msem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg msem_fast_memory_int0_bb_a0 = { + 2, 1, msem_fast_memory_int0_bb_a0_attn_idx, 0x1840040, 0x184004c, + 0x1840048, 0x1840044 +}; + +static struct attn_hw_reg *msem_int_bb_a0_regs[3] = { + &msem_int0_bb_a0, &msem_int1_bb_a0, &msem_fast_memory_int0_bb_a0, +}; + +static const u16 msem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg msem_int0_bb_b0 = { + 0, 32, msem_int0_bb_b0_attn_idx, 0x1800040, 0x180004c, 0x1800048, + 0x1800044 +}; + +static const u16 msem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg msem_int1_bb_b0 = { + 1, 13, msem_int1_bb_b0_attn_idx, 0x1800050, 0x180005c, 0x1800058, + 0x1800054 +}; + +static const u16 msem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg msem_fast_memory_int0_bb_b0 = { + 2, 1, msem_fast_memory_int0_bb_b0_attn_idx, 0x1840040, 0x184004c, + 0x1840048, 0x1840044 +}; + +static struct attn_hw_reg *msem_int_bb_b0_regs[3] = { + &msem_int0_bb_b0, &msem_int1_bb_b0, &msem_fast_memory_int0_bb_b0, +}; + +static const u16 msem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg msem_int0_k2 = { + 0, 32, msem_int0_k2_attn_idx, 0x1800040, 0x180004c, 0x1800048, + 0x1800044 +}; + +static const u16 msem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg msem_int1_k2 = { + 1, 13, msem_int1_k2_attn_idx, 0x1800050, 0x180005c, 0x1800058, + 0x1800054 +}; + +static const u16 msem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg msem_fast_memory_int0_k2 = { + 2, 1, msem_fast_memory_int0_k2_attn_idx, 0x1840040, 0x184004c, + 0x1840048, + 0x1840044 +}; + +static struct attn_hw_reg *msem_int_k2_regs[3] = { + &msem_int0_k2, &msem_int1_k2, &msem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *msem_prty_attn_desc[23] = { + "msem_vfc_rbc_parity_error", + "msem_storm_rf_parity_error", + "msem_reg_gen_parity_error", + "msem_mem005_i_ecc_0_rf_int", + "msem_mem005_i_ecc_1_rf_int", + "msem_mem004_i_mem_prty", + "msem_mem002_i_mem_prty", + "msem_mem003_i_mem_prty", + "msem_mem001_i_mem_prty", + "msem_fast_memory_mem024_i_mem_prty", + "msem_fast_memory_mem023_i_mem_prty", + "msem_fast_memory_mem022_i_mem_prty", + "msem_fast_memory_mem021_i_mem_prty", + "msem_fast_memory_mem020_i_mem_prty", + "msem_fast_memory_mem019_i_mem_prty", + "msem_fast_memory_mem018_i_mem_prty", + "msem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "msem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "msem_fast_memory_vfc_config_mem006_i_mem_prty", + "msem_fast_memory_vfc_config_mem001_i_mem_prty", + "msem_fast_memory_vfc_config_mem004_i_mem_prty", + "msem_fast_memory_vfc_config_mem003_i_mem_prty", + 
"msem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define msem_prty_attn_desc OSAL_NULL +#endif + +static const u16 msem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg msem_prty0_bb_a0 = { + 0, 3, msem_prty0_bb_a0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0, + 0x18000cc +}; + +static const u16 msem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg msem_prty1_bb_a0 = { + 1, 6, msem_prty1_bb_a0_attn_idx, 0x1800200, 0x180020c, 0x1800208, + 0x1800204 +}; + +static struct attn_hw_reg *msem_prty_bb_a0_regs[2] = { + &msem_prty0_bb_a0, &msem_prty1_bb_a0, +}; + +static const u16 msem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg msem_prty0_bb_b0 = { + 0, 3, msem_prty0_bb_b0_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0, + 0x18000cc +}; + +static const u16 msem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg msem_prty1_bb_b0 = { + 1, 6, msem_prty1_bb_b0_attn_idx, 0x1800200, 0x180020c, 0x1800208, + 0x1800204 +}; + +static struct attn_hw_reg *msem_prty_bb_b0_regs[2] = { + &msem_prty0_bb_b0, &msem_prty1_bb_b0, +}; + +static const u16 msem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg msem_prty0_k2 = { + 0, 3, msem_prty0_k2_attn_idx, 0x18000c8, 0x18000d4, 0x18000d0, + 0x18000cc +}; + +static const u16 msem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg msem_prty1_k2 = { + 1, 6, msem_prty1_k2_attn_idx, 0x1800200, 0x180020c, 0x1800208, + 0x1800204 +}; + +static const u16 msem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg msem_fast_memory_prty1_k2 = { + 2, 7, msem_fast_memory_prty1_k2_attn_idx, 0x1840200, 0x184020c, + 0x1840208, + 0x1840204 +}; + +static struct attn_hw_reg *msem_prty_k2_regs[3] = { + &msem_prty0_k2, &msem_prty1_k2, &msem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *usem_int_attn_desc[46] = { + "usem_address_error", + "usem_fic_last_error", + "usem_fic_length_error", + "usem_fic_fifo_error", + "usem_pas_buf_fifo_error", + "usem_sync_fin_pop_error", + "usem_sync_dra_wr_push_error", + "usem_sync_dra_wr_pop_error", + "usem_sync_dra_rd_push_error", + "usem_sync_dra_rd_pop_error", + "usem_sync_fin_push_error", + "usem_sem_fast_address_error", + "usem_cam_lsb_inp_fifo", + "usem_cam_msb_inp_fifo", + "usem_cam_out_fifo", + "usem_fin_fifo", + "usem_thread_fifo_error", + "usem_thread_overrun", + "usem_sync_ext_store_push_error", + "usem_sync_ext_store_pop_error", + "usem_sync_ext_load_push_error", + "usem_sync_ext_load_pop_error", + "usem_sync_ram_rd_push_error", + "usem_sync_ram_rd_pop_error", + "usem_sync_ram_wr_pop_error", + "usem_sync_ram_wr_push_error", + "usem_sync_dbg_push_error", + "usem_sync_dbg_pop_error", + "usem_dbg_fifo_error", + "usem_cam_msb2_inp_fifo", + "usem_vfc_interrupt", + "usem_vfc_out_fifo_error", + "usem_storm_stack_uf_attn", + "usem_storm_stack_of_attn", + "usem_storm_runtime_error", + "usem_ext_load_pend_wr_error", + "usem_thread_rls_orun_error", + "usem_thread_rls_aloc_error", + "usem_thread_rls_vld_error", + "usem_ext_thread_oor_error", + "usem_ord_id_fifo_error", + "usem_invld_foc_error", + "usem_ext_ld_len_error", + "usem_thrd_ord_fifo_error", + "usem_invld_thrd_ord_error", + "usem_fast_memory_address_error", +}; +#else +#define usem_int_attn_desc OSAL_NULL +#endif + +static const u16 usem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 
29, 30, 31, +}; + +static struct attn_hw_reg usem_int0_bb_a0 = { + 0, 32, usem_int0_bb_a0_attn_idx, 0x1900040, 0x190004c, 0x1900048, + 0x1900044 +}; + +static const u16 usem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg usem_int1_bb_a0 = { + 1, 13, usem_int1_bb_a0_attn_idx, 0x1900050, 0x190005c, 0x1900058, + 0x1900054 +}; + +static const u16 usem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg usem_fast_memory_int0_bb_a0 = { + 2, 1, usem_fast_memory_int0_bb_a0_attn_idx, 0x1940040, 0x194004c, + 0x1940048, 0x1940044 +}; + +static struct attn_hw_reg *usem_int_bb_a0_regs[3] = { + &usem_int0_bb_a0, &usem_int1_bb_a0, &usem_fast_memory_int0_bb_a0, +}; + +static const u16 usem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg usem_int0_bb_b0 = { + 0, 32, usem_int0_bb_b0_attn_idx, 0x1900040, 0x190004c, 0x1900048, + 0x1900044 +}; + +static const u16 usem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg usem_int1_bb_b0 = { + 1, 13, usem_int1_bb_b0_attn_idx, 0x1900050, 0x190005c, 0x1900058, + 0x1900054 +}; + +static const u16 usem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg usem_fast_memory_int0_bb_b0 = { + 2, 1, usem_fast_memory_int0_bb_b0_attn_idx, 0x1940040, 0x194004c, + 0x1940048, 0x1940044 +}; + +static struct attn_hw_reg *usem_int_bb_b0_regs[3] = { + &usem_int0_bb_b0, &usem_int1_bb_b0, &usem_fast_memory_int0_bb_b0, +}; + +static const u16 usem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg usem_int0_k2 = { + 0, 32, usem_int0_k2_attn_idx, 0x1900040, 0x190004c, 0x1900048, + 0x1900044 +}; + +static const u16 usem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg usem_int1_k2 = { + 1, 13, usem_int1_k2_attn_idx, 0x1900050, 0x190005c, 0x1900058, + 0x1900054 +}; + +static const u16 usem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg usem_fast_memory_int0_k2 = { + 2, 1, usem_fast_memory_int0_k2_attn_idx, 0x1940040, 0x194004c, + 0x1940048, + 0x1940044 +}; + +static struct attn_hw_reg *usem_int_k2_regs[3] = { + &usem_int0_k2, &usem_int1_k2, &usem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *usem_prty_attn_desc[23] = { + "usem_vfc_rbc_parity_error", + "usem_storm_rf_parity_error", + "usem_reg_gen_parity_error", + "usem_mem005_i_ecc_0_rf_int", + "usem_mem005_i_ecc_1_rf_int", + "usem_mem004_i_mem_prty", + "usem_mem002_i_mem_prty", + "usem_mem003_i_mem_prty", + "usem_mem001_i_mem_prty", + "usem_fast_memory_mem024_i_mem_prty", + "usem_fast_memory_mem023_i_mem_prty", + "usem_fast_memory_mem022_i_mem_prty", + "usem_fast_memory_mem021_i_mem_prty", + "usem_fast_memory_mem020_i_mem_prty", + "usem_fast_memory_mem019_i_mem_prty", + "usem_fast_memory_mem018_i_mem_prty", + "usem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "usem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "usem_fast_memory_vfc_config_mem006_i_mem_prty", + "usem_fast_memory_vfc_config_mem001_i_mem_prty", + "usem_fast_memory_vfc_config_mem004_i_mem_prty", + "usem_fast_memory_vfc_config_mem003_i_mem_prty", + "usem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define 
usem_prty_attn_desc OSAL_NULL +#endif + +static const u16 usem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg usem_prty0_bb_a0 = { + 0, 3, usem_prty0_bb_a0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0, + 0x19000cc +}; + +static const u16 usem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg usem_prty1_bb_a0 = { + 1, 6, usem_prty1_bb_a0_attn_idx, 0x1900200, 0x190020c, 0x1900208, + 0x1900204 +}; + +static struct attn_hw_reg *usem_prty_bb_a0_regs[2] = { + &usem_prty0_bb_a0, &usem_prty1_bb_a0, +}; + +static const u16 usem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg usem_prty0_bb_b0 = { + 0, 3, usem_prty0_bb_b0_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0, + 0x19000cc +}; + +static const u16 usem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg usem_prty1_bb_b0 = { + 1, 6, usem_prty1_bb_b0_attn_idx, 0x1900200, 0x190020c, 0x1900208, + 0x1900204 +}; + +static struct attn_hw_reg *usem_prty_bb_b0_regs[2] = { + &usem_prty0_bb_b0, &usem_prty1_bb_b0, +}; + +static const u16 usem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg usem_prty0_k2 = { + 0, 3, usem_prty0_k2_attn_idx, 0x19000c8, 0x19000d4, 0x19000d0, + 0x19000cc +}; + +static const u16 usem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg usem_prty1_k2 = { + 1, 6, usem_prty1_k2_attn_idx, 0x1900200, 0x190020c, 0x1900208, + 0x1900204 +}; + +static const u16 usem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg usem_fast_memory_prty1_k2 = { + 2, 7, usem_fast_memory_prty1_k2_attn_idx, 0x1940200, 0x194020c, + 0x1940208, + 0x1940204 +}; + +static struct attn_hw_reg *usem_prty_k2_regs[3] = { + &usem_prty0_k2, &usem_prty1_k2, &usem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *xsem_int_attn_desc[46] = { + "xsem_address_error", + "xsem_fic_last_error", + "xsem_fic_length_error", + "xsem_fic_fifo_error", + "xsem_pas_buf_fifo_error", + "xsem_sync_fin_pop_error", + "xsem_sync_dra_wr_push_error", + "xsem_sync_dra_wr_pop_error", + "xsem_sync_dra_rd_push_error", + "xsem_sync_dra_rd_pop_error", + "xsem_sync_fin_push_error", + "xsem_sem_fast_address_error", + "xsem_cam_lsb_inp_fifo", + "xsem_cam_msb_inp_fifo", + "xsem_cam_out_fifo", + "xsem_fin_fifo", + "xsem_thread_fifo_error", + "xsem_thread_overrun", + "xsem_sync_ext_store_push_error", + "xsem_sync_ext_store_pop_error", + "xsem_sync_ext_load_push_error", + "xsem_sync_ext_load_pop_error", + "xsem_sync_ram_rd_push_error", + "xsem_sync_ram_rd_pop_error", + "xsem_sync_ram_wr_pop_error", + "xsem_sync_ram_wr_push_error", + "xsem_sync_dbg_push_error", + "xsem_sync_dbg_pop_error", + "xsem_dbg_fifo_error", + "xsem_cam_msb2_inp_fifo", + "xsem_vfc_interrupt", + "xsem_vfc_out_fifo_error", + "xsem_storm_stack_uf_attn", + "xsem_storm_stack_of_attn", + "xsem_storm_runtime_error", + "xsem_ext_load_pend_wr_error", + "xsem_thread_rls_orun_error", + "xsem_thread_rls_aloc_error", + "xsem_thread_rls_vld_error", + "xsem_ext_thread_oor_error", + "xsem_ord_id_fifo_error", + "xsem_invld_foc_error", + "xsem_ext_ld_len_error", + "xsem_thrd_ord_fifo_error", + "xsem_invld_thrd_ord_error", + "xsem_fast_memory_address_error", +}; +#else +#define xsem_int_attn_desc OSAL_NULL +#endif + +static const u16 xsem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg xsem_int0_bb_a0 = { + 0, 
32, xsem_int0_bb_a0_attn_idx, 0x1400040, 0x140004c, 0x1400048, + 0x1400044 +}; + +static const u16 xsem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg xsem_int1_bb_a0 = { + 1, 13, xsem_int1_bb_a0_attn_idx, 0x1400050, 0x140005c, 0x1400058, + 0x1400054 +}; + +static const u16 xsem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg xsem_fast_memory_int0_bb_a0 = { + 2, 1, xsem_fast_memory_int0_bb_a0_attn_idx, 0x1440040, 0x144004c, + 0x1440048, 0x1440044 +}; + +static struct attn_hw_reg *xsem_int_bb_a0_regs[3] = { + &xsem_int0_bb_a0, &xsem_int1_bb_a0, &xsem_fast_memory_int0_bb_a0, +}; + +static const u16 xsem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg xsem_int0_bb_b0 = { + 0, 32, xsem_int0_bb_b0_attn_idx, 0x1400040, 0x140004c, 0x1400048, + 0x1400044 +}; + +static const u16 xsem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg xsem_int1_bb_b0 = { + 1, 13, xsem_int1_bb_b0_attn_idx, 0x1400050, 0x140005c, 0x1400058, + 0x1400054 +}; + +static const u16 xsem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg xsem_fast_memory_int0_bb_b0 = { + 2, 1, xsem_fast_memory_int0_bb_b0_attn_idx, 0x1440040, 0x144004c, + 0x1440048, 0x1440044 +}; + +static struct attn_hw_reg *xsem_int_bb_b0_regs[3] = { + &xsem_int0_bb_b0, &xsem_int1_bb_b0, &xsem_fast_memory_int0_bb_b0, +}; + +static const u16 xsem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg xsem_int0_k2 = { + 0, 32, xsem_int0_k2_attn_idx, 0x1400040, 0x140004c, 0x1400048, + 0x1400044 +}; + +static const u16 xsem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg xsem_int1_k2 = { + 1, 13, xsem_int1_k2_attn_idx, 0x1400050, 0x140005c, 0x1400058, + 0x1400054 +}; + +static const u16 xsem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg xsem_fast_memory_int0_k2 = { + 2, 1, xsem_fast_memory_int0_k2_attn_idx, 0x1440040, 0x144004c, + 0x1440048, + 0x1440044 +}; + +static struct attn_hw_reg *xsem_int_k2_regs[3] = { + &xsem_int0_k2, &xsem_int1_k2, &xsem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *xsem_prty_attn_desc[24] = { + "xsem_vfc_rbc_parity_error", + "xsem_storm_rf_parity_error", + "xsem_reg_gen_parity_error", + "xsem_mem006_i_ecc_0_rf_int", + "xsem_mem006_i_ecc_1_rf_int", + "xsem_mem005_i_mem_prty", + "xsem_mem002_i_mem_prty", + "xsem_mem004_i_mem_prty", + "xsem_mem003_i_mem_prty", + "xsem_mem001_i_mem_prty", + "xsem_fast_memory_mem024_i_mem_prty", + "xsem_fast_memory_mem023_i_mem_prty", + "xsem_fast_memory_mem022_i_mem_prty", + "xsem_fast_memory_mem021_i_mem_prty", + "xsem_fast_memory_mem020_i_mem_prty", + "xsem_fast_memory_mem019_i_mem_prty", + "xsem_fast_memory_mem018_i_mem_prty", + "xsem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "xsem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "xsem_fast_memory_vfc_config_mem006_i_mem_prty", + "xsem_fast_memory_vfc_config_mem001_i_mem_prty", + "xsem_fast_memory_vfc_config_mem004_i_mem_prty", + "xsem_fast_memory_vfc_config_mem003_i_mem_prty", + "xsem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define xsem_prty_attn_desc OSAL_NULL +#endif + +static 
const u16 xsem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg xsem_prty0_bb_a0 = { + 0, 3, xsem_prty0_bb_a0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0, + 0x14000cc +}; + +static const u16 xsem_prty1_bb_a0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsem_prty1_bb_a0 = { + 1, 7, xsem_prty1_bb_a0_attn_idx, 0x1400200, 0x140020c, 0x1400208, + 0x1400204 +}; + +static struct attn_hw_reg *xsem_prty_bb_a0_regs[2] = { + &xsem_prty0_bb_a0, &xsem_prty1_bb_a0, +}; + +static const u16 xsem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg xsem_prty0_bb_b0 = { + 0, 3, xsem_prty0_bb_b0_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0, + 0x14000cc +}; + +static const u16 xsem_prty1_bb_b0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsem_prty1_bb_b0 = { + 1, 7, xsem_prty1_bb_b0_attn_idx, 0x1400200, 0x140020c, 0x1400208, + 0x1400204 +}; + +static struct attn_hw_reg *xsem_prty_bb_b0_regs[2] = { + &xsem_prty0_bb_b0, &xsem_prty1_bb_b0, +}; + +static const u16 xsem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg xsem_prty0_k2 = { + 0, 3, xsem_prty0_k2_attn_idx, 0x14000c8, 0x14000d4, 0x14000d0, + 0x14000cc +}; + +static const u16 xsem_prty1_k2_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg xsem_prty1_k2 = { + 1, 7, xsem_prty1_k2_attn_idx, 0x1400200, 0x140020c, 0x1400208, + 0x1400204 +}; + +static const u16 xsem_fast_memory_prty1_k2_attn_idx[7] = { + 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg xsem_fast_memory_prty1_k2 = { + 2, 7, xsem_fast_memory_prty1_k2_attn_idx, 0x1440200, 0x144020c, + 0x1440208, + 0x1440204 +}; + +static struct attn_hw_reg *xsem_prty_k2_regs[3] = { + &xsem_prty0_k2, &xsem_prty1_k2, &xsem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ysem_int_attn_desc[46] = { + "ysem_address_error", + "ysem_fic_last_error", + "ysem_fic_length_error", + "ysem_fic_fifo_error", + "ysem_pas_buf_fifo_error", + "ysem_sync_fin_pop_error", + "ysem_sync_dra_wr_push_error", + "ysem_sync_dra_wr_pop_error", + "ysem_sync_dra_rd_push_error", + "ysem_sync_dra_rd_pop_error", + "ysem_sync_fin_push_error", + "ysem_sem_fast_address_error", + "ysem_cam_lsb_inp_fifo", + "ysem_cam_msb_inp_fifo", + "ysem_cam_out_fifo", + "ysem_fin_fifo", + "ysem_thread_fifo_error", + "ysem_thread_overrun", + "ysem_sync_ext_store_push_error", + "ysem_sync_ext_store_pop_error", + "ysem_sync_ext_load_push_error", + "ysem_sync_ext_load_pop_error", + "ysem_sync_ram_rd_push_error", + "ysem_sync_ram_rd_pop_error", + "ysem_sync_ram_wr_pop_error", + "ysem_sync_ram_wr_push_error", + "ysem_sync_dbg_push_error", + "ysem_sync_dbg_pop_error", + "ysem_dbg_fifo_error", + "ysem_cam_msb2_inp_fifo", + "ysem_vfc_interrupt", + "ysem_vfc_out_fifo_error", + "ysem_storm_stack_uf_attn", + "ysem_storm_stack_of_attn", + "ysem_storm_runtime_error", + "ysem_ext_load_pend_wr_error", + "ysem_thread_rls_orun_error", + "ysem_thread_rls_aloc_error", + "ysem_thread_rls_vld_error", + "ysem_ext_thread_oor_error", + "ysem_ord_id_fifo_error", + "ysem_invld_foc_error", + "ysem_ext_ld_len_error", + "ysem_thrd_ord_fifo_error", + "ysem_invld_thrd_ord_error", + "ysem_fast_memory_address_error", +}; +#else +#define ysem_int_attn_desc OSAL_NULL +#endif + +static const u16 ysem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg ysem_int0_bb_a0 = { + 0, 32, ysem_int0_bb_a0_attn_idx, 
0x1500040, 0x150004c, 0x1500048, + 0x1500044 +}; + +static const u16 ysem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg ysem_int1_bb_a0 = { + 1, 13, ysem_int1_bb_a0_attn_idx, 0x1500050, 0x150005c, 0x1500058, + 0x1500054 +}; + +static const u16 ysem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg ysem_fast_memory_int0_bb_a0 = { + 2, 1, ysem_fast_memory_int0_bb_a0_attn_idx, 0x1540040, 0x154004c, + 0x1540048, 0x1540044 +}; + +static struct attn_hw_reg *ysem_int_bb_a0_regs[3] = { + &ysem_int0_bb_a0, &ysem_int1_bb_a0, &ysem_fast_memory_int0_bb_a0, +}; + +static const u16 ysem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg ysem_int0_bb_b0 = { + 0, 32, ysem_int0_bb_b0_attn_idx, 0x1500040, 0x150004c, 0x1500048, + 0x1500044 +}; + +static const u16 ysem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg ysem_int1_bb_b0 = { + 1, 13, ysem_int1_bb_b0_attn_idx, 0x1500050, 0x150005c, 0x1500058, + 0x1500054 +}; + +static const u16 ysem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg ysem_fast_memory_int0_bb_b0 = { + 2, 1, ysem_fast_memory_int0_bb_b0_attn_idx, 0x1540040, 0x154004c, + 0x1540048, 0x1540044 +}; + +static struct attn_hw_reg *ysem_int_bb_b0_regs[3] = { + &ysem_int0_bb_b0, &ysem_int1_bb_b0, &ysem_fast_memory_int0_bb_b0, +}; + +static const u16 ysem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg ysem_int0_k2 = { + 0, 32, ysem_int0_k2_attn_idx, 0x1500040, 0x150004c, 0x1500048, + 0x1500044 +}; + +static const u16 ysem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg ysem_int1_k2 = { + 1, 13, ysem_int1_k2_attn_idx, 0x1500050, 0x150005c, 0x1500058, + 0x1500054 +}; + +static const u16 ysem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg ysem_fast_memory_int0_k2 = { + 2, 1, ysem_fast_memory_int0_k2_attn_idx, 0x1540040, 0x154004c, + 0x1540048, + 0x1540044 +}; + +static struct attn_hw_reg *ysem_int_k2_regs[3] = { + &ysem_int0_k2, &ysem_int1_k2, &ysem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ysem_prty_attn_desc[24] = { + "ysem_vfc_rbc_parity_error", + "ysem_storm_rf_parity_error", + "ysem_reg_gen_parity_error", + "ysem_mem006_i_ecc_0_rf_int", + "ysem_mem006_i_ecc_1_rf_int", + "ysem_mem005_i_mem_prty", + "ysem_mem002_i_mem_prty", + "ysem_mem004_i_mem_prty", + "ysem_mem003_i_mem_prty", + "ysem_mem001_i_mem_prty", + "ysem_fast_memory_mem024_i_mem_prty", + "ysem_fast_memory_mem023_i_mem_prty", + "ysem_fast_memory_mem022_i_mem_prty", + "ysem_fast_memory_mem021_i_mem_prty", + "ysem_fast_memory_mem020_i_mem_prty", + "ysem_fast_memory_mem019_i_mem_prty", + "ysem_fast_memory_mem018_i_mem_prty", + "ysem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "ysem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "ysem_fast_memory_vfc_config_mem006_i_mem_prty", + "ysem_fast_memory_vfc_config_mem001_i_mem_prty", + "ysem_fast_memory_vfc_config_mem004_i_mem_prty", + "ysem_fast_memory_vfc_config_mem003_i_mem_prty", + "ysem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define ysem_prty_attn_desc OSAL_NULL +#endif + +static const u16 
ysem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg ysem_prty0_bb_a0 = { + 0, 3, ysem_prty0_bb_a0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0, + 0x15000cc +}; + +static const u16 ysem_prty1_bb_a0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ysem_prty1_bb_a0 = { + 1, 7, ysem_prty1_bb_a0_attn_idx, 0x1500200, 0x150020c, 0x1500208, + 0x1500204 +}; + +static struct attn_hw_reg *ysem_prty_bb_a0_regs[2] = { + &ysem_prty0_bb_a0, &ysem_prty1_bb_a0, +}; + +static const u16 ysem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg ysem_prty0_bb_b0 = { + 0, 3, ysem_prty0_bb_b0_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0, + 0x15000cc +}; + +static const u16 ysem_prty1_bb_b0_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ysem_prty1_bb_b0 = { + 1, 7, ysem_prty1_bb_b0_attn_idx, 0x1500200, 0x150020c, 0x1500208, + 0x1500204 +}; + +static struct attn_hw_reg *ysem_prty_bb_b0_regs[2] = { + &ysem_prty0_bb_b0, &ysem_prty1_bb_b0, +}; + +static const u16 ysem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg ysem_prty0_k2 = { + 0, 3, ysem_prty0_k2_attn_idx, 0x15000c8, 0x15000d4, 0x15000d0, + 0x15000cc +}; + +static const u16 ysem_prty1_k2_attn_idx[7] = { + 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ysem_prty1_k2 = { + 1, 7, ysem_prty1_k2_attn_idx, 0x1500200, 0x150020c, 0x1500208, + 0x1500204 +}; + +static const u16 ysem_fast_memory_prty1_k2_attn_idx[7] = { + 10, 11, 12, 13, 14, 15, 16, +}; + +static struct attn_hw_reg ysem_fast_memory_prty1_k2 = { + 2, 7, ysem_fast_memory_prty1_k2_attn_idx, 0x1540200, 0x154020c, + 0x1540208, + 0x1540204 +}; + +static struct attn_hw_reg *ysem_prty_k2_regs[3] = { + &ysem_prty0_k2, &ysem_prty1_k2, &ysem_fast_memory_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *psem_int_attn_desc[46] = { + "psem_address_error", + "psem_fic_last_error", + "psem_fic_length_error", + "psem_fic_fifo_error", + "psem_pas_buf_fifo_error", + "psem_sync_fin_pop_error", + "psem_sync_dra_wr_push_error", + "psem_sync_dra_wr_pop_error", + "psem_sync_dra_rd_push_error", + "psem_sync_dra_rd_pop_error", + "psem_sync_fin_push_error", + "psem_sem_fast_address_error", + "psem_cam_lsb_inp_fifo", + "psem_cam_msb_inp_fifo", + "psem_cam_out_fifo", + "psem_fin_fifo", + "psem_thread_fifo_error", + "psem_thread_overrun", + "psem_sync_ext_store_push_error", + "psem_sync_ext_store_pop_error", + "psem_sync_ext_load_push_error", + "psem_sync_ext_load_pop_error", + "psem_sync_ram_rd_push_error", + "psem_sync_ram_rd_pop_error", + "psem_sync_ram_wr_pop_error", + "psem_sync_ram_wr_push_error", + "psem_sync_dbg_push_error", + "psem_sync_dbg_pop_error", + "psem_dbg_fifo_error", + "psem_cam_msb2_inp_fifo", + "psem_vfc_interrupt", + "psem_vfc_out_fifo_error", + "psem_storm_stack_uf_attn", + "psem_storm_stack_of_attn", + "psem_storm_runtime_error", + "psem_ext_load_pend_wr_error", + "psem_thread_rls_orun_error", + "psem_thread_rls_aloc_error", + "psem_thread_rls_vld_error", + "psem_ext_thread_oor_error", + "psem_ord_id_fifo_error", + "psem_invld_foc_error", + "psem_ext_ld_len_error", + "psem_thrd_ord_fifo_error", + "psem_invld_thrd_ord_error", + "psem_fast_memory_address_error", +}; +#else +#define psem_int_attn_desc OSAL_NULL +#endif + +static const u16 psem_int0_bb_a0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg psem_int0_bb_a0 = { + 0, 32, psem_int0_bb_a0_attn_idx, 0x1600040, 
0x160004c, 0x1600048, + 0x1600044 +}; + +static const u16 psem_int1_bb_a0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg psem_int1_bb_a0 = { + 1, 13, psem_int1_bb_a0_attn_idx, 0x1600050, 0x160005c, 0x1600058, + 0x1600054 +}; + +static const u16 psem_fast_memory_int0_bb_a0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg psem_fast_memory_int0_bb_a0 = { + 2, 1, psem_fast_memory_int0_bb_a0_attn_idx, 0x1640040, 0x164004c, + 0x1640048, 0x1640044 +}; + +static struct attn_hw_reg *psem_int_bb_a0_regs[3] = { + &psem_int0_bb_a0, &psem_int1_bb_a0, &psem_fast_memory_int0_bb_a0, +}; + +static const u16 psem_int0_bb_b0_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg psem_int0_bb_b0 = { + 0, 32, psem_int0_bb_b0_attn_idx, 0x1600040, 0x160004c, 0x1600048, + 0x1600044 +}; + +static const u16 psem_int1_bb_b0_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg psem_int1_bb_b0 = { + 1, 13, psem_int1_bb_b0_attn_idx, 0x1600050, 0x160005c, 0x1600058, + 0x1600054 +}; + +static const u16 psem_fast_memory_int0_bb_b0_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg psem_fast_memory_int0_bb_b0 = { + 2, 1, psem_fast_memory_int0_bb_b0_attn_idx, 0x1640040, 0x164004c, + 0x1640048, 0x1640044 +}; + +static struct attn_hw_reg *psem_int_bb_b0_regs[3] = { + &psem_int0_bb_b0, &psem_int1_bb_b0, &psem_fast_memory_int0_bb_b0, +}; + +static const u16 psem_int0_k2_attn_idx[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg psem_int0_k2 = { + 0, 32, psem_int0_k2_attn_idx, 0x1600040, 0x160004c, 0x1600048, + 0x1600044 +}; + +static const u16 psem_int1_k2_attn_idx[13] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, +}; + +static struct attn_hw_reg psem_int1_k2 = { + 1, 13, psem_int1_k2_attn_idx, 0x1600050, 0x160005c, 0x1600058, + 0x1600054 +}; + +static const u16 psem_fast_memory_int0_k2_attn_idx[1] = { + 45, +}; + +static struct attn_hw_reg psem_fast_memory_int0_k2 = { + 2, 1, psem_fast_memory_int0_k2_attn_idx, 0x1640040, 0x164004c, + 0x1640048, + 0x1640044 +}; + +static struct attn_hw_reg *psem_int_k2_regs[3] = { + &psem_int0_k2, &psem_int1_k2, &psem_fast_memory_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *psem_prty_attn_desc[23] = { + "psem_vfc_rbc_parity_error", + "psem_storm_rf_parity_error", + "psem_reg_gen_parity_error", + "psem_mem005_i_ecc_0_rf_int", + "psem_mem005_i_ecc_1_rf_int", + "psem_mem004_i_mem_prty", + "psem_mem002_i_mem_prty", + "psem_mem003_i_mem_prty", + "psem_mem001_i_mem_prty", + "psem_fast_memory_mem024_i_mem_prty", + "psem_fast_memory_mem023_i_mem_prty", + "psem_fast_memory_mem022_i_mem_prty", + "psem_fast_memory_mem021_i_mem_prty", + "psem_fast_memory_mem020_i_mem_prty", + "psem_fast_memory_mem019_i_mem_prty", + "psem_fast_memory_mem018_i_mem_prty", + "psem_fast_memory_vfc_config_mem005_i_ecc_rf_int", + "psem_fast_memory_vfc_config_mem002_i_ecc_rf_int", + "psem_fast_memory_vfc_config_mem006_i_mem_prty", + "psem_fast_memory_vfc_config_mem001_i_mem_prty", + "psem_fast_memory_vfc_config_mem004_i_mem_prty", + "psem_fast_memory_vfc_config_mem003_i_mem_prty", + "psem_fast_memory_vfc_config_mem007_i_mem_prty", +}; +#else +#define psem_prty_attn_desc OSAL_NULL +#endif + +static const u16 psem_prty0_bb_a0_attn_idx[3] = { + 0, 1, 2, +}; + +static 
struct attn_hw_reg psem_prty0_bb_a0 = { + 0, 3, psem_prty0_bb_a0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0, + 0x16000cc +}; + +static const u16 psem_prty1_bb_a0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psem_prty1_bb_a0 = { + 1, 6, psem_prty1_bb_a0_attn_idx, 0x1600200, 0x160020c, 0x1600208, + 0x1600204 +}; + +static const u16 psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_a0 = { + 2, 6, psem_fast_memory_vfc_config_prty1_bb_a0_attn_idx, 0x164a200, + 0x164a20c, 0x164a208, 0x164a204 +}; + +static struct attn_hw_reg *psem_prty_bb_a0_regs[3] = { + &psem_prty0_bb_a0, &psem_prty1_bb_a0, + &psem_fast_memory_vfc_config_prty1_bb_a0, +}; + +static const u16 psem_prty0_bb_b0_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg psem_prty0_bb_b0 = { + 0, 3, psem_prty0_bb_b0_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0, + 0x16000cc +}; + +static const u16 psem_prty1_bb_b0_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psem_prty1_bb_b0 = { + 1, 6, psem_prty1_bb_b0_attn_idx, 0x1600200, 0x160020c, 0x1600208, + 0x1600204 +}; + +static const u16 psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx[6] = { + 16, 17, 19, 20, 21, 22, +}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_bb_b0 = { + 2, 6, psem_fast_memory_vfc_config_prty1_bb_b0_attn_idx, 0x164a200, + 0x164a20c, 0x164a208, 0x164a204 +}; + +static struct attn_hw_reg *psem_prty_bb_b0_regs[3] = { + &psem_prty0_bb_b0, &psem_prty1_bb_b0, + &psem_fast_memory_vfc_config_prty1_bb_b0, +}; + +static const u16 psem_prty0_k2_attn_idx[3] = { + 0, 1, 2, +}; + +static struct attn_hw_reg psem_prty0_k2 = { + 0, 3, psem_prty0_k2_attn_idx, 0x16000c8, 0x16000d4, 0x16000d0, + 0x16000cc +}; + +static const u16 psem_prty1_k2_attn_idx[6] = { + 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg psem_prty1_k2 = { + 1, 6, psem_prty1_k2_attn_idx, 0x1600200, 0x160020c, 0x1600208, + 0x1600204 +}; + +static const u16 psem_fast_memory_prty1_k2_attn_idx[7] = { + 9, 10, 11, 12, 13, 14, 15, +}; + +static struct attn_hw_reg psem_fast_memory_prty1_k2 = { + 2, 7, psem_fast_memory_prty1_k2_attn_idx, 0x1640200, 0x164020c, + 0x1640208, + 0x1640204 +}; + +static const u16 psem_fast_memory_vfc_config_prty1_k2_attn_idx[6] = { + 16, 17, 18, 19, 20, 21, +}; + +static struct attn_hw_reg psem_fast_memory_vfc_config_prty1_k2 = { + 3, 6, psem_fast_memory_vfc_config_prty1_k2_attn_idx, 0x164a200, + 0x164a20c, + 0x164a208, 0x164a204 +}; + +static struct attn_hw_reg *psem_prty_k2_regs[4] = { + &psem_prty0_k2, &psem_prty1_k2, &psem_fast_memory_prty1_k2, + &psem_fast_memory_vfc_config_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *rss_int_attn_desc[12] = { + "rss_address_error", + "rss_msg_inp_cnt_error", + "rss_msg_out_cnt_error", + "rss_inp_state_error", + "rss_out_state_error", + "rss_main_state_error", + "rss_calc_state_error", + "rss_inp_fifo_error", + "rss_cmd_fifo_error", + "rss_msg_fifo_error", + "rss_rsp_fifo_error", + "rss_hdr_fifo_error", +}; +#else +#define rss_int_attn_desc OSAL_NULL +#endif + +static const u16 rss_int0_bb_a0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg rss_int0_bb_a0 = { + 0, 12, rss_int0_bb_a0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984 +}; + +static struct attn_hw_reg *rss_int_bb_a0_regs[1] = { + &rss_int0_bb_a0, +}; + +static const u16 rss_int0_bb_b0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg 
rss_int0_bb_b0 = { + 0, 12, rss_int0_bb_b0_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984 +}; + +static struct attn_hw_reg *rss_int_bb_b0_regs[1] = { + &rss_int0_bb_b0, +}; + +static const u16 rss_int0_k2_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg rss_int0_k2 = { + 0, 12, rss_int0_k2_attn_idx, 0x238980, 0x23898c, 0x238988, 0x238984 +}; + +static struct attn_hw_reg *rss_int_k2_regs[1] = { + &rss_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *rss_prty_attn_desc[4] = { + "rss_mem002_i_ecc_rf_int", + "rss_mem001_i_ecc_rf_int", + "rss_mem003_i_mem_prty", + "rss_mem004_i_mem_prty", +}; +#else +#define rss_prty_attn_desc OSAL_NULL +#endif + +static const u16 rss_prty1_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg rss_prty1_bb_a0 = { + 0, 4, rss_prty1_bb_a0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04 +}; + +static struct attn_hw_reg *rss_prty_bb_a0_regs[1] = { + &rss_prty1_bb_a0, +}; + +static const u16 rss_prty1_bb_b0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg rss_prty1_bb_b0 = { + 0, 4, rss_prty1_bb_b0_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04 +}; + +static struct attn_hw_reg *rss_prty_bb_b0_regs[1] = { + &rss_prty1_bb_b0, +}; + +static const u16 rss_prty1_k2_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg rss_prty1_k2 = { + 0, 4, rss_prty1_k2_attn_idx, 0x238a00, 0x238a0c, 0x238a08, 0x238a04 +}; + +static struct attn_hw_reg *rss_prty_k2_regs[1] = { + &rss_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *tmld_int_attn_desc[6] = { + "tmld_address_error", + "tmld_ld_hdr_err", + "tmld_ld_seg_msg_err", + "tmld_ld_tid_mini_cache_err", + "tmld_ld_cid_mini_cache_err", + "tmld_ld_long_message", +}; +#else +#define tmld_int_attn_desc OSAL_NULL +#endif + +static const u16 tmld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg tmld_int0_bb_a0 = { + 0, 6, tmld_int0_bb_a0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184 +}; + +static struct attn_hw_reg *tmld_int_bb_a0_regs[1] = { + &tmld_int0_bb_a0, +}; + +static const u16 tmld_int0_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg tmld_int0_bb_b0 = { + 0, 6, tmld_int0_bb_b0_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184 +}; + +static struct attn_hw_reg *tmld_int_bb_b0_regs[1] = { + &tmld_int0_bb_b0, +}; + +static const u16 tmld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg tmld_int0_k2 = { + 0, 6, tmld_int0_k2_attn_idx, 0x4d0180, 0x4d018c, 0x4d0188, 0x4d0184 +}; + +static struct attn_hw_reg *tmld_int_k2_regs[1] = { + &tmld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tmld_prty_attn_desc[8] = { + "tmld_mem006_i_ecc_rf_int", + "tmld_mem002_i_ecc_rf_int", + "tmld_mem003_i_mem_prty", + "tmld_mem004_i_mem_prty", + "tmld_mem007_i_mem_prty", + "tmld_mem008_i_mem_prty", + "tmld_mem005_i_mem_prty", + "tmld_mem001_i_mem_prty", +}; +#else +#define tmld_prty_attn_desc OSAL_NULL +#endif + +static const u16 tmld_prty1_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tmld_prty1_bb_a0 = { + 0, 8, tmld_prty1_bb_a0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204 +}; + +static struct attn_hw_reg *tmld_prty_bb_a0_regs[1] = { + &tmld_prty1_bb_a0, +}; + +static const u16 tmld_prty1_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tmld_prty1_bb_b0 = { + 0, 8, tmld_prty1_bb_b0_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204 +}; + +static struct attn_hw_reg *tmld_prty_bb_b0_regs[1] = { + 
&tmld_prty1_bb_b0, +}; + +static const u16 tmld_prty1_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tmld_prty1_k2 = { + 0, 8, tmld_prty1_k2_attn_idx, 0x4d0200, 0x4d020c, 0x4d0208, 0x4d0204 +}; + +static struct attn_hw_reg *tmld_prty_k2_regs[1] = { + &tmld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *muld_int_attn_desc[6] = { + "muld_address_error", + "muld_ld_hdr_err", + "muld_ld_seg_msg_err", + "muld_ld_tid_mini_cache_err", + "muld_ld_cid_mini_cache_err", + "muld_ld_long_message", +}; +#else +#define muld_int_attn_desc OSAL_NULL +#endif + +static const u16 muld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg muld_int0_bb_a0 = { + 0, 6, muld_int0_bb_a0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184 +}; + +static struct attn_hw_reg *muld_int_bb_a0_regs[1] = { + &muld_int0_bb_a0, +}; + +static const u16 muld_int0_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg muld_int0_bb_b0 = { + 0, 6, muld_int0_bb_b0_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184 +}; + +static struct attn_hw_reg *muld_int_bb_b0_regs[1] = { + &muld_int0_bb_b0, +}; + +static const u16 muld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg muld_int0_k2 = { + 0, 6, muld_int0_k2_attn_idx, 0x4e0180, 0x4e018c, 0x4e0188, 0x4e0184 +}; + +static struct attn_hw_reg *muld_int_k2_regs[1] = { + &muld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *muld_prty_attn_desc[10] = { + "muld_mem005_i_ecc_rf_int", + "muld_mem001_i_ecc_rf_int", + "muld_mem008_i_ecc_rf_int", + "muld_mem007_i_ecc_rf_int", + "muld_mem002_i_mem_prty", + "muld_mem003_i_mem_prty", + "muld_mem009_i_mem_prty", + "muld_mem010_i_mem_prty", + "muld_mem004_i_mem_prty", + "muld_mem006_i_mem_prty", +}; +#else +#define muld_prty_attn_desc OSAL_NULL +#endif + +static const u16 muld_prty1_bb_a0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg muld_prty1_bb_a0 = { + 0, 10, muld_prty1_bb_a0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, + 0x4e0204 +}; + +static struct attn_hw_reg *muld_prty_bb_a0_regs[1] = { + &muld_prty1_bb_a0, +}; + +static const u16 muld_prty1_bb_b0_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg muld_prty1_bb_b0 = { + 0, 10, muld_prty1_bb_b0_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, + 0x4e0204 +}; + +static struct attn_hw_reg *muld_prty_bb_b0_regs[1] = { + &muld_prty1_bb_b0, +}; + +static const u16 muld_prty1_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg muld_prty1_k2 = { + 0, 10, muld_prty1_k2_attn_idx, 0x4e0200, 0x4e020c, 0x4e0208, 0x4e0204 +}; + +static struct attn_hw_reg *muld_prty_k2_regs[1] = { + &muld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *yuld_int_attn_desc[6] = { + "yuld_address_error", + "yuld_ld_hdr_err", + "yuld_ld_seg_msg_err", + "yuld_ld_tid_mini_cache_err", + "yuld_ld_cid_mini_cache_err", + "yuld_ld_long_message", +}; +#else +#define yuld_int_attn_desc OSAL_NULL +#endif + +static const u16 yuld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_int0_bb_a0 = { + 0, 6, yuld_int0_bb_a0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184 +}; + +static struct attn_hw_reg *yuld_int_bb_a0_regs[1] = { + &yuld_int0_bb_a0, +}; + +static const u16 yuld_int0_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_int0_bb_b0 = { + 0, 6, yuld_int0_bb_b0_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184 +}; + +static struct attn_hw_reg *yuld_int_bb_b0_regs[1] = { + 
&yuld_int0_bb_b0, +}; + +static const u16 yuld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_int0_k2 = { + 0, 6, yuld_int0_k2_attn_idx, 0x4c8180, 0x4c818c, 0x4c8188, 0x4c8184 +}; + +static struct attn_hw_reg *yuld_int_k2_regs[1] = { + &yuld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *yuld_prty_attn_desc[6] = { + "yuld_mem001_i_mem_prty", + "yuld_mem002_i_mem_prty", + "yuld_mem005_i_mem_prty", + "yuld_mem006_i_mem_prty", + "yuld_mem004_i_mem_prty", + "yuld_mem003_i_mem_prty", +}; +#else +#define yuld_prty_attn_desc OSAL_NULL +#endif + +static const u16 yuld_prty1_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_prty1_bb_a0 = { + 0, 6, yuld_prty1_bb_a0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204 +}; + +static struct attn_hw_reg *yuld_prty_bb_a0_regs[1] = { + &yuld_prty1_bb_a0, +}; + +static const u16 yuld_prty1_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_prty1_bb_b0 = { + 0, 6, yuld_prty1_bb_b0_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204 +}; + +static struct attn_hw_reg *yuld_prty_bb_b0_regs[1] = { + &yuld_prty1_bb_b0, +}; + +static const u16 yuld_prty1_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg yuld_prty1_k2 = { + 0, 6, yuld_prty1_k2_attn_idx, 0x4c8200, 0x4c820c, 0x4c8208, 0x4c8204 +}; + +static struct attn_hw_reg *yuld_prty_k2_regs[1] = { + &yuld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *xyld_int_attn_desc[6] = { + "xyld_address_error", + "xyld_ld_hdr_err", + "xyld_ld_seg_msg_err", + "xyld_ld_tid_mini_cache_err", + "xyld_ld_cid_mini_cache_err", + "xyld_ld_long_message", +}; +#else +#define xyld_int_attn_desc OSAL_NULL +#endif + +static const u16 xyld_int0_bb_a0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg xyld_int0_bb_a0 = { + 0, 6, xyld_int0_bb_a0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184 +}; + +static struct attn_hw_reg *xyld_int_bb_a0_regs[1] = { + &xyld_int0_bb_a0, +}; + +static const u16 xyld_int0_bb_b0_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg xyld_int0_bb_b0 = { + 0, 6, xyld_int0_bb_b0_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184 +}; + +static struct attn_hw_reg *xyld_int_bb_b0_regs[1] = { + &xyld_int0_bb_b0, +}; + +static const u16 xyld_int0_k2_attn_idx[6] = { + 0, 1, 2, 3, 4, 5, +}; + +static struct attn_hw_reg xyld_int0_k2 = { + 0, 6, xyld_int0_k2_attn_idx, 0x4c0180, 0x4c018c, 0x4c0188, 0x4c0184 +}; + +static struct attn_hw_reg *xyld_int_k2_regs[1] = { + &xyld_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *xyld_prty_attn_desc[9] = { + "xyld_mem004_i_ecc_rf_int", + "xyld_mem006_i_ecc_rf_int", + "xyld_mem001_i_mem_prty", + "xyld_mem002_i_mem_prty", + "xyld_mem008_i_mem_prty", + "xyld_mem009_i_mem_prty", + "xyld_mem003_i_mem_prty", + "xyld_mem005_i_mem_prty", + "xyld_mem007_i_mem_prty", +}; +#else +#define xyld_prty_attn_desc OSAL_NULL +#endif + +static const u16 xyld_prty1_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg xyld_prty1_bb_a0 = { + 0, 9, xyld_prty1_bb_a0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204 +}; + +static struct attn_hw_reg *xyld_prty_bb_a0_regs[1] = { + &xyld_prty1_bb_a0, +}; + +static const u16 xyld_prty1_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg xyld_prty1_bb_b0 = { + 0, 9, xyld_prty1_bb_b0_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204 +}; + +static struct attn_hw_reg *xyld_prty_bb_b0_regs[1] = { + &xyld_prty1_bb_b0, +}; + +static const u16 
xyld_prty1_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg xyld_prty1_k2 = { + 0, 9, xyld_prty1_k2_attn_idx, 0x4c0200, 0x4c020c, 0x4c0208, 0x4c0204 +}; + +static struct attn_hw_reg *xyld_prty_k2_regs[1] = { + &xyld_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *prm_int_attn_desc[11] = { + "prm_address_error", + "prm_ififo_error", + "prm_immed_fifo_error", + "prm_ofst_pend_error", + "prm_pad_pend_error", + "prm_pbinp_pend_error", + "prm_tag_pend_error", + "prm_mstorm_eop_err", + "prm_ustorm_eop_err", + "prm_mstorm_que_err", + "prm_ustorm_que_err", +}; +#else +#define prm_int_attn_desc OSAL_NULL +#endif + +static const u16 prm_int0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg prm_int0_bb_a0 = { + 0, 11, prm_int0_bb_a0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044 +}; + +static struct attn_hw_reg *prm_int_bb_a0_regs[1] = { + &prm_int0_bb_a0, +}; + +static const u16 prm_int0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg prm_int0_bb_b0 = { + 0, 11, prm_int0_bb_b0_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044 +}; + +static struct attn_hw_reg *prm_int_bb_b0_regs[1] = { + &prm_int0_bb_b0, +}; + +static const u16 prm_int0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg prm_int0_k2 = { + 0, 11, prm_int0_k2_attn_idx, 0x230040, 0x23004c, 0x230048, 0x230044 +}; + +static struct attn_hw_reg *prm_int_k2_regs[1] = { + &prm_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *prm_prty_attn_desc[30] = { + "prm_datapath_registers", + "prm_mem012_i_ecc_rf_int", + "prm_mem013_i_ecc_rf_int", + "prm_mem014_i_ecc_rf_int", + "prm_mem020_i_ecc_rf_int", + "prm_mem004_i_mem_prty", + "prm_mem024_i_mem_prty", + "prm_mem016_i_mem_prty", + "prm_mem017_i_mem_prty", + "prm_mem008_i_mem_prty", + "prm_mem009_i_mem_prty", + "prm_mem010_i_mem_prty", + "prm_mem015_i_mem_prty", + "prm_mem011_i_mem_prty", + "prm_mem003_i_mem_prty", + "prm_mem002_i_mem_prty", + "prm_mem005_i_mem_prty", + "prm_mem023_i_mem_prty", + "prm_mem006_i_mem_prty", + "prm_mem007_i_mem_prty", + "prm_mem001_i_mem_prty", + "prm_mem022_i_mem_prty", + "prm_mem021_i_mem_prty", + "prm_mem019_i_mem_prty", + "prm_mem015_i_ecc_rf_int", + "prm_mem021_i_ecc_rf_int", + "prm_mem025_i_mem_prty", + "prm_mem018_i_mem_prty", + "prm_mem012_i_mem_prty", + "prm_mem020_i_mem_prty", +}; +#else +#define prm_prty_attn_desc OSAL_NULL +#endif + +static const u16 prm_prty1_bb_a0_attn_idx[25] = { + 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, 24, + 25, 26, 27, 28, 29, +}; + +static struct attn_hw_reg prm_prty1_bb_a0 = { + 0, 25, prm_prty1_bb_a0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204 +}; + +static struct attn_hw_reg *prm_prty_bb_a0_regs[1] = { + &prm_prty1_bb_a0, +}; + +static const u16 prm_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg prm_prty0_bb_b0 = { + 0, 1, prm_prty0_bb_b0_attn_idx, 0x230050, 0x23005c, 0x230058, 0x230054 +}; + +static const u16 prm_prty1_bb_b0_attn_idx[24] = { + 2, 3, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 24, 25, + 26, 27, 28, 29, +}; + +static struct attn_hw_reg prm_prty1_bb_b0 = { + 1, 24, prm_prty1_bb_b0_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204 +}; + +static struct attn_hw_reg *prm_prty_bb_b0_regs[2] = { + &prm_prty0_bb_b0, &prm_prty1_bb_b0, +}; + +static const u16 prm_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg prm_prty0_k2 = { + 0, 1, prm_prty0_k2_attn_idx, 0x230050, 0x23005c, 
0x230058, 0x230054 +}; + +static const u16 prm_prty1_k2_attn_idx[23] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, +}; + +static struct attn_hw_reg prm_prty1_k2 = { + 1, 23, prm_prty1_k2_attn_idx, 0x230200, 0x23020c, 0x230208, 0x230204 +}; + +static struct attn_hw_reg *prm_prty_k2_regs[2] = { + &prm_prty0_k2, &prm_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb1_int_attn_desc[9] = { + "pbf_pb1_address_error", + "pbf_pb1_eop_error", + "pbf_pb1_ififo_error", + "pbf_pb1_pfifo_error", + "pbf_pb1_db_buf_error", + "pbf_pb1_th_exec_error", + "pbf_pb1_tq_error_wr", + "pbf_pb1_tq_error_rd_th", + "pbf_pb1_tq_error_rd_ih", +}; +#else +#define pbf_pb1_int_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb1_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb1_int0_bb_a0 = { + 0, 9, pbf_pb1_int0_bb_a0_attn_idx, 0xda0040, 0xda004c, 0xda0048, + 0xda0044 +}; + +static struct attn_hw_reg *pbf_pb1_int_bb_a0_regs[1] = { + &pbf_pb1_int0_bb_a0, +}; + +static const u16 pbf_pb1_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb1_int0_bb_b0 = { + 0, 9, pbf_pb1_int0_bb_b0_attn_idx, 0xda0040, 0xda004c, 0xda0048, + 0xda0044 +}; + +static struct attn_hw_reg *pbf_pb1_int_bb_b0_regs[1] = { + &pbf_pb1_int0_bb_b0, +}; + +static const u16 pbf_pb1_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb1_int0_k2 = { + 0, 9, pbf_pb1_int0_k2_attn_idx, 0xda0040, 0xda004c, 0xda0048, 0xda0044 +}; + +static struct attn_hw_reg *pbf_pb1_int_k2_regs[1] = { + &pbf_pb1_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb1_prty_attn_desc[1] = { + "pbf_pb1_datapath_registers", +}; +#else +#define pbf_pb1_prty_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb1_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb1_prty0_bb_b0 = { + 0, 1, pbf_pb1_prty0_bb_b0_attn_idx, 0xda0050, 0xda005c, 0xda0058, + 0xda0054 +}; + +static struct attn_hw_reg *pbf_pb1_prty_bb_b0_regs[1] = { + &pbf_pb1_prty0_bb_b0, +}; + +static const u16 pbf_pb1_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb1_prty0_k2 = { + 0, 1, pbf_pb1_prty0_k2_attn_idx, 0xda0050, 0xda005c, 0xda0058, 0xda0054 +}; + +static struct attn_hw_reg *pbf_pb1_prty_k2_regs[1] = { + &pbf_pb1_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb2_int_attn_desc[9] = { + "pbf_pb2_address_error", + "pbf_pb2_eop_error", + "pbf_pb2_ififo_error", + "pbf_pb2_pfifo_error", + "pbf_pb2_db_buf_error", + "pbf_pb2_th_exec_error", + "pbf_pb2_tq_error_wr", + "pbf_pb2_tq_error_rd_th", + "pbf_pb2_tq_error_rd_ih", +}; +#else +#define pbf_pb2_int_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb2_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb2_int0_bb_a0 = { + 0, 9, pbf_pb2_int0_bb_a0_attn_idx, 0xda4040, 0xda404c, 0xda4048, + 0xda4044 +}; + +static struct attn_hw_reg *pbf_pb2_int_bb_a0_regs[1] = { + &pbf_pb2_int0_bb_a0, +}; + +static const u16 pbf_pb2_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb2_int0_bb_b0 = { + 0, 9, pbf_pb2_int0_bb_b0_attn_idx, 0xda4040, 0xda404c, 0xda4048, + 0xda4044 +}; + +static struct attn_hw_reg *pbf_pb2_int_bb_b0_regs[1] = { + &pbf_pb2_int0_bb_b0, +}; + +static const u16 pbf_pb2_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg pbf_pb2_int0_k2 = { + 0, 9, pbf_pb2_int0_k2_attn_idx, 0xda4040, 0xda404c, 0xda4048, 
0xda4044 +}; + +static struct attn_hw_reg *pbf_pb2_int_k2_regs[1] = { + &pbf_pb2_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_pb2_prty_attn_desc[1] = { + "pbf_pb2_datapath_registers", +}; +#else +#define pbf_pb2_prty_attn_desc OSAL_NULL +#endif + +static const u16 pbf_pb2_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb2_prty0_bb_b0 = { + 0, 1, pbf_pb2_prty0_bb_b0_attn_idx, 0xda4050, 0xda405c, 0xda4058, + 0xda4054 +}; + +static struct attn_hw_reg *pbf_pb2_prty_bb_b0_regs[1] = { + &pbf_pb2_prty0_bb_b0, +}; + +static const u16 pbf_pb2_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_pb2_prty0_k2 = { + 0, 1, pbf_pb2_prty0_k2_attn_idx, 0xda4050, 0xda405c, 0xda4058, 0xda4054 +}; + +static struct attn_hw_reg *pbf_pb2_prty_k2_regs[1] = { + &pbf_pb2_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *rpb_int_attn_desc[9] = { + "rpb_address_error", + "rpb_eop_error", + "rpb_ififo_error", + "rpb_pfifo_error", + "rpb_db_buf_error", + "rpb_th_exec_error", + "rpb_tq_error_wr", + "rpb_tq_error_rd_th", + "rpb_tq_error_rd_ih", +}; +#else +#define rpb_int_attn_desc OSAL_NULL +#endif + +static const u16 rpb_int0_bb_a0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rpb_int0_bb_a0 = { + 0, 9, rpb_int0_bb_a0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044 +}; + +static struct attn_hw_reg *rpb_int_bb_a0_regs[1] = { + &rpb_int0_bb_a0, +}; + +static const u16 rpb_int0_bb_b0_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rpb_int0_bb_b0 = { + 0, 9, rpb_int0_bb_b0_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044 +}; + +static struct attn_hw_reg *rpb_int_bb_b0_regs[1] = { + &rpb_int0_bb_b0, +}; + +static const u16 rpb_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rpb_int0_k2 = { + 0, 9, rpb_int0_k2_attn_idx, 0x23c040, 0x23c04c, 0x23c048, 0x23c044 +}; + +static struct attn_hw_reg *rpb_int_k2_regs[1] = { + &rpb_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *rpb_prty_attn_desc[1] = { + "rpb_datapath_registers", +}; +#else +#define rpb_prty_attn_desc OSAL_NULL +#endif + +static const u16 rpb_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg rpb_prty0_bb_b0 = { + 0, 1, rpb_prty0_bb_b0_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054 +}; + +static struct attn_hw_reg *rpb_prty_bb_b0_regs[1] = { + &rpb_prty0_bb_b0, +}; + +static const u16 rpb_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg rpb_prty0_k2 = { + 0, 1, rpb_prty0_k2_attn_idx, 0x23c050, 0x23c05c, 0x23c058, 0x23c054 +}; + +static struct attn_hw_reg *rpb_prty_k2_regs[1] = { + &rpb_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *btb_int_attn_desc[139] = { + "btb_address_error", + "btb_rc_pkt0_rls_error", + "btb_unused_0", + "btb_rc_pkt0_len_error", + "btb_unused_1", + "btb_rc_pkt0_protocol_error", + "btb_rc_pkt1_rls_error", + "btb_unused_2", + "btb_rc_pkt1_len_error", + "btb_unused_3", + "btb_rc_pkt1_protocol_error", + "btb_rc_pkt2_rls_error", + "btb_unused_4", + "btb_rc_pkt2_len_error", + "btb_unused_5", + "btb_rc_pkt2_protocol_error", + "btb_rc_pkt3_rls_error", + "btb_unused_6", + "btb_rc_pkt3_len_error", + "btb_unused_7", + "btb_rc_pkt3_protocol_error", + "btb_rc_sop_req_tc_port_error", + "btb_unused_8", + "btb_wc0_protocol_error", + "btb_unused_9", + "btb_ll_blk_error", + "btb_ll_arb_calc_error", + "btb_fc_alm_calc_error", + "btb_wc0_inp_fifo_error", + "btb_wc0_sop_fifo_error", + "btb_wc0_len_fifo_error", + "btb_wc0_eop_fifo_error", + "btb_wc0_queue_fifo_error", + 
"btb_wc0_free_point_fifo_error", + "btb_wc0_next_point_fifo_error", + "btb_wc0_strt_fifo_error", + "btb_wc0_second_dscr_fifo_error", + "btb_wc0_pkt_avail_fifo_error", + "btb_wc0_notify_fifo_error", + "btb_wc0_ll_req_fifo_error", + "btb_wc0_ll_pa_cnt_error", + "btb_wc0_bb_pa_cnt_error", + "btb_wc_dup_upd_data_fifo_error", + "btb_wc_dup_rsp_dscr_fifo_error", + "btb_wc_dup_upd_point_fifo_error", + "btb_wc_dup_pkt_avail_fifo_error", + "btb_wc_dup_pkt_avail_cnt_error", + "btb_rc_pkt0_side_fifo_error", + "btb_rc_pkt0_req_fifo_error", + "btb_rc_pkt0_blk_fifo_error", + "btb_rc_pkt0_rls_left_fifo_error", + "btb_rc_pkt0_strt_ptr_fifo_error", + "btb_rc_pkt0_second_ptr_fifo_error", + "btb_rc_pkt0_rsp_fifo_error", + "btb_rc_pkt0_dscr_fifo_error", + "btb_rc_pkt1_side_fifo_error", + "btb_rc_pkt1_req_fifo_error", + "btb_rc_pkt1_blk_fifo_error", + "btb_rc_pkt1_rls_left_fifo_error", + "btb_rc_pkt1_strt_ptr_fifo_error", + "btb_rc_pkt1_second_ptr_fifo_error", + "btb_rc_pkt1_rsp_fifo_error", + "btb_rc_pkt1_dscr_fifo_error", + "btb_rc_pkt2_side_fifo_error", + "btb_rc_pkt2_req_fifo_error", + "btb_rc_pkt2_blk_fifo_error", + "btb_rc_pkt2_rls_left_fifo_error", + "btb_rc_pkt2_strt_ptr_fifo_error", + "btb_rc_pkt2_second_ptr_fifo_error", + "btb_rc_pkt2_rsp_fifo_error", + "btb_rc_pkt2_dscr_fifo_error", + "btb_rc_pkt3_side_fifo_error", + "btb_rc_pkt3_req_fifo_error", + "btb_rc_pkt3_blk_fifo_error", + "btb_rc_pkt3_rls_left_fifo_error", + "btb_rc_pkt3_strt_ptr_fifo_error", + "btb_rc_pkt3_second_ptr_fifo_error", + "btb_rc_pkt3_rsp_fifo_error", + "btb_rc_pkt3_dscr_fifo_error", + "btb_rc_sop_queue_fifo_error", + "btb_ll_arb_rls_fifo_error", + "btb_ll_arb_prefetch_fifo_error", + "btb_rc_pkt0_rls_fifo_error", + "btb_rc_pkt1_rls_fifo_error", + "btb_rc_pkt2_rls_fifo_error", + "btb_rc_pkt3_rls_fifo_error", + "btb_rc_pkt4_rls_fifo_error", + "btb_rc_pkt5_rls_fifo_error", + "btb_rc_pkt6_rls_fifo_error", + "btb_rc_pkt7_rls_fifo_error", + "btb_rc_pkt4_rls_error", + "btb_rc_pkt4_len_error", + "btb_rc_pkt4_protocol_error", + "btb_rc_pkt4_side_fifo_error", + "btb_rc_pkt4_req_fifo_error", + "btb_rc_pkt4_blk_fifo_error", + "btb_rc_pkt4_rls_left_fifo_error", + "btb_rc_pkt4_strt_ptr_fifo_error", + "btb_rc_pkt4_second_ptr_fifo_error", + "btb_rc_pkt4_rsp_fifo_error", + "btb_rc_pkt4_dscr_fifo_error", + "btb_rc_pkt5_rls_error", + "btb_rc_pkt5_len_error", + "btb_rc_pkt5_protocol_error", + "btb_rc_pkt5_side_fifo_error", + "btb_rc_pkt5_req_fifo_error", + "btb_rc_pkt5_blk_fifo_error", + "btb_rc_pkt5_rls_left_fifo_error", + "btb_rc_pkt5_strt_ptr_fifo_error", + "btb_rc_pkt5_second_ptr_fifo_error", + "btb_rc_pkt5_rsp_fifo_error", + "btb_rc_pkt5_dscr_fifo_error", + "btb_rc_pkt6_rls_error", + "btb_rc_pkt6_len_error", + "btb_rc_pkt6_protocol_error", + "btb_rc_pkt6_side_fifo_error", + "btb_rc_pkt6_req_fifo_error", + "btb_rc_pkt6_blk_fifo_error", + "btb_rc_pkt6_rls_left_fifo_error", + "btb_rc_pkt6_strt_ptr_fifo_error", + "btb_rc_pkt6_second_ptr_fifo_error", + "btb_rc_pkt6_rsp_fifo_error", + "btb_rc_pkt6_dscr_fifo_error", + "btb_rc_pkt7_rls_error", + "btb_rc_pkt7_len_error", + "btb_rc_pkt7_protocol_error", + "btb_rc_pkt7_side_fifo_error", + "btb_rc_pkt7_req_fifo_error", + "btb_rc_pkt7_blk_fifo_error", + "btb_rc_pkt7_rls_left_fifo_error", + "btb_rc_pkt7_strt_ptr_fifo_error", + "btb_rc_pkt7_second_ptr_fifo_error", + "btb_rc_pkt7_rsp_fifo_error", + "btb_packet_available_sync_fifo_push_error", + "btb_wc6_notify_fifo_error", + "btb_wc9_queue_fifo_error", + "btb_wc0_sync_fifo_push_error", + "btb_rls_sync_fifo_push_error", + "btb_rc_pkt7_dscr_fifo_error", +}; 
+#else +#define btb_int_attn_desc OSAL_NULL +#endif + +static const u16 btb_int0_bb_a0_attn_idx[16] = { + 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, +}; + +static struct attn_hw_reg btb_int0_bb_a0 = { + 0, 16, btb_int0_bb_a0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4 +}; + +static const u16 btb_int1_bb_a0_attn_idx[16] = { + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg btb_int1_bb_a0 = { + 1, 16, btb_int1_bb_a0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc +}; + +static const u16 btb_int2_bb_a0_attn_idx[4] = { + 42, 43, 44, 45, +}; + +static struct attn_hw_reg btb_int2_bb_a0 = { + 2, 4, btb_int2_bb_a0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4 +}; + +static const u16 btb_int3_bb_a0_attn_idx[32] = { + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, +}; + +static struct attn_hw_reg btb_int3_bb_a0 = { + 3, 32, btb_int3_bb_a0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c +}; + +static const u16 btb_int4_bb_a0_attn_idx[23] = { + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, +}; + +static struct attn_hw_reg btb_int4_bb_a0 = { + 4, 23, btb_int4_bb_a0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124 +}; + +static const u16 btb_int5_bb_a0_attn_idx[32] = { + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, + 132, +}; + +static struct attn_hw_reg btb_int5_bb_a0 = { + 5, 32, btb_int5_bb_a0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c +}; + +static const u16 btb_int6_bb_a0_attn_idx[1] = { + 133, +}; + +static struct attn_hw_reg btb_int6_bb_a0 = { + 6, 1, btb_int6_bb_a0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154 +}; + +static const u16 btb_int8_bb_a0_attn_idx[1] = { + 134, +}; + +static struct attn_hw_reg btb_int8_bb_a0 = { + 7, 1, btb_int8_bb_a0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188 +}; + +static const u16 btb_int9_bb_a0_attn_idx[1] = { + 135, +}; + +static struct attn_hw_reg btb_int9_bb_a0 = { + 8, 1, btb_int9_bb_a0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0 +}; + +static const u16 btb_int10_bb_a0_attn_idx[1] = { + 136, +}; + +static struct attn_hw_reg btb_int10_bb_a0 = { + 9, 1, btb_int10_bb_a0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8 +}; + +static const u16 btb_int11_bb_a0_attn_idx[2] = { + 137, 138, +}; + +static struct attn_hw_reg btb_int11_bb_a0 = { + 10, 2, btb_int11_bb_a0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0 +}; + +static struct attn_hw_reg *btb_int_bb_a0_regs[11] = { + &btb_int0_bb_a0, &btb_int1_bb_a0, &btb_int2_bb_a0, &btb_int3_bb_a0, + &btb_int4_bb_a0, &btb_int5_bb_a0, &btb_int6_bb_a0, &btb_int8_bb_a0, + &btb_int9_bb_a0, &btb_int10_bb_a0, + &btb_int11_bb_a0, +}; + +static const u16 btb_int0_bb_b0_attn_idx[16] = { + 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, +}; + +static struct attn_hw_reg btb_int0_bb_b0 = { + 0, 16, btb_int0_bb_b0_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4 +}; + +static const u16 btb_int1_bb_b0_attn_idx[16] = { + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg btb_int1_bb_b0 = { + 1, 16, btb_int1_bb_b0_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc +}; + +static const u16 btb_int2_bb_b0_attn_idx[4] = { + 42, 43, 44, 45, +}; + +static struct attn_hw_reg btb_int2_bb_b0 = { + 2, 4, btb_int2_bb_b0_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 
0xdb00f4 +}; + +static const u16 btb_int3_bb_b0_attn_idx[32] = { + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, +}; + +static struct attn_hw_reg btb_int3_bb_b0 = { + 3, 32, btb_int3_bb_b0_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c +}; + +static const u16 btb_int4_bb_b0_attn_idx[23] = { + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, +}; + +static struct attn_hw_reg btb_int4_bb_b0 = { + 4, 23, btb_int4_bb_b0_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124 +}; + +static const u16 btb_int5_bb_b0_attn_idx[32] = { + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, + 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, + 130, 131, + 132, +}; + +static struct attn_hw_reg btb_int5_bb_b0 = { + 5, 32, btb_int5_bb_b0_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c +}; + +static const u16 btb_int6_bb_b0_attn_idx[1] = { + 133, +}; + +static struct attn_hw_reg btb_int6_bb_b0 = { + 6, 1, btb_int6_bb_b0_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154 +}; + +static const u16 btb_int8_bb_b0_attn_idx[1] = { + 134, +}; + +static struct attn_hw_reg btb_int8_bb_b0 = { + 7, 1, btb_int8_bb_b0_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188 +}; + +static const u16 btb_int9_bb_b0_attn_idx[1] = { + 135, +}; + +static struct attn_hw_reg btb_int9_bb_b0 = { + 8, 1, btb_int9_bb_b0_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0 +}; + +static const u16 btb_int10_bb_b0_attn_idx[1] = { + 136, +}; + +static struct attn_hw_reg btb_int10_bb_b0 = { + 9, 1, btb_int10_bb_b0_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8 +}; + +static const u16 btb_int11_bb_b0_attn_idx[2] = { + 137, 138, +}; + +static struct attn_hw_reg btb_int11_bb_b0 = { + 10, 2, btb_int11_bb_b0_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0 +}; + +static struct attn_hw_reg *btb_int_bb_b0_regs[11] = { + &btb_int0_bb_b0, &btb_int1_bb_b0, &btb_int2_bb_b0, &btb_int3_bb_b0, + &btb_int4_bb_b0, &btb_int5_bb_b0, &btb_int6_bb_b0, &btb_int8_bb_b0, + &btb_int9_bb_b0, &btb_int10_bb_b0, + &btb_int11_bb_b0, +}; + +static const u16 btb_int0_k2_attn_idx[16] = { + 0, 1, 3, 5, 6, 8, 10, 11, 13, 15, 16, 18, 20, 21, 23, 25, +}; + +static struct attn_hw_reg btb_int0_k2 = { + 0, 16, btb_int0_k2_attn_idx, 0xdb00c0, 0xdb00cc, 0xdb00c8, 0xdb00c4 +}; + +static const u16 btb_int1_k2_attn_idx[16] = { + 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg btb_int1_k2 = { + 1, 16, btb_int1_k2_attn_idx, 0xdb00d8, 0xdb00e4, 0xdb00e0, 0xdb00dc +}; + +static const u16 btb_int2_k2_attn_idx[4] = { + 42, 43, 44, 45, +}; + +static struct attn_hw_reg btb_int2_k2 = { + 2, 4, btb_int2_k2_attn_idx, 0xdb00f0, 0xdb00fc, 0xdb00f8, 0xdb00f4 +}; + +static const u16 btb_int3_k2_attn_idx[32] = { + 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, +}; + +static struct attn_hw_reg btb_int3_k2 = { + 3, 32, btb_int3_k2_attn_idx, 0xdb0108, 0xdb0114, 0xdb0110, 0xdb010c +}; + +static const u16 btb_int4_k2_attn_idx[23] = { + 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, + 96, 97, 98, 99, 100, +}; + +static struct attn_hw_reg btb_int4_k2 = { + 4, 23, btb_int4_k2_attn_idx, 0xdb0120, 0xdb012c, 0xdb0128, 0xdb0124 +}; + +static const u16 btb_int5_k2_attn_idx[32] = { + 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, + 115, + 116, 117, 118, 119, 120, 121, 122, 
123, 124, 125, 126, 127, 128, 129, + 130, 131, + 132, +}; + +static struct attn_hw_reg btb_int5_k2 = { + 5, 32, btb_int5_k2_attn_idx, 0xdb0138, 0xdb0144, 0xdb0140, 0xdb013c +}; + +static const u16 btb_int6_k2_attn_idx[1] = { + 133, +}; + +static struct attn_hw_reg btb_int6_k2 = { + 6, 1, btb_int6_k2_attn_idx, 0xdb0150, 0xdb015c, 0xdb0158, 0xdb0154 +}; + +static const u16 btb_int8_k2_attn_idx[1] = { + 134, +}; + +static struct attn_hw_reg btb_int8_k2 = { + 7, 1, btb_int8_k2_attn_idx, 0xdb0184, 0xdb0190, 0xdb018c, 0xdb0188 +}; + +static const u16 btb_int9_k2_attn_idx[1] = { + 135, +}; + +static struct attn_hw_reg btb_int9_k2 = { + 8, 1, btb_int9_k2_attn_idx, 0xdb019c, 0xdb01a8, 0xdb01a4, 0xdb01a0 +}; + +static const u16 btb_int10_k2_attn_idx[1] = { + 136, +}; + +static struct attn_hw_reg btb_int10_k2 = { + 9, 1, btb_int10_k2_attn_idx, 0xdb01b4, 0xdb01c0, 0xdb01bc, 0xdb01b8 +}; + +static const u16 btb_int11_k2_attn_idx[2] = { + 137, 138, +}; + +static struct attn_hw_reg btb_int11_k2 = { + 10, 2, btb_int11_k2_attn_idx, 0xdb01cc, 0xdb01d8, 0xdb01d4, 0xdb01d0 +}; + +static struct attn_hw_reg *btb_int_k2_regs[11] = { + &btb_int0_k2, &btb_int1_k2, &btb_int2_k2, &btb_int3_k2, &btb_int4_k2, + &btb_int5_k2, &btb_int6_k2, &btb_int8_k2, &btb_int9_k2, &btb_int10_k2, + &btb_int11_k2, +}; + +#ifdef ATTN_DESC +static const char *btb_prty_attn_desc[36] = { + "btb_ll_bank0_mem_prty", + "btb_ll_bank1_mem_prty", + "btb_ll_bank2_mem_prty", + "btb_ll_bank3_mem_prty", + "btb_datapath_registers", + "btb_mem001_i_ecc_rf_int", + "btb_mem008_i_ecc_rf_int", + "btb_mem009_i_ecc_rf_int", + "btb_mem010_i_ecc_rf_int", + "btb_mem011_i_ecc_rf_int", + "btb_mem012_i_ecc_rf_int", + "btb_mem013_i_ecc_rf_int", + "btb_mem014_i_ecc_rf_int", + "btb_mem015_i_ecc_rf_int", + "btb_mem016_i_ecc_rf_int", + "btb_mem002_i_ecc_rf_int", + "btb_mem003_i_ecc_rf_int", + "btb_mem004_i_ecc_rf_int", + "btb_mem005_i_ecc_rf_int", + "btb_mem006_i_ecc_rf_int", + "btb_mem007_i_ecc_rf_int", + "btb_mem033_i_mem_prty", + "btb_mem035_i_mem_prty", + "btb_mem034_i_mem_prty", + "btb_mem032_i_mem_prty", + "btb_mem031_i_mem_prty", + "btb_mem021_i_mem_prty", + "btb_mem022_i_mem_prty", + "btb_mem023_i_mem_prty", + "btb_mem024_i_mem_prty", + "btb_mem025_i_mem_prty", + "btb_mem026_i_mem_prty", + "btb_mem027_i_mem_prty", + "btb_mem028_i_mem_prty", + "btb_mem030_i_mem_prty", + "btb_mem029_i_mem_prty", +}; +#else +#define btb_prty_attn_desc OSAL_NULL +#endif + +static const u16 btb_prty1_bb_a0_attn_idx[27] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 26, 27, + 28, + 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg btb_prty1_bb_a0 = { + 0, 27, btb_prty1_bb_a0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404 +}; + +static struct attn_hw_reg *btb_prty_bb_a0_regs[1] = { + &btb_prty1_bb_a0, +}; + +static const u16 btb_prty0_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg btb_prty0_bb_b0 = { + 0, 5, btb_prty0_bb_b0_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 0xdb01e0 +}; + +static const u16 btb_prty1_bb_b0_attn_idx[23] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 25, 30, 31, + 32, + 33, 34, 35, +}; + +static struct attn_hw_reg btb_prty1_bb_b0 = { + 1, 23, btb_prty1_bb_b0_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404 +}; + +static struct attn_hw_reg *btb_prty_bb_b0_regs[2] = { + &btb_prty0_bb_b0, &btb_prty1_bb_b0, +}; + +static const u16 btb_prty0_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg btb_prty0_k2 = { + 0, 5, btb_prty0_k2_attn_idx, 0xdb01dc, 0xdb01e8, 0xdb01e4, 
0xdb01e0 +}; + +static const u16 btb_prty1_k2_attn_idx[31] = { + 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, + 24, + 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, +}; + +static struct attn_hw_reg btb_prty1_k2 = { + 1, 31, btb_prty1_k2_attn_idx, 0xdb0400, 0xdb040c, 0xdb0408, 0xdb0404 +}; + +static struct attn_hw_reg *btb_prty_k2_regs[2] = { + &btb_prty0_k2, &btb_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_int_attn_desc[1] = { + "pbf_address_error", +}; +#else +#define pbf_int_attn_desc OSAL_NULL +#endif + +static const u16 pbf_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_int0_bb_a0 = { + 0, 1, pbf_int0_bb_a0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184 +}; + +static struct attn_hw_reg *pbf_int_bb_a0_regs[1] = { + &pbf_int0_bb_a0, +}; + +static const u16 pbf_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_int0_bb_b0 = { + 0, 1, pbf_int0_bb_b0_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184 +}; + +static struct attn_hw_reg *pbf_int_bb_b0_regs[1] = { + &pbf_int0_bb_b0, +}; + +static const u16 pbf_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_int0_k2 = { + 0, 1, pbf_int0_k2_attn_idx, 0xd80180, 0xd8018c, 0xd80188, 0xd80184 +}; + +static struct attn_hw_reg *pbf_int_k2_regs[1] = { + &pbf_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *pbf_prty_attn_desc[59] = { + "pbf_datapath_registers", + "pbf_mem041_i_ecc_rf_int", + "pbf_mem042_i_ecc_rf_int", + "pbf_mem033_i_ecc_rf_int", + "pbf_mem003_i_ecc_rf_int", + "pbf_mem018_i_ecc_rf_int", + "pbf_mem009_i_ecc_0_rf_int", + "pbf_mem009_i_ecc_1_rf_int", + "pbf_mem012_i_ecc_0_rf_int", + "pbf_mem012_i_ecc_1_rf_int", + "pbf_mem012_i_ecc_2_rf_int", + "pbf_mem012_i_ecc_3_rf_int", + "pbf_mem012_i_ecc_4_rf_int", + "pbf_mem012_i_ecc_5_rf_int", + "pbf_mem012_i_ecc_6_rf_int", + "pbf_mem012_i_ecc_7_rf_int", + "pbf_mem012_i_ecc_8_rf_int", + "pbf_mem012_i_ecc_9_rf_int", + "pbf_mem012_i_ecc_10_rf_int", + "pbf_mem012_i_ecc_11_rf_int", + "pbf_mem012_i_ecc_12_rf_int", + "pbf_mem012_i_ecc_13_rf_int", + "pbf_mem012_i_ecc_14_rf_int", + "pbf_mem012_i_ecc_15_rf_int", + "pbf_mem040_i_mem_prty", + "pbf_mem039_i_mem_prty", + "pbf_mem038_i_mem_prty", + "pbf_mem034_i_mem_prty", + "pbf_mem032_i_mem_prty", + "pbf_mem031_i_mem_prty", + "pbf_mem030_i_mem_prty", + "pbf_mem029_i_mem_prty", + "pbf_mem022_i_mem_prty", + "pbf_mem023_i_mem_prty", + "pbf_mem021_i_mem_prty", + "pbf_mem020_i_mem_prty", + "pbf_mem001_i_mem_prty", + "pbf_mem002_i_mem_prty", + "pbf_mem006_i_mem_prty", + "pbf_mem007_i_mem_prty", + "pbf_mem005_i_mem_prty", + "pbf_mem004_i_mem_prty", + "pbf_mem028_i_mem_prty", + "pbf_mem026_i_mem_prty", + "pbf_mem027_i_mem_prty", + "pbf_mem019_i_mem_prty", + "pbf_mem016_i_mem_prty", + "pbf_mem017_i_mem_prty", + "pbf_mem008_i_mem_prty", + "pbf_mem011_i_mem_prty", + "pbf_mem010_i_mem_prty", + "pbf_mem024_i_mem_prty", + "pbf_mem025_i_mem_prty", + "pbf_mem037_i_mem_prty", + "pbf_mem036_i_mem_prty", + "pbf_mem035_i_mem_prty", + "pbf_mem014_i_mem_prty", + "pbf_mem015_i_mem_prty", + "pbf_mem013_i_mem_prty", +}; +#else +#define pbf_prty_attn_desc OSAL_NULL +#endif + +static const u16 pbf_prty1_bb_a0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pbf_prty1_bb_a0 = { + 0, 31, pbf_prty1_bb_a0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204 +}; + +static const u16 pbf_prty2_bb_a0_attn_idx[27] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 
49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, +}; + +static struct attn_hw_reg pbf_prty2_bb_a0 = { + 1, 27, pbf_prty2_bb_a0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214 +}; + +static struct attn_hw_reg *pbf_prty_bb_a0_regs[2] = { + &pbf_prty1_bb_a0, &pbf_prty2_bb_a0, +}; + +static const u16 pbf_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_prty0_bb_b0 = { + 0, 1, pbf_prty0_bb_b0_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194 +}; + +static const u16 pbf_prty1_bb_b0_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pbf_prty1_bb_b0 = { + 1, 31, pbf_prty1_bb_b0_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204 +}; + +static const u16 pbf_prty2_bb_b0_attn_idx[27] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, +}; + +static struct attn_hw_reg pbf_prty2_bb_b0 = { + 2, 27, pbf_prty2_bb_b0_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214 +}; + +static struct attn_hw_reg *pbf_prty_bb_b0_regs[3] = { + &pbf_prty0_bb_b0, &pbf_prty1_bb_b0, &pbf_prty2_bb_b0, +}; + +static const u16 pbf_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg pbf_prty0_k2 = { + 0, 1, pbf_prty0_k2_attn_idx, 0xd80190, 0xd8019c, 0xd80198, 0xd80194 +}; + +static const u16 pbf_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg pbf_prty1_k2 = { + 1, 31, pbf_prty1_k2_attn_idx, 0xd80200, 0xd8020c, 0xd80208, 0xd80204 +}; + +static const u16 pbf_prty2_k2_attn_idx[27] = { + 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, + 50, 51, 52, 53, 54, 55, 56, 57, 58, +}; + +static struct attn_hw_reg pbf_prty2_k2 = { + 2, 27, pbf_prty2_k2_attn_idx, 0xd80210, 0xd8021c, 0xd80218, 0xd80214 +}; + +static struct attn_hw_reg *pbf_prty_k2_regs[3] = { + &pbf_prty0_k2, &pbf_prty1_k2, &pbf_prty2_k2, +}; + +#ifdef ATTN_DESC +static const char *rdif_int_attn_desc[9] = { + "rdif_address_error", + "rdif_fatal_dix_err", + "rdif_fatal_config_err", + "rdif_cmd_fifo_err", + "rdif_order_fifo_err", + "rdif_rdata_fifo_err", + "rdif_dif_stop_err", + "rdif_partial_dif_w_eob", + "rdif_l1_dirty_bit", +}; +#else +#define rdif_int_attn_desc OSAL_NULL +#endif + +static const u16 rdif_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg rdif_int0_bb_a0 = { + 0, 8, rdif_int0_bb_a0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184 +}; + +static struct attn_hw_reg *rdif_int_bb_a0_regs[1] = { + &rdif_int0_bb_a0, +}; + +static const u16 rdif_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg rdif_int0_bb_b0 = { + 0, 8, rdif_int0_bb_b0_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184 +}; + +static struct attn_hw_reg *rdif_int_bb_b0_regs[1] = { + &rdif_int0_bb_b0, +}; + +static const u16 rdif_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg rdif_int0_k2 = { + 0, 9, rdif_int0_k2_attn_idx, 0x300180, 0x30018c, 0x300188, 0x300184 +}; + +static struct attn_hw_reg *rdif_int_k2_regs[1] = { + &rdif_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *rdif_prty_attn_desc[2] = { + "rdif_unused_0", + "rdif_datapath_registers", +}; +#else +#define rdif_prty_attn_desc OSAL_NULL +#endif + +static const u16 rdif_prty0_bb_b0_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg rdif_prty0_bb_b0 = { + 0, 1, 
rdif_prty0_bb_b0_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194 +}; + +static struct attn_hw_reg *rdif_prty_bb_b0_regs[1] = { + &rdif_prty0_bb_b0, +}; + +static const u16 rdif_prty0_k2_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg rdif_prty0_k2 = { + 0, 1, rdif_prty0_k2_attn_idx, 0x300190, 0x30019c, 0x300198, 0x300194 +}; + +static struct attn_hw_reg *rdif_prty_k2_regs[1] = { + &rdif_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *tdif_int_attn_desc[9] = { + "tdif_address_error", + "tdif_fatal_dix_err", + "tdif_fatal_config_err", + "tdif_cmd_fifo_err", + "tdif_order_fifo_err", + "tdif_rdata_fifo_err", + "tdif_dif_stop_err", + "tdif_partial_dif_w_eob", + "tdif_l1_dirty_bit", +}; +#else +#define tdif_int_attn_desc OSAL_NULL +#endif + +static const u16 tdif_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tdif_int0_bb_a0 = { + 0, 8, tdif_int0_bb_a0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184 +}; + +static struct attn_hw_reg *tdif_int_bb_a0_regs[1] = { + &tdif_int0_bb_a0, +}; + +static const u16 tdif_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg tdif_int0_bb_b0 = { + 0, 8, tdif_int0_bb_b0_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184 +}; + +static struct attn_hw_reg *tdif_int_bb_b0_regs[1] = { + &tdif_int0_bb_b0, +}; + +static const u16 tdif_int0_k2_attn_idx[9] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, +}; + +static struct attn_hw_reg tdif_int0_k2 = { + 0, 9, tdif_int0_k2_attn_idx, 0x310180, 0x31018c, 0x310188, 0x310184 +}; + +static struct attn_hw_reg *tdif_int_k2_regs[1] = { + &tdif_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tdif_prty_attn_desc[13] = { + "tdif_unused_0", + "tdif_datapath_registers", + "tdif_mem005_i_ecc_rf_int", + "tdif_mem009_i_ecc_rf_int", + "tdif_mem010_i_ecc_rf_int", + "tdif_mem011_i_ecc_rf_int", + "tdif_mem001_i_mem_prty", + "tdif_mem003_i_mem_prty", + "tdif_mem002_i_mem_prty", + "tdif_mem006_i_mem_prty", + "tdif_mem007_i_mem_prty", + "tdif_mem008_i_mem_prty", + "tdif_mem004_i_mem_prty", +}; +#else +#define tdif_prty_attn_desc OSAL_NULL +#endif + +static const u16 tdif_prty1_bb_a0_attn_idx[11] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg tdif_prty1_bb_a0 = { + 0, 11, tdif_prty1_bb_a0_attn_idx, 0x310200, 0x31020c, 0x310208, + 0x310204 +}; + +static struct attn_hw_reg *tdif_prty_bb_a0_regs[1] = { + &tdif_prty1_bb_a0, +}; + +static const u16 tdif_prty0_bb_b0_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg tdif_prty0_bb_b0 = { + 0, 1, tdif_prty0_bb_b0_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194 +}; + +static const u16 tdif_prty1_bb_b0_attn_idx[11] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg tdif_prty1_bb_b0 = { + 1, 11, tdif_prty1_bb_b0_attn_idx, 0x310200, 0x31020c, 0x310208, + 0x310204 +}; + +static struct attn_hw_reg *tdif_prty_bb_b0_regs[2] = { + &tdif_prty0_bb_b0, &tdif_prty1_bb_b0, +}; + +static const u16 tdif_prty0_k2_attn_idx[1] = { + 1, +}; + +static struct attn_hw_reg tdif_prty0_k2 = { + 0, 1, tdif_prty0_k2_attn_idx, 0x310190, 0x31019c, 0x310198, 0x310194 +}; + +static const u16 tdif_prty1_k2_attn_idx[11] = { + 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg tdif_prty1_k2 = { + 1, 11, tdif_prty1_k2_attn_idx, 0x310200, 0x31020c, 0x310208, 0x310204 +}; + +static struct attn_hw_reg *tdif_prty_k2_regs[2] = { + &tdif_prty0_k2, &tdif_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *cdu_int_attn_desc[8] = { + "cdu_address_error", + "cdu_ccfc_ld_l1_num_error", + 
"cdu_tcfc_ld_l1_num_error", + "cdu_ccfc_wb_l1_num_error", + "cdu_tcfc_wb_l1_num_error", + "cdu_ccfc_cvld_error", + "cdu_tcfc_cvld_error", + "cdu_bvalid_error", +}; +#else +#define cdu_int_attn_desc OSAL_NULL +#endif + +static const u16 cdu_int0_bb_a0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg cdu_int0_bb_a0 = { + 0, 8, cdu_int0_bb_a0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc +}; + +static struct attn_hw_reg *cdu_int_bb_a0_regs[1] = { + &cdu_int0_bb_a0, +}; + +static const u16 cdu_int0_bb_b0_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg cdu_int0_bb_b0 = { + 0, 8, cdu_int0_bb_b0_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc +}; + +static struct attn_hw_reg *cdu_int_bb_b0_regs[1] = { + &cdu_int0_bb_b0, +}; + +static const u16 cdu_int0_k2_attn_idx[8] = { + 0, 1, 2, 3, 4, 5, 6, 7, +}; + +static struct attn_hw_reg cdu_int0_k2 = { + 0, 8, cdu_int0_k2_attn_idx, 0x5801c0, 0x5801c4, 0x5801c8, 0x5801cc +}; + +static struct attn_hw_reg *cdu_int_k2_regs[1] = { + &cdu_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *cdu_prty_attn_desc[5] = { + "cdu_mem001_i_mem_prty", + "cdu_mem004_i_mem_prty", + "cdu_mem002_i_mem_prty", + "cdu_mem005_i_mem_prty", + "cdu_mem003_i_mem_prty", +}; +#else +#define cdu_prty_attn_desc OSAL_NULL +#endif + +static const u16 cdu_prty1_bb_a0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg cdu_prty1_bb_a0 = { + 0, 5, cdu_prty1_bb_a0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204 +}; + +static struct attn_hw_reg *cdu_prty_bb_a0_regs[1] = { + &cdu_prty1_bb_a0, +}; + +static const u16 cdu_prty1_bb_b0_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg cdu_prty1_bb_b0 = { + 0, 5, cdu_prty1_bb_b0_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204 +}; + +static struct attn_hw_reg *cdu_prty_bb_b0_regs[1] = { + &cdu_prty1_bb_b0, +}; + +static const u16 cdu_prty1_k2_attn_idx[5] = { + 0, 1, 2, 3, 4, +}; + +static struct attn_hw_reg cdu_prty1_k2 = { + 0, 5, cdu_prty1_k2_attn_idx, 0x580200, 0x58020c, 0x580208, 0x580204 +}; + +static struct attn_hw_reg *cdu_prty_k2_regs[1] = { + &cdu_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ccfc_int_attn_desc[2] = { + "ccfc_address_error", + "ccfc_exe_error", +}; +#else +#define ccfc_int_attn_desc OSAL_NULL +#endif + +static const u16 ccfc_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_int0_bb_a0 = { + 0, 2, ccfc_int0_bb_a0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184 +}; + +static struct attn_hw_reg *ccfc_int_bb_a0_regs[1] = { + &ccfc_int0_bb_a0, +}; + +static const u16 ccfc_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_int0_bb_b0 = { + 0, 2, ccfc_int0_bb_b0_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184 +}; + +static struct attn_hw_reg *ccfc_int_bb_b0_regs[1] = { + &ccfc_int0_bb_b0, +}; + +static const u16 ccfc_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_int0_k2 = { + 0, 2, ccfc_int0_k2_attn_idx, 0x2e0180, 0x2e018c, 0x2e0188, 0x2e0184 +}; + +static struct attn_hw_reg *ccfc_int_k2_regs[1] = { + &ccfc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ccfc_prty_attn_desc[10] = { + "ccfc_mem001_i_ecc_rf_int", + "ccfc_mem003_i_mem_prty", + "ccfc_mem007_i_mem_prty", + "ccfc_mem006_i_mem_prty", + "ccfc_ccam_par_err", + "ccfc_scam_par_err", + "ccfc_lc_que_ram_porta_lsb_par_err", + "ccfc_lc_que_ram_porta_msb_par_err", + "ccfc_lc_que_ram_portb_lsb_par_err", + "ccfc_lc_que_ram_portb_msb_par_err", +}; +#else +#define ccfc_prty_attn_desc OSAL_NULL +#endif + +static 
const u16 ccfc_prty1_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg ccfc_prty1_bb_a0 = { + 0, 4, ccfc_prty1_bb_a0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204 +}; + +static const u16 ccfc_prty0_bb_a0_attn_idx[2] = { + 4, 5, +}; + +static struct attn_hw_reg ccfc_prty0_bb_a0 = { + 1, 2, ccfc_prty0_bb_a0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8 +}; + +static struct attn_hw_reg *ccfc_prty_bb_a0_regs[2] = { + &ccfc_prty1_bb_a0, &ccfc_prty0_bb_a0, +}; + +static const u16 ccfc_prty1_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_prty1_bb_b0 = { + 0, 2, ccfc_prty1_bb_b0_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204 +}; + +static const u16 ccfc_prty0_bb_b0_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ccfc_prty0_bb_b0 = { + 1, 6, ccfc_prty0_bb_b0_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8 +}; + +static struct attn_hw_reg *ccfc_prty_bb_b0_regs[2] = { + &ccfc_prty1_bb_b0, &ccfc_prty0_bb_b0, +}; + +static const u16 ccfc_prty1_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg ccfc_prty1_k2 = { + 0, 2, ccfc_prty1_k2_attn_idx, 0x2e0200, 0x2e020c, 0x2e0208, 0x2e0204 +}; + +static const u16 ccfc_prty0_k2_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg ccfc_prty0_k2 = { + 1, 6, ccfc_prty0_k2_attn_idx, 0x2e05e4, 0x2e05f0, 0x2e05ec, 0x2e05e8 +}; + +static struct attn_hw_reg *ccfc_prty_k2_regs[2] = { + &ccfc_prty1_k2, &ccfc_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *tcfc_int_attn_desc[2] = { + "tcfc_address_error", + "tcfc_exe_error", +}; +#else +#define tcfc_int_attn_desc OSAL_NULL +#endif + +static const u16 tcfc_int0_bb_a0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_int0_bb_a0 = { + 0, 2, tcfc_int0_bb_a0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184 +}; + +static struct attn_hw_reg *tcfc_int_bb_a0_regs[1] = { + &tcfc_int0_bb_a0, +}; + +static const u16 tcfc_int0_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_int0_bb_b0 = { + 0, 2, tcfc_int0_bb_b0_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184 +}; + +static struct attn_hw_reg *tcfc_int_bb_b0_regs[1] = { + &tcfc_int0_bb_b0, +}; + +static const u16 tcfc_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_int0_k2 = { + 0, 2, tcfc_int0_k2_attn_idx, 0x2d0180, 0x2d018c, 0x2d0188, 0x2d0184 +}; + +static struct attn_hw_reg *tcfc_int_k2_regs[1] = { + &tcfc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *tcfc_prty_attn_desc[10] = { + "tcfc_mem002_i_mem_prty", + "tcfc_mem001_i_mem_prty", + "tcfc_mem006_i_mem_prty", + "tcfc_mem005_i_mem_prty", + "tcfc_ccam_par_err", + "tcfc_scam_par_err", + "tcfc_lc_que_ram_porta_lsb_par_err", + "tcfc_lc_que_ram_porta_msb_par_err", + "tcfc_lc_que_ram_portb_lsb_par_err", + "tcfc_lc_que_ram_portb_msb_par_err", +}; +#else +#define tcfc_prty_attn_desc OSAL_NULL +#endif + +static const u16 tcfc_prty1_bb_a0_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg tcfc_prty1_bb_a0 = { + 0, 4, tcfc_prty1_bb_a0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204 +}; + +static const u16 tcfc_prty0_bb_a0_attn_idx[2] = { + 4, 5, +}; + +static struct attn_hw_reg tcfc_prty0_bb_a0 = { + 1, 2, tcfc_prty0_bb_a0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8 +}; + +static struct attn_hw_reg *tcfc_prty_bb_a0_regs[2] = { + &tcfc_prty1_bb_a0, &tcfc_prty0_bb_a0, +}; + +static const u16 tcfc_prty1_bb_b0_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_prty1_bb_b0 = { + 0, 2, tcfc_prty1_bb_b0_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204 +}; + 
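Each attn_hw_reg entry added above bundles a register index within its block, the number of valid attention bits, a per-bit index array into the matching *_attn_desc string table, and four register addresses (status, status-clear, status write-mask and mask, in the order they appear in the initializers). As a reading aid only, the short sketch below shows how a consumer might walk one of these per-block register lists (for example tcfc_prty_bb_b0_regs) and report asserted bits; the struct field names, the reg_rd() accessor and report_attn() are illustrative assumptions inferred from the initializers, not the driver's actual attention-handling API.

#include <stdint.h>
#include <stdio.h>

typedef uint16_t u16;
typedef uint32_t u32;

/* Assumed field layout, mirroring the initializer order used in this file:
 * { reg index, number of bits, bit-index array, sts, sts_clr, sts_wr_mask,
 *   mask } -- the names here are illustrative, not taken from the patch.
 */
struct attn_hw_reg {
	u16 reg_idx;             /* index of the register within its block */
	u16 num_of_bits;         /* number of valid attention bits */
	const u16 *bit_attn_idx; /* per-bit index into the *_attn_desc table */
	u32 sts_addr;            /* status register address */
	u32 sts_clr_addr;        /* read-to-clear status register address */
	u32 sts_wr_mask_addr;    /* status write-mask register address */
	u32 mask_addr;           /* attention mask register address */
};

/* Hypothetical register-read helper; a real driver would go through its own
 * BAR access routine instead of this stub.
 */
static u32 reg_rd(u32 addr)
{
	(void)addr;
	return 0;
}

/* Walk one block's register list and print every asserted attention bit,
 * using the description table when it was compiled in (ATTN_DESC), or the
 * raw register/bit position otherwise.
 */
static void report_attn(struct attn_hw_reg **regs, int num_regs,
			const char **desc)
{
	for (int r = 0; r < num_regs; r++) {
		u32 sts = reg_rd(regs[r]->sts_addr);

		for (u16 bit = 0; bit < regs[r]->num_of_bits; bit++) {
			if (!(sts & (1u << bit)))
				continue;
			if (desc != NULL)
				printf("attn: %s\n",
				       desc[regs[r]->bit_attn_idx[bit]]);
			else
				printf("attn: reg %u bit %u\n",
				       regs[r]->reg_idx, bit);
		}
	}
}

A caller would pass one of the generated block lists together with its description table, e.g. report_attn(tcfc_prty_bb_b0_regs, 2, tcfc_prty_attn_desc); when ATTN_DESC is not defined the description pointer is OSAL_NULL, which is why the sketch falls back to printing the register index and bit position.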
+static const u16 tcfc_prty0_bb_b0_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tcfc_prty0_bb_b0 = { + 1, 6, tcfc_prty0_bb_b0_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8 +}; + +static struct attn_hw_reg *tcfc_prty_bb_b0_regs[2] = { + &tcfc_prty1_bb_b0, &tcfc_prty0_bb_b0, +}; + +static const u16 tcfc_prty1_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg tcfc_prty1_k2 = { + 0, 2, tcfc_prty1_k2_attn_idx, 0x2d0200, 0x2d020c, 0x2d0208, 0x2d0204 +}; + +static const u16 tcfc_prty0_k2_attn_idx[6] = { + 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg tcfc_prty0_k2 = { + 1, 6, tcfc_prty0_k2_attn_idx, 0x2d05e4, 0x2d05f0, 0x2d05ec, 0x2d05e8 +}; + +static struct attn_hw_reg *tcfc_prty_k2_regs[2] = { + &tcfc_prty1_k2, &tcfc_prty0_k2, +}; + +#ifdef ATTN_DESC +static const char *igu_int_attn_desc[11] = { + "igu_address_error", + "igu_ctrl_fifo_error_err", + "igu_pxp_req_length_too_big", + "igu_host_tries2access_prod_upd", + "igu_vf_tries2acc_attn_cmd", + "igu_mme_bigger_then_5", + "igu_sb_index_is_not_valid", + "igu_durin_int_read_with_simd_dis", + "igu_cmd_fid_not_match", + "igu_segment_access_invalid", + "igu_attn_prod_acc", +}; +#else +#define igu_int_attn_desc OSAL_NULL +#endif + +static const u16 igu_int0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg igu_int0_bb_a0 = { + 0, 11, igu_int0_bb_a0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184 +}; + +static struct attn_hw_reg *igu_int_bb_a0_regs[1] = { + &igu_int0_bb_a0, +}; + +static const u16 igu_int0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg igu_int0_bb_b0 = { + 0, 11, igu_int0_bb_b0_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184 +}; + +static struct attn_hw_reg *igu_int_bb_b0_regs[1] = { + &igu_int0_bb_b0, +}; + +static const u16 igu_int0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg igu_int0_k2 = { + 0, 11, igu_int0_k2_attn_idx, 0x180180, 0x18018c, 0x180188, 0x180184 +}; + +static struct attn_hw_reg *igu_int_k2_regs[1] = { + &igu_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *igu_prty_attn_desc[42] = { + "igu_cam_parity", + "igu_mem009_i_ecc_rf_int", + "igu_mem015_i_mem_prty", + "igu_mem016_i_mem_prty", + "igu_mem017_i_mem_prty", + "igu_mem018_i_mem_prty", + "igu_mem019_i_mem_prty", + "igu_mem001_i_mem_prty", + "igu_mem002_i_mem_prty_0", + "igu_mem002_i_mem_prty_1", + "igu_mem004_i_mem_prty_0", + "igu_mem004_i_mem_prty_1", + "igu_mem004_i_mem_prty_2", + "igu_mem003_i_mem_prty", + "igu_mem005_i_mem_prty", + "igu_mem006_i_mem_prty_0", + "igu_mem006_i_mem_prty_1", + "igu_mem008_i_mem_prty_0", + "igu_mem008_i_mem_prty_1", + "igu_mem008_i_mem_prty_2", + "igu_mem007_i_mem_prty", + "igu_mem010_i_mem_prty_0", + "igu_mem010_i_mem_prty_1", + "igu_mem012_i_mem_prty_0", + "igu_mem012_i_mem_prty_1", + "igu_mem012_i_mem_prty_2", + "igu_mem011_i_mem_prty", + "igu_mem013_i_mem_prty", + "igu_mem014_i_mem_prty", + "igu_mem020_i_mem_prty", + "igu_mem003_i_mem_prty_0", + "igu_mem003_i_mem_prty_1", + "igu_mem003_i_mem_prty_2", + "igu_mem002_i_mem_prty", + "igu_mem007_i_mem_prty_0", + "igu_mem007_i_mem_prty_1", + "igu_mem007_i_mem_prty_2", + "igu_mem006_i_mem_prty", + "igu_mem010_i_mem_prty_2", + "igu_mem010_i_mem_prty_3", + "igu_mem013_i_mem_prty_0", + "igu_mem013_i_mem_prty_1", +}; +#else +#define igu_prty_attn_desc OSAL_NULL +#endif + +static const u16 igu_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg igu_prty0_bb_a0 = { + 0, 1, igu_prty0_bb_a0_attn_idx, 
0x180190, 0x18019c, 0x180198, 0x180194 +}; + +static const u16 igu_prty1_bb_a0_attn_idx[31] = { + 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29, + 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg igu_prty1_bb_a0 = { + 1, 31, igu_prty1_bb_a0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204 +}; + +static const u16 igu_prty2_bb_a0_attn_idx[1] = { + 2, +}; + +static struct attn_hw_reg igu_prty2_bb_a0 = { + 2, 1, igu_prty2_bb_a0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214 +}; + +static struct attn_hw_reg *igu_prty_bb_a0_regs[3] = { + &igu_prty0_bb_a0, &igu_prty1_bb_a0, &igu_prty2_bb_a0, +}; + +static const u16 igu_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg igu_prty0_bb_b0 = { + 0, 1, igu_prty0_bb_b0_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194 +}; + +static const u16 igu_prty1_bb_b0_attn_idx[31] = { + 1, 3, 4, 5, 6, 7, 10, 11, 14, 17, 18, 21, 22, 23, 24, 25, 26, 28, 29, + 30, + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, +}; + +static struct attn_hw_reg igu_prty1_bb_b0 = { + 1, 31, igu_prty1_bb_b0_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204 +}; + +static const u16 igu_prty2_bb_b0_attn_idx[1] = { + 2, +}; + +static struct attn_hw_reg igu_prty2_bb_b0 = { + 2, 1, igu_prty2_bb_b0_attn_idx, 0x180210, 0x18021c, 0x180218, 0x180214 +}; + +static struct attn_hw_reg *igu_prty_bb_b0_regs[3] = { + &igu_prty0_bb_b0, &igu_prty1_bb_b0, &igu_prty2_bb_b0, +}; + +static const u16 igu_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg igu_prty0_k2 = { + 0, 1, igu_prty0_k2_attn_idx, 0x180190, 0x18019c, 0x180198, 0x180194 +}; + +static const u16 igu_prty1_k2_attn_idx[28] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, +}; + +static struct attn_hw_reg igu_prty1_k2 = { + 1, 28, igu_prty1_k2_attn_idx, 0x180200, 0x18020c, 0x180208, 0x180204 +}; + +static struct attn_hw_reg *igu_prty_k2_regs[2] = { + &igu_prty0_k2, &igu_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *cau_int_attn_desc[11] = { + "cau_address_error", + "cau_unauthorized_pxp_rd_cmd", + "cau_unauthorized_pxp_length_cmd", + "cau_pxp_sb_address_error", + "cau_pxp_pi_number_error", + "cau_cleanup_reg_sb_idx_error", + "cau_fsm_invalid_line", + "cau_cqe_fifo_err", + "cau_igu_wdata_fifo_err", + "cau_igu_req_fifo_err", + "cau_igu_cmd_fifo_err", +}; +#else +#define cau_int_attn_desc OSAL_NULL +#endif + +static const u16 cau_int0_bb_a0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg cau_int0_bb_a0 = { + 0, 11, cau_int0_bb_a0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0 +}; + +static struct attn_hw_reg *cau_int_bb_a0_regs[1] = { + &cau_int0_bb_a0, +}; + +static const u16 cau_int0_bb_b0_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg cau_int0_bb_b0 = { + 0, 11, cau_int0_bb_b0_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0 +}; + +static struct attn_hw_reg *cau_int_bb_b0_regs[1] = { + &cau_int0_bb_b0, +}; + +static const u16 cau_int0_k2_attn_idx[11] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, +}; + +static struct attn_hw_reg cau_int0_k2 = { + 0, 11, cau_int0_k2_attn_idx, 0x1c00d4, 0x1c00d8, 0x1c00dc, 0x1c00e0 +}; + +static struct attn_hw_reg *cau_int_k2_regs[1] = { + &cau_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *cau_prty_attn_desc[15] = { + "cau_mem006_i_ecc_rf_int", + "cau_mem001_i_ecc_0_rf_int", + "cau_mem001_i_ecc_1_rf_int", + "cau_mem002_i_ecc_rf_int", + "cau_mem004_i_ecc_rf_int", + 
"cau_mem005_i_mem_prty", + "cau_mem007_i_mem_prty", + "cau_mem008_i_mem_prty", + "cau_mem009_i_mem_prty", + "cau_mem010_i_mem_prty", + "cau_mem011_i_mem_prty", + "cau_mem003_i_mem_prty_0", + "cau_mem003_i_mem_prty_1", + "cau_mem002_i_mem_prty", + "cau_mem004_i_mem_prty", +}; +#else +#define cau_prty_attn_desc OSAL_NULL +#endif + +static const u16 cau_prty1_bb_a0_attn_idx[13] = { + 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg cau_prty1_bb_a0 = { + 0, 13, cau_prty1_bb_a0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204 +}; + +static struct attn_hw_reg *cau_prty_bb_a0_regs[1] = { + &cau_prty1_bb_a0, +}; + +static const u16 cau_prty1_bb_b0_attn_idx[13] = { + 0, 1, 2, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, +}; + +static struct attn_hw_reg cau_prty1_bb_b0 = { + 0, 13, cau_prty1_bb_b0_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204 +}; + +static struct attn_hw_reg *cau_prty_bb_b0_regs[1] = { + &cau_prty1_bb_b0, +}; + +static const u16 cau_prty1_k2_attn_idx[13] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, +}; + +static struct attn_hw_reg cau_prty1_k2 = { + 0, 13, cau_prty1_k2_attn_idx, 0x1c0200, 0x1c020c, 0x1c0208, 0x1c0204 +}; + +static struct attn_hw_reg *cau_prty_k2_regs[1] = { + &cau_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *umac_int_attn_desc[2] = { + "umac_address_error", + "umac_tx_overflow", +}; +#else +#define umac_int_attn_desc OSAL_NULL +#endif + +static const u16 umac_int0_k2_attn_idx[2] = { + 0, 1, +}; + +static struct attn_hw_reg umac_int0_k2 = { + 0, 2, umac_int0_k2_attn_idx, 0x51180, 0x5118c, 0x51188, 0x51184 +}; + +static struct attn_hw_reg *umac_int_k2_regs[1] = { + &umac_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dbg_int_attn_desc[1] = { + "dbg_address_error", +}; +#else +#define dbg_int_attn_desc OSAL_NULL +#endif + +static const u16 dbg_int0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_int0_bb_a0 = { + 0, 1, dbg_int0_bb_a0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184 +}; + +static struct attn_hw_reg *dbg_int_bb_a0_regs[1] = { + &dbg_int0_bb_a0, +}; + +static const u16 dbg_int0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_int0_bb_b0 = { + 0, 1, dbg_int0_bb_b0_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184 +}; + +static struct attn_hw_reg *dbg_int_bb_b0_regs[1] = { + &dbg_int0_bb_b0, +}; + +static const u16 dbg_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_int0_k2 = { + 0, 1, dbg_int0_k2_attn_idx, 0x10180, 0x1018c, 0x10188, 0x10184 +}; + +static struct attn_hw_reg *dbg_int_k2_regs[1] = { + &dbg_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *dbg_prty_attn_desc[1] = { + "dbg_mem001_i_mem_prty", +}; +#else +#define dbg_prty_attn_desc OSAL_NULL +#endif + +static const u16 dbg_prty1_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_prty1_bb_a0 = { + 0, 1, dbg_prty1_bb_a0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204 +}; + +static struct attn_hw_reg *dbg_prty_bb_a0_regs[1] = { + &dbg_prty1_bb_a0, +}; + +static const u16 dbg_prty1_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_prty1_bb_b0 = { + 0, 1, dbg_prty1_bb_b0_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204 +}; + +static struct attn_hw_reg *dbg_prty_bb_b0_regs[1] = { + &dbg_prty1_bb_b0, +}; + +static const u16 dbg_prty1_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg dbg_prty1_k2 = { + 0, 1, dbg_prty1_k2_attn_idx, 0x10200, 0x1020c, 0x10208, 0x10204 +}; + +static struct attn_hw_reg *dbg_prty_k2_regs[1] = { + &dbg_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char 
*nig_int_attn_desc[196] = { + "nig_address_error", + "nig_debug_fifo_error", + "nig_dorq_fifo_error", + "nig_dbg_syncfifo_error_wr", + "nig_dorq_syncfifo_error_wr", + "nig_storm_syncfifo_error_wr", + "nig_dbgmux_syncfifo_error_wr", + "nig_msdm_syncfifo_error_wr", + "nig_tsdm_syncfifo_error_wr", + "nig_usdm_syncfifo_error_wr", + "nig_xsdm_syncfifo_error_wr", + "nig_ysdm_syncfifo_error_wr", + "nig_tx_sopq0_error", + "nig_tx_sopq1_error", + "nig_tx_sopq2_error", + "nig_tx_sopq3_error", + "nig_tx_sopq4_error", + "nig_tx_sopq5_error", + "nig_tx_sopq6_error", + "nig_tx_sopq7_error", + "nig_tx_sopq8_error", + "nig_tx_sopq9_error", + "nig_tx_sopq10_error", + "nig_tx_sopq11_error", + "nig_tx_sopq12_error", + "nig_tx_sopq13_error", + "nig_tx_sopq14_error", + "nig_tx_sopq15_error", + "nig_lb_sopq0_error", + "nig_lb_sopq1_error", + "nig_lb_sopq2_error", + "nig_lb_sopq3_error", + "nig_lb_sopq4_error", + "nig_lb_sopq5_error", + "nig_lb_sopq6_error", + "nig_lb_sopq7_error", + "nig_lb_sopq8_error", + "nig_lb_sopq9_error", + "nig_lb_sopq10_error", + "nig_lb_sopq11_error", + "nig_lb_sopq12_error", + "nig_lb_sopq13_error", + "nig_lb_sopq14_error", + "nig_lb_sopq15_error", + "nig_p0_purelb_sopq_error", + "nig_p0_rx_macfifo_error", + "nig_p0_tx_macfifo_error", + "nig_p0_tx_bmb_fifo_error", + "nig_p0_lb_bmb_fifo_error", + "nig_p0_tx_btb_fifo_error", + "nig_p0_lb_btb_fifo_error", + "nig_p0_rx_llh_dfifo_error", + "nig_p0_tx_llh_dfifo_error", + "nig_p0_lb_llh_dfifo_error", + "nig_p0_rx_llh_hfifo_error", + "nig_p0_tx_llh_hfifo_error", + "nig_p0_lb_llh_hfifo_error", + "nig_p0_rx_llh_rfifo_error", + "nig_p0_tx_llh_rfifo_error", + "nig_p0_lb_llh_rfifo_error", + "nig_p0_storm_fifo_error", + "nig_p0_storm_dscr_fifo_error", + "nig_p0_tx_gnt_fifo_error", + "nig_p0_lb_gnt_fifo_error", + "nig_p0_tx_pause_too_long_int", + "nig_p0_tc0_pause_too_long_int", + "nig_p0_tc1_pause_too_long_int", + "nig_p0_tc2_pause_too_long_int", + "nig_p0_tc3_pause_too_long_int", + "nig_p0_tc4_pause_too_long_int", + "nig_p0_tc5_pause_too_long_int", + "nig_p0_tc6_pause_too_long_int", + "nig_p0_tc7_pause_too_long_int", + "nig_p0_lb_tc0_pause_too_long_int", + "nig_p0_lb_tc1_pause_too_long_int", + "nig_p0_lb_tc2_pause_too_long_int", + "nig_p0_lb_tc3_pause_too_long_int", + "nig_p0_lb_tc4_pause_too_long_int", + "nig_p0_lb_tc5_pause_too_long_int", + "nig_p0_lb_tc6_pause_too_long_int", + "nig_p0_lb_tc7_pause_too_long_int", + "nig_p0_lb_tc8_pause_too_long_int", + "nig_p1_purelb_sopq_error", + "nig_p1_rx_macfifo_error", + "nig_p1_tx_macfifo_error", + "nig_p1_tx_bmb_fifo_error", + "nig_p1_lb_bmb_fifo_error", + "nig_p1_tx_btb_fifo_error", + "nig_p1_lb_btb_fifo_error", + "nig_p1_rx_llh_dfifo_error", + "nig_p1_tx_llh_dfifo_error", + "nig_p1_lb_llh_dfifo_error", + "nig_p1_rx_llh_hfifo_error", + "nig_p1_tx_llh_hfifo_error", + "nig_p1_lb_llh_hfifo_error", + "nig_p1_rx_llh_rfifo_error", + "nig_p1_tx_llh_rfifo_error", + "nig_p1_lb_llh_rfifo_error", + "nig_p1_storm_fifo_error", + "nig_p1_storm_dscr_fifo_error", + "nig_p1_tx_gnt_fifo_error", + "nig_p1_lb_gnt_fifo_error", + "nig_p1_tx_pause_too_long_int", + "nig_p1_tc0_pause_too_long_int", + "nig_p1_tc1_pause_too_long_int", + "nig_p1_tc2_pause_too_long_int", + "nig_p1_tc3_pause_too_long_int", + "nig_p1_tc4_pause_too_long_int", + "nig_p1_tc5_pause_too_long_int", + "nig_p1_tc6_pause_too_long_int", + "nig_p1_tc7_pause_too_long_int", + "nig_p1_lb_tc0_pause_too_long_int", + "nig_p1_lb_tc1_pause_too_long_int", + "nig_p1_lb_tc2_pause_too_long_int", + "nig_p1_lb_tc3_pause_too_long_int", + "nig_p1_lb_tc4_pause_too_long_int", + 
"nig_p1_lb_tc5_pause_too_long_int", + "nig_p1_lb_tc6_pause_too_long_int", + "nig_p1_lb_tc7_pause_too_long_int", + "nig_p1_lb_tc8_pause_too_long_int", + "nig_p2_purelb_sopq_error", + "nig_p2_rx_macfifo_error", + "nig_p2_tx_macfifo_error", + "nig_p2_tx_bmb_fifo_error", + "nig_p2_lb_bmb_fifo_error", + "nig_p2_tx_btb_fifo_error", + "nig_p2_lb_btb_fifo_error", + "nig_p2_rx_llh_dfifo_error", + "nig_p2_tx_llh_dfifo_error", + "nig_p2_lb_llh_dfifo_error", + "nig_p2_rx_llh_hfifo_error", + "nig_p2_tx_llh_hfifo_error", + "nig_p2_lb_llh_hfifo_error", + "nig_p2_rx_llh_rfifo_error", + "nig_p2_tx_llh_rfifo_error", + "nig_p2_lb_llh_rfifo_error", + "nig_p2_storm_fifo_error", + "nig_p2_storm_dscr_fifo_error", + "nig_p2_tx_gnt_fifo_error", + "nig_p2_lb_gnt_fifo_error", + "nig_p2_tx_pause_too_long_int", + "nig_p2_tc0_pause_too_long_int", + "nig_p2_tc1_pause_too_long_int", + "nig_p2_tc2_pause_too_long_int", + "nig_p2_tc3_pause_too_long_int", + "nig_p2_tc4_pause_too_long_int", + "nig_p2_tc5_pause_too_long_int", + "nig_p2_tc6_pause_too_long_int", + "nig_p2_tc7_pause_too_long_int", + "nig_p2_lb_tc0_pause_too_long_int", + "nig_p2_lb_tc1_pause_too_long_int", + "nig_p2_lb_tc2_pause_too_long_int", + "nig_p2_lb_tc3_pause_too_long_int", + "nig_p2_lb_tc4_pause_too_long_int", + "nig_p2_lb_tc5_pause_too_long_int", + "nig_p2_lb_tc6_pause_too_long_int", + "nig_p2_lb_tc7_pause_too_long_int", + "nig_p2_lb_tc8_pause_too_long_int", + "nig_p3_purelb_sopq_error", + "nig_p3_rx_macfifo_error", + "nig_p3_tx_macfifo_error", + "nig_p3_tx_bmb_fifo_error", + "nig_p3_lb_bmb_fifo_error", + "nig_p3_tx_btb_fifo_error", + "nig_p3_lb_btb_fifo_error", + "nig_p3_rx_llh_dfifo_error", + "nig_p3_tx_llh_dfifo_error", + "nig_p3_lb_llh_dfifo_error", + "nig_p3_rx_llh_hfifo_error", + "nig_p3_tx_llh_hfifo_error", + "nig_p3_lb_llh_hfifo_error", + "nig_p3_rx_llh_rfifo_error", + "nig_p3_tx_llh_rfifo_error", + "nig_p3_lb_llh_rfifo_error", + "nig_p3_storm_fifo_error", + "nig_p3_storm_dscr_fifo_error", + "nig_p3_tx_gnt_fifo_error", + "nig_p3_lb_gnt_fifo_error", + "nig_p3_tx_pause_too_long_int", + "nig_p3_tc0_pause_too_long_int", + "nig_p3_tc1_pause_too_long_int", + "nig_p3_tc2_pause_too_long_int", + "nig_p3_tc3_pause_too_long_int", + "nig_p3_tc4_pause_too_long_int", + "nig_p3_tc5_pause_too_long_int", + "nig_p3_tc6_pause_too_long_int", + "nig_p3_tc7_pause_too_long_int", + "nig_p3_lb_tc0_pause_too_long_int", + "nig_p3_lb_tc1_pause_too_long_int", + "nig_p3_lb_tc2_pause_too_long_int", + "nig_p3_lb_tc3_pause_too_long_int", + "nig_p3_lb_tc4_pause_too_long_int", + "nig_p3_lb_tc5_pause_too_long_int", + "nig_p3_lb_tc6_pause_too_long_int", + "nig_p3_lb_tc7_pause_too_long_int", + "nig_p3_lb_tc8_pause_too_long_int", +}; +#else +#define nig_int_attn_desc OSAL_NULL +#endif + +static const u16 nig_int0_bb_a0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg nig_int0_bb_a0 = { + 0, 12, nig_int0_bb_a0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044 +}; + +static const u16 nig_int1_bb_a0_attn_idx[32] = { + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg nig_int1_bb_a0 = { + 1, 32, nig_int1_bb_a0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054 +}; + +static const u16 nig_int2_bb_a0_attn_idx[20] = { + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, +}; + +static struct attn_hw_reg nig_int2_bb_a0 = { + 2, 20, nig_int2_bb_a0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064 +}; + +static const 
u16 nig_int3_bb_a0_attn_idx[18] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, +}; + +static struct attn_hw_reg nig_int3_bb_a0 = { + 3, 18, nig_int3_bb_a0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074 +}; + +static const u16 nig_int4_bb_a0_attn_idx[20] = { + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, +}; + +static struct attn_hw_reg nig_int4_bb_a0 = { + 4, 20, nig_int4_bb_a0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084 +}; + +static const u16 nig_int5_bb_a0_attn_idx[18] = { + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, + 117, 118, 119, +}; + +static struct attn_hw_reg nig_int5_bb_a0 = { + 5, 18, nig_int5_bb_a0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094 +}; + +static struct attn_hw_reg *nig_int_bb_a0_regs[6] = { + &nig_int0_bb_a0, &nig_int1_bb_a0, &nig_int2_bb_a0, &nig_int3_bb_a0, + &nig_int4_bb_a0, &nig_int5_bb_a0, +}; + +static const u16 nig_int0_bb_b0_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg nig_int0_bb_b0 = { + 0, 12, nig_int0_bb_b0_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044 +}; + +static const u16 nig_int1_bb_b0_attn_idx[32] = { + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg nig_int1_bb_b0 = { + 1, 32, nig_int1_bb_b0_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054 +}; + +static const u16 nig_int2_bb_b0_attn_idx[20] = { + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, +}; + +static struct attn_hw_reg nig_int2_bb_b0 = { + 2, 20, nig_int2_bb_b0_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064 +}; + +static const u16 nig_int3_bb_b0_attn_idx[18] = { + 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, +}; + +static struct attn_hw_reg nig_int3_bb_b0 = { + 3, 18, nig_int3_bb_b0_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074 +}; + +static const u16 nig_int4_bb_b0_attn_idx[20] = { + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, +}; + +static struct attn_hw_reg nig_int4_bb_b0 = { + 4, 20, nig_int4_bb_b0_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084 +}; + +static const u16 nig_int5_bb_b0_attn_idx[18] = { + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, + 117, 118, 119, +}; + +static struct attn_hw_reg nig_int5_bb_b0 = { + 5, 18, nig_int5_bb_b0_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094 +}; + +static struct attn_hw_reg *nig_int_bb_b0_regs[6] = { + &nig_int0_bb_b0, &nig_int1_bb_b0, &nig_int2_bb_b0, &nig_int3_bb_b0, + &nig_int4_bb_b0, &nig_int5_bb_b0, +}; + +static const u16 nig_int0_k2_attn_idx[12] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, +}; + +static struct attn_hw_reg nig_int0_k2 = { + 0, 12, nig_int0_k2_attn_idx, 0x500040, 0x50004c, 0x500048, 0x500044 +}; + +static const u16 nig_int1_k2_attn_idx[32] = { + 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, + 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, +}; + +static struct attn_hw_reg nig_int1_k2 = { + 1, 32, nig_int1_k2_attn_idx, 0x500050, 0x50005c, 0x500058, 0x500054 +}; + +static const u16 nig_int2_k2_attn_idx[20] = { + 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, + 62, 63, +}; + +static struct attn_hw_reg nig_int2_k2 = { + 2, 20, nig_int2_k2_attn_idx, 0x500060, 0x50006c, 0x500068, 0x500064 +}; + +static const u16 nig_int3_k2_attn_idx[18] = { + 64, 65, 66, 67, 
68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, +}; + +static struct attn_hw_reg nig_int3_k2 = { + 3, 18, nig_int3_k2_attn_idx, 0x500070, 0x50007c, 0x500078, 0x500074 +}; + +static const u16 nig_int4_k2_attn_idx[20] = { + 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, + 100, 101, +}; + +static struct attn_hw_reg nig_int4_k2 = { + 4, 20, nig_int4_k2_attn_idx, 0x500080, 0x50008c, 0x500088, 0x500084 +}; + +static const u16 nig_int5_k2_attn_idx[18] = { + 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, + 116, + 117, 118, 119, +}; + +static struct attn_hw_reg nig_int5_k2 = { + 5, 18, nig_int5_k2_attn_idx, 0x500090, 0x50009c, 0x500098, 0x500094 +}; + +static const u16 nig_int6_k2_attn_idx[20] = { + 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, + 134, + 135, 136, 137, 138, 139, +}; + +static struct attn_hw_reg nig_int6_k2 = { + 6, 20, nig_int6_k2_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4 +}; + +static const u16 nig_int7_k2_attn_idx[18] = { + 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, + 154, + 155, 156, 157, +}; + +static struct attn_hw_reg nig_int7_k2 = { + 7, 18, nig_int7_k2_attn_idx, 0x5000b0, 0x5000bc, 0x5000b8, 0x5000b4 +}; + +static const u16 nig_int8_k2_attn_idx[20] = { + 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, + 172, + 173, 174, 175, 176, 177, +}; + +static struct attn_hw_reg nig_int8_k2 = { + 8, 20, nig_int8_k2_attn_idx, 0x5000c0, 0x5000cc, 0x5000c8, 0x5000c4 +}; + +static const u16 nig_int9_k2_attn_idx[18] = { + 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, + 192, + 193, 194, 195, +}; + +static struct attn_hw_reg nig_int9_k2 = { + 9, 18, nig_int9_k2_attn_idx, 0x5000d0, 0x5000dc, 0x5000d8, 0x5000d4 +}; + +static struct attn_hw_reg *nig_int_k2_regs[10] = { + &nig_int0_k2, &nig_int1_k2, &nig_int2_k2, &nig_int3_k2, &nig_int4_k2, + &nig_int5_k2, &nig_int6_k2, &nig_int7_k2, &nig_int8_k2, &nig_int9_k2, +}; + +#ifdef ATTN_DESC +static const char *nig_prty_attn_desc[113] = { + "nig_datapath_parity_error", + "nig_mem107_i_mem_prty", + "nig_mem103_i_mem_prty", + "nig_mem104_i_mem_prty", + "nig_mem105_i_mem_prty", + "nig_mem106_i_mem_prty", + "nig_mem072_i_mem_prty", + "nig_mem071_i_mem_prty", + "nig_mem074_i_mem_prty", + "nig_mem073_i_mem_prty", + "nig_mem076_i_mem_prty", + "nig_mem075_i_mem_prty", + "nig_mem078_i_mem_prty", + "nig_mem077_i_mem_prty", + "nig_mem055_i_mem_prty", + "nig_mem062_i_mem_prty", + "nig_mem063_i_mem_prty", + "nig_mem064_i_mem_prty", + "nig_mem065_i_mem_prty", + "nig_mem066_i_mem_prty", + "nig_mem067_i_mem_prty", + "nig_mem068_i_mem_prty", + "nig_mem069_i_mem_prty", + "nig_mem070_i_mem_prty", + "nig_mem056_i_mem_prty", + "nig_mem057_i_mem_prty", + "nig_mem058_i_mem_prty", + "nig_mem059_i_mem_prty", + "nig_mem060_i_mem_prty", + "nig_mem061_i_mem_prty", + "nig_mem035_i_mem_prty", + "nig_mem046_i_mem_prty", + "nig_mem051_i_mem_prty", + "nig_mem052_i_mem_prty", + "nig_mem090_i_mem_prty", + "nig_mem089_i_mem_prty", + "nig_mem092_i_mem_prty", + "nig_mem091_i_mem_prty", + "nig_mem109_i_mem_prty", + "nig_mem110_i_mem_prty", + "nig_mem001_i_mem_prty", + "nig_mem008_i_mem_prty", + "nig_mem009_i_mem_prty", + "nig_mem010_i_mem_prty", + "nig_mem011_i_mem_prty", + "nig_mem012_i_mem_prty", + "nig_mem013_i_mem_prty", + "nig_mem014_i_mem_prty", + "nig_mem015_i_mem_prty", + "nig_mem016_i_mem_prty", + "nig_mem002_i_mem_prty", + "nig_mem003_i_mem_prty", + "nig_mem004_i_mem_prty", + "nig_mem005_i_mem_prty", + 
"nig_mem006_i_mem_prty", + "nig_mem007_i_mem_prty", + "nig_mem080_i_mem_prty", + "nig_mem081_i_mem_prty", + "nig_mem082_i_mem_prty", + "nig_mem083_i_mem_prty", + "nig_mem048_i_mem_prty", + "nig_mem049_i_mem_prty", + "nig_mem102_i_mem_prty", + "nig_mem087_i_mem_prty", + "nig_mem086_i_mem_prty", + "nig_mem088_i_mem_prty", + "nig_mem079_i_mem_prty", + "nig_mem047_i_mem_prty", + "nig_mem050_i_mem_prty", + "nig_mem053_i_mem_prty", + "nig_mem054_i_mem_prty", + "nig_mem036_i_mem_prty", + "nig_mem037_i_mem_prty", + "nig_mem038_i_mem_prty", + "nig_mem039_i_mem_prty", + "nig_mem040_i_mem_prty", + "nig_mem041_i_mem_prty", + "nig_mem042_i_mem_prty", + "nig_mem043_i_mem_prty", + "nig_mem044_i_mem_prty", + "nig_mem045_i_mem_prty", + "nig_mem093_i_mem_prty", + "nig_mem094_i_mem_prty", + "nig_mem027_i_mem_prty", + "nig_mem028_i_mem_prty", + "nig_mem029_i_mem_prty", + "nig_mem030_i_mem_prty", + "nig_mem017_i_mem_prty", + "nig_mem018_i_mem_prty", + "nig_mem095_i_mem_prty", + "nig_mem084_i_mem_prty", + "nig_mem085_i_mem_prty", + "nig_mem099_i_mem_prty", + "nig_mem100_i_mem_prty", + "nig_mem096_i_mem_prty", + "nig_mem097_i_mem_prty", + "nig_mem098_i_mem_prty", + "nig_mem031_i_mem_prty", + "nig_mem032_i_mem_prty", + "nig_mem033_i_mem_prty", + "nig_mem034_i_mem_prty", + "nig_mem019_i_mem_prty", + "nig_mem020_i_mem_prty", + "nig_mem021_i_mem_prty", + "nig_mem022_i_mem_prty", + "nig_mem101_i_mem_prty", + "nig_mem023_i_mem_prty", + "nig_mem024_i_mem_prty", + "nig_mem025_i_mem_prty", + "nig_mem026_i_mem_prty", + "nig_mem108_i_mem_prty", + "nig_mem031_ext_i_mem_prty", + "nig_mem034_ext_i_mem_prty", +}; +#else +#define nig_prty_attn_desc OSAL_NULL +#endif + +static const u16 nig_prty1_bb_a0_attn_idx[31] = { + 1, 2, 5, 12, 13, 23, 35, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, + 52, 53, 54, 55, 56, 60, 61, 62, 63, 64, 65, 66, +}; + +static struct attn_hw_reg nig_prty1_bb_a0 = { + 0, 31, nig_prty1_bb_a0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204 +}; + +static const u16 nig_prty2_bb_a0_attn_idx[31] = { + 33, 69, 70, 90, 91, 8, 11, 10, 14, 17, 18, 19, 20, 21, 22, 7, 6, 24, 25, + 26, 27, 28, 29, 15, 16, 57, 58, 59, 9, 94, 95, +}; + +static struct attn_hw_reg nig_prty2_bb_a0 = { + 1, 31, nig_prty2_bb_a0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214 +}; + +static const u16 nig_prty3_bb_a0_attn_idx[31] = { + 96, 97, 98, 103, 104, 92, 93, 105, 106, 107, 108, 109, 80, 31, 67, 83, + 84, + 3, 68, 85, 86, 89, 77, 78, 79, 4, 32, 36, 81, 82, 87, +}; + +static struct attn_hw_reg nig_prty3_bb_a0 = { + 2, 31, nig_prty3_bb_a0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224 +}; + +static const u16 nig_prty4_bb_a0_attn_idx[14] = { + 88, 101, 102, 75, 71, 74, 76, 73, 72, 34, 37, 99, 30, 100, +}; + +static struct attn_hw_reg nig_prty4_bb_a0 = { + 3, 14, nig_prty4_bb_a0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234 +}; + +static struct attn_hw_reg *nig_prty_bb_a0_regs[4] = { + &nig_prty1_bb_a0, &nig_prty2_bb_a0, &nig_prty3_bb_a0, &nig_prty4_bb_a0, +}; + +static const u16 nig_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg nig_prty0_bb_b0 = { + 0, 1, nig_prty0_bb_b0_attn_idx, 0x5000a0, 0x5000ac, 0x5000a8, 0x5000a4 +}; + +static const u16 nig_prty1_bb_b0_attn_idx[31] = { + 4, 5, 9, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, + 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, +}; + +static struct attn_hw_reg nig_prty1_bb_b0 = { + 1, 31, nig_prty1_bb_b0_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204 +}; + +static const u16 nig_prty2_bb_b0_attn_idx[31] = { + 90, 91, 64, 63, 65, 8, 11, 10, 
13, 12, 66, 14, 17, 18, 19, 20, 21, 22, + 23, + 7, 6, 24, 25, 26, 27, 28, 29, 15, 16, 92, 93, +}; + +static struct attn_hw_reg nig_prty2_bb_b0 = { + 2, 31, nig_prty2_bb_b0_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214 +}; + +static const u16 nig_prty3_bb_b0_attn_idx[31] = { + 94, 95, 96, 97, 99, 100, 103, 104, 105, 62, 108, 109, 80, 31, 1, 67, 60, + 69, 83, 84, 2, 3, 110, 61, 68, 70, 85, 86, 111, 112, 89, +}; + +static struct attn_hw_reg nig_prty3_bb_b0 = { + 3, 31, nig_prty3_bb_b0_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224 +}; + +static const u16 nig_prty4_bb_b0_attn_idx[17] = { + 106, 107, 87, 88, 81, 82, 101, 102, 75, 71, 74, 76, 77, 78, 79, 73, 72, +}; + +static struct attn_hw_reg nig_prty4_bb_b0 = { + 4, 17, nig_prty4_bb_b0_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234 +}; + +static struct attn_hw_reg *nig_prty_bb_b0_regs[5] = { + &nig_prty0_bb_b0, &nig_prty1_bb_b0, &nig_prty2_bb_b0, &nig_prty3_bb_b0, + &nig_prty4_bb_b0, +}; + +static const u16 nig_prty0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg nig_prty0_k2 = { + 0, 1, nig_prty0_k2_attn_idx, 0x5000e0, 0x5000ec, 0x5000e8, 0x5000e4 +}; + +static const u16 nig_prty1_k2_attn_idx[31] = { + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, + 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, +}; + +static struct attn_hw_reg nig_prty1_k2 = { + 1, 31, nig_prty1_k2_attn_idx, 0x500200, 0x50020c, 0x500208, 0x500204 +}; + +static const u16 nig_prty2_k2_attn_idx[31] = { + 67, 60, 61, 68, 32, 33, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, + 37, 36, 81, 82, 83, 84, 85, 86, 48, 49, 87, 88, 89, +}; + +static struct attn_hw_reg nig_prty2_k2 = { + 2, 31, nig_prty2_k2_attn_idx, 0x500210, 0x50021c, 0x500218, 0x500214 +}; + +static const u16 nig_prty3_k2_attn_idx[31] = { + 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 92, 93, 105, 62, 106, + 107, 108, 109, 59, 90, 91, 64, 55, 41, 42, 43, 63, 65, 35, 34, +}; + +static struct attn_hw_reg nig_prty3_k2 = { + 3, 31, nig_prty3_k2_attn_idx, 0x500220, 0x50022c, 0x500228, 0x500224 +}; + +static const u16 nig_prty4_k2_attn_idx[14] = { + 44, 45, 46, 47, 40, 50, 66, 56, 57, 58, 51, 52, 53, 54, +}; + +static struct attn_hw_reg nig_prty4_k2 = { + 4, 14, nig_prty4_k2_attn_idx, 0x500230, 0x50023c, 0x500238, 0x500234 +}; + +static struct attn_hw_reg *nig_prty_k2_regs[5] = { + &nig_prty0_k2, &nig_prty1_k2, &nig_prty2_k2, &nig_prty3_k2, + &nig_prty4_k2, +}; + +#ifdef ATTN_DESC +static const char *wol_int_attn_desc[1] = { + "wol_address_error", +}; +#else +#define wol_int_attn_desc OSAL_NULL +#endif + +static const u16 wol_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg wol_int0_k2 = { + 0, 1, wol_int0_k2_attn_idx, 0x600040, 0x60004c, 0x600048, 0x600044 +}; + +static struct attn_hw_reg *wol_int_k2_regs[1] = { + &wol_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *wol_prty_attn_desc[24] = { + "wol_mem017_i_mem_prty", + "wol_mem018_i_mem_prty", + "wol_mem019_i_mem_prty", + "wol_mem020_i_mem_prty", + "wol_mem021_i_mem_prty", + "wol_mem022_i_mem_prty", + "wol_mem023_i_mem_prty", + "wol_mem024_i_mem_prty", + "wol_mem001_i_mem_prty", + "wol_mem008_i_mem_prty", + "wol_mem009_i_mem_prty", + "wol_mem010_i_mem_prty", + "wol_mem011_i_mem_prty", + "wol_mem012_i_mem_prty", + "wol_mem013_i_mem_prty", + "wol_mem014_i_mem_prty", + "wol_mem015_i_mem_prty", + "wol_mem016_i_mem_prty", + "wol_mem002_i_mem_prty", + "wol_mem003_i_mem_prty", + "wol_mem004_i_mem_prty", + "wol_mem005_i_mem_prty", + "wol_mem006_i_mem_prty", + "wol_mem007_i_mem_prty", +}; +#else +#define 
wol_prty_attn_desc OSAL_NULL +#endif + +static const u16 wol_prty1_k2_attn_idx[24] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, +}; + +static struct attn_hw_reg wol_prty1_k2 = { + 0, 24, wol_prty1_k2_attn_idx, 0x600200, 0x60020c, 0x600208, 0x600204 +}; + +static struct attn_hw_reg *wol_prty_k2_regs[1] = { + &wol_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *bmbn_int_attn_desc[1] = { + "bmbn_address_error", +}; +#else +#define bmbn_int_attn_desc OSAL_NULL +#endif + +static const u16 bmbn_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg bmbn_int0_k2 = { + 0, 1, bmbn_int0_k2_attn_idx, 0x610040, 0x61004c, 0x610048, 0x610044 +}; + +static struct attn_hw_reg *bmbn_int_k2_regs[1] = { + &bmbn_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ipc_int_attn_desc[14] = { + "ipc_address_error", + "ipc_unused_0", + "ipc_vmain_por_assert", + "ipc_vmain_por_deassert", + "ipc_perst_assert", + "ipc_perst_deassert", + "ipc_otp_ecc_ded_0", + "ipc_otp_ecc_ded_1", + "ipc_otp_ecc_ded_2", + "ipc_otp_ecc_ded_3", + "ipc_otp_ecc_ded_4", + "ipc_otp_ecc_ded_5", + "ipc_otp_ecc_ded_6", + "ipc_otp_ecc_ded_7", +}; +#else +#define ipc_int_attn_desc OSAL_NULL +#endif + +static const u16 ipc_int0_bb_a0_attn_idx[5] = { + 0, 2, 3, 4, 5, +}; + +static struct attn_hw_reg ipc_int0_bb_a0 = { + 0, 5, ipc_int0_bb_a0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510 +}; + +static struct attn_hw_reg *ipc_int_bb_a0_regs[1] = { + &ipc_int0_bb_a0, +}; + +static const u16 ipc_int0_bb_b0_attn_idx[13] = { + 0, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, +}; + +static struct attn_hw_reg ipc_int0_bb_b0 = { + 0, 13, ipc_int0_bb_b0_attn_idx, 0x2050c, 0x20518, 0x20514, 0x20510 +}; + +static struct attn_hw_reg *ipc_int_bb_b0_regs[1] = { + &ipc_int0_bb_b0, +}; + +static const u16 ipc_int0_k2_attn_idx[5] = { + 0, 2, 3, 4, 5, +}; + +static struct attn_hw_reg ipc_int0_k2 = { + 0, 5, ipc_int0_k2_attn_idx, 0x202dc, 0x202e8, 0x202e4, 0x202e0 +}; + +static struct attn_hw_reg *ipc_int_k2_regs[1] = { + &ipc_int0_k2, +}; + +#ifdef ATTN_DESC +static const char *ipc_prty_attn_desc[1] = { + "ipc_fake_par_err", +}; +#else +#define ipc_prty_attn_desc OSAL_NULL +#endif + +static const u16 ipc_prty0_bb_a0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ipc_prty0_bb_a0 = { + 0, 1, ipc_prty0_bb_a0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520 +}; + +static struct attn_hw_reg *ipc_prty_bb_a0_regs[1] = { + &ipc_prty0_bb_a0, +}; + +static const u16 ipc_prty0_bb_b0_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ipc_prty0_bb_b0 = { + 0, 1, ipc_prty0_bb_b0_attn_idx, 0x2051c, 0x20528, 0x20524, 0x20520 +}; + +static struct attn_hw_reg *ipc_prty_bb_b0_regs[1] = { + &ipc_prty0_bb_b0, +}; + +#ifdef ATTN_DESC +static const char *nwm_int_attn_desc[18] = { + "nwm_address_error", + "nwm_tx_overflow_0", + "nwm_tx_underflow_0", + "nwm_tx_overflow_1", + "nwm_tx_underflow_1", + "nwm_tx_overflow_2", + "nwm_tx_underflow_2", + "nwm_tx_overflow_3", + "nwm_tx_underflow_3", + "nwm_unused_0", + "nwm_ln0_at_10M", + "nwm_ln0_at_100M", + "nwm_ln1_at_10M", + "nwm_ln1_at_100M", + "nwm_ln2_at_10M", + "nwm_ln2_at_100M", + "nwm_ln3_at_10M", + "nwm_ln3_at_100M", +}; +#else +#define nwm_int_attn_desc OSAL_NULL +#endif + +static const u16 nwm_int0_k2_attn_idx[17] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, +}; + +static struct attn_hw_reg nwm_int0_k2 = { + 0, 17, nwm_int0_k2_attn_idx, 0x800004, 0x800010, 0x80000c, 0x800008 +}; + +static struct attn_hw_reg *nwm_int_k2_regs[1] = { + &nwm_int0_k2, +}; + 
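For reference, each attn_hw_reg entry in these tables bundles, in order, the status-register index within the block, the number of valid attention bits, a lookup array mapping each bit position to an entry in the block's description array, and the status / status-clear / status-write / mask register addresses. The standalone sketch below (not part of this patch) shows how such a table could be decoded into human-readable attention names; the struct layout and field names are assumptions inferred from the initializers here, and the sample data is copied from nwm_int0_k2 and nwm_int_attn_desc above with a made-up status value.

/*
 * Illustrative sketch only -- not part of this patch. The struct layout and
 * field names are assumptions modelled on the initializers in this file;
 * the real definitions live elsewhere in the qede base code.
 */
#include <stdint.h>
#include <stdio.h>

struct attn_reg_sketch {
	uint16_t reg_idx;          /* which status register within the block */
	uint16_t num_of_attn;      /* number of valid attention bits */
	const uint16_t *attn_idx;  /* bit position -> description array index */
	uint32_t sts_addr;         /* status register address */
	uint32_t sts_clr_addr;     /* status-and-clear register address */
	uint32_t sts_wr_addr;      /* status write register address */
	uint32_t mask_addr;        /* mask register address */
};

/* Print a name for every asserted bit in 'sts', a value the caller is assumed
 * to have read from reg->sts_addr (register access is out of scope here).
 */
static void print_asserted_bits(const struct attn_reg_sketch *reg, uint32_t sts,
				const char **desc)
{
	uint16_t bit;

	for (bit = 0; bit < reg->num_of_attn; bit++)
		if (sts & (1u << bit))
			printf("attention: %s (reg 0x%x, bit %u)\n",
			       desc ? desc[reg->attn_idx[bit]] : "unnamed",
			       (unsigned)reg->sts_addr, (unsigned)bit);
}

int main(void)
{
	/* Data borrowed from nwm_int0_k2 above; the status word is invented. */
	static const uint16_t nwm_idx[17] = {
		0, 1, 2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17
	};
	static const char *nwm_desc[18] = {
		"nwm_address_error", "nwm_tx_overflow_0", "nwm_tx_underflow_0",
		"nwm_tx_overflow_1", "nwm_tx_underflow_1", "nwm_tx_overflow_2",
		"nwm_tx_underflow_2", "nwm_tx_overflow_3", "nwm_tx_underflow_3",
		"nwm_unused_0", "nwm_ln0_at_10M", "nwm_ln0_at_100M",
		"nwm_ln1_at_10M", "nwm_ln1_at_100M", "nwm_ln2_at_10M",
		"nwm_ln2_at_100M", "nwm_ln3_at_10M", "nwm_ln3_at_100M",
	};
	struct attn_reg_sketch nwm0 = {
		0, 17, nwm_idx, 0x800004, 0x800010, 0x80000c, 0x800008
	};

	/* Bits 0 and 2 set: reports nwm_address_error and nwm_tx_underflow_0 */
	print_asserted_bits(&nwm0, 0x00000005, nwm_desc);
	return 0;
}

The same bit-to-description indirection is what the attn_idx arrays throughout this file encode; note how nwm_int0_k2 skips index 9 ("nwm_unused_0"), so the lookup table rather than the raw bit number selects the description.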
+#ifdef ATTN_DESC +static const char *nwm_prty_attn_desc[72] = { + "nwm_mem020_i_mem_prty", + "nwm_mem028_i_mem_prty", + "nwm_mem036_i_mem_prty", + "nwm_mem044_i_mem_prty", + "nwm_mem023_i_mem_prty", + "nwm_mem031_i_mem_prty", + "nwm_mem039_i_mem_prty", + "nwm_mem047_i_mem_prty", + "nwm_mem024_i_mem_prty", + "nwm_mem032_i_mem_prty", + "nwm_mem040_i_mem_prty", + "nwm_mem048_i_mem_prty", + "nwm_mem018_i_mem_prty", + "nwm_mem026_i_mem_prty", + "nwm_mem034_i_mem_prty", + "nwm_mem042_i_mem_prty", + "nwm_mem017_i_mem_prty", + "nwm_mem025_i_mem_prty", + "nwm_mem033_i_mem_prty", + "nwm_mem041_i_mem_prty", + "nwm_mem021_i_mem_prty", + "nwm_mem029_i_mem_prty", + "nwm_mem037_i_mem_prty", + "nwm_mem045_i_mem_prty", + "nwm_mem019_i_mem_prty", + "nwm_mem027_i_mem_prty", + "nwm_mem035_i_mem_prty", + "nwm_mem043_i_mem_prty", + "nwm_mem022_i_mem_prty", + "nwm_mem030_i_mem_prty", + "nwm_mem038_i_mem_prty", + "nwm_mem046_i_mem_prty", + "nwm_mem057_i_mem_prty", + "nwm_mem059_i_mem_prty", + "nwm_mem061_i_mem_prty", + "nwm_mem063_i_mem_prty", + "nwm_mem058_i_mem_prty", + "nwm_mem060_i_mem_prty", + "nwm_mem062_i_mem_prty", + "nwm_mem064_i_mem_prty", + "nwm_mem009_i_mem_prty", + "nwm_mem010_i_mem_prty", + "nwm_mem011_i_mem_prty", + "nwm_mem012_i_mem_prty", + "nwm_mem013_i_mem_prty", + "nwm_mem014_i_mem_prty", + "nwm_mem015_i_mem_prty", + "nwm_mem016_i_mem_prty", + "nwm_mem001_i_mem_prty", + "nwm_mem002_i_mem_prty", + "nwm_mem003_i_mem_prty", + "nwm_mem004_i_mem_prty", + "nwm_mem005_i_mem_prty", + "nwm_mem006_i_mem_prty", + "nwm_mem007_i_mem_prty", + "nwm_mem008_i_mem_prty", + "nwm_mem049_i_mem_prty", + "nwm_mem053_i_mem_prty", + "nwm_mem050_i_mem_prty", + "nwm_mem054_i_mem_prty", + "nwm_mem051_i_mem_prty", + "nwm_mem055_i_mem_prty", + "nwm_mem052_i_mem_prty", + "nwm_mem056_i_mem_prty", + "nwm_mem066_i_mem_prty", + "nwm_mem068_i_mem_prty", + "nwm_mem070_i_mem_prty", + "nwm_mem072_i_mem_prty", + "nwm_mem065_i_mem_prty", + "nwm_mem067_i_mem_prty", + "nwm_mem069_i_mem_prty", + "nwm_mem071_i_mem_prty", +}; +#else +#define nwm_prty_attn_desc OSAL_NULL +#endif + +static const u16 nwm_prty1_k2_attn_idx[31] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, + 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, +}; + +static struct attn_hw_reg nwm_prty1_k2 = { + 0, 31, nwm_prty1_k2_attn_idx, 0x800200, 0x80020c, 0x800208, 0x800204 +}; + +static const u16 nwm_prty2_k2_attn_idx[31] = { + 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, + 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, +}; + +static struct attn_hw_reg nwm_prty2_k2 = { + 1, 31, nwm_prty2_k2_attn_idx, 0x800210, 0x80021c, 0x800218, 0x800214 +}; + +static const u16 nwm_prty3_k2_attn_idx[10] = { + 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, +}; + +static struct attn_hw_reg nwm_prty3_k2 = { + 2, 10, nwm_prty3_k2_attn_idx, 0x800220, 0x80022c, 0x800228, 0x800224 +}; + +static struct attn_hw_reg *nwm_prty_k2_regs[3] = { + &nwm_prty1_k2, &nwm_prty2_k2, &nwm_prty3_k2, +}; + +#ifdef ATTN_DESC +static const char *nws_int_attn_desc[38] = { + "nws_address_error", + "nws_ln0_an_resolve_50g_cr2", + "nws_ln0_an_resolve_50g_kr2", + "nws_ln0_an_resolve_40g_cr4", + "nws_ln0_an_resolve_40g_kr4", + "nws_ln0_an_resolve_25g_gr", + "nws_ln0_an_resolve_25g_cr", + "nws_ln0_an_resolve_25g_kr", + "nws_ln0_an_resolve_10g_kr", + "nws_ln0_an_resolve_1g_kx", + "nws_unused_0", + "nws_ln1_an_resolve_50g_cr2", + "nws_ln1_an_resolve_50g_kr2", + "nws_ln1_an_resolve_40g_cr4", + "nws_ln1_an_resolve_40g_kr4", + "nws_ln1_an_resolve_25g_gr", + 
"nws_ln1_an_resolve_25g_cr", + "nws_ln1_an_resolve_25g_kr", + "nws_ln1_an_resolve_10g_kr", + "nws_ln1_an_resolve_1g_kx", + "nws_ln2_an_resolve_50g_cr2", + "nws_ln2_an_resolve_50g_kr2", + "nws_ln2_an_resolve_40g_cr4", + "nws_ln2_an_resolve_40g_kr4", + "nws_ln2_an_resolve_25g_gr", + "nws_ln2_an_resolve_25g_cr", + "nws_ln2_an_resolve_25g_kr", + "nws_ln2_an_resolve_10g_kr", + "nws_ln2_an_resolve_1g_kx", + "nws_ln3_an_resolve_50g_cr2", + "nws_ln3_an_resolve_50g_kr2", + "nws_ln3_an_resolve_40g_cr4", + "nws_ln3_an_resolve_40g_kr4", + "nws_ln3_an_resolve_25g_gr", + "nws_ln3_an_resolve_25g_cr", + "nws_ln3_an_resolve_25g_kr", + "nws_ln3_an_resolve_10g_kr", + "nws_ln3_an_resolve_1g_kx", +}; +#else +#define nws_int_attn_desc OSAL_NULL +#endif + +static const u16 nws_int0_k2_attn_idx[10] = { + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, +}; + +static struct attn_hw_reg nws_int0_k2 = { + 0, 10, nws_int0_k2_attn_idx, 0x700180, 0x70018c, 0x700188, 0x700184 +}; + +static const u16 nws_int1_k2_attn_idx[9] = { + 11, 12, 13, 14, 15, 16, 17, 18, 19, +}; + +static struct attn_hw_reg nws_int1_k2 = { + 1, 9, nws_int1_k2_attn_idx, 0x700190, 0x70019c, 0x700198, 0x700194 +}; + +static const u16 nws_int2_k2_attn_idx[9] = { + 20, 21, 22, 23, 24, 25, 26, 27, 28, +}; + +static struct attn_hw_reg nws_int2_k2 = { + 2, 9, nws_int2_k2_attn_idx, 0x7001a0, 0x7001ac, 0x7001a8, 0x7001a4 +}; + +static const u16 nws_int3_k2_attn_idx[9] = { + 29, 30, 31, 32, 33, 34, 35, 36, 37, +}; + +static struct attn_hw_reg nws_int3_k2 = { + 3, 9, nws_int3_k2_attn_idx, 0x7001b0, 0x7001bc, 0x7001b8, 0x7001b4 +}; + +static struct attn_hw_reg *nws_int_k2_regs[4] = { + &nws_int0_k2, &nws_int1_k2, &nws_int2_k2, &nws_int3_k2, +}; + +#ifdef ATTN_DESC +static const char *nws_prty_attn_desc[4] = { + "nws_mem003_i_mem_prty", + "nws_mem001_i_mem_prty", + "nws_mem004_i_mem_prty", + "nws_mem002_i_mem_prty", +}; +#else +#define nws_prty_attn_desc OSAL_NULL +#endif + +static const u16 nws_prty1_k2_attn_idx[4] = { + 0, 1, 2, 3, +}; + +static struct attn_hw_reg nws_prty1_k2 = { + 0, 4, nws_prty1_k2_attn_idx, 0x700200, 0x70020c, 0x700208, 0x700204 +}; + +static struct attn_hw_reg *nws_prty_k2_regs[1] = { + &nws_prty1_k2, +}; + +#ifdef ATTN_DESC +static const char *ms_int_attn_desc[1] = { + "ms_address_error", +}; +#else +#define ms_int_attn_desc OSAL_NULL +#endif + +static const u16 ms_int0_k2_attn_idx[1] = { + 0, +}; + +static struct attn_hw_reg ms_int0_k2 = { + 0, 1, ms_int0_k2_attn_idx, 0x6a0180, 0x6a018c, 0x6a0188, 0x6a0184 +}; + +static struct attn_hw_reg *ms_int_k2_regs[1] = { + &ms_int0_k2, +}; + +static struct attn_hw_block attn_blocks[] = { + {"grc", grc_int_attn_desc, grc_prty_attn_desc, { + {1, 1, + grc_int_bb_a0_regs, + grc_prty_bb_a0_regs}, + {1, 1, + grc_int_bb_b0_regs, + grc_prty_bb_b0_regs}, + {1, 1, grc_int_k2_regs, + grc_prty_k2_regs} } }, + {"miscs", miscs_int_attn_desc, miscs_prty_attn_desc, { + {2, 0, + + miscs_int_bb_a0_regs, + OSAL_NULL}, + {2, 1, + + miscs_int_bb_b0_regs, + + miscs_prty_bb_b0_regs}, + {1, 1, + + miscs_int_k2_regs, + + miscs_prty_k2_regs } } }, + {"misc", misc_int_attn_desc, OSAL_NULL, { + {1, 0, misc_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, misc_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, misc_int_k2_regs, + OSAL_NULL } } }, + {"dbu", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"pglue_b", pglue_b_int_attn_desc, pglue_b_prty_attn_desc, { + {1, 1, + + pglue_b_int_bb_a0_regs, + + pglue_b_prty_bb_a0_regs}, + {1, 2, + + pglue_b_int_bb_b0_regs, + + 
pglue_b_prty_bb_b0_regs}, + {1, 3, + + pglue_b_int_k2_regs, + + pglue_b_prty_k2_regs } } }, + {"cnig", cnig_int_attn_desc, cnig_prty_attn_desc, { + {1, 0, + cnig_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + cnig_int_bb_b0_regs, + + cnig_prty_bb_b0_regs}, + {1, 1, + cnig_int_k2_regs, + + cnig_prty_k2_regs } } }, + {"cpmu", cpmu_int_attn_desc, OSAL_NULL, { + {1, 0, cpmu_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, cpmu_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, cpmu_int_k2_regs, + OSAL_NULL } } }, + {"ncsi", ncsi_int_attn_desc, ncsi_prty_attn_desc, { + {1, 1, + ncsi_int_bb_a0_regs, + + ncsi_prty_bb_a0_regs}, + {1, 1, + ncsi_int_bb_b0_regs, + + ncsi_prty_bb_b0_regs}, + {1, 1, + ncsi_int_k2_regs, + + ncsi_prty_k2_regs } } }, + {"opte", OSAL_NULL, opte_prty_attn_desc, { + {0, 1, OSAL_NULL, + opte_prty_bb_a0_regs}, + {0, 2, OSAL_NULL, + opte_prty_bb_b0_regs}, + {0, 2, OSAL_NULL, + opte_prty_k2_regs } } }, + {"bmb", bmb_int_attn_desc, bmb_prty_attn_desc, { + {12, 2, + bmb_int_bb_a0_regs, + bmb_prty_bb_a0_regs}, + {12, 3, + bmb_int_bb_b0_regs, + bmb_prty_bb_b0_regs}, + {12, 3, bmb_int_k2_regs, + bmb_prty_k2_regs } } }, + {"pcie", pcie_int_attn_desc, pcie_prty_attn_desc, { + {0, 1, OSAL_NULL, + + pcie_prty_bb_a0_regs}, + {0, 1, OSAL_NULL, + + pcie_prty_bb_b0_regs}, + {1, 2, + pcie_int_k2_regs, + + pcie_prty_k2_regs } } }, + {"mcp", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"mcp2", OSAL_NULL, mcp2_prty_attn_desc, { + {0, 2, OSAL_NULL, + mcp2_prty_bb_a0_regs}, + {0, 2, OSAL_NULL, + mcp2_prty_bb_b0_regs}, + {0, 2, OSAL_NULL, + mcp2_prty_k2_regs } } }, + {"pswhst", pswhst_int_attn_desc, pswhst_prty_attn_desc, { + {1, 1, + + pswhst_int_bb_a0_regs, + + pswhst_prty_bb_a0_regs}, + {1, 2, + + pswhst_int_bb_b0_regs, + + pswhst_prty_bb_b0_regs}, + {1, 2, + + pswhst_int_k2_regs, + + pswhst_prty_k2_regs } } }, + {"pswhst2", pswhst2_int_attn_desc, pswhst2_prty_attn_desc, { + {1, 0, + + pswhst2_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswhst2_int_bb_b0_regs, + + pswhst2_prty_bb_b0_regs}, + {1, 1, + + pswhst2_int_k2_regs, + + pswhst2_prty_k2_regs } } }, + {"pswrd", pswrd_int_attn_desc, pswrd_prty_attn_desc, { + {1, 0, + + pswrd_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswrd_int_bb_b0_regs, + + pswrd_prty_bb_b0_regs}, + {1, 1, + + pswrd_int_k2_regs, + + pswrd_prty_k2_regs } } }, + {"pswrd2", pswrd2_int_attn_desc, pswrd2_prty_attn_desc, { + {1, 2, + + pswrd2_int_bb_a0_regs, + + pswrd2_prty_bb_a0_regs}, + {1, 3, + + pswrd2_int_bb_b0_regs, + + pswrd2_prty_bb_b0_regs}, + {1, 3, + + pswrd2_int_k2_regs, + + pswrd2_prty_k2_regs } } }, + {"pswwr", pswwr_int_attn_desc, pswwr_prty_attn_desc, { + {1, 0, + + pswwr_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswwr_int_bb_b0_regs, + + pswwr_prty_bb_b0_regs}, + {1, 1, + + pswwr_int_k2_regs, + + pswwr_prty_k2_regs } } }, + {"pswwr2", pswwr2_int_attn_desc, pswwr2_prty_attn_desc, { + {1, 4, + + pswwr2_int_bb_a0_regs, + + pswwr2_prty_bb_a0_regs}, + {1, 5, + + pswwr2_int_bb_b0_regs, + + pswwr2_prty_bb_b0_regs}, + {1, 5, + + pswwr2_int_k2_regs, + + pswwr2_prty_k2_regs } } }, + {"pswrq", pswrq_int_attn_desc, pswrq_prty_attn_desc, { + {1, 0, + + pswrq_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pswrq_int_bb_b0_regs, + + pswrq_prty_bb_b0_regs}, + {1, 1, + + pswrq_int_k2_regs, + + pswrq_prty_k2_regs } } }, + {"pswrq2", pswrq2_int_attn_desc, pswrq2_prty_attn_desc, { + {1, 1, + + pswrq2_int_bb_a0_regs, + + pswrq2_prty_bb_a0_regs}, + {1, 1, + + pswrq2_int_bb_b0_regs, + + pswrq2_prty_bb_b0_regs}, + {1, 1, + + 
pswrq2_int_k2_regs, + + pswrq2_prty_k2_regs } } }, + {"pglcs", pglcs_int_attn_desc, OSAL_NULL, { + {1, 0, pglcs_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, pglcs_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, pglcs_int_k2_regs, + OSAL_NULL } } }, + {"dmae", dmae_int_attn_desc, dmae_prty_attn_desc, { + {1, 1, + dmae_int_bb_a0_regs, + + dmae_prty_bb_a0_regs}, + {1, 1, + dmae_int_bb_b0_regs, + + dmae_prty_bb_b0_regs}, + {1, 1, + dmae_int_k2_regs, + + dmae_prty_k2_regs } } }, + {"ptu", ptu_int_attn_desc, ptu_prty_attn_desc, { + {1, 1, + ptu_int_bb_a0_regs, + ptu_prty_bb_a0_regs}, + {1, 1, + ptu_int_bb_b0_regs, + ptu_prty_bb_b0_regs}, + {1, 1, ptu_int_k2_regs, + ptu_prty_k2_regs } } }, + {"tcm", tcm_int_attn_desc, tcm_prty_attn_desc, { + {3, 2, + tcm_int_bb_a0_regs, + tcm_prty_bb_a0_regs}, + {3, 2, + tcm_int_bb_b0_regs, + tcm_prty_bb_b0_regs}, + {3, 2, tcm_int_k2_regs, + tcm_prty_k2_regs } } }, + {"mcm", mcm_int_attn_desc, mcm_prty_attn_desc, { + {3, 2, + mcm_int_bb_a0_regs, + mcm_prty_bb_a0_regs}, + {3, 2, + mcm_int_bb_b0_regs, + mcm_prty_bb_b0_regs}, + {3, 2, mcm_int_k2_regs, + mcm_prty_k2_regs } } }, + {"ucm", ucm_int_attn_desc, ucm_prty_attn_desc, { + {3, 2, + ucm_int_bb_a0_regs, + ucm_prty_bb_a0_regs}, + {3, 2, + ucm_int_bb_b0_regs, + ucm_prty_bb_b0_regs}, + {3, 2, ucm_int_k2_regs, + ucm_prty_k2_regs } } }, + {"xcm", xcm_int_attn_desc, xcm_prty_attn_desc, { + {3, 2, + xcm_int_bb_a0_regs, + xcm_prty_bb_a0_regs}, + {3, 2, + xcm_int_bb_b0_regs, + xcm_prty_bb_b0_regs}, + {3, 2, xcm_int_k2_regs, + xcm_prty_k2_regs } } }, + {"ycm", ycm_int_attn_desc, ycm_prty_attn_desc, { + {3, 2, + ycm_int_bb_a0_regs, + ycm_prty_bb_a0_regs}, + {3, 2, + ycm_int_bb_b0_regs, + ycm_prty_bb_b0_regs}, + {3, 2, ycm_int_k2_regs, + ycm_prty_k2_regs } } }, + {"pcm", pcm_int_attn_desc, pcm_prty_attn_desc, { + {3, 1, + pcm_int_bb_a0_regs, + pcm_prty_bb_a0_regs}, + {3, 1, + pcm_int_bb_b0_regs, + pcm_prty_bb_b0_regs}, + {3, 1, pcm_int_k2_regs, + pcm_prty_k2_regs } } }, + {"qm", qm_int_attn_desc, qm_prty_attn_desc, { + {1, 4, qm_int_bb_a0_regs, + qm_prty_bb_a0_regs}, + {1, 4, qm_int_bb_b0_regs, + qm_prty_bb_b0_regs}, + {1, 4, qm_int_k2_regs, + qm_prty_k2_regs } } }, + {"tm", tm_int_attn_desc, tm_prty_attn_desc, { + {2, 1, tm_int_bb_a0_regs, + tm_prty_bb_a0_regs}, + {2, 1, tm_int_bb_b0_regs, + tm_prty_bb_b0_regs}, + {2, 1, tm_int_k2_regs, + tm_prty_k2_regs } } }, + {"dorq", dorq_int_attn_desc, dorq_prty_attn_desc, { + {1, 1, + dorq_int_bb_a0_regs, + + dorq_prty_bb_a0_regs}, + {1, 2, + dorq_int_bb_b0_regs, + + dorq_prty_bb_b0_regs}, + {1, 2, + dorq_int_k2_regs, + + dorq_prty_k2_regs } } }, + {"brb", brb_int_attn_desc, brb_prty_attn_desc, { + {12, 2, + brb_int_bb_a0_regs, + brb_prty_bb_a0_regs}, + {12, 3, + brb_int_bb_b0_regs, + brb_prty_bb_b0_regs}, + {12, 3, brb_int_k2_regs, + brb_prty_k2_regs } } }, + {"src", src_int_attn_desc, OSAL_NULL, { + {1, 0, src_int_bb_a0_regs, + OSAL_NULL}, + {1, 0, src_int_bb_b0_regs, + OSAL_NULL}, + {1, 0, src_int_k2_regs, + OSAL_NULL } } }, + {"prs", prs_int_attn_desc, prs_prty_attn_desc, { + {1, 3, + prs_int_bb_a0_regs, + prs_prty_bb_a0_regs}, + {1, 3, + prs_int_bb_b0_regs, + prs_prty_bb_b0_regs}, + {1, 3, prs_int_k2_regs, + prs_prty_k2_regs } } }, + {"tsdm", tsdm_int_attn_desc, tsdm_prty_attn_desc, { + {1, 1, + tsdm_int_bb_a0_regs, + + tsdm_prty_bb_a0_regs}, + {1, 1, + tsdm_int_bb_b0_regs, + + tsdm_prty_bb_b0_regs}, + {1, 1, + tsdm_int_k2_regs, + + tsdm_prty_k2_regs } } }, + {"msdm", msdm_int_attn_desc, msdm_prty_attn_desc, { + {1, 1, + msdm_int_bb_a0_regs, + + msdm_prty_bb_a0_regs}, + {1, 1, + 
msdm_int_bb_b0_regs, + + msdm_prty_bb_b0_regs}, + {1, 1, + msdm_int_k2_regs, + + msdm_prty_k2_regs } } }, + {"usdm", usdm_int_attn_desc, usdm_prty_attn_desc, { + {1, 1, + usdm_int_bb_a0_regs, + + usdm_prty_bb_a0_regs}, + {1, 1, + usdm_int_bb_b0_regs, + + usdm_prty_bb_b0_regs}, + {1, 1, + usdm_int_k2_regs, + + usdm_prty_k2_regs } } }, + {"xsdm", xsdm_int_attn_desc, xsdm_prty_attn_desc, { + {1, 1, + xsdm_int_bb_a0_regs, + + xsdm_prty_bb_a0_regs}, + {1, 1, + xsdm_int_bb_b0_regs, + + xsdm_prty_bb_b0_regs}, + {1, 1, + xsdm_int_k2_regs, + + xsdm_prty_k2_regs } } }, + {"ysdm", ysdm_int_attn_desc, ysdm_prty_attn_desc, { + {1, 1, + ysdm_int_bb_a0_regs, + + ysdm_prty_bb_a0_regs}, + {1, 1, + ysdm_int_bb_b0_regs, + + ysdm_prty_bb_b0_regs}, + {1, 1, + ysdm_int_k2_regs, + + ysdm_prty_k2_regs } } }, + {"psdm", psdm_int_attn_desc, psdm_prty_attn_desc, { + {1, 1, + psdm_int_bb_a0_regs, + + psdm_prty_bb_a0_regs}, + {1, 1, + psdm_int_bb_b0_regs, + + psdm_prty_bb_b0_regs}, + {1, 1, + psdm_int_k2_regs, + + psdm_prty_k2_regs } } }, + {"tsem", tsem_int_attn_desc, tsem_prty_attn_desc, { + {3, 3, + tsem_int_bb_a0_regs, + + tsem_prty_bb_a0_regs}, + {3, 3, + tsem_int_bb_b0_regs, + + tsem_prty_bb_b0_regs}, + {3, 4, + tsem_int_k2_regs, + + tsem_prty_k2_regs } } }, + {"msem", msem_int_attn_desc, msem_prty_attn_desc, { + {3, 2, + msem_int_bb_a0_regs, + + msem_prty_bb_a0_regs}, + {3, 2, + msem_int_bb_b0_regs, + + msem_prty_bb_b0_regs}, + {3, 3, + msem_int_k2_regs, + + msem_prty_k2_regs } } }, + {"usem", usem_int_attn_desc, usem_prty_attn_desc, { + {3, 2, + usem_int_bb_a0_regs, + + usem_prty_bb_a0_regs}, + {3, 2, + usem_int_bb_b0_regs, + + usem_prty_bb_b0_regs}, + {3, 3, + usem_int_k2_regs, + + usem_prty_k2_regs } } }, + {"xsem", xsem_int_attn_desc, xsem_prty_attn_desc, { + {3, 2, + xsem_int_bb_a0_regs, + + xsem_prty_bb_a0_regs}, + {3, 2, + xsem_int_bb_b0_regs, + + xsem_prty_bb_b0_regs}, + {3, 3, + xsem_int_k2_regs, + + xsem_prty_k2_regs } } }, + {"ysem", ysem_int_attn_desc, ysem_prty_attn_desc, { + {3, 2, + ysem_int_bb_a0_regs, + + ysem_prty_bb_a0_regs}, + {3, 2, + ysem_int_bb_b0_regs, + + ysem_prty_bb_b0_regs}, + {3, 3, + ysem_int_k2_regs, + + ysem_prty_k2_regs } } }, + {"psem", psem_int_attn_desc, psem_prty_attn_desc, { + {3, 3, + psem_int_bb_a0_regs, + + psem_prty_bb_a0_regs}, + {3, 3, + psem_int_bb_b0_regs, + + psem_prty_bb_b0_regs}, + {3, 4, + psem_int_k2_regs, + + psem_prty_k2_regs } } }, + {"rss", rss_int_attn_desc, rss_prty_attn_desc, { + {1, 1, + rss_int_bb_a0_regs, + rss_prty_bb_a0_regs}, + {1, 1, + rss_int_bb_b0_regs, + rss_prty_bb_b0_regs}, + {1, 1, rss_int_k2_regs, + rss_prty_k2_regs } } }, + {"tmld", tmld_int_attn_desc, tmld_prty_attn_desc, { + {1, 1, + tmld_int_bb_a0_regs, + + tmld_prty_bb_a0_regs}, + {1, 1, + tmld_int_bb_b0_regs, + + tmld_prty_bb_b0_regs}, + {1, 1, + tmld_int_k2_regs, + + tmld_prty_k2_regs } } }, + {"muld", muld_int_attn_desc, muld_prty_attn_desc, { + {1, 1, + muld_int_bb_a0_regs, + + muld_prty_bb_a0_regs}, + {1, 1, + muld_int_bb_b0_regs, + + muld_prty_bb_b0_regs}, + {1, 1, + muld_int_k2_regs, + + muld_prty_k2_regs } } }, + {"yuld", yuld_int_attn_desc, yuld_prty_attn_desc, { + {1, 1, + yuld_int_bb_a0_regs, + + yuld_prty_bb_a0_regs}, + {1, 1, + yuld_int_bb_b0_regs, + + yuld_prty_bb_b0_regs}, + {1, 1, + yuld_int_k2_regs, + + yuld_prty_k2_regs } } }, + {"xyld", xyld_int_attn_desc, xyld_prty_attn_desc, { + {1, 1, + xyld_int_bb_a0_regs, + + xyld_prty_bb_a0_regs}, + {1, 1, + xyld_int_bb_b0_regs, + + xyld_prty_bb_b0_regs}, + {1, 1, + xyld_int_k2_regs, + + xyld_prty_k2_regs } } }, + {"prm", 
prm_int_attn_desc, prm_prty_attn_desc, { + {1, 1, + prm_int_bb_a0_regs, + prm_prty_bb_a0_regs}, + {1, 2, + prm_int_bb_b0_regs, + prm_prty_bb_b0_regs}, + {1, 2, prm_int_k2_regs, + prm_prty_k2_regs } } }, + {"pbf_pb1", pbf_pb1_int_attn_desc, pbf_pb1_prty_attn_desc, { + {1, 0, + + pbf_pb1_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pbf_pb1_int_bb_b0_regs, + + pbf_pb1_prty_bb_b0_regs}, + {1, 1, + + pbf_pb1_int_k2_regs, + + pbf_pb1_prty_k2_regs } } }, + {"pbf_pb2", pbf_pb2_int_attn_desc, pbf_pb2_prty_attn_desc, { + {1, 0, + + pbf_pb2_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + + pbf_pb2_int_bb_b0_regs, + + pbf_pb2_prty_bb_b0_regs}, + {1, 1, + + pbf_pb2_int_k2_regs, + + pbf_pb2_prty_k2_regs } } }, + {"rpb", rpb_int_attn_desc, rpb_prty_attn_desc, { + {1, 0, + rpb_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + rpb_int_bb_b0_regs, + rpb_prty_bb_b0_regs}, + {1, 1, rpb_int_k2_regs, + rpb_prty_k2_regs } } }, + {"btb", btb_int_attn_desc, btb_prty_attn_desc, { + {11, 1, + btb_int_bb_a0_regs, + btb_prty_bb_a0_regs}, + {11, 2, + btb_int_bb_b0_regs, + btb_prty_bb_b0_regs}, + {11, 2, btb_int_k2_regs, + btb_prty_k2_regs } } }, + {"pbf", pbf_int_attn_desc, pbf_prty_attn_desc, { + {1, 2, + pbf_int_bb_a0_regs, + pbf_prty_bb_a0_regs}, + {1, 3, + pbf_int_bb_b0_regs, + pbf_prty_bb_b0_regs}, + {1, 3, pbf_int_k2_regs, + pbf_prty_k2_regs } } }, + {"rdif", rdif_int_attn_desc, rdif_prty_attn_desc, { + {1, 0, + rdif_int_bb_a0_regs, + OSAL_NULL}, + {1, 1, + rdif_int_bb_b0_regs, + + rdif_prty_bb_b0_regs}, + {1, 1, + rdif_int_k2_regs, + + rdif_prty_k2_regs } } }, + {"tdif", tdif_int_attn_desc, tdif_prty_attn_desc, { + {1, 1, + tdif_int_bb_a0_regs, + + tdif_prty_bb_a0_regs}, + {1, 2, + tdif_int_bb_b0_regs, + + tdif_prty_bb_b0_regs}, + {1, 2, + tdif_int_k2_regs, + + tdif_prty_k2_regs } } }, + {"cdu", cdu_int_attn_desc, cdu_prty_attn_desc, { + {1, 1, + cdu_int_bb_a0_regs, + cdu_prty_bb_a0_regs}, + {1, 1, + cdu_int_bb_b0_regs, + cdu_prty_bb_b0_regs}, + {1, 1, cdu_int_k2_regs, + cdu_prty_k2_regs } } }, + {"ccfc", ccfc_int_attn_desc, ccfc_prty_attn_desc, { + {1, 2, + ccfc_int_bb_a0_regs, + + ccfc_prty_bb_a0_regs}, + {1, 2, + ccfc_int_bb_b0_regs, + + ccfc_prty_bb_b0_regs}, + {1, 2, + ccfc_int_k2_regs, + + ccfc_prty_k2_regs } } }, + {"tcfc", tcfc_int_attn_desc, tcfc_prty_attn_desc, { + {1, 2, + tcfc_int_bb_a0_regs, + + tcfc_prty_bb_a0_regs}, + {1, 2, + tcfc_int_bb_b0_regs, + + tcfc_prty_bb_b0_regs}, + {1, 2, + tcfc_int_k2_regs, + + tcfc_prty_k2_regs } } }, + {"igu", igu_int_attn_desc, igu_prty_attn_desc, { + {1, 3, + igu_int_bb_a0_regs, + igu_prty_bb_a0_regs}, + {1, 3, + igu_int_bb_b0_regs, + igu_prty_bb_b0_regs}, + {1, 2, igu_int_k2_regs, + igu_prty_k2_regs } } }, + {"cau", cau_int_attn_desc, cau_prty_attn_desc, { + {1, 1, + cau_int_bb_a0_regs, + cau_prty_bb_a0_regs}, + {1, 1, + cau_int_bb_b0_regs, + cau_prty_bb_b0_regs}, + {1, 1, cau_int_k2_regs, + cau_prty_k2_regs } } }, + {"umac", umac_int_attn_desc, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {1, 0, umac_int_k2_regs, + OSAL_NULL } } }, + {"xmac", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"dbg", dbg_int_attn_desc, dbg_prty_attn_desc, { + {1, 1, + dbg_int_bb_a0_regs, + dbg_prty_bb_a0_regs}, + {1, 1, + dbg_int_bb_b0_regs, + dbg_prty_bb_b0_regs}, + {1, 1, dbg_int_k2_regs, + dbg_prty_k2_regs } } }, + {"nig", nig_int_attn_desc, nig_prty_attn_desc, { + {6, 4, + nig_int_bb_a0_regs, + nig_prty_bb_a0_regs}, + {6, 5, + nig_int_bb_b0_regs, + nig_prty_bb_b0_regs}, + {10, 5, 
nig_int_k2_regs, + nig_prty_k2_regs } } }, + {"wol", wol_int_attn_desc, wol_prty_attn_desc, { + {0, 0, OSAL_NULL, + OSAL_NULL}, + {0, 0, OSAL_NULL, + OSAL_NULL}, + {1, 1, wol_int_k2_regs, + wol_prty_k2_regs } } }, + {"bmbn", bmbn_int_attn_desc, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {1, 0, bmbn_int_k2_regs, + OSAL_NULL } } }, + {"ipc", ipc_int_attn_desc, ipc_prty_attn_desc, { + {1, 1, + ipc_int_bb_a0_regs, + ipc_prty_bb_a0_regs}, + {1, 1, + ipc_int_bb_b0_regs, + ipc_prty_bb_b0_regs}, + {1, 0, ipc_int_k2_regs, + OSAL_NULL } } }, + {"nwm", nwm_int_attn_desc, nwm_prty_attn_desc, { + {0, 0, OSAL_NULL, + OSAL_NULL}, + {0, 0, OSAL_NULL, + OSAL_NULL}, + {1, 3, nwm_int_k2_regs, + nwm_prty_k2_regs } } }, + {"nws", nws_int_attn_desc, nws_prty_attn_desc, { + {0, 0, OSAL_NULL, + OSAL_NULL}, + {0, 0, OSAL_NULL, + OSAL_NULL}, + {4, 1, nws_int_k2_regs, + nws_prty_k2_regs } } }, + {"ms", ms_int_attn_desc, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {1, 0, ms_int_k2_regs, + OSAL_NULL } } }, + {"phy_pcie", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"misc_aeu", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, + {"bar0_map", OSAL_NULL, OSAL_NULL, { + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL}, + {0, 0, OSAL_NULL, OSAL_NULL } } }, +}; + +#define NUM_INT_REGS 423 +#define NUM_PRTY_REGS 378 + +#endif /* __PREVENT_INT_ATTN__ */ + +#endif /* __ATTN_VALUES_H__ */ diff --git a/drivers/net/qede/base/ecore_chain.h b/drivers/net/qede/base/ecore_chain.h new file mode 100644 index 00000000..c5734490 --- /dev/null +++ b/drivers/net/qede/base/ecore_chain.h @@ -0,0 +1,724 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_CHAIN_H__ +#define __ECORE_CHAIN_H__ + +#include <assert.h> /* @DPDK */ + +#include "common_hsi.h" +#include "ecore_utils.h" + +enum ecore_chain_mode { + /* Each Page contains a next pointer at its end */ + ECORE_CHAIN_MODE_NEXT_PTR, + + /* Chain is a single page (next ptr) is unrequired */ + ECORE_CHAIN_MODE_SINGLE, + + /* Page pointers are located in a side list */ + ECORE_CHAIN_MODE_PBL, +}; + +enum ecore_chain_use_mode { + ECORE_CHAIN_USE_TO_PRODUCE, /* Chain starts empty */ + ECORE_CHAIN_USE_TO_CONSUME, /* Chain starts full */ + ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, /* Chain starts empty */ +}; + +enum ecore_chain_cnt_type { + /* The chain's size/prod/cons are kept in 16-bit variables */ + ECORE_CHAIN_CNT_TYPE_U16, + + /* The chain's size/prod/cons are kept in 32-bit variables */ + ECORE_CHAIN_CNT_TYPE_U32, +}; + +struct ecore_chain_next { + struct regpair next_phys; + void *next_virt; +}; + +struct ecore_chain_pbl_u16 { + u16 prod_page_idx; + u16 cons_page_idx; +}; + +struct ecore_chain_pbl_u32 { + u32 prod_page_idx; + u32 cons_page_idx; +}; + +struct ecore_chain_pbl { + /* Base address of a pre-allocated buffer for pbl */ + dma_addr_t p_phys_table; + void *p_virt_table; + + /* Table for keeping the virtual addresses of the chain pages, + * respectively to the physical addresses in the pbl table. 
+ */ + void **pp_virt_addr_tbl; + + /* Index to current used page by producer/consumer */ + union { + struct ecore_chain_pbl_u16 pbl16; + struct ecore_chain_pbl_u32 pbl32; + } u; +}; + +struct ecore_chain_u16 { + /* Cyclic index of next element to produce/consme */ + u16 prod_idx; + u16 cons_idx; +}; + +struct ecore_chain_u32 { + /* Cyclic index of next element to produce/consme */ + u32 prod_idx; + u32 cons_idx; +}; + +struct ecore_chain { + /* Address of first page of the chain */ + void *p_virt_addr; + dma_addr_t p_phys_addr; + + /* Point to next element to produce/consume */ + void *p_prod_elem; + void *p_cons_elem; + + enum ecore_chain_mode mode; + enum ecore_chain_use_mode intended_use; + + enum ecore_chain_cnt_type cnt_type; + union { + struct ecore_chain_u16 chain16; + struct ecore_chain_u32 chain32; + } u; + + u32 page_cnt; + + /* Number of elements - capacity is for usable elements only, + * while size will contain total number of elements [for entire chain]. + */ + u32 capacity; + u32 size; + + /* Elements information for fast calculations */ + u16 elem_per_page; + u16 elem_per_page_mask; + u16 elem_unusable; + u16 usable_per_page; + u16 elem_size; + u16 next_page_mask; + + struct ecore_chain_pbl pbl; +}; + +#define ECORE_CHAIN_PBL_ENTRY_SIZE (8) +#define ECORE_CHAIN_PAGE_SIZE (0x1000) +#define ELEMS_PER_PAGE(elem_size) (ECORE_CHAIN_PAGE_SIZE / (elem_size)) + +#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((mode == ECORE_CHAIN_MODE_NEXT_PTR) ? \ + (1 + ((sizeof(struct ecore_chain_next) - 1) / \ + (elem_size))) : 0) + +#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ + ((u32)(ELEMS_PER_PAGE(elem_size) - \ + UNUSABLE_ELEMS_PER_PAGE(elem_size, mode))) + +#define ECORE_CHAIN_PAGE_CNT(elem_cnt, elem_size, mode) \ + DIV_ROUND_UP(elem_cnt, USABLE_ELEMS_PER_PAGE(elem_size, mode)) + +#define is_chain_u16(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U16) +#define is_chain_u32(p) ((p)->cnt_type == ECORE_CHAIN_CNT_TYPE_U32) + +/* Accessors */ +static OSAL_INLINE u16 ecore_chain_get_prod_idx(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u16(p_chain)); + return p_chain->u.chain16.prod_idx; +} + +static OSAL_INLINE u32 ecore_chain_get_prod_idx_u32(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u32(p_chain)); + return p_chain->u.chain32.prod_idx; +} + +static OSAL_INLINE u16 ecore_chain_get_cons_idx(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u16(p_chain)); + return p_chain->u.chain16.cons_idx; +} + +static OSAL_INLINE u32 ecore_chain_get_cons_idx_u32(struct ecore_chain *p_chain) +{ + OSAL_ASSERT(is_chain_u32(p_chain)); + return p_chain->u.chain32.cons_idx; +} + +/* FIXME: + * Should create OSALs for the below definitions. + * For Linux, replace them with the existing U16_MAX and U32_MAX, and handle + * kernel versions that lack them. 
+ */ +#define ECORE_U16_MAX ((u16)~0U) +#define ECORE_U32_MAX ((u32)~0U) + +static OSAL_INLINE u16 ecore_chain_get_elem_left(struct ecore_chain *p_chain) +{ + u16 used; + + OSAL_ASSERT(is_chain_u16(p_chain)); + + used = (u16)(((u32)ECORE_U16_MAX + 1 + + (u32)(p_chain->u.chain16.prod_idx)) - + (u32)p_chain->u.chain16.cons_idx); + if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain16.prod_idx / p_chain->elem_per_page - + p_chain->u.chain16.cons_idx / p_chain->elem_per_page; + + return (u16)(p_chain->capacity - used); +} + +static OSAL_INLINE u32 +ecore_chain_get_elem_left_u32(struct ecore_chain *p_chain) +{ + u32 used; + + OSAL_ASSERT(is_chain_u32(p_chain)); + + used = (u32)(((u64)ECORE_U32_MAX + 1 + + (u64)(p_chain->u.chain32.prod_idx)) - + (u64)p_chain->u.chain32.cons_idx); + if (p_chain->mode == ECORE_CHAIN_MODE_NEXT_PTR) + used -= p_chain->u.chain32.prod_idx / p_chain->elem_per_page - + p_chain->u.chain32.cons_idx / p_chain->elem_per_page; + + return p_chain->capacity - used; +} + +static OSAL_INLINE u8 ecore_chain_is_full(struct ecore_chain *p_chain) +{ + if (is_chain_u16(p_chain)) + return (ecore_chain_get_elem_left(p_chain) == + p_chain->capacity); + else + return (ecore_chain_get_elem_left_u32(p_chain) == + p_chain->capacity); +} + +static OSAL_INLINE u8 ecore_chain_is_empty(struct ecore_chain *p_chain) +{ + if (is_chain_u16(p_chain)) + return (ecore_chain_get_elem_left(p_chain) == 0); + else + return (ecore_chain_get_elem_left_u32(p_chain) == 0); +} + +static OSAL_INLINE +u16 ecore_chain_get_elem_per_page(struct ecore_chain *p_chain) +{ + return p_chain->elem_per_page; +} + +static OSAL_INLINE +u16 ecore_chain_get_usable_per_page(struct ecore_chain *p_chain) +{ + return p_chain->usable_per_page; +} + +static OSAL_INLINE +u16 ecore_chain_get_unusable_per_page(struct ecore_chain *p_chain) +{ + return p_chain->elem_unusable; +} + +static OSAL_INLINE u32 ecore_chain_get_size(struct ecore_chain *p_chain) +{ + return p_chain->size; +} + +static OSAL_INLINE u32 ecore_chain_get_page_cnt(struct ecore_chain *p_chain) +{ + return p_chain->page_cnt; +} + +static OSAL_INLINE +dma_addr_t ecore_chain_get_pbl_phys(struct ecore_chain *p_chain) +{ + return p_chain->pbl.p_phys_table; +} + +/** + * @brief ecore_chain_advance_page - + * + * Advance the next element accros pages for a linked chain + * + * @param p_chain + * @param p_next_elem + * @param idx_to_inc + * @param page_to_inc + */ +static OSAL_INLINE void +ecore_chain_advance_page(struct ecore_chain *p_chain, void **p_next_elem, + void *idx_to_inc, void *page_to_inc) +{ + struct ecore_chain_next *p_next = OSAL_NULL; + u32 page_index = 0; + + switch (p_chain->mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + p_next = (struct ecore_chain_next *)(*p_next_elem); + *p_next_elem = p_next->next_virt; + if (is_chain_u16(p_chain)) + *(u16 *)idx_to_inc += p_chain->elem_unusable; + else + *(u32 *)idx_to_inc += p_chain->elem_unusable; + break; + case ECORE_CHAIN_MODE_SINGLE: + *p_next_elem = p_chain->p_virt_addr; + break; + case ECORE_CHAIN_MODE_PBL: + if (is_chain_u16(p_chain)) { + if (++(*(u16 *)page_to_inc) == p_chain->page_cnt) + *(u16 *)page_to_inc = 0; + page_index = *(u16 *)page_to_inc; + } else { + if (++(*(u32 *)page_to_inc) == p_chain->page_cnt) + *(u32 *)page_to_inc = 0; + page_index = *(u32 *)page_to_inc; + } + *p_next_elem = p_chain->pbl.pp_virt_addr_tbl[page_index]; + } +} + +#define is_unusable_idx(p, idx) \ + (((p)->u.chain16.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_idx_u32(p, idx) \ + 
(((p)->u.chain32.idx & (p)->elem_per_page_mask) == (p)->usable_per_page) + +#define is_unusable_next_idx(p, idx) \ + ((((p)->u.chain16.idx + 1) & (p)->elem_per_page_mask) == \ + (p)->usable_per_page) + +#define is_unusable_next_idx_u32(p, idx) \ + ((((p)->u.chain32.idx + 1) & (p)->elem_per_page_mask) \ + == (p)->usable_per_page) + +#define test_and_skip(p, idx) \ + do { \ + if (is_chain_u16(p)) { \ + if (is_unusable_idx(p, idx)) \ + (p)->u.chain16.idx += (p)->elem_unusable; \ + } else { \ + if (is_unusable_idx_u32(p, idx)) \ + (p)->u.chain32.idx += (p)->elem_unusable; \ + } \ + } while (0) + +/** + * @brief ecore_chain_return_multi_produced - + * + * A chain in which the driver "Produces" elements should use this API + * to indicate previous produced elements are now consumed. + * + * @param p_chain + * @param num + */ +static OSAL_INLINE +void ecore_chain_return_multi_produced(struct ecore_chain *p_chain, u32 num) +{ + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx += (u16)num; + else + p_chain->u.chain32.cons_idx += num; + test_and_skip(p_chain, cons_idx); +} + +/** + * @brief ecore_chain_return_produced - + * + * A chain in which the driver "Produces" elements should use this API + * to indicate previous produced elements are now consumed. + * + * @param p_chain + */ +static OSAL_INLINE void ecore_chain_return_produced(struct ecore_chain *p_chain) +{ + if (is_chain_u16(p_chain)) + p_chain->u.chain16.cons_idx++; + else + p_chain->u.chain32.cons_idx++; + test_and_skip(p_chain, cons_idx); +} + +/** + * @brief ecore_chain_produce - + * + * A chain in which the driver "Produces" elements should use this to get + * a pointer to the next element which can be "Produced". It's driver + * responsibility to validate that the chain has room for new element. + * + * @param p_chain + * + * @return void*, a pointer to next element + */ +static OSAL_INLINE void *ecore_chain_produce(struct ecore_chain *p_chain) +{ + void *p_ret = OSAL_NULL, *p_prod_idx, *p_prod_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain16.prod_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain16.prod_idx++; + } else { + if ((p_chain->u.chain32.prod_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_prod_idx = &p_chain->u.chain32.prod_idx; + p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_prod_elem, + p_prod_idx, p_prod_page_idx); + } + p_chain->u.chain32.prod_idx++; + } + + p_ret = p_chain->p_prod_elem; + p_chain->p_prod_elem = (void *)(((u8 *)p_chain->p_prod_elem) + + p_chain->elem_size); + + return p_ret; +} + +/** + * @brief ecore_chain_get_capacity - + * + * Get the maximum number of BDs in chain + * + * @param p_chain + * @param num + * + * @return number of unusable BDs + */ +static OSAL_INLINE u32 ecore_chain_get_capacity(struct ecore_chain *p_chain) +{ + return p_chain->capacity; +} + +/** + * @brief ecore_chain_recycle_consumed - + * + * Returns an element which was previously consumed; + * Increments producers so they could be written to FW. 
+ * + * @param p_chain + */ +static OSAL_INLINE +void ecore_chain_recycle_consumed(struct ecore_chain *p_chain) +{ + test_and_skip(p_chain, prod_idx); + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx++; + else + p_chain->u.chain32.prod_idx++; +} + +/** + * @brief ecore_chain_consume - + * + * A Chain in which the driver utilizes data written by a different source + * (i.e., FW) should use this to access passed buffers. + * + * @param p_chain + * + * @return void*, a pointer to the next buffer written + */ +static OSAL_INLINE void *ecore_chain_consume(struct ecore_chain *p_chain) +{ + void *p_ret = OSAL_NULL, *p_cons_idx, *p_cons_page_idx; + + if (is_chain_u16(p_chain)) { + if ((p_chain->u.chain16.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain16.cons_idx; + p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain16.cons_idx++; + } else { + if ((p_chain->u.chain32.cons_idx & + p_chain->elem_per_page_mask) == p_chain->next_page_mask) { + p_cons_idx = &p_chain->u.chain32.cons_idx; + p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; + ecore_chain_advance_page(p_chain, &p_chain->p_cons_elem, + p_cons_idx, p_cons_page_idx); + } + p_chain->u.chain32.cons_idx++; + } + + p_ret = p_chain->p_cons_elem; + p_chain->p_cons_elem = (void *)(((u8 *)p_chain->p_cons_elem) + + p_chain->elem_size); + + return p_ret; +} + +/** + * @brief ecore_chain_reset - + * + * Resets the chain to its start state + * + * @param p_chain pointer to a previously allocted chain + */ +static OSAL_INLINE void ecore_chain_reset(struct ecore_chain *p_chain) +{ + u32 i; + + if (is_chain_u16(p_chain)) { + p_chain->u.chain16.prod_idx = 0; + p_chain->u.chain16.cons_idx = 0; + } else { + p_chain->u.chain32.prod_idx = 0; + p_chain->u.chain32.cons_idx = 0; + } + p_chain->p_cons_elem = p_chain->p_virt_addr; + p_chain->p_prod_elem = p_chain->p_virt_addr; + + if (p_chain->mode == ECORE_CHAIN_MODE_PBL) { + /* Use (page_cnt - 1) as a reset value for the prod/cons page's + * indices, to avoid unnecessary page advancing on the first + * call to ecore_chain_produce/consume. Instead, the indices + * will be advanced to page_cnt and then will be wrapped to 0. 
+ */ + u32 reset_val = p_chain->page_cnt - 1; + + if (is_chain_u16(p_chain)) { + p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; + p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; + } else { + p_chain->pbl.u.pbl32.prod_page_idx = reset_val; + p_chain->pbl.u.pbl32.cons_page_idx = reset_val; + } + } + + switch (p_chain->intended_use) { + case ECORE_CHAIN_USE_TO_CONSUME_PRODUCE: + case ECORE_CHAIN_USE_TO_PRODUCE: + /* Do nothing */ + break; + + case ECORE_CHAIN_USE_TO_CONSUME: + /* produce empty elements */ + for (i = 0; i < p_chain->capacity; i++) + ecore_chain_recycle_consumed(p_chain); + break; + } +} + +/** + * @brief ecore_chain_init_params - + * + * Initalizes a basic chain struct + * + * @param p_chain + * @param page_cnt number of pages in the allocated buffer + * @param elem_size size of each element in the chain + * @param intended_use + * @param mode + * @param cnt_type + */ +static OSAL_INLINE void +ecore_chain_init_params(struct ecore_chain *p_chain, u32 page_cnt, u8 elem_size, + enum ecore_chain_use_mode intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type) +{ + /* chain fixed parameters */ + p_chain->p_virt_addr = OSAL_NULL; + p_chain->p_phys_addr = 0; + p_chain->elem_size = elem_size; + p_chain->intended_use = intended_use; + p_chain->mode = mode; + p_chain->cnt_type = cnt_type; + + p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); + p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); + p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; + p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); + p_chain->next_page_mask = (p_chain->usable_per_page & + p_chain->elem_per_page_mask); + + p_chain->page_cnt = page_cnt; + p_chain->capacity = p_chain->usable_per_page * page_cnt; + p_chain->size = p_chain->elem_per_page * page_cnt; + + p_chain->pbl.p_phys_table = 0; + p_chain->pbl.p_virt_table = OSAL_NULL; + p_chain->pbl.pp_virt_addr_tbl = OSAL_NULL; +} + +/** + * @brief ecore_chain_init_mem - + * + * Initalizes a basic chain struct with its chain buffers + * + * @param p_chain + * @param p_virt_addr virtual address of allocated buffer's beginning + * @param p_phys_addr physical address of allocated buffer's beginning + * + */ +static OSAL_INLINE void ecore_chain_init_mem(struct ecore_chain *p_chain, + void *p_virt_addr, + dma_addr_t p_phys_addr) +{ + p_chain->p_virt_addr = p_virt_addr; + p_chain->p_phys_addr = p_phys_addr; +} + +/** + * @brief ecore_chain_init_pbl_mem - + * + * Initalizes a basic chain struct with its pbl buffers + * + * @param p_chain + * @param p_virt_pbl pointer to a pre allocated side table which will hold + * virtual page addresses. + * @param p_phys_pbl pointer to a pre-allocated side table which will hold + * physical page addresses. + * @param pp_virt_addr_tbl + * pointer to a pre-allocated side table which will hold + * the virtual addresses of the chain pages. 
+ * + */ +static OSAL_INLINE void ecore_chain_init_pbl_mem(struct ecore_chain *p_chain, + void *p_virt_pbl, + dma_addr_t p_phys_pbl, + void **pp_virt_addr_tbl) +{ + p_chain->pbl.p_phys_table = p_phys_pbl; + p_chain->pbl.p_virt_table = p_virt_pbl; + p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; +} + +/** + * @brief ecore_chain_init_next_ptr_elem - + * + * Initalizes a next pointer element + * + * @param p_chain + * @param p_virt_curr virtual address of a chain page of which the next + * pointer element is initialized + * @param p_virt_next virtual address of the next chain page + * @param p_phys_next physical address of the next chain page + * + */ +static OSAL_INLINE void +ecore_chain_init_next_ptr_elem(struct ecore_chain *p_chain, void *p_virt_curr, + void *p_virt_next, dma_addr_t p_phys_next) +{ + struct ecore_chain_next *p_next; + u32 size; + + size = p_chain->elem_size * p_chain->usable_per_page; + p_next = (struct ecore_chain_next *)((u8 *)p_virt_curr + size); + + DMA_REGPAIR_LE(p_next->next_phys, p_phys_next); + + p_next->next_virt = p_virt_next; +} + +/** + * @brief ecore_chain_get_last_elem - + * + * Returns a pointer to the last element of the chain + * + * @param p_chain + * + * @return void* + */ +static OSAL_INLINE void *ecore_chain_get_last_elem(struct ecore_chain *p_chain) +{ + struct ecore_chain_next *p_next = OSAL_NULL; + void *p_virt_addr = OSAL_NULL; + u32 size, last_page_idx; + + if (!p_chain->p_virt_addr) + goto out; + + switch (p_chain->mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + size = p_chain->elem_size * p_chain->usable_per_page; + p_virt_addr = p_chain->p_virt_addr; + p_next = (struct ecore_chain_next *)((u8 *)p_virt_addr + size); + while (p_next->next_virt != p_chain->p_virt_addr) { + p_virt_addr = p_next->next_virt; + p_next = + (struct ecore_chain_next *)((u8 *)p_virt_addr + + size); + } + break; + case ECORE_CHAIN_MODE_SINGLE: + p_virt_addr = p_chain->p_virt_addr; + break; + case ECORE_CHAIN_MODE_PBL: + last_page_idx = p_chain->page_cnt - 1; + p_virt_addr = p_chain->pbl.pp_virt_addr_tbl[last_page_idx]; + break; + } + /* p_virt_addr points at this stage to the last page of the chain */ + size = p_chain->elem_size * (p_chain->usable_per_page - 1); + p_virt_addr = ((u8 *)p_virt_addr + size); +out: + return p_virt_addr; +} + +/** + * @brief ecore_chain_set_prod - sets the prod to the given value + * + * @param prod_idx + * @param p_prod_elem + */ +static OSAL_INLINE void ecore_chain_set_prod(struct ecore_chain *p_chain, + u32 prod_idx, void *p_prod_elem) +{ + if (is_chain_u16(p_chain)) + p_chain->u.chain16.prod_idx = (u16)prod_idx; + else + p_chain->u.chain32.prod_idx = prod_idx; + p_chain->p_prod_elem = p_prod_elem; +} + +/** + * @brief ecore_chain_pbl_zero_mem - set chain memory to 0 + * + * @param p_chain + */ +static OSAL_INLINE void ecore_chain_pbl_zero_mem(struct ecore_chain *p_chain) +{ + u32 i, page_cnt; + + if (p_chain->mode != ECORE_CHAIN_MODE_PBL) + return; + + page_cnt = ecore_chain_get_page_cnt(p_chain); + + for (i = 0; i < page_cnt; i++) + OSAL_MEM_ZERO(p_chain->pbl.pp_virt_addr_tbl[i], + ECORE_CHAIN_PAGE_SIZE); +} + +#endif /* __ECORE_CHAIN_H__ */ diff --git a/drivers/net/qede/base/ecore_cxt.c b/drivers/net/qede/base/ecore_cxt.c new file mode 100644 index 00000000..1201c1a9 --- /dev/null +++ b/drivers/net/qede/base/ecore_cxt.c @@ -0,0 +1,1961 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#include "bcm_osal.h" +#include "reg_addr.h" +#include "ecore_hsi_common.h" +#include "ecore_hsi_eth.h" +#include "ecore_rt_defs.h" +#include "ecore_status.h" +#include "ecore.h" +#include "ecore_init_ops.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_hw.h" +#include "ecore_dev_api.h" + +/* Max number of connection types in HW (DQ/CDU etc.) */ +#define MAX_CONN_TYPES PROTOCOLID_COMMON +#define NUM_TASK_TYPES 2 +#define NUM_TASK_PF_SEGMENTS 4 +#define NUM_TASK_VF_SEGMENTS 1 + +/* Doorbell-Queue constants */ +#define DQ_RANGE_SHIFT 4 +#define DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT) + +/* Searcher constants */ +#define SRC_MIN_NUM_ELEMS 256 + +/* Timers constants */ +#define TM_SHIFT 7 +#define TM_ALIGN (1 << TM_SHIFT) +#define TM_ELEM_SIZE 4 + +/* ILT constants */ +/* If for some reason, HW P size is modified to be less than 32K, + * special handling needs to be made for CDU initialization + */ +#define ILT_DEFAULT_HW_P_SIZE 3 + +#define ILT_PAGE_IN_BYTES(hw_p_size) (1U << ((hw_p_size) + 12)) +#define ILT_CFG_REG(cli, reg) PSWRQ2_REG_##cli##_##reg##_RT_OFFSET + +/* ILT entry structure */ +#define ILT_ENTRY_PHY_ADDR_MASK 0x000FFFFFFFFFFFULL +#define ILT_ENTRY_PHY_ADDR_SHIFT 0 +#define ILT_ENTRY_VALID_MASK 0x1ULL +#define ILT_ENTRY_VALID_SHIFT 52 +#define ILT_ENTRY_IN_REGS 2 +#define ILT_REG_SIZE_IN_BYTES 4 + +/* connection context union */ +union conn_context { + struct core_conn_context core_ctx; + struct eth_conn_context eth_ctx; +}; + +struct src_ent { + u8 opaque[56]; + u64 next; +}; + +#define CDUT_SEG_ALIGNMET 3 /* in 4k chunks */ +#define CDUT_SEG_ALIGNMET_IN_BYTES (1 << (CDUT_SEG_ALIGNMET + 12)) + +#define CONN_CXT_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(union conn_context, p_hwfn) + +/* PF per protocl configuration object */ +#define TASK_SEGMENTS (NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS) +#define TASK_SEGMENT_VF (NUM_TASK_PF_SEGMENTS) + +struct ecore_tid_seg { + u32 count; + u8 type; + bool has_fl_mem; +}; + +struct ecore_conn_type_cfg { + u32 cid_count; + u32 cid_start; + u32 cids_per_vf; + struct ecore_tid_seg tid_seg[TASK_SEGMENTS]; +}; + +/* ILT Client configuration, + * Per connection type (protocol) resources (cids, tis, vf cids etc.) 
+ * 1 - for connection context (CDUC) and for each task context we need two + * values, for regular task context and for force load memory + */ +#define ILT_CLI_PF_BLOCKS (1 + NUM_TASK_PF_SEGMENTS * 2) +#define ILT_CLI_VF_BLOCKS (1 + NUM_TASK_VF_SEGMENTS * 2) +#define CDUC_BLK (0) +#define CDUT_SEG_BLK(n) (1 + (u8)(n)) +#define CDUT_FL_SEG_BLK(n, X) (1 + (n) + NUM_TASK_##X##_SEGMENTS) + +enum ilt_clients { + ILT_CLI_CDUC, + ILT_CLI_CDUT, + ILT_CLI_QM, + ILT_CLI_TM, + ILT_CLI_SRC, + ILT_CLI_MAX +}; + +struct ilt_cfg_pair { + u32 reg; + u32 val; +}; + +struct ecore_ilt_cli_blk { + u32 total_size; /* 0 means not active */ + u32 real_size_in_page; + u32 start_line; + u32 dynamic_line_cnt; +}; + +struct ecore_ilt_client_cfg { + bool active; + + /* ILT boundaries */ + struct ilt_cfg_pair first; + struct ilt_cfg_pair last; + struct ilt_cfg_pair p_size; + + /* ILT client blocks for PF */ + struct ecore_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS]; + u32 pf_total_lines; + + /* ILT client blocks for VFs */ + struct ecore_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS]; + u32 vf_total_lines; +}; + +/* Per Path - + * ILT shadow table + * Protocol acquired CID lists + * PF start line in ILT + */ +struct ecore_dma_mem { + dma_addr_t p_phys; + void *p_virt; + osal_size_t size; +}; + +#define MAP_WORD_SIZE sizeof(unsigned long) +#define BITS_PER_MAP_WORD (MAP_WORD_SIZE * 8) + +struct ecore_cid_acquired_map { + u32 start_cid; + u32 max_count; + unsigned long *cid_map; +}; + +struct ecore_cxt_mngr { + /* Per protocl configuration */ + struct ecore_conn_type_cfg conn_cfg[MAX_CONN_TYPES]; + + /* computed ILT structure */ + struct ecore_ilt_client_cfg clients[ILT_CLI_MAX]; + + /* Task type sizes */ + u32 task_type_size[NUM_TASK_TYPES]; + + /* total number of VFs for this hwfn - + * ALL VFs are symmetric in terms of HW resources + */ + u32 vf_count; + + /* Acquired CIDs */ + struct ecore_cid_acquired_map acquired[MAX_CONN_TYPES]; + + /* ILT shadow table */ + struct ecore_dma_mem *ilt_shadow; + u32 pf_start_line; + + /* SRC T2 */ + struct ecore_dma_mem *t2; + u32 t2_num_pages; + u64 first_free; + u64 last_free; +}; + +/* check if resources/configuration is required according to protocol type */ +static OSAL_INLINE bool src_proto(enum protocol_type type) +{ + return type == PROTOCOLID_TOE; +} + +static OSAL_INLINE bool tm_cid_proto(enum protocol_type type) +{ + return type == PROTOCOLID_TOE; +} + +/* counts the iids for the CDU/CDUC ILT client configuration */ +struct ecore_cdu_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static void ecore_cxt_cdu_iids(struct ecore_cxt_mngr *p_mngr, + struct ecore_cdu_iids *iids) +{ + u32 type; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->pf_cids += p_mngr->conn_cfg[type].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + } +} + +/* counts the iids for the Searcher block configuration */ +struct ecore_src_iids { + u32 pf_cids; + u32 per_vf_cids; +}; + +static OSAL_INLINE void ecore_cxt_src_iids(struct ecore_cxt_mngr *p_mngr, + struct ecore_src_iids *iids) +{ + u32 i; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + if (!src_proto(i)) + continue; + + iids->pf_cids += p_mngr->conn_cfg[i].cid_count; + iids->per_vf_cids += p_mngr->conn_cfg[i].cids_per_vf; + } +} + +/* counts the iids for the Timers block configuration */ +struct ecore_tm_iids { + u32 pf_cids; + u32 pf_tids[NUM_TASK_PF_SEGMENTS]; /* per segment */ + u32 pf_tids_total; + u32 per_vf_cids; + u32 per_vf_tids; +}; + +static OSAL_INLINE void ecore_cxt_tm_iids(struct ecore_cxt_mngr *p_mngr, + struct 
ecore_tm_iids *iids) +{ + u32 i, j; + + for (i = 0; i < MAX_CONN_TYPES; i++) { + struct ecore_conn_type_cfg *p_cfg = &p_mngr->conn_cfg[i]; + + if (tm_cid_proto(i)) { + iids->pf_cids += p_cfg->cid_count; + iids->per_vf_cids += p_cfg->cids_per_vf; + } + } + + iids->pf_cids = ROUNDUP(iids->pf_cids, TM_ALIGN); + iids->per_vf_cids = ROUNDUP(iids->per_vf_cids, TM_ALIGN); + iids->per_vf_tids = ROUNDUP(iids->per_vf_tids, TM_ALIGN); + + for (iids->pf_tids_total = 0, j = 0; j < NUM_TASK_PF_SEGMENTS; j++) { + iids->pf_tids[j] = ROUNDUP(iids->pf_tids[j], TM_ALIGN); + iids->pf_tids_total += iids->pf_tids[j]; + } +} + +void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_tid_seg *segs; + u32 vf_cids = 0, type, j; + u32 vf_tids = 0; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + iids->cids += p_mngr->conn_cfg[type].cid_count; + vf_cids += p_mngr->conn_cfg[type].cids_per_vf; + + segs = p_mngr->conn_cfg[type].tid_seg; + /* for each segment there is at most one + * protocol for which count is not 0. + */ + for (j = 0; j < NUM_TASK_PF_SEGMENTS; j++) + iids->tids += segs[j].count; + + /* The last array element is for the VFs. As for PF + * segments, there can be only one protocol for + * which this value is not 0. + */ + vf_tids += segs[NUM_TASK_PF_SEGMENTS].count; + } + + iids->vf_cids += vf_cids * p_mngr->vf_count; + iids->tids += vf_tids * p_mngr->vf_count; + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "iids: CIDS %08x vf_cids %08x tids %08x vf_tids %08x\n", + iids->cids, iids->vf_cids, iids->tids, vf_tids); +} + +static struct ecore_tid_seg *ecore_cxt_tid_seg_info(struct ecore_hwfn *p_hwfn, + u32 seg) +{ + struct ecore_cxt_mngr *p_cfg = p_hwfn->p_cxt_mngr; + u32 i; + + /* Find the protocol with tid count > 0 for this segment. + * Note: there can only be one and this is already validated.
+ */ + for (i = 0; i < MAX_CONN_TYPES; i++) { + if (p_cfg->conn_cfg[i].tid_seg[seg].count) + return &p_cfg->conn_cfg[i].tid_seg[seg]; + } + return OSAL_NULL; +} + +/* set the iids (cid/tid) count per protocol */ +void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 cid_count, u32 vf_cid_cnt) +{ + struct ecore_cxt_mngr *p_mgr = p_hwfn->p_cxt_mngr; + struct ecore_conn_type_cfg *p_conn = &p_mgr->conn_cfg[type]; + + p_conn->cid_count = ROUNDUP(cid_count, DQ_RANGE_ALIGN); + p_conn->cids_per_vf = ROUNDUP(vf_cid_cnt, DQ_RANGE_ALIGN); +} + +u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid) +{ + if (vf_cid) + *vf_cid = p_hwfn->p_cxt_mngr->conn_cfg[type].cids_per_vf; + + return p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; +} + +u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn, + enum protocol_type type) +{ + return p_hwfn->p_cxt_mngr->acquired[type].start_cid; +} + +static u32 ecore_cxt_get_proto_tid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type) +{ + u32 cnt = 0; + int i; + + for (i = 0; i < TASK_SEGMENTS; i++) + cnt += p_hwfn->p_cxt_mngr->conn_cfg[type].tid_seg[i].count; + + return cnt; +} + +static OSAL_INLINE void +ecore_cxt_set_proto_tid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type proto, + u8 seg, u8 seg_type, u32 count, bool has_fl) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_tid_seg *p_seg = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + p_seg->count = count; + p_seg->has_fl_mem = has_fl; + p_seg->type = seg_type; +} + +/* the *p_line parameter must be either 0 for the first invocation or the + * value returned in the previous invocation. + */ +static void ecore_ilt_cli_blk_fill(struct ecore_ilt_client_cfg *p_cli, + struct ecore_ilt_cli_blk *p_blk, + u32 start_line, + u32 total_size, u32 elem_size) +{ + u32 ilt_size = ILT_PAGE_IN_BYTES(p_cli->p_size.val); + + /* verfiy called once for each block */ + if (p_blk->total_size) + return; + + p_blk->total_size = total_size; + p_blk->real_size_in_page = 0; + if (elem_size) + p_blk->real_size_in_page = (ilt_size / elem_size) * elem_size; + p_blk->start_line = start_line; +} + +static void ecore_ilt_cli_adv_line(struct ecore_hwfn *p_hwfn, + struct ecore_ilt_client_cfg *p_cli, + struct ecore_ilt_cli_blk *p_blk, + u32 *p_line, enum ilt_clients client_id) +{ + if (!p_blk->total_size) + return; + + if (!p_cli->active) + p_cli->first.val = *p_line; + + p_cli->active = true; + *p_line += DIV_ROUND_UP(p_blk->total_size, p_blk->real_size_in_page); + p_cli->last.val = *p_line - 1; + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "ILT[Client %d] - Lines: [%08x - %08x]. 
Block - Size %08x [Real %08x] Start line %d\n", + client_id, p_cli->first.val, p_cli->last.val, + p_blk->total_size, p_blk->real_size_in_page, + p_blk->start_line); +} + +static u32 ecore_ilt_get_dynamic_line_cnt(struct ecore_hwfn *p_hwfn, + enum ilt_clients ilt_client) +{ + u32 cid_count = p_hwfn->p_cxt_mngr->conn_cfg[PROTOCOLID_ROCE].cid_count; + struct ecore_ilt_client_cfg *p_cli; + u32 lines_to_skip = 0; + u32 cxts_per_p; + + /* TBD MK: ILT code should be simplified once PROTO enum is changed */ + + if (ilt_client == ILT_CLI_CDUC) { + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + + cxts_per_p = ILT_PAGE_IN_BYTES(p_cli->p_size.val) / + (u32)CONN_CXT_SIZE(p_hwfn); + + lines_to_skip = cid_count / cxts_per_p; + } + + return lines_to_skip; +} + +enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 curr_line, total, i, task_size, line; + struct ecore_ilt_client_cfg *p_cli; + struct ecore_ilt_cli_blk *p_blk; + struct ecore_cdu_iids cdu_iids; + struct ecore_src_iids src_iids; + struct ecore_qm_iids qm_iids; + struct ecore_tm_iids tm_iids; + struct ecore_tid_seg *p_seg; + + OSAL_MEM_ZERO(&qm_iids, sizeof(qm_iids)); + OSAL_MEM_ZERO(&cdu_iids, sizeof(cdu_iids)); + OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); + OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids)); + + p_mngr->pf_start_line = RESC_START(p_hwfn, ECORE_ILT); + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "hwfn [%d] - Set context manager starting line to be 0x%08x\n", + p_hwfn->my_id, p_hwfn->p_cxt_mngr->pf_start_line); + + /* CDUC */ + p_cli = &p_mngr->clients[ILT_CLI_CDUC]; + + curr_line = p_mngr->pf_start_line; + + /* CDUC PF */ + p_cli->pf_total_lines = 0; + + /* get the counters for the CDUC,CDUC and QM clients */ + ecore_cxt_cdu_iids(p_mngr, &cdu_iids); + + p_blk = &p_cli->pf_blks[CDUC_BLK]; + + total = cdu_iids.pf_cids * CONN_CXT_SIZE(p_hwfn); + + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + p_blk->dynamic_line_cnt = ecore_ilt_get_dynamic_line_cnt(p_hwfn, + ILT_CLI_CDUC); + + /* CDUC VF */ + p_blk = &p_cli->vf_blks[CDUC_BLK]; + total = cdu_iids.per_vf_cids * CONN_CXT_SIZE(p_hwfn); + + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total, CONN_CXT_SIZE(p_hwfn)); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_CDUC); + p_cli->vf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUC); + + /* CDUT PF */ + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + p_cli->first.val = curr_line; + + /* first the 'working' task memory */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = &p_cli->pf_blks[CDUT_SEG_BLK(i)]; + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + + /* next the 'init' task memory (forced load memory) */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg || p_seg->count == 0) + continue; + + p_blk = &p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)]; + + if (!p_seg->has_fl_mem) { + /* The segment is active (total size pf 'working' 
+ * memory is > 0) but has no FL (forced-load, Init) + * memory. Thus: + * + * 1. The total-size in the corrsponding FL block of + * the ILT client is set to 0 - No ILT line are + * provisioned and no ILT memory allocated. + * + * 2. The start-line of said block is set to the + * start line of the matching working memory + * block in the ILT client. This is later used to + * configure the CDU segment offset registers and + * results in an FL command for TIDs of this + * segment behaves as regular load commands + * (loading TIDs from the working memory). + */ + line = p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line; + + ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + continue; + } + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + ecore_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->pf_total_lines = curr_line - p_cli->pf_blks[0].start_line; + + /* CDUT VF */ + p_seg = ecore_cxt_tid_seg_info(p_hwfn, TASK_SEGMENT_VF); + if (p_seg && p_seg->count) { + /* Stricly speaking we need to iterate over all VF + * task segment types, but a VF has only 1 segment + */ + + /* 'working' memory */ + total = p_seg->count * p_mngr->task_type_size[p_seg->type]; + + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, + p_mngr->task_type_size[p_seg->type]); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + /* 'init' memory */ + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + if (!p_seg->has_fl_mem) { + /* see comment above */ + line = p_cli->vf_blks[CDUT_SEG_BLK(0)].start_line; + ecore_ilt_cli_blk_fill(p_cli, p_blk, line, 0, 0); + } else { + task_size = p_mngr->task_type_size[p_seg->type]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, + curr_line, total, task_size); + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + p_cli->vf_total_lines = curr_line - + p_cli->vf_blks[0].start_line; + + /* Now for the rest of the VFs */ + for (i = 1; i < p_mngr->vf_count; i++) { + p_blk = &p_cli->vf_blks[CDUT_SEG_BLK(0)]; + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + + p_blk = &p_cli->vf_blks[CDUT_FL_SEG_BLK(0, VF)]; + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_CDUT); + } + } + + /* QM */ + p_cli = &p_mngr->clients[ILT_CLI_QM]; + p_blk = &p_cli->pf_blks[0]; + + ecore_cxt_qm_iids(p_hwfn, &qm_iids); + total = ecore_qm_pf_mem_size(p_hwfn->rel_pf_id, qm_iids.cids, + qm_iids.vf_cids, qm_iids.tids, + p_hwfn->qm_info.num_pqs, + p_hwfn->qm_info.num_vf_pqs); + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "QM ILT Info, (cids=%d, vf_cids=%d, tids=%d, num_pqs=%d," + " num_vf_pqs=%d, memory_size=%d)\n", + qm_iids.cids, qm_iids.vf_cids, qm_iids.tids, + p_hwfn->qm_info.num_pqs, p_hwfn->qm_info.num_vf_pqs, total); + + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, total * 0x1000, + QM_PQ_ELEMENT_SIZE); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, ILT_CLI_QM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + /* SRC */ + p_cli = &p_mngr->clients[ILT_CLI_SRC]; + ecore_cxt_src_iids(p_mngr, &src_iids); + + /* Both the PF and VFs searcher connections are stored in the per PF + * database. Thus sum the PF searcher cids and all the VFs searcher + * cids. 
+ */ + total = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (total) { + u32 local_max = OSAL_MAX_T(u32, total, + SRC_MIN_NUM_ELEMS); + + total = OSAL_ROUNDUP_POW_OF_TWO(local_max); + + p_blk = &p_cli->pf_blks[0]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * sizeof(struct src_ent), + sizeof(struct src_ent)); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_SRC); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM PF */ + p_cli = &p_mngr->clients[ILT_CLI_TM]; + ecore_cxt_tm_iids(p_mngr, &tm_iids); + total = tm_iids.pf_cids + tm_iids.pf_tids_total; + if (total) { + p_blk = &p_cli->pf_blks[0]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + } + + /* TM VF */ + total = tm_iids.per_vf_cids + tm_iids.per_vf_tids; + if (total) { + p_blk = &p_cli->vf_blks[0]; + ecore_ilt_cli_blk_fill(p_cli, p_blk, curr_line, + total * TM_ELEM_SIZE, TM_ELEM_SIZE); + + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + p_cli->pf_total_lines = curr_line - p_blk->start_line; + + for (i = 1; i < p_mngr->vf_count; i++) { + ecore_ilt_cli_adv_line(p_hwfn, p_cli, p_blk, &curr_line, + ILT_CLI_TM); + } + } + + if (curr_line - p_hwfn->p_cxt_mngr->pf_start_line > + RESC_NUM(p_hwfn, ECORE_ILT)) { + DP_ERR(p_hwfn, "too many ilt lines...#lines=%d\n", + curr_line - p_hwfn->p_cxt_mngr->pf_start_line); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static void ecore_cxt_src_t2_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 i; + + if (!p_mngr->t2) + return; + + for (i = 0; i < p_mngr->t2_num_pages; i++) + if (p_mngr->t2[i].p_virt) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_mngr->t2[i].p_virt, + p_mngr->t2[i].p_phys, + p_mngr->t2[i].size); + + OSAL_FREE(p_hwfn->p_dev, p_mngr->t2); + p_mngr->t2 = OSAL_NULL; +} + +static enum _ecore_status_t ecore_cxt_src_t2_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 conn_num, total_size, ent_per_page, psz, i; + struct ecore_ilt_client_cfg *p_src; + struct ecore_src_iids src_iids; + struct ecore_dma_mem *p_t2; + enum _ecore_status_t rc; + + OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); + + /* if the SRC ILT client is inactive - there are no connection + * requiring the searcer, leave. 
+ */ + p_src = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_SRC]; + if (!p_src->active) + return ECORE_SUCCESS; + + ecore_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + total_size = conn_num * sizeof(struct src_ent); + + /* use the same page size as the SRC ILT client */ + psz = ILT_PAGE_IN_BYTES(p_src->p_size.val); + p_mngr->t2_num_pages = DIV_ROUND_UP(total_size, psz); + + /* allocate t2 */ + p_mngr->t2 = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + p_mngr->t2_num_pages * + sizeof(struct ecore_dma_mem)); + if (!p_mngr->t2) { + DP_NOTICE(p_hwfn, true, "Failed to allocate t2 table\n"); + rc = ECORE_NOMEM; + goto t2_fail; + } + + /* allocate t2 pages */ + for (i = 0; i < p_mngr->t2_num_pages; i++) { + u32 size = OSAL_MIN_T(u32, total_size, psz); + void **p_virt = &p_mngr->t2[i].p_virt; + + *p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_mngr->t2[i].p_phys, size); + if (!p_mngr->t2[i].p_virt) { + rc = ECORE_NOMEM; + goto t2_fail; + } + OSAL_MEM_ZERO(*p_virt, size); + p_mngr->t2[i].size = size; + total_size -= size; + } + + /* Set the t2 pointers */ + + /* entries per page - must be a power of two */ + ent_per_page = psz / sizeof(struct src_ent); + + p_mngr->first_free = (u64)p_mngr->t2[0].p_phys; + + p_t2 = &p_mngr->t2[(conn_num - 1) / ent_per_page]; + p_mngr->last_free = (u64)p_t2->p_phys + + ((conn_num - 1) & (ent_per_page - 1)) * sizeof(struct src_ent); + + for (i = 0; i < p_mngr->t2_num_pages; i++) { + u32 ent_num = OSAL_MIN_T(u32, ent_per_page, conn_num); + struct src_ent *entries = p_mngr->t2[i].p_virt; + u64 p_ent_phys = (u64)p_mngr->t2[i].p_phys, val; + u32 j; + + for (j = 0; j < ent_num - 1; j++) { + val = p_ent_phys + (j + 1) * sizeof(struct src_ent); + entries[j].next = OSAL_CPU_TO_BE64(val); + } + + if (i < p_mngr->t2_num_pages - 1) + val = (u64)p_mngr->t2[i + 1].p_phys; + else + val = 0; + entries[j].next = OSAL_CPU_TO_BE64(val); + + conn_num -= ent_per_page; + } + + return ECORE_SUCCESS; + +t2_fail: + ecore_cxt_src_t2_free(p_hwfn); + return rc; +} + +/* Total number of ILT lines used by this PF */ +static u32 ecore_cxt_ilt_shadow_size(struct ecore_ilt_client_cfg *ilt_clients) +{ + u32 size = 0; + u32 i; + + for (i = 0; i < ILT_CLI_MAX; i++) + if (!ilt_clients[i].active) + continue; + else + size += (ilt_clients[i].last.val - + ilt_clients[i].first.val + 1); + + return size; +} + +static void ecore_ilt_shadow_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *p_cli = p_hwfn->p_cxt_mngr->clients; + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 ilt_size, i; + + ilt_size = ecore_cxt_ilt_shadow_size(p_cli); + + for (i = 0; p_mngr->ilt_shadow && i < ilt_size; i++) { + struct ecore_dma_mem *p_dma = &p_mngr->ilt_shadow[i]; + + if (p_dma->p_virt) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_dma->p_virt, + p_dma->p_phys, p_dma->size); + p_dma->p_virt = OSAL_NULL; + } + OSAL_FREE(p_hwfn->p_dev, p_mngr->ilt_shadow); +} + +static enum _ecore_status_t +ecore_ilt_blk_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ilt_cli_blk *p_blk, + enum ilt_clients ilt_client, u32 start_line_offset) +{ + struct ecore_dma_mem *ilt_shadow = p_hwfn->p_cxt_mngr->ilt_shadow; + u32 lines, line, sz_left, lines_to_skip = 0; + + /* Special handling for RoCE that supports dynamic allocation */ + if (ilt_client == ILT_CLI_CDUT) + return ECORE_SUCCESS; + + lines_to_skip = p_blk->dynamic_line_cnt; + + if (!p_blk->total_size) + return ECORE_SUCCESS; + + sz_left = p_blk->total_size; + lines = DIV_ROUND_UP(sz_left, p_blk->real_size_in_page) 
- lines_to_skip; + line = p_blk->start_line + start_line_offset - + p_hwfn->p_cxt_mngr->pf_start_line + lines_to_skip; + + for (; lines; lines--) { + dma_addr_t p_phys; + void *p_virt; + u32 size; + + size = OSAL_MIN_T(u32, sz_left, p_blk->real_size_in_page); + +/* @DPDK */ +#define ILT_BLOCK_ALIGN_SIZE 0x1000 + p_virt = OSAL_DMA_ALLOC_COHERENT_ALIGNED(p_hwfn->p_dev, + &p_phys, size, + ILT_BLOCK_ALIGN_SIZE); + if (!p_virt) + return ECORE_NOMEM; + OSAL_MEM_ZERO(p_virt, size); + + ilt_shadow[line].p_phys = p_phys; + ilt_shadow[line].p_virt = p_virt; + ilt_shadow[line].size = size; + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "ILT shadow: Line [%d] Physical 0x%" PRIx64 + " Virtual %p Size %d\n", + line, (u64)p_phys, p_virt, size); + + sz_left -= size; + line++; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_ilt_shadow_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_ilt_client_cfg *clients = p_mngr->clients; + struct ecore_ilt_cli_blk *p_blk; + enum _ecore_status_t rc; + u32 size, i, j, k; + + size = ecore_cxt_ilt_shadow_size(clients); + p_mngr->ilt_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + size * sizeof(struct ecore_dma_mem)); + + if (!p_mngr->ilt_shadow) { + DP_NOTICE(p_hwfn, true, "Failed to allocate ilt shadow table"); + rc = ECORE_NOMEM; + goto ilt_shadow_fail; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "Allocated 0x%x bytes for ilt shadow\n", + (u32)(size * sizeof(struct ecore_dma_mem))); + + for (i = 0; i < ILT_CLI_MAX; i++) + if (!clients[i].active) { + continue; + } else { + for (j = 0; j < ILT_CLI_PF_BLOCKS; j++) { + p_blk = &clients[i].pf_blks[j]; + rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, i, 0); + if (rc != ECORE_SUCCESS) + goto ilt_shadow_fail; + } + for (k = 0; k < p_mngr->vf_count; k++) { + for (j = 0; j < ILT_CLI_VF_BLOCKS; j++) { + u32 lines = clients[i].vf_total_lines * k; + + p_blk = &clients[i].vf_blks[j]; + rc = ecore_ilt_blk_alloc(p_hwfn, p_blk, + i, lines); + if (rc != ECORE_SUCCESS) + goto ilt_shadow_fail; + } + } + } + + return ECORE_SUCCESS; + +ilt_shadow_fail: + ecore_ilt_shadow_free(p_hwfn); + return rc; +} + +static void ecore_cid_map_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 type; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + OSAL_FREE(p_hwfn->p_dev, p_mngr->acquired[type].cid_map); + p_mngr->acquired[type].max_count = 0; + p_mngr->acquired[type].start_cid = 0; + } +} + +static enum _ecore_status_t ecore_cid_map_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 start_cid = 0; + u32 type; + + for (type = 0; type < MAX_CONN_TYPES; type++) { + u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; + u32 size; + + if (cid_cnt == 0) + continue; + + size = MAP_WORD_SIZE * DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); + p_mngr->acquired[type].cid_map = OSAL_ZALLOC(p_hwfn->p_dev, + GFP_KERNEL, size); + if (!p_mngr->acquired[type].cid_map) + goto cid_map_fail; + + p_mngr->acquired[type].max_count = cid_cnt; + p_mngr->acquired[type].start_cid = start_cid; + + p_hwfn->p_cxt_mngr->conn_cfg[type].cid_start = start_cid; + + DP_VERBOSE(p_hwfn, ECORE_MSG_CXT, + "Type %08x start: %08x count %08x\n", + type, p_mngr->acquired[type].start_cid, + p_mngr->acquired[type].max_count); + start_cid += cid_cnt; + } + + return ECORE_SUCCESS; + +cid_map_fail: + ecore_cid_map_free(p_hwfn); + return ECORE_NOMEM; +} + +enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn) +{ + struct 
ecore_cxt_mngr *p_mngr; + u32 i; + + p_mngr = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_mngr)); + if (!p_mngr) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_cxt_mngr'\n"); + return ECORE_NOMEM; + } + + /* Initialize ILT client registers */ + p_mngr->clients[ILT_CLI_CDUC].first.reg = ILT_CFG_REG(CDUC, FIRST_ILT); + p_mngr->clients[ILT_CLI_CDUC].last.reg = ILT_CFG_REG(CDUC, LAST_ILT); + p_mngr->clients[ILT_CLI_CDUC].p_size.reg = ILT_CFG_REG(CDUC, P_SIZE); + + p_mngr->clients[ILT_CLI_QM].first.reg = ILT_CFG_REG(QM, FIRST_ILT); + p_mngr->clients[ILT_CLI_QM].last.reg = ILT_CFG_REG(QM, LAST_ILT); + p_mngr->clients[ILT_CLI_QM].p_size.reg = ILT_CFG_REG(QM, P_SIZE); + + p_mngr->clients[ILT_CLI_TM].first.reg = ILT_CFG_REG(TM, FIRST_ILT); + p_mngr->clients[ILT_CLI_TM].last.reg = ILT_CFG_REG(TM, LAST_ILT); + p_mngr->clients[ILT_CLI_TM].p_size.reg = ILT_CFG_REG(TM, P_SIZE); + + p_mngr->clients[ILT_CLI_SRC].first.reg = ILT_CFG_REG(SRC, FIRST_ILT); + p_mngr->clients[ILT_CLI_SRC].last.reg = ILT_CFG_REG(SRC, LAST_ILT); + p_mngr->clients[ILT_CLI_SRC].p_size.reg = ILT_CFG_REG(SRC, P_SIZE); + + p_mngr->clients[ILT_CLI_CDUT].first.reg = ILT_CFG_REG(CDUT, FIRST_ILT); + p_mngr->clients[ILT_CLI_CDUT].last.reg = ILT_CFG_REG(CDUT, LAST_ILT); + p_mngr->clients[ILT_CLI_CDUT].p_size.reg = ILT_CFG_REG(CDUT, P_SIZE); + + /* default ILT page size for all clients is 32K */ + for (i = 0; i < ILT_CLI_MAX; i++) + p_mngr->clients[i].p_size.val = ILT_DEFAULT_HW_P_SIZE; + + /* Initialize task sizes */ + p_mngr->task_type_size[0] = 512; /* @DPDK */ + p_mngr->task_type_size[1] = 128; /* @DPDK */ + + p_mngr->vf_count = p_hwfn->p_dev->sriov_info.total_vfs; + /* Set the cxt mangr pointer priori to further allocations */ + p_hwfn->p_cxt_mngr = p_mngr; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t rc; + + /* Allocate the ILT shadow table */ + rc = ecore_ilt_shadow_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, true, "Failed to allocate ilt memory\n"); + goto tables_alloc_fail; + } + + /* Allocate the T2 table */ + rc = ecore_cxt_src_t2_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, true, "Failed to allocate T2 memory\n"); + goto tables_alloc_fail; + } + + /* Allocate and initialize the acquired cids bitmaps */ + rc = ecore_cid_map_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, true, "Failed to allocate cid maps\n"); + goto tables_alloc_fail; + } + + return ECORE_SUCCESS; + +tables_alloc_fail: + ecore_cxt_mngr_free(p_hwfn); + return rc; +} + +void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn->p_cxt_mngr) + return; + + ecore_cid_map_free(p_hwfn); + ecore_cxt_src_t2_free(p_hwfn); + ecore_ilt_shadow_free(p_hwfn); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_cxt_mngr); + + p_hwfn->p_cxt_mngr = OSAL_NULL; +} + +void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + int type; + + /* Reset acquired cids */ + for (type = 0; type < MAX_CONN_TYPES; type++) { + u32 cid_cnt = p_hwfn->p_cxt_mngr->conn_cfg[type].cid_count; + u32 i; + + if (cid_cnt == 0) + continue; + + for (i = 0; i < DIV_ROUND_UP(cid_cnt, BITS_PER_MAP_WORD); i++) + p_mngr->acquired[type].cid_map[i] = 0; + } +} + +/* HW initialization helper (per Block, per phase) */ + +/* CDU Common */ +#define CDUC_CXT_SIZE_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT + +#define CDUC_CXT_SIZE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE >> CDUC_CXT_SIZE_SHIFT) + +#define CDUC_BLOCK_WASTE_SHIFT \ + 
CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT + +#define CDUC_BLOCK_WASTE_MASK \ + (CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE >> CDUC_BLOCK_WASTE_SHIFT) + +#define CDUC_NCIB_SHIFT \ + CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT + +#define CDUC_NCIB_MASK \ + (CDU_REG_CID_ADDR_PARAMS_NCIB >> CDUC_NCIB_SHIFT) + +#define CDUT_TYPE0_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT + +#define CDUT_TYPE0_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE >> \ + CDUT_TYPE0_CXT_SIZE_SHIFT) + +#define CDUT_TYPE0_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE0_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE >> \ + CDUT_TYPE0_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE0_NCIB_SHIFT \ + CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE0_NCIB_MASK \ + (CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE0_NCIB_SHIFT) + +#define CDUT_TYPE1_CXT_SIZE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT + +#define CDUT_TYPE1_CXT_SIZE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE >> \ + CDUT_TYPE1_CXT_SIZE_SHIFT) + +#define CDUT_TYPE1_BLOCK_WASTE_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT + +#define CDUT_TYPE1_BLOCK_WASTE_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE >> \ + CDUT_TYPE1_BLOCK_WASTE_SHIFT) + +#define CDUT_TYPE1_NCIB_SHIFT \ + CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT + +#define CDUT_TYPE1_NCIB_MASK \ + (CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK >> \ + CDUT_TYPE1_NCIB_SHIFT) + +static void ecore_cdu_init_common(struct ecore_hwfn *p_hwfn) +{ + u32 page_sz, elems_per_page, block_waste, cxt_size, cdu_params = 0; + + /* CDUC - connection configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + cxt_size = CONN_CXT_SIZE(p_hwfn); + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + SET_FIELD(cdu_params, CDUC_CXT_SIZE, cxt_size); + SET_FIELD(cdu_params, CDUC_BLOCK_WASTE, block_waste); + SET_FIELD(cdu_params, CDUC_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_CID_ADDR_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-0 tasks configuration */ + page_sz = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT].p_size.val; + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[0]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multiples of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, CDUT_TYPE0_CXT_SIZE, (cxt_size >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE0_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT0_PARAMS_RT_OFFSET, cdu_params); + + /* CDUT - type-1 tasks configuration */ + cxt_size = p_hwfn->p_cxt_mngr->task_type_size[1]; + elems_per_page = ILT_PAGE_IN_BYTES(page_sz) / cxt_size; + block_waste = ILT_PAGE_IN_BYTES(page_sz) - elems_per_page * cxt_size; + + /* cxt size and block-waste are multiples of 8 */ + cdu_params = 0; + SET_FIELD(cdu_params, CDUT_TYPE1_CXT_SIZE, (cxt_size >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE1_BLOCK_WASTE, (block_waste >> 3)); + SET_FIELD(cdu_params, CDUT_TYPE1_NCIB, elems_per_page); + STORE_RT_REG(p_hwfn, CDU_REG_SEGMENT1_PARAMS_RT_OFFSET, cdu_params); +} + +/* CDU PF */ +#define CDU_SEG_REG_TYPE_SHIFT CDU_SEG_TYPE_OFFSET_REG_TYPE_SHIFT +#define CDU_SEG_REG_TYPE_MASK 0x1 +#define CDU_SEG_REG_OFFSET_SHIFT 0 +#define CDU_SEG_REG_OFFSET_MASK
CDU_SEG_TYPE_OFFSET_REG_OFFSET_MASK + +static void ecore_cdu_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *p_cli; + struct ecore_tid_seg *p_seg; + u32 cdu_seg_params, offset; + int i; + + static const u32 rt_type_offset_arr[] = { + CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + static const u32 rt_type_offset_fl_arr[] = { + CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET, + CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET + }; + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + + /* There are initializations only for CDUT during pf Phase */ + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + /* Segment 0 */ + p_seg = ecore_cxt_tid_seg_info(p_hwfn, i); + if (!p_seg) + continue; + + /* Note: start_line is already adjusted for the CDU + * segment register granularity, so we just need to + * divide. Adjustment is implicit as we assume ILT + * Page size is larger than 32K! + */ + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_SEG_BLK(i)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_arr[i], cdu_seg_params); + + offset = (ILT_PAGE_IN_BYTES(p_cli->p_size.val) * + (p_cli->pf_blks[CDUT_FL_SEG_BLK(i, PF)].start_line - + p_cli->first.val)) / CDUT_SEG_ALIGNMET_IN_BYTES; + + cdu_seg_params = 0; + SET_FIELD(cdu_seg_params, CDU_SEG_REG_TYPE, p_seg->type); + SET_FIELD(cdu_seg_params, CDU_SEG_REG_OFFSET, offset); + STORE_RT_REG(p_hwfn, rt_type_offset_fl_arr[i], cdu_seg_params); + } +} + +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + struct ecore_qm_iids iids; + + OSAL_MEM_ZERO(&iids, sizeof(iids)); + ecore_cxt_qm_iids(p_hwfn, &iids); + + ecore_qm_pf_rt_init(p_hwfn, p_hwfn->p_main_ptt, p_hwfn->port_id, + p_hwfn->rel_pf_id, qm_info->max_phys_tcs_per_port, + p_hwfn->first_on_engine, + iids.cids, iids.vf_cids, iids.tids, + qm_info->start_pq, + qm_info->num_pqs - qm_info->num_vf_pqs, + qm_info->num_vf_pqs, + qm_info->start_vport, + qm_info->num_vports, qm_info->pf_wfq, + qm_info->pf_rl, p_hwfn->qm_info.qm_pq_params, + p_hwfn->qm_info.qm_vport_params); +} + +/* CM PF */ +static enum _ecore_status_t ecore_cm_init_pf(struct ecore_hwfn *p_hwfn) +{ + union ecore_qm_pq_params pq_params; + u16 pq; + + /* XCM pure-LB queue */ + OSAL_MEMSET(&pq_params, 0, sizeof(pq_params)); + pq_params.core.tc = LB_TC; + pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); + STORE_RT_REG(p_hwfn, XCM_REG_CON_PHY_Q3_RT_OFFSET, pq); + + return ECORE_SUCCESS; +} + +/* DQ PF */ +static void ecore_dq_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 dq_pf_max_cid = 0, dq_vf_max_cid = 0; + + dq_pf_max_cid += (p_mngr->conn_cfg[0].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_0_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[0].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_0_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[1].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_1_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[1].cids_per_vf >> DQ_RANGE_SHIFT); + 
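	/* Worked example (illustrative values only, not taken from the patch):
	 * the DORQ limits programmed here are cumulative and counted in
	 * DQ_RANGE_ALIGN (1 << DQ_RANGE_SHIFT = 16) CID units.  If, say,
	 * cid_count were 32 for connection type 0 and 64 for type 1, then
	 * dq_pf_max_cid would be 32 >> 4 = 2 after type 0 and
	 * 2 + (64 >> 4) = 6 after type 1, so each DORQ_REG_PF_MAX_ICID_N
	 * register holds the upper bound of the CID range used by types 0..N.
	 * The VF registers, including the store just below, follow the same
	 * scheme using cids_per_vf.
	 */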
STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_1_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[2].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_2_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[2].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_2_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[3].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_3_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[3].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_3_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[4].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_4_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[4].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_4_RT_OFFSET, dq_vf_max_cid); + + dq_pf_max_cid += (p_mngr->conn_cfg[5].cid_count >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_5_RT_OFFSET, dq_pf_max_cid); + + dq_vf_max_cid += (p_mngr->conn_cfg[5].cids_per_vf >> DQ_RANGE_SHIFT); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_5_RT_OFFSET, dq_vf_max_cid); + + /* Connection types 6 & 7 are not in use, yet they must be configured + * as the highest possible connection. Not configuring them means the + * defaults will be used, and with a large number of cids a bug may + * occur, if the defaults will be smaller than dq_pf_max_cid / + * dq_vf_max_cid. + */ + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_6_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_6_RT_OFFSET, dq_vf_max_cid); + + STORE_RT_REG(p_hwfn, DORQ_REG_PF_MAX_ICID_7_RT_OFFSET, dq_pf_max_cid); + STORE_RT_REG(p_hwfn, DORQ_REG_VF_MAX_ICID_7_RT_OFFSET, dq_vf_max_cid); +} + +static void ecore_ilt_bounds_init(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *ilt_clients; + int i; + + ilt_clients = p_hwfn->p_cxt_mngr->clients; + for (i = 0; i < ILT_CLI_MAX; i++) + if (!ilt_clients[i].active) { + continue; + } else { + STORE_RT_REG(p_hwfn, + ilt_clients[i].first.reg, + ilt_clients[i].first.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].last.reg, ilt_clients[i].last.val); + STORE_RT_REG(p_hwfn, + ilt_clients[i].p_size.reg, + ilt_clients[i].p_size.val); + } +} + +static void ecore_ilt_vf_bounds_init(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *p_cli; + u32 blk_factor; + + /* For simplicty we set the 'block' to be an ILT page */ + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_BASE_RT_OFFSET, + p_hwfn->hw_info.first_vf_in_pf); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET, + p_hwfn->hw_info.first_vf_in_pf + + p_hwfn->p_dev->sriov_info.total_vfs); + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUT]; + blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET, + blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + 
PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } + + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_TM]; + blk_factor = OSAL_LOG2(ILT_PAGE_IN_BYTES(p_cli->p_size.val) >> 10); + if (p_cli->active) { + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET, blk_factor); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET, + p_cli->pf_total_lines); + STORE_RT_REG(p_hwfn, + PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET, + p_cli->vf_total_lines); + } +} + +/* ILT (PSWRQ2) PF */ +static void ecore_ilt_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ilt_client_cfg *clients; + struct ecore_cxt_mngr *p_mngr; + struct ecore_dma_mem *p_shdw; + u32 line, rt_offst, i; + + ecore_ilt_bounds_init(p_hwfn); + ecore_ilt_vf_bounds_init(p_hwfn); + + p_mngr = p_hwfn->p_cxt_mngr; + p_shdw = p_mngr->ilt_shadow; + clients = p_hwfn->p_cxt_mngr->clients; + + for (i = 0; i < ILT_CLI_MAX; i++) + if (!clients[i].active) { + continue; + } else { + /* Client's 1st val and RT array are absolute, ILT shadows' + * lines are relative. + */ + line = clients[i].first.val - p_mngr->pf_start_line; + rt_offst = PSWRQ2_REG_ILT_MEMORY_RT_OFFSET + + clients[i].first.val * ILT_ENTRY_IN_REGS; + + for (; line <= clients[i].last.val - p_mngr->pf_start_line; + line++, rt_offst += ILT_ENTRY_IN_REGS) { + u64 ilt_hw_entry = 0; + + /** p_virt could be OSAL_NULL incase of dynamic + * allocation + */ + if (p_shdw[line].p_virt != OSAL_NULL) { + SET_FIELD(ilt_hw_entry, ILT_ENTRY_VALID, 1ULL); + SET_FIELD(ilt_hw_entry, ILT_ENTRY_PHY_ADDR, + (p_shdw[line].p_phys >> 12)); + + DP_VERBOSE(p_hwfn, ECORE_MSG_ILT, + "Setting RT[0x%08x] from" + " ILT[0x%08x] [Client is %d] to" + " Physical addr: 0x%" PRIx64 "\n", + rt_offst, line, i, + (u64)(p_shdw[line].p_phys >> 12)); + } + + STORE_RT_REG_AGG(p_hwfn, rt_offst, ilt_hw_entry); + } + } +} + +/* SRC (Searcher) PF */ +static void ecore_src_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rounded_conn_num, conn_num, conn_max; + struct ecore_src_iids src_iids; + + OSAL_MEM_ZERO(&src_iids, sizeof(src_iids)); + ecore_cxt_src_iids(p_mngr, &src_iids); + conn_num = src_iids.pf_cids + src_iids.per_vf_cids * p_mngr->vf_count; + if (!conn_num) + return; + + conn_max = OSAL_MAX_T(u32, conn_num, SRC_MIN_NUM_ELEMS); + rounded_conn_num = OSAL_ROUNDUP_POW_OF_TWO(conn_max); + + STORE_RT_REG(p_hwfn, SRC_REG_COUNTFREE_RT_OFFSET, conn_num); + STORE_RT_REG(p_hwfn, SRC_REG_NUMBER_HASH_BITS_RT_OFFSET, + OSAL_LOG2(rounded_conn_num)); + + STORE_RT_REG_AGG(p_hwfn, SRC_REG_FIRSTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->first_free); + STORE_RT_REG_AGG(p_hwfn, SRC_REG_LASTFREE_RT_OFFSET, + p_hwfn->p_cxt_mngr->last_free); +} + +/* Timers PF */ +#define TM_CFG_NUM_IDS_SHIFT 0 +#define TM_CFG_NUM_IDS_MASK 0xFFFFULL +#define TM_CFG_PRE_SCAN_OFFSET_SHIFT 16 +#define TM_CFG_PRE_SCAN_OFFSET_MASK 0x1FFULL +#define TM_CFG_PARENT_PF_SHIFT 25 +#define TM_CFG_PARENT_PF_MASK 0x7ULL + +#define TM_CFG_CID_PRE_SCAN_ROWS_SHIFT 30 +#define TM_CFG_CID_PRE_SCAN_ROWS_MASK 0x1FFULL + +#define TM_CFG_TID_OFFSET_SHIFT 30 +#define TM_CFG_TID_OFFSET_MASK 0x7FFFFULL +#define TM_CFG_TID_PRE_SCAN_ROWS_SHIFT 49 +#define TM_CFG_TID_PRE_SCAN_ROWS_MASK 0x1FFULL + +static void ecore_tm_init_pf(struct ecore_hwfn *p_hwfn) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 active_seg_mask = 0, tm_offset, rt_reg; + struct ecore_tm_iids tm_iids; + u64 cfg_word; + u8 i; + + OSAL_MEM_ZERO(&tm_iids, sizeof(tm_iids)); + ecore_cxt_tm_iids(p_mngr, &tm_iids); + + /* @@@TBD No pre-scan for now 
*/ + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_cids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); + + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->hw_info.first_vf_in_pf + i); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_cids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); /* n/a for PF */ + SET_FIELD(cfg_word, TM_CFG_CID_PRE_SCAN_ROWS, 0); + + rt_reg = TM_REG_CONFIG_CONN_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->p_dev) + p_hwfn->rel_pf_id); + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + + /* enable scan */ + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_CONN_RT_OFFSET, + tm_iids.pf_cids ? 0x1 : 0x0); + + /* @@@TBD how to enable the scan for the VFs */ + + tm_offset = tm_iids.per_vf_cids; + + /* Note: We assume consecutive VFs for a PF */ + for (i = 0; i < p_mngr->vf_count; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.per_vf_tids); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, p_hwfn->rel_pf_id); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (p_hwfn->hw_info.first_vf_in_pf + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + } + + tm_offset = tm_iids.pf_cids; + for (i = 0; i < NUM_TASK_PF_SEGMENTS; i++) { + cfg_word = 0; + SET_FIELD(cfg_word, TM_CFG_NUM_IDS, tm_iids.pf_tids[i]); + SET_FIELD(cfg_word, TM_CFG_PRE_SCAN_OFFSET, 0); + SET_FIELD(cfg_word, TM_CFG_PARENT_PF, 0); + SET_FIELD(cfg_word, TM_CFG_TID_OFFSET, tm_offset); + SET_FIELD(cfg_word, TM_CFG_TID_PRE_SCAN_ROWS, (u64)0); + + rt_reg = TM_REG_CONFIG_TASK_MEM_RT_OFFSET + + (sizeof(cfg_word) / sizeof(u32)) * + (NUM_OF_VFS(p_hwfn->p_dev) + + p_hwfn->rel_pf_id * NUM_TASK_PF_SEGMENTS + i); + + STORE_RT_REG_AGG(p_hwfn, rt_reg, cfg_word); + active_seg_mask |= (tm_iids.pf_tids[i] ?
(1 << i) : 0); + + tm_offset += tm_iids.pf_tids[i]; + } + + STORE_RT_REG(p_hwfn, TM_REG_PF_ENABLE_TASK_RT_OFFSET, active_seg_mask); + + /* @@@TBD how to enable the scan for the VFs */ +} + +static void ecore_prs_init_common(struct ecore_hwfn *p_hwfn) +{ +} + +void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn) +{ + /* CDU configuration */ + ecore_cdu_init_common(p_hwfn); + ecore_prs_init_common(p_hwfn); +} + +void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn) +{ + ecore_qm_init_pf(p_hwfn); + ecore_cm_init_pf(p_hwfn); + ecore_dq_init_pf(p_hwfn); + ecore_cdu_init_pf(p_hwfn); + ecore_ilt_init_pf(p_hwfn); + ecore_src_init_pf(p_hwfn); + ecore_tm_init_pf(p_hwfn); +} + +enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, + enum protocol_type type, u32 *p_cid) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 rel_cid; + + if (type >= MAX_CONN_TYPES || !p_mngr->acquired[type].cid_map) { + DP_NOTICE(p_hwfn, true, "Invalid protocol type %d", type); + return ECORE_INVAL; + } + + rel_cid = OSAL_FIND_FIRST_ZERO_BIT(p_mngr->acquired[type].cid_map, + p_mngr->acquired[type].max_count); + + if (rel_cid >= p_mngr->acquired[type].max_count) { + DP_NOTICE(p_hwfn, false, "no CID available for protocol %d", + type); + return ECORE_NORESOURCES; + } + + OSAL_SET_BIT(rel_cid, p_mngr->acquired[type].cid_map); + + *p_cid = rel_cid + p_mngr->acquired[type].start_cid; + + return ECORE_SUCCESS; +} + +static bool ecore_cxt_test_cid_acquired(struct ecore_hwfn *p_hwfn, + u32 cid, enum protocol_type *p_type) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_cid_acquired_map *p_map; + enum protocol_type p; + u32 rel_cid; + + /* Iterate over protocols and find matching cid range */ + for (p = 0; p < MAX_CONN_TYPES; p++) { + p_map = &p_mngr->acquired[p]; + + if (!p_map->cid_map) + continue; + if (cid >= p_map->start_cid && + cid < p_map->start_cid + p_map->max_count) { + break; + } + } + *p_type = p; + + if (p == MAX_CONN_TYPES) { + DP_NOTICE(p_hwfn, true, "Invalid CID %d", cid); + return false; + } + rel_cid = cid - p_map->start_cid; + if (!OSAL_TEST_BIT(rel_cid, p_map->cid_map)) { + DP_NOTICE(p_hwfn, true, "CID %d not acquired", cid); + return false; + } + return true; +} + +void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + enum protocol_type type; + bool b_acquired; + u32 rel_cid; + + /* Test acquired and find matching per-protocol map */ + b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, cid, &type); + + if (!b_acquired) + return; + + rel_cid = cid - p_mngr->acquired[type].start_cid; + OSAL_CLEAR_BIT(rel_cid, p_mngr->acquired[type].cid_map); +} + +enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn, + struct ecore_cxt_info *p_info) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 conn_cxt_size, hw_p_size, cxts_per_p, line; + enum protocol_type type; + bool b_acquired; + + /* Test acquired and find matching per-protocol map */ + b_acquired = ecore_cxt_test_cid_acquired(p_hwfn, p_info->iid, &type); + + if (!b_acquired) + return ECORE_INVAL; + + /* set the protocl type */ + p_info->type = type; + + /* compute context virtual pointer */ + hw_p_size = p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC].p_size.val; + + conn_cxt_size = CONN_CXT_SIZE(p_hwfn); + cxts_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / conn_cxt_size; + line = p_info->iid / cxts_per_p; + + /* Make sure context is allocated (dynamic allocation) */ + if (!p_mngr->ilt_shadow[line].p_virt) + return ECORE_INVAL; + + 
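	/* Worked example (assumed sizes, for illustration only): with the
	 * default 32K ILT page (hw_p_size = 3, so ILT_PAGE_IN_BYTES = 1 << 15)
	 * and a connection context that rounds up to 320 bytes, cxts_per_p is
	 * 32768 / 320 = 102; iid 250 would then live in ILT shadow line
	 * 250 / 102 = 2, at byte offset (250 % 102) * 320 within that page.
	 * The statement below performs exactly this page/offset split.
	 */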
p_info->p_cxt = (u8 *)p_mngr->ilt_shadow[line].p_virt + + p_info->iid % cxts_per_p * conn_cxt_size; + + DP_VERBOSE(p_hwfn, (ECORE_MSG_ILT | ECORE_MSG_CXT), + "Accessing ILT shadow[%d]: CXT pointer is at %p (for iid %d)\n", + (p_info->iid / cxts_per_p), p_info->p_cxt, p_info->iid); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn) +{ + /* Set the number of required CORE connections */ + u32 core_cids = 1; /* SPQ */ + + ecore_cxt_set_proto_cid_count(p_hwfn, PROTOCOLID_CORE, core_cids, 0); + + switch (p_hwfn->hw_info.personality) { + case ECORE_PCI_ETH: + { + struct ecore_eth_pf_params *p_params = + &p_hwfn->pf_params.eth_pf_params; + + ecore_cxt_set_proto_cid_count(p_hwfn, + PROTOCOLID_ETH, + p_params->num_cons, 1); /* FIXME VF count... */ + + break; + } + default: + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn, + struct ecore_tid_mem *p_info) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + u32 proto, seg, total_lines, i, shadow_line; + struct ecore_ilt_client_cfg *p_cli; + struct ecore_ilt_cli_blk *p_fl_seg; + struct ecore_tid_seg *p_seg_info; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + default: + return ECORE_INVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return ECORE_INVAL; + + p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + if (!p_seg_info->has_fl_mem) + return ECORE_INVAL; + + p_fl_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + total_lines = DIV_ROUND_UP(p_fl_seg->total_size, + p_fl_seg->real_size_in_page); + + for (i = 0; i < total_lines; i++) { + shadow_line = i + p_fl_seg->start_line - + p_hwfn->p_cxt_mngr->pf_start_line; + p_info->blocks[i] = p_mngr->ilt_shadow[shadow_line].p_virt; + } + p_info->waste = ILT_PAGE_IN_BYTES(p_cli->p_size.val) - + p_fl_seg->real_size_in_page; + p_info->tid_size = p_mngr->task_type_size[p_seg_info->type]; + p_info->num_tids_per_block = p_fl_seg->real_size_in_page / + p_info->tid_size; + + return ECORE_SUCCESS; +} + +/* This function is very RoCE oriented, if another protocol in the future + * will want this feature we'll need to modify the function to be more generic + */ +static enum _ecore_status_t +ecore_cxt_free_ilt_range(struct ecore_hwfn *p_hwfn, + enum ecore_cxt_elem_type elem_type, + u32 start_iid, u32 count) +{ + u32 reg_offset, elem_size, hw_p_size, elems_per_p; + u32 start_line, end_line, shadow_start_line, shadow_end_line; + struct ecore_ilt_client_cfg *p_cli; + struct ecore_ilt_cli_blk *p_blk; + u32 end_iid = start_iid + count; + struct ecore_ptt *p_ptt; + u64 ilt_hw_entry = 0; + u32 i; + + if (elem_type == ECORE_ELEM_CXT) { + p_cli = &p_hwfn->p_cxt_mngr->clients[ILT_CLI_CDUC]; + elem_size = CONN_CXT_SIZE(p_hwfn); + p_blk = &p_cli->pf_blks[CDUC_BLK]; + } + + /* Calculate line in ilt */ + hw_p_size = p_cli->p_size.val; + elems_per_p = ILT_PAGE_IN_BYTES(hw_p_size) / elem_size; + start_line = p_blk->start_line + (start_iid / elems_per_p); + end_line = p_blk->start_line + (end_iid / elems_per_p); + if (((end_iid + 1) / elems_per_p) != (end_iid / elems_per_p)) + end_line--; + + shadow_start_line = start_line - p_hwfn->p_cxt_mngr->pf_start_line; + shadow_end_line = end_line - p_hwfn->p_cxt_mngr->pf_start_line; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) { + DP_NOTICE(p_hwfn, false, + "ECORE_TIME_OUT on ptt acquire - dynamic allocation"); + return ECORE_TIMEOUT; + } + + for (i = shadow_start_line; i < shadow_end_line; 
i++) { + if (!p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt) + continue; + + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt, + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys, + p_hwfn->p_cxt_mngr->ilt_shadow[i].size); + + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_virt = OSAL_NULL; + p_hwfn->p_cxt_mngr->ilt_shadow[i].p_phys = 0; + p_hwfn->p_cxt_mngr->ilt_shadow[i].size = 0; + + /* compute absolute offset */ + reg_offset = PSWRQ2_REG_ILT_MEMORY + + ((start_line++) * ILT_REG_SIZE_IN_BYTES * + ILT_ENTRY_IN_REGS); + + ecore_wr(p_hwfn, p_ptt, reg_offset, U64_LO(ilt_hw_entry)); + ecore_wr(p_hwfn, p_ptt, reg_offset + ILT_REG_SIZE_IN_BYTES, + U64_HI(ilt_hw_entry)); + } + + ecore_ptt_release(p_hwfn, p_ptt); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn, + enum protocol_type proto) +{ + enum _ecore_status_t rc; + u32 cid; + + /* Free Connection CXT */ + rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_CXT, + ecore_cxt_get_proto_cid_start(p_hwfn, + proto), + ecore_cxt_get_proto_cid_count(p_hwfn, + proto, + &cid)); + + if (rc) + return rc; + + /* Free Task CXT */ + rc = ecore_cxt_free_ilt_range(p_hwfn, ECORE_ELEM_TASK, 0, + ecore_cxt_get_proto_tid_count(p_hwfn, + proto)); + + return rc; +} + +enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn, + u32 tid, + u8 ctx_type, void **pp_task_ctx) +{ + struct ecore_cxt_mngr *p_mngr = p_hwfn->p_cxt_mngr; + struct ecore_ilt_client_cfg *p_cli; + struct ecore_ilt_cli_blk *p_seg; + struct ecore_tid_seg *p_seg_info; + u32 proto, seg; + u32 total_lines; + u32 tid_size, ilt_idx; + u32 num_tids_per_block; + + /* Verify the personality */ + switch (p_hwfn->hw_info.personality) { + default: + return ECORE_INVAL; + } + + p_cli = &p_mngr->clients[ILT_CLI_CDUT]; + if (!p_cli->active) + return ECORE_INVAL; + + p_seg_info = &p_mngr->conn_cfg[proto].tid_seg[seg]; + + if (ctx_type == ECORE_CTX_WORKING_MEM) { + p_seg = &p_cli->pf_blks[CDUT_SEG_BLK(seg)]; + } else if (ctx_type == ECORE_CTX_FL_MEM) { + if (!p_seg_info->has_fl_mem) + return ECORE_INVAL; + p_seg = &p_cli->pf_blks[CDUT_FL_SEG_BLK(seg, PF)]; + } else { + return ECORE_INVAL; + } + total_lines = DIV_ROUND_UP(p_seg->total_size, p_seg->real_size_in_page); + tid_size = p_mngr->task_type_size[p_seg_info->type]; + num_tids_per_block = p_seg->real_size_in_page / tid_size; + + if (total_lines < tid / num_tids_per_block) + return ECORE_INVAL; + + ilt_idx = tid / num_tids_per_block + p_seg->start_line - + p_mngr->pf_start_line; + *pp_task_ctx = (u8 *)p_mngr->ilt_shadow[ilt_idx].p_virt + + (tid % num_tids_per_block) * tid_size; + + return ECORE_SUCCESS; +} diff --git a/drivers/net/qede/base/ecore_cxt.h b/drivers/net/qede/base/ecore_cxt.h new file mode 100644 index 00000000..1ac95f98 --- /dev/null +++ b/drivers/net/qede/base/ecore_cxt.h @@ -0,0 +1,157 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef _ECORE_CID_ +#define _ECORE_CID_ + +#include "ecore_hsi_common.h" +#include "ecore_proto_if.h" +#include "ecore_cxt_api.h" + +enum ecore_cxt_elem_type { + ECORE_ELEM_CXT, + ECORE_ELEM_TASK +}; + +u32 ecore_cxt_get_proto_cid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type, u32 *vf_cid); + +u32 ecore_cxt_get_proto_cid_start(struct ecore_hwfn *p_hwfn, + enum protocol_type type); + +/** + * @brief ecore_cxt_qm_iids - fills the cid/tid counts for the QM configuration + * + * @param p_hwfn + * @param iids [out], a structure holding all the counters + */ +void ecore_cxt_qm_iids(struct ecore_hwfn *p_hwfn, struct ecore_qm_iids *iids); + +/** + * @brief ecore_cxt_set_pf_params - Set the PF params for cxt init + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_set_pf_params(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_set_proto_cid_count - Set the max cids per protocol for cxt + * init + * + * @param p_hwfn + * @param type + * @param cid_cnt - number of pf cids + * @param vf_cid_cnt - number of vf cids + */ +void ecore_cxt_set_proto_cid_count(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 cid_cnt, u32 vf_cid_cnt); +/** + * @brief ecore_cxt_cfg_ilt_compute - compute ILT init parameters + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_cfg_ilt_compute(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_mngr_alloc - Allocate and init the context manager struct + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_mngr_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_mngr_free + * + * @param p_hwfn + */ +void ecore_cxt_mngr_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_tables_alloc - Allocate ILT shadow, Searcher T2, acquired + * map + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_tables_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_mngr_setup - Reset the acquired CIDs + * + * @param p_hwfn + */ +void ecore_cxt_mngr_setup(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_hw_init_common - Initailze ILT and DQ, common phase, per + * path. + * + * @param p_hwfn + */ +void ecore_cxt_hw_init_common(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_cxt_hw_init_pf - Initailze ILT and DQ, PF phase, per path. + * + * @param p_hwfn + */ +void ecore_cxt_hw_init_pf(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_qm_init_pf - Initailze the QM PF phase, per path + * + * @param p_hwfn + */ +void ecore_qm_init_pf(struct ecore_hwfn *p_hwfn); + + /** + * @brief Reconfigures QM pf on the fly + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** +* @brief ecore_cxt_release - Release a cid +* +* @param p_hwfn +* @param cid +*/ +void ecore_cxt_release_cid(struct ecore_hwfn *p_hwfn, u32 cid); + +/** + * @brief ecore_cxt_free_proto_ilt - function frees ilt pages + * associated with the protocol passed. 
+ * + * @param p_hwfn + * @param proto + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_cxt_free_proto_ilt(struct ecore_hwfn *p_hwfn, + enum protocol_type proto); + +#define ECORE_CTX_WORKING_MEM 0 +#define ECORE_CTX_FL_MEM 1 +enum _ecore_status_t ecore_cxt_get_task_ctx(struct ecore_hwfn *p_hwfn, + u32 tid, + u8 ctx_type, void **task_ctx); + +#endif /* _ECORE_CID_ */ diff --git a/drivers/net/qede/base/ecore_cxt_api.h b/drivers/net/qede/base/ecore_cxt_api.h new file mode 100644 index 00000000..d98dddb2 --- /dev/null +++ b/drivers/net/qede/base/ecore_cxt_api.h @@ -0,0 +1,79 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_CXT_API_H__ +#define __ECORE_CXT_API_H__ + +struct ecore_hwfn; + +struct ecore_cxt_info { + void *p_cxt; + u32 iid; + enum protocol_type type; +}; + +#define MAX_TID_BLOCKS 512 +struct ecore_tid_mem { + u32 tid_size; + u32 num_tids_per_block; + u32 waste; + u8 *blocks[MAX_TID_BLOCKS]; /* 4K */ +}; + +static OSAL_INLINE void *get_task_mem(struct ecore_tid_mem *info, u32 tid) +{ + /* note: waste is superfluous */ + return (void *)(info->blocks[tid / info->num_tids_per_block] + + (tid % info->num_tids_per_block) * info->tid_size); + + /* more elaborate alternative with no modulo + * u32 mask = info->tid_size * info->num_tids_per_block + + * info->waste - 1; + * u32 index = tid / info->num_tids_per_block; + * u32 offset = tid * info->tid_size + index * info->waste; + * return (void *)(blocks[index] + (offset & mask)); + */ +} + +/** +* @brief ecore_cxt_acquire - Acquire a new cid of a specific protocol type +* +* @param p_hwfn +* @param type +* @param p_cid +* +* @return enum _ecore_status_t +*/ +enum _ecore_status_t ecore_cxt_acquire_cid(struct ecore_hwfn *p_hwfn, + enum protocol_type type, + u32 *p_cid); + +/** +* @brief ecoreo_cid_get_cxt_info - Returns the context info for a specific cid +* +* +* @param p_hwfn +* @param p_info in/out +* +* @return enum _ecore_status_t +*/ +enum _ecore_status_t ecore_cxt_get_cid_info(struct ecore_hwfn *p_hwfn, + struct ecore_cxt_info *p_info); + +/** +* @brief ecore_cxt_get_tid_mem_info +* +* @param p_hwfn +* @param p_info +* +* @return enum _ecore_status_t +*/ +enum _ecore_status_t ecore_cxt_get_tid_mem_info(struct ecore_hwfn *p_hwfn, + struct ecore_tid_mem *p_info); + +#endif diff --git a/drivers/net/qede/base/ecore_dcbx.c b/drivers/net/qede/base/ecore_dcbx.c new file mode 100644 index 00000000..6a966cb9 --- /dev/null +++ b/drivers/net/qede/base/ecore_dcbx.c @@ -0,0 +1,887 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_sp_commands.h" +#include "ecore_dcbx.h" +#include "ecore_cxt.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" + +#define ECORE_DCBX_MAX_MIB_READ_TRY (100) +#define ECORE_MAX_PFC_PRIORITIES 8 +#define ECORE_ETH_TYPE_DEFAULT (0) + +#define ECORE_DCBX_INVALID_PRIORITY 0xFF + +/* Get Traffic Class from priority traffic class table, 4 bits represent + * the traffic class corresponding to the priority. + */ +#define ECORE_DCBX_PRIO2TC(prio_tc_tbl, prio) \ + ((u32)(pri_tc_tbl >> ((7 - prio) * 4)) & 0x7) + +static bool ecore_dcbx_app_ethtype(u32 app_info_bitmap) +{ + return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_ETHTYPE) ? 
true : false; +} + +static bool ecore_dcbx_app_port(u32 app_info_bitmap) +{ + return (ECORE_MFW_GET_FIELD(app_info_bitmap, DCBX_APP_SF) == + DCBX_APP_SF_PORT) ? true : false; +} + +static bool ecore_dcbx_default_tlv(u32 app_info_bitmap, u16 proto_id) +{ + return (ecore_dcbx_app_ethtype(app_info_bitmap) && + proto_id == ECORE_ETH_TYPE_DEFAULT) ? true : false; +} + +static bool ecore_dcbx_enabled(u32 dcbx_cfg_bitmap) +{ + return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_DISABLED) ? false : true; +} + +static bool ecore_dcbx_cee(u32 dcbx_cfg_bitmap) +{ + return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_CEE) ? true : false; +} + +static bool ecore_dcbx_ieee(u32 dcbx_cfg_bitmap) +{ + return (ECORE_MFW_GET_FIELD(dcbx_cfg_bitmap, DCBX_CONFIG_VERSION) == + DCBX_CONFIG_VERSION_IEEE) ? true : false; +} + +/* @@@TBD A0 Eagle workaround */ +void ecore_dcbx_eagle_workaround(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, bool set_to_pfc) +{ + if (!ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) + return; + + ecore_wr(p_hwfn, p_ptt, + YSEM_REG_FAST_MEMORY + 0x20000 /* RAM in FASTMEM */ + + YSTORM_FLOW_CONTROL_MODE_OFFSET, + set_to_pfc ? flow_ctrl_pfc : flow_ctrl_pause); + ecore_wr(p_hwfn, p_ptt, NIG_REG_FLOWCTRL_MODE, + EAGLE_ENG1_WORKAROUND_NIG_FLOWCTRL_MODE); +} + +static void +ecore_dcbx_dp_protocol(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_results *p_data) +{ + struct ecore_hw_info *p_info = &p_hwfn->hw_info; + enum dcbx_protocol_type id; + bool enable, update; + u8 prio, tc, size; + const char *name; /* @DPDK */ + int i; + + size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update); + + DP_INFO(p_hwfn, "DCBX negotiated: %d\n", p_data->dcbx_enabled); + + for (i = 0; i < size; i++) { + id = ecore_dcbx_app_update[i].id; + name = ecore_dcbx_app_update[i].name; + + enable = p_data->arr[id].enable; + update = p_data->arr[id].update; + tc = p_data->arr[id].tc; + prio = p_data->arr[id].priority; + + DP_INFO(p_hwfn, + "%s info: update %d, enable %d, prio %d, tc %d, num_tc %d\n", + name, update, enable, prio, tc, p_info->num_tc); + } +} + +static void +ecore_dcbx_set_pf_tcs(struct ecore_hw_info *p_info, + u8 tc, enum ecore_pci_personality personality) +{ + /* QM reconf data */ + if (p_info->personality == personality) { + if (personality == ECORE_PCI_ETH) + p_info->non_offload_tc = tc; + else + p_info->offload_tc = tc; + } +} + +void +ecore_dcbx_set_params(struct ecore_dcbx_results *p_data, + struct ecore_hw_info *p_info, + bool enable, bool update, u8 prio, u8 tc, + enum dcbx_protocol_type type, + enum ecore_pci_personality personality) +{ + /* PF update ramrod data */ + p_data->arr[type].update = update; + p_data->arr[type].enable = enable; + p_data->arr[type].priority = prio; + p_data->arr[type].tc = tc; + + ecore_dcbx_set_pf_tcs(p_info, tc, personality); +} + +/* Update app protocol data and hw_info fields with the TLV info */ +static void +ecore_dcbx_update_app_info(struct ecore_dcbx_results *p_data, + struct ecore_hwfn *p_hwfn, + bool enable, bool update, u8 prio, u8 tc, + enum dcbx_protocol_type type) +{ + struct ecore_hw_info *p_info = &p_hwfn->hw_info; + enum ecore_pci_personality personality; + enum dcbx_protocol_type id; + const char *name; /* @DPDK */ + u8 size; + int i; + + size = OSAL_ARRAY_SIZE(ecore_dcbx_app_update); + + for (i = 0; i < size; i++) { + id = ecore_dcbx_app_update[i].id; + + if (type != id) + continue; + + personality = ecore_dcbx_app_update[i].personality; + name = ecore_dcbx_app_update[i].name; + + 
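All of the small predicates above (ecore_dcbx_app_ethtype(), ecore_dcbx_cee() and so on) are built on the same mask-and-shift idiom: ECORE_MFW_GET_FIELD, defined in ecore_dcbx.h further down in this patch, pastes _MASK and _SHIFT onto the field name. A standalone sketch of the mechanics, using invented mask and shift values rather than the real mcp_public.h definitions:

#include <stdio.h>
#include <stdint.h>

/* Same shape as ECORE_MFW_GET_FIELD in ecore_dcbx.h */
#define DEMO_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

/* Invented layout: bits [9:8] hold the "selection field" of an APP entry */
#define DEMO_APP_SF_MASK	0x00000300
#define DEMO_APP_SF_SHIFT	8
#define DEMO_APP_SF_ETHTYPE	0
#define DEMO_APP_SF_PORT	1

int main(void)
{
	uint32_t app_info_bitmap = 1u << DEMO_APP_SF_SHIFT;	/* SF = PORT */

	if (DEMO_GET_FIELD(app_info_bitmap, DEMO_APP_SF) == DEMO_APP_SF_PORT)
		printf("APP entry is keyed by UDP/TCP port\n");
	return 0;
}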
ecore_dcbx_set_params(p_data, p_info, enable, update, + prio, tc, type, personality); + } +} + +static enum _ecore_status_t +ecore_dcbx_get_app_priority(u8 pri_bitmap, u8 *priority) +{ + u32 pri_mask, pri = ECORE_MAX_PFC_PRIORITIES; + u32 index = ECORE_MAX_PFC_PRIORITIES - 1; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Bitmap 1 corresponds to priority 0, return priority 0 */ + if (pri_bitmap == 1) { + *priority = 0; + return rc; + } + + /* Choose the highest priority */ + while ((pri == ECORE_MAX_PFC_PRIORITIES) && index) { + pri_mask = 1 << index; + if (pri_bitmap & pri_mask) + pri = index; + index--; + } + + if (pri < ECORE_MAX_PFC_PRIORITIES) + *priority = (u8)pri; + else + rc = ECORE_INVAL; + + return rc; +} + +static bool +ecore_dcbx_get_app_protocol_type(struct ecore_hwfn *p_hwfn, + u32 app_prio_bitmap, u16 id, int *type) +{ + bool status = false; + + if (ecore_dcbx_default_tlv(app_prio_bitmap, id)) { + *type = DCBX_PROTOCOL_ETH; + status = true; + } else { + DP_ERR(p_hwfn, "Unsupported protocol %d\n", id); + } + + return status; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. + */ +static enum _ecore_status_t +ecore_dcbx_process_tlv(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_results *p_data, + struct dcbx_app_priority_entry *p_tbl, u32 pri_tc_tbl, + int count, bool dcbx_enabled) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u8 tc, priority, priority_map; + int i, type = -1; + u16 protocol_id; + bool enable; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, "Num APP entries = %d\n", count); + + /* Parse APP TLV */ + for (i = 0; i < count; i++) { + protocol_id = ECORE_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PROTOCOL_ID); + priority_map = ECORE_MFW_GET_FIELD(p_tbl[i].entry, + DCBX_APP_PRI_MAP); + rc = ecore_dcbx_get_app_priority(priority_map, &priority); + if (rc == ECORE_INVAL) { + DP_ERR(p_hwfn, "Invalid priority\n"); + return rc; + } + + tc = ECORE_DCBX_PRIO2TC(pri_tc_tbl, priority); + if (ecore_dcbx_get_app_protocol_type(p_hwfn, p_tbl[i].entry, + protocol_id, &type)) { + /* ETH always have the enable bit reset, as it gets + * vlan information per packet. For other protocols, + * should be set according to the dcbx_enabled + * indication, but we only got here if there was an + * app tlv for the protocol, so dcbx must be enabled. + */ + enable = (type == DCBX_PROTOCOL_ETH ? false : true); + + ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true, + priority, tc, type); + } + } + /* Update ramrod protocol data and hw_info fields + * with default info when corresponding APP TLV's are not detected. + * The enabled field has a different logic for ethernet as only for + * ethernet dcb should disabled by default, as the information arrives + * from the OS (unless an explicit app tlv was present). + */ + tc = p_data->arr[DCBX_PROTOCOL_ETH].tc; + priority = p_data->arr[DCBX_PROTOCOL_ETH].priority; + for (type = 0; type < DCBX_MAX_PROTOCOL_TYPE; type++) { + if (p_data->arr[type].update) + continue; + + enable = (type == DCBX_PROTOCOL_ETH) ? false : dcbx_enabled; + ecore_dcbx_update_app_info(p_data, p_hwfn, enable, true, + priority, tc, type); + } + + return ECORE_SUCCESS; +} + +/* Parse app TLV's to update TC information in hw_info structure for + * reconfiguring QM. Get protocol specific data for PF update ramrod command. 
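Two lookups above are easier to follow on concrete numbers: ecore_dcbx_get_app_priority() returns the highest priority set in the per-application priority bitmap (bitmap 1 maps to priority 0), and ECORE_DCBX_PRIO2TC() then extracts the 4-bit traffic-class nibble for that priority from pri_tc_tbl. A self-contained sketch with invented table values; demo_highest_prio() mirrors the loop above but is not the driver function:

#include <stdio.h>
#include <stdint.h>

/* Nibble layout mirrors ECORE_DCBX_PRIO2TC(): priority 0 sits in the top nibble */
#define DEMO_PRIO2TC(tbl, prio)	((uint32_t)((tbl) >> ((7 - (prio)) * 4)) & 0x7)

static uint8_t demo_highest_prio(uint8_t pri_bitmap)
{
	int idx;

	if (pri_bitmap == 1)		/* bitmap 1 corresponds to priority 0 */
		return 0;
	for (idx = 7; idx > 0; idx--)	/* highest advertised priority wins */
		if (pri_bitmap & (1 << idx))
			return (uint8_t)idx;
	return 0;			/* driver returns ECORE_INVAL here; simplified */
}

int main(void)
{
	uint8_t  pri_bitmap = 0x28;		/* priorities 3 and 5 advertised */
	uint32_t pri_tc_tbl = 0x01234567;	/* prio N -> TC N, invented */
	uint8_t  prio = demo_highest_prio(pri_bitmap);

	printf("prio %u -> tc %u\n", (unsigned)prio,
	       (unsigned)DEMO_PRIO2TC(pri_tc_tbl, prio));
	return 0;
}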
+ */ +static enum _ecore_status_t +ecore_dcbx_process_mib_info(struct ecore_hwfn *p_hwfn) +{ + struct dcbx_app_priority_feature *p_app; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_dcbx_results data = { 0 }; + struct dcbx_app_priority_entry *p_tbl; + struct dcbx_ets_feature *p_ets; + struct ecore_hw_info *p_info; + u32 pri_tc_tbl, flags; + bool dcbx_enabled; + int num_entries; + + /* If DCBx version is non zero, then negotiation was + * successfuly performed + */ + flags = p_hwfn->p_dcbx_info->operational.flags; + dcbx_enabled = ECORE_MFW_GET_FIELD(flags, DCBX_CONFIG_VERSION) != 0; + + p_app = &p_hwfn->p_dcbx_info->operational.features.app; + p_tbl = p_app->app_pri_tbl; + + p_ets = &p_hwfn->p_dcbx_info->operational.features.ets; + pri_tc_tbl = p_ets->pri_tc_tbl[0]; + + p_info = &p_hwfn->hw_info; + num_entries = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_NUM_ENTRIES); + + rc = ecore_dcbx_process_tlv(p_hwfn, &data, p_tbl, pri_tc_tbl, + num_entries, dcbx_enabled); + if (rc != ECORE_SUCCESS) + return rc; + + p_info->num_tc = ECORE_MFW_GET_FIELD(p_ets->flags, DCBX_ETS_MAX_TCS); + data.pf_id = p_hwfn->rel_pf_id; + data.dcbx_enabled = dcbx_enabled; + + ecore_dcbx_dp_protocol(p_hwfn, &data); + + OSAL_MEMCPY(&p_hwfn->p_dcbx_info->results, &data, + sizeof(struct ecore_dcbx_results)); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_copy_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_mib_meta_data *p_data, + enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 prefix_seq_num, suffix_seq_num; + int read_count = 0; + + do { + if (type == ECORE_DCBX_REMOTE_LLDP_MIB) { + ecore_memcpy_from(p_hwfn, p_ptt, p_data->lldp_remote, + p_data->addr, p_data->size); + prefix_seq_num = p_data->lldp_remote->prefix_seq_num; + suffix_seq_num = p_data->lldp_remote->suffix_seq_num; + } else { + ecore_memcpy_from(p_hwfn, p_ptt, p_data->mib, + p_data->addr, p_data->size); + prefix_seq_num = p_data->mib->prefix_seq_num; + suffix_seq_num = p_data->mib->suffix_seq_num; + } + read_count++; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + } while ((prefix_seq_num != suffix_seq_num) && + (read_count < ECORE_DCBX_MAX_MIB_READ_TRY)); + + if (read_count >= ECORE_DCBX_MAX_MIB_READ_TRY) { + DP_ERR(p_hwfn, + "MIB read err, mib type = %d, try count = %d prefix seq num = %d suffix seq num = %d\n", + type, read_count, prefix_seq_num, suffix_seq_num); + rc = ECORE_IO; + } + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_get_priority_info(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_app_prio *p_prio, + struct ecore_dcbx_results *p_results) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (p_results->arr[DCBX_PROTOCOL_ETH].update && + p_results->arr[DCBX_PROTOCOL_ETH].enable) { + p_prio->eth = p_results->arr[DCBX_PROTOCOL_ETH].priority; + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "Priority: eth %d\n", p_prio->eth); + } + + return rc; +} + +static void +ecore_dcbx_get_app_data(struct ecore_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + struct ecore_dcbx_params *p_params) +{ + int i; + + p_params->app_willing = ECORE_MFW_GET_FIELD(p_app->flags, + DCBX_APP_WILLING); + p_params->app_valid = ECORE_MFW_GET_FIELD(p_app->flags, + DCBX_APP_ENABLED); + p_params->num_app_entries = ECORE_MFW_GET_FIELD(p_app->flags, + DCBX_APP_ENABLED); + for (i = 0; i < DCBX_MAX_APP_PROTOCOL; 
i++) + p_params->app_bitmap[i] = p_tbl[i].entry; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "APP params: willing %d, valid %d\n", + p_params->app_willing, p_params->app_valid); +} + +static void +ecore_dcbx_get_pfc_data(struct ecore_hwfn *p_hwfn, + u32 pfc, struct ecore_dcbx_params *p_params) +{ + p_params->pfc_willing = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_WILLING); + p_params->max_pfc_tc = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_CAPS); + p_params->pfc_enabled = ECORE_MFW_GET_FIELD(pfc, DCBX_PFC_ENABLED); + p_params->pfc_bitmap = pfc; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "PFC params: willing %d, pfc_bitmap %d\n", + p_params->pfc_willing, p_params->pfc_bitmap); +} + +static void +ecore_dcbx_get_ets_data(struct ecore_hwfn *p_hwfn, + struct dcbx_ets_feature *p_ets, + struct ecore_dcbx_params *p_params) +{ + int i; + + p_params->ets_willing = ECORE_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_WILLING); + p_params->ets_enabled = ECORE_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_ENABLED); + p_params->max_ets_tc = ECORE_MFW_GET_FIELD(p_ets->flags, + DCBX_ETS_MAX_TCS); + p_params->ets_pri_tc_tbl[0] = p_ets->pri_tc_tbl[0]; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "ETS params: willing %d, pri_tc_tbl_0 %x max_ets_tc %d\n", + p_params->ets_willing, p_params->ets_pri_tc_tbl[0], + p_params->max_ets_tc); + + /* 8 bit tsa and bw data corresponding to each of the 8 TC's are + * encoded in a type u32 array of size 2. + */ + for (i = 0; i < 2; i++) { + p_params->ets_tc_tsa_tbl[i] = p_ets->tc_tsa_tbl[i]; + p_params->ets_tc_bw_tbl[i] = p_ets->tc_bw_tbl[i]; + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "elem %d bw_tbl %x tsa_tbl %x\n", + i, p_params->ets_tc_bw_tbl[i], + p_params->ets_tc_tsa_tbl[i]); + } +} + +static enum _ecore_status_t +ecore_dcbx_get_common_params(struct ecore_hwfn *p_hwfn, + struct dcbx_app_priority_feature *p_app, + struct dcbx_app_priority_entry *p_tbl, + struct dcbx_ets_feature *p_ets, + u32 pfc, struct ecore_dcbx_params *p_params) +{ + ecore_dcbx_get_app_data(p_hwfn, p_app, p_tbl, p_params); + ecore_dcbx_get_ets_data(p_hwfn, p_ets, p_params); + ecore_dcbx_get_pfc_data(p_hwfn, pfc, p_params); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_get_local_params(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_get *params) +{ + struct ecore_dcbx_admin_params *p_local; + struct dcbx_app_priority_feature *p_app; + struct dcbx_app_priority_entry *p_tbl; + struct ecore_dcbx_params *p_data; + struct dcbx_ets_feature *p_ets; + u32 pfc; + + p_local = ¶ms->local; + p_data = &p_local->params; + p_app = &p_hwfn->p_dcbx_info->local_admin.features.app; + p_tbl = p_app->app_pri_tbl; + p_ets = &p_hwfn->p_dcbx_info->local_admin.features.ets; + pfc = p_hwfn->p_dcbx_info->local_admin.features.pfc; + + ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data); + p_local->valid = true; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_get_remote_params(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_get *params) +{ + struct ecore_dcbx_remote_params *p_remote; + struct dcbx_app_priority_feature *p_app; + struct dcbx_app_priority_entry *p_tbl; + struct ecore_dcbx_params *p_data; + struct dcbx_ets_feature *p_ets; + u32 pfc; + + p_remote = ¶ms->remote; + p_data = &p_remote->params; + p_app = &p_hwfn->p_dcbx_info->remote.features.app; + p_tbl = p_app->app_pri_tbl; + p_ets = &p_hwfn->p_dcbx_info->remote.features.ets; + pfc = p_hwfn->p_dcbx_info->remote.features.pfc; + + ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, 
p_data); + p_remote->valid = true; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_get_operational_params(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_get *params) +{ + struct ecore_dcbx_operational_params *p_operational; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct dcbx_app_priority_feature *p_app; + struct dcbx_app_priority_entry *p_tbl; + struct ecore_dcbx_results *p_results; + struct ecore_dcbx_params *p_data; + struct dcbx_ets_feature *p_ets; + bool enabled, err; + u32 pfc, flags; + + flags = p_hwfn->p_dcbx_info->operational.flags; + + /* If DCBx version is non zero, then negotiation + * was successfuly performed + */ + p_operational = ¶ms->operational; + enabled = ecore_dcbx_enabled(flags); + if (!enabled) { + p_operational->enabled = enabled; + p_operational->valid = false; + return ECORE_INVAL; + } + + p_data = &p_operational->params; + p_results = &p_hwfn->p_dcbx_info->results; + p_app = &p_hwfn->p_dcbx_info->operational.features.app; + p_tbl = p_app->app_pri_tbl; + p_ets = &p_hwfn->p_dcbx_info->operational.features.ets; + pfc = p_hwfn->p_dcbx_info->operational.features.pfc; + + p_operational->ieee = ecore_dcbx_ieee(flags); + p_operational->cee = ecore_dcbx_cee(flags); + + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "Version support: ieee %d, cee %d\n", + p_operational->ieee, p_operational->cee); + + ecore_dcbx_get_common_params(p_hwfn, p_app, p_tbl, p_ets, pfc, p_data); + ecore_dcbx_get_priority_info(p_hwfn, &p_operational->app_prio, + p_results); + err = ECORE_MFW_GET_FIELD(p_app->flags, DCBX_APP_ERROR); + p_operational->err = err; + p_operational->enabled = enabled; + p_operational->valid = true; + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_get_local_lldp_params(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_get *params) +{ + struct ecore_dcbx_lldp_local *p_local; + osal_size_t size; + u32 *dest; + + p_local = ¶ms->lldp_local; + + size = OSAL_ARRAY_SIZE(p_local->local_chassis_id); + dest = p_hwfn->p_dcbx_info->get.lldp_local.local_chassis_id; + OSAL_MEMCPY(dest, p_local->local_chassis_id, size); + + size = OSAL_ARRAY_SIZE(p_local->local_port_id); + dest = p_hwfn->p_dcbx_info->get.lldp_local.local_port_id; + OSAL_MEMCPY(dest, p_local->local_port_id, size); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_get_remote_lldp_params(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_dcbx_get *params) +{ + struct ecore_dcbx_lldp_remote *p_remote; + osal_size_t size; + u32 *dest; + + p_remote = ¶ms->lldp_remote; + + size = OSAL_ARRAY_SIZE(p_remote->peer_chassis_id); + dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_chassis_id; + OSAL_MEMCPY(dest, p_remote->peer_chassis_id, size); + + size = OSAL_ARRAY_SIZE(p_remote->peer_port_id); + dest = p_hwfn->p_dcbx_info->get.lldp_remote.peer_port_id; + OSAL_MEMCPY(dest, p_remote->peer_port_id, size); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dcbx_get_params(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_dcbx_get *p_params; + + p_params = &p_hwfn->p_dcbx_info->get; + + switch (type) { + case ECORE_DCBX_REMOTE_MIB: + ecore_dcbx_get_remote_params(p_hwfn, p_ptt, p_params); + break; + case ECORE_DCBX_LOCAL_MIB: + ecore_dcbx_get_local_params(p_hwfn, p_ptt, p_params); + break; + case ECORE_DCBX_OPERATIONAL_MIB: + ecore_dcbx_get_operational_params(p_hwfn, p_ptt, p_params); + break; + case 
ECORE_DCBX_REMOTE_LLDP_MIB: + rc = ecore_dcbx_get_remote_lldp_params(p_hwfn, p_ptt, p_params); + break; + case ECORE_DCBX_LOCAL_LLDP_MIB: + rc = ecore_dcbx_get_local_lldp_params(p_hwfn, p_ptt, p_params); + break; + default: + DP_ERR(p_hwfn, "MIB read err, unknown mib type %d\n", type); + return ECORE_INVAL; + } + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_local_lldp_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_dcbx_mib_meta_data data; + + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_config_params); + data.lldp_local = p_hwfn->p_dcbx_info->lldp_local; + data.size = sizeof(struct lldp_config_params_s); + ecore_memcpy_from(p_hwfn, p_ptt, data.lldp_local, data.addr, data.size); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_remote_lldp_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_dcbx_mib_meta_data data; + + data.addr = p_hwfn->mcp_info->port_addr + offsetof(struct public_port, + lldp_status_params); + data.lldp_remote = p_hwfn->p_dcbx_info->lldp_remote; + data.size = sizeof(struct lldp_status_params_s); + rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_operational_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, operational_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->operational; + data.size = sizeof(struct dcbx_mib); + rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_remote_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, remote_dcbx_mib); + data.mib = &p_hwfn->p_dcbx_info->remote; + data.size = sizeof(struct dcbx_mib); + rc = ecore_dcbx_copy_mib(p_hwfn, p_ptt, &data, type); + + return rc; +} + +static enum _ecore_status_t +ecore_dcbx_read_local_mib(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_dcbx_mib_meta_data data; + enum _ecore_status_t rc = ECORE_SUCCESS; + + data.addr = p_hwfn->mcp_info->port_addr + + offsetof(struct public_port, local_admin_dcbx_mib); + data.local_admin = &p_hwfn->p_dcbx_info->local_admin; + data.size = sizeof(struct dcbx_local_params); + ecore_memcpy_from(p_hwfn, p_ptt, data.local_admin, + data.addr, data.size); + + return rc; +} + +static enum _ecore_status_t ecore_dcbx_read_mib(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + switch (type) { + case ECORE_DCBX_OPERATIONAL_MIB: + rc = ecore_dcbx_read_operational_mib(p_hwfn, p_ptt, type); + break; + case ECORE_DCBX_REMOTE_MIB: + rc = ecore_dcbx_read_remote_mib(p_hwfn, p_ptt, type); + break; + case ECORE_DCBX_LOCAL_MIB: + rc = ecore_dcbx_read_local_mib(p_hwfn, p_ptt); + break; + case ECORE_DCBX_REMOTE_LLDP_MIB: + rc = ecore_dcbx_read_remote_lldp_mib(p_hwfn, p_ptt, type); + break; + case ECORE_DCBX_LOCAL_LLDP_MIB: + rc = ecore_dcbx_read_local_lldp_mib(p_hwfn, p_ptt); + break; + default: + DP_ERR(p_hwfn, "MIB read 
err, unknown mib type %d\n", type); + return ECORE_INVAL; + } + + return rc; +} + +/* + * Read updated MIB. + * Reconfigure QM and invoke PF update ramrod command if operational MIB + * change is detected. + */ +enum _ecore_status_t +ecore_dcbx_mib_update_event(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc) + return rc; + + if (type == ECORE_DCBX_OPERATIONAL_MIB) { + rc = ecore_dcbx_process_mib_info(p_hwfn); + if (!rc) { + bool enabled; + + /* reconfigure tcs of QM queues according + * to negotiation results + */ + ecore_qm_reconf(p_hwfn, p_ptt); + + /* update storm FW with negotiation results */ + ecore_sp_pf_update(p_hwfn); + + /* set eagle enigne 1 flow control workaround + * according to negotiation results + */ + enabled = p_hwfn->p_dcbx_info->results.dcbx_enabled; + ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, enabled); + } + } + ecore_dcbx_get_params(p_hwfn, p_ptt, type); + OSAL_DCBX_AEN(p_hwfn, type); + + return rc; +} + +enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + p_hwfn->p_dcbx_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(struct ecore_dcbx_info)); + if (!p_hwfn->p_dcbx_info) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_dcbx_info'"); + rc = ECORE_NOMEM; + } + + return rc; +} + +void ecore_dcbx_info_free(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_info *p_dcbx_info) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_dcbx_info); +} + +static void ecore_dcbx_update_protocol_data(struct protocol_dcb_data *p_data, + struct ecore_dcbx_results *p_src, + enum dcbx_protocol_type type) +{ + p_data->dcb_enable_flag = p_src->arr[type].enable; + p_data->dcb_priority = p_src->arr[type].priority; + p_data->dcb_tc = p_src->arr[type].tc; +} + +/* Set pf update ramrod command params */ +void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest) +{ + struct protocol_dcb_data *p_dcb_data; + bool update_flag; + + p_dest->pf_id = p_src->pf_id; + + update_flag = p_src->arr[DCBX_PROTOCOL_ETH].update; + p_dest->update_eth_dcb_data_flag = update_flag; + + p_dcb_data = &p_dest->eth_dcb_data; + ecore_dcbx_update_protocol_data(p_dcb_data, p_src, DCBX_PROTOCOL_ETH); +} + +static +enum _ecore_status_t ecore_dcbx_query(struct ecore_hwfn *p_hwfn, + enum ecore_mib_read_type type) +{ + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) { + rc = ECORE_TIMEOUT; + DP_ERR(p_hwfn, "rc = %d\n", rc); + return rc; + } + + rc = ecore_dcbx_read_mib(p_hwfn, p_ptt, type); + if (rc != ECORE_SUCCESS) + goto out; + + rc = ecore_dcbx_get_params(p_hwfn, p_ptt, type); + +out: + ecore_ptt_release(p_hwfn, p_ptt); + return rc; +} + +enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *p_hwfn, + struct ecore_dcbx_get *p_get, + enum ecore_mib_read_type type) +{ + enum _ecore_status_t rc; + + rc = ecore_dcbx_query(p_hwfn, type); + if (rc) + return rc; + + if (p_get != OSAL_NULL) + OSAL_MEMCPY(p_get, &p_hwfn->p_dcbx_info->get, + sizeof(struct ecore_dcbx_get)); + + return rc; +} diff --git a/drivers/net/qede/base/ecore_dcbx.h b/drivers/net/qede/base/ecore_dcbx.h new file mode 100644 index 00000000..d577f4e1 --- /dev/null +++ b/drivers/net/qede/base/ecore_dcbx.h @@ -0,0 +1,55 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. 
+ * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_DCBX_H__ +#define __ECORE_DCBX_H__ + +#include "ecore.h" +#include "ecore_mcp.h" +#include "mcp_public.h" +#include "reg_addr.h" +#include "ecore_hw.h" +#include "ecore_hsi_common.h" +#include "ecore_dcbx_api.h" + +#define ECORE_MFW_GET_FIELD(name, field) \ + (((name) & (field ## _MASK)) >> (field ## _SHIFT)) + +struct ecore_dcbx_info { + struct lldp_status_params_s lldp_remote[LLDP_MAX_LLDP_AGENTS]; + struct lldp_config_params_s lldp_local[LLDP_MAX_LLDP_AGENTS]; + struct dcbx_local_params local_admin; + struct ecore_dcbx_results results; + struct dcbx_mib operational; + struct dcbx_mib remote; + struct ecore_dcbx_set set; + struct ecore_dcbx_get get; + u8 dcbx_cap; +}; + +/* Upper layer driver interface routines */ +enum _ecore_status_t ecore_dcbx_config_params(struct ecore_hwfn *, + struct ecore_ptt *, + struct ecore_dcbx_set *); + +/* ECORE local interface routines */ +enum _ecore_status_t +ecore_dcbx_mib_update_event(struct ecore_hwfn *, struct ecore_ptt *, + enum ecore_mib_read_type); + +enum _ecore_status_t ecore_dcbx_read_lldp_params(struct ecore_hwfn *, + struct ecore_ptt *); +enum _ecore_status_t ecore_dcbx_info_alloc(struct ecore_hwfn *p_hwfn); +void ecore_dcbx_info_free(struct ecore_hwfn *, struct ecore_dcbx_info *); +void ecore_dcbx_set_pf_update_params(struct ecore_dcbx_results *p_src, + struct pf_update_ramrod_data *p_dest); +/* @@@TBD eagle phy workaround */ +void ecore_dcbx_eagle_workaround(struct ecore_hwfn *, struct ecore_ptt *, + bool set_to_pfc); + +#endif /* __ECORE_DCBX_H__ */ diff --git a/drivers/net/qede/base/ecore_dcbx_api.h b/drivers/net/qede/base/ecore_dcbx_api.h new file mode 100644 index 00000000..7767d48e --- /dev/null +++ b/drivers/net/qede/base/ecore_dcbx_api.h @@ -0,0 +1,160 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_DCBX_API_H__ +#define __ECORE_DCBX_API_H__ + +#include "ecore.h" + +#define DCBX_CONFIG_MAX_APP_PROTOCOL 4 + +enum ecore_mib_read_type { + ECORE_DCBX_OPERATIONAL_MIB, + ECORE_DCBX_REMOTE_MIB, + ECORE_DCBX_LOCAL_MIB, + ECORE_DCBX_REMOTE_LLDP_MIB, + ECORE_DCBX_LOCAL_LLDP_MIB +}; + +struct ecore_dcbx_app_data { + bool enable; /* DCB enabled */ + bool update; /* Update indication */ + u8 priority; /* Priority */ + u8 tc; /* Traffic Class */ +}; + +#ifndef __EXTRACT__LINUX__ +enum dcbx_protocol_type { + DCBX_PROTOCOL_ETH, + DCBX_MAX_PROTOCOL_TYPE +}; + +#ifdef LINUX_REMOVE +/* We can't assume THE HSI values are available to clients, so we need + * to redefine those here. 
+ */ +#ifndef LLDP_CHASSIS_ID_STAT_LEN +#define LLDP_CHASSIS_ID_STAT_LEN 4 +#endif +#ifndef LLDP_PORT_ID_STAT_LEN +#define LLDP_PORT_ID_STAT_LEN 4 +#endif +#ifndef DCBX_MAX_APP_PROTOCOL +#define DCBX_MAX_APP_PROTOCOL 32 +#endif + +#endif + +struct ecore_dcbx_lldp_remote { + u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + bool enable_rx; + bool enable_tx; + u32 tx_interval; + u32 max_credit; +}; + +struct ecore_dcbx_lldp_local { + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct ecore_dcbx_app_prio { + u8 eth; +}; + +struct ecore_dcbx_params { + u32 app_bitmap[DCBX_MAX_APP_PROTOCOL]; + u16 num_app_entries; + bool app_willing; + bool app_valid; + bool ets_willing; + bool ets_enabled; + bool valid; /* Indicate validity of params */ + u32 ets_pri_tc_tbl[1]; + u32 ets_tc_bw_tbl[2]; + u32 ets_tc_tsa_tbl[2]; + bool pfc_willing; + bool pfc_enabled; + u32 pfc_bitmap; + u8 max_pfc_tc; + u8 max_ets_tc; +}; + +struct ecore_dcbx_admin_params { + struct ecore_dcbx_params params; + bool valid; /* Indicate validity of params */ +}; + +struct ecore_dcbx_remote_params { + struct ecore_dcbx_params params; + bool valid; /* Indicate validity of params */ +}; + +struct ecore_dcbx_operational_params { + struct ecore_dcbx_app_prio app_prio; + struct ecore_dcbx_params params; + bool valid; /* Indicate validity of params */ + bool enabled; + bool ieee; + bool cee; + u32 err; +}; + +struct ecore_dcbx_get { + struct ecore_dcbx_operational_params operational; + struct ecore_dcbx_lldp_remote lldp_remote; + struct ecore_dcbx_lldp_local lldp_local; + struct ecore_dcbx_remote_params remote; + struct ecore_dcbx_admin_params local; +}; +#endif + +struct ecore_dcbx_set { + struct ecore_dcbx_admin_params config; + bool enabled; + u32 ver_num; +}; + +struct ecore_dcbx_results { + bool dcbx_enabled; + u8 pf_id; + struct ecore_dcbx_app_data arr[DCBX_MAX_PROTOCOL_TYPE]; +}; + +struct ecore_dcbx_app_metadata { + enum dcbx_protocol_type id; + const char *name; /* @DPDK */ + enum ecore_pci_personality personality; +}; + +struct ecore_dcbx_mib_meta_data { + struct lldp_config_params_s *lldp_local; + struct lldp_status_params_s *lldp_remote; + struct dcbx_local_params *local_admin; + struct dcbx_mib *mib; + osal_size_t size; + u32 addr; +}; + +void +ecore_dcbx_set_params(struct ecore_dcbx_results *p_data, + struct ecore_hw_info *p_info, + bool enable, bool update, u8 prio, u8 tc, + enum dcbx_protocol_type type, + enum ecore_pci_personality personality); + +enum _ecore_status_t ecore_dcbx_query_params(struct ecore_hwfn *, + struct ecore_dcbx_get *, + enum ecore_mib_read_type); + +static const struct ecore_dcbx_app_metadata ecore_dcbx_app_update[] = { + {DCBX_PROTOCOL_ETH, "ETH", ECORE_PCI_ETH} +}; + +#endif /* __ECORE_DCBX_API_H__ */ diff --git a/drivers/net/qede/base/ecore_dev.c b/drivers/net/qede/base/ecore_dev.c new file mode 100644 index 00000000..0a689694 --- /dev/null +++ b/drivers/net/qede/base/ecore_dev.c @@ -0,0 +1,3597 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#include "bcm_osal.h" +#include "reg_addr.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore.h" +#include "ecore_chain.h" +#include "ecore_status.h" +#include "ecore_hw.h" +#include "ecore_rt_defs.h" +#include "ecore_init_ops.h" +#include "ecore_int.h" +#include "ecore_cxt.h" +#include "ecore_spq.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_sp_commands.h" +#include "ecore_dev_api.h" +#include "ecore_sriov.h" +#include "ecore_vf.h" +#include "ecore_mcp.h" +#include "ecore_hw_defs.h" +#include "mcp_public.h" +#include "ecore_iro.h" +#include "nvm_cfg.h" +#include "ecore_dev_api.h" +#include "ecore_attn_values.h" +#include "ecore_dcbx.h" + +/* Configurable */ +#define ECORE_MIN_DPIS (4) /* The minimal number of DPIs required + * load the driver. The number was + * arbitrarily set. + */ + +/* Derived */ +#define ECORE_MIN_PWM_REGION ((ECORE_WID_SIZE) * (ECORE_MIN_DPIS)) + +enum BAR_ID { + BAR_ID_0, /* used for GRC */ + BAR_ID_1 /* Used for doorbells */ +}; + +static u32 ecore_hw_bar_size(struct ecore_hwfn *p_hwfn, enum BAR_ID bar_id) +{ + u32 bar_reg = (bar_id == BAR_ID_0 ? + PGLUE_B_REG_PF_BAR0_SIZE : PGLUE_B_REG_PF_BAR1_SIZE); + u32 val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, bar_reg); + + /* The above registers were updated in the past only in CMT mode. Since + * they were found to be useful MFW started updating them from 8.7.7.0. + * In older MFW versions they are set to 0 which means disabled. + */ + if (!val) { + if (p_hwfn->p_dev->num_hwfns > 1) { + DP_NOTICE(p_hwfn, false, + "BAR size not configured. Assuming BAR" + " size of 256kB for GRC and 512kB for DB\n"); + return BAR_ID_0 ? 256 * 1024 : 512 * 1024; + } + + DP_NOTICE(p_hwfn, false, + "BAR size not configured. Assuming BAR" + " size of 512kB for GRC and 512kB for DB\n"); + return 512 * 1024; + } + + return 1 << (val + 15); +} + +void ecore_init_dp(struct ecore_dev *p_dev, + u32 dp_module, u8 dp_level, void *dp_ctx) +{ + u32 i; + + p_dev->dp_level = dp_level; + p_dev->dp_module = dp_module; + p_dev->dp_ctx = dp_ctx; + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + p_hwfn->dp_level = dp_level; + p_hwfn->dp_module = dp_module; + p_hwfn->dp_ctx = dp_ctx; + } +} + +void ecore_init_struct(struct ecore_dev *p_dev) +{ + u8 i; + + for (i = 0; i < MAX_HWFNS_PER_DEVICE; i++) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + p_hwfn->p_dev = p_dev; + p_hwfn->my_id = i; + p_hwfn->b_active = false; + + OSAL_MUTEX_ALLOC(p_hwfn, &p_hwfn->dmae_info.mutex); + OSAL_MUTEX_INIT(&p_hwfn->dmae_info.mutex); + } + + /* hwfn 0 is always active */ + p_dev->hwfns[0].b_active = true; + + /* set the default cache alignment to 128 (may be overridden later) */ + p_dev->cache_shift = 7; +} + +static void ecore_qm_info_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + + OSAL_FREE(p_hwfn->p_dev, qm_info->qm_pq_params); + qm_info->qm_pq_params = OSAL_NULL; + OSAL_FREE(p_hwfn->p_dev, qm_info->qm_vport_params); + qm_info->qm_vport_params = OSAL_NULL; + OSAL_FREE(p_hwfn->p_dev, qm_info->qm_port_params); + qm_info->qm_port_params = OSAL_NULL; + OSAL_FREE(p_hwfn->p_dev, qm_info->wfq_data); + qm_info->wfq_data = OSAL_NULL; +} + +void ecore_resc_free(struct ecore_dev *p_dev) +{ + int i; + + if (IS_VF(p_dev)) + return; + + OSAL_FREE(p_dev, p_dev->fw_data); + p_dev->fw_data = OSAL_NULL; + + OSAL_FREE(p_dev, p_dev->reset_stats); + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + OSAL_FREE(p_dev, p_hwfn->p_tx_cids); + p_hwfn->p_tx_cids = 
OSAL_NULL; + OSAL_FREE(p_dev, p_hwfn->p_rx_cids); + p_hwfn->p_rx_cids = OSAL_NULL; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + ecore_cxt_mngr_free(p_hwfn); + ecore_qm_info_free(p_hwfn); + ecore_spq_free(p_hwfn); + ecore_eq_free(p_hwfn, p_hwfn->p_eq); + ecore_consq_free(p_hwfn, p_hwfn->p_consq); + ecore_int_free(p_hwfn); + ecore_iov_free(p_hwfn); + ecore_dmae_info_free(p_hwfn); + ecore_dcbx_info_free(p_hwfn, p_hwfn->p_dcbx_info); + /* @@@TBD Flush work-queue ? */ + } +} + +static enum _ecore_status_t ecore_init_qm_info(struct ecore_hwfn *p_hwfn, + bool b_sleepable) +{ + u8 num_vports, vf_offset = 0, i, vport_id, num_ports; + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + struct init_qm_port_params *p_qm_port; + u16 num_pqs, multi_cos_tcs = 1; +#ifdef CONFIG_ECORE_SRIOV + u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs; +#else + u16 num_vfs = 0; +#endif + + OSAL_MEM_ZERO(qm_info, sizeof(*qm_info)); + +#ifndef ASIC_ONLY + /* @TMP - Don't allocate QM queues for VFs on emulation */ + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, + "Emulation - skip configuring QM queues for VFs\n"); + num_vfs = 0; + } +#endif + + num_pqs = multi_cos_tcs + num_vfs + 1; /* The '1' is for pure-LB */ + num_vports = (u8)RESC_NUM(p_hwfn, ECORE_VPORT); + + /* Sanity checking that setup requires legal number of resources */ + if (num_pqs > RESC_NUM(p_hwfn, ECORE_PQ)) { + DP_ERR(p_hwfn, + "Need too many Physical queues - 0x%04x when" + " only %04x are available\n", + num_pqs, RESC_NUM(p_hwfn, ECORE_PQ)); + return ECORE_INVAL; + } + + /* PQs will be arranged as follows: First per-TC PQ, then pure-LB queue, + * then special queues, then per-VF PQ. + */ + qm_info->qm_pq_params = OSAL_ZALLOC(p_hwfn->p_dev, + b_sleepable ? GFP_KERNEL : + GFP_ATOMIC, + sizeof(struct init_qm_pq_params) * + num_pqs); + if (!qm_info->qm_pq_params) + goto alloc_err; + + qm_info->qm_vport_params = OSAL_ZALLOC(p_hwfn->p_dev, + b_sleepable ? GFP_KERNEL : + GFP_ATOMIC, + sizeof(struct + init_qm_vport_params) * + num_vports); + if (!qm_info->qm_vport_params) + goto alloc_err; + + qm_info->qm_port_params = OSAL_ZALLOC(p_hwfn->p_dev, + b_sleepable ? GFP_KERNEL : + GFP_ATOMIC, + sizeof(struct init_qm_port_params) + * MAX_NUM_PORTS); + if (!qm_info->qm_port_params) + goto alloc_err; + + qm_info->wfq_data = OSAL_ZALLOC(p_hwfn->p_dev, + b_sleepable ? 
GFP_KERNEL : + GFP_ATOMIC, + sizeof(struct ecore_wfq_data) * + num_vports); + + if (!qm_info->wfq_data) + goto alloc_err; + + vport_id = (u8)RESC_START(p_hwfn, ECORE_VPORT); + + /* First init per-TC PQs */ + for (i = 0; i < multi_cos_tcs; i++) { + struct init_qm_pq_params *params = &qm_info->qm_pq_params[i]; + + if (p_hwfn->hw_info.personality == ECORE_PCI_ETH) { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.non_offload_tc; + params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */ + } else { + params->vport_id = vport_id; + params->tc_id = p_hwfn->hw_info.offload_tc; + params->wrr_group = 1; /* @@@TBD ECORE_WRR_MEDIUM */ + } + } + + /* Then init pure-LB PQ */ + qm_info->pure_lb_pq = i; + qm_info->qm_pq_params[i].vport_id = + (u8)RESC_START(p_hwfn, ECORE_VPORT); + qm_info->qm_pq_params[i].tc_id = PURE_LB_TC; + qm_info->qm_pq_params[i].wrr_group = 1; + i++; + + /* Then init per-VF PQs */ + vf_offset = i; + for (i = 0; i < num_vfs; i++) { + /* First vport is used by the PF */ + qm_info->qm_pq_params[vf_offset + i].vport_id = vport_id + + i + 1; + qm_info->qm_pq_params[vf_offset + i].tc_id = + p_hwfn->hw_info.non_offload_tc; + qm_info->qm_pq_params[vf_offset + i].wrr_group = 1; + }; + + qm_info->vf_queues_offset = vf_offset; + qm_info->num_pqs = num_pqs; + qm_info->num_vports = num_vports; + + /* Initialize qm port parameters */ + num_ports = p_hwfn->p_dev->num_ports_in_engines; + for (i = 0; i < num_ports; i++) { + p_qm_port = &qm_info->qm_port_params[i]; + p_qm_port->active = 1; + /* @@@TMP - was NUM_OF_PHYS_TCS; Changed until dcbx will + * be in place + */ + if (num_ports == 4) + p_qm_port->num_active_phys_tcs = 2; + else + p_qm_port->num_active_phys_tcs = 5; + p_qm_port->num_pbf_cmd_lines = PBF_MAX_CMD_LINES / num_ports; + p_qm_port->num_btb_blocks = BTB_MAX_BLOCKS / num_ports; + } + + if (ECORE_IS_AH(p_hwfn->p_dev) && (num_ports == 4)) + qm_info->max_phys_tcs_per_port = NUM_PHYS_TCS_4PORT_K2; + else + qm_info->max_phys_tcs_per_port = NUM_OF_PHYS_TCS; + + qm_info->start_pq = (u16)RESC_START(p_hwfn, ECORE_PQ); + + qm_info->num_vf_pqs = num_vfs; + qm_info->start_vport = (u8)RESC_START(p_hwfn, ECORE_VPORT); + + for (i = 0; i < qm_info->num_vports; i++) + qm_info->qm_vport_params[i].vport_wfq = 1; + + qm_info->pf_wfq = 0; + qm_info->pf_rl = 0; + qm_info->vport_rl_en = 1; + qm_info->vport_wfq_en = 1; + + return ECORE_SUCCESS; + +alloc_err: + DP_NOTICE(p_hwfn, false, "Failed to allocate memory for QM params\n"); + ecore_qm_info_free(p_hwfn); + return ECORE_NOMEM; +} + +/* This function reconfigures the QM pf on the fly. + * For this purpose we: + * 1. reconfigure the QM database + * 2. set new values to runtime arrat + * 3. send an sdm_qm_cmd through the rbc interface to stop the QM + * 4. activate init tool in QM_PF stage + * 5. send an sdm_qm_cmd through rbc interface to release the QM + */ +enum _ecore_status_t ecore_qm_reconf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + enum _ecore_status_t rc; + bool b_rc; + + /* qm_info is allocated in ecore_init_qm_info() which is already called + * from ecore_resc_alloc() or previous call of ecore_qm_reconf(). + * The allocated size may change each init, so we free it before next + * allocation. 
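ecore_init_qm_info() above lays the PQ array out as the per-TC PQs first, then the pure-LB PQ, then one PQ per VF. A small sketch of the resulting index arithmetic, with an invented VF count:

#include <stdio.h>

int main(void)
{
	unsigned multi_cos_tcs = 1;		/* per-TC PQs, as in the patch */
	unsigned num_vfs = 4;			/* invented for illustration */
	unsigned num_pqs = multi_cos_tcs + num_vfs + 1;	/* +1 is the pure-LB PQ */
	unsigned pure_lb_pq = multi_cos_tcs;
	unsigned vf_offset = pure_lb_pq + 1;
	unsigned vf;

	printf("num_pqs=%u pure_lb_pq=%u\n", num_pqs, pure_lb_pq);
	for (vf = 0; vf < num_vfs; vf++)
		printf("VF %u -> PQ %u\n", vf, vf_offset + vf);
	return 0;
}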
+ */
+	ecore_qm_info_free(p_hwfn);
+
+	/* initialize ecore's qm data structure */
+	rc = ecore_init_qm_info(p_hwfn, false);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* stop PF's qm queues */
+	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
+				      qm_info->start_pq, qm_info->num_pqs);
+	if (!b_rc)
+		return ECORE_INVAL;
+
+	/* clear the QM_PF runtime phase leftovers from previous init */
+	ecore_init_clear_rt_data(p_hwfn);
+
+	/* prepare QM portion of runtime array */
+	ecore_qm_init_pf(p_hwfn);
+
+	/* activate init tool on runtime array */
+	rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, p_hwfn->rel_pf_id,
+			    p_hwfn->hw_info.hw_mode);
+	if (rc != ECORE_SUCCESS)
+		return rc;
+
+	/* start PF's qm queues */
+	b_rc = ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
+				      qm_info->start_pq, qm_info->num_pqs);
+	if (!b_rc)
+		return ECORE_INVAL;
+
+	return ECORE_SUCCESS;
+}
+
+enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev)
+{
+	enum _ecore_status_t rc = ECORE_SUCCESS;
+	struct ecore_consq *p_consq;
+	struct ecore_eq *p_eq;
+	int i;
+
+	if (IS_VF(p_dev))
+		return rc;
+
+	p_dev->fw_data = OSAL_ZALLOC(p_dev, GFP_KERNEL,
+				     sizeof(struct ecore_fw_data));
+	if (!p_dev->fw_data)
+		return ECORE_NOMEM;
+
+	/* Allocate Memory for the Queue->CID mapping */
+	for_each_hwfn(p_dev, i) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+		/* @@@TMP - resc management, change to actual required size */
+		int tx_size = sizeof(struct ecore_hw_cid_data) *
+		    RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+		int rx_size = sizeof(struct ecore_hw_cid_data) *
+		    RESC_NUM(p_hwfn, ECORE_L2_QUEUE);
+
+		p_hwfn->p_tx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+						tx_size);
+		if (!p_hwfn->p_tx_cids) {
+			DP_NOTICE(p_hwfn, true,
+				  "Failed to allocate memory for Tx Cids\n");
+			goto alloc_no_mem;
+		}
+
+		p_hwfn->p_rx_cids = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL,
+						rx_size);
+		if (!p_hwfn->p_rx_cids) {
+			DP_NOTICE(p_hwfn, true,
+				  "Failed to allocate memory for Rx Cids\n");
+			goto alloc_no_mem;
+		}
+	}
+
+	for_each_hwfn(p_dev, i) {
+		struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i];
+
+		/* First allocate the context manager structure */
+		rc = ecore_cxt_mngr_alloc(p_hwfn);
+		if (rc)
+			goto alloc_err;
+
+		/* Set the HW cid/tid numbers (in the context manager)
+		 * Must be done prior to any further computations.
+ */ + rc = ecore_cxt_set_pf_params(p_hwfn); + if (rc) + goto alloc_err; + + /* Prepare and process QM requirements */ + rc = ecore_init_qm_info(p_hwfn, true); + if (rc) + goto alloc_err; + + /* Compute the ILT client partition */ + rc = ecore_cxt_cfg_ilt_compute(p_hwfn); + if (rc) + goto alloc_err; + + /* CID map / ILT shadow table / T2 + * The talbes sizes are determined by the computations above + */ + rc = ecore_cxt_tables_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SPQ, must follow ILT because initializes SPQ context */ + rc = ecore_spq_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* SP status block allocation */ + p_hwfn->p_dpc_ptt = ecore_get_reserved_ptt(p_hwfn, + RESERVED_PTT_DPC); + + rc = ecore_int_alloc(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + goto alloc_err; + + rc = ecore_iov_alloc(p_hwfn); + if (rc) + goto alloc_err; + + /* EQ */ + p_eq = ecore_eq_alloc(p_hwfn, 256); + if (!p_eq) + goto alloc_no_mem; + p_hwfn->p_eq = p_eq; + + p_consq = ecore_consq_alloc(p_hwfn); + if (!p_consq) + goto alloc_no_mem; + p_hwfn->p_consq = p_consq; + + /* DMA info initialization */ + rc = ecore_dmae_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate memory for" + " dmae_info structure\n"); + goto alloc_err; + } + + /* DCBX initialization */ + rc = ecore_dcbx_info_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate memory for dcbxstruct\n"); + goto alloc_err; + } + } + + p_dev->reset_stats = OSAL_ZALLOC(p_dev, GFP_KERNEL, + sizeof(struct ecore_eth_stats)); + if (!p_dev->reset_stats) { + DP_NOTICE(p_dev, true, "Failed to allocate reset statistics\n"); + goto alloc_no_mem; + } + + return ECORE_SUCCESS; + +alloc_no_mem: + rc = ECORE_NOMEM; +alloc_err: + ecore_resc_free(p_dev); + return rc; +} + +void ecore_resc_setup(struct ecore_dev *p_dev) +{ + int i; + + if (IS_VF(p_dev)) + return; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + ecore_cxt_mngr_setup(p_hwfn); + ecore_spq_setup(p_hwfn); + ecore_eq_setup(p_hwfn, p_hwfn->p_eq); + ecore_consq_setup(p_hwfn, p_hwfn->p_consq); + + /* Read shadow of current MFW mailbox */ + ecore_mcp_read_mb(p_hwfn, p_hwfn->p_main_ptt); + OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, + p_hwfn->mcp_info->mfw_mb_length); + + ecore_int_setup(p_hwfn, p_hwfn->p_main_ptt); + + ecore_iov_setup(p_hwfn, p_hwfn->p_main_ptt); + } +} + +#define FINAL_CLEANUP_POLL_CNT (100) +#define FINAL_CLEANUP_POLL_TIME (10) +enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 id, bool is_vf) +{ + u32 command = 0, addr, count = FINAL_CLEANUP_POLL_CNT; + enum _ecore_status_t rc = ECORE_TIMEOUT; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev) || + CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, "Skipping final cleanup for non-ASIC\n"); + return ECORE_SUCCESS; + } +#endif + + addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_FLR_FINAL_ACK_OFFSET(p_hwfn->rel_pf_id); + + if (is_vf) + id += 0x10; + + command |= X_FINAL_CLEANUP_AGG_INT << + SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT; + command |= 1 << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT; + command |= id << SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT; + command |= SDM_COMP_TYPE_AGG_INT << SDM_OP_GEN_COMP_TYPE_SHIFT; + + /* Make sure notification is not set before initiating final cleanup */ + if (REG_RD(p_hwfn, addr)) { + DP_NOTICE(p_hwfn, false, + "Unexpected; Found final cleanup notification " + "before initiating final cleanup\n"); + REG_WR(p_hwfn, addr, 
0); + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Sending final cleanup for PFVF[%d] [Command %08x\n]", + id, OSAL_CPU_TO_LE32(command)); + + ecore_wr(p_hwfn, p_ptt, XSDM_REG_OPERATION_GEN, + OSAL_CPU_TO_LE32(command)); + + /* Poll until completion */ + while (!REG_RD(p_hwfn, addr) && count--) + OSAL_MSLEEP(FINAL_CLEANUP_POLL_TIME); + + if (REG_RD(p_hwfn, addr)) + rc = ECORE_SUCCESS; + else + DP_NOTICE(p_hwfn, true, + "Failed to receive FW final cleanup notification\n"); + + /* Cleanup afterwards */ + REG_WR(p_hwfn, addr, 0); + + return rc; +} + +static void ecore_calc_hw_mode(struct ecore_hwfn *p_hwfn) +{ + int hw_mode = 0; + + switch (ECORE_GET_TYPE(p_hwfn->p_dev)) { + case CHIP_BB_A0: + hw_mode |= 1 << MODE_BB_A0; + break; + case CHIP_BB_B0: + hw_mode |= 1 << MODE_BB_B0; + break; + case CHIP_K2: + hw_mode |= 1 << MODE_K2; + break; + default: + DP_NOTICE(p_hwfn, true, "Can't initialize chip ID %d\n", + ECORE_GET_TYPE(p_hwfn->p_dev)); + return; + } + + /* Ports per engine is based on the values in CNIG_REG_NW_PORT_MODE */ + switch (p_hwfn->p_dev->num_ports_in_engines) { + case 1: + hw_mode |= 1 << MODE_PORTS_PER_ENG_1; + break; + case 2: + hw_mode |= 1 << MODE_PORTS_PER_ENG_2; + break; + case 4: + hw_mode |= 1 << MODE_PORTS_PER_ENG_4; + break; + default: + DP_NOTICE(p_hwfn, true, + "num_ports_in_engine = %d not supported\n", + p_hwfn->p_dev->num_ports_in_engines); + return; + } + + switch (p_hwfn->p_dev->mf_mode) { + case ECORE_MF_DEFAULT: + case ECORE_MF_NPAR: + hw_mode |= 1 << MODE_MF_SI; + break; + case ECORE_MF_OVLAN: + hw_mode |= 1 << MODE_MF_SD; + break; + default: + DP_NOTICE(p_hwfn, true, + "Unsupported MF mode, init as DEFAULT\n"); + hw_mode |= 1 << MODE_MF_SI; + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + hw_mode |= 1 << MODE_FPGA; + } else { + if (p_hwfn->p_dev->b_is_emul_full) + hw_mode |= 1 << MODE_EMUL_FULL; + else + hw_mode |= 1 << MODE_EMUL_REDUCED; + } + } else +#endif + hw_mode |= 1 << MODE_ASIC; + + if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) + hw_mode |= 1 << MODE_EAGLE_ENG1_WORKAROUND; + + if (p_hwfn->p_dev->num_hwfns > 1) + hw_mode |= 1 << MODE_100G; + + p_hwfn->hw_info.hw_mode = hw_mode; + + DP_VERBOSE(p_hwfn, (ECORE_MSG_PROBE | ECORE_MSG_IFUP), + "Configuring function for hw_mode: 0x%08x\n", + p_hwfn->hw_info.hw_mode); +} + +#ifndef ASIC_ONLY +/* MFW-replacement initializations for non-ASIC */ +static void ecore_hw_init_chip(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 pl_hv = 1; + int i; + + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) + pl_hv |= 0x600; + + ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV + 4, pl_hv); + + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) + ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV_2, 0x3ffffff); + + /* initialize interrupt masks */ + for (i = 0; + i < + attn_blocks[BLOCK_MISCS].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)]. + num_of_int_regs; i++) + ecore_wr(p_hwfn, p_ptt, + attn_blocks[BLOCK_MISCS]. + chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[i]-> + mask_addr, 0); + + if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev)) + ecore_wr(p_hwfn, p_ptt, + attn_blocks[BLOCK_CNIG]. + chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]-> + mask_addr, 0); + ecore_wr(p_hwfn, p_ptt, + attn_blocks[BLOCK_PGLCS]. + chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]-> + mask_addr, 0); + ecore_wr(p_hwfn, p_ptt, + attn_blocks[BLOCK_CPMU]. 
+ chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]-> + mask_addr, 0); + /* Currently A0 and B0 interrupt bits are the same in pglue_b; + * If this changes, need to set this according to chip type. <14/09/23> + */ + ecore_wr(p_hwfn, p_ptt, + attn_blocks[BLOCK_PGLUE_B]. + chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].int_regs[0]-> + mask_addr, 0x80000); + + /* initialize port mode to 4x10G_E (10G with 4x10 SERDES) */ + /* CNIG_REG_NW_PORT_MODE is same for A0 and B0 */ + if (!CHIP_REV_IS_EMUL(p_hwfn->p_dev) || !ECORE_IS_AH(p_hwfn->p_dev)) + ecore_wr(p_hwfn, p_ptt, CNIG_REG_NW_PORT_MODE_BB_B0, 4); + + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) { + /* 2 for 4-port, 1 for 2-port, 0 for 1-port */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_PORT_MODE, + (p_hwfn->p_dev->num_ports_in_engines >> 1)); + + ecore_wr(p_hwfn, p_ptt, MISC_REG_BLOCK_256B_EN, + p_hwfn->p_dev->num_ports_in_engines == 4 ? 0 : 3); + } + + /* Poll on RBC */ + ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_RBC_DONE, 1); + for (i = 0; i < 100; i++) { + OSAL_UDELAY(50); + if (ecore_rd(p_hwfn, p_ptt, PSWRQ2_REG_CFG_DONE) == 1) + break; + } + if (i == 100) + DP_NOTICE(p_hwfn, true, + "RBC done failed to complete in PSWRQ2\n"); +} +#endif + +/* Init run time data for all PFs and their VFs on an engine. + * TBD - for VFs - Once we have parent PF info for each VF in + * shmem available as CAU requires knowledge of parent PF for each VF. + */ +static void ecore_init_cau_rt_data(struct ecore_dev *p_dev) +{ + u32 offset = CAU_REG_SB_VAR_MEMORY_RT_OFFSET; + int i, sb_id; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_igu_info *p_igu_info; + struct ecore_igu_block *p_block; + struct cau_sb_entry sb_entry; + + p_igu_info = p_hwfn->hw_info.p_igu_info; + + for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_dev); + sb_id++) { + p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; + + if (!p_block->is_pf) + continue; + + ecore_init_cau_sb_entry(p_hwfn, &sb_entry, + p_block->function_id, 0, 0); + STORE_RT_REG_AGG(p_hwfn, offset + sb_id * 2, sb_entry); + } + } +} + +static enum _ecore_status_t ecore_hw_init_common(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int hw_mode) +{ + struct ecore_qm_info *qm_info = &p_hwfn->qm_info; + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_dev *p_dev = p_hwfn->p_dev; + u8 vf_id, max_num_vfs; + u16 num_pfs, pf_id; + u32 concrete_fid; + + ecore_init_cau_rt_data(p_dev); + + /* Program GTT windows */ + ecore_gtt_init(p_hwfn); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + ecore_hw_init_chip(p_hwfn, p_hwfn->p_main_ptt); +#endif + + if (p_hwfn->mcp_info) { + if (p_hwfn->mcp_info->func_info.bandwidth_max) + qm_info->pf_rl_en = 1; + if (p_hwfn->mcp_info->func_info.bandwidth_min) + qm_info->pf_wfq_en = 1; + } + + ecore_qm_common_rt_init(p_hwfn, + p_hwfn->p_dev->num_ports_in_engines, + qm_info->max_phys_tcs_per_port, + qm_info->pf_rl_en, qm_info->pf_wfq_en, + qm_info->vport_rl_en, qm_info->vport_wfq_en, + qm_info->qm_port_params); + + ecore_cxt_hw_init_common(p_hwfn); + + /* Close gate from NIG to BRB/Storm; By default they are open, but + * we close them to prevent NIG from passing data to reset blocks. + * Should have been done in the ENGINE phase, but init-tool lacks + * proper port-pretend capabilities. 
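ecore_calc_hw_mode() above folds the chip revision, ports per engine and the MF mode into a single hw_mode bitmask, and later init phases test individual bits, for example hw_mode & (1 << MODE_MF_SD) in ecore_hw_init_pf(). A minimal sketch of that pattern, using stand-in bit positions rather than the real MODE_* values:

#include <stdio.h>

/* Stand-in bit positions; the real MODE_* values come from the init tool */
enum { DEMO_MODE_ASIC = 0, DEMO_MODE_MF_SI = 1, DEMO_MODE_MF_SD = 2 };

int main(void)
{
	int hw_mode = 0;

	hw_mode |= 1 << DEMO_MODE_ASIC;
	hw_mode |= 1 << DEMO_MODE_MF_SD;	/* OVLAN multi-function mode */

	if (hw_mode & (1 << DEMO_MODE_MF_SD))
		printf("would program LLH_FUNC_TAG for SD mode\n");
	return 0;
}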
+ */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); + ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); + ecore_port_pretend(p_hwfn, p_ptt, p_hwfn->port_id ^ 1); + ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_BRB_OUT_EN, 0); + ecore_wr(p_hwfn, p_ptt, NIG_REG_STORM_OUT_EN, 0); + ecore_port_unpretend(p_hwfn, p_ptt); + + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_ENGINE, ANY_PHASE_ID, hw_mode); + if (rc != ECORE_SUCCESS) + return rc; + + /* @@TBD MichalK - should add VALIDATE_VFID to init tool... + * need to decide with which value, maybe runtime + */ + ecore_wr(p_hwfn, p_ptt, PSWRQ2_REG_L2P_VALIDATE_VFID, 0); + ecore_wr(p_hwfn, p_ptt, PGLUE_B_REG_USE_CLIENTID_IN_TAG, 1); + + if (ECORE_IS_BB(p_hwfn->p_dev)) { + num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); + if (num_pfs == 1) + return rc; + /* pretend to original PF */ + ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + } + + /* Workaround for avoiding CCFC execution error when getting packets + * with CRC errors, and allowing instead the invoking of the FW error + * handler. + * This is not done inside the init tool since it currently can't + * perform a pretending to VFs. + */ + max_num_vfs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_VFS_K2 + : MAX_NUM_VFS_BB; + for (vf_id = 0; vf_id < max_num_vfs; vf_id++) { + concrete_fid = ecore_vfid_to_concrete(p_hwfn, vf_id); + ecore_fid_pretend(p_hwfn, p_ptt, (u16)concrete_fid); + ecore_wr(p_hwfn, p_ptt, CCFC_REG_STRONG_ENABLE_VF, 0x1); + } + /* pretend to original PF */ + ecore_fid_pretend(p_hwfn, p_ptt, p_hwfn->rel_pf_id); + + return rc; +} + +#ifndef ASIC_ONLY +#define MISC_REG_RESET_REG_2_XMAC_BIT (1 << 4) +#define MISC_REG_RESET_REG_2_XMAC_SOFT_BIT (1 << 5) + +#define PMEG_IF_BYTE_COUNT 8 + +static void ecore_wr_nw_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, u64 data, u8 reg_type, u8 port) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "CMD: %08x, ADDR: 0x%08x, DATA: %08x:%08x\n", + ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) | + (8 << PMEG_IF_BYTE_COUNT), + (reg_type << 25) | (addr << 8) | port, + (u32)((data >> 32) & 0xffffffff), + (u32)(data & 0xffffffff)); + + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0, + (ecore_rd(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_CMD_BB_B0) & + 0xffff00fe) | (8 << PMEG_IF_BYTE_COUNT)); + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_ADDR_BB_B0, + (reg_type << 25) | (addr << 8) | port); + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0, + data & 0xffffffff); + ecore_wr(p_hwfn, p_ptt, CNIG_REG_PMEG_IF_WRDATA_BB_B0, + (data >> 32) & 0xffffffff); +} + +#define XLPORT_MODE_REG (0x20a) +#define XLPORT_MAC_CONTROL (0x210) +#define XLPORT_FLOW_CONTROL_CONFIG (0x207) +#define XLPORT_ENABLE_REG (0x20b) + +#define XLMAC_CTRL (0x600) +#define XLMAC_MODE (0x601) +#define XLMAC_RX_MAX_SIZE (0x608) +#define XLMAC_TX_CTRL (0x604) +#define XLMAC_PAUSE_CTRL (0x60d) +#define XLMAC_PFC_CTRL (0x60e) + +static void ecore_emul_link_init_ah(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u8 port = p_hwfn->port_id; + u32 mac_base = NWM_REG_MAC0 + (port << 2) * NWM_REG_MAC0_SIZE; + + ecore_wr(p_hwfn, p_ptt, CNIG_REG_NIG_PORT0_CONF_K2 + (port << 2), + (1 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT) | + (port << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT) + | (0 << CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT)); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_XIF_MODE, + 1 << ETH_MAC_REG_XIF_MODE_XGMII_SHIFT); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_FRM_LENGTH, + 9018 << ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT); + + ecore_wr(p_hwfn, 
p_ptt, mac_base + ETH_MAC_REG_TX_IPG_LENGTH, + 0xc << ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_RX_FIFO_SECTIONS, + 8 << ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_TX_FIFO_SECTIONS, + (0xA << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT) | + (8 << ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT)); + + ecore_wr(p_hwfn, p_ptt, mac_base + ETH_MAC_REG_COMMAND_CONFIG, 0xa853); +} + +static void ecore_emul_link_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u8 loopback = 0, port = p_hwfn->port_id * 2; + + DP_INFO(p_hwfn->p_dev, "Configurating Emulation Link %02x\n", port); + + if (ECORE_IS_AH(p_hwfn->p_dev)) { + ecore_emul_link_init_ah(p_hwfn, p_ptt); + return; + } + + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MODE_REG, (0x4 << 4) | 0x4, 1, + port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_MAC_CONTROL, 0, 1, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x40, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_MODE, 0x40, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_RX_MAX_SIZE, 0x3fff, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_TX_CTRL, + 0x01000000800ULL | (0xa << 12) | ((u64)1 << 38), + 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PAUSE_CTRL, 0x7c000, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_PFC_CTRL, + 0x30ffffc000ULL, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x3 | (loopback << 2), 0, + port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLMAC_CTRL, 0x1003 | (loopback << 2), + 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_FLOW_CONTROL_CONFIG, 1, 0, port); + ecore_wr_nw_port(p_hwfn, p_ptt, XLPORT_ENABLE_REG, 0xf, 1, port); +} + +static void ecore_link_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 port) +{ + int port_offset = port ? 
0x800 : 0; + u32 xmac_rxctrl = 0; + + /* Reset of XMAC */ + /* FIXME: move to common start */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_BIT); /* Clear */ + OSAL_MSLEEP(1); + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_BIT); /* Set */ + + ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_CORE_PORT_MODE, 1); + + /* Set the number of ports on the Warp Core to 10G */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_XMAC_PHY_PORT_MODE, 3); + + /* Soft reset of XMAC */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + 2 * sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); + OSAL_MSLEEP(1); + ecore_wr(p_hwfn, p_ptt, MISC_REG_RESET_PL_PDA_VAUX + sizeof(u32), + MISC_REG_RESET_REG_2_XMAC_SOFT_BIT); + + /* FIXME: move to common end */ + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) + ecore_wr(p_hwfn, p_ptt, XMAC_REG_MODE + port_offset, 0x20); + + /* Set Max packet size: initialize XMAC block register for port 0 */ + ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_MAX_SIZE + port_offset, 0x2710); + + /* CRC append for Tx packets: init XMAC block register for port 1 */ + ecore_wr(p_hwfn, p_ptt, XMAC_REG_TX_CTRL_LO + port_offset, 0xC800); + + /* Enable TX and RX: initialize XMAC block register for port 1 */ + ecore_wr(p_hwfn, p_ptt, XMAC_REG_CTRL + port_offset, + XMAC_REG_CTRL_TX_EN | XMAC_REG_CTRL_RX_EN); + xmac_rxctrl = ecore_rd(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset); + xmac_rxctrl |= XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE; + ecore_wr(p_hwfn, p_ptt, XMAC_REG_RX_CTRL + port_offset, xmac_rxctrl); +} +#endif + +static enum _ecore_status_t ecore_hw_init_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int hw_mode) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Init sequence */ + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PORT, p_hwfn->port_id, + hw_mode); + if (rc != ECORE_SUCCESS) + return rc; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) + return ECORE_SUCCESS; + + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + if (ECORE_IS_AH(p_hwfn->p_dev)) + return ECORE_SUCCESS; + ecore_link_init(p_hwfn, p_ptt, p_hwfn->port_id); + } else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + if (p_hwfn->p_dev->num_hwfns > 1) { + /* Activate OPTE in CMT */ + u32 val; + + val = ecore_rd(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV); + val |= 0x10; + ecore_wr(p_hwfn, p_ptt, MISCS_REG_RESET_PL_HV, val); + ecore_wr(p_hwfn, p_ptt, MISC_REG_CLK_100G_MODE, 1); + ecore_wr(p_hwfn, p_ptt, MISCS_REG_CLK_100G_MODE, 1); + ecore_wr(p_hwfn, p_ptt, MISC_REG_OPTE_MODE, 1); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH, 1); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_ENG_CLS_ENG_ID_TBL, 0x55555555); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_ENG_CLS_ENG_ID_TBL + 0x4, + 0x55555555); + } + + ecore_emul_link_init(p_hwfn, p_ptt); + } else { + DP_INFO(p_hwfn->p_dev, "link is not being configured\n"); + } +#endif + + return rc; +} + +static enum _ecore_status_t +ecore_hw_init_pf_doorbell_bar(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 pwm_regsize, norm_regsize; + u32 non_pwm_conn, min_addr_reg1; + u32 db_bar_size, n_cpus; + u32 pf_dems_shift; + int rc = ECORE_SUCCESS; + + db_bar_size = ecore_hw_bar_size(p_hwfn, BAR_ID_1); + if (p_hwfn->p_dev->num_hwfns > 1) + db_bar_size /= 2; + + /* Calculate doorbell regions + * ----------------------------------- + * The doorbell BAR is made of two regions. The first is called normal + * region and the second is called PWM region. 
In the normal region + * each ICID has its own set of addresses so that writing to that + * specific address identifies the ICID. In the Process Window Mode + * region the ICID is given in the data written to the doorbell. The + * above per PF register denotes the offset in the doorbell BAR in which + * the PWM region begins. + * The normal region has ECORE_PF_DEMS_SIZE bytes per ICID, that is per + * non-PWM connection. The calculation below computes the total non-PWM + * connections. The DORQ_REG_PF_MIN_ADDR_REG1 register is + * in units of 4,096 bytes. + */ + non_pwm_conn = ecore_cxt_get_proto_cid_start(p_hwfn, PROTOCOLID_CORE) + + ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_CORE, + OSAL_NULL) + + ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, OSAL_NULL); + norm_regsize = ROUNDUP(ECORE_PF_DEMS_SIZE * non_pwm_conn, 4096); + min_addr_reg1 = norm_regsize / 4096; + pwm_regsize = db_bar_size - norm_regsize; + + /* Check that the normal and PWM sizes are valid */ + if (db_bar_size < norm_regsize) { + DP_ERR(p_hwfn->p_dev, + "Doorbell BAR size 0x%x is too" + " small (normal region is 0x%0x )\n", + db_bar_size, norm_regsize); + return ECORE_NORESOURCES; + } + if (pwm_regsize < ECORE_MIN_PWM_REGION) { + DP_ERR(p_hwfn->p_dev, + "PWM region size 0x%0x is too small." + " Should be at least 0x%0x (Doorbell BAR size" + " is 0x%x and normal region size is 0x%0x)\n", + pwm_regsize, ECORE_MIN_PWM_REGION, db_bar_size, + norm_regsize); + return ECORE_NORESOURCES; + } + + /* Update hwfn */ + p_hwfn->dpi_start_offset = norm_regsize; /* this is later used to + * calculate the doorbell + * address + */ + + /* Update registers */ + /* DEMS size is configured log2 of DWORDs, hence the division by 4 */ + pf_dems_shift = OSAL_LOG2(ECORE_PF_DEMS_SIZE / 4); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_ICID_BIT_SHIFT_NORM, pf_dems_shift); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_PF_MIN_ADDR_REG1, min_addr_reg1); + + DP_INFO(p_hwfn, + "Doorbell size 0x%x, Normal region 0x%x, PWM region 0x%x\n", + db_bar_size, norm_regsize, pwm_regsize); + DP_INFO(p_hwfn, "DPI size 0x%x, DPI count 0x%x\n", p_hwfn->dpi_size, + p_hwfn->dpi_count); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_hw_init_pf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_tunn_start_params *p_tunn, + int hw_mode, + bool b_hw_start, + enum ecore_int_mode int_mode, bool allow_npar_tx_switch) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u8 rel_pf_id = p_hwfn->rel_pf_id; + u32 prs_reg; + u16 ctrl; + int pos; + + /* ILT/DQ/CM/QM */ + if (p_hwfn->mcp_info) { + struct ecore_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + if (p_info->bandwidth_min) + p_hwfn->qm_info.pf_wfq = p_info->bandwidth_min; + + /* Update rate limit once we'll actually have a link */ + p_hwfn->qm_info.pf_rl = 100; + } + ecore_cxt_hw_init_pf(p_hwfn); + + ecore_int_igu_init_rt(p_hwfn); /* @@@TBD TODO MichalS multi hwfn ?? */ + + /* Set VLAN in NIG if needed */ + if (hw_mode & (1 << MODE_MF_SD)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "Configuring LLH_FUNC_TAG\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET, 1); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET, + p_hwfn->hw_info.ovlan); + } + + /* Enable classification by MAC if needed */ + if (hw_mode & (1 << MODE_MF_SI)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Configuring TAGMAC_CLS_TYPE\n"); + STORE_RT_REG(p_hwfn, NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET, + 1); + } + + /* Protocl Configuration - @@@TBD - should we set 0 otherwise? 
*/ + STORE_RT_REG(p_hwfn, PRS_REG_SEARCH_TCP_RT_OFFSET, 0); + + /* perform debug configuration when chip is out of reset */ + OSAL_BEFORE_PF_START((void *)p_hwfn->p_dev, p_hwfn->my_id); + + /* Cleanup chip from previous driver if such remains exist */ + rc = ecore_final_cleanup(p_hwfn, p_ptt, rel_pf_id, false); + if (rc != ECORE_SUCCESS) { + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL); + return rc; + } + + /* PF Init sequence */ + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + /* QM_PF Init sequence (may be invoked separately e.g. for DCB) */ + rc = ecore_init_run(p_hwfn, p_ptt, PHASE_QM_PF, rel_pf_id, hw_mode); + if (rc) + return rc; + + /* Pure runtime initializations - directly to the HW */ + ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, true, true); + + /* PCI relaxed ordering causes a decrease in the performance on some + * systems. Till a root cause is found, disable this attribute in the + * PCI config space. + */ + /* Not in use @DPDK + * pos = OSAL_PCI_FIND_CAPABILITY(p_hwfn->p_dev, PCI_CAP_ID_EXP); + * if (!pos) { + * DP_NOTICE(p_hwfn, true, + * "Failed to find the PCI Express" + * " Capability structure in the PCI config space\n"); + * return ECORE_IO; + * } + * OSAL_PCI_READ_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, + * &ctrl); + * ctrl &= ~PCI_EXP_DEVCTL_RELAX_EN; + * OSAL_PCI_WRITE_CONFIG_WORD(p_hwfn->p_dev, pos + PCI_EXP_DEVCTL, + * &ctrl); + */ + +#ifndef ASIC_ONLY + /*@@TMP - On B0 build 1, need to mask the datapath_registers parity */ + if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev) && + (p_hwfn->p_dev->chip_metal == 1)) { + u32 reg_addr, tmp; + + reg_addr = + attn_blocks[BLOCK_PGLUE_B]. + chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)].prty_regs[0]-> + mask_addr; + DP_NOTICE(p_hwfn, false, + "Masking datapath registers parity on" + " B0 emulation [build 1]\n"); + tmp = ecore_rd(p_hwfn, p_ptt, reg_addr); + tmp |= (1 << 0); /* Was PRTY_MASK_DATAPATH_REGISTERS */ + ecore_wr(p_hwfn, p_ptt, reg_addr, tmp); + } +#endif + + rc = ecore_hw_init_pf_doorbell_bar(p_hwfn, p_ptt); + if (rc) + return rc; + + if (b_hw_start) { + /* enable interrupts */ + ecore_int_igu_enable(p_hwfn, p_ptt, int_mode); + + /* send function start command */ + rc = ecore_sp_pf_start(p_hwfn, p_tunn, p_hwfn->p_dev->mf_mode, + allow_npar_tx_switch); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Function start ramrod failed\n"); + } else { + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH register after start PFn\n"); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_UDP: %x\n", prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, + PRS_REG_SEARCH_TCP_FIRST_FRAG); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TCP_FIRST_FRAG: %x\n", + prs_reg); + prs_reg = ecore_rd(p_hwfn, p_ptt, PRS_REG_SEARCH_TAG1); + DP_VERBOSE(p_hwfn, ECORE_MSG_STORAGE, + "PRS_REG_SEARCH_TAG1: %x\n", prs_reg); + } + } + return rc; +} + +static enum _ecore_status_t +ecore_change_pci_hwfn(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 enable) +{ + u32 delay_idx = 0, val, set_val = enable ? 
1 : 0; + + /* Change PF in PXP */ + ecore_wr(p_hwfn, p_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, set_val); + + /* wait until value is set - try for 1 second every 50us */ + for (delay_idx = 0; delay_idx < 20000; delay_idx++) { + val = ecore_rd(p_hwfn, p_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER); + if (val == set_val) + break; + + OSAL_UDELAY(50); + } + + if (val != set_val) { + DP_NOTICE(p_hwfn, true, + "PFID_ENABLE_MASTER wasn't changed after a second\n"); + return ECORE_UNKNOWN_ERROR; + } + + return ECORE_SUCCESS; +} + +static void ecore_reset_mb_shadow(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_main_ptt) +{ + /* Read shadow of current MFW mailbox */ + ecore_mcp_read_mb(p_hwfn, p_main_ptt); + OSAL_MEMCPY(p_hwfn->mcp_info->mfw_mb_shadow, + p_hwfn->mcp_info->mfw_mb_cur, + p_hwfn->mcp_info->mfw_mb_length); +} + +enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, + struct ecore_tunn_start_params *p_tunn, + bool b_hw_start, + enum ecore_int_mode int_mode, + bool allow_npar_tx_switch, + const u8 *bin_fw_data) +{ + enum _ecore_status_t rc, mfw_rc; + u32 load_code, param; + int i, j; + + if (IS_PF(p_dev)) { + rc = ecore_init_fw_data(p_dev, bin_fw_data); + if (rc != ECORE_SUCCESS) + return rc; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (IS_VF(p_dev)) { + rc = ecore_vf_pf_init(p_hwfn); + if (rc) + return rc; + continue; + } + + /* Enable DMAE in PXP */ + rc = ecore_change_pci_hwfn(p_hwfn, p_hwfn->p_main_ptt, true); + + ecore_calc_hw_mode(p_hwfn); + /* @@@TBD need to add here: + * Check for fan failure + * Prev_unload + */ + rc = ecore_mcp_load_req(p_hwfn, p_hwfn->p_main_ptt, &load_code); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed sending LOAD_REQ command\n"); + return rc; + } + + /* CQ75580: + * When coming back from hibernate state, the registers from + * which shadow is read initially are not initialized. It turns + * out that these registers get initialized during the call to + * ecore_mcp_load_req request. So we need to reread them here + * to get the proper shadow register value. + * Note: This is a workaround for the missing MFW + * initialization. It may be removed once the implementation + * is done. + */ + ecore_reset_mb_shadow(p_hwfn, p_hwfn->p_main_ptt); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load request was sent. Resp:0x%x, Load code: 0x%x\n", + rc, load_code); + + /* Only relevant for recovery: + * Clear the indication after the LOAD_REQ command is responded to + * by the MFW.
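The DMAE enable/disable above is a plain write-then-poll: ecore_change_pci_hwfn() writes PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER and then re-reads it every 50 us, giving up after 20,000 iterations (one second). A minimal standalone sketch of that pattern follows; reg_write(), reg_read() and sleep_us() are hypothetical stand-ins for ecore_wr(), ecore_rd() and OSAL_UDELAY(), the register is faked with a plain variable, and only the loop structure is taken from the driver.

#include <stdbool.h>
#include <stdint.h>

static uint32_t fake_reg;                       /* stands in for the HW register */

static void reg_write(uint32_t val)             /* hypothetical ecore_wr() */
{
        fake_reg = val;
}

static uint32_t reg_read(void)                  /* hypothetical ecore_rd() */
{
        return fake_reg;
}

static void sleep_us(unsigned int us)           /* hypothetical OSAL_UDELAY() */
{
        (void)us;
}

static bool write_and_poll(uint32_t set_val)
{
        uint32_t val = 0;
        unsigned int idx;

        reg_write(set_val);

        /* 20,000 polls * 50 us = one second, mirroring the driver's loop */
        for (idx = 0; idx < 20000; idx++) {
                val = reg_read();
                if (val == set_val)
                        return true;
                sleep_us(50);
        }

        return false;   /* the driver reports ECORE_UNKNOWN_ERROR at this point */
}

int main(void)
{
        return write_and_poll(1) ? 0 : 1;
}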
+ */ + p_dev->recov_in_prog = false; + + p_hwfn->first_on_engine = (load_code == + FW_MSG_CODE_DRV_LOAD_ENGINE); + + switch (load_code) { + case FW_MSG_CODE_DRV_LOAD_ENGINE: + rc = ecore_hw_init_common(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc) + break; + /* Fall into */ + case FW_MSG_CODE_DRV_LOAD_PORT: + rc = ecore_hw_init_port(p_hwfn, p_hwfn->p_main_ptt, + p_hwfn->hw_info.hw_mode); + if (rc) + break; + + if (ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) { + struct init_nig_pri_tc_map_req tc_map; + + OSAL_MEM_ZERO(&tc_map, sizeof(tc_map)); + + /* remove this once flow control is + * implemented + */ + for (j = 0; j < NUM_OF_VLAN_PRIORITIES; j++) { + tc_map.pri[j].tc_id = 0; + tc_map.pri[j].valid = 1; + } + ecore_init_nig_pri_tc_map(p_hwfn, + p_hwfn->p_main_ptt, + &tc_map); + } + /* fallthrough */ + case FW_MSG_CODE_DRV_LOAD_FUNCTION: + rc = ecore_hw_init_pf(p_hwfn, p_hwfn->p_main_ptt, + p_tunn, p_hwfn->hw_info.hw_mode, + b_hw_start, int_mode, + allow_npar_tx_switch); + break; + default: + rc = ECORE_NOTIMPL; + break; + } + + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, true, + "init phase failed loadcode 0x%x (rc %d)\n", + load_code, rc); + + /* ACK mfw regardless of success or failure of initialization */ + mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_LOAD_DONE, + 0, &load_code, ¶m); + if (rc != ECORE_SUCCESS) + return rc; + if (mfw_rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "Failed sending LOAD_DONE command\n"); + return mfw_rc; + } + + /* send DCBX attention request command */ + DP_VERBOSE(p_hwfn, ECORE_MSG_DCB, + "sending phony dcbx set command to trigger DCBx" + " attention handling\n"); + mfw_rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_SET_DCBX, + 1 << DRV_MB_PARAM_DCBX_NOTIFY_SHIFT, + &load_code, ¶m); + if (mfw_rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "Failed to send DCBX attention request\n"); + return mfw_rc; + } + + p_hwfn->hw_init_done = true; + } + + return ECORE_SUCCESS; +} + +#define ECORE_HW_STOP_RETRY_LIMIT (10) +static OSAL_INLINE void ecore_hw_timers_stop(struct ecore_dev *p_dev, + struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + int i; + + /* close timers */ + ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_CONN, 0x0); + ecore_wr(p_hwfn, p_ptt, TM_REG_PF_ENABLE_TASK, 0x0); + for (i = 0; i < ECORE_HW_STOP_RETRY_LIMIT && + !p_dev->recov_in_prog; i++) { + if ((!ecore_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN)) && + (!ecore_rd(p_hwfn, p_ptt, TM_REG_PF_SCAN_ACTIVE_TASK))) + break; + + /* Dependent on number of connection/tasks, possibly + * 1ms sleep is required between polls + */ + OSAL_MSLEEP(1); + } + if (i == ECORE_HW_STOP_RETRY_LIMIT) + DP_NOTICE(p_hwfn, true, + "Timers linear scans are not over" + " [Connection %02x Tasks %02x]\n", + (u8)ecore_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_CONN), + (u8)ecore_rd(p_hwfn, p_ptt, + TM_REG_PF_SCAN_ACTIVE_TASK)); +} + +void ecore_hw_timers_stop_all(struct ecore_dev *p_dev) +{ + int j; + + for_each_hwfn(p_dev, j) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + + ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); + } +} + +enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev) +{ + enum _ecore_status_t rc = ECORE_SUCCESS, t_rc; + int j; + + for_each_hwfn(p_dev, j) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Stopping hw/fw\n"); + + if (IS_VF(p_dev)) { + ecore_vf_pf_int_cleanup(p_hwfn); + continue; + } + + /* 
mark the hw as uninitialized... */ + p_hwfn->hw_init_done = false; + + rc = ecore_sp_pf_stop(p_hwfn); + if (rc) + DP_NOTICE(p_hwfn, true, + "Failed to close PF against FW. Continue to" + " stop HW to prevent illegal host access" + " by the device\n"); + + /* perform debug action after PF stop was sent */ + OSAL_AFTER_PF_STOP((void *)p_hwfn->p_dev, p_hwfn->my_id); + + /* close NIG to BRB gate */ + ecore_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + /* close parser */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + /* @@@TBD - clean transmission queues (5.b) */ + /* @@@TBD - clean BTB (5.c) */ + + ecore_hw_timers_stop(p_dev, p_hwfn, p_ptt); + + /* @@@TBD - verify DMAE requests are done (8) */ + + /* Disable Attention Generation */ + ecore_int_igu_disable_int(p_hwfn, p_ptt); + ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0); + ecore_wr(p_hwfn, p_ptt, IGU_REG_TRAILING_EDGE_LATCH, 0); + ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, true); + /* Need to wait 1ms to guarantee SBs are cleared */ + OSAL_MSLEEP(1); + } + + if (IS_PF(p_dev)) { + /* Disable DMAE in PXP - in CMT, this should only be done for + * first hw-function, and only after all transactions have + * stopped for all active hw-functions. + */ + t_rc = ecore_change_pci_hwfn(&p_dev->hwfns[0], + p_dev->hwfns[0].p_main_ptt, false); + if (t_rc != ECORE_SUCCESS) + rc = t_rc; + } + + return rc; +} + +void ecore_hw_stop_fastpath(struct ecore_dev *p_dev) +{ + int j; + + for_each_hwfn(p_dev, j) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + + if (IS_VF(p_dev)) { + ecore_vf_pf_int_cleanup(p_hwfn); + continue; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, + "Shutting down the fastpath\n"); + + ecore_wr(p_hwfn, p_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x1); + + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_TCP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_UDP, 0x0); + ecore_wr(p_hwfn, p_ptt, PRS_REG_SEARCH_OPENFLOW, 0x0); + + /* @@@TBD - clean transmission queues (5.b) */ + /* @@@TBD - clean BTB (5.c) */ + + /* @@@TBD - verify DMAE requests are done (8) */ + + ecore_int_igu_init_pure_rt(p_hwfn, p_ptt, false, false); + /* Need to wait 1ms to guarantee SBs are cleared */ + OSAL_MSLEEP(1); + } +} + +void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt *p_ptt = p_hwfn->p_main_ptt; + + if (IS_VF(p_hwfn->p_dev)) + return; + + /* Re-open incoming traffic */ + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF, 0x0); +} + +static enum _ecore_status_t ecore_reg_assert(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 reg, + bool expected) +{ + u32 assert_val = ecore_rd(p_hwfn, p_ptt, reg); + + if (assert_val != expected) { + DP_NOTICE(p_hwfn, true, "Value at address 0x%08x != 0x%08x\n", + reg, expected); + return ECORE_UNKNOWN_ERROR; + } + + return 0; +} + +enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 unload_resp, unload_param; + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (IS_VF(p_dev)) { + rc = ecore_vf_pf_reset(p_hwfn); + if (rc) + return rc; + continue; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, "Resetting hw/fw\n"); + + /* Check for incorrect states */ + if (!p_dev->recov_in_prog) { + ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt, + QM_REG_USG_CNT_PF_TX, 0); + 
ecore_reg_assert(p_hwfn, p_hwfn->p_main_ptt, + QM_REG_USG_CNT_PF_OTHER, 0); + /* @@@TBD - assert on incorrect xCFC values (10.b) */ + } + + /* Disable PF in HW blocks */ + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, DORQ_REG_PF_DB_ENABLE, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, QM_REG_PF_EN, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + TCFC_REG_STRONG_ENABLE_PF, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + CCFC_REG_STRONG_ENABLE_PF, 0); + + if (p_dev->recov_in_prog) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, + "Recovery is in progress -> skip " + "sending unload_req/done\n"); + break; + } + + /* Send unload command to MCP */ + rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_UNLOAD_REQ, + DRV_MB_PARAM_UNLOAD_WOL_MCP, + &unload_resp, &unload_param); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "ecore_hw_reset: UNLOAD_REQ failed\n"); + /* @@TBD - what to do? for now, assume ENG. */ + unload_resp = FW_MSG_CODE_DRV_UNLOAD_ENGINE; + } + + rc = ecore_mcp_cmd(p_hwfn, p_hwfn->p_main_ptt, + DRV_MSG_CODE_UNLOAD_DONE, + 0, &unload_resp, &unload_param); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, + true, "ecore_hw_reset: UNLOAD_DONE failed\n"); + /* @@@TBD - Should it really ASSERT here ? */ + return rc; + } + } + + return rc; +} + +/* Free hwfn memory and resources acquired in hw_hwfn_prepare */ +static void ecore_hw_hwfn_free(struct ecore_hwfn *p_hwfn) +{ + ecore_ptt_pool_free(p_hwfn); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->hw_info.p_igu_info); +} + +/* Setup bar access */ +static void ecore_hw_hwfn_prepare(struct ecore_hwfn *p_hwfn) +{ + /* clear indirect access */ + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_88_F0, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_8C_F0, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_90_F0, 0); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PGLUE_B_REG_PGL_ADDR_94_F0, 0); + + /* Clean Previous errors if such exist */ + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR, 1 << p_hwfn->abs_pf_id); + + /* enable internal target-read */ + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ, 1); +} + +static void get_function_id(struct ecore_hwfn *p_hwfn) +{ + /* ME Register */ + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_PF_ME_OPAQUE_ADDR); + + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, PXP_PF_ME_CONCRETE_ADDR); + + /* Bits 16-19 from the ME registers are the pf_num */ + /* @@ @TBD - check, may be wrong after B0 implementation for CMT */ + p_hwfn->abs_pf_id = (p_hwfn->hw_info.concrete_fid >> 16) & 0xf; + p_hwfn->rel_pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PFID); + p_hwfn->port_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, + PXP_CONCRETE_FID_PORT); + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, + "Read ME register: Concrete 0x%08x Opaque 0x%04x\n", + p_hwfn->hw_info.concrete_fid, p_hwfn->hw_info.opaque_fid); +} + +static void ecore_hw_set_feat(struct ecore_hwfn *p_hwfn) +{ + u32 *feat_num = p_hwfn->hw_info.feat_num; + int num_features = 1; + + /* L2 Queues require each: 1 status block. 1 L2 queue */ + feat_num[ECORE_PF_L2_QUE] = + OSAL_MIN_T(u32, + RESC_NUM(p_hwfn, ECORE_SB) / num_features, + RESC_NUM(p_hwfn, ECORE_L2_QUEUE)); + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, + "#PF_L2_QUEUES=%d #SBS=%d num_features=%d\n", + feat_num[ECORE_PF_L2_QUE], + RESC_NUM(p_hwfn, ECORE_SB), num_features); +} + +/* @@@TBD MK RESC: This info is currently hard code and set as if we were MF + * need to read it from shmem... 
+ */ +static enum _ecore_status_t ecore_hw_get_resc(struct ecore_hwfn *p_hwfn) +{ + u32 *resc_start = p_hwfn->hw_info.resc_start; + u8 num_funcs = p_hwfn->num_funcs_on_engine; + u32 *resc_num = p_hwfn->hw_info.resc_num; + int i, max_vf_vlan_filters; + struct ecore_sb_cnt_info sb_cnt_info; + bool b_ah = ECORE_IS_AH(p_hwfn->p_dev); + + OSAL_MEM_ZERO(&sb_cnt_info, sizeof(sb_cnt_info)); + +#ifdef CONFIG_ECORE_SRIOV + max_vf_vlan_filters = ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS; +#else + max_vf_vlan_filters = 0; +#endif + + ecore_int_get_num_sbs(p_hwfn, &sb_cnt_info); + resc_num[ECORE_SB] = OSAL_MIN_T(u32, + (MAX_SB_PER_PATH_BB / num_funcs), + sb_cnt_info.sb_cnt); + + resc_num[ECORE_L2_QUEUE] = (b_ah ? MAX_NUM_L2_QUEUES_K2 : + MAX_NUM_L2_QUEUES_BB) / num_funcs; + resc_num[ECORE_VPORT] = (b_ah ? MAX_NUM_VPORTS_K2 : + MAX_NUM_VPORTS_BB) / num_funcs; + resc_num[ECORE_RSS_ENG] = (b_ah ? ETH_RSS_ENGINE_NUM_K2 : + ETH_RSS_ENGINE_NUM_BB) / num_funcs; + resc_num[ECORE_PQ] = (b_ah ? MAX_QM_TX_QUEUES_K2 : + MAX_QM_TX_QUEUES_BB) / num_funcs; + resc_num[ECORE_RL] = 8; + resc_num[ECORE_MAC] = ETH_NUM_MAC_FILTERS / num_funcs; + resc_num[ECORE_VLAN] = (ETH_NUM_VLAN_FILTERS - + max_vf_vlan_filters + + 1 /*For vlan0 */) / num_funcs; + + /* TODO - there will be a problem in AH - there are only 11k lines */ + resc_num[ECORE_ILT] = (b_ah ? PXP_NUM_ILT_RECORDS_K2 : + PXP_NUM_ILT_RECORDS_BB) / num_funcs; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + /* Reduced build contains less PQs */ + if (!(p_hwfn->p_dev->b_is_emul_full)) + resc_num[ECORE_PQ] = 32; + + /* For AH emulation, since we have a possible maximal number of + * 16 enabled PFs, in case there are not enough ILT lines - + * allocate only first PF as RoCE and have all the other ETH + * only with less ILT lines. 
+ */ + if (!p_hwfn->rel_pf_id && p_hwfn->p_dev->b_is_emul_full) + resc_num[ECORE_ILT] = resc_num[ECORE_ILT]; + } +#endif + + for (i = 0; i < ECORE_MAX_RESC; i++) + resc_start[i] = resc_num[i] * p_hwfn->rel_pf_id; + +#ifndef ASIC_ONLY + /* Correct the common ILT calculation if PF0 has more */ + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev) && + p_hwfn->p_dev->b_is_emul_full && + p_hwfn->rel_pf_id && resc_num[ECORE_ILT]) + resc_start[ECORE_ILT] += resc_num[ECORE_ILT]; +#endif + + /* Sanity for ILT */ + if ((b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_K2)) || + (!b_ah && (RESC_END(p_hwfn, ECORE_ILT) > PXP_NUM_ILT_RECORDS_BB))) { + DP_NOTICE(p_hwfn, true, + "Can't assign ILT pages [%08x,...,%08x]\n", + RESC_START(p_hwfn, ECORE_ILT), RESC_END(p_hwfn, + ECORE_ILT) - + 1); + return ECORE_INVAL; + } + + ecore_hw_set_feat(p_hwfn); + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, + "The numbers for each resource are:\n" + "SB = %d start = %d\n" + "L2_QUEUE = %d start = %d\n" + "VPORT = %d start = %d\n" + "PQ = %d start = %d\n" + "RL = %d start = %d\n" + "MAC = %d start = %d\n" + "VLAN = %d start = %d\n" + "ILT = %d start = %d\n" + "CMDQS_CQS = %d start = %d\n", + RESC_NUM(p_hwfn, ECORE_SB), RESC_START(p_hwfn, ECORE_SB), + RESC_NUM(p_hwfn, ECORE_L2_QUEUE), + RESC_START(p_hwfn, ECORE_L2_QUEUE), + RESC_NUM(p_hwfn, ECORE_VPORT), + RESC_START(p_hwfn, ECORE_VPORT), + RESC_NUM(p_hwfn, ECORE_PQ), RESC_START(p_hwfn, ECORE_PQ), + RESC_NUM(p_hwfn, ECORE_RL), RESC_START(p_hwfn, ECORE_RL), + RESC_NUM(p_hwfn, ECORE_MAC), RESC_START(p_hwfn, ECORE_MAC), + RESC_NUM(p_hwfn, ECORE_VLAN), + RESC_START(p_hwfn, ECORE_VLAN), + RESC_NUM(p_hwfn, ECORE_ILT), RESC_START(p_hwfn, ECORE_ILT), + RESC_NUM(p_hwfn, ECORE_CMDQS_CQS), + RESC_START(p_hwfn, ECORE_CMDQS_CQS)); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_hw_get_nvm_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 nvm_cfg1_offset, mf_mode, addr, generic_cont0, core_cfg; + u32 port_cfg_addr, link_temp, device_capabilities; + struct ecore_mcp_link_params *link; + + /* Read global nvm_cfg address */ + u32 nvm_cfg_addr = ecore_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0); + + /* Verify MCP has initialized it */ + if (nvm_cfg_addr == 0) { + DP_NOTICE(p_hwfn, false, "Shared memory not initialized\n"); + return ECORE_INVAL; + } + + /* Read nvm_cfg1 (Notice this is just offset, and not offsize (TBD) */ + nvm_cfg1_offset = ecore_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4); + + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, glob) + OFFSETOF(struct nvm_cfg1_glob, + core_cfg); + + core_cfg = ecore_rd(p_hwfn, p_ptt, addr); + + switch ((core_cfg & NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK) >> + NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET) { + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X40G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X50G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X100G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_F; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X10G_E; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_4X20G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X40G; + break; + case 
NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_2X25G; + break; + case NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G: + p_hwfn->hw_info.port_mode = ECORE_PORT_MODE_DE_1X25G; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown port mode in 0x%08x\n", + core_cfg); + break; + } + + /* Read default link configuration */ + link = &p_hwfn->mcp_info->link_input; + port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]); + link_temp = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + OFFSETOF(struct nvm_cfg1_port, speed_cap_mask)); + link_temp &= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK; + link->speed.advertised_speeds = link_temp; + + link_temp = link->speed.advertised_speeds; + p_hwfn->mcp_info->link_capabilities.speed_capabilities = link_temp; + + link_temp = ecore_rd(p_hwfn, p_ptt, + port_cfg_addr + + OFFSETOF(struct nvm_cfg1_port, link_settings)); + switch ((link_temp & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >> + NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET) { + case NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG: + link->speed.autoneg = true; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_1G: + link->speed.forced_speed = 1000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_10G: + link->speed.forced_speed = 10000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_25G: + link->speed.forced_speed = 25000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_40G: + link->speed.forced_speed = 40000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_50G: + link->speed.forced_speed = 50000; + break; + case NVM_CFG1_PORT_DRV_LINK_SPEED_100G: + link->speed.forced_speed = 100000; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown Speed in 0x%08x\n", link_temp); + } + + link_temp &= NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK; + link_temp >>= NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET; + link->pause.autoneg = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG); + link->pause.forced_rx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX); + link->pause.forced_tx = !!(link_temp & + NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX); + link->loopback_mode = 0; + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Read default link: Speed 0x%08x, Adv. 
Speed 0x%08x," + " AN: 0x%02x, PAUSE AN: 0x%02x\n", + link->speed.forced_speed, link->speed.advertised_speeds, + link->speed.autoneg, link->pause.autoneg); + + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, glob) + + OFFSETOF(struct nvm_cfg1_glob, generic_cont0); + + generic_cont0 = ecore_rd(p_hwfn, p_ptt, addr); + + mf_mode = (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >> + NVM_CFG1_GLOB_MF_MODE_OFFSET; + + switch (mf_mode) { + case NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED: + p_hwfn->p_dev->mf_mode = ECORE_MF_OVLAN; + break; + case NVM_CFG1_GLOB_MF_MODE_NPAR1_0: + p_hwfn->p_dev->mf_mode = ECORE_MF_NPAR; + break; + case NVM_CFG1_GLOB_MF_MODE_DEFAULT: + p_hwfn->p_dev->mf_mode = ECORE_MF_DEFAULT; + break; + } + DP_INFO(p_hwfn, "Multi function mode is %08x\n", + p_hwfn->p_dev->mf_mode); + + /* Read Multi-function information from shmem */ + addr = MCP_REG_SCRATCH + nvm_cfg1_offset + + OFFSETOF(struct nvm_cfg1, glob) + + OFFSETOF(struct nvm_cfg1_glob, device_capabilities); + + device_capabilities = ecore_rd(p_hwfn, p_ptt, addr); + if (device_capabilities & NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET) + OSAL_SET_BIT(ECORE_DEV_CAP_ETH, + &p_hwfn->hw_info.device_capabilities); + + return ecore_mcp_fill_shmem_func_info(p_hwfn, p_ptt); +} + +static void ecore_get_num_funcs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u8 num_funcs; + u32 tmp, mask; + + num_funcs = ECORE_IS_AH(p_hwfn->p_dev) ? MAX_NUM_PFS_K2 + : MAX_NUM_PFS_BB; + + /* Bit 0 of MISCS_REG_FUNCTION_HIDE indicates whether the bypass values + * in the other bits are selected. + * Bits 1-15 are for functions 1-15, respectively, and their value is + * '0' only for enabled functions (function 0 always exists and + * enabled). + * In case of CMT, only the "even" functions are enabled, and thus the + * number of functions for both hwfns is learnt from the same bits. 
+ */ + + tmp = ecore_rd(p_hwfn, p_ptt, MISCS_REG_FUNCTION_HIDE); + if (tmp & 0x1) { + if (ECORE_PATH_ID(p_hwfn) && p_hwfn->p_dev->num_hwfns == 1) { + num_funcs = 0; + mask = 0xaaaa; + } else { + num_funcs = 1; + mask = 0x5554; + } + + tmp = (tmp ^ 0xffffffff) & mask; + while (tmp) { + if (tmp & 0x1) + num_funcs++; + tmp >>= 0x1; + } + } + + p_hwfn->num_funcs_on_engine = num_funcs; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, + "FPGA: Limit number of PFs to 4 [would affect" + " resource allocation, needed for IOV]\n"); + p_hwfn->num_funcs_on_engine = 4; + } +#endif + + DP_VERBOSE(p_hwfn, ECORE_MSG_PROBE, "num_funcs_on_engine = %d\n", + p_hwfn->num_funcs_on_engine); +} + +static void ecore_hw_info_port_num_bb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 port_mode; + +#ifndef ASIC_ONLY + /* Read the port mode */ + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) + port_mode = 4; + else if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && + (p_hwfn->p_dev->num_hwfns > 1)) + /* In CMT on emulation, assume 1 port */ + port_mode = 1; + else +#endif + port_mode = ecore_rd(p_hwfn, p_ptt, + CNIG_REG_NW_PORT_MODE_BB_B0); + + if (port_mode < 3) { + p_hwfn->p_dev->num_ports_in_engines = 1; + } else if (port_mode <= 5) { + p_hwfn->p_dev->num_ports_in_engines = 2; + } else { + DP_NOTICE(p_hwfn, true, "PORT MODE: %d not supported\n", + p_hwfn->p_dev->num_ports_in_engines); + + /* Default num_ports_in_engines to something */ + p_hwfn->p_dev->num_ports_in_engines = 1; + } +} + +static void ecore_hw_info_port_num_ah(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 port; + int i; + + p_hwfn->p_dev->num_ports_in_engines = 0; + + for (i = 0; i < MAX_NUM_PORTS_K2; i++) { + port = ecore_rd(p_hwfn, p_ptt, + CNIG_REG_NIG_PORT0_CONF_K2 + (i * 4)); + if (port & 1) + p_hwfn->p_dev->num_ports_in_engines++; + } +} + +static void ecore_hw_info_port_num(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + if (ECORE_IS_BB(p_hwfn->p_dev)) + ecore_hw_info_port_num_bb(p_hwfn, p_ptt); + else + ecore_hw_info_port_num_ah(p_hwfn, p_ptt); +} + +static enum _ecore_status_t +ecore_get_hw_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_pci_personality personality) +{ + enum _ecore_status_t rc; + + rc = ecore_iov_hw_info(p_hwfn, p_hwfn->p_main_ptt); + if (rc) + return rc; + + /* TODO In get_hw_info, amoungst others: + * Get MCP FW revision and determine according to it the supported + * featrues (e.g. DCB) + * Get boot mode + * ecore_get_pcie_width_speed, WOL capability. 
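The MISCS_REG_FUNCTION_HIDE decoding in ecore_get_num_funcs() above boils down to a popcount over the non-hidden function bits selected by the engine mask. The standalone illustration below follows the common path (count starts at 1 for function 0, engine mask 0x5554); the register value used in main() and the default of 8 standing in for MAX_NUM_PFS_BB are assumptions made only for the example.

#include <stdint.h>
#include <stdio.h>

static unsigned int decode_num_funcs(uint32_t function_hide)
{
        unsigned int num_funcs = 8;     /* assumed default (MAX_NUM_PFS_BB) */
        uint32_t tmp, mask = 0x5554;    /* engine mask from the driver's else path */

        if (!(function_hide & 0x1))
                return num_funcs;       /* bit 0 clear: per-function bits not valid */

        num_funcs = 1;                  /* function 0 always exists and is enabled */
        tmp = (function_hide ^ 0xffffffff) & mask;
        while (tmp) {                   /* count the additional enabled functions */
                if (tmp & 0x1)
                        num_funcs++;
                tmp >>= 0x1;
        }

        return num_funcs;
}

int main(void)
{
        /* made-up value: bit 0 set, bits 2 and 4 clear (two more enabled PFs) */
        uint32_t reg = 0xffeb;

        printf("num_funcs_on_engine = %u\n", decode_num_funcs(reg)); /* prints 3 */
        return 0;
}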
+ * Number of global CQ-s (for storage + */ + ecore_hw_info_port_num(p_hwfn, p_ptt); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_ASIC(p_hwfn->p_dev)) +#endif + ecore_hw_get_nvm_info(p_hwfn, p_ptt); + + rc = ecore_int_igu_read_cam(p_hwfn, p_ptt); + if (rc) + return rc; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_ASIC(p_hwfn->p_dev) && ecore_mcp_is_init(p_hwfn)) { +#endif + OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, + p_hwfn->mcp_info->func_info.mac, ETH_ALEN); +#ifndef ASIC_ONLY + } else { + static u8 mcp_hw_mac[6] = { 0, 2, 3, 4, 5, 6 }; + + OSAL_MEMCPY(p_hwfn->hw_info.hw_mac_addr, mcp_hw_mac, ETH_ALEN); + p_hwfn->hw_info.hw_mac_addr[5] = p_hwfn->abs_pf_id; + } +#endif + + if (ecore_mcp_is_init(p_hwfn)) { + if (p_hwfn->mcp_info->func_info.ovlan != ECORE_MCP_VLAN_UNSET) + p_hwfn->hw_info.ovlan = + p_hwfn->mcp_info->func_info.ovlan; + + ecore_mcp_cmd_port_init(p_hwfn, p_ptt); + } + + if (personality != ECORE_PCI_DEFAULT) + p_hwfn->hw_info.personality = personality; + else if (ecore_mcp_is_init(p_hwfn)) + p_hwfn->hw_info.personality = + p_hwfn->mcp_info->func_info.protocol; + +#ifndef ASIC_ONLY + /* To overcome ILT lack for emulation, until at least until we'll have + * a definite answer from system about it, allow only PF0 to be RoCE. + */ + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev) && ECORE_IS_AH(p_hwfn->p_dev)) + p_hwfn->hw_info.personality = ECORE_PCI_ETH; +#endif + + ecore_get_num_funcs(p_hwfn, p_ptt); + + /* Feat num is dependent on personality and on the number of functions + * on the engine. Therefore it should be come after personality + * initialization and after getting the number of functions. + */ + return ecore_hw_get_resc(p_hwfn); +} + +/* @TMP - this should move to a proper .h */ +#define CHIP_NUM_AH 0x8070 + +static enum _ecore_status_t ecore_get_dev_info(struct ecore_dev *p_dev) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + u32 tmp; + + /* Read Vendor Id / Device Id */ + OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_VENDOR_ID_OFFSET, + &p_dev->vendor_id); + OSAL_PCI_READ_CONFIG_WORD(p_dev, PCICFG_DEVICE_ID_OFFSET, + &p_dev->device_id); + + p_dev->chip_num = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, + MISCS_REG_CHIP_NUM); + p_dev->chip_rev = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, + MISCS_REG_CHIP_REV); + + MASK_FIELD(CHIP_REV, p_dev->chip_rev); + + /* Determine type */ + if (p_dev->device_id == CHIP_NUM_AH) + p_dev->type = ECORE_DEV_TYPE_AH; + else + p_dev->type = ECORE_DEV_TYPE_BB; + + /* Learn number of HW-functions */ + tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, + MISCS_REG_CMT_ENABLED_FOR_PAIR); + + if (tmp & (1 << p_hwfn->rel_pf_id)) { + DP_NOTICE(p_dev->hwfns, false, "device in CMT mode\n"); + p_dev->num_hwfns = 2; + } else { + p_dev->num_hwfns = 1; + } + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_dev)) { + /* For some reason we have problems with this register + * in B0 emulation; Simply assume no CMT + */ + DP_NOTICE(p_dev->hwfns, false, + "device on emul - assume no CMT\n"); + p_dev->num_hwfns = 1; + } +#endif + + p_dev->chip_bond_id = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, + MISCS_REG_CHIP_TEST_REG) >> 4; + MASK_FIELD(CHIP_BOND_ID, p_dev->chip_bond_id); + p_dev->chip_metal = (u16)ecore_rd(p_hwfn, p_hwfn->p_main_ptt, + MISCS_REG_CHIP_METAL); + MASK_FIELD(CHIP_METAL, p_dev->chip_metal); + DP_INFO(p_dev->hwfns, + "Chip details - %s%d, Num: %04x Rev: %04x Bond id: %04x" + " Metal: %04x\n", + ECORE_IS_BB(p_dev) ? "BB" : "AH", + CHIP_REV_IS_A0(p_dev) ? 
0 : 1, + p_dev->chip_num, p_dev->chip_rev, p_dev->chip_bond_id, + p_dev->chip_metal); + + if (ECORE_IS_BB(p_dev) && CHIP_REV_IS_A0(p_dev)) { + DP_NOTICE(p_dev->hwfns, false, + "The chip type/rev (BB A0) is not supported!\n"); + return ECORE_ABORTED; + } +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_dev) && ECORE_IS_AH(p_dev)) + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + MISCS_REG_PLL_MAIN_CTRL_4, 0x1); + + if (CHIP_REV_IS_EMUL(p_dev)) { + tmp = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, + MISCS_REG_ECO_RESERVED); + if (tmp & (1 << 29)) { + DP_NOTICE(p_hwfn, false, + "Emulation: Running on a FULL build\n"); + p_dev->b_is_emul_full = true; + } else { + DP_NOTICE(p_hwfn, false, + "Emulation: Running on a REDUCED build\n"); + } + } +#endif + + return ECORE_SUCCESS; +} + +void ecore_prepare_hibernate(struct ecore_dev *p_dev) +{ + int j; + + if (IS_VF(p_dev)) + return; + + for_each_hwfn(p_dev, j) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[j]; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFDOWN, + "Mark hw/fw uninitialized\n"); + + p_hwfn->hw_init_done = false; + p_hwfn->first_on_engine = false; + } +} + +static enum _ecore_status_t +ecore_hw_prepare_single(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM *p_regview, + void OSAL_IOMEM *p_doorbells, + enum ecore_pci_personality personality) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Split PCI bars evenly between hwfns */ + p_hwfn->regview = p_regview; + p_hwfn->doorbells = p_doorbells; + + /* Validate that chip access is feasible */ + if (REG_RD(p_hwfn, PXP_PF_ME_OPAQUE_ADDR) == 0xffffffff) { + DP_ERR(p_hwfn, + "Reading the ME register returns all Fs;" + " Preventing further chip access\n"); + return ECORE_INVAL; + } + + get_function_id(p_hwfn); + + /* Allocate PTT pool */ + rc = ecore_ptt_pool_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, true, "Failed to prepare hwfn's hw\n"); + goto err0; + } + + /* Allocate the main PTT */ + p_hwfn->p_main_ptt = ecore_get_reserved_ptt(p_hwfn, RESERVED_PTT_MAIN); + + /* First hwfn learns basic information, e.g., number of hwfns */ + if (!p_hwfn->my_id) { + rc = ecore_get_dev_info(p_hwfn->p_dev); + if (rc != ECORE_SUCCESS) + goto err1; + } + + ecore_hw_hwfn_prepare(p_hwfn); + + /* Initialize MCP structure */ + rc = ecore_mcp_cmd_init(p_hwfn, p_hwfn->p_main_ptt); + if (rc) { + DP_NOTICE(p_hwfn, true, "Failed initializing mcp command\n"); + goto err1; + } + + /* Read the device configuration information from the HW and SHMEM */ + rc = ecore_get_hw_info(p_hwfn, p_hwfn->p_main_ptt, personality); + if (rc) { + DP_NOTICE(p_hwfn, true, "Failed to get HW information\n"); + goto err2; + } + + /* Allocate the init RT array and initialize the init-ops engine */ + rc = ecore_init_alloc(p_hwfn); + if (rc) { + DP_NOTICE(p_hwfn, true, "Failed to allocate the init array\n"); + goto err2; + } +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, + "FPGA: workaround; Prevent DMAE parities\n"); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, PCIE_REG_PRTY_MASK, 7); + + DP_NOTICE(p_hwfn, false, + "FPGA: workaround: Set VF bar0 size\n"); + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_VF_BAR0_SIZE, 4); + } +#endif + + return rc; +err2: + ecore_mcp_free(p_hwfn); +err1: + ecore_hw_hwfn_free(p_hwfn); +err0: + return rc; +} + +enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + enum _ecore_status_t rc; + + if (IS_VF(p_dev)) + return ecore_vf_hw_prepare(p_dev); + + /* Store the precompiled init data ptrs */ + 
ecore_init_iro_array(p_dev); + + /* Initialize the first hwfn - will learn number of hwfns */ + rc = ecore_hw_prepare_single(p_hwfn, + p_dev->regview, + p_dev->doorbells, personality); + if (rc != ECORE_SUCCESS) + return rc; + + personality = p_hwfn->hw_info.personality; + + /* initialalize 2nd hwfn if necessary */ + if (p_dev->num_hwfns > 1) { + void OSAL_IOMEM *p_regview, *p_doorbell; + u8 OSAL_IOMEM *addr; + + /* adjust bar offset for second engine */ + addr = (u8 OSAL_IOMEM *)p_dev->regview + + ecore_hw_bar_size(p_hwfn, BAR_ID_0) / 2; + p_regview = (void OSAL_IOMEM *)addr; + + addr = (u8 OSAL_IOMEM *)p_dev->doorbells + + ecore_hw_bar_size(p_hwfn, BAR_ID_1) / 2; + p_doorbell = (void OSAL_IOMEM *)addr; + + /* prepare second hw function */ + rc = ecore_hw_prepare_single(&p_dev->hwfns[1], p_regview, + p_doorbell, personality); + + /* in case of error, need to free the previously + * initialiazed hwfn 0 + */ + if (rc != ECORE_SUCCESS) { + ecore_init_free(p_hwfn); + ecore_mcp_free(p_hwfn); + ecore_hw_hwfn_free(p_hwfn); + return rc; + } + } + + return ECORE_SUCCESS; +} + +void ecore_hw_remove(struct ecore_dev *p_dev) +{ + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (IS_VF(p_dev)) { + ecore_vf_pf_release(p_hwfn); + continue; + } + + ecore_init_free(p_hwfn); + ecore_hw_hwfn_free(p_hwfn); + ecore_mcp_free(p_hwfn); + + OSAL_MUTEX_DEALLOC(&p_hwfn->dmae_info.mutex); + } +} + +static void ecore_chain_free_next_ptr(struct ecore_dev *p_dev, + struct ecore_chain *p_chain) +{ + void *p_virt = p_chain->p_virt_addr, *p_virt_next = OSAL_NULL; + dma_addr_t p_phys = p_chain->p_phys_addr, p_phys_next = 0; + struct ecore_chain_next *p_next; + u32 size, i; + + if (!p_virt) + return; + + size = p_chain->elem_size * p_chain->usable_per_page; + + for (i = 0; i < p_chain->page_cnt; i++) { + if (!p_virt) + break; + + p_next = (struct ecore_chain_next *)((u8 *)p_virt + size); + p_virt_next = p_next->next_virt; + p_phys_next = HILO_DMA_REGPAIR(p_next->next_phys); + + OSAL_DMA_FREE_COHERENT(p_dev, p_virt, p_phys, + ECORE_CHAIN_PAGE_SIZE); + + p_virt = p_virt_next; + p_phys = p_phys_next; + } +} + +static void ecore_chain_free_single(struct ecore_dev *p_dev, + struct ecore_chain *p_chain) +{ + if (!p_chain->p_virt_addr) + return; + + OSAL_DMA_FREE_COHERENT(p_dev, p_chain->p_virt_addr, + p_chain->p_phys_addr, ECORE_CHAIN_PAGE_SIZE); +} + +static void ecore_chain_free_pbl(struct ecore_dev *p_dev, + struct ecore_chain *p_chain) +{ + void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; + u8 *p_pbl_virt = (u8 *)p_chain->pbl.p_virt_table; + u32 page_cnt = p_chain->page_cnt, i, pbl_size; + + if (!pp_virt_addr_tbl) + return; + + if (!p_chain->pbl.p_virt_table) + goto out; + + for (i = 0; i < page_cnt; i++) { + if (!pp_virt_addr_tbl[i]) + break; + + OSAL_DMA_FREE_COHERENT(p_dev, pp_virt_addr_tbl[i], + *(dma_addr_t *)p_pbl_virt, + ECORE_CHAIN_PAGE_SIZE); + + p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; + } + + pbl_size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; + OSAL_DMA_FREE_COHERENT(p_dev, p_chain->pbl.p_virt_table, + p_chain->pbl.p_phys_table, pbl_size); +out: + OSAL_VFREE(p_dev, p_chain->pbl.pp_virt_addr_tbl); +} + +void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain) +{ + switch (p_chain->mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + ecore_chain_free_next_ptr(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_SINGLE: + ecore_chain_free_single(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_PBL: + ecore_chain_free_pbl(p_dev, p_chain); + break; + } +} + 
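ecore_chain_free_next_ptr() above relies on the next-ptr chain layout: every page carries usable_per_page fixed-size elements followed by one trailing element holding the next page's virtual and physical addresses, so freeing is a simple page walk. The sketch below models only that layout and traversal; plain calloc()/free() stand in for OSAL_DMA_ALLOC_COHERENT()/OSAL_DMA_FREE_COHERENT(), physical addresses and the circular link used by the allocation path are left out, and the 4 KB page size and 8-byte element size are assumptions for the example.

#include <stdlib.h>

#define PAGE_SIZE_BYTES 4096            /* stands in for ECORE_CHAIN_PAGE_SIZE */
#define ELEM_SIZE       8
#define USABLE_PER_PAGE ((PAGE_SIZE_BYTES / ELEM_SIZE) - 1)

struct chain_next {                     /* trailing element of each page */
        void *next_virt;
};

static void *alloc_chain(unsigned int page_cnt)
{
        void *first = NULL, *prev = NULL;
        unsigned int i;

        for (i = 0; i < page_cnt; i++) {
                void *page = calloc(1, PAGE_SIZE_BYTES);

                if (!page)
                        return first;   /* partial chain; caller frees what exists */
                if (!first)
                        first = page;
                if (prev) {
                        struct chain_next *p_next = (struct chain_next *)
                                ((char *)prev + ELEM_SIZE * USABLE_PER_PAGE);

                        p_next->next_virt = page;
                }
                prev = page;
        }

        return first;
}

static void free_chain(void *first, unsigned int page_cnt)
{
        void *page = first;
        unsigned int i;

        for (i = 0; i < page_cnt && page; i++) {
                struct chain_next *p_next = (struct chain_next *)
                        ((char *)page + ELEM_SIZE * USABLE_PER_PAGE);
                void *next = p_next->next_virt;

                free(page);             /* OSAL_DMA_FREE_COHERENT in the driver */
                page = next;
        }
}

int main(void)
{
        void *chain = alloc_chain(4);

        free_chain(chain, 4);
        return 0;
}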
+static enum _ecore_status_t +ecore_chain_alloc_sanity_check(struct ecore_dev *p_dev, + enum ecore_chain_cnt_type cnt_type, + osal_size_t elem_size, u32 page_cnt) +{ + u64 chain_size = ELEMS_PER_PAGE(elem_size) * page_cnt; + + /* The actual chain size can be larger than the maximal possible value + * after rounding up the requested elements number to pages, and after + * taking into acount the unusuable elements (next-ptr elements). + * The size of a "u16" chain can be (U16_MAX + 1) since the chain + * size/capacity fields are of a u32 type. + */ + if ((cnt_type == ECORE_CHAIN_CNT_TYPE_U16 && + chain_size > ((u32)ECORE_U16_MAX + 1)) || + (cnt_type == ECORE_CHAIN_CNT_TYPE_U32 && + chain_size > ECORE_U32_MAX)) { + DP_NOTICE(p_dev, true, + "The actual chain size (0x%lx) is larger than" + " the maximal possible value\n", + (unsigned long)chain_size); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_chain_alloc_next_ptr(struct ecore_dev *p_dev, struct ecore_chain *p_chain) +{ + void *p_virt = OSAL_NULL, *p_virt_prev = OSAL_NULL; + dma_addr_t p_phys = 0; + u32 i; + + for (i = 0; i < p_chain->page_cnt; i++) { + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, + ECORE_CHAIN_PAGE_SIZE); + if (!p_virt) { + DP_NOTICE(p_dev, true, + "Failed to allocate chain memory\n"); + return ECORE_NOMEM; + } + + if (i == 0) { + ecore_chain_init_mem(p_chain, p_virt, p_phys); + ecore_chain_reset(p_chain); + } else { + ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, + p_virt, p_phys); + } + + p_virt_prev = p_virt; + } + /* Last page's next element should point to the beginning of the + * chain. + */ + ecore_chain_init_next_ptr_elem(p_chain, p_virt_prev, + p_chain->p_virt_addr, + p_chain->p_phys_addr); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_chain_alloc_single(struct ecore_dev *p_dev, struct ecore_chain *p_chain) +{ + void *p_virt = OSAL_NULL; + dma_addr_t p_phys = 0; + + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, ECORE_CHAIN_PAGE_SIZE); + if (!p_virt) { + DP_NOTICE(p_dev, true, "Failed to allocate chain memory\n"); + return ECORE_NOMEM; + } + + ecore_chain_init_mem(p_chain, p_virt, p_phys); + ecore_chain_reset(p_chain); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_chain_alloc_pbl(struct ecore_dev *p_dev, + struct ecore_chain *p_chain) +{ + void *p_virt = OSAL_NULL; + u8 *p_pbl_virt = OSAL_NULL; + void **pp_virt_addr_tbl = OSAL_NULL; + dma_addr_t p_phys = 0, p_pbl_phys = 0; + u32 page_cnt = p_chain->page_cnt, size, i; + + size = page_cnt * sizeof(*pp_virt_addr_tbl); + pp_virt_addr_tbl = (void **)OSAL_VALLOC(p_dev, size); + if (!pp_virt_addr_tbl) { + DP_NOTICE(p_dev, true, + "Failed to allocate memory for the chain" + " virtual addresses table\n"); + return ECORE_NOMEM; + } + OSAL_MEM_ZERO(pp_virt_addr_tbl, size); + + /* The allocation of the PBL table is done with its full size, since it + * is expected to be successive. 
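The bound enforced by ecore_chain_alloc_sanity_check() above can be checked with concrete numbers. Assuming 4 KB chain pages and 8-byte elements (both assumptions made only for this example), ELEMS_PER_PAGE is 512, so a u16-counted chain may span at most 128 pages, since 512 * 128 equals U16_MAX + 1:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        const uint64_t elems_per_page = 4096 / 8;       /* 512, assumed page/elem size */
        const uint32_t u16_limit = 0xffff + 1;          /* U16_MAX + 1 */

        assert(elems_per_page * 128 == u16_limit);      /* 128 pages: accepted */
        assert(elems_per_page * 129 >  u16_limit);      /* 129 pages: rejected */
        return 0;
}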
+ */ + size = page_cnt * ECORE_CHAIN_PBL_ENTRY_SIZE; + p_pbl_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_pbl_phys, size); + if (!p_pbl_virt) { + DP_NOTICE(p_dev, true, "Failed to allocate chain pbl memory\n"); + return ECORE_NOMEM; + } + + ecore_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys, + pp_virt_addr_tbl); + + for (i = 0; i < page_cnt; i++) { + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, + ECORE_CHAIN_PAGE_SIZE); + if (!p_virt) { + DP_NOTICE(p_dev, true, + "Failed to allocate chain memory\n"); + return ECORE_NOMEM; + } + + if (i == 0) { + ecore_chain_init_mem(p_chain, p_virt, p_phys); + ecore_chain_reset(p_chain); + } + + /* Fill the PBL table with the physical address of the page */ + *(dma_addr_t *)p_pbl_virt = p_phys; + /* Keep the virtual address of the page */ + p_chain->pbl.pp_virt_addr_tbl[i] = p_virt; + + p_pbl_virt += ECORE_CHAIN_PBL_ENTRY_SIZE; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_chain_alloc(struct ecore_dev *p_dev, + enum ecore_chain_use_mode intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type, + u32 num_elems, osal_size_t elem_size, + struct ecore_chain *p_chain) +{ + u32 page_cnt; + enum _ecore_status_t rc = ECORE_SUCCESS; + + if (mode == ECORE_CHAIN_MODE_SINGLE) + page_cnt = 1; + else + page_cnt = ECORE_CHAIN_PAGE_CNT(num_elems, elem_size, mode); + + rc = ecore_chain_alloc_sanity_check(p_dev, cnt_type, elem_size, + page_cnt); + if (rc) { + DP_NOTICE(p_dev, true, + "Cannot allocate a chain with the given arguments:\n" + " [use_mode %d, mode %d, cnt_type %d, num_elems %d," + " elem_size %zu]\n", + intended_use, mode, cnt_type, num_elems, elem_size); + return rc; + } + + ecore_chain_init_params(p_chain, page_cnt, (u8)elem_size, intended_use, + mode, cnt_type); + + switch (mode) { + case ECORE_CHAIN_MODE_NEXT_PTR: + rc = ecore_chain_alloc_next_ptr(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_SINGLE: + rc = ecore_chain_alloc_single(p_dev, p_chain); + break; + case ECORE_CHAIN_MODE_PBL: + rc = ecore_chain_alloc_pbl(p_dev, p_chain); + break; + } + if (rc) + goto nomem; + + return ECORE_SUCCESS; + +nomem: + ecore_chain_free(p_dev, p_chain); + return rc; +} + +enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, + u16 src_id, u16 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { + u16 min, max; + + min = (u16)RESC_START(p_hwfn, ECORE_L2_QUEUE); + max = min + RESC_NUM(p_hwfn, ECORE_L2_QUEUE); + DP_NOTICE(p_hwfn, true, + "l2_queue id [%d] is not valid, available" + " indices [%d - %d]\n", + src_id, min, max); + + return ECORE_INVAL; + } + + *dst_id = RESC_START(p_hwfn, ECORE_L2_QUEUE) + src_id; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, ECORE_VPORT)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, ECORE_VPORT); + max = min + RESC_NUM(p_hwfn, ECORE_VPORT); + DP_NOTICE(p_hwfn, true, + "vport id [%d] is not valid, available" + " indices [%d - %d]\n", + src_id, min, max); + + return ECORE_INVAL; + } + + *dst_id = RESC_START(p_hwfn, ECORE_VPORT) + src_id; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, + u8 src_id, u8 *dst_id) +{ + if (src_id >= RESC_NUM(p_hwfn, ECORE_RSS_ENG)) { + u8 min, max; + + min = (u8)RESC_START(p_hwfn, ECORE_RSS_ENG); + max = min + RESC_NUM(p_hwfn, ECORE_RSS_ENG); + DP_NOTICE(p_hwfn, true, + "rss_eng id [%d] is not valid,avail idx [%d - %d]\n", + src_id, min, max); + + return ECORE_INVAL; + } + + *dst_id = 
RESC_START(p_hwfn, ECORE_RSS_ENG) + src_id; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 *p_filter) +{ + u32 high, low, en; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return ECORE_SUCCESS; + + high = p_filter[1] | (p_filter[0] << 8); + low = p_filter[5] | (p_filter[4] << 8) | + (p_filter[3] << 16) | (p_filter[2] << 24); + + /* Find a free entry and utilize it */ + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + en = ecore_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); + if (en) + continue; + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), low); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), high); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); + break; + } + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { + DP_NOTICE(p_hwfn, false, + "Failed to find an empty LLH filter to utilize\n"); + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "MAC: %x:%x:%x:%x:%x:%x is added at %d\n", + p_filter[0], p_filter[1], p_filter[2], + p_filter[3], p_filter[4], p_filter[5], i); + + return ECORE_SUCCESS; +} + +void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 *p_filter) +{ + u32 high, low; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return; + + high = p_filter[1] | (p_filter[0] << 8); + low = p_filter[5] | (p_filter[4] << 8) | + (p_filter[3] << 16) | (p_filter[2] << 24); + + /* Find the entry and clean it */ + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (ecore_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32)) != low) + continue; + if (ecore_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32)) != high) + continue; + + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), 0); + break; + } + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) + DP_NOTICE(p_hwfn, false, + "Tried to remove a non-configured filter\n"); +} + +enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 filter) +{ + u32 high, low, en; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return ECORE_SUCCESS; + + high = filter; + low = 0; + + /* Find a free entry and utilize it */ + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + en = ecore_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32)); + if (en) + continue; + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), low); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), high); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_MODE + i * sizeof(u32), 1); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE + + i * sizeof(u32), 1); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 1); + break; + } + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) { + DP_NOTICE(p_hwfn, false, + "Failed to find an empty LLH filter to utilize\n"); + return ECORE_INVAL; + } + + 
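The LLH MAC filters above pack the six address bytes into two register words: the first two bytes form the 16-bit high word and the remaining four form the 32-bit low word, written to consecutive NIG_REG_LLH_FUNC_FILTER_VALUE entries. The example below applies that packing to an arbitrary address (00:0e:1e:2a:b5:fc, chosen only for illustration).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint8_t mac[6] = { 0x00, 0x0e, 0x1e, 0x2a, 0xb5, 0xfc };
        uint32_t high, low;

        high = mac[1] | (mac[0] << 8);
        low  = mac[5] | (mac[4] << 8) | (mac[3] << 16) | ((uint32_t)mac[2] << 24);

        /* prints high=0x000e low=0x1e2ab5fc */
        printf("high=0x%04x low=0x%08x\n", high, low);
        return 0;
}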
DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "ETH type: %x is added at %d\n", filter, i); + + return ECORE_SUCCESS; +} + +void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 filter) +{ + u32 high, low; + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return; + + high = filter; + low = 0; + + /* Find the entry and clean it */ + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + if (ecore_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32)) != low) + continue; + if (ecore_rd(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32)) != high) + continue; + + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), 0); + break; + } + if (i >= NIG_REG_LLH_FUNC_FILTER_EN_SIZE) + DP_NOTICE(p_hwfn, false, + "Tried to remove a non-configured filter\n"); +} + +void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + int i; + + if (!(IS_MF_SI(p_hwfn) || IS_MF_DEFAULT(p_hwfn))) + return; + + for (i = 0; i < NIG_REG_LLH_FUNC_FILTER_EN_SIZE; i++) { + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_EN + i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + 2 * i * sizeof(u32), 0); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LLH_FUNC_FILTER_VALUE + + (2 * i + 1) * sizeof(u32), 0); + } +} + +enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 reg_tbl[] = { + BRB_REG_HEADER_SIZE, + BTB_REG_HEADER_SIZE, + CAU_REG_LONG_TIMEOUT_THRESHOLD, + CCFC_REG_ACTIVITY_COUNTER, + CDU_REG_CID_ADDR_PARAMS, + DBG_REG_CLIENT_ENABLE, + DMAE_REG_INIT, + DORQ_REG_IFEN, + GRC_REG_TIMEOUT_EN, + IGU_REG_BLOCK_CONFIGURATION, + MCM_REG_INIT, + MCP2_REG_DBG_DWORD_ENABLE, + MISC_REG_PORT_MODE, + MISCS_REG_CLK_100G_MODE, + MSDM_REG_ENABLE_IN1, + MSEM_REG_ENABLE_IN, + NIG_REG_CM_HDR, + NCSI_REG_CONFIG, + PBF_REG_INIT, + PTU_REG_ATC_INIT_ARRAY, + PCM_REG_INIT, + PGLUE_B_REG_ADMIN_PER_PF_REGION, + PRM_REG_DISABLE_PRM, + PRS_REG_SOFT_RST, + PSDM_REG_ENABLE_IN1, + PSEM_REG_ENABLE_IN, + PSWRQ_REG_DBG_SELECT, + PSWRQ2_REG_CDUT_P_SIZE, + PSWHST_REG_DISCARD_INTERNAL_WRITES, + PSWHST2_REG_DBGSYN_ALMOST_FULL_THR, + PSWRD_REG_DBG_SELECT, + PSWRD2_REG_CONF11, + PSWWR_REG_USDM_FULL_TH, + PSWWR2_REG_CDU_FULL_TH2, + QM_REG_MAXPQSIZE_0, + RSS_REG_RSS_INIT_EN, + RDIF_REG_STOP_ON_ERROR, + SRC_REG_SOFT_RST, + TCFC_REG_ACTIVITY_COUNTER, + TCM_REG_INIT, + TM_REG_PXP_READ_DATA_FIFO_INIT, + TSDM_REG_ENABLE_IN1, + TSEM_REG_ENABLE_IN, + TDIF_REG_STOP_ON_ERROR, + UCM_REG_INIT, + UMAC_REG_IPG_HD_BKP_CNTL_BB_B0, + USDM_REG_ENABLE_IN1, + USEM_REG_ENABLE_IN, + XCM_REG_INIT, + XSDM_REG_ENABLE_IN1, + XSEM_REG_ENABLE_IN, + YCM_REG_INIT, + YSDM_REG_ENABLE_IN1, + YSEM_REG_ENABLE_IN, + XYLD_REG_SCBD_STRICT_PRIO, + TMLD_REG_SCBD_STRICT_PRIO, + MULD_REG_SCBD_STRICT_PRIO, + YULD_REG_SCBD_STRICT_PRIO, + }; + u32 test_val[] = { 0x0, 0x1 }; + u32 val, save_val, i, j; + + for (i = 0; i < OSAL_ARRAY_SIZE(test_val); i++) { + for (j = 0; j < OSAL_ARRAY_SIZE(reg_tbl); j++) { + save_val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]); + ecore_wr(p_hwfn, p_ptt, reg_tbl[j], test_val[i]); + val = ecore_rd(p_hwfn, p_ptt, reg_tbl[j]); + /* Restore the original register's value */ + ecore_wr(p_hwfn, p_ptt, reg_tbl[j], save_val); + if (val != test_val[i]) { + DP_INFO(p_hwfn->p_dev, + "offset 0x%x: val 
0x%x != 0x%x\n", + reg_tbl[j], val, test_val[i]); + return ECORE_AGAIN; + } + } + } + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_set_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr, void *p_qzone, + osal_size_t qzone_size, + u8 timeset) +{ + struct coalescing_timeset *p_coalesce_timeset; + + if (IS_VF(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, true, "VF coalescing config not supported\n"); + return ECORE_INVAL; + } + + if (p_hwfn->p_dev->int_coalescing_mode != ECORE_COAL_MODE_ENABLE) { + DP_NOTICE(p_hwfn, true, + "Coalescing configuration not enabled\n"); + return ECORE_INVAL; + } + + OSAL_MEMSET(p_qzone, 0, qzone_size); + p_coalesce_timeset = p_qzone; + p_coalesce_timeset->timeset = timeset; + p_coalesce_timeset->valid = 1; + ecore_memcpy_to(p_hwfn, p_ptt, hw_addr, p_qzone, qzone_size); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 coalesce, u8 qid) +{ + struct ustorm_eth_queue_zone qzone; + u16 fw_qid = 0; + u32 address; + u8 timeset; + enum _ecore_status_t rc; + + rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid); + if (rc != ECORE_SUCCESS) + return rc; + + address = BAR0_MAP_REG_USDM_RAM + USTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + /* Translate the coalescing time into a timeset, according to: + * Timeout[Rx] = TimeSet[Rx] << (TimerRes[Rx] + 1) + */ + timeset = coalesce >> (ECORE_CAU_DEF_RX_TIMER_RES + 1); + + rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone, + sizeof(struct ustorm_eth_queue_zone), timeset); + if (rc != ECORE_SUCCESS) + goto out; + + p_hwfn->p_dev->rx_coalesce_usecs = coalesce; +out: + return rc; +} + +enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 coalesce, u8 qid) +{ + struct ystorm_eth_queue_zone qzone; + u16 fw_qid = 0; + u32 address; + u8 timeset; + enum _ecore_status_t rc; + + rc = ecore_fw_l2_queue(p_hwfn, (u16)qid, &fw_qid); + if (rc != ECORE_SUCCESS) + return rc; + + address = BAR0_MAP_REG_YSDM_RAM + YSTORM_ETH_QUEUE_ZONE_OFFSET(fw_qid); + /* Translate the coalescing time into a timeset, according to: + * Timeout[Tx] = TimeSet[Tx] << (TimerRes[Tx] + 1) + */ + timeset = coalesce >> (ECORE_CAU_DEF_TX_TIMER_RES + 1); + + rc = ecore_set_coalesce(p_hwfn, p_ptt, address, &qzone, + sizeof(struct ystorm_eth_queue_zone), timeset); + if (rc != ECORE_SUCCESS) + goto out; + + p_hwfn->p_dev->tx_coalesce_usecs = coalesce; +out: + return rc; +} + +/* Calculate final WFQ values for all vports and configure it. 
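+ * (WFQ here is weighted fair queueing: each vport gets a share of the PF
+ * minimum rate proportional to its requested minimum speed. The weight
+ * programmed per vport below is vport_wfq = min_speed * ECORE_WFQ_UNIT /
+ * min_pf_rate, which ecore_configure_wfq_for_all_vports() writes through
+ * ecore_init_vport_wfq().)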
+ * After this configuration each vport must have + * approx min rate = vport_wfq * min_pf_rate / ECORE_WFQ_UNIT + */ +static void ecore_configure_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i, num_vports; + + vport_params = p_hwfn->qm_info.qm_vport_params; + num_vports = p_hwfn->qm_info.num_vports; + + for (i = 0; i < num_vports; i++) { + u32 wfq_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + + vport_params[i].vport_wfq = + (wfq_speed * ECORE_WFQ_UNIT) / min_pf_rate; + ecore_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +static void +ecore_init_wfq_default_param(struct ecore_hwfn *p_hwfn, u32 min_pf_rate) +{ + int i, num_vports; + u32 min_speed; + + num_vports = p_hwfn->qm_info.num_vports; + min_speed = min_pf_rate / num_vports; + + for (i = 0; i < num_vports; i++) { + p_hwfn->qm_info.qm_vport_params[i].vport_wfq = 1; + p_hwfn->qm_info.wfq_data[i].default_min_speed = min_speed; + } +} + +static void ecore_disable_wfq_for_all_vports(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 min_pf_rate) +{ + struct init_qm_vport_params *vport_params; + int i, num_vports; + + vport_params = p_hwfn->qm_info.qm_vport_params; + num_vports = p_hwfn->qm_info.num_vports; + + for (i = 0; i < num_vports; i++) { + ecore_init_wfq_default_param(p_hwfn, min_pf_rate); + ecore_init_vport_wfq(p_hwfn, p_ptt, + vport_params[i].first_tx_pq_id, + vport_params[i].vport_wfq); + } +} + +/* validate wfq for a given vport and required min rate */ +static enum _ecore_status_t ecore_init_wfq_param(struct ecore_hwfn *p_hwfn, + u16 vport_id, u32 req_rate, + u32 min_pf_rate) +{ + u32 total_req_min_rate = 0, total_left_rate = 0, left_rate_per_vp = 0; + int non_requested_count = 0, req_count = 0, i, num_vports; + + num_vports = p_hwfn->qm_info.num_vports; + + /* Check pre-set data for some of the vports */ + for (i = 0; i < num_vports; i++) { + u32 tmp_speed; + + if ((i != vport_id) && p_hwfn->qm_info.wfq_data[i].configured) { + req_count++; + tmp_speed = p_hwfn->qm_info.wfq_data[i].min_speed; + total_req_min_rate += tmp_speed; + } + } + + /* Include current vport data as well */ + req_count++; + total_req_min_rate += req_rate; + non_requested_count = p_hwfn->qm_info.num_vports - req_count; + + /* validate possible error cases */ + if (req_rate > min_pf_rate) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Vport [%d] - Requested rate[%d Mbps] is greater" + " than configured PF min rate[%d Mbps]\n", + vport_id, req_rate, min_pf_rate); + return ECORE_INVAL; + } + + if (req_rate * ECORE_WFQ_UNIT / min_pf_rate < 1) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Vport [%d] - Requested rate[%d Mbps] is less than" + " one percent of configured PF min rate[%d Mbps]\n", + vport_id, req_rate, min_pf_rate); + return ECORE_INVAL; + } + + /* TBD - for number of vports greater than 100 */ + if (ECORE_WFQ_UNIT / p_hwfn->qm_info.num_vports < 1) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Number of vports are greater than 100\n"); + return ECORE_INVAL; + } + + if (total_req_min_rate > min_pf_rate) { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Total requested min rate for all vports[%d Mbps]" + "is greater than configured PF min rate[%d Mbps]\n", + total_req_min_rate, min_pf_rate); + return ECORE_INVAL; + } + + /* Data left for non requested vports */ + total_left_rate = min_pf_rate - total_req_min_rate; + left_rate_per_vp = total_left_rate / non_requested_count; + + /* validate if non requested 
get < 1% of min bw */ + if (left_rate_per_vp * ECORE_WFQ_UNIT / min_pf_rate < 1) + return ECORE_INVAL; + + /* now req_rate for given vport passes all scenarios. + * assign final wfq rates to all vports. + */ + p_hwfn->qm_info.wfq_data[vport_id].min_speed = req_rate; + p_hwfn->qm_info.wfq_data[vport_id].configured = true; + + for (i = 0; i < num_vports; i++) { + if (p_hwfn->qm_info.wfq_data[i].configured) + continue; + + p_hwfn->qm_info.wfq_data[i].min_speed = left_rate_per_vp; + } + + return ECORE_SUCCESS; +} + +static int __ecore_configure_vport_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 vp_id, u32 rate) +{ + struct ecore_mcp_link_state *p_link; + int rc = ECORE_SUCCESS; + + p_link = &p_hwfn->p_dev->hwfns[0].mcp_info->link_output; + + if (!p_link->min_pf_rate) { + p_hwfn->qm_info.wfq_data[vp_id].min_speed = rate; + p_hwfn->qm_info.wfq_data[vp_id].configured = true; + return rc; + } + + rc = ecore_init_wfq_param(p_hwfn, vp_id, rate, p_link->min_pf_rate); + + if (rc == ECORE_SUCCESS) + ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + else + DP_NOTICE(p_hwfn, false, + "Validation failed while configuring min rate\n"); + + return rc; +} + +static int __ecore_configure_vp_wfq_on_link_change(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 min_pf_rate) +{ + int rc = ECORE_SUCCESS; + bool use_wfq = false; + u16 i, num_vports; + + num_vports = p_hwfn->qm_info.num_vports; + + /* Validate all pre configured vports for wfq */ + for (i = 0; i < num_vports; i++) { + if (p_hwfn->qm_info.wfq_data[i].configured) { + u32 rate = p_hwfn->qm_info.wfq_data[i].min_speed; + + use_wfq = true; + rc = ecore_init_wfq_param(p_hwfn, i, rate, min_pf_rate); + if (rc == ECORE_INVAL) { + DP_NOTICE(p_hwfn, false, + "Validation failed while" + " configuring min rate\n"); + break; + } + } + } + + if (rc == ECORE_SUCCESS && use_wfq) + ecore_configure_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + else + ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, min_pf_rate); + + return rc; +} + +/* Main API for ecore clients to configure vport min rate. + * vp_id - vport id in PF Range[0 - (total_num_vports_per_pf - 1)] + * rate - Speed in Mbps needs to be assigned to a given vport. 
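+ *
+ * Minimal usage sketch (illustrative only; the vport id and rate below are
+ * assumed caller choices, not values mandated by this API):
+ *
+ *   rc = ecore_configure_vport_wfq(p_dev, 2, 1000);
+ *   if (rc != ECORE_SUCCESS)
+ *           handle the failure: ECORE_INVAL on validation error or an
+ *           unsupported device, ECORE_TIMEOUT if no PTT window was free.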
+ */ +int ecore_configure_vport_wfq(struct ecore_dev *p_dev, u16 vp_id, u32 rate) +{ + int i, rc = ECORE_INVAL; + + /* TBD - for multiple hardware functions - that is 100 gig */ + if (p_dev->num_hwfns > 1) { + DP_NOTICE(p_dev, false, + "WFQ configuration is not supported for this dev\n"); + return rc; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_ptt *p_ptt; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_TIMEOUT; + + rc = __ecore_configure_vport_wfq(p_hwfn, p_ptt, vp_id, rate); + + if (rc != ECORE_SUCCESS) { + ecore_ptt_release(p_hwfn, p_ptt); + return rc; + } + + ecore_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +/* API to configure WFQ from mcp link change */ +void ecore_configure_vp_wfq_on_link_change(struct ecore_dev *p_dev, + u32 min_pf_rate) +{ + int i; + + /* TBD - for multiple hardware functions - that is 100 gig */ + if (p_dev->num_hwfns > 1) { + DP_VERBOSE(p_dev, ECORE_MSG_LINK, + "WFQ configuration is not supported for this dev\n"); + return; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + __ecore_configure_vp_wfq_on_link_change(p_hwfn, + p_hwfn->p_dpc_ptt, + min_pf_rate); + } +} + +int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 max_bw) +{ + int rc = ECORE_SUCCESS; + + p_hwfn->mcp_info->func_info.bandwidth_max = max_bw; + + if (!p_link->line_speed) + return rc; + + p_link->speed = (p_link->line_speed * max_bw) / 100; + + rc = ecore_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_link->speed); + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Configured MAX bandwidth to be %08x Mb/sec\n", + p_link->speed); + + return rc; +} + +/* Main API to configure PF max bandwidth where bw range is [1 - 100] */ +int ecore_configure_pf_max_bandwidth(struct ecore_dev *p_dev, u8 max_bw) +{ + int i, rc = ECORE_INVAL; + + if (max_bw < 1 || max_bw > 100) { + DP_NOTICE(p_dev, false, "PF max bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_hwfn *p_lead = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_link_state *p_link; + struct ecore_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_TIMEOUT; + + rc = __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + if (rc != ECORE_SUCCESS) { + ecore_ptt_release(p_hwfn, p_ptt); + return rc; + } + + ecore_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 min_bw) +{ + int rc = ECORE_SUCCESS; + + p_hwfn->mcp_info->func_info.bandwidth_min = min_bw; + + if (!p_link->line_speed) + return rc; + + p_link->min_pf_rate = (p_link->line_speed * min_bw) / 100; + + rc = ecore_init_pf_wfq(p_hwfn, p_ptt, p_hwfn->rel_pf_id, min_bw); + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Configured MIN bandwidth to be %d Mb/sec\n", + p_link->min_pf_rate); + + return rc; +} + +/* Main API to configure PF min bandwidth where bw range is [1-100] */ +int ecore_configure_pf_min_bandwidth(struct ecore_dev *p_dev, u8 min_bw) +{ + int i, rc = ECORE_INVAL; + + if (min_bw < 1 || min_bw > 100) { + DP_NOTICE(p_dev, false, "PF min bw valid range is [1-100]\n"); + return rc; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_hwfn *p_lead = 
ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_link_state *p_link; + struct ecore_ptt *p_ptt; + + p_link = &p_lead->mcp_info->link_output; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_TIMEOUT; + + rc = __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + if (rc != ECORE_SUCCESS) { + ecore_ptt_release(p_hwfn, p_ptt); + return rc; + } + + if (p_link->min_pf_rate) { + u32 min_rate = p_link->min_pf_rate; + + rc = __ecore_configure_vp_wfq_on_link_change(p_hwfn, + p_ptt, + min_rate); + } + + ecore_ptt_release(p_hwfn, p_ptt); + } + + return rc; +} + +void ecore_clean_wfq_db(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_link_state *p_link; + + p_link = &p_hwfn->mcp_info->link_output; + + if (p_link->min_pf_rate) + ecore_disable_wfq_for_all_vports(p_hwfn, p_ptt, + p_link->min_pf_rate); + + OSAL_MEMSET(p_hwfn->qm_info.wfq_data, 0, + sizeof(*p_hwfn->qm_info.wfq_data) * + p_hwfn->qm_info.num_vports); +} + +int ecore_device_num_engines(struct ecore_dev *p_dev) +{ + return ECORE_IS_BB(p_dev) ? 2 : 1; +} + +int ecore_device_num_ports(struct ecore_dev *p_dev) +{ + /* in CMT always only one port */ + if (p_dev->num_hwfns > 1) + return 1; + + return p_dev->num_ports_in_engines * ecore_device_num_engines(p_dev); +} diff --git a/drivers/net/qede/base/ecore_dev_api.h b/drivers/net/qede/base/ecore_dev_api.h new file mode 100644 index 00000000..535b82b5 --- /dev/null +++ b/drivers/net/qede/base/ecore_dev_api.h @@ -0,0 +1,497 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_DEV_API_H__ +#define __ECORE_DEV_API_H__ + +#include "ecore_status.h" +#include "ecore_chain.h" +#include "ecore_int_api.h" + +struct ecore_tunn_start_params; + +/** + * @brief ecore_init_dp - initialize the debug level + * + * @param p_dev + * @param dp_module + * @param dp_level + * @param dp_ctx + */ +void ecore_init_dp(struct ecore_dev *p_dev, + u32 dp_module, u8 dp_level, void *dp_ctx); + +/** + * @brief ecore_init_struct - initialize the device structure to + * its defaults + * + * @param p_dev + */ +void ecore_init_struct(struct ecore_dev *p_dev); + +/** + * @brief ecore_resc_free - + * + * @param p_dev + */ +void ecore_resc_free(struct ecore_dev *p_dev); + +/** + * @brief ecore_resc_alloc - + * + * @param p_dev + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_resc_alloc(struct ecore_dev *p_dev); + +/** + * @brief ecore_resc_setup - + * + * @param p_dev + */ +void ecore_resc_setup(struct ecore_dev *p_dev); + +/** + * @brief ecore_hw_init - + * + * @param p_dev + * @param p_tunn - tunneling parameters + * @param b_hw_start + * @param int_mode - interrupt mode [msix, inta, etc.] to use. + * @param allow_npar_tx_switch - npar tx switching to be used + * for vports configured for tx-switching. + * @param bin_fw_data - binary fw data pointer in binary fw file. + * Pass NULL if not using binary fw file. 
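+ *
+ * Hedged invocation sketch (p_dev, tunn_info, int_mode and fw_data are
+ * assumed to be prepared by the caller; interrupt-mode values come from
+ * enum ecore_int_mode in ecore_int_api.h):
+ *
+ *   rc = ecore_hw_init(p_dev, &tunn_info, true, int_mode, true, fw_data);
+ *   if (rc != ECORE_SUCCESS)
+ *           the load did not complete and the caller is expected to unwind
+ *           using the stop/remove APIs declared in this header.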
+ * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_init(struct ecore_dev *p_dev, + struct ecore_tunn_start_params *p_tunn, + bool b_hw_start, + enum ecore_int_mode int_mode, + bool allow_npar_tx_switch, + const u8 *bin_fw_data); + +/** + * @brief ecore_hw_timers_stop_all - + * + * @param p_dev + * + * @return void + */ +void ecore_hw_timers_stop_all(struct ecore_dev *p_dev); + +/** + * @brief ecore_hw_stop - + * + * @param p_dev + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_stop(struct ecore_dev *p_dev); + +/** + * @brief ecore_hw_stop_fastpath - should be called in case + * slowpath is still required for the device, but + * fastpath is not. + * + * @param p_dev + * + */ +void ecore_hw_stop_fastpath(struct ecore_dev *p_dev); + +/** + * @brief ecore_prepare_hibernate - should be called when + * the system is going into the hibernate state + * + * @param p_dev + * + */ +void ecore_prepare_hibernate(struct ecore_dev *p_dev); + +/** + * @brief ecore_hw_start_fastpath - restart fastpath traffic, + * only if hw_stop_fastpath was called. + * + * @param p_hwfn + * + */ +void ecore_hw_start_fastpath(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_hw_reset - + * + * @param p_dev + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_reset(struct ecore_dev *p_dev); + +/** + * @brief ecore_hw_prepare - + * + * @param p_dev + * @param personality - personality to initialize + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_hw_prepare(struct ecore_dev *p_dev, int personality); + +/** + * @brief ecore_hw_remove - + * + * @param p_dev + */ +void ecore_hw_remove(struct ecore_dev *p_dev); + +/** + * @brief ecore_ptt_acquire - Allocate a PTT window + * + * Should be called at the entry point to the driver (at the beginning of an + * exported function) + * + * @param p_hwfn + * + * @return struct ecore_ptt + */ +struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_release - Release PTT Window + * + * Should be called at the end of a flow - at the end of the function that + * acquired the PTT.
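+ *
+ * Typical acquire/use/release pattern (a sketch only; the register access
+ * in the middle is an illustrative placeholder, mirroring how the ecore
+ * flows in ecore_dev.c use the window):
+ *
+ *   p_ptt = ecore_ptt_acquire(p_hwfn);
+ *   if (!p_ptt)
+ *           return ECORE_TIMEOUT;
+ *   ... access registers through ecore_rd()/ecore_wr() with p_ptt ...
+ *   ecore_ptt_release(p_hwfn, p_ptt);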
+ * + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +#ifndef __EXTRACT__LINUX__ +struct ecore_eth_stats { + u64 no_buff_discards; + u64 packet_too_big_discard; + u64 ttl0_discard; + u64 rx_ucast_bytes; + u64 rx_mcast_bytes; + u64 rx_bcast_bytes; + u64 rx_ucast_pkts; + u64 rx_mcast_pkts; + u64 rx_bcast_pkts; + u64 mftag_filter_discards; + u64 mac_filter_discards; + u64 tx_ucast_bytes; + u64 tx_mcast_bytes; + u64 tx_bcast_bytes; + u64 tx_ucast_pkts; + u64 tx_mcast_pkts; + u64 tx_bcast_pkts; + u64 tx_err_drop_pkts; + u64 tpa_coalesced_pkts; + u64 tpa_coalesced_events; + u64 tpa_aborts_num; + u64 tpa_not_coalesced_pkts; + u64 tpa_coalesced_bytes; + + /* port */ + u64 rx_64_byte_packets; + u64 rx_65_to_127_byte_packets; + u64 rx_128_to_255_byte_packets; + u64 rx_256_to_511_byte_packets; + u64 rx_512_to_1023_byte_packets; + u64 rx_1024_to_1518_byte_packets; + u64 rx_1519_to_1522_byte_packets; + u64 rx_1519_to_2047_byte_packets; + u64 rx_2048_to_4095_byte_packets; + u64 rx_4096_to_9216_byte_packets; + u64 rx_9217_to_16383_byte_packets; + u64 rx_crc_errors; + u64 rx_mac_crtl_frames; + u64 rx_pause_frames; + u64 rx_pfc_frames; + u64 rx_align_errors; + u64 rx_carrier_errors; + u64 rx_oversize_packets; + u64 rx_jabbers; + u64 rx_undersize_packets; + u64 rx_fragments; + u64 tx_64_byte_packets; + u64 tx_65_to_127_byte_packets; + u64 tx_128_to_255_byte_packets; + u64 tx_256_to_511_byte_packets; + u64 tx_512_to_1023_byte_packets; + u64 tx_1024_to_1518_byte_packets; + u64 tx_1519_to_2047_byte_packets; + u64 tx_2048_to_4095_byte_packets; + u64 tx_4096_to_9216_byte_packets; + u64 tx_9217_to_16383_byte_packets; + u64 tx_pause_frames; + u64 tx_pfc_frames; + u64 tx_lpi_entry_count; + u64 tx_total_collisions; + u64 brb_truncates; + u64 brb_discards; + u64 rx_mac_bytes; + u64 rx_mac_uc_packets; + u64 rx_mac_mc_packets; + u64 rx_mac_bc_packets; + u64 rx_mac_frames_ok; + u64 tx_mac_bytes; + u64 tx_mac_uc_packets; + u64 tx_mac_mc_packets; + u64 tx_mac_bc_packets; + u64 tx_mac_ctrl_frames; +}; +#endif + +enum ecore_dmae_address_type_t { + ECORE_DMAE_ADDRESS_HOST_VIRT, + ECORE_DMAE_ADDRESS_HOST_PHYS, + ECORE_DMAE_ADDRESS_GRC +}; + +/* value of flags If ECORE_DMAE_FLAG_RW_REPL_SRC flag is set and the + * source is a block of length DMAE_MAX_RW_SIZE and the + * destination is larger, the source block will be duplicated as + * many times as required to fill the destination block. 
This is + * used mostly to write a zeroed buffer to destination address + * using DMA + */ +#define ECORE_DMAE_FLAG_RW_REPL_SRC 0x00000001 +#define ECORE_DMAE_FLAG_VF_SRC 0x00000002 +#define ECORE_DMAE_FLAG_VF_DST 0x00000004 +#define ECORE_DMAE_FLAG_COMPLETION_DST 0x00000008 + +struct ecore_dmae_params { + u32 flags; /* consists of ECORE_DMAE_FLAG_* values */ + u8 src_vfid; + u8 dst_vfid; +}; + +/** +* @brief ecore_dmae_host2grc - copy data from source addr to +* dmae registers using the given ptt +* +* @param p_hwfn +* @param p_ptt +* @param source_addr +* @param grc_addr (dmae_data_offset) +* @param size_in_dwords +* @param flags (one of the flags defined above) +*/ +enum _ecore_status_t +ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 source_addr, + u32 grc_addr, u32 size_in_dwords, u32 flags); + +/** +* @brief ecore_dmae_grc2host - Read data from dmae data offset +* to source address using the given ptt +* +* @param p_ptt +* @param grc_addr (dmae_data_offset) +* @param dest_addr +* @param size_in_dwords +* @param flags - one of the flags defined above +*/ +enum _ecore_status_t +ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, u32 flags); + +/** +* @brief ecore_dmae_host2host - copy data from to source address +* to a destination address (for SRIOV) using the given ptt +* +* @param p_hwfn +* @param p_ptt +* @param source_addr +* @param dest_addr +* @param size_in_dwords +* @param params +*/ +enum _ecore_status_t +ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct ecore_dmae_params *p_params); + +/** + * @brief ecore_chain_alloc - Allocate and initialize a chain + * + * @param p_hwfn + * @param intended_use + * @param mode + * @param num_elems + * @param elem_size + * @param p_chain + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_chain_alloc(struct ecore_dev *p_dev, + enum ecore_chain_use_mode intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type, + u32 num_elems, + osal_size_t elem_size, struct ecore_chain *p_chain); + +/** + * @brief ecore_chain_free - Free chain DMA memory + * + * @param p_hwfn + * @param p_chain + */ +void ecore_chain_free(struct ecore_dev *p_dev, struct ecore_chain *p_chain); + +/** + * @@brief ecore_fw_l2_queue - Get absolute L2 queue ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_fw_l2_queue(struct ecore_hwfn *p_hwfn, + u16 src_id, u16 *dst_id); + +/** + * @@brief ecore_fw_vport - Get absolute vport ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_fw_vport(struct ecore_hwfn *p_hwfn, + u8 src_id, u8 *dst_id); + +/** + * @@brief ecore_fw_rss_eng - Get absolute RSS engine ID + * + * @param p_hwfn + * @param src_id - relative to p_hwfn + * @param dst_id - absolute per engine + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_fw_rss_eng(struct ecore_hwfn *p_hwfn, + u8 src_id, u8 *dst_id); + +/** + * @brief ecore_llh_add_mac_filter - configures a MAC filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param p_filter - MAC to add + */ +enum _ecore_status_t ecore_llh_add_mac_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, 
+ u8 *p_filter); + +/** + * @brief ecore_llh_remove_mac_filter - removes a MAC filter from llh + * + * @param p_hwfn + * @param p_ptt + * @param p_filter - MAC to remove + */ +void ecore_llh_remove_mac_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 *p_filter); + +/** + * @brief ecore_llh_add_ethertype_filter - configures an ethertype filter in llh + * + * @param p_hwfn + * @param p_ptt + * @param filter - ethertype to add + */ +enum _ecore_status_t ecore_llh_add_ethertype_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 filter); + +/** + * @brief ecore_llh_remove_ethertype_filter - removes an ethertype llh filter + * + * @param p_hwfn + * @param p_ptt + * @param filter - ethertype to remove + */ +void ecore_llh_remove_ethertype_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 filter); + +/** + * @brief ecore_llh_clear_all_filters - removes all MAC filters from llh + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_llh_clear_all_filters(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief Cleanup of previous driver remains prior to load + * + * @param p_hwfn + * @param p_ptt + * @param id - For PF, engine-relative. For VF, PF-relative. + * @param is_vf - true iff cleanup is made for a VF. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_final_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 id, bool is_vf); + +/** + * @brief ecore_test_registers - Perform register tests + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_test_registers(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief ecore_set_rxq_coalesce - Configure coalesce parameters for an Rx queue + * + * @param p_hwfn + * @param p_ptt + * @param coalesce - Coalesce value in microseconds. + * @param qid - Queue index. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_set_rxq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 coalesce, u8 qid); + +/** + * @brief ecore_set_txq_coalesce - Configure coalesce parameters for a Tx queue + * + * @param p_hwfn + * @param p_ptt + * @param coalesce - Coalesce value in microseconds. + * @param qid - Queue index. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_set_txq_coalesce(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 coalesce, u8 qid); + +#endif diff --git a/drivers/net/qede/base/ecore_gtt_reg_addr.h b/drivers/net/qede/base/ecore_gtt_reg_addr.h new file mode 100644 index 00000000..cc49fc7b --- /dev/null +++ b/drivers/net/qede/base/ecore_gtt_reg_addr.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details.
+ */ + +#ifndef GTT_REG_ADDR_H +#define GTT_REG_ADDR_H + +/* Win 2 */ +#define GTT_BAR0_MAP_REG_IGU_CMD 0x00f000UL + +/* Win 3 */ +#define GTT_BAR0_MAP_REG_TSDM_RAM 0x010000UL + +/* Win 4 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM 0x011000UL + +/* Win 5 */ +#define GTT_BAR0_MAP_REG_MSDM_RAM_1024 0x012000UL + +/* Win 6 */ +#define GTT_BAR0_MAP_REG_USDM_RAM 0x013000UL + +/* Win 7 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_1024 0x014000UL + +/* Win 8 */ +#define GTT_BAR0_MAP_REG_USDM_RAM_2048 0x015000UL + +/* Win 9 */ +#define GTT_BAR0_MAP_REG_XSDM_RAM 0x016000UL + +/* Win 10 */ +#define GTT_BAR0_MAP_REG_YSDM_RAM 0x017000UL + +/* Win 11 */ +#define GTT_BAR0_MAP_REG_PSDM_RAM 0x018000UL + +#endif diff --git a/drivers/net/qede/base/ecore_gtt_values.h b/drivers/net/qede/base/ecore_gtt_values.h new file mode 100644 index 00000000..f2efe24e --- /dev/null +++ b/drivers/net/qede/base/ecore_gtt_values.h @@ -0,0 +1,33 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __PREVENT_PXP_GLOBAL_WIN__ + +static u32 pxp_global_win[] = { + 0, + 0, + 0x1c02, /* win 2: addr=0x1c02000, size=4096 bytes */ + 0x1c80, /* win 3: addr=0x1c80000, size=4096 bytes */ + 0x1d00, /* win 4: addr=0x1d00000, size=4096 bytes */ + 0x1d01, /* win 5: addr=0x1d01000, size=4096 bytes */ + 0x1d80, /* win 6: addr=0x1d80000, size=4096 bytes */ + 0x1d81, /* win 7: addr=0x1d81000, size=4096 bytes */ + 0x1d82, /* win 8: addr=0x1d82000, size=4096 bytes */ + 0x1e00, /* win 9: addr=0x1e00000, size=4096 bytes */ + 0x1e80, /* win 10: addr=0x1e80000, size=4096 bytes */ + 0x1f00, /* win 11: addr=0x1f00000, size=4096 bytes */ + 0, + 0, + 0, + 0, + 0, + 0, + 0, +}; + +#endif /* __PREVENT_PXP_GLOBAL_WIN__ */ diff --git a/drivers/net/qede/base/ecore_hsi_common.h b/drivers/net/qede/base/ecore_hsi_common.h new file mode 100644 index 00000000..e341b95c --- /dev/null +++ b/drivers/net/qede/base/ecore_hsi_common.h @@ -0,0 +1,1912 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ECORE_HSI_COMMON__ +#define __ECORE_HSI_COMMON__ +/********************************/ +/* Add include to common target */ +/********************************/ +#include "common_hsi.h" + +/* + * opcodes for the event ring + */ +enum common_event_opcode { + COMMON_EVENT_PF_START, + COMMON_EVENT_PF_STOP, + COMMON_EVENT_VF_START, + COMMON_EVENT_VF_STOP, + COMMON_EVENT_VF_PF_CHANNEL, + COMMON_EVENT_VF_FLR, + COMMON_EVENT_PF_UPDATE, + COMMON_EVENT_MALICIOUS_VF, + COMMON_EVENT_EMPTY, + MAX_COMMON_EVENT_OPCODE +}; + +/* + * Common Ramrod Command IDs + */ +enum common_ramrod_cmd_id { + COMMON_RAMROD_UNUSED, + COMMON_RAMROD_PF_START /* PF Function Start Ramrod */, + COMMON_RAMROD_PF_STOP /* PF Function Stop Ramrod */, + COMMON_RAMROD_VF_START /* VF Function Start */, + COMMON_RAMROD_VF_STOP /* VF Function Stop Ramrod */, + COMMON_RAMROD_PF_UPDATE /* PF update Ramrod */, + COMMON_RAMROD_EMPTY /* Empty Ramrod */, + MAX_COMMON_RAMROD_CMD_ID +}; + +/* + * The core storm context for the Ystorm + */ +struct ystorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* + * The core storm context for the Pstorm + */ +struct pstorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* + * Core Slowpath Connection storm context of Xstorm + */ +struct xstorm_core_conn_st_ctx { + __le32 spq_base_lo /* SPQ Ring Base Address low dword */; + __le32 spq_base_hi /* SPQ Ring Base Address high dword */; + struct regpair consolid_base_addr /* Consolidation Ring Base Address */ + ; + __le16 spq_cons /* SPQ Ring Consumer */; + __le16 consolid_cons /* Consolidation Ring Consumer */; + __le32 reserved0[55] /* Pad to 15 cycles */; +}; + +struct xstorm_core_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 core_state /* state */; + u8 flags0; +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 0 +#define 
XSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_CORE_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF17_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF17_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF13EN_SHIFT 3 +#define 
XSTORM_CORE_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF17EN_SHIFT 7 + u8 flags10; +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_CF23EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define 
XSTORM_CORE_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_CORE_CONN_AG_CTX_BIT16_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT16_SHIFT 0 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT17_SHIFT 1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT18_SHIFT 2 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT19_SHIFT 3 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT20_SHIFT 4 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_MASK 0x1 +#define XSTORM_CORE_CONN_AG_CTX_BIT21_SHIFT 5 +#define XSTORM_CORE_CONN_AG_CTX_CF23_MASK 0x3 +#define XSTORM_CORE_CONN_AG_CTX_CF23_SHIFT 6 + u8 byte2 /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 consolid_prod /* physical_q1 */; + __le16 reserved16 /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_or_spq_prod /* word4 */; + __le16 word5 /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* cf_array0 */; + __le32 reg6 /* cf_array1 */; + __le16 word7 /* word7 */; + __le16 word8 /* word8 */; + __le16 word9 /* word9 */; + __le16 word10 /* word10 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + __le32 reg9 /* reg9 */; + u8 byte7 /* byte7 */; + u8 byte8 /* byte8 */; + u8 byte9 /* byte9 */; + u8 byte10 /* byte10 */; + u8 byte11 /* byte11 */; + u8 byte12 /* byte12 */; + u8 byte13 /* byte13 */; + u8 byte14 /* byte14 */; + u8 byte15 /* byte15 */; + u8 byte16 /* byte16 */; + __le16 word11 /* word11 */; + __le32 reg10 /* reg10 */; + __le32 reg11 /* reg11 */; + __le32 reg12 /* reg12 */; + __le32 reg13 /* reg13 */; + __le32 reg14 /* reg14 */; + __le32 reg15 /* reg15 */; + __le32 reg16 /* reg16 */; + __le32 reg17 /* reg17 */; + __le32 reg18 /* reg18 */; + __le32 reg19 /* reg19 */; + __le16 word12 /* word12 */; + __le16 word13 /* word13 */; + __le16 word14 /* word14 */; + __le16 word15 /* word15 */; +}; + +struct tstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define 
TSTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_CORE_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_CORE_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define TSTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 word1 /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_core_conn_ag_ctx { + u8 reserved /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define 
USTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_CORE_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF4_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF4_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF5_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF5_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF6_MASK 0x3 +#define USTORM_CORE_CONN_AG_CTX_CF6_SHIFT 6 + u8 flags2; +#define USTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF4EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF5EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_CF6EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_CORE_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 word1 /* word1 */; + __le32 rx_producers /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; +}; + +/* + * The core storm context for the Mstorm + */ +struct mstorm_core_conn_st_ctx { + __le32 reserved[24]; +}; + +/* + * The core storm context for the Ustorm + */ +struct ustorm_core_conn_st_ctx { + __le32 reserved[4]; +}; + +/* + * core connection context + */ +struct core_conn_context { + struct ystorm_core_conn_st_ctx ystorm_st_context + /* ystorm storm context */; + struct regpair ystorm_st_padding[2] /* padding */; + struct pstorm_core_conn_st_ctx pstorm_st_context + /* pstorm storm context */; + struct regpair pstorm_st_padding[2] /* padding */; + struct xstorm_core_conn_st_ctx xstorm_st_context + /* xstorm storm context */; + struct xstorm_core_conn_ag_ctx xstorm_ag_context + /* xstorm aggregative context */; + struct tstorm_core_conn_ag_ctx tstorm_ag_context + /* tstorm aggregative context */; + struct ustorm_core_conn_ag_ctx ustorm_ag_context + /* ustorm aggregative context */; + struct mstorm_core_conn_st_ctx mstorm_st_context + /* mstorm storm context */; + struct ustorm_core_conn_st_ctx ustorm_st_context + /* ustorm storm context */; + struct regpair ustorm_st_padding[2] /* padding */; +}; + +/* + * How ll2 should deal with packet upon errors + */ +enum core_error_handle { + LL2_DROP_PACKET /* If error occurs drop packet */, + LL2_DO_NOTHING /* 
If error occurs do nothing */, + LL2_ASSERT /* If error occurs assert */, + MAX_CORE_ERROR_HANDLE +}; + +/* + * opcodes for the event ring + */ +enum core_event_opcode { + CORE_EVENT_TX_QUEUE_START, + CORE_EVENT_TX_QUEUE_STOP, + CORE_EVENT_RX_QUEUE_START, + CORE_EVENT_RX_QUEUE_STOP, + MAX_CORE_EVENT_OPCODE +}; + +/* + * The L4 pseudo checksum mode for Core + */ +enum core_l4_pseudo_checksum_mode { + CORE_L4_PSEUDO_CSUM_CORRECT_LENGTH + , + CORE_L4_PSEUDO_CSUM_ZERO_LENGTH + /* Pseudo Checksum on packet is calculated with zero length. */, + MAX_CORE_L4_PSEUDO_CHECKSUM_MODE +}; + +/* + * Light-L2 RX Producers in Tstorm RAM + */ +struct core_ll2_port_stats { + struct regpair gsi_invalid_hdr; + struct regpair gsi_invalid_pkt_length; + struct regpair gsi_unsupported_pkt_typ; + struct regpair gsi_crcchksm_error; +}; + +/* + * Ethernet TX Per Queue Stats + */ +struct core_ll2_pstorm_per_queue_stat { + struct regpair sent_ucast_bytes + /* number of total bytes sent without errors */; + struct regpair sent_mcast_bytes + /* number of total bytes sent without errors */; + struct regpair sent_bcast_bytes + /* number of total bytes sent without errors */; + struct regpair sent_ucast_pkts + /* number of total packets sent without errors */; + struct regpair sent_mcast_pkts + /* number of total packets sent without errors */; + struct regpair sent_bcast_pkts + /* number of total packets sent without errors */; +}; + +/* + * Light-L2 RX Producers in Tstorm RAM + */ +struct core_ll2_rx_prod { + __le16 bd_prod /* BD Producer */; + __le16 cqe_prod /* CQE Producer */; + __le32 reserved; +}; + +struct core_ll2_tstorm_per_queue_stat { + struct regpair packet_too_big_discard + /* Number of packets discarded because they are bigger than MTU */; + struct regpair no_buff_discard + /* Number of packets discarded due to lack of host buffers */; +}; + +struct core_ll2_ustorm_per_queue_stat { + struct regpair rcv_ucast_bytes; + struct regpair rcv_mcast_bytes; + struct regpair rcv_bcast_bytes; + struct regpair rcv_ucast_pkts; + struct regpair rcv_mcast_pkts; + struct regpair rcv_bcast_pkts; +}; + +/* + * Core Ramrod Command IDs (light L2) + */ +enum core_ramrod_cmd_id { + CORE_RAMROD_UNUSED, + CORE_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, + CORE_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, + CORE_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */, + CORE_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, + MAX_CORE_RAMROD_CMD_ID +}; + +/* + * Specifies how ll2 should deal with packets errors: packet_too_big and no_buff + */ +struct core_rx_action_on_error { + u8 error_type; +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_PACKET_TOO_BIG_SHIFT 0 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_MASK 0x3 +#define CORE_RX_ACTION_ON_ERROR_NO_BUFF_SHIFT 2 +#define CORE_RX_ACTION_ON_ERROR_RESERVED_MASK 0xF +#define CORE_RX_ACTION_ON_ERROR_RESERVED_SHIFT 4 +}; + +/* + * Core RX BD for Light L2 + */ +struct core_rx_bd { + struct regpair addr; + __le16 reserved[4]; +}; + +/* + * Core RX CM offload BD for Light L2 + */ +struct core_rx_bd_with_buff_len { + struct regpair addr; + __le16 buff_length; + __le16 reserved[3]; +}; + +/* + * Core RX CM offload BD for Light L2 + */ +union core_rx_bd_union { + struct core_rx_bd rx_bd /* Core Rx Bd static buffer size */; + struct core_rx_bd_with_buff_len rx_bd_with_len + /* Core Rx Bd with dynamic buffer length */; +}; + +/* + * Opaque Data for Light L2 RX CQE . 
+ */ +struct core_rx_cqe_opaque_data { + __le32 data[2] /* Opaque CQE Data */; +}; + +/* + * Core RX CQE Type for Light L2 + */ +enum core_rx_cqe_type { + CORE_RX_CQE_ILLIGAL_TYPE /* Bad RX Cqe type */, + CORE_RX_CQE_TYPE_REGULAR /* Regular Core RX CQE */, + CORE_RX_CQE_TYPE_GSI_OFFLOAD /* Fp Gsi offload RX CQE */, + CORE_RX_CQE_TYPE_SLOW_PATH /* Slow path Core RX CQE */, + MAX_CORE_RX_CQE_TYPE +}; + +/* + * Core RX CQE for Light L2 . + */ +struct core_rx_fast_path_cqe { + u8 type /* CQE type */; + u8 placement_offset + /* Offset (in bytes) of the packet from start of the buffer */; + struct parsing_and_err_flags parse_flags + /* Parsing and error flags from the parser */; + __le16 packet_length /* Total packet length (from the parser) */; + __le16 vlan /* 802.1q VLAN tag */; + struct core_rx_cqe_opaque_data opaque_data /* Opaque Data */; + __le32 reserved[4]; +}; + +/* + * Core Rx CM offload CQE . + */ +struct core_rx_gsi_offload_cqe { + u8 type /* CQE type */; + u8 data_length_error /* set if gsi data is bigger than buff */; + struct parsing_and_err_flags parse_flags + /* Parsing and error flags from the parser */; + __le16 data_length /* Total packet length (from the parser) */; + __le16 vlan /* 802.1q VLAN tag */; + __le32 src_mac_addrhi /* hi 4 bytes source mac address */; + __le16 src_mac_addrlo /* lo 2 bytes of source mac address */; + u8 reserved1[2]; + __le32 gid_dst[4] /* Gid destination address */; +}; + +/* + * Core RX CQE for Light L2 . + */ +struct core_rx_slow_path_cqe { + u8 type /* CQE type */; + u8 ramrod_cmd_id; + __le16 echo; + __le32 reserved1[7]; +}; + +/* + * Core RX CM offload BD for Light L2 + */ +union core_rx_cqe_union { + struct core_rx_fast_path_cqe rx_cqe_fp /* Fast path CQE */; + struct core_rx_gsi_offload_cqe rx_cqe_gsi /* GSI offload CQE */; + struct core_rx_slow_path_cqe rx_cqe_sp /* Slow path CQE */; +}; + +/* + * Ramrod data for rx queue start ramrod + */ +struct core_rx_start_ramrod_data { + struct regpair bd_base /* bd address of the first bd page */; + struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */; + __le16 mtu /* Maximum transmission unit */; + __le16 sb_id /* Status block ID */; + u8 sb_index /* index of the protocol index */; + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 drop_ttl0_flg /* drop packet with ttl0 if set */; + __le16 num_of_pbl_pages /* Num of pages in CQE PBL */; + u8 inner_vlan_removal_en + /* if set, 802.1q tags will be removed and copied to CQE */; + u8 queue_id /* Light L2 RX Queue ID */; + u8 main_func_queue /* Is this the main queue for the PF */; + u8 mf_si_bcast_accept_all; + u8 mf_si_mcast_accept_all; + struct core_rx_action_on_error action_on_error; + u8 gsi_offload_flag + /* set when in GSI offload mode on ROCE connection */; + u8 reserved[7]; +}; + +/* + * Ramrod data for rx queue stop ramrod + */ +struct core_rx_stop_ramrod_data { + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 queue_id /* Light L2 RX Queue ID */; + u8 reserved1; + __le16 reserved2[2]; +}; + +/* + * Flags for Core TX BD + */ +struct core_tx_bd_flags { + u8 as_bitfield; +#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define CORE_TX_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 0 +#define CORE_TX_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define CORE_TX_BD_FLAGS_VLAN_INSERTION_SHIFT 1 +#define CORE_TX_BD_FLAGS_START_BD_MASK 0x1 +#define 
CORE_TX_BD_FLAGS_START_BD_SHIFT 2
+#define CORE_TX_BD_FLAGS_IP_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_IP_CSUM_SHIFT 3
+#define CORE_TX_BD_FLAGS_L4_CSUM_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_CSUM_SHIFT 4
+#define CORE_TX_BD_FLAGS_IPV6_EXT_MASK 0x1
+#define CORE_TX_BD_FLAGS_IPV6_EXT_SHIFT 5
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PROTOCOL_SHIFT 6
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_MASK 0x1
+#define CORE_TX_BD_FLAGS_L4_PSEUDO_CSUM_MODE_SHIFT 7
+};
+
+/*
+ * Core TX BD for Light L2
+ */
+struct core_tx_bd {
+	struct regpair addr /* Buffer Address */;
+	__le16 nbytes /* Number of Bytes in Buffer */;
+	__le16 vlan /* VLAN to insert to packet (if insertion flag set) */;
+	u8 nbds /* Number of BDs that make up one packet */;
+	struct core_tx_bd_flags bd_flags /* BD Flags */;
+	__le16 l4_hdr_offset_w;
+};
+
+/*
+ * Light L2 TX Destination
+ */
+enum core_tx_dest {
+	CORE_TX_DEST_NW /* Light L2 TX Destination to the Network */,
+	CORE_TX_DEST_LB /* Light L2 TX Destination to the Loopback */,
+	MAX_CORE_TX_DEST
+};
+
+/*
+ * Ramrod data for tx queue start ramrod
+ */
+struct core_tx_start_ramrod_data {
+	struct regpair pbl_base_addr /* Address of the pbl page */;
+	__le16 mtu /* Maximum transmission unit */;
+	__le16 sb_id /* Status block ID */;
+	u8 sb_index /* Status block protocol index */;
+	u8 tx_dest /* TX Destination (either Network or LB) */;
+	u8 stats_en /* Statistics Enable */;
+	u8 stats_id /* Statistics Counter ID */;
+	__le16 pbl_size /* Number of BD pages pointed by PBL */;
+	__le16 qm_pq_id /* QM PQ ID */;
+	u8 conn_type /* connection type that loaded ll2 */;
+	u8 gsi_offload_flag
+	    /* set when in GSI offload mode on ROCE connection */;
+	u8 resrved[2];
+};
+
+/*
+ * Ramrod data for tx queue stop ramrod
+ */
+struct core_tx_stop_ramrod_data {
+	__le32 reserved0[2];
+};
+
+struct eth_mstorm_per_queue_stat {
+	struct regpair ttl0_discard;
+	struct regpair packet_too_big_discard;
+	struct regpair no_buff_discard;
+	struct regpair not_active_discard;
+	struct regpair tpa_coalesced_pkts;
+	struct regpair tpa_coalesced_events;
+	struct regpair tpa_aborts_num;
+	struct regpair tpa_coalesced_bytes;
+};
+
+/*
+ * Ethernet TX Per Queue Stats
+ */
+struct eth_pstorm_per_queue_stat {
+	struct regpair sent_ucast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_mcast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_bcast_bytes
+	    /* number of total bytes sent without errors */;
+	struct regpair sent_ucast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair sent_mcast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair sent_bcast_pkts
+	    /* number of total packets sent without errors */;
+	struct regpair error_drop_pkts
+	    /* number of total packets dropped due to errors */;
+};
+
+/*
+ * ETH Rx producers data
+ */
+struct eth_rx_rate_limit {
+	__le16 mult;
+	__le16 cnst
+	    /* Constant term to add (or subtract from number of cycles) */;
+	u8 add_sub_cnst /* Add (1) or subtract (0) constant term */;
+	u8 reserved0;
+	__le16 reserved1;
+};
+
+struct eth_ustorm_per_queue_stat {
+	struct regpair rcv_ucast_bytes;
+	struct regpair rcv_mcast_bytes;
+	struct regpair rcv_bcast_bytes;
+	struct regpair rcv_ucast_pkts;
+	struct regpair rcv_mcast_pkts;
+	struct regpair rcv_bcast_pkts;
+};
+
+/*
+ * Event Ring Next Page Address
+ */
+struct event_ring_next_addr {
+	struct regpair addr /* Next Page Address */;
+	__le32 reserved[2] /* Reserved */;
+};
+
+/*
+ * Event Ring Element
+ */
+union event_ring_element {
+	struct event_ring_entry entry /* Event Ring Entry */;
+	struct event_ring_next_addr next_addr /* Event Ring Next Page Address */
+	    ;
+};
+
+/*
+ * Flow control mode
+ */
+enum fw_flow_ctrl_mode {
+	flow_ctrl_pause,
+	flow_ctrl_pfc,
+	MAX_FW_FLOW_CTRL_MODE
+};
+
+/*
+ * Integration Phase
+ */
+enum integ_phase {
+	INTEG_PHASE_BB_A0_LATEST = 3 /* BB A0 latest integration phase */,
+	INTEG_PHASE_BB_B0_NO_MCP = 10 /* BB B0 without MCP */,
+	INTEG_PHASE_BB_B0_WITH_MCP = 11 /* BB B0 with MCP */,
+	MAX_INTEG_PHASE
+};
+
+/*
+ * Malicious VF error ID
+ */
+enum malicious_vf_error_id {
+	MALICIOUS_VF_NO_ERROR /* Zero placeholder value */,
+	VF_PF_CHANNEL_NOT_READY
+	    /* Writing to VF/PF channel when it is not ready */,
+	VF_ZONE_MSG_NOT_VALID /* VF channel message is not valid */,
+	VF_ZONE_FUNC_NOT_ENABLED /* Parent PF of VF channel is not active */,
+	ETH_PACKET_TOO_SMALL
+	    /* TX packet is shorter than reported on BDs or from minimal size */
+	    ,
+	ETH_ILLEGAL_VLAN_MODE
+	    /* Tx packet marked as insert VLAN when it's illegal */,
+	ETH_MTU_VIOLATION /* TX packet is greater than MTU */,
+	ETH_ILLEGAL_INBAND_TAGS /* TX packet has illegal inband tags marked */,
+	ETH_VLAN_INSERT_AND_INBAND_VLAN /* Vlan can't be added to inband tag */,
+	ETH_ILLEGAL_NBDS /* indicated number of BDs for the packet is illegal */
+	    ,
+	ETH_FIRST_BD_WO_SOP /* 1st BD must have start_bd flag set */,
+	ETH_INSUFFICIENT_BDS
+	    /* There are not enough BDs for transmission of even one packet */,
+	ETH_ILLEGAL_LSO_HDR_NBDS /* Header NBDs value is illegal */,
+	ETH_ILLEGAL_LSO_MSS /* LSO MSS value is more than allowed */,
+	ETH_ZERO_SIZE_BD
+	    /* empty BD (which does not contain control flags) is illegal */,
+	ETH_ILLEGAL_LSO_HDR_LEN /* LSO header size is above the limit */,
+	ETH_INSUFFICIENT_PAYLOAD
+	    ,
+	ETH_EDPM_OUT_OF_SYNC /* Valid BDs on local ring after EDPM L2 sync */,
+	ETH_TUNN_IPV6_EXT_NBD_ERR
+	    /* Tunneled packet with IPv6+Ext without a proper number of BDs */,
+	MAX_MALICIOUS_VF_ERROR_ID
+};
+
+/*
+ * Mstorm non-triggering VF zone
+ */
+struct mstorm_non_trigger_vf_zone {
+	struct eth_mstorm_per_queue_stat eth_queue_stat
+	    /* VF statistic bucket */;
+};
+
+/*
+ * Mstorm VF zone
+ */
+struct mstorm_vf_zone {
+	struct mstorm_non_trigger_vf_zone non_trigger
+	    /* non-interrupt-triggering zone */;
+};
+
+/*
+ * personality per PF
+ */
+enum personality_type {
+	BAD_PERSONALITY_TYP,
+	PERSONALITY_ISCSI /* iSCSI and LL2 */,
+	PERSONALITY_FCOE /* Fcoe and LL2 */,
+	PERSONALITY_RDMA_AND_ETH /* Roce or Iwarp, Eth and LL2 */,
+	PERSONALITY_RDMA /* Roce and LL2 */,
+	PERSONALITY_CORE /* CORE(LL2) */,
+	PERSONALITY_ETH /* Ethernet */,
+	PERSONALITY_TOE /* Toe and LL2 */,
+	MAX_PERSONALITY_TYPE
+};
+
+/*
+ * tunnel configuration
+ */
+struct pf_start_tunnel_config {
+	u8 set_vxlan_udp_port_flg /* Set VXLAN tunnel UDP destination port. */;
+	u8 set_geneve_udp_port_flg /* Set GENEVE tunnel UDP destination port. */
+	    ;
+	u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */;
+	u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */
+	    ;
+	u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */
+	    ;
+	u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */;
+	u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */;
+	u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */;
+	u8 tunnel_clss_l2geneve
+	    /* Classification scheme for l2 GENEVE tunnel. */;
+	u8 tunnel_clss_ipgeneve
+	    /* Classification scheme for ip GENEVE tunnel.
*/; + u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */; + u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */; + __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */; + __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. */; +}; + +/* + * Ramrod data for PF start ramrod + */ +struct pf_start_ramrod_data { + struct regpair event_ring_pbl_addr /* Address of event ring PBL */; + struct regpair consolid_q_pbl_addr + /* PBL address of consolidation queue */; + struct pf_start_tunnel_config tunnel_config /* tunnel configuration. */ + ; + __le16 event_ring_sb_id /* Status block ID */; + u8 base_vf_id; + ; + u8 num_vfs /* Amount of vfs owned by PF */; + u8 event_ring_num_pages /* Number of PBL pages in event ring */; + u8 event_ring_sb_index /* Status block index */; + u8 path_id /* HW path ID (engine ID) */; + u8 warning_as_error /* In FW asserts, treat warning as error */; + u8 dont_log_ramrods + /* If not set - throw a warning for each ramrod (for debug) */; + u8 personality /* define what type of personality is new PF */; + __le16 log_type_mask; + u8 mf_mode /* Multi function mode */; + u8 integ_phase /* Integration phase */; + u8 allow_npar_tx_switching; + u8 inner_to_outer_pri_map[8]; + u8 pri_map_valid + /* If inner_to_outer_pri_map is initialize then set pri_map_valid */ + ; + __le32 outer_tag; + u8 reserved0[4]; +}; + +/* + * Data for port update ramrod + */ +struct protocol_dcb_data { + u8 dcb_enable_flag /* dcbEnable flag value */; + u8 dcb_priority /* dcbPri flag value */; + u8 dcb_tc /* dcb TC value */; + u8 reserved; +}; + +/* + * tunnel configuration + */ +struct pf_update_tunnel_config { + u8 update_rx_pf_clss; + u8 update_tx_pf_clss; + u8 set_vxlan_udp_port_flg + /* Update VXLAN tunnel UDP destination port. */; + u8 set_geneve_udp_port_flg + /* Update GENEVE tunnel UDP destination port. */; + u8 tx_enable_vxlan /* If set, enable VXLAN tunnel in TX path. */; + u8 tx_enable_l2geneve /* If set, enable l2 GENEVE tunnel in TX path. */ + ; + u8 tx_enable_ipgeneve /* If set, enable IP GENEVE tunnel in TX path. */ + ; + u8 tx_enable_l2gre /* If set, enable l2 GRE tunnel in TX path. */; + u8 tx_enable_ipgre /* If set, enable IP GRE tunnel in TX path. */; + u8 tunnel_clss_vxlan /* Classification scheme for VXLAN tunnel. */; + u8 tunnel_clss_l2geneve + /* Classification scheme for l2 GENEVE tunnel. */; + u8 tunnel_clss_ipgeneve + /* Classification scheme for ip GENEVE tunnel. */; + u8 tunnel_clss_l2gre /* Classification scheme for l2 GRE tunnel. */; + u8 tunnel_clss_ipgre /* Classification scheme for ip GRE tunnel. */; + __le16 vxlan_udp_port /* VXLAN tunnel UDP destination port. */; + __le16 geneve_udp_port /* GENEVE tunnel UDP destination port. 
*/; + __le16 reserved[3]; +}; + +/* + * Data for port update ramrod + */ +struct pf_update_ramrod_data { + u8 pf_id; + u8 update_eth_dcb_data_flag /* Update Eth DCB data indication */; + u8 update_fcoe_dcb_data_flag /* Update FCOE DCB data indication */; + u8 update_iscsi_dcb_data_flag /* Update iSCSI DCB data indication */; + u8 update_roce_dcb_data_flag /* Update ROCE DCB data indication */; + u8 update_iwarp_dcb_data_flag /* Update IWARP DCB data indication */; + u8 update_mf_vlan_flag /* Update MF outer vlan Id */; + u8 reserved; + struct protocol_dcb_data eth_dcb_data /* core eth related fields */; + struct protocol_dcb_data fcoe_dcb_data /* core fcoe related fields */; + struct protocol_dcb_data iscsi_dcb_data /* core iscsi related fields */ + ; + struct protocol_dcb_data roce_dcb_data /* core roce related fields */; + struct protocol_dcb_data iwarp_dcb_data /* core iwarp related fields */ + ; + __le16 mf_vlan /* new outer vlan id value */; + __le16 reserved2; + struct pf_update_tunnel_config tunnel_config /* tunnel configuration. */ + ; +}; + +/* + * Ports mode + */ +enum ports_mode { + ENGX2_PORTX1 /* 2 engines x 1 port */, + ENGX2_PORTX2 /* 2 engines x 2 ports */, + ENGX1_PORTX1 /* 1 engine x 1 port */, + ENGX1_PORTX2 /* 1 engine x 2 ports */, + ENGX1_PORTX4 /* 1 engine x 4 ports */, + MAX_PORTS_MODE +}; + +/* + * RDMA TX Stats + */ +struct rdma_sent_stats { + struct regpair sent_bytes /* number of total RDMA bytes sent */; + struct regpair sent_pkts /* number of total RDMA packets sent */; +}; + +/* + * Pstorm non-triggering VF zone + */ +struct pstorm_non_trigger_vf_zone { + struct eth_pstorm_per_queue_stat eth_queue_stat + /* VF statistic bucket */; + struct rdma_sent_stats rdma_stats /* RoCE sent statistics */; +}; + +/* + * Pstorm VF zone + */ +struct pstorm_vf_zone { + struct pstorm_non_trigger_vf_zone non_trigger + /* non-interrupt-triggering zone */; + struct regpair reserved[7] /* vf_zone size mus be power of 2 */; +}; + +/* + * Ramrod Header of SPQE + */ +struct ramrod_header { + __le32 cid /* Slowpath Connection CID */; + u8 cmd_id /* Ramrod Cmd (Per Protocol Type) */; + u8 protocol_id /* Ramrod Protocol ID */; + __le16 echo /* Ramrod echo */; +}; + +/* + * RDMA RX Stats + */ +struct rdma_rcv_stats { + struct regpair rcv_bytes /* number of total RDMA bytes received */; + struct regpair rcv_pkts /* number of total RDMA packets received */; +}; + +/* + * Slowpath Element (SPQE) + */ +struct slow_path_element { + struct ramrod_header hdr /* Ramrod Header */; + struct regpair data_ptr /* Pointer to the Ramrod Data on the Host */; +}; + +/* + * Tstorm non-triggering VF zone + */ +struct tstorm_non_trigger_vf_zone { + struct rdma_rcv_stats rdma_stats /* RoCE received statistics */; +}; + +struct tstorm_per_port_stat { + struct regpair trunc_error_discard + /* packet is dropped because it was truncated in NIG */; + struct regpair mac_error_discard + /* packet is dropped because of Ethernet FCS error */; + struct regpair mftag_filter_discard + /* packet is dropped because classification was unsuccessful */; + struct regpair eth_mac_filter_discard; + struct regpair ll2_mac_filter_discard; + struct regpair ll2_conn_disabled_discard; + struct regpair iscsi_irregular_pkt + /* packet is an ISCSI irregular packet */; + struct regpair fcoe_irregular_pkt + /* packet is an FCOE irregular packet */; + struct regpair roce_irregular_pkt + /* packet is an ROCE irregular packet */; + struct regpair eth_irregular_pkt /* packet is an ETH irregular packet */ + ; + struct regpair 
toe_irregular_pkt /* packet is an TOE irregular packet */ + ; + struct regpair preroce_irregular_pkt + /* packet is an PREROCE irregular packet */; +}; + +/* + * Tstorm VF zone + */ +struct tstorm_vf_zone { + struct tstorm_non_trigger_vf_zone non_trigger + /* non-interrupt-triggering zone */; +}; + +/* + * Tunnel classification scheme + */ +enum tunnel_clss { + TUNNEL_CLSS_MAC_VLAN = + 0 + /* Use MAC & VLAN from first L2 header for vport classification. */ + , + TUNNEL_CLSS_MAC_VNI + , + TUNNEL_CLSS_INNER_MAC_VLAN + /* Use MAC and VLAN from last L2 header for vport classification */ + , + TUNNEL_CLSS_INNER_MAC_VNI + , + MAX_TUNNEL_CLSS +}; + +/* + * Ustorm non-triggering VF zone + */ +struct ustorm_non_trigger_vf_zone { + struct eth_ustorm_per_queue_stat eth_queue_stat + /* VF statistic bucket */; + struct regpair vf_pf_msg_addr /* VF-PF message address */; +}; + +/* + * Ustorm triggering VF zone + */ +struct ustorm_trigger_vf_zone { + u8 vf_pf_msg_valid /* VF-PF message valid flag */; + u8 reserved[7]; +}; + +/* + * Ustorm VF zone + */ +struct ustorm_vf_zone { + struct ustorm_non_trigger_vf_zone non_trigger + /* non-interrupt-triggering zone */; + struct ustorm_trigger_vf_zone trigger /* interrupt triggering zone */; +}; + +/* + * VF-PF channel data + */ +struct vf_pf_channel_data { + __le32 ready; + u8 valid; + u8 reserved0; + __le16 reserved1; +}; + +/* + * Ramrod data for VF start ramrod + */ +struct vf_start_ramrod_data { + u8 vf_id /* VF ID */; + u8 enable_flr_ack; + __le16 opaque_fid /* VF opaque FID */; + u8 personality /* define what type of personality is new VF */; + u8 reserved[3]; +}; + +/* + * Ramrod data for VF start ramrod + */ +struct vf_stop_ramrod_data { + u8 vf_id /* VF ID */; + u8 reserved0; + __le16 reserved1; + __le32 reserved2; +}; + +/* + * Attentions status block + */ +struct atten_status_block { + __le32 atten_bits; + __le32 atten_ack; + __le16 reserved0; + __le16 sb_index /* status block running index */; + __le32 reserved1; +}; + +enum block_addr { + GRCBASE_GRC = 0x50000, + GRCBASE_MISCS = 0x9000, + GRCBASE_MISC = 0x8000, + GRCBASE_DBU = 0xa000, + GRCBASE_PGLUE_B = 0x2a8000, + GRCBASE_CNIG = 0x218000, + GRCBASE_CPMU = 0x30000, + GRCBASE_NCSI = 0x40000, + GRCBASE_OPTE = 0x53000, + GRCBASE_BMB = 0x540000, + GRCBASE_PCIE = 0x54000, + GRCBASE_MCP = 0xe00000, + GRCBASE_MCP2 = 0x52000, + GRCBASE_PSWHST = 0x2a0000, + GRCBASE_PSWHST2 = 0x29e000, + GRCBASE_PSWRD = 0x29c000, + GRCBASE_PSWRD2 = 0x29d000, + GRCBASE_PSWWR = 0x29a000, + GRCBASE_PSWWR2 = 0x29b000, + GRCBASE_PSWRQ = 0x280000, + GRCBASE_PSWRQ2 = 0x240000, + GRCBASE_PGLCS = 0x0, + GRCBASE_DMAE = 0xc000, + GRCBASE_PTU = 0x560000, + GRCBASE_TCM = 0x1180000, + GRCBASE_MCM = 0x1200000, + GRCBASE_UCM = 0x1280000, + GRCBASE_XCM = 0x1000000, + GRCBASE_YCM = 0x1080000, + GRCBASE_PCM = 0x1100000, + GRCBASE_QM = 0x2f0000, + GRCBASE_TM = 0x2c0000, + GRCBASE_DORQ = 0x100000, + GRCBASE_BRB = 0x340000, + GRCBASE_SRC = 0x238000, + GRCBASE_PRS = 0x1f0000, + GRCBASE_TSDM = 0xfb0000, + GRCBASE_MSDM = 0xfc0000, + GRCBASE_USDM = 0xfd0000, + GRCBASE_XSDM = 0xf80000, + GRCBASE_YSDM = 0xf90000, + GRCBASE_PSDM = 0xfa0000, + GRCBASE_TSEM = 0x1700000, + GRCBASE_MSEM = 0x1800000, + GRCBASE_USEM = 0x1900000, + GRCBASE_XSEM = 0x1400000, + GRCBASE_YSEM = 0x1500000, + GRCBASE_PSEM = 0x1600000, + GRCBASE_RSS = 0x238800, + GRCBASE_TMLD = 0x4d0000, + GRCBASE_MULD = 0x4e0000, + GRCBASE_YULD = 0x4c8000, + GRCBASE_XYLD = 0x4c0000, + GRCBASE_PRM = 0x230000, + GRCBASE_PBF_PB1 = 0xda0000, + GRCBASE_PBF_PB2 = 0xda4000, + GRCBASE_RPB = 0x23c000, + 
GRCBASE_BTB = 0xdb0000, + GRCBASE_PBF = 0xd80000, + GRCBASE_RDIF = 0x300000, + GRCBASE_TDIF = 0x310000, + GRCBASE_CDU = 0x580000, + GRCBASE_CCFC = 0x2e0000, + GRCBASE_TCFC = 0x2d0000, + GRCBASE_IGU = 0x180000, + GRCBASE_CAU = 0x1c0000, + GRCBASE_UMAC = 0x51000, + GRCBASE_XMAC = 0x210000, + GRCBASE_DBG = 0x10000, + GRCBASE_NIG = 0x500000, + GRCBASE_WOL = 0x600000, + GRCBASE_BMBN = 0x610000, + GRCBASE_IPC = 0x20000, + GRCBASE_NWM = 0x800000, + GRCBASE_NWS = 0x700000, + GRCBASE_MS = 0x6a0000, + GRCBASE_PHY_PCIE = 0x620000, + GRCBASE_MISC_AEU = 0x8000, + GRCBASE_BAR0_MAP = 0x1c00000, + MAX_BLOCK_ADDR +}; + +enum block_id { + BLOCK_GRC, + BLOCK_MISCS, + BLOCK_MISC, + BLOCK_DBU, + BLOCK_PGLUE_B, + BLOCK_CNIG, + BLOCK_CPMU, + BLOCK_NCSI, + BLOCK_OPTE, + BLOCK_BMB, + BLOCK_PCIE, + BLOCK_MCP, + BLOCK_MCP2, + BLOCK_PSWHST, + BLOCK_PSWHST2, + BLOCK_PSWRD, + BLOCK_PSWRD2, + BLOCK_PSWWR, + BLOCK_PSWWR2, + BLOCK_PSWRQ, + BLOCK_PSWRQ2, + BLOCK_PGLCS, + BLOCK_DMAE, + BLOCK_PTU, + BLOCK_TCM, + BLOCK_MCM, + BLOCK_UCM, + BLOCK_XCM, + BLOCK_YCM, + BLOCK_PCM, + BLOCK_QM, + BLOCK_TM, + BLOCK_DORQ, + BLOCK_BRB, + BLOCK_SRC, + BLOCK_PRS, + BLOCK_TSDM, + BLOCK_MSDM, + BLOCK_USDM, + BLOCK_XSDM, + BLOCK_YSDM, + BLOCK_PSDM, + BLOCK_TSEM, + BLOCK_MSEM, + BLOCK_USEM, + BLOCK_XSEM, + BLOCK_YSEM, + BLOCK_PSEM, + BLOCK_RSS, + BLOCK_TMLD, + BLOCK_MULD, + BLOCK_YULD, + BLOCK_XYLD, + BLOCK_PRM, + BLOCK_PBF_PB1, + BLOCK_PBF_PB2, + BLOCK_RPB, + BLOCK_BTB, + BLOCK_PBF, + BLOCK_RDIF, + BLOCK_TDIF, + BLOCK_CDU, + BLOCK_CCFC, + BLOCK_TCFC, + BLOCK_IGU, + BLOCK_CAU, + BLOCK_UMAC, + BLOCK_XMAC, + BLOCK_DBG, + BLOCK_NIG, + BLOCK_WOL, + BLOCK_BMBN, + BLOCK_IPC, + BLOCK_NWM, + BLOCK_NWS, + BLOCK_MS, + BLOCK_PHY_PCIE, + BLOCK_MISC_AEU, + BLOCK_BAR0_MAP, + MAX_BLOCK_ID +}; + +/* + * Igu cleanup bit values to distinguish between clean or producer consumer + */ +enum command_type_bit { + IGU_COMMAND_TYPE_NOP = 0, + IGU_COMMAND_TYPE_SET = 1, + MAX_COMMAND_TYPE_BIT +}; + +/* + * DMAE command + */ +struct dmae_cmd { + __le32 opcode; +#define DMAE_CMD_SRC_MASK 0x1 +#define DMAE_CMD_SRC_SHIFT 0 +#define DMAE_CMD_DST_MASK 0x3 +#define DMAE_CMD_DST_SHIFT 1 +#define DMAE_CMD_C_DST_MASK 0x1 +#define DMAE_CMD_C_DST_SHIFT 3 +#define DMAE_CMD_CRC_RESET_MASK 0x1 +#define DMAE_CMD_CRC_RESET_SHIFT 4 +#define DMAE_CMD_SRC_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_SRC_ADDR_RESET_SHIFT 5 +#define DMAE_CMD_DST_ADDR_RESET_MASK 0x1 +#define DMAE_CMD_DST_ADDR_RESET_SHIFT 6 +#define DMAE_CMD_COMP_FUNC_MASK 0x1 +#define DMAE_CMD_COMP_FUNC_SHIFT 7 +#define DMAE_CMD_COMP_WORD_EN_MASK 0x1 +#define DMAE_CMD_COMP_WORD_EN_SHIFT 8 +#define DMAE_CMD_COMP_CRC_EN_MASK 0x1 +#define DMAE_CMD_COMP_CRC_EN_SHIFT 9 +#define DMAE_CMD_COMP_CRC_OFFSET_MASK 0x7 +#define DMAE_CMD_COMP_CRC_OFFSET_SHIFT 10 +#define DMAE_CMD_RESERVED1_MASK 0x1 +#define DMAE_CMD_RESERVED1_SHIFT 13 +#define DMAE_CMD_ENDIANITY_MODE_MASK 0x3 +#define DMAE_CMD_ENDIANITY_MODE_SHIFT 14 +#define DMAE_CMD_ERR_HANDLING_MASK 0x3 +#define DMAE_CMD_ERR_HANDLING_SHIFT 16 +#define DMAE_CMD_PORT_ID_MASK 0x3 +#define DMAE_CMD_PORT_ID_SHIFT 18 +#define DMAE_CMD_SRC_PF_ID_MASK 0xF +#define DMAE_CMD_SRC_PF_ID_SHIFT 20 +#define DMAE_CMD_DST_PF_ID_MASK 0xF +#define DMAE_CMD_DST_PF_ID_SHIFT 24 +#define DMAE_CMD_SRC_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_SRC_VF_ID_VALID_SHIFT 28 +#define DMAE_CMD_DST_VF_ID_VALID_MASK 0x1 +#define DMAE_CMD_DST_VF_ID_VALID_SHIFT 29 +#define DMAE_CMD_RESERVED2_MASK 0x3 +#define DMAE_CMD_RESERVED2_SHIFT 30 + __le32 src_addr_lo + /* PCIe source address low in bytes or GRC source address in DW */; + 
__le32 src_addr_hi;
+	__le32 dst_addr_lo;
+	__le32 dst_addr_hi;
+	__le16 length /* Length in DW */;
+	__le16 opcode_b;
+#define DMAE_CMD_SRC_VF_ID_MASK 0xFF
+#define DMAE_CMD_SRC_VF_ID_SHIFT 0
+#define DMAE_CMD_DST_VF_ID_MASK 0xFF
+#define DMAE_CMD_DST_VF_ID_SHIFT 8
+	__le32 comp_addr_lo /* PCIe completion address low or grc address */;
+	__le32 comp_addr_hi;
+	__le32 comp_val /* Value to write to completion address */;
+	__le32 crc32 /* crc32 result */;
+	__le32 crc_32_c /* crc32_c result */;
+	__le16 crc16 /* crc16 result */;
+	__le16 crc16_c /* crc16_c result */;
+	__le16 crc10 /* crc_t10 result */;
+	__le16 reserved;
+	__le16 xsum16 /* checksum16 result */;
+	__le16 xsum8 /* checksum8 result */;
+};
+
+struct fw_ver_num {
+	u8 major /* Firmware major version number */;
+	u8 minor /* Firmware minor version number */;
+	u8 rev /* Firmware revision version number */;
+	u8 eng /* Firmware engineering version number (for bootleg versions) */
+	    ;
+};
+
+struct fw_ver_info {
+	__le16 tools_ver /* Tools version number */;
+	u8 image_id /* FW image ID (e.g. main, l2b, kuku) */;
+	u8 reserved1;
+	struct fw_ver_num num /* FW version number */;
+	__le32 timestamp /* FW Timestamp in unix time (sec. since 1970) */;
+	__le32 reserved2;
+};
+
+struct storm_ram_section {
+	__le16 offset
+	    /* The offset of the section in the RAM (in 64 bit units) */;
+	__le16 size /* The size of the section (in 64 bit units) */;
+};
+
+struct fw_info {
+	struct fw_ver_info ver /* FW version information */;
+	struct storm_ram_section fw_asserts_section
+	    /* The FW Asserts offset/size in Storm RAM */;
+	__le32 reserved;
+};
+
+struct fw_info_location {
+	__le32 grc_addr /* GRC address where the fw_info struct is located. */;
+	__le32 size
+	    /* Size of the fw_info structure (that's located at the grc_addr).
*/ + ; +}; + +/* + * IGU cleanup command + */ +struct igu_cleanup { + __le32 sb_id_and_flags; +#define IGU_CLEANUP_RESERVED0_MASK 0x7FFFFFF +#define IGU_CLEANUP_RESERVED0_SHIFT 0 +#define IGU_CLEANUP_CLEANUP_SET_MASK 0x1 +#define IGU_CLEANUP_CLEANUP_SET_SHIFT 27 +#define IGU_CLEANUP_CLEANUP_TYPE_MASK 0x7 +#define IGU_CLEANUP_CLEANUP_TYPE_SHIFT 28 +#define IGU_CLEANUP_COMMAND_TYPE_MASK 0x1 +#define IGU_CLEANUP_COMMAND_TYPE_SHIFT 31 + __le32 reserved1; +}; + +/* + * IGU firmware driver command + */ +union igu_command { + struct igu_prod_cons_update prod_cons_update; + struct igu_cleanup cleanup; +}; + +/* + * IGU firmware driver command + */ +struct igu_command_reg_ctrl { + __le16 opaque_fid; + __le16 igu_command_reg_ctrl_fields; +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_MASK 0xFFF +#define IGU_COMMAND_REG_CTRL_PXP_BAR_ADDR_SHIFT 0 +#define IGU_COMMAND_REG_CTRL_RESERVED_MASK 0x7 +#define IGU_COMMAND_REG_CTRL_RESERVED_SHIFT 12 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_MASK 0x1 +#define IGU_COMMAND_REG_CTRL_COMMAND_TYPE_SHIFT 15 +}; + +/* + * IGU mapping line structure + */ +struct igu_mapping_line { + __le32 igu_mapping_line_fields; +#define IGU_MAPPING_LINE_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_VALID_SHIFT 0 +#define IGU_MAPPING_LINE_VECTOR_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_VECTOR_NUMBER_SHIFT 1 +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_MASK 0xFF +#define IGU_MAPPING_LINE_FUNCTION_NUMBER_SHIFT 9 +#define IGU_MAPPING_LINE_PF_VALID_MASK 0x1 +#define IGU_MAPPING_LINE_PF_VALID_SHIFT 17 +#define IGU_MAPPING_LINE_IPS_GROUP_MASK 0x3F +#define IGU_MAPPING_LINE_IPS_GROUP_SHIFT 18 +#define IGU_MAPPING_LINE_RESERVED_MASK 0xFF +#define IGU_MAPPING_LINE_RESERVED_SHIFT 24 +}; + +/* + * IGU MSIX line structure + */ +struct igu_msix_vector { + struct regpair address; + __le32 data; + __le32 msix_vector_fields; +#define IGU_MSIX_VECTOR_MASK_BIT_MASK 0x1 +#define IGU_MSIX_VECTOR_MASK_BIT_SHIFT 0 +#define IGU_MSIX_VECTOR_RESERVED0_MASK 0x7FFF +#define IGU_MSIX_VECTOR_RESERVED0_SHIFT 1 +#define IGU_MSIX_VECTOR_STEERING_TAG_MASK 0xFF +#define IGU_MSIX_VECTOR_STEERING_TAG_SHIFT 16 +#define IGU_MSIX_VECTOR_RESERVED1_MASK 0xFF +#define IGU_MSIX_VECTOR_RESERVED1_SHIFT 24 +}; + +enum init_modes { + MODE_BB_A0, + MODE_BB_B0, + MODE_K2, + MODE_ASIC, + MODE_EMUL_REDUCED, + MODE_EMUL_FULL, + MODE_FPGA, + MODE_CHIPSIM, + MODE_SF, + MODE_MF_SD, + MODE_MF_SI, + MODE_PORTS_PER_ENG_1, + MODE_PORTS_PER_ENG_2, + MODE_PORTS_PER_ENG_4, + MODE_100G, + MODE_EAGLE_ENG1_WORKAROUND, + MAX_INIT_MODES +}; + +enum init_phases { + PHASE_ENGINE, + PHASE_PORT, + PHASE_PF, + PHASE_VF, + PHASE_QM_PF, + MAX_INIT_PHASES +}; + +struct mstorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define MSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define 
MSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0 /* word0 */; + __le16 word1 /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; +}; + +/* + * per encapsulation type enabling flags + */ +struct prs_reg_encapsulation_type_en { + u8 flags; +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT 1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT 2 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_T_TAG_ENABLE_SHIFT 3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT 4 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_MASK 0x1 +#define PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT 5 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_MASK 0x3 +#define PRS_REG_ENCAPSULATION_TYPE_EN_RESERVED_SHIFT 6 +}; + +enum pxp_tph_st_hint { + TPH_ST_HINT_BIDIR /* Read/Write access by Host and Device */, + TPH_ST_HINT_REQUESTER /* Read/Write access by Device */, + TPH_ST_HINT_TARGET + /* Device Write and Host Read, or Host Write and Device Read */, + TPH_ST_HINT_TARGET_PRIO, + MAX_PXP_TPH_ST_HINT +}; + +/* + * QM hardware structure of enable bypass credit mask + */ +struct qm_rf_bypass_mask { + u8 flags; +#define QM_RF_BYPASS_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_BYPASS_MASK_RESERVED0_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED0_SHIFT 1 +#define QM_RF_BYPASS_MASK_PFWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFWFQ_SHIFT 2 +#define QM_RF_BYPASS_MASK_VPWFQ_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPWFQ_SHIFT 3 +#define QM_RF_BYPASS_MASK_PFRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_PFRL_SHIFT 4 +#define QM_RF_BYPASS_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_BYPASS_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_BYPASS_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_BYPASS_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_BYPASS_MASK_RESERVED1_MASK 0x1 +#define QM_RF_BYPASS_MASK_RESERVED1_SHIFT 7 +}; + +/* + * QM hardware structure of opportunistic credit mask + */ +struct qm_rf_opportunistic_mask { + __le16 flags; +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT 0 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT 1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT 2 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT 3 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT 4 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT 5 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT 6 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED0_MASK 0x1 +#define 
QM_RF_OPPORTUNISTIC_MASK_RESERVED0_SHIFT 7 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_MASK 0x1 +#define QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT 8 +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_MASK 0x7F +#define QM_RF_OPPORTUNISTIC_MASK_RESERVED1_SHIFT 9 +}; + +/* + * QM hardware structure of QM map memory + */ +struct qm_rf_pq_map { + __le32 reg; +#define QM_RF_PQ_MAP_PQ_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_PQ_VALID_SHIFT 0 +#define QM_RF_PQ_MAP_RL_ID_MASK 0xFF +#define QM_RF_PQ_MAP_RL_ID_SHIFT 1 +#define QM_RF_PQ_MAP_VP_PQ_ID_MASK 0x1FF +#define QM_RF_PQ_MAP_VP_PQ_ID_SHIFT 9 +#define QM_RF_PQ_MAP_VOQ_MASK 0x1F +#define QM_RF_PQ_MAP_VOQ_SHIFT 18 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_MASK 0x3 +#define QM_RF_PQ_MAP_WRR_WEIGHT_GROUP_SHIFT 23 +#define QM_RF_PQ_MAP_RL_VALID_MASK 0x1 +#define QM_RF_PQ_MAP_RL_VALID_SHIFT 25 +#define QM_RF_PQ_MAP_RESERVED_MASK 0x3F +#define QM_RF_PQ_MAP_RESERVED_SHIFT 26 +}; + +/* + * Completion params for aggregated interrupt completion + */ +struct sdm_agg_int_comp_params { + __le16 params; +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_MASK 0x3F +#define SDM_AGG_INT_COMP_PARAMS_AGG_INT_INDEX_SHIFT 0 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_MASK 0x1 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_ENABLE_SHIFT 6 +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_MASK 0x1FF +#define SDM_AGG_INT_COMP_PARAMS_AGG_VECTOR_BIT_SHIFT 7 +}; + +/* + * SDM operation gen command (generate aggregative interrupt) + */ +struct sdm_op_gen { + __le32 command; +#define SDM_OP_GEN_COMP_PARAM_MASK 0xFFFF +#define SDM_OP_GEN_COMP_PARAM_SHIFT 0 +#define SDM_OP_GEN_COMP_TYPE_MASK 0xF +#define SDM_OP_GEN_COMP_TYPE_SHIFT 16 +#define SDM_OP_GEN_RESERVED_MASK 0xFFF +#define SDM_OP_GEN_RESERVED_SHIFT 20 +}; + +struct ystorm_core_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define YSTORM_CORE_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF0_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF0_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_CF1_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF1_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_CORE_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF0EN_SHIFT 0 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF1EN_SHIFT 1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_CORE_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le16 word1 /* word1 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; + __le16 word4 /* word4 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; +}; + +#endif /* __ECORE_HSI_COMMON__ */ diff --git a/drivers/net/qede/base/ecore_hsi_eth.h b/drivers/net/qede/base/ecore_hsi_eth.h new file mode 100644 index 
00000000..80f4165f --- /dev/null +++ b/drivers/net/qede/base/ecore_hsi_eth.h @@ -0,0 +1,1912 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_HSI_ETH__ +#define __ECORE_HSI_ETH__ +/************************************************************************/ +/* Add include to common eth target for both eCore and protocol driver */ +/************************************************************************/ +#include "eth_common.h" + +/* + * The eth storm context for the Tstorm + */ +struct tstorm_eth_conn_st_ctx { + __le32 reserved[4]; +}; + +/* + * The eth storm context for the Pstorm + */ +struct pstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* + * The eth storm context for the Xstorm + */ +struct xstorm_eth_conn_st_ctx { + __le32 reserved[60]; +}; + +struct xstorm_eth_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 eth_state /* state */; + u8 flags0; +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define 
XSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_CONN_AG_CTX_CF12_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED11_SHIFT 3 
+#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 
+#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 word1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 go_to_bd_cons /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* cf_array0 */; + __le32 reg6 /* cf_array1 */; + __le16 word7 /* word7 */; + __le16 word8 /* word8 */; + __le16 word9 /* word9 */; + __le16 word10 /* word10 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + __le32 reg9 /* reg9 */; + u8 byte7 /* byte7 */; + u8 byte8 /* byte8 */; + u8 byte9 /* byte9 */; + u8 byte10 /* byte10 */; + u8 byte11 /* byte11 */; + u8 byte12 /* byte12 */; + u8 byte13 /* byte13 */; + u8 byte14 /* byte14 */; + u8 byte15 /* byte15 */; + u8 byte16 /* byte16 */; + __le16 word11 /* word11 */; + __le32 reg10 /* reg10 */; + __le32 reg11 /* reg11 */; + __le32 reg12 /* reg12 */; + __le32 reg13 /* reg13 */; + __le32 reg14 /* reg14 */; + __le32 reg15 /* reg15 */; + __le32 reg16 /* reg16 */; + __le32 reg17 /* reg17 */; + __le32 reg18 /* reg18 */; + __le32 reg19 /* reg19 */; + __le16 word12 /* word12 */; + __le16 word13 /* word13 */; + __le16 word14 /* word14 */; + __le16 word15 /* word15 */; +}; + +/* + * The eth storm context for the Ystorm + */ +struct ystorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +struct ystorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define YSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define YSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 0 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define YSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* word0 */; + __le32 terminate_spqe /* reg0 */; + __le32 reg1 /* reg1 */; + __le16 tx_bd_cons_upd /* word1 */; + __le16 word2 /* word2 */; + __le16 word3 /* word3 */; + __le16 word4 /* word4 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; +}; + +struct 
tstorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define TSTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT3_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT4_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_BIT5_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 6 + u8 flags1; +#define TSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF3_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF4_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF4_SHIFT 6 + u8 flags2; +#define TSTORM_ETH_CONN_AG_CTX_CF5_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF5_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF6_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF6_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF7_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF8_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF8_SHIFT 6 + u8 flags3; +#define TSTORM_ETH_CONN_AG_CTX_CF9_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF9_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF10_MASK 0x3 +#define TSTORM_ETH_CONN_AG_CTX_CF10_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 7 + u8 flags4; +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF4EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF5EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF6EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF7EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF8EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF9EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_CF10EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags5; +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RX_BD_EN_SHIFT 5 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define TSTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define TSTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define 
TSTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; + __le32 reg5 /* reg5 */; + __le32 reg6 /* reg6 */; + __le32 reg7 /* reg7 */; + __le32 reg8 /* reg8 */; + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 rx_bd_cons /* word0 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + __le16 rx_bd_prod /* word1 */; + __le16 word2 /* conn_dpi */; + __le16 word3 /* word3 */; + __le32 reg9 /* reg9 */; + __le32 reg10 /* reg10 */; +}; + +struct ustorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define USTORM_ETH_CONN_AG_CTX_BIT0_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT0_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define USTORM_ETH_CONN_AG_CTX_CF3_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_CF3_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_MASK 0x3 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_SHIFT 6 + u8 flags2; +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_PMD_TERMINATE_CF_EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_PMD_TERMINATE_CF_EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_CF3EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_ARM_CF_EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RX_ARM_CF_EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_TX_BD_CONS_UPD_CF_EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 7 + u8 flags3; +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 0 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 2 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 3 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE5EN_SHIFT 4 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE6EN_SHIFT 5 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE7EN_SHIFT 6 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_MASK 0x1 +#define USTORM_ETH_CONN_AG_CTX_RULE8EN_SHIFT 7 + u8 byte2 /* byte2 */; + u8 byte3 /* byte3 */; + __le16 word0 /* conn_dpi */; + __le16 tx_bd_cons /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 tx_int_coallecing_timeset /* reg3 */; + __le16 tx_drv_bd_cons /* word2 */; + __le16 
rx_drv_cqe_cons /* word3 */; +}; + +/* + * The eth storm context for the Ustorm + */ +struct ustorm_eth_conn_st_ctx { + __le32 reserved[40]; +}; + +/* + * The eth storm context for the Mstorm + */ +struct mstorm_eth_conn_st_ctx { + __le32 reserved[8]; +}; + +/* + * eth connection context + */ +struct eth_conn_context { + struct tstorm_eth_conn_st_ctx tstorm_st_context + /* tstorm storm context */; + struct regpair tstorm_st_padding[2] /* padding */; + struct pstorm_eth_conn_st_ctx pstorm_st_context + /* pstorm storm context */; + struct xstorm_eth_conn_st_ctx xstorm_st_context + /* xstorm storm context */; + struct xstorm_eth_conn_ag_ctx xstorm_ag_context + /* xstorm aggregative context */; + struct ystorm_eth_conn_st_ctx ystorm_st_context + /* ystorm storm context */; + struct ystorm_eth_conn_ag_ctx ystorm_ag_context + /* ystorm aggregative context */; + struct tstorm_eth_conn_ag_ctx tstorm_ag_context + /* tstorm aggregative context */; + struct ustorm_eth_conn_ag_ctx ustorm_ag_context + /* ustorm aggregative context */; + struct ustorm_eth_conn_st_ctx ustorm_st_context + /* ustorm storm context */; + struct mstorm_eth_conn_st_ctx mstorm_st_context + /* mstorm storm context */; +}; + +/* + * Ethernet filter types: mac/vlan/pair + */ +enum eth_error_code { + ETH_OK = 0x00 /* command succeeded */, + ETH_FILTERS_MAC_ADD_FAIL_FULL + /* mac add filters command failed due to cam full state */, + ETH_FILTERS_MAC_ADD_FAIL_FULL_MTT2 + /* mac add filters command failed due to mtt2 full state */, + ETH_FILTERS_MAC_ADD_FAIL_DUP_MTT2 + /* mac add filters command failed due to duplicate mac address */, + ETH_FILTERS_MAC_ADD_FAIL_DUP_STT2 + /* mac add filters command failed due to duplicate mac address */, + ETH_FILTERS_MAC_DEL_FAIL_NOF + /* mac delete filters command failed due to not found state */, + ETH_FILTERS_MAC_DEL_FAIL_NOF_MTT2 + /* mac delete filters command failed due to not found state */, + ETH_FILTERS_MAC_DEL_FAIL_NOF_STT2 + /* mac delete filters command failed due to not found state */, + ETH_FILTERS_MAC_ADD_FAIL_ZERO_MAC + /* mac add filters command failed due to MAC Address of + * 00:00:00:00:00:00 + */ + , + ETH_FILTERS_VLAN_ADD_FAIL_FULL + /* vlan add filters command failed due to cam full state */, + ETH_FILTERS_VLAN_ADD_FAIL_DUP + /* vlan add filters command failed due to duplicate VLAN filter */, + ETH_FILTERS_VLAN_DEL_FAIL_NOF + /* vlan delete filters command failed due to not found state */, + ETH_FILTERS_VLAN_DEL_FAIL_NOF_TT1 + /* vlan delete filters command failed due to not found state */, + ETH_FILTERS_PAIR_ADD_FAIL_DUP + /* pair add filters command failed due to duplicate request */, + ETH_FILTERS_PAIR_ADD_FAIL_FULL + /* pair add filters command failed due to full state */, + ETH_FILTERS_PAIR_ADD_FAIL_FULL_MAC + /* pair add filters command failed due to full state */, + ETH_FILTERS_PAIR_DEL_FAIL_NOF + /* pair add filters command failed due not found state */, + ETH_FILTERS_PAIR_DEL_FAIL_NOF_TT1 + /* pair add filters command failed due not found state */, + ETH_FILTERS_PAIR_ADD_FAIL_ZERO_MAC + /* pair add filters command failed due to MAC Address of + * 00:00:00:00:00:00 + */ + , + ETH_FILTERS_VNI_ADD_FAIL_FULL + /* vni add filters command failed due to cam full state */, + ETH_FILTERS_VNI_ADD_FAIL_DUP + /* vni add filters command failed due to duplicate VNI filter */, + MAX_ETH_ERROR_CODE +}; + +/* + * opcodes for the event ring + */ +enum eth_event_opcode { + ETH_EVENT_UNUSED, + ETH_EVENT_VPORT_START, + ETH_EVENT_VPORT_UPDATE, + ETH_EVENT_VPORT_STOP, + 
ETH_EVENT_TX_QUEUE_START, + ETH_EVENT_TX_QUEUE_STOP, + ETH_EVENT_RX_QUEUE_START, + ETH_EVENT_RX_QUEUE_UPDATE, + ETH_EVENT_RX_QUEUE_STOP, + ETH_EVENT_FILTERS_UPDATE, + ETH_EVENT_RX_ADD_OPENFLOW_FILTER, + ETH_EVENT_RX_DELETE_OPENFLOW_FILTER, + ETH_EVENT_RX_CREATE_OPENFLOW_ACTION, + ETH_EVENT_RX_ADD_UDP_FILTER, + ETH_EVENT_RX_DELETE_UDP_FILTER, + ETH_EVENT_RX_ADD_GFT_FILTER, + ETH_EVENT_RX_DELETE_GFT_FILTER, + ETH_EVENT_RX_CREATE_GFT_ACTION, + MAX_ETH_EVENT_OPCODE +}; + +/* + * Classify rule types in E2/E3 + */ +enum eth_filter_action { + ETH_FILTER_ACTION_UNUSED, + ETH_FILTER_ACTION_REMOVE, + ETH_FILTER_ACTION_ADD, + ETH_FILTER_ACTION_REMOVE_ALL + /* Remove all filters of given type and vport ID. */, + MAX_ETH_FILTER_ACTION +}; + +/* + * Command for adding/removing a classification rule $$KEEP_ENDIANNESS$$ + */ +struct eth_filter_cmd { + u8 type /* Filter Type (MAC/VLAN/Pair/VNI) */; + u8 vport_id /* the vport id */; + u8 action /* filter command action: add/remove/replace */; + u8 reserved0; + __le32 vni; + __le16 mac_lsb; + __le16 mac_mid; + __le16 mac_msb; + __le16 vlan_id; +}; + +/* + * $$KEEP_ENDIANNESS$$ + */ +struct eth_filter_cmd_header { + u8 rx /* If set, apply these commands to the RX path */; + u8 tx /* If set, apply these commands to the TX path */; + u8 cmd_cnt /* Number of filter commands */; + u8 assert_on_error; + u8 reserved1[4]; +}; + +/* + * Ethernet filter types: mac/vlan/pair + */ +enum eth_filter_type { + ETH_FILTER_TYPE_UNUSED, + ETH_FILTER_TYPE_MAC /* Add/remove a MAC address */, + ETH_FILTER_TYPE_VLAN /* Add/remove a VLAN */, + ETH_FILTER_TYPE_PAIR /* Add/remove a MAC-VLAN pair */, + ETH_FILTER_TYPE_INNER_MAC /* Add/remove a inner MAC address */, + ETH_FILTER_TYPE_INNER_VLAN /* Add/remove a inner VLAN */, + ETH_FILTER_TYPE_INNER_PAIR /* Add/remove a inner MAC-VLAN pair */, + ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR /* Add/remove a inner MAC-VNI pair */ + , + ETH_FILTER_TYPE_MAC_VNI_PAIR /* Add/remove a MAC-VNI pair */, + ETH_FILTER_TYPE_VNI /* Add/remove a VNI */, + MAX_ETH_FILTER_TYPE +}; + +/* + * eth IPv4 Fragment Type + */ +enum eth_ipv4_frag_type { + ETH_IPV4_NOT_FRAG /* IPV4 Packet Not Fragmented */, + ETH_IPV4_FIRST_FRAG + /* First Fragment of IPv4 Packet (contains headers) */, + ETH_IPV4_NON_FIRST_FRAG + /* Non-First Fragment of IPv4 Packet (does not contain headers) */, + MAX_ETH_IPV4_FRAG_TYPE +}; + +/* + * eth IPv4 Fragment Type + */ +enum eth_ip_type { + ETH_IPV4 /* IPv4 */, + ETH_IPV6 /* IPv6 */, + MAX_ETH_IP_TYPE +}; + +/* + * Ethernet Ramrod Command IDs + */ +enum eth_ramrod_cmd_id { + ETH_RAMROD_UNUSED, + ETH_RAMROD_VPORT_START /* VPort Start Ramrod */, + ETH_RAMROD_VPORT_UPDATE /* VPort Update Ramrod */, + ETH_RAMROD_VPORT_STOP /* VPort Stop Ramrod */, + ETH_RAMROD_RX_QUEUE_START /* RX Queue Start Ramrod */, + ETH_RAMROD_RX_QUEUE_STOP /* RX Queue Stop Ramrod */, + ETH_RAMROD_TX_QUEUE_START /* TX Queue Start Ramrod */, + ETH_RAMROD_TX_QUEUE_STOP /* TX Queue Stop Ramrod */, + ETH_RAMROD_FILTERS_UPDATE /* Add or Remove Mac/Vlan/Pair filters */, + ETH_RAMROD_RX_QUEUE_UPDATE /* RX Queue Update Ramrod */, + ETH_RAMROD_RX_CREATE_OPENFLOW_ACTION + /* RX - Create an Openflow Action */, + ETH_RAMROD_RX_ADD_OPENFLOW_FILTER + /* RX - Add an Openflow Filter to the Searcher */, + ETH_RAMROD_RX_DELETE_OPENFLOW_FILTER + /* RX - Delete an Openflow Filter to the Searcher */, + ETH_RAMROD_RX_ADD_UDP_FILTER /* RX - Add a UDP Filter to the Searcher */ + , + ETH_RAMROD_RX_DELETE_UDP_FILTER + /* RX - Delete a UDP Filter to the Searcher */, + ETH_RAMROD_RX_CREATE_GFT_ACTION /* 
RX - Create a GFT Action */, + ETH_RAMROD_RX_DELETE_GFT_FILTER + /* RX - Delete a GFT Filter from the Searcher */, + ETH_RAMROD_RX_ADD_GFT_FILTER + /* RX - Add a GFT Filter to the Searcher */, + MAX_ETH_RAMROD_CMD_ID +}; + +/* + * return code from eth sp ramrods + */ +struct eth_return_code { + u8 value; +#define ETH_RETURN_CODE_ERR_CODE_MASK 0x1F +#define ETH_RETURN_CODE_ERR_CODE_SHIFT 0 +#define ETH_RETURN_CODE_RESERVED_MASK 0x3 +#define ETH_RETURN_CODE_RESERVED_SHIFT 5 +#define ETH_RETURN_CODE_RX_TX_MASK 0x1 +#define ETH_RETURN_CODE_RX_TX_SHIFT 7 +}; + +/* + * What to do in case an error occurs + */ +enum eth_tx_err { + ETH_TX_ERR_DROP /* Drop erroneous packet. */, + ETH_TX_ERR_ASSERT_MALICIOUS + /* Assert an interrupt for PF, declare as malicious for VF */, + MAX_ETH_TX_ERR +}; + +/* + * Array of the different error type behaviors + */ +struct eth_tx_err_vals { + __le16 values; +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_VLAN_MODE_SHIFT 0 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_MASK 0x1 +#define ETH_TX_ERR_VALS_PACKET_TOO_SMALL_SHIFT 1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_MASK 0x1 +#define ETH_TX_ERR_VALS_ANTI_SPOOFING_ERR_SHIFT 2 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_INBAND_TAGS_SHIFT 3 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_MASK 0x1 +#define ETH_TX_ERR_VALS_VLAN_INSERTION_W_INBAND_TAG_SHIFT 4 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_MASK 0x1 +#define ETH_TX_ERR_VALS_MTU_VIOLATION_SHIFT 5 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_MASK 0x1 +#define ETH_TX_ERR_VALS_ILLEGAL_CONTROL_FRAME_SHIFT 6 +#define ETH_TX_ERR_VALS_RESERVED_MASK 0x1FF +#define ETH_TX_ERR_VALS_RESERVED_SHIFT 7 +}; + +/* + * vport rss configuration data + */ +struct eth_vport_rss_config { + __le16 capabilities; +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY_SHIFT 0 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY_SHIFT 1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY_SHIFT 2 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY_SHIFT 3 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY_SHIFT 4 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY_SHIFT 5 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_MASK 0x1 +#define ETH_VPORT_RSS_CONFIG_EN_5_TUPLE_CAPABILITY_SHIFT 6 +#define ETH_VPORT_RSS_CONFIG_RESERVED0_MASK 0x1FF +#define ETH_VPORT_RSS_CONFIG_RESERVED0_SHIFT 7 + u8 rss_id; + u8 rss_mode /* The RSS mode for this function */; + u8 update_rss_key /* if set update the rss key */; + u8 update_rss_ind_table /* if set update the indirection table */; + u8 update_rss_capabilities /* if set update the capabilities */; + u8 tbl_size /* rss mask (Tbl size) */; + __le32 reserved2[2]; + __le16 indirection_table[ETH_RSS_IND_TABLE_ENTRIES_NUM] + /* RSS indirection table */; + __le32 rss_key[ETH_RSS_KEY_SIZE_REGS] /* RSS key supplied to us by OS */ + ; + __le32 reserved3[2]; +}; + +/* + * eth vport RSS mode + */ +enum eth_vport_rss_mode { + ETH_VPORT_RSS_MODE_DISABLED /* RSS Disabled */, + ETH_VPORT_RSS_MODE_REGULAR /* Regular (ndis-like) RSS */, + MAX_ETH_VPORT_RSS_MODE +}; + +/* + * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ + */ +struct 
eth_vport_rx_mode { + __le16 state; +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_MASK 0x1 +#define ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED_SHIFT 2 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_DROP_ALL_SHIFT 3 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL_SHIFT 5 +#define ETH_VPORT_RX_MODE_RESERVED1_MASK 0x3FF +#define ETH_VPORT_RX_MODE_RESERVED1_SHIFT 6 + __le16 reserved2[3]; +}; + +/* + * Command for setting tpa parameters + */ +struct eth_vport_tpa_param { + u8 tpa_ipv4_en_flg /* Enable TPA for IPv4 packets */; + u8 tpa_ipv6_en_flg /* Enable TPA for IPv6 packets */; + u8 tpa_ipv4_tunn_en_flg /* Enable TPA for IPv4 over tunnel */; + u8 tpa_ipv6_tunn_en_flg /* Enable TPA for IPv6 over tunnel */; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg + /* If set, put header of first TPA segment on bd and data on SGE */ + ; + u8 tpa_gro_consistent_flg + /* If set, GRO data consistency will be checked for TPA continuation */; + u8 tpa_max_aggs_num + /* maximum number of opened aggregations per v-port */; + __le16 tpa_max_size /* maximal size for the aggregated TPA packets */; + __le16 tpa_min_size_to_start + /* minimum TCP payload size for a packet to start aggregation */; + __le16 tpa_min_size_to_cont + /* minimum TCP payload size for a packet to continue aggregation */ + ; + u8 max_buff_num + /* maximal number of buffers that can be used for one aggregation */ + ; + u8 reserved; +}; + +/* + * Command for setting classification flags for a vport $$KEEP_ENDIANNESS$$ + */ +struct eth_vport_tx_mode { + __le16 state; +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_DROP_ALL_SHIFT 0 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL_SHIFT 1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_DROP_ALL_SHIFT 2 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL_SHIFT 3 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_MASK 0x1 +#define ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL_SHIFT 4 +#define ETH_VPORT_TX_MODE_RESERVED1_MASK 0x7FF +#define ETH_VPORT_TX_MODE_RESERVED1_SHIFT 5 + __le16 reserved2[3]; +}; + +/* + * Ramrod data for rx add gft filter data + */ +struct rx_add_gft_filter_data { + struct regpair pkt_hdr_addr /* Packet Header That Defines GFT Filter */ + ; + __le16 action_icid /* ICID of Action to run for this filter */; + __le16 pkt_hdr_length /* Packet Header Length */; + u8 reserved[4]; +}; + +/* + * Ramrod data for rx add openflow filter + */ +struct rx_add_openflow_filter_data { + __le16 action_icid /* CID of Action to run for this filter */; + u8 priority /* Searcher String - Packet priority */; + u8 reserved0; + __le32 tenant_id /* Searcher String - Tenant ID */; + __le16 dst_mac_hi /* Searcher String - Destination Mac Bytes 0 to 1 */; + __le16 dst_mac_mid /* Searcher String - Destination Mac Bytes 2 to 3 */ + ; + __le16 dst_mac_lo /* Searcher String - Destination Mac Bytes 4 to 5 */; + __le16 src_mac_hi /* Searcher String - Source Mac 0 to 1 */; + __le16 src_mac_mid /* Searcher String - Source Mac 2 to 3 */; + __le16 src_mac_lo /* Searcher String - 
Source Mac 4 to 5 */; + __le16 vlan_id /* Searcher String - Vlan ID */; + __le16 l2_eth_type /* Searcher String - Last L2 Ethertype */; + u8 ipv4_dscp /* Searcher String - IPv4 6 MSBs of the TOS Field */; + u8 ipv4_frag_type /* Searcher String - IPv4 Fragmentation Type */; + u8 ipv4_over_ip /* Searcher String - IPv4 Over IP Type */; + u8 tenant_id_exists /* Searcher String - Tenant ID Exists */; + __le32 ipv4_dst_addr /* Searcher String - IPv4 Destination Address */; + __le32 ipv4_src_addr /* Searcher String - IPv4 Source Address */; + __le16 l4_dst_port /* Searcher String - TCP/UDP Destination Port */; + __le16 l4_src_port /* Searcher String - TCP/UDP Source Port */; +}; + +/* + * Ramrod data for rx create gft action + */ +struct rx_create_gft_action_data { + u8 vport_id /* Vport Id of GFT Action */; + u8 reserved[7]; +}; + +/* + * Ramrod data for rx create openflow action + */ +struct rx_create_openflow_action_data { + u8 vport_id /* ID of RX queue */; + u8 reserved[7]; +}; + +/* + * Ramrod data for rx queue start ramrod + */ +struct rx_queue_start_ramrod_data { + __le16 rx_queue_id /* ID of RX queue */; + __le16 num_of_pbl_pages /* Num of pages in CQE PBL */; + __le16 bd_max_bytes /* maximal bytes that can be placed on the bd */; + __le16 sb_id /* Status block ID */; + u8 sb_index /* Status block protocol index */; + u8 vport_id /* ID of virtual port */; + u8 default_rss_queue_flg /* set queue as default rss queue if set */; + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 stats_counter_id /* Statistics counter ID */; + u8 pin_context /* Pin context in CCFC to improve performance */; + u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD/SGE fetch */; + u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet placement */ + ; + u8 pxp_st_hint + /* PXP command Steering tag hint. Use enum pxp_tph_st_hint */; + __le16 pxp_st_index /* PXP command Steering tag index */; + u8 pmd_mode + /* Indicates that current queue belongs to poll-mode driver */; + u8 notify_en; + u8 toggle_val + /* Initial value for the toggle valid bit - used in PMD mode */; + u8 reserved[7]; + __le16 reserved1 /* FW reserved. */; + struct regpair cqe_pbl_addr /* Base address on host of CQE PBL */; + struct regpair bd_base /* bd address of the first bd page */; + struct regpair reserved2 /* FW reserved. */; +}; + +/* + * Ramrod data for rx queue stop ramrod + */ +struct rx_queue_stop_ramrod_data { + __le16 rx_queue_id /* ID of RX queue */; + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 vport_id /* ID of virtual port */; + u8 reserved[3]; +}; + +/* + * Ramrod data for rx queue update ramrod + */ +struct rx_queue_update_ramrod_data { + __le16 rx_queue_id /* ID of RX queue */; + u8 complete_cqe_flg /* post completion to the CQE ring if set */; + u8 complete_event_flg /* post completion to the event ring if set */; + u8 vport_id /* ID of virtual port */; + u8 reserved[4]; + u8 reserved1 /* FW reserved. */; + u8 reserved2 /* FW reserved. */; + u8 reserved3 /* FW reserved. */; + __le16 reserved4 /* FW reserved. */; + __le16 reserved5 /* FW reserved. */; + struct regpair reserved6 /* FW reserved. 
*/; +}; + +/* + * Ramrod data for rx Add UDP Filter + */ +struct rx_udp_filter_data { + __le16 action_icid /* CID of Action to run for this filter */; + __le16 vlan_id /* Searcher String - Vlan ID */; + u8 ip_type /* Searcher String - IP Type */; + u8 tenant_id_exists /* Searcher String - Tenant ID Exists */; + __le16 reserved1; + __le32 ip_dst_addr[4] + /* Searcher String-IP Dest Addr, for IPv4 use ip_dst_addr[0] only */ + ; + __le32 ip_src_addr[4] + /* Searcher String-IP Src Addr, for IPv4 use ip_src_addr[0] only */ + ; + __le16 udp_dst_port /* Searcher String - UDP Destination Port */; + __le16 udp_src_port /* Searcher String - UDP Source Port */; + __le32 tenant_id /* Searcher String - Tenant ID */; +}; + +/* + * Ramrod data for tx queue start ramrod + */ +struct tx_queue_start_ramrod_data { + __le16 sb_id /* Status block ID */; + u8 sb_index /* Status block protocol index */; + u8 vport_id /* VPort ID */; + u8 reserved0 /* FW reserved. */; + u8 stats_counter_id /* Statistics counter ID to use */; + __le16 qm_pq_id /* QM PQ ID */; + u8 flags; +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_DISABLE_OPPORTUNISTIC_SHIFT 0 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_PKT_DUP_SHIFT 1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_TEST_MODE_TX_DEST_SHIFT 2 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PMD_MODE_SHIFT 3 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_NOTIFY_EN_SHIFT 4 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_MASK 0x1 +#define TX_QUEUE_START_RAMROD_DATA_PIN_CONTEXT_SHIFT 5 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_MASK 0x3 +#define TX_QUEUE_START_RAMROD_DATA_RESERVED1_SHIFT 6 + u8 pxp_st_hint /* PXP command Steering tag hint */; + u8 pxp_tph_valid_bd /* PXP command TPH Valid - for BD fetch */; + u8 pxp_tph_valid_pkt /* PXP command TPH Valid - for packet fetch */; + __le16 pxp_st_index /* PXP command Steering tag index */; + __le16 comp_agg_size /* TX completion min agg size - for PMD queues */; + __le16 queue_zone_id /* queue zone ID to use */; + __le16 test_dup_count /* In Test Mode, number of duplications */; + __le16 pbl_size /* Number of BD pages pointed by PBL */; + __le16 tx_queue_id + /* unique Queue ID - currently used only by PMD flow */; + struct regpair pbl_base_addr /* address of the pbl page */; + struct regpair bd_cons_address + /* BD consumer address in host - for PMD queues */; +}; + +/* + * Ramrod data for tx queue stop ramrod + */ +struct tx_queue_stop_ramrod_data { + __le16 reserved[4]; +}; + +/* + * Ramrod data for vport filter update ramrod + */ +struct vport_filter_update_ramrod_data { + struct eth_filter_cmd_header filter_cmd_hdr + /* Header for Filter Commands (RX/TX, Add/Remove/Replace, etc) */; + struct eth_filter_cmd filter_cmds[ETH_FILTER_RULES_COUNT] + /* Filter Commands */; +}; + +/* + * Ramrod data for vport start ramrod + */ +struct vport_start_ramrod_data { + u8 vport_id; + u8 sw_fid; + __le16 mtu; + u8 drop_ttl0_en /* if set, drop packet with ttl=0 */; + u8 inner_vlan_removal_en; + struct eth_vport_rx_mode rx_mode /* Rx filter data */; + struct eth_vport_tx_mode tx_mode /* Tx filter data */; + struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */ + ; + __le16 default_vlan /* Default Vlan value to be forced by FW */; + u8 tx_switching_en /* Tx switching is enabled for 
current Vport */; + u8 anti_spoofing_en + /* Anti-spoofing verification is set for current Vport */; + u8 default_vlan_en + /* If set, the default Vlan value is forced by the FW */; + u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */ + ; + u8 silent_vlan_removal_en; + /* If enabled, the inner VLAN will be stripped and not written to the CQE */ + u8 untagged; + struct eth_tx_err_vals tx_err_behav + /* Desired behavior per TX error type */; + u8 zero_placement_offset; + u8 reserved[7]; +}; + +/* + * Ramrod data for vport stop ramrod + */ +struct vport_stop_ramrod_data { + u8 vport_id; + u8 reserved[7]; +}; + +/* + * Ramrod data for vport update ramrod + */ +struct vport_update_ramrod_data_cmn { + u8 vport_id; + u8 update_rx_active_flg /* set if rx active flag should be handled */; + u8 rx_active_flg /* rx active flag value */; + u8 update_tx_active_flg /* set if tx active flag should be handled */; + u8 tx_active_flg /* tx active flag value */; + u8 update_rx_mode_flg /* set if rx state data should be handled */; + u8 update_tx_mode_flg /* set if tx state data should be handled */; + u8 update_approx_mcast_flg + /* set if approx. mcast data should be handled */; + u8 update_rss_flg /* set if rss data should be handled */; + u8 update_inner_vlan_removal_en_flg + /* set if inner_vlan_removal_en should be handled */; + u8 inner_vlan_removal_en; + u8 update_tpa_param_flg; + u8 update_tpa_en_flg /* set if tpa enable changes */; + u8 update_tx_switching_en_flg + /* set if tx switching en flag should be handled */; + u8 tx_switching_en /* tx switching en value */; + u8 update_anti_spoofing_en_flg + /* set if anti spoofing flag should be handled */; + u8 anti_spoofing_en /* Anti-spoofing verification en value */; + u8 update_handle_ptp_pkts + /* set if handle_ptp_pkts should be handled. */; + u8 handle_ptp_pkts /* If set, the vport handles PTP Timesync Packets */ + ; + u8 update_default_vlan_en_flg + /* If set, the default Vlan enable flag is updated */; + u8 default_vlan_en + /* If set, the default Vlan value is forced by the FW */; + u8 update_default_vlan_flg + /* If set, the default Vlan value is updated */; + __le16 default_vlan /* Default Vlan value to be forced by FW */; + u8 update_accept_any_vlan_flg + /* set if accept_any_vlan should be handled */; + u8 accept_any_vlan /* accept_any_vlan updated value */; + u8 silent_vlan_removal_en; + u8 update_mtu_flg + /* If set, MTU will be updated. Vport must not be active. */; + __le16 mtu /* New MTU value. 
Used if update_mtu_flg is set */; + u8 reserved[2]; +}; + +struct vport_update_ramrod_mcast { + __le32 bins[ETH_MULTICAST_MAC_BINS_IN_REGS] /* multicast bins */; +}; + +/* + * Ramrod data for vport update ramrod + */ +struct vport_update_ramrod_data { + struct vport_update_ramrod_data_cmn common + /* Common data for all vport update ramrods */; + struct eth_vport_rx_mode rx_mode /* vport rx mode bitmap */; + struct eth_vport_tx_mode tx_mode /* vport tx mode bitmap */; + struct eth_vport_tpa_param tpa_param /* TPA configuration parameters */ + ; + struct vport_update_ramrod_mcast approx_mcast; + struct eth_vport_rss_config rss_config /* rss config data */; +}; + +/* + * GFT CAM line struct + */ +struct gft_cam_line { + __le32 camline; +#define GFT_CAM_LINE_VALID_MASK 0x1 +#define GFT_CAM_LINE_VALID_SHIFT 0 +#define GFT_CAM_LINE_DATA_MASK 0x3FFF +#define GFT_CAM_LINE_DATA_SHIFT 1 +#define GFT_CAM_LINE_MASK_BITS_MASK 0x3FFF +#define GFT_CAM_LINE_MASK_BITS_SHIFT 15 +#define GFT_CAM_LINE_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_RESERVED1_SHIFT 29 +}; + +/* + * GFT CAM line struct (for driversim use) + */ +struct gft_cam_line_mapped { + __le32 camline; +#define GFT_CAM_LINE_MAPPED_VALID_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_VALID_SHIFT 0 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_SHIFT 1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_SHIFT 2 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_SHIFT 3 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_SHIFT 7 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_SHIFT 11 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_IP_VERSION_MASK_SHIFT 15 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_MASK 0x1 +#define GFT_CAM_LINE_MAPPED_TUNNEL_IP_VERSION_MASK_SHIFT 16 +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_UPPER_PROTOCOL_TYPE_MASK_SHIFT 17 +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_TUNNEL_TYPE_MASK_SHIFT 21 +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_MASK 0xF +#define GFT_CAM_LINE_MAPPED_PF_ID_MASK_SHIFT 25 +#define GFT_CAM_LINE_MAPPED_RESERVED1_MASK 0x7 +#define GFT_CAM_LINE_MAPPED_RESERVED1_SHIFT 29 +}; + +union gft_cam_line_union { + struct gft_cam_line cam_line; + struct gft_cam_line_mapped cam_line_mapped; +}; + +/* + * Used in gft_profile_key: Indication for ip version + */ +enum gft_profile_ip_version { + GFT_PROFILE_IPV4 = 0, + GFT_PROFILE_IPV6 = 1, + MAX_GFT_PROFILE_IP_VERSION +}; + +/* + * Profile key struct for GFT logic in PRS + */ +struct gft_profile_key { + __le16 profile_key; +#define GFT_PROFILE_KEY_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_IP_VERSION_SHIFT 0 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_MASK 0x1 +#define GFT_PROFILE_KEY_TUNNEL_IP_VERSION_SHIFT 1 +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_UPPER_PROTOCOL_TYPE_SHIFT 2 +#define GFT_PROFILE_KEY_TUNNEL_TYPE_MASK 0xF +#define GFT_PROFILE_KEY_TUNNEL_TYPE_SHIFT 6 +#define GFT_PROFILE_KEY_PF_ID_MASK 0xF +#define GFT_PROFILE_KEY_PF_ID_SHIFT 10 +#define GFT_PROFILE_KEY_RESERVED0_MASK 0x3 +#define GFT_PROFILE_KEY_RESERVED0_SHIFT 14 +}; + +/* + * Used in gft_profile_key: Indication for tunnel type + */ +enum gft_profile_tunnel_type { + GFT_PROFILE_NO_TUNNEL = 0, + GFT_PROFILE_VXLAN_TUNNEL = 1, + 
GFT_PROFILE_GRE_MAC_OR_NVGRE_TUNNEL = 2, + GFT_PROFILE_GRE_IP_TUNNEL = 3, + GFT_PROFILE_GENEVE_MAC_TUNNEL = 4, + GFT_PROFILE_GENEVE_IP_TUNNEL = 5, + MAX_GFT_PROFILE_TUNNEL_TYPE +}; + +/* + * Used in gft_profile_key: Indication for protocol type + */ +enum gft_profile_upper_protocol_type { + GFT_PROFILE_ROCE_PROTOCOL = 0, + GFT_PROFILE_RROCE_PROTOCOL = 1, + GFT_PROFILE_FCOE_PROTOCOL = 2, + GFT_PROFILE_ICMP_PROTOCOL = 3, + GFT_PROFILE_ARP_PROTOCOL = 4, + GFT_PROFILE_USER_TCP_SRC_PORT_1_INNER = 5, + GFT_PROFILE_USER_TCP_DST_PORT_1_INNER = 6, + GFT_PROFILE_TCP_PROTOCOL = 7, + GFT_PROFILE_USER_UDP_DST_PORT_1_INNER = 8, + GFT_PROFILE_USER_UDP_DST_PORT_2_OUTER = 9, + GFT_PROFILE_UDP_PROTOCOL = 10, + GFT_PROFILE_USER_IP_1_INNER = 11, + GFT_PROFILE_USER_IP_2_OUTER = 12, + GFT_PROFILE_USER_ETH_1_INNER = 13, + GFT_PROFILE_USER_ETH_2_OUTER = 14, + GFT_PROFILE_RAW = 15, + MAX_GFT_PROFILE_UPPER_PROTOCOL_TYPE +}; + +/* + * GFT RAM line struct + */ +struct gft_ram_line { + __le32 low32bits; +#define GFT_RAM_LINE_VLAN_SELECT_MASK 0x3 +#define GFT_RAM_LINE_VLAN_SELECT_SHIFT 0 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ENTROPHY_SHIFT 2 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_EQUAL_ONE_SHIFT 3 +#define GFT_RAM_LINE_TUNNEL_TTL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_TTL_SHIFT 4 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_ETHERTYPE_SHIFT 5 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_PORT_SHIFT 6 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_PORT_SHIFT 7 +#define GFT_RAM_LINE_TUNNEL_DSCP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DSCP_SHIFT 8 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_OVER_IP_PROTOCOL_SHIFT 9 +#define GFT_RAM_LINE_TUNNEL_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_IP_SHIFT 10 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_IP_SHIFT 11 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PRIORITY_SHIFT 12 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_PROVIDER_VLAN_SHIFT 13 +#define GFT_RAM_LINE_TUNNEL_VLAN_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_VLAN_SHIFT 14 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_DST_MAC_SHIFT 15 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_TUNNEL_SRC_MAC_SHIFT 16 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_MASK 0x1 +#define GFT_RAM_LINE_TTL_EQUAL_ONE_SHIFT 17 +#define GFT_RAM_LINE_TTL_MASK 0x1 +#define GFT_RAM_LINE_TTL_SHIFT 18 +#define GFT_RAM_LINE_ETHERTYPE_MASK 0x1 +#define GFT_RAM_LINE_ETHERTYPE_SHIFT 19 +#define GFT_RAM_LINE_RESERVED0_MASK 0x1 +#define GFT_RAM_LINE_RESERVED0_SHIFT 20 +#define GFT_RAM_LINE_TCP_FLAG_FIN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_FIN_SHIFT 21 +#define GFT_RAM_LINE_TCP_FLAG_SYN_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_SYN_SHIFT 22 +#define GFT_RAM_LINE_TCP_FLAG_RST_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_RST_SHIFT 23 +#define GFT_RAM_LINE_TCP_FLAG_PSH_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_PSH_SHIFT 24 +#define GFT_RAM_LINE_TCP_FLAG_ACK_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ACK_SHIFT 25 +#define GFT_RAM_LINE_TCP_FLAG_URG_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_URG_SHIFT 26 +#define GFT_RAM_LINE_TCP_FLAG_ECE_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_ECE_SHIFT 27 +#define GFT_RAM_LINE_TCP_FLAG_CWR_MASK 0x1 +#define GFT_RAM_LINE_TCP_FLAG_CWR_SHIFT 28 +#define GFT_RAM_LINE_TCP_FLAG_NS_MASK 0x1 
+#define GFT_RAM_LINE_TCP_FLAG_NS_SHIFT 29 +#define GFT_RAM_LINE_DST_PORT_MASK 0x1 +#define GFT_RAM_LINE_DST_PORT_SHIFT 30 +#define GFT_RAM_LINE_SRC_PORT_MASK 0x1 +#define GFT_RAM_LINE_SRC_PORT_SHIFT 31 + __le32 high32bits; +#define GFT_RAM_LINE_DSCP_MASK 0x1 +#define GFT_RAM_LINE_DSCP_SHIFT 0 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_MASK 0x1 +#define GFT_RAM_LINE_OVER_IP_PROTOCOL_SHIFT 1 +#define GFT_RAM_LINE_DST_IP_MASK 0x1 +#define GFT_RAM_LINE_DST_IP_SHIFT 2 +#define GFT_RAM_LINE_SRC_IP_MASK 0x1 +#define GFT_RAM_LINE_SRC_IP_SHIFT 3 +#define GFT_RAM_LINE_PRIORITY_MASK 0x1 +#define GFT_RAM_LINE_PRIORITY_SHIFT 4 +#define GFT_RAM_LINE_PROVIDER_VLAN_MASK 0x1 +#define GFT_RAM_LINE_PROVIDER_VLAN_SHIFT 5 +#define GFT_RAM_LINE_VLAN_MASK 0x1 +#define GFT_RAM_LINE_VLAN_SHIFT 6 +#define GFT_RAM_LINE_DST_MAC_MASK 0x1 +#define GFT_RAM_LINE_DST_MAC_SHIFT 7 +#define GFT_RAM_LINE_SRC_MAC_MASK 0x1 +#define GFT_RAM_LINE_SRC_MAC_SHIFT 8 +#define GFT_RAM_LINE_TENANT_ID_MASK 0x1 +#define GFT_RAM_LINE_TENANT_ID_SHIFT 9 +#define GFT_RAM_LINE_RESERVED1_MASK 0x3FFFFF +#define GFT_RAM_LINE_RESERVED1_SHIFT 10 +}; + +/* + * Used in the first 2 bits for gft_ram_line: Indication for vlan mask + */ +enum gft_vlan_select { + INNER_PROVIDER_VLAN = 0, + INNER_VLAN = 1, + OUTER_PROVIDER_VLAN = 2, + OUTER_VLAN = 3, + MAX_GFT_VLAN_SELECT +}; + +struct mstorm_eth_conn_ag_ctx { + u8 byte0 /* cdu_validation */; + u8 byte1 /* state */; + u8 flags0; +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_BIT1_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF0_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF0_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_CF1_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF1_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_CF2_MASK 0x3 +#define MSTORM_ETH_CONN_AG_CTX_CF2_SHIFT 6 + u8 flags1; +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF0EN_SHIFT 0 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF1EN_SHIFT 1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_CF2EN_SHIFT 2 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE0EN_SHIFT 3 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE1EN_SHIFT 4 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE2EN_SHIFT 5 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE3EN_SHIFT 6 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_MASK 0x1 +#define MSTORM_ETH_CONN_AG_CTX_RULE4EN_SHIFT 7 + __le16 word0 /* word0 */; + __le16 word1 /* word1 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; +}; + +/* @DPDK: xstormEthConnAgCtxDqExtLdPart */ +struct xstorm_eth_conn_ag_ctx_dq_ext_ld_part { + u8 reserved0 /* cdu_validation */; + u8 eth_state /* state */; + u8 flags0; +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM0_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED1_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED2_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EXIST_IN_QM3_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED3_SHIFT 4 +#define 
XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED4_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED5_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED7_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED8_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED9_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT11_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT12_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_BIT13_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3_SHIFT 6 + u8 flags3; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7_SHIFT 6 + u8 flags4; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11_SHIFT 6 + u8 flags5; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15_SHIFT 6 + u8 flags6; +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_MASK 0x3 +#define 
XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED10_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF0EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF2EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF3EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF4EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF5EN_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF6EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF7EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF8EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF10EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF11EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF12EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF13EN_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF14EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_CF15EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_DQ_CF_EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TERMINATE_CF_EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_FLUSH_Q0_EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED11_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_SLOW_PATH_EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED12_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED14_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RESERVED15_SHIFT 1 +#define 
XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE5EN_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE6EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE7EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED1_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE10EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE11EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED2_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED3_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE14EN_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE15EN_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE16EN_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE18EN_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_RULE19EN_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED4_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED5_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED6_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED7_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED8_SHIFT 6 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORMETHCONNAGCTXDQEXTLDPART_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_MASK 0x3 +#define XSTORMETHCONNAGCTXDQEXTLDPART_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 
/* physical_q0 */; + __le16 word1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 go_to_bd_cons /* word5 */; + __le16 conn_dpi /* conn_dpi */; + u8 byte3 /* byte3 */; + u8 byte4 /* byte4 */; + u8 byte5 /* byte5 */; + u8 byte6 /* byte6 */; + __le32 reg0 /* reg0 */; + __le32 reg1 /* reg1 */; + __le32 reg2 /* reg2 */; + __le32 reg3 /* reg3 */; + __le32 reg4 /* reg4 */; +}; + +struct xstorm_eth_hw_conn_ag_ctx { + u8 reserved0 /* cdu_validation */; + u8 eth_state /* state */; + u8 flags0; +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED1_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EXIST_IN_QM3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED3_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED4_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED5_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED6_SHIFT 7 + u8 flags1; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED7_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED8_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT12_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_BIT13_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_RULE_ACTIVE_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_ACTIVE_SHIFT 7 + u8 flags2; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3_SHIFT 6 + u8 flags3; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7_SHIFT 6 + u8 flags4; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11_SHIFT 6 + u8 flags5; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_MASK 0x3 
+#define XSTORM_ETH_HW_CONN_AG_CTX_CF12_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15_SHIFT 6 + u8 flags6; +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_SHIFT 6 + u8 flags7; +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED10_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF0EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF1EN_SHIFT 7 + u8 flags8; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF2EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF3EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF4EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF8EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF9EN_SHIFT 7 + u8 flags9; +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF12EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF13EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_CF15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_GO_TO_BD_CONS_CF_EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_MULTI_UNICAST_CF_EN_SHIFT 7 + u8 flags10; +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_DQ_CF_EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TERMINATE_CF_EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_FLUSH_Q0_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED11_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_SLOW_PATH_EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_EN_RESERVED_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED12_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED13_SHIFT 7 + u8 flags11; +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED14_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RESERVED15_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_TX_DEC_RULE_EN_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE5EN_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE6EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE7EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED1_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE9EN_SHIFT 7 + u8 flags12; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE10EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE11EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED2_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED3_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE14EN_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE15EN_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE16EN_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE17EN_SHIFT 7 + u8 flags13; +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE18EN_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_RULE19EN_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED4_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED5_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED6_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED7_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED8_SHIFT 6 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_A0_RESERVED9_SHIFT 7 + u8 flags14; +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_USE_EXT_HDR_SHIFT 0 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_RAW_L3L4_SHIFT 1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_INBAND_PROP_HDR_SHIFT 2 +#define XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_MASK 0x1 +#define 
XSTORM_ETH_HW_CONN_AG_CTX_EDPM_SEND_EXT_TUNNEL_SHIFT 3 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_L2_EDPM_ENABLE_SHIFT 4 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_MASK 0x1 +#define XSTORM_ETH_HW_CONN_AG_CTX_ROCE_EDPM_ENABLE_SHIFT 5 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_MASK 0x3 +#define XSTORM_ETH_HW_CONN_AG_CTX_TPH_ENABLE_SHIFT 6 + u8 edpm_event_id /* byte2 */; + __le16 physical_q0 /* physical_q0 */; + __le16 word1 /* physical_q1 */; + __le16 edpm_num_bds /* physical_q2 */; + __le16 tx_bd_cons /* word3 */; + __le16 tx_bd_prod /* word4 */; + __le16 go_to_bd_cons /* word5 */; + __le16 conn_dpi /* conn_dpi */; +}; + +#endif /* __ECORE_HSI_ETH__ */ diff --git a/drivers/net/qede/base/ecore_hsi_tools.h b/drivers/net/qede/base/ecore_hsi_tools.h new file mode 100644 index 00000000..18eea762 --- /dev/null +++ b/drivers/net/qede/base/ecore_hsi_tools.h @@ -0,0 +1,1081 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_HSI_TOOLS__ +#define __ECORE_HSI_TOOLS__ +/**********************************/ +/* Tools HSI constants and macros */ +/**********************************/ + +/*********************************** Init ************************************/ + +/* Width of GRC address in bits (addresses are specified in dwords) */ +#define GRC_ADDR_BITS 23 +#define MAX_GRC_ADDR ((1 << GRC_ADDR_BITS) - 1) + +/* indicates an init that should be applied to any phase ID */ +#define ANY_PHASE_ID 0xffff + +/* init pattern size in bytes */ +#define INIT_PATTERN_SIZE_BITS 4 +#define MAX_INIT_PATTERN_SIZE (1 << INIT_PATTERN_SIZE_BITS) + +/* Max size in dwords of a zipped array */ +#define MAX_ZIPPED_SIZE 8192 + +/* Global PXP window */ +#define NUM_OF_PXP_WIN 19 +#define PXP_WIN_DWORD_SIZE_BITS 10 +#define PXP_WIN_DWORD_SIZE (1 << PXP_WIN_DWORD_SIZE_BITS) +#define PXP_WIN_BYTE_SIZE_BITS (PXP_WIN_DWORD_SIZE_BITS + 2) +#define PXP_WIN_BYTE_SIZE (PXP_WIN_DWORD_SIZE * 4) + +/********************************* GRC Dump **********************************/ + +/* width of GRC dump register sequence length in bits */ +#define DUMP_SEQ_LEN_BITS 8 +#define DUMP_SEQ_LEN_MAX_VAL ((1 << DUMP_SEQ_LEN_BITS) - 1) + +/* width of GRC dump memory length in bits */ +#define DUMP_MEM_LEN_BITS 18 +#define DUMP_MEM_LEN_MAX_VAL ((1 << DUMP_MEM_LEN_BITS) - 1) + +/* width of register type ID in bits */ +#define REG_TYPE_ID_BITS 6 +#define REG_TYPE_ID_MAX_VAL ((1 << REG_TYPE_ID_BITS) - 1) + +/* width of block ID in bits */ +#define BLOCK_ID_BITS 8 +#define BLOCK_ID_MAX_VAL ((1 << BLOCK_ID_BITS) - 1) + +/******************************** Idle Check *********************************/ + +/* max number of idle check predicate immediates */ +#define MAX_IDLE_CHK_PRED_IMM 3 + +/* max number of idle check argument registers */ +#define MAX_IDLE_CHK_READ_REGS 3 + +/* max number of idle check loops */ +#define MAX_IDLE_CHK_LOOPS 0x10000 + +/* max idle check address increment */ +#define MAX_IDLE_CHK_INCREMENT 0x10000 + +/* indicates an undefined idle check line index */ +#define IDLE_CHK_UNDEFINED_LINE_IDX 0xffffff + +/* max number of register values following the idle check header for LSI */ +#define IDLE_CHK_MAX_LSI_DUMP_REGS 2 + +/* arguments for IDLE_CHK_MACRO_TYPE_QM_RD_WR */ +#define IDLE_CHK_QM_RD_WR_PTR 0 +#define IDLE_CHK_QM_RD_WR_BANK 1 + +/**************************************/ +/* HSI Functions constants and macros */ 
+/**************************************/ + +/* Number of VLAN priorities */ +#define NUM_OF_VLAN_PRIORITIES 8 + +/* + * The eth storm context for the Ustorm + */ — wait
+struct idle_chk_result_hdr { + __le16 rule_idx /* Idle check rule index in CSV file */; + __le16 loop_idx /* the loop index in which the failure occurred */; + __le16 num_fw_values; + __le16 data; +#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_MASK 0xF +#define IDLE_CHK_RESULT_HDR_NUM_LSI_VALUES_SHIFT 0 +#define IDLE_CHK_RESULT_HDR_LOOP_VALID_MASK 0x1 +#define IDLE_CHK_RESULT_HDR_LOOP_VALID_SHIFT 4 +#define IDLE_CHK_RESULT_HDR_SEVERITY_MASK 0x7 +#define IDLE_CHK_RESULT_HDR_SEVERITY_SHIFT 5 +#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_MASK 0xF +#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_SHIFT 8 +#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_MASK 0xF +#define IDLE_CHK_RESULT_HDR_MACRO_TYPE_ARG_SHIFT 12 +}; + +/* + * Idle Check rule + */ +struct idle_chk_rule { + __le32 data; +#define IDLE_CHK_RULE_ASIC_CHIP_MASK_MASK 0xF +#define IDLE_CHK_RULE_ASIC_CHIP_MASK_SHIFT 0 +#define IDLE_CHK_RULE_SIM_CHIP_MASK_MASK 0xF +#define IDLE_CHK_RULE_SIM_CHIP_MASK_SHIFT 4 +#define IDLE_CHK_RULE_BLOCK_ID_MASK 0xFF +#define IDLE_CHK_RULE_BLOCK_ID_SHIFT 8 +#define IDLE_CHK_RULE_MACRO_TYPE_MASK 0xF +#define IDLE_CHK_RULE_MACRO_TYPE_SHIFT 16 +#define IDLE_CHK_RULE_SEVERITY_MASK 0x7 +#define IDLE_CHK_RULE_SEVERITY_SHIFT 20 +#define IDLE_CHK_RULE_RESERVED_MASK 0x1 +#define IDLE_CHK_RULE_RESERVED_SHIFT 23 +#define IDLE_CHK_RULE_PRED_ID_MASK 0xFF +#define IDLE_CHK_RULE_PRED_ID_SHIFT 24 + __le16 loop; + __le16 increment + /* address increment of first argument register on each iteration */ + ; + __le32 reg_addr[3]; + __le32 pred_imm[3] + /* immediate values passed as arguments to the idle check rule */; +}; + +/* + * idle check severity types + */ +enum idle_chk_severity_types { + IDLE_CHK_SEVERITY_ERROR /* idle check failure should cause an error */, + IDLE_CHK_SEVERITY_ERROR_NO_TRAFFIC + , + IDLE_CHK_SEVERITY_WARNING + /* idle check failure should cause a warning */, + MAX_IDLE_CHK_SEVERITY_TYPES +}; + +/* + * init array header: raw + */ +struct init_array_raw_hdr { + __le32 data; +#define INIT_ARRAY_RAW_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_RAW_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_RAW_HDR_PARAMS_MASK 0xFFFFFFF +#define INIT_ARRAY_RAW_HDR_PARAMS_SHIFT 4 +}; + +/* + * init array header: standard + */ +struct init_array_standard_hdr { + __le32 data; +#define INIT_ARRAY_STANDARD_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_STANDARD_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_STANDARD_HDR_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_STANDARD_HDR_SIZE_SHIFT 4 +}; + +/* + * init array header: zipped + */ +struct init_array_zipped_hdr { + __le32 data; +#define INIT_ARRAY_ZIPPED_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_ZIPPED_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_MASK 0xFFFFFFF +#define INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE_SHIFT 4 +}; + +/* + * init array header: pattern + */ +struct init_array_pattern_hdr { + __le32 data; +#define INIT_ARRAY_PATTERN_HDR_TYPE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_TYPE_SHIFT 0 +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_MASK 0xF +#define INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE_SHIFT 4 +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_MASK 0xFFFFFF +#define INIT_ARRAY_PATTERN_HDR_REPETITIONS_SHIFT 8 +}; + +/* + * init array header union + */ +union init_array_hdr { + struct init_array_raw_hdr raw /* raw init array header */; + struct init_array_standard_hdr standard /* standard init array header */ + ; + struct init_array_zipped_hdr zipped /* zipped init array header */; + struct init_array_pattern_hdr pattern /* pattern init array header */; +}; + +/* + * init array types + */ +enum init_array_types { + 
INIT_ARR_STANDARD /* standard init array */, + INIT_ARR_ZIPPED /* zipped init array */, + INIT_ARR_PATTERN /* a repeated pattern */, + MAX_INIT_ARRAY_TYPES +}; + +/* + * init operation: callback + */ +struct init_callback_op { + __le32 op_data; +#define INIT_CALLBACK_OP_OP_MASK 0xF +#define INIT_CALLBACK_OP_OP_SHIFT 0 +#define INIT_CALLBACK_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_CALLBACK_OP_RESERVED_SHIFT 4 + __le16 callback_id /* Callback ID */; + __le16 block_id /* Blocks ID */; +}; + +/* + * init operation: delay + */ +struct init_delay_op { + __le32 op_data; +#define INIT_DELAY_OP_OP_MASK 0xF +#define INIT_DELAY_OP_OP_SHIFT 0 +#define INIT_DELAY_OP_RESERVED_MASK 0xFFFFFFF +#define INIT_DELAY_OP_RESERVED_SHIFT 4 + __le32 delay /* delay in us */; +}; + +/* + * init operation: if_mode + */ +struct init_if_mode_op { + __le32 op_data; +#define INIT_IF_MODE_OP_OP_MASK 0xF +#define INIT_IF_MODE_OP_OP_SHIFT 0 +#define INIT_IF_MODE_OP_RESERVED1_MASK 0xFFF +#define INIT_IF_MODE_OP_RESERVED1_SHIFT 4 +#define INIT_IF_MODE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_MODE_OP_CMD_OFFSET_SHIFT 16 + __le16 reserved2; + __le16 modes_buf_offset + /* offset (in bytes) in modes expression buffer */; +}; + +/* + * init operation: if_phase + */ +struct init_if_phase_op { + __le32 op_data; +#define INIT_IF_PHASE_OP_OP_MASK 0xF +#define INIT_IF_PHASE_OP_OP_SHIFT 0 +#define INIT_IF_PHASE_OP_DMAE_ENABLE_MASK 0x1 +#define INIT_IF_PHASE_OP_DMAE_ENABLE_SHIFT 4 +#define INIT_IF_PHASE_OP_RESERVED1_MASK 0x7FF +#define INIT_IF_PHASE_OP_RESERVED1_SHIFT 5 +#define INIT_IF_PHASE_OP_CMD_OFFSET_MASK 0xFFFF +#define INIT_IF_PHASE_OP_CMD_OFFSET_SHIFT 16 + __le32 phase_data; +#define INIT_IF_PHASE_OP_PHASE_MASK 0xFF +#define INIT_IF_PHASE_OP_PHASE_SHIFT 0 +#define INIT_IF_PHASE_OP_RESERVED2_MASK 0xFF +#define INIT_IF_PHASE_OP_RESERVED2_SHIFT 8 +#define INIT_IF_PHASE_OP_PHASE_ID_MASK 0xFFFF +#define INIT_IF_PHASE_OP_PHASE_ID_SHIFT 16 +}; + +/* + * init mode operators + */ +enum init_mode_ops { + INIT_MODE_OP_NOT /* init mode not operator */, + INIT_MODE_OP_OR /* init mode or operator */, + INIT_MODE_OP_AND /* init mode and operator */, + MAX_INIT_MODE_OPS +}; + +/* + * init operation: raw + */ +struct init_raw_op { + __le32 op_data; +#define INIT_RAW_OP_OP_MASK 0xF +#define INIT_RAW_OP_OP_SHIFT 0 +#define INIT_RAW_OP_PARAM1_MASK 0xFFFFFFF +#define INIT_RAW_OP_PARAM1_SHIFT 4 + __le32 param2 /* Init param 2 */; +}; + +/* + * init array params + */ +struct init_op_array_params { + __le16 size /* array size in dwords */; + __le16 offset /* array start offset in dwords */; +}; + +/* + * Write init operation arguments + */ +union init_write_args { + __le32 inline_val + /* value to write, used when init source is INIT_SRC_INLINE */; + __le32 zeros_count; + __le32 array_offset + /* array offset to write, used when init source is INIT_SRC_ARRAY */ + ; + struct init_op_array_params runtime; +}; + +/* + * init operation: write + */ +struct init_write_op { + __le32 data; +#define INIT_WRITE_OP_OP_MASK 0xF +#define INIT_WRITE_OP_OP_SHIFT 0 +#define INIT_WRITE_OP_SOURCE_MASK 0x7 +#define INIT_WRITE_OP_SOURCE_SHIFT 4 +#define INIT_WRITE_OP_RESERVED_MASK 0x1 +#define INIT_WRITE_OP_RESERVED_SHIFT 7 +#define INIT_WRITE_OP_WIDE_BUS_MASK 0x1 +#define INIT_WRITE_OP_WIDE_BUS_SHIFT 8 +#define INIT_WRITE_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_WRITE_OP_ADDRESS_SHIFT 9 + union init_write_args args /* Write init operation arguments */; +}; + +/* + * init operation: read + */ +struct init_read_op { + __le32 op_data; +#define INIT_READ_OP_OP_MASK 0xF 
+#define INIT_READ_OP_OP_SHIFT 0 +#define INIT_READ_OP_POLL_TYPE_MASK 0xF +#define INIT_READ_OP_POLL_TYPE_SHIFT 4 +#define INIT_READ_OP_RESERVED_MASK 0x1 +#define INIT_READ_OP_RESERVED_SHIFT 8 +#define INIT_READ_OP_ADDRESS_MASK 0x7FFFFF +#define INIT_READ_OP_ADDRESS_SHIFT 9 + __le32 expected_val + /* expected polling value, used only when polling is done */; +}; + +/* + * Init operations union + */ +union init_op { + struct init_raw_op raw /* raw init operation */; + struct init_write_op write /* write init operation */; + struct init_read_op read /* read init operation */; + struct init_if_mode_op if_mode /* if_mode init operation */; + struct init_if_phase_op if_phase /* if_phase init operation */; + struct init_callback_op callback /* callback init operation */; + struct init_delay_op delay /* delay init operation */; +}; + +/* + * Init command operation types + */ +enum init_op_types { + INIT_OP_READ /* GRC read init command */, + INIT_OP_WRITE /* GRC write init command */, + INIT_OP_IF_MODE + /* Skip init commands if the init modes expression doesn't match */, + INIT_OP_IF_PHASE + /* Skip init commands if the init phase doesn't match */, + INIT_OP_DELAY /* delay init command */, + INIT_OP_CALLBACK /* callback init command */, + MAX_INIT_OP_TYPES +}; + +/* + * init polling types + */ +enum init_poll_types { + INIT_POLL_NONE /* No polling */, + INIT_POLL_EQ /* init value is included in the init command */, + INIT_POLL_OR /* init value is all zeros */, + INIT_POLL_AND /* init value is an array of values */, + MAX_INIT_POLL_TYPES +}; + +/* + * init source types + */ +enum init_source_types { + INIT_SRC_INLINE /* init value is included in the init command */, + INIT_SRC_ZEROS /* init value is all zeros */, + INIT_SRC_ARRAY /* init value is an array of values */, + INIT_SRC_RUNTIME /* init value is provided during runtime */, + MAX_INIT_SOURCE_TYPES +}; + +/* + * Internal RAM Offsets macro data + */ +struct iro { + __le32 base /* RAM field offset */; + __le16 m1 /* multiplier 1 */; + __le16 m2 /* multiplier 2 */; + __le16 m3 /* multiplier 3 */; + __le16 size /* RAM field size */; +}; + +/* + * register descriptor + */ +struct reg_desc { + __le32 data; +#define REG_DESC_ADDRESS_MASK 0xFFFFFF +#define REG_DESC_ADDRESS_SHIFT 0 +#define REG_DESC_SIZE_MASK 0xFF +#define REG_DESC_SIZE_SHIFT 24 +}; + +/* + * Debug Bus block data + */ +struct dbg_bus_block_data { + u8 enabled /* Indicates if the block is enabled for recording (0/1) */; + u8 hw_id /* HW ID associated with the block */; + u8 line_num /* Debug line number to select */; + u8 right_shift /* Number of units to right the debug data (0-3) */; + u8 cycle_en /* 4-bit value: bit i set -> unit i is enabled. */; + u8 force_valid /* 4-bit value: bit i set -> unit i is forced valid. */; + u8 force_frame + /* 4-bit value: bit i set -> unit i frame bit is forced. 
*/; + u8 reserved; +}; + +/* + * Debug Bus Clients + */ +enum dbg_bus_clients { + DBG_BUS_CLIENT_RBCN, + DBG_BUS_CLIENT_RBCP, + DBG_BUS_CLIENT_RBCR, + DBG_BUS_CLIENT_RBCT, + DBG_BUS_CLIENT_RBCU, + DBG_BUS_CLIENT_RBCF, + DBG_BUS_CLIENT_RBCX, + DBG_BUS_CLIENT_RBCS, + DBG_BUS_CLIENT_RBCH, + DBG_BUS_CLIENT_RBCZ, + DBG_BUS_CLIENT_OTHER_ENGINE, + DBG_BUS_CLIENT_TIMESTAMP, + DBG_BUS_CLIENT_CPU, + DBG_BUS_CLIENT_RBCY, + DBG_BUS_CLIENT_RBCQ, + DBG_BUS_CLIENT_RBCM, + DBG_BUS_CLIENT_RBCB, + DBG_BUS_CLIENT_RBCW, + DBG_BUS_CLIENT_RBCV, + MAX_DBG_BUS_CLIENTS +}; + +/* + * Debug Bus constraint operation types + */ +enum dbg_bus_constraint_ops { + DBG_BUS_CONSTRAINT_OP_EQ /* equal */, + DBG_BUS_CONSTRAINT_OP_NE /* not equal */, + DBG_BUS_CONSTRAINT_OP_LT /* less than */, + DBG_BUS_CONSTRAINT_OP_LTC /* less than (cyclic) */, + DBG_BUS_CONSTRAINT_OP_LE /* less than or equal */, + DBG_BUS_CONSTRAINT_OP_LEC /* less than or equal (cyclic) */, + DBG_BUS_CONSTRAINT_OP_GT /* greater than */, + DBG_BUS_CONSTRAINT_OP_GTC /* greater than (cyclic) */, + DBG_BUS_CONSTRAINT_OP_GE /* greater than or equal */, + DBG_BUS_CONSTRAINT_OP_GEC /* greater than or equal (cyclic) */, + MAX_DBG_BUS_CONSTRAINT_OPS +}; + +/* + * Debug Bus memory address + */ +struct dbg_bus_mem_addr { + __le32 lo; + __le32 hi; +}; + +/* + * Debug Bus PCI buffer data + */ +struct dbg_bus_pci_buf_data { + struct dbg_bus_mem_addr phys_addr /* PCI buffer physical address */; + struct dbg_bus_mem_addr virt_addr /* PCI buffer virtual address */; + __le32 size /* PCI buffer size in bytes */; +}; + +/* + * Debug Bus Storm EID range filter params + */ +struct dbg_bus_storm_eid_range_params { + u8 min /* Minimal event ID to filter on */; + u8 max /* Maximal event ID to filter on */; +}; + +/* + * Debug Bus Storm EID mask filter params + */ +struct dbg_bus_storm_eid_mask_params { + u8 val /* Event ID value */; + u8 mask /* Event ID mask. 1s in the mask = dont care bits. */; +}; + +/* + * Debug Bus Storm EID filter params + */ +union dbg_bus_storm_eid_params { + struct dbg_bus_storm_eid_range_params range + /* EID range filter params */; + struct dbg_bus_storm_eid_mask_params mask /* EID mask filter params */; +}; + +/* + * Debug Bus Storm data + */ +struct dbg_bus_storm_data { + u8 fast_enabled; + u8 fast_mode + /* Fast debug Storm mode, valid only if fast_enabled is set */; + u8 slow_enabled; + u8 slow_mode + /* Slow debug Storm mode, valid only if slow_enabled is set */; + u8 hw_id /* HW ID associated with the Storm */; + u8 eid_filter_en /* Indicates if EID filtering is performed (0/1) */; + u8 eid_range_not_mask; + u8 cid_filter_en /* Indicates if CID filtering is performed (0/1) */; + union dbg_bus_storm_eid_params eid_filter_params; + __le16 reserved; + __le32 cid /* CID to filter on. Valid only if cid_filter_en is set. 
*/; +}; + +/* + * Debug Bus data + */ +struct dbg_bus_data { + __le32 app_version /* The tools version number of the application */; + u8 state /* The current debug bus state */; + u8 hw_dwords /* HW dwords per cycle */; + u8 next_hw_id /* Next HW ID to be associated with an input */; + u8 num_enabled_blocks /* Number of blocks enabled for recording */; + u8 num_enabled_storms /* Number of Storms enabled for recording */; + u8 target /* Output target */; + u8 next_trigger_state /* ID of next trigger state to be added */; + u8 next_constraint_id + /* ID of next filter/trigger constraint to be added */; + u8 one_shot_en /* Indicates if one-shot mode is enabled (0/1) */; + u8 grc_input_en /* Indicates if GRC recording is enabled (0/1) */; + u8 timestamp_input_en + /* Indicates if timestamp recording is enabled (0/1) */; + u8 filter_en /* Indicates if the recording filter is enabled (0/1) */; + u8 trigger_en /* Indicates if the recording trigger is enabled (0/1) */ + ; + u8 adding_filter; + u8 filter_pre_trigger; + u8 filter_post_trigger; + u8 unify_inputs; + u8 rcv_from_other_engine; + struct dbg_bus_pci_buf_data pci_buf; + __le16 reserved; + struct dbg_bus_block_data blocks[80] /* Debug Bus data for each block */ + ; + struct dbg_bus_storm_data storms[6] /* Debug Bus data for each block */ + ; +}; + +/* + * Debug bus filter types + */ +enum dbg_bus_filter_types { + DBG_BUS_FILTER_TYPE_OFF /* filter always off */, + DBG_BUS_FILTER_TYPE_PRE /* filter before trigger only */, + DBG_BUS_FILTER_TYPE_POST /* filter after trigger only */, + DBG_BUS_FILTER_TYPE_ON /* filter always on */, + MAX_DBG_BUS_FILTER_TYPES +}; + +/* + * Debug bus frame modes + */ +enum dbg_bus_frame_modes { + DBG_BUS_FRAME_MODE_0HW_4ST = 0 /* 0 HW dwords, 4 Storm dwords */, + DBG_BUS_FRAME_MODE_4HW_0ST = 3 /* 4 HW dwords, 0 Storm dwords */, + DBG_BUS_FRAME_MODE_8HW_0ST = 4 /* 8 HW dwords, 0 Storm dwords */, + MAX_DBG_BUS_FRAME_MODES +}; + +/* + * Debug bus input types + */ +enum dbg_bus_input_types { + DBG_BUS_INPUT_TYPE_STORM, + DBG_BUS_INPUT_TYPE_BLOCK, + MAX_DBG_BUS_INPUT_TYPES +}; + +/* + * Debug bus other engine mode + */ +enum dbg_bus_other_engine_modes { + DBG_BUS_OTHER_ENGINE_MODE_NONE, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_TX, + DBG_BUS_OTHER_ENGINE_MODE_DOUBLE_BW_RX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_TX, + DBG_BUS_OTHER_ENGINE_MODE_CROSS_ENGINE_RX, + MAX_DBG_BUS_OTHER_ENGINE_MODES +}; + +/* + * Debug bus post-trigger recording types + */ +enum dbg_bus_post_trigger_types { + DBG_BUS_POST_TRIGGER_RECORD /* start recording after trigger */, + DBG_BUS_POST_TRIGGER_DROP /* drop data after trigger */, + MAX_DBG_BUS_POST_TRIGGER_TYPES +}; + +/* + * Debug bus pre-trigger recording types + */ +enum dbg_bus_pre_trigger_types { + DBG_BUS_PRE_TRIGGER_START_FROM_ZERO /* start recording from time 0 */, + DBG_BUS_PRE_TRIGGER_NUM_CHUNKS + /* start recording some chunks before trigger */, + DBG_BUS_PRE_TRIGGER_DROP /* drop data before trigger */, + MAX_DBG_BUS_PRE_TRIGGER_TYPES +}; + +/* + * Debug bus SEMI frame modes + */ +enum dbg_bus_semi_frame_modes { + DBG_BUS_SEMI_FRAME_MODE_0SLOW_4FAST = + 0 /* 0 slow dwords, 4 fast dwords */, + DBG_BUS_SEMI_FRAME_MODE_4SLOW_0FAST = + 3 /* 4 slow dwords, 0 fast dwords */, + MAX_DBG_BUS_SEMI_FRAME_MODES +}; + +/* + * Debug bus states + */ +enum dbg_bus_states { + DBG_BUS_STATE_BEFORE_RECORD /* before debug bus the recording starts */ + , + DBG_BUS_STATE_DURING_RECORD /* during debug bus recording */, + DBG_BUS_STATE_AFTER_RECORD /* after debug bus recording */, + MAX_DBG_BUS_STATES +}; 
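The HSI structures in this header (dbg_dump_mem_desc, idle_chk_result_hdr, the dbg_dump_regs_* descriptors, and so on) expose their sub-fields only as *_MASK/*_SHIFT define pairs rather than C bitfields, which keeps the register layout independent of compiler bitfield ordering. The sketch below shows how such pairs are typically consumed; it is illustrative only and not part of this patch. The GET_FIELD()/SET_FIELD() helpers are written out here as an assumption (the driver's real definitions live in the common ecore headers, which are not in this hunk), and the little-endian conversion a real caller would apply to __le32 words is omitted for brevity.

#include <stdint.h>

typedef uint32_t u32;

/* Assumed helpers; the real ones come from the common ecore headers. */
#define GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)
#define SET_FIELD(value, name, flag) \
	do { \
		(value) &= ~((u32)name##_MASK << name##_SHIFT); \
		(value) |= ((u32)(flag) & name##_MASK) << name##_SHIFT; \
	} while (0)

/* Layout copied from struct dbg_dump_mem_desc above, for self-containment */
#define DBG_DUMP_MEM_DESC_ADDRESS_MASK 0xFFFFFF
#define DBG_DUMP_MEM_DESC_ADDRESS_SHIFT 0
#define DBG_DUMP_MEM_DESC_LENGTH_MASK 0x3FFFF
#define DBG_DUMP_MEM_DESC_LENGTH_SHIFT 0
#define DBG_DUMP_MEM_DESC_BLOCK_ID_MASK 0xFF
#define DBG_DUMP_MEM_DESC_BLOCK_ID_SHIFT 24

/* Pack a 24-bit address into dword0 of a memory dump descriptor */
static u32 example_pack_dword0(u32 address)
{
	u32 dword0 = 0;

	SET_FIELD(dword0, DBG_DUMP_MEM_DESC_ADDRESS, address);
	return dword0;
}

/* Extract the length and block ID packed into dword1 */
static void example_unpack_dword1(u32 dword1, u32 *length, u32 *block_id)
{
	*length = GET_FIELD(dword1, DBG_DUMP_MEM_DESC_LENGTH);
	*block_id = GET_FIELD(dword1, DBG_DUMP_MEM_DESC_BLOCK_ID);
}

This is the same pattern the ecore_hw.c code added later in this series relies on when it calls SET_FIELD()/GET_FIELD() on the PXP pretend control word and the concrete-FID value.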
+ +/* + * Debug Bus Storm modes + */ +enum dbg_bus_storm_modes { + DBG_BUS_STORM_MODE_PRINTF /* store data (fast debug) */, + DBG_BUS_STORM_MODE_PRAM_ADDR /* pram address (fast debug) */, + DBG_BUS_STORM_MODE_DRA_RW /* DRA read/write data (fast debug) */, + DBG_BUS_STORM_MODE_DRA_W /* DRA write data (fast debug) */, + DBG_BUS_STORM_MODE_LD_ST_ADDR /* load/store address (fast debug) */, + DBG_BUS_STORM_MODE_DRA_FSM /* DRA state machines (fast debug) */, + DBG_BUS_STORM_MODE_RH /* recording handlers (fast debug) */, + DBG_BUS_STORM_MODE_FOC /* FOC: FIN + DRA Rd (slow debug) */, + DBG_BUS_STORM_MODE_EXT_STORE /* FOC: External Store (slow) */, + MAX_DBG_BUS_STORM_MODES +}; + +/* + * Debug bus target IDs + */ +enum dbg_bus_targets { + DBG_BUS_TARGET_ID_INT_BUF + /* records debug bus to DBG block internal buffer */, + DBG_BUS_TARGET_ID_NIG /* records debug bus to the NW */, + DBG_BUS_TARGET_ID_PCI /* records debug bus to a PCI buffer */, + MAX_DBG_BUS_TARGETS +}; + +/* + * GRC Dump data + */ +struct dbg_grc_data { + u8 is_updated /* Indicates if the GRC Dump data is updated (0/1) */; + u8 chip_id /* Chip ID */; + u8 chip_mask /* Chip mask */; + u8 reserved; + __le32 max_dump_dwords /* Max GRC Dump size in dwords */; + __le32 param_val[40]; + u8 param_set_by_user[40]; +}; + +/* + * Debug GRC params + */ +enum dbg_grc_params { + DBG_GRC_PARAM_DUMP_TSTORM /* dump Tstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_MSTORM /* dump Mstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_USTORM /* dump Ustorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_XSTORM /* dump Xstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_YSTORM /* dump Ystorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_PSTORM /* dump Pstorm memories (0/1) */, + DBG_GRC_PARAM_DUMP_REGS /* dump non-memory registers (0/1) */, + DBG_GRC_PARAM_DUMP_RAM /* dump Storm internal RAMs (0/1) */, + DBG_GRC_PARAM_DUMP_PBUF /* dump Storm passive buffer (0/1) */, + DBG_GRC_PARAM_DUMP_IOR /* dump Storm IORs (0/1) */, + DBG_GRC_PARAM_DUMP_VFC /* dump VFC memories (0/1) */, + DBG_GRC_PARAM_DUMP_CM_CTX /* dump CM contexts (0/1) */, + DBG_GRC_PARAM_DUMP_PXP /* dump PXP memories (0/1) */, + DBG_GRC_PARAM_DUMP_RSS /* dump RSS memories (0/1) */, + DBG_GRC_PARAM_DUMP_CAU /* dump CAU memories (0/1) */, + DBG_GRC_PARAM_DUMP_QM /* dump QM memories (0/1) */, + DBG_GRC_PARAM_DUMP_MCP /* dump MCP memories (0/1) */, + DBG_GRC_PARAM_RESERVED /* reserved */, + DBG_GRC_PARAM_DUMP_CFC /* dump CFC memories (0/1) */, + DBG_GRC_PARAM_DUMP_IGU /* dump IGU memories (0/1) */, + DBG_GRC_PARAM_DUMP_BRB /* dump BRB memories (0/1) */, + DBG_GRC_PARAM_DUMP_BTB /* dump BTB memories (0/1) */, + DBG_GRC_PARAM_DUMP_BMB /* dump BMB memories (0/1) */, + DBG_GRC_PARAM_DUMP_NIG /* dump NIG memories (0/1) */, + DBG_GRC_PARAM_DUMP_MULD /* dump MULD memories (0/1) */, + DBG_GRC_PARAM_DUMP_PRS /* dump PRS memories (0/1) */, + DBG_GRC_PARAM_DUMP_DMAE /* dump PRS memories (0/1) */, + DBG_GRC_PARAM_DUMP_TM /* dump TM (timers) memories (0/1) */, + DBG_GRC_PARAM_DUMP_SDM /* dump SDM memories (0/1) */, + DBG_GRC_PARAM_DUMP_STATIC /* dump static debug data (0/1) */, + DBG_GRC_PARAM_UNSTALL /* un-stall Storms after dump (0/1) */, + DBG_GRC_PARAM_NUM_LCIDS /* number of LCIDs (0..320) */, + DBG_GRC_PARAM_NUM_LTIDS /* number of LTIDs (0..320) */, + DBG_GRC_PARAM_EXCLUDE_ALL + /* preset: exclude all memories from dump (1 only) */, + DBG_GRC_PARAM_CRASH + /* preset: include memories for crash dump (1 only) */, + DBG_GRC_PARAM_PARITY_SAFE + /* perform dump only if MFW is responding (0/1) */, + DBG_GRC_PARAM_DUMP_CM /* dump CM memories 
(0/1) */, + MAX_DBG_GRC_PARAMS +}; + +/* + * Debug reset registers + */ +enum dbg_reset_regs { + DBG_RESET_REG_MISCS_PL_UA, + DBG_RESET_REG_MISCS_PL_HV, + DBG_RESET_REG_MISC_PL_UA, + DBG_RESET_REG_MISC_PL_HV, + DBG_RESET_REG_MISC_PL_PDA_VMAIN_1, + DBG_RESET_REG_MISC_PL_PDA_VMAIN_2, + DBG_RESET_REG_MISC_PL_PDA_VAUX, + MAX_DBG_RESET_REGS +}; + +/* + * @DPDK Debug status codes + */ +enum dbg_status { + DBG_STATUS_OK, + DBG_STATUS_APP_VERSION_NOT_SET, + DBG_STATUS_UNSUPPORTED_APP_VERSION, + DBG_STATUS_DBG_BLOCK_NOT_RESET, + DBG_STATUS_INVALID_ARGS, + DBG_STATUS_OUTPUT_ALREADY_SET, + DBG_STATUS_INVALID_PCI_BUF_SIZE, + DBG_STATUS_PCI_BUF_ALLOC_FAILED, + DBG_STATUS_PCI_BUF_NOT_ALLOCATED, + DBG_STATUS_TOO_MANY_INPUTS, + DBG_STATUS_INPUT_OVERLAP, + DBG_STATUS_HW_ONLY_RECORDING, + DBG_STATUS_STORM_ALREADY_ENABLED, + DBG_STATUS_STORM_NOT_ENABLED, + DBG_STATUS_BLOCK_ALREADY_ENABLED, + DBG_STATUS_BLOCK_NOT_ENABLED, + DBG_STATUS_NO_INPUT_ENABLED, + DBG_STATUS_NO_FILTER_TRIGGER_64B, + DBG_STATUS_FILTER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_ALREADY_ENABLED, + DBG_STATUS_TRIGGER_NOT_ENABLED, + DBG_STATUS_CANT_ADD_CONSTRAINT, + DBG_STATUS_TOO_MANY_TRIGGER_STATES, + DBG_STATUS_TOO_MANY_CONSTRAINTS, + DBG_STATUS_RECORDING_NOT_STARTED, + DBG_STATUS_NO_DATA_TRIGGERED, + DBG_STATUS_NO_DATA_RECORDED, + DBG_STATUS_DUMP_BUF_TOO_SMALL, + DBG_STATUS_DUMP_NOT_CHUNK_ALIGNED, + DBG_STATUS_UNKNOWN_CHIP, + DBG_STATUS_VIRT_MEM_ALLOC_FAILED, + DBG_STATUS_BLOCK_IN_RESET, + DBG_STATUS_INVALID_TRACE_SIGNATURE, + DBG_STATUS_INVALID_NVRAM_BUNDLE, + DBG_STATUS_NVRAM_GET_IMAGE_FAILED, + DBG_STATUS_NON_ALIGNED_NVRAM_IMAGE, + DBG_STATUS_NVRAM_READ_FAILED, + DBG_STATUS_IDLE_CHK_PARSE_FAILED, + DBG_STATUS_MCP_TRACE_BAD_DATA, + DBG_STATUS_MCP_TRACE_NO_META, + DBG_STATUS_MCP_COULD_NOT_HALT, + DBG_STATUS_MCP_COULD_NOT_RESUME, + DBG_STATUS_DMAE_FAILED, + DBG_STATUS_SEMI_FIFO_NOT_EMPTY, + DBG_STATUS_IGU_FIFO_BAD_DATA, + DBG_STATUS_MCP_COULD_NOT_MASK_PRTY, + DBG_STATUS_FW_ASSERTS_PARSE_FAILED, + DBG_STATUS_REG_FIFO_BAD_DATA, + DBG_STATUS_PROTECTION_OVERRIDE_BAD_DATA, + MAX_DBG_STATUS +}; + +/* + * Debug Storms IDs + */ +enum dbg_storms { + DBG_TSTORM_ID, + DBG_MSTORM_ID, + DBG_USTORM_ID, + DBG_XSTORM_ID, + DBG_YSTORM_ID, + DBG_PSTORM_ID, + MAX_DBG_STORMS +}; + +/* + * Idle Check data + */ +struct idle_chk_data { + __le32 buf_size /* Idle check buffer size in dwords */; + u8 buf_size_set + /* Indicates if the idle check buffer size was set (0/1) */; + u8 reserved1; + __le16 reserved2; +}; + +/* + * Idle Check data + */ +struct mcp_trace_data { + __le32 buf_size /* MCP Trace buffer size in dwords */; + u8 buf_size_set + /* Indicates if the MCP Trace buffer size was set (0/1) */; + u8 reserved1; + __le16 reserved2; +}; + +/* + * Debug Tools data (per HW function) + */ +struct dbg_tools_data { + struct dbg_grc_data grc /* GRC Dump data */; + struct dbg_bus_data bus /* Debug Bus data */; + struct idle_chk_data idle_chk /* Idle Check data */; + struct mcp_trace_data mcp_trace /* MCP Trace data */; + u8 block_in_reset[80] /* Indicates if a block is in reset state (0/1) */ + ; + u8 chip_id /* Chip ID (from enum chip_ids) */; + u8 chip_mask + /* Chip mask = bit index chip_id is set, the rest are cleared */; + u8 initialized /* Indicates if the data was initialized */; + u8 reset_state_updated + /* Indicates if blocks reset state is updated (0/1) */; +}; + +/* + * BRB RAM init requirements + */ +struct init_brb_ram_req { + __le32 guranteed_per_tc /* guaranteed size per TC, in bytes */; + __le32 headroom_per_tc /* headroom size per TC, in bytes */; + 
__le32 min_pkt_size /* min packet size, in bytes */; + __le32 max_ports_per_engine /* min packet size, in bytes */; + u8 num_active_tcs[MAX_NUM_PORTS] /* number of active TCs per port */; +}; + +/* + * ETS per-TC init requirements + */ +struct init_ets_tc_req { + u8 use_sp; + u8 use_wfq; + __le16 weight /* An arbitration weight. Valid only if use_wfq is set. */ + ; +}; + +/* + * ETS init requirements + */ +struct init_ets_req { + __le32 mtu /* Max packet size (in bytes) */; + struct init_ets_tc_req tc_req[NUM_OF_TCS] + /* ETS initialization requirements per TC. */; +}; + +/* + * NIG LB RL init requirements + */ +struct init_nig_lb_rl_req { + __le16 lb_mac_rate; + __le16 lb_rate; + __le32 mtu /* Max packet size (in bytes) */; + __le16 tc_rate[NUM_OF_PHYS_TCS]; +}; + +/* + * NIG TC mapping for each priority + */ +struct init_nig_pri_tc_map_entry { + u8 tc_id /* the mapped TC ID */; + u8 valid /* indicates if the mapping entry is valid */; +}; + +/* + * NIG priority to TC map init requirements + */ +struct init_nig_pri_tc_map_req { + struct init_nig_pri_tc_map_entry pri[NUM_OF_VLAN_PRIORITIES]; +}; + +/* + * QM per-port init parameters + */ +struct init_qm_port_params { + u8 active /* Indicates if this port is active */; + u8 num_active_phys_tcs /* number of physical TCs used by this port */; + __le16 num_pbf_cmd_lines + /* number of PBF command lines that can be used by this port */; + __le16 num_btb_blocks + /* number of BTB blocks that can be used by this port */; + __le16 reserved; +}; + +/* + * QM per-PQ init parameters + */ +struct init_qm_pq_params { + u8 vport_id /* VPORT ID */; + u8 tc_id /* TC ID */; + u8 wrr_group /* WRR group */; + u8 reserved; +}; + +/* + * QM per-vport init parameters + */ +struct init_qm_vport_params { + __le32 vport_rl; + __le16 vport_wfq; + __le16 first_tx_pq_id[NUM_OF_TCS] + /* the first Tx PQ ID associated with this VPORT for each TC. */; +}; + +#endif /* __ECORE_HSI_TOOLS__ */ diff --git a/drivers/net/qede/base/ecore_hw.c b/drivers/net/qede/base/ecore_hw.c new file mode 100644 index 00000000..5403b94b --- /dev/null +++ b/drivers/net/qede/base/ecore_hw.c @@ -0,0 +1,910 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" +#include "ecore_hsi_common.h" +#include "ecore_status.h" +#include "ecore.h" +#include "ecore_hw.h" +#include "reg_addr.h" +#include "ecore_utils.h" +#include "ecore_iov_api.h" + +#ifndef ASIC_ONLY +#define ECORE_EMUL_FACTOR 2000 +#define ECORE_FPGA_FACTOR 200 +#endif + +#define ECORE_BAR_ACQUIRE_TIMEOUT 1000 + +/* Invalid values */ +#define ECORE_BAR_INVALID_OFFSET -1 + +struct ecore_ptt { + osal_list_entry_t list_entry; + unsigned int idx; + struct pxp_ptt_entry pxp; +}; + +struct ecore_ptt_pool { + osal_list_t free_list; + osal_spinlock_t lock; + struct ecore_ptt ptts[PXP_EXTERNAL_BAR_PF_WINDOW_NUM]; +}; + +enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt_pool *p_pool; + int i; + + p_pool = OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(struct ecore_ptt_pool)); + if (!p_pool) + return ECORE_NOMEM; + + OSAL_LIST_INIT(&p_pool->free_list); + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_pool->ptts[i].idx = i; + p_pool->ptts[i].pxp.offset = ECORE_BAR_INVALID_OFFSET; + p_pool->ptts[i].pxp.pretend.control = 0; + + /* There are special PTT entries that are taken only by design. + * The rest are added ot the list for general usage. 
+ */ + if (i >= RESERVED_PTT_MAX) + OSAL_LIST_PUSH_HEAD(&p_pool->ptts[i].list_entry, + &p_pool->free_list); + } + + p_hwfn->p_ptt_pool = p_pool; + OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_pool->lock); + OSAL_SPIN_LOCK_INIT(&p_pool->lock); + + return ECORE_SUCCESS; +} + +void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt *p_ptt; + int i; + + for (i = 0; i < PXP_EXTERNAL_BAR_PF_WINDOW_NUM; i++) { + p_ptt = &p_hwfn->p_ptt_pool->ptts[i]; + p_ptt->pxp.offset = ECORE_BAR_INVALID_OFFSET; + } +} + +void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn) +{ + if (p_hwfn->p_ptt_pool) + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->p_ptt_pool->lock); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->p_ptt_pool); + p_hwfn->p_ptt_pool = OSAL_NULL; +} + +struct ecore_ptt *ecore_ptt_acquire(struct ecore_hwfn *p_hwfn) +{ + struct ecore_ptt *p_ptt; + unsigned int i; + + /* Take the free PTT from the list */ + for (i = 0; i < ECORE_BAR_ACQUIRE_TIMEOUT; i++) { + OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock); + if (!OSAL_LIST_IS_EMPTY(&p_hwfn->p_ptt_pool->free_list)) + break; + OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock); + OSAL_MSLEEP(1); + } + + /* We should not time-out, but it can happen... --> Lock isn't held */ + if (i == ECORE_BAR_ACQUIRE_TIMEOUT) { + DP_NOTICE(p_hwfn, true, "Failed to allocate PTT\n"); + return OSAL_NULL; + } + + p_ptt = OSAL_LIST_FIRST_ENTRY(&p_hwfn->p_ptt_pool->free_list, + struct ecore_ptt, list_entry); + OSAL_LIST_REMOVE_ENTRY(&p_ptt->list_entry, + &p_hwfn->p_ptt_pool->free_list); + OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock); + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, "allocated ptt %d\n", p_ptt->idx); + + return p_ptt; +} + +void ecore_ptt_release(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + /* This PTT should not be set to pretend if it is being released */ + + OSAL_SPIN_LOCK(&p_hwfn->p_ptt_pool->lock); + OSAL_LIST_PUSH_HEAD(&p_ptt->list_entry, &p_hwfn->p_ptt_pool->free_list); + OSAL_SPIN_UNLOCK(&p_hwfn->p_ptt_pool->lock); +} + +u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + /* The HW is using DWORDS and we need to translate it to Bytes */ + return p_ptt->pxp.offset << 2; +} + +static u32 ecore_ptt_config_addr(struct ecore_ptt *p_ptt) +{ + return PXP_PF_WINDOW_ADMIN_PER_PF_START + + p_ptt->idx * sizeof(struct pxp_ptt_entry); +} + +u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt) +{ + return PXP_EXTERNAL_BAR_PF_WINDOW_START + + p_ptt->idx * PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE; +} + +void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 new_hw_addr) +{ + u32 prev_hw_addr; + + prev_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt); + + if (new_hw_addr == prev_hw_addr) + return; + + /* Update PTT entery in admin window */ + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Updating PTT entry %d to offset 0x%x\n", + p_ptt->idx, new_hw_addr); + + /* The HW is using DWORDS and the address is in Bytes */ + p_ptt->pxp.offset = new_hw_addr >> 2; + + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, offset), p_ptt->pxp.offset); +} + +static u32 ecore_set_ptt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 hw_addr) +{ + u32 win_hw_addr = ecore_ptt_get_hw_addr(p_hwfn, p_ptt); + u32 offset; + + offset = hw_addr - win_hw_addr; + + /* Verify the address is within the window */ + if (hw_addr < win_hw_addr || + offset >= PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE) { + ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr); + offset = 0; + } + + return ecore_ptt_get_bar_addr(p_ptt) + offset; +} + +struct ecore_ptt 
*ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn, + enum reserved_ptts ptt_idx) +{ + if (ptt_idx >= RESERVED_PTT_MAX) { + DP_NOTICE(p_hwfn, true, + "Requested PTT %d is out of range\n", ptt_idx); + return OSAL_NULL; + } + + return &p_hwfn->p_ptt_pool->ptts[ptt_idx]; +} + +void ecore_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 hw_addr, u32 val) +{ + u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr); + + REG_WR(p_hwfn, bar_addr, val); + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + OSAL_UDELAY(100); +#endif +} + +u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr) +{ + u32 bar_addr = ecore_set_ptt(p_hwfn, p_ptt, hw_addr); + u32 val = REG_RD(p_hwfn, bar_addr); + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "bar_addr 0x%x, hw_addr 0x%x, val 0x%x\n", + bar_addr, hw_addr, val); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + OSAL_UDELAY(100); +#endif + + return val; +} + +static void ecore_memcpy_hw(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *addr, + u32 hw_addr, osal_size_t n, bool to_device) +{ + u32 dw_count, *host_addr, hw_offset; + osal_size_t quota, done = 0; + u32 OSAL_IOMEM *reg_addr; + + while (done < n) { + quota = OSAL_MIN_T(osal_size_t, n - done, + PXP_EXTERNAL_BAR_PF_WINDOW_SINGLE_SIZE); + + if (IS_PF(p_hwfn->p_dev)) { + ecore_ptt_set_win(p_hwfn, p_ptt, hw_addr + done); + hw_offset = ecore_ptt_get_bar_addr(p_ptt); + } else { + hw_offset = hw_addr + done; + } + + dw_count = quota / 4; + host_addr = (u32 *)((u8 *)addr + done); + reg_addr = (u32 OSAL_IOMEM *)OSAL_REG_ADDR(p_hwfn, hw_offset); + + if (to_device) + while (dw_count--) + DIRECT_REG_WR(p_hwfn, reg_addr++, *host_addr++); + else + while (dw_count--) + *host_addr++ = DIRECT_REG_RD(p_hwfn, + reg_addr++); + + done += quota; + } +} + +void ecore_memcpy_from(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *dest, u32 hw_addr, osal_size_t n) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "hw_addr 0x%x, dest %p hw_addr 0x%x, size %lu\n", + hw_addr, dest, hw_addr, (unsigned long)n); + + ecore_memcpy_hw(p_hwfn, p_ptt, dest, hw_addr, n, false); +} + +void ecore_memcpy_to(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr, void *src, osal_size_t n) +{ + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "hw_addr 0x%x, hw_addr 0x%x, src %p size %lu\n", + hw_addr, hw_addr, src, (unsigned long)n); + + ecore_memcpy_hw(p_hwfn, p_ptt, src, hw_addr, n, true); +} + +void ecore_fid_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 fid) +{ + void *p_pretend; + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_IS_CONCRETE, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_FUNCTION, 1); + + /* Every pretend undos prev pretends, including previous port pretend */ + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + p_ptt->pxp.pretend.control = OSAL_CPU_TO_LE16(control); + + if (!GET_FIELD(fid, PXP_CONCRETE_FID_VFVALID)) + fid = GET_FIELD(fid, PXP_CONCRETE_FID_PFID); + + p_ptt->pxp.pretend.fid.concrete_fid.fid = OSAL_CPU_TO_LE16(fid); + + p_pretend = &p_ptt->pxp.pretend; + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend); +} + +void ecore_port_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 port_id) +{ + void *p_pretend; + u16 control = 0; + + 
SET_FIELD(control, PXP_PRETEND_CMD_PORT, port_id); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 1); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + p_ptt->pxp.pretend.control = control; + + p_pretend = &p_ptt->pxp.pretend; + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend); +} + +void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + void *p_pretend; + u16 control = 0; + + SET_FIELD(control, PXP_PRETEND_CMD_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_USE_PORT, 0); + SET_FIELD(control, PXP_PRETEND_CMD_PRETEND_PORT, 1); + p_ptt->pxp.pretend.control = control; + + p_pretend = &p_ptt->pxp.pretend; + REG_WR(p_hwfn, + ecore_ptt_config_addr(p_ptt) + + OFFSETOF(struct pxp_ptt_entry, pretend), *(u32 *)p_pretend); +} + +u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid) +{ + u32 concrete_fid = 0; + + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID, p_hwfn->rel_pf_id); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID, vfid); + SET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFVALID, 1); + + return concrete_fid; +} + +/* Not in use @DPDK + * Ecore HW lock + * ============= + * Although the implementation is ready, today we don't have any flow that + * utliizes said locks - and we want to keep it this way. + * If this changes, this needs to be revisted. + */ + +/* Ecore DMAE + * ============= + */ +static void ecore_dmae_opcode(struct ecore_hwfn *p_hwfn, + const u8 is_src_type_grc, + const u8 is_dst_type_grc, + struct ecore_dmae_params *p_params) +{ + u16 opcode_b = 0; + u32 opcode = 0; + + /* Whether the source is the PCIe or the GRC. + * 0- The source is the PCIe + * 1- The source is the GRC. + */ + opcode |= (is_src_type_grc ? DMAE_CMD_SRC_MASK_GRC + : DMAE_CMD_SRC_MASK_PCIE) << DMAE_CMD_SRC_SHIFT; + opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_SRC_PF_ID_MASK) << + DMAE_CMD_SRC_PF_ID_SHIFT; + + /* The destination of the DMA can be: 0-None 1-PCIe 2-GRC 3-None */ + opcode |= (is_dst_type_grc ? DMAE_CMD_DST_MASK_GRC + : DMAE_CMD_DST_MASK_PCIE) << DMAE_CMD_DST_SHIFT; + opcode |= (p_hwfn->rel_pf_id & DMAE_CMD_DST_PF_ID_MASK) << + DMAE_CMD_DST_PF_ID_SHIFT; + + /* DMAE_E4_TODO need to check which value to specifiy here. */ + /* opcode |= (!b_complete_to_host)<< DMAE_CMD_C_DST_SHIFT; */ + + /* Whether to write a completion word to the completion destination: + * 0-Do not write a completion word + * 1-Write the completion word + */ + opcode |= DMAE_CMD_COMP_WORD_EN_MASK << DMAE_CMD_COMP_WORD_EN_SHIFT; + opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT; + + if (p_params->flags & ECORE_DMAE_FLAG_COMPLETION_DST) + opcode |= 1 << DMAE_CMD_COMP_FUNC_SHIFT; + + /* swapping mode 3 - big endian there should be a define ifdefed in + * the HSI somewhere. 
Since it is currently + */ + opcode |= DMAE_CMD_ENDIANITY << DMAE_CMD_ENDIANITY_MODE_SHIFT; + + opcode |= p_hwfn->port_id << DMAE_CMD_PORT_ID_SHIFT; + + /* reset source address in next go */ + opcode |= DMAE_CMD_SRC_ADDR_RESET_MASK << DMAE_CMD_SRC_ADDR_RESET_SHIFT; + + /* reset dest address in next go */ + opcode |= DMAE_CMD_DST_ADDR_RESET_MASK << DMAE_CMD_DST_ADDR_RESET_SHIFT; + + /* SRC/DST VFID: all 1's - pf, otherwise VF id */ + if (p_params->flags & ECORE_DMAE_FLAG_VF_SRC) { + opcode |= (1 << DMAE_CMD_SRC_VF_ID_VALID_SHIFT); + opcode_b |= (p_params->src_vfid << DMAE_CMD_SRC_VF_ID_SHIFT); + } else { + opcode_b |= (DMAE_CMD_SRC_VF_ID_MASK << + DMAE_CMD_SRC_VF_ID_SHIFT); + } + if (p_params->flags & ECORE_DMAE_FLAG_VF_DST) { + opcode |= 1 << DMAE_CMD_DST_VF_ID_VALID_SHIFT; + opcode_b |= p_params->dst_vfid << DMAE_CMD_DST_VF_ID_SHIFT; + } else { + opcode_b |= DMAE_CMD_DST_VF_ID_MASK << DMAE_CMD_DST_VF_ID_SHIFT; + } + + p_hwfn->dmae_info.p_dmae_cmd->opcode = OSAL_CPU_TO_LE32(opcode); + p_hwfn->dmae_info.p_dmae_cmd->opcode_b = OSAL_CPU_TO_LE16(opcode_b); +} + +static u32 ecore_dmae_idx_to_go_cmd(u8 idx) +{ + OSAL_BUILD_BUG_ON((DMAE_REG_GO_C31 - DMAE_REG_GO_C0) != 31 * 4); + + return DMAE_REG_GO_C0 + idx * 4; +} + +static enum _ecore_status_t +ecore_dmae_post_command(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct dmae_cmd *p_command = p_hwfn->dmae_info.p_dmae_cmd; + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + u8 idx_cmd = p_hwfn->dmae_info.channel, i; + + /* verify address is not OSAL_NULL */ + if ((((!p_command->dst_addr_lo) && (!p_command->dst_addr_hi)) || + ((!p_command->src_addr_lo) && (!p_command->src_addr_hi)))) { + DP_NOTICE(p_hwfn, true, + "source or destination address 0 idx_cmd=%d\n" + "opcode = [0x%08x,0x%04x] len=0x%x" + " src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, (u32)p_command->opcode, + (u16)p_command->opcode_b, + (int)p_command->length, + (int)p_command->src_addr_hi, + (int)p_command->src_addr_lo, + (int)p_command->dst_addr_hi, + (int)p_command->dst_addr_lo); + + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Posting DMAE command [idx %d]: opcode = [0x%08x,0x%04x]" + "len=0x%x src=0x%x:%x dst=0x%x:%x\n", + idx_cmd, (u32)p_command->opcode, + (u16)p_command->opcode_b, + (int)p_command->length, + (int)p_command->src_addr_hi, + (int)p_command->src_addr_lo, + (int)p_command->dst_addr_hi, (int)p_command->dst_addr_lo); + + /* Copy the command to DMAE - need to do it before every call + * for source/dest address no reset. + * The number of commands have been increased to 16 (previous was 14) + * The first 9 DWs are the command registers, the 10 DW is the + * GO register, and + * the rest are result registers (which are read only by the client). + */ + for (i = 0; i < DMAE_CMD_SIZE; i++) { + u32 data = (i < DMAE_CMD_SIZE_TO_FILL) ? 
+ *(((u32 *)p_command) + i) : 0; + + ecore_wr(p_hwfn, p_ptt, + DMAE_REG_CMD_MEM + + (idx_cmd * DMAE_CMD_SIZE * sizeof(u32)) + + (i * sizeof(u32)), data); + } + + ecore_wr(p_hwfn, p_ptt, + ecore_dmae_idx_to_go_cmd(idx_cmd), DMAE_GO_VALUE); + + return ecore_status; +} + +enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn) +{ + dma_addr_t *p_addr = &p_hwfn->dmae_info.completion_word_phys_addr; + struct dmae_cmd **p_cmd = &p_hwfn->dmae_info.p_dmae_cmd; + u32 **p_buff = &p_hwfn->dmae_info.p_intermediate_buffer; + u32 **p_comp = &p_hwfn->dmae_info.p_completion_word; + + *p_comp = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, sizeof(u32)); + if (*p_comp == OSAL_NULL) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `p_completion_word'\n"); + ecore_dmae_info_free(p_hwfn); + return ECORE_NOMEM; + } + + p_addr = &p_hwfn->dmae_info.dmae_cmd_phys_addr; + *p_cmd = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, + sizeof(struct dmae_cmd)); + if (*p_cmd == OSAL_NULL) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct dmae_cmd'\n"); + ecore_dmae_info_free(p_hwfn); + return ECORE_NOMEM; + } + + p_addr = &p_hwfn->dmae_info.intermediate_buffer_phys_addr; + *p_buff = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, p_addr, + sizeof(u32) * DMAE_MAX_RW_SIZE); + if (*p_buff == OSAL_NULL) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `intermediate_buffer'\n"); + ecore_dmae_info_free(p_hwfn); + return ECORE_NOMEM; + } + + /* DMAE_E4_TODO : Need to change this to reflect proper channel */ + p_hwfn->dmae_info.channel = p_hwfn->rel_pf_id; + + return ECORE_SUCCESS; +} + +void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn) +{ + dma_addr_t p_phys; + + /* Just make sure no one is in the middle */ + OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + + if (p_hwfn->dmae_info.p_completion_word != OSAL_NULL) { + p_phys = p_hwfn->dmae_info.completion_word_phys_addr; + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->dmae_info.p_completion_word, + p_phys, sizeof(u32)); + p_hwfn->dmae_info.p_completion_word = OSAL_NULL; + } + + if (p_hwfn->dmae_info.p_dmae_cmd != OSAL_NULL) { + p_phys = p_hwfn->dmae_info.dmae_cmd_phys_addr; + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->dmae_info.p_dmae_cmd, + p_phys, sizeof(struct dmae_cmd)); + p_hwfn->dmae_info.p_dmae_cmd = OSAL_NULL; + } + + if (p_hwfn->dmae_info.p_intermediate_buffer != OSAL_NULL) { + p_phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_hwfn->dmae_info.p_intermediate_buffer, + p_phys, sizeof(u32) * DMAE_MAX_RW_SIZE); + p_hwfn->dmae_info.p_intermediate_buffer = OSAL_NULL; + } + + OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); +} + +static enum _ecore_status_t ecore_dmae_operation_wait(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + u32 wait_cnt_limit = 10000, wait_cnt = 0; + +#ifndef ASIC_ONLY + u32 factor = (CHIP_REV_IS_EMUL(p_hwfn->p_dev) ? + ECORE_EMUL_FACTOR : + (CHIP_REV_IS_FPGA(p_hwfn->p_dev) ? + ECORE_FPGA_FACTOR : 1)); + + wait_cnt_limit *= factor; +#endif + + /* DMAE_E4_TODO : TODO check if we have to call any other function + * other than BARRIER to sync the completion_word since we are not + * using the volatile keyword for this + */ + OSAL_BARRIER(p_hwfn->p_dev); + while (*p_hwfn->dmae_info.p_completion_word != DMAE_COMPLETION_VAL) { + /* DMAE_E4_TODO : using OSAL_MSLEEP instead of mm_wait since mm + * functions are getting depriciated. Need to review for future. 
+ */ + OSAL_UDELAY(DMAE_MIN_WAIT_TIME); + if (++wait_cnt > wait_cnt_limit) { + DP_NOTICE(p_hwfn->p_dev, ECORE_MSG_HW, + "Timed-out waiting for operation to" + " complete. Completion word is 0x%08x" + " expected 0x%08x.\n", + *p_hwfn->dmae_info.p_completion_word, + DMAE_COMPLETION_VAL); + ecore_status = ECORE_TIMEOUT; + break; + } + /* to sync the completion_word since we are not + * using the volatile keyword for p_completion_word + */ + OSAL_BARRIER(p_hwfn->p_dev); + } + + if (ecore_status == ECORE_SUCCESS) + *p_hwfn->dmae_info.p_completion_word = 0; + + return ecore_status; +} + +static enum _ecore_status_t +ecore_dmae_execute_sub_operation(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 src_addr, + u64 dst_addr, + u8 src_type, u8 dst_type, u32 length) +{ + dma_addr_t phys = p_hwfn->dmae_info.intermediate_buffer_phys_addr; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + + switch (src_type) { + case ECORE_DMAE_ADDRESS_GRC: + case ECORE_DMAE_ADDRESS_HOST_PHYS: + cmd->src_addr_hi = DMA_HI(src_addr); + cmd->src_addr_lo = DMA_LO(src_addr); + break; + /* for virt source addresses we use the intermediate buffer. */ + case ECORE_DMAE_ADDRESS_HOST_VIRT: + cmd->src_addr_hi = DMA_HI(phys); + cmd->src_addr_lo = DMA_LO(phys); + OSAL_MEMCPY(&p_hwfn->dmae_info.p_intermediate_buffer[0], + (void *)(osal_uintptr_t)src_addr, + length * sizeof(u32)); + break; + default: + return ECORE_INVAL; + } + + switch (dst_type) { + case ECORE_DMAE_ADDRESS_GRC: + case ECORE_DMAE_ADDRESS_HOST_PHYS: + cmd->dst_addr_hi = DMA_HI(dst_addr); + cmd->dst_addr_lo = DMA_LO(dst_addr); + break; + /* for virt destination address we use the intermediate buff. */ + case ECORE_DMAE_ADDRESS_HOST_VIRT: + cmd->dst_addr_hi = DMA_HI(phys); + cmd->dst_addr_lo = DMA_LO(phys); + break; + default: + return ECORE_INVAL; + } + + cmd->length = (u16)length; + + if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT || + src_type == ECORE_DMAE_ADDRESS_HOST_PHYS) + OSAL_DMA_SYNC(p_hwfn->p_dev, + (void *)HILO_U64(cmd->src_addr_hi, + cmd->src_addr_lo), + length * sizeof(u32), false); + + ecore_dmae_post_command(p_hwfn, p_ptt); + + ecore_status = ecore_dmae_operation_wait(p_hwfn); + + /* TODO - is it true ? */ + if (src_type == ECORE_DMAE_ADDRESS_HOST_VIRT || + src_type == ECORE_DMAE_ADDRESS_HOST_PHYS) + OSAL_DMA_SYNC(p_hwfn->p_dev, + (void *)HILO_U64(cmd->src_addr_hi, + cmd->src_addr_lo), + length * sizeof(u32), true); + + if (ecore_status != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, ECORE_MSG_HW, + "ecore_dmae_host2grc: Wait Failed. 
source_addr" + " 0x%lx, grc_addr 0x%lx, size_in_dwords 0x%x\n", + (unsigned long)src_addr, (unsigned long)dst_addr, + length); + return ecore_status; + } + + if (dst_type == ECORE_DMAE_ADDRESS_HOST_VIRT) + OSAL_MEMCPY((void *)(osal_uintptr_t)(dst_addr), + &p_hwfn->dmae_info.p_intermediate_buffer[0], + length * sizeof(u32)); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_dmae_execute_command(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 src_addr, + u64 dst_addr, + u8 src_type, + u8 dst_type, + u32 size_in_dwords, + struct ecore_dmae_params *p_params) +{ + dma_addr_t phys = p_hwfn->dmae_info.completion_word_phys_addr; + u16 length_cur = 0, i = 0, cnt_split = 0, length_mod = 0; + struct dmae_cmd *cmd = p_hwfn->dmae_info.p_dmae_cmd; + enum _ecore_status_t ecore_status = ECORE_SUCCESS; + u64 src_addr_split = 0, dst_addr_split = 0; + u16 length_limit = DMAE_MAX_RW_SIZE; + u32 offset = 0; + + ecore_dmae_opcode(p_hwfn, + (src_type == ECORE_DMAE_ADDRESS_GRC), + (dst_type == ECORE_DMAE_ADDRESS_GRC), p_params); + + cmd->comp_addr_lo = DMA_LO(phys); + cmd->comp_addr_hi = DMA_HI(phys); + cmd->comp_val = DMAE_COMPLETION_VAL; + + /* Check if the grc_addr is valid like < MAX_GRC_OFFSET */ + cnt_split = size_in_dwords / length_limit; + length_mod = size_in_dwords % length_limit; + + src_addr_split = src_addr; + dst_addr_split = dst_addr; + + for (i = 0; i <= cnt_split; i++) { + offset = length_limit * i; + + if (!(p_params->flags & ECORE_DMAE_FLAG_RW_REPL_SRC)) { + if (src_type == ECORE_DMAE_ADDRESS_GRC) + src_addr_split = src_addr + offset; + else + src_addr_split = src_addr + (offset * 4); + } + + if (dst_type == ECORE_DMAE_ADDRESS_GRC) + dst_addr_split = dst_addr + offset; + else + dst_addr_split = dst_addr + (offset * 4); + + length_cur = (cnt_split == i) ? length_mod : length_limit; + + /* might be zero on last iteration */ + if (!length_cur) + continue; + + ecore_status = ecore_dmae_execute_sub_operation(p_hwfn, + p_ptt, + src_addr_split, + dst_addr_split, + src_type, + dst_type, + length_cur); + if (ecore_status != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, + "ecore_dmae_execute_sub_operation Failed" + " with error 0x%x. 
source_addr 0x%lx," + " dest addr 0x%lx, size_in_dwords 0x%x\n", + ecore_status, (unsigned long)src_addr, + (unsigned long)dst_addr, length_cur); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_DMAE_FAIL); + break; + } + } + + return ecore_status; +} + +enum _ecore_status_t +ecore_dmae_host2grc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u64 source_addr, + u32 grc_addr, u32 size_in_dwords, u32 flags) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + struct ecore_dmae_params params; + enum _ecore_status_t rc; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params)); + params.flags = flags; + + OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + + rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, + grc_addr_in_dw, + ECORE_DMAE_ADDRESS_HOST_VIRT, + ECORE_DMAE_ADDRESS_GRC, + size_in_dwords, ¶ms); + + OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + + return rc; +} + +enum _ecore_status_t +ecore_dmae_grc2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 grc_addr, + dma_addr_t dest_addr, u32 size_in_dwords, u32 flags) +{ + u32 grc_addr_in_dw = grc_addr / sizeof(u32); + struct ecore_dmae_params params; + enum _ecore_status_t rc; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_dmae_params)); + params.flags = flags; + + OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + + rc = ecore_dmae_execute_command(p_hwfn, p_ptt, grc_addr_in_dw, + dest_addr, ECORE_DMAE_ADDRESS_GRC, + ECORE_DMAE_ADDRESS_HOST_VIRT, + size_in_dwords, ¶ms); + + OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + + return rc; +} + +enum _ecore_status_t +ecore_dmae_host2host(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t source_addr, + dma_addr_t dest_addr, + u32 size_in_dwords, struct ecore_dmae_params *p_params) +{ + enum _ecore_status_t rc; + + OSAL_MUTEX_ACQUIRE(&p_hwfn->dmae_info.mutex); + + rc = ecore_dmae_execute_command(p_hwfn, p_ptt, source_addr, + dest_addr, + ECORE_DMAE_ADDRESS_HOST_PHYS, + ECORE_DMAE_ADDRESS_HOST_PHYS, + size_in_dwords, p_params); + + OSAL_MUTEX_RELEASE(&p_hwfn->dmae_info.mutex); + + return rc; +} + +u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn, + enum protocol_type proto, + union ecore_qm_pq_params *p_params) +{ + u16 pq_id = 0; + + if ((proto == PROTOCOLID_CORE || + proto == PROTOCOLID_ETH) && !p_params) { + DP_NOTICE(p_hwfn, true, + "Protocol %d received NULL PQ params\n", proto); + return 0; + } + + switch (proto) { + case PROTOCOLID_CORE: + if (p_params->core.tc == LB_TC) + pq_id = p_hwfn->qm_info.pure_lb_pq; + else if (p_params->core.tc == OOO_LB_TC) + pq_id = p_hwfn->qm_info.ooo_pq; + else + pq_id = p_hwfn->qm_info.offload_pq; + break; + case PROTOCOLID_ETH: + pq_id = p_params->eth.tc; + /* TODO - multi-CoS for VFs? */ + if (p_params->eth.is_vf) + pq_id += p_hwfn->qm_info.vf_queues_offset + + p_params->eth.vf_id; + break; + default: + pq_id = 0; + } + + pq_id = CM_TX_PQ_BASE + pq_id + RESC_START(p_hwfn, ECORE_PQ); + + return pq_id; +} + +void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, + enum ecore_hw_err_type err_type) +{ + /* Fan failure cannot be masked by handling of another HW error */ + if (p_hwfn->p_dev->recov_in_prog && err_type != ECORE_HW_ERR_FAN_FAIL) { + DP_VERBOSE(p_hwfn, ECORE_MSG_DRV, + "Recovery is in progress." 
+ "Avoid notifying about HW error %d.\n", + err_type); + return; + } + + OSAL_HW_ERROR_OCCURRED(p_hwfn, err_type); +} diff --git a/drivers/net/qede/base/ecore_hw.h b/drivers/net/qede/base/ecore_hw.h new file mode 100644 index 00000000..89499447 --- /dev/null +++ b/drivers/net/qede/base/ecore_hw.h @@ -0,0 +1,269 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_HW_H__ +#define __ECORE_HW_H__ + +#include "ecore.h" +#include "ecore_dev_api.h" + +/* Forward decleration */ +struct ecore_ptt; + +enum reserved_ptts { + RESERVED_PTT_EDIAG, + RESERVED_PTT_USER_SPACE, + RESERVED_PTT_MAIN, + RESERVED_PTT_DPC, + RESERVED_PTT_MAX +}; + +/* @@@TMP - in earlier versions of the emulation, the HW lock started from 1 + * instead of 0, this should be fixed in later HW versions. + */ +#ifndef MISC_REG_DRIVER_CONTROL_0 +#define MISC_REG_DRIVER_CONTROL_0 MISC_REG_DRIVER_CONTROL_1 +#endif +#ifndef MISC_REG_DRIVER_CONTROL_0_SIZE +#define MISC_REG_DRIVER_CONTROL_0_SIZE MISC_REG_DRIVER_CONTROL_1_SIZE +#endif + +enum _dmae_cmd_dst_mask { + DMAE_CMD_DST_MASK_NONE = 0, + DMAE_CMD_DST_MASK_PCIE = 1, + DMAE_CMD_DST_MASK_GRC = 2 +}; + +enum _dmae_cmd_src_mask { + DMAE_CMD_SRC_MASK_PCIE = 0, + DMAE_CMD_SRC_MASK_GRC = 1 +}; + +enum _dmae_cmd_crc_mask { + DMAE_CMD_COMP_CRC_EN_MASK_NONE = 0, + DMAE_CMD_COMP_CRC_EN_MASK_SET = 1 +}; + +/* definitions for DMA constants */ +#define DMAE_GO_VALUE 0x1 + +#ifdef __BIG_ENDIAN +#define DMAE_COMPLETION_VAL 0xAED10000 +#define DMAE_CMD_ENDIANITY 0x3 +#else +#define DMAE_COMPLETION_VAL 0xD1AE +#define DMAE_CMD_ENDIANITY 0x2 +#endif + +#define DMAE_CMD_SIZE 14 +/* size of DMAE command structure to fill.. DMAE_CMD_SIZE-5 */ +#define DMAE_CMD_SIZE_TO_FILL (DMAE_CMD_SIZE - 5) +/* Minimum wait for dmae opertaion to complete 2 milliseconds */ +#define DMAE_MIN_WAIT_TIME 0x2 +#define DMAE_MAX_CLIENTS 32 + +/** +* @brief ecore_gtt_init - Initialize GTT windows +* +* @param p_hwfn +*/ +void ecore_gtt_init(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_invalidate - Forces all ptt entries to be re-configured + * + * @param p_hwfn + */ +void ecore_ptt_invalidate(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_pool_alloc - Allocate and initialize PTT pool + * + * @param p_hwfn + * + * @return _ecore_status_t - success (0), negative - error. 
+ */ +enum _ecore_status_t ecore_ptt_pool_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_pool_free - + * + * @param p_hwfn + */ +void ecore_ptt_pool_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_ptt_get_hw_addr - Get PTT's GRC/HW address + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 ecore_ptt_get_hw_addr(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +/** + * @brief ecore_ptt_get_bar_addr - Get PPT's external BAR address + * + * @param p_hwfn + * @param p_ptt + * + * @return u32 + */ +u32 ecore_ptt_get_bar_addr(struct ecore_ptt *p_ptt); + +/** + * @brief ecore_ptt_set_win - Set PTT Window's GRC BAR address + * + * @param p_hwfn + * @param new_hw_addr + * @param p_ptt + */ +void ecore_ptt_set_win(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 new_hw_addr); + +/** + * @brief ecore_get_reserved_ptt - Get a specific reserved PTT + * + * @param p_hwfn + * @param ptt_idx + * + * @return struct ecore_ptt * + */ +struct ecore_ptt *ecore_get_reserved_ptt(struct ecore_hwfn *p_hwfn, + enum reserved_ptts ptt_idx); + +/** + * @brief ecore_wr - Write value to BAR using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param val + * @param hw_addr + */ +void ecore_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 hw_addr, u32 val); + +/** + * @brief ecore_rd - Read value from BAR using the given ptt + * + * @param p_hwfn + * @param p_ptt + * @param val + * @param hw_addr + */ +u32 ecore_rd(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u32 hw_addr); + +/** + * @brief ecore_memcpy_from - copy n bytes from BAR using the given + * ptt + * + * @param p_hwfn + * @param p_ptt + * @param dest + * @param hw_addr + * @param n + */ +void ecore_memcpy_from(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *dest, u32 hw_addr, osal_size_t n); + +/** + * @brief ecore_memcpy_to - copy n bytes to BAR using the given + * ptt + * + * @param p_hwfn + * @param p_ptt + * @param hw_addr + * @param src + * @param n + */ +void ecore_memcpy_to(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 hw_addr, void *src, osal_size_t n); +/** + * @brief ecore_fid_pretend - pretend to another function when + * accessing the ptt window. There is no way to unpretend + * a function. The only way to cancel a pretend is to + * pretend back to the original function. + * + * @param p_hwfn + * @param p_ptt + * @param fid - fid field of pxp_pretend structure. Can contain + * either pf / vf, port/path fields are don't care. + */ +void ecore_fid_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 fid); + +/** + * @brief ecore_port_pretend - pretend to another port when + * accessing the ptt window + * + * @param p_hwfn + * @param p_ptt + * @param port_id - the port to pretend to + */ +void ecore_port_pretend(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 port_id); + +/** + * @brief ecore_port_unpretend - cancel any previously set port + * pretend + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_port_unpretend(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +/** + * @brief ecore_vfid_to_concrete - build a concrete FID for a + * given VF ID + * + * @param p_hwfn + * @param p_ptt + * @param vfid + */ +u32 ecore_vfid_to_concrete(struct ecore_hwfn *p_hwfn, u8 vfid); + +/** +* @brief ecore_dmae_info_alloc - Init the dmae_info structure +* which is part of p_hwfn. 
+* @param p_hwfn +*/ +enum _ecore_status_t ecore_dmae_info_alloc(struct ecore_hwfn *p_hwfn); + +/** +* @brief ecore_dmae_info_free - Free the dmae_info structure +* which is part of p_hwfn +* +* @param p_hwfn +*/ +void ecore_dmae_info_free(struct ecore_hwfn *p_hwfn); + +union ecore_qm_pq_params { + struct { + u8 tc; + } core; + + struct { + u8 is_vf; + u8 vf_id; + u8 tc; + } eth; +}; + +u16 ecore_get_qm_pq(struct ecore_hwfn *p_hwfn, + enum protocol_type proto, union ecore_qm_pq_params *params); + +enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev, + const u8 *fw_data); + +void ecore_hw_err_notify(struct ecore_hwfn *p_hwfn, + enum ecore_hw_err_type err_type); + +#endif /* __ECORE_HW_H__ */ diff --git a/drivers/net/qede/base/ecore_hw_defs.h b/drivers/net/qede/base/ecore_hw_defs.h new file mode 100644 index 00000000..fa518cec --- /dev/null +++ b/drivers/net/qede/base/ecore_hw_defs.h @@ -0,0 +1,49 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef _ECORE_IGU_DEF_H_ +#define _ECORE_IGU_DEF_H_ + +/* Fields of IGU PF CONFIGRATION REGISTER */ +#define IGU_PF_CONF_FUNC_EN (0x1 << 0) /* function enable */ +#define IGU_PF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ +#define IGU_PF_CONF_INT_LINE_EN (0x1 << 2) /* INT enable */ +#define IGU_PF_CONF_ATTN_BIT_EN (0x1 << 3) /* attention enable */ +#define IGU_PF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ +#define IGU_PF_CONF_SIMD_MODE (0x1 << 5) /* simd all ones mode */ + +/* Fields of IGU VF CONFIGRATION REGISTER */ +#define IGU_VF_CONF_FUNC_EN (0x1 << 0) /* function enable */ +#define IGU_VF_CONF_MSI_MSIX_EN (0x1 << 1) /* MSI/MSIX enable */ +#define IGU_VF_CONF_SINGLE_ISR_EN (0x1 << 4) /* single ISR mode enable */ +#define IGU_VF_CONF_PARENT_MASK (0xF) /* Parent PF */ +#define IGU_VF_CONF_PARENT_SHIFT 5 /* Parent PF */ + +/* Igu control commands + */ +enum igu_ctrl_cmd { + IGU_CTRL_CMD_TYPE_RD, + IGU_CTRL_CMD_TYPE_WR, + MAX_IGU_CTRL_CMD +}; + +/* Control register for the IGU command register + */ +struct igu_ctrl_reg { + u32 ctrl_data; +#define IGU_CTRL_REG_FID_MASK 0xFFFF /* Opaque_FID */ +#define IGU_CTRL_REG_FID_SHIFT 0 +#define IGU_CTRL_REG_PXP_ADDR_MASK 0xFFF /* Command address */ +#define IGU_CTRL_REG_PXP_ADDR_SHIFT 16 +#define IGU_CTRL_REG_RESERVED_MASK 0x1 +#define IGU_CTRL_REG_RESERVED_SHIFT 28 +#define IGU_CTRL_REG_TYPE_MASK 0x1 /* use enum igu_ctrl_cmd */ +#define IGU_CTRL_REG_TYPE_SHIFT 31 +}; + +#endif diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.c b/drivers/net/qede/base/ecore_init_fw_funcs.c new file mode 100644 index 00000000..5324e052 --- /dev/null +++ b/drivers/net/qede/base/ecore_init_fw_funcs.c @@ -0,0 +1,1275 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" +#include "ecore_hw.h" +#include "ecore_init_ops.h" +#include "reg_addr.h" +#include "ecore_rt_defs.h" +#include "ecore_hsi_common.h" +#include "ecore_hsi_tools.h" +#include "ecore_init_fw_funcs.h" + +/* @DPDK CmInterfaceEnum */ +enum cm_interface_enum { + MCM_SEC, + MCM_PRI, + UCM_SEC, + UCM_PRI, + TCM_SEC, + TCM_PRI, + YCM_SEC, + YCM_PRI, + XCM_SEC, + XCM_PRI, + NUM_OF_CM_INTERFACES +}; +/* general constants */ +#define QM_PQ_MEM_4KB(pq_size) \ +(pq_size ? DIV_ROUND_UP((pq_size + 1) * QM_PQ_ELEMENT_SIZE, 0x1000) : 0) +#define QM_PQ_SIZE_256B(pq_size) \ +(pq_size ? 
DIV_ROUND_UP(pq_size, 0x100) - 1 : 0) +#define QM_INVALID_PQ_ID 0xffff +/* feature enable */ +#define QM_BYPASS_EN 1 +#define QM_BYTE_CRD_EN 1 +/* other PQ constants */ +#define QM_OTHER_PQS_PER_PF 4 +/* WFQ constants */ +#define QM_WFQ_UPPER_BOUND 62500000 +#define QM_WFQ_VP_PQ_VOQ_SHIFT 0 +#define QM_WFQ_VP_PQ_PF_SHIFT 5 +#define QM_WFQ_INC_VAL(weight) ((weight) * 0x9000) +#define QM_WFQ_MAX_INC_VAL 43750000 +/* RL constants */ +#define QM_RL_UPPER_BOUND 62500000 +#define QM_RL_PERIOD 5 +#define QM_RL_PERIOD_CLK_25M (25 * QM_RL_PERIOD) +#define QM_RL_INC_VAL(rate) \ +OSAL_MAX_T(u32, (((rate ? rate : 1000000) * QM_RL_PERIOD * 1.01) / 8), 1) +#define QM_RL_MAX_INC_VAL 43750000 +/* AFullOprtnstcCrdMask constants */ +#define QM_OPPOR_LINE_VOQ_DEF 1 +#define QM_OPPOR_FW_STOP_DEF 0 +#define QM_OPPOR_PQ_EMPTY_DEF 1 +#define EAGLE_WORKAROUND_TC 7 +/* Command Queue constants */ +#define PBF_CMDQ_PURE_LB_LINES 150 +#define PBF_CMDQ_EAGLE_WORKAROUND_LINES 8 /* eagle workaround CmdQ */ +#define PBF_CMDQ_LINES_RT_OFFSET(voq) \ +(PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET + \ +voq * (PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET \ +- PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET)) +#define PBF_BTB_GUARANTEED_RT_OFFSET(voq) \ +(PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET + voq * \ +(PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET - PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET)) +#define QM_VOQ_LINE_CRD(pbf_cmd_lines) \ +((((pbf_cmd_lines) - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT) +/* BTB: blocks constants (block size = 256B) */ +#define BTB_JUMBO_PKT_BLOCKS 38 /* 256B blocks in 9700B packet */ +#define BTB_HEADROOM_BLOCKS BTB_JUMBO_PKT_BLOCKS /* headroom per-port */ +#define BTB_EAGLE_WORKAROUND_BLOCKS 4 /* eagle workaround blocks */ +#define BTB_PURE_LB_FACTOR 10 +#define BTB_PURE_LB_RATIO 7 /* factored (hence really 0.7) */ +/* QM stop command constants */ +#define QM_STOP_PQ_MASK_WIDTH 32 +#define QM_STOP_CMD_ADDR 0x2 +#define QM_STOP_CMD_STRUCT_SIZE 2 +#define QM_STOP_CMD_PAUSE_MASK_OFFSET 0 +#define QM_STOP_CMD_PAUSE_MASK_SHIFT 0 +#define QM_STOP_CMD_PAUSE_MASK_MASK -1 +#define QM_STOP_CMD_GROUP_ID_OFFSET 1 +#define QM_STOP_CMD_GROUP_ID_SHIFT 16 +#define QM_STOP_CMD_GROUP_ID_MASK 15 +#define QM_STOP_CMD_PQ_TYPE_OFFSET 1 +#define QM_STOP_CMD_PQ_TYPE_SHIFT 24 +#define QM_STOP_CMD_PQ_TYPE_MASK 1 +#define QM_STOP_CMD_MAX_POLL_COUNT 100 +#define QM_STOP_CMD_POLL_PERIOD_US 500 +/* QM command macros */ +#define QM_CMD_STRUCT_SIZE(cmd) cmd##_STRUCT_SIZE +#define QM_CMD_SET_FIELD(var, cmd, field, value) \ +SET_FIELD(var[cmd##_##field##_OFFSET], cmd##_##field, value) +/* QM: VOQ macros */ +#define PHYS_VOQ(port, tc, max_phys_tcs_per_port) \ +((port) * (max_phys_tcs_per_port) + (tc)) +#define LB_VOQ(port) (MAX_PHYS_VOQS + (port)) +#define VOQ(port, tc, max_phys_tcs_per_port) \ +((tc) < LB_TC ? PHYS_VOQ(port, tc, max_phys_tcs_per_port) : LB_VOQ(port)) +/******************** INTERNAL IMPLEMENTATION *********************/ +/* Prepare PF RL enable/disable runtime init values */ +static void ecore_enable_pf_rl(struct ecore_hwfn *p_hwfn, bool pf_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, pf_rl_en ? 
1 : 0); + if (pf_rl_en) { + /* enable RLs for all VOQs */ + STORE_RT_REG(p_hwfn, QM_REG_RLPFVOQENABLE_RT_OFFSET, + (1 << MAX_NUM_VOQS) - 1); + /* write RL period */ + STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIOD_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, QM_REG_RLPFPERIODTIMER_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + /* set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET, + QM_RL_UPPER_BOUND); + } +} + +/* Prepare PF WFQ enable/disable runtime init values */ +static void ecore_enable_pf_wfq(struct ecore_hwfn *p_hwfn, bool pf_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQPFENABLE_RT_OFFSET, pf_wfq_en ? 1 : 0); + /* set credit threshold for QM bypass flow */ + if (pf_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET, + QM_WFQ_UPPER_BOUND); +} + +/* Prepare VPORT RL enable/disable runtime init values */ +static void ecore_enable_vport_rl(struct ecore_hwfn *p_hwfn, bool vport_rl_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLENABLE_RT_OFFSET, + vport_rl_en ? 1 : 0); + if (vport_rl_en) { + /* write RL period (use timer 0 only) */ + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIOD_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET, + QM_RL_PERIOD_CLK_25M); + /* set credit threshold for QM bypass flow */ + if (QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, + QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET, + QM_RL_UPPER_BOUND); + } +} + +/* Prepare VPORT WFQ enable/disable runtime init values */ +static void ecore_enable_vport_wfq(struct ecore_hwfn *p_hwfn, bool vport_wfq_en) +{ + STORE_RT_REG(p_hwfn, QM_REG_WFQVPENABLE_RT_OFFSET, + vport_wfq_en ? 1 : 0); + /* set credit threshold for QM bypass flow */ + if (vport_wfq_en && QM_BYPASS_EN) + STORE_RT_REG(p_hwfn, QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET, + QM_WFQ_UPPER_BOUND); +} + +/* Prepare runtime init values to allocate PBF command queue lines for + * the specified VOQ + */ +static void ecore_cmdq_lines_voq_rt_init(struct ecore_hwfn *p_hwfn, + u8 voq, u16 cmdq_lines) +{ + u32 qm_line_crd; + bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev); + if (is_bb_a0) + cmdq_lines = OSAL_MIN_T(u32, cmdq_lines, 1022); + qm_line_crd = QM_VOQ_LINE_CRD(cmdq_lines); + OVERWRITE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), + (u32)cmdq_lines); + STORE_RT_REG(p_hwfn, QM_REG_VOQCRDLINE_RT_OFFSET + voq, qm_line_crd); + STORE_RT_REG(p_hwfn, QM_REG_VOQINITCRDLINE_RT_OFFSET + voq, + qm_line_crd); +} + +/* Prepare runtime init values to allocate PBF command queue lines. 
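For orientation, the line-to-credit conversion used by ecore_cmdq_lines_voq_rt_init() above is just the QM_VOQ_LINE_CRD macro from the constants block; a quick worked example (QM_LINE_CRD_REG_SIGN_BIT comes from the register/runtime definitions, not from this file):

/* e.g. the 150 pure-LB command-queue lines reserved per port: */
u32 crd = QM_VOQ_LINE_CRD(PBF_CMDQ_PURE_LB_LINES);
/*      = ((150 - 4) * 2) | QM_LINE_CRD_REG_SIGN_BIT
 *      = 292 with the sign bit set */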
*/ +static void ecore_cmdq_lines_rt_init(struct ecore_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params + port_params[MAX_NUM_PORTS]) +{ + u8 tc, voq, port_id; + bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn); + /* clear PBF lines for all VOQs */ + for (voq = 0; voq < MAX_NUM_VOQS; voq++) + STORE_RT_REG(p_hwfn, PBF_CMDQ_LINES_RT_OFFSET(voq), 0); + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + if (port_params[port_id].active) { + u16 phys_lines, phys_lines_per_tc; + phys_lines = + port_params[port_id].num_pbf_cmd_lines - + PBF_CMDQ_PURE_LB_LINES; + if (eagle_workaround) + phys_lines -= PBF_CMDQ_EAGLE_WORKAROUND_LINES; + /* find #lines per active physical TC */ + phys_lines_per_tc = + phys_lines / + port_params[port_id].num_active_phys_tcs; + /* init registers per active TC */ + for (tc = 0; + tc < port_params[port_id].num_active_phys_tcs; + tc++) { + voq = + PHYS_VOQ(port_id, tc, + max_phys_tcs_per_port); + ecore_cmdq_lines_voq_rt_init(p_hwfn, voq, + phys_lines_per_tc); + } + /* init registers for pure LB TC */ + ecore_cmdq_lines_voq_rt_init(p_hwfn, LB_VOQ(port_id), + PBF_CMDQ_PURE_LB_LINES); + /* init registers for eagle workaround */ + if (eagle_workaround) { + voq = + PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC, + max_phys_tcs_per_port); + ecore_cmdq_lines_voq_rt_init(p_hwfn, voq, + PBF_CMDQ_EAGLE_WORKAROUND_LINES); + } + } + } +} + +/* + * Prepare runtime init values to allocate guaranteed BTB blocks for the + * specified port. The guaranteed BTB space is divided between the TCs as + * follows (shared space Is currently not used): + * 1. Parameters: + * B BTB blocks for this port + * C Number of physical TCs for this port + * 2. Calculation: + * a. 38 blocks (9700B jumbo frame) are allocated for global per port + * headroom + * b. B = B 38 (remainder after global headroom allocation) + * c. MAX(38,B/(C+0.7)) blocks are allocated for the pure LB VOQ. + * d. B = B MAX(38, B/(C+0.7)) (remainder after pure LB allocation). + * e. B/C blocks are allocated for each physical TC. + * Assumptions: + * - MTU is up to 9700 bytes (38 blocks) + * - All TCs are considered symmetrical (same rate and packet size) + * - No optimization for lossy TC (all are considered lossless). Shared space is + * not enabled and allocated for each TC. 
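 * A worked example of the split, following the integer arithmetic of the
 * function below (numbers are purely illustrative):
 *   B = 1000 blocks, C = 4 active physical TCs, no eagle workaround:
 *     usable  = 1000 - 38 (headroom)                    = 962
 *     pure LB = max(38, (962 * 10 / (4 * 10 + 7)) / 10) = max(38, 20) = 38
 *     per TC  = (962 - 38) / 4                          = 231 blocks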
+ */ +static void ecore_btb_blocks_rt_init(struct ecore_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + struct init_qm_port_params + port_params[MAX_NUM_PORTS]) +{ + u8 tc, voq, port_id; + u32 usable_blocks, pure_lb_blocks, phys_blocks; + bool eagle_workaround = ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn); + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + if (port_params[port_id].active) { + /* subtract headroom blocks */ + usable_blocks = + port_params[port_id].num_btb_blocks - + BTB_HEADROOM_BLOCKS; + if (eagle_workaround) + usable_blocks -= BTB_EAGLE_WORKAROUND_BLOCKS; + pure_lb_blocks = + (usable_blocks * BTB_PURE_LB_FACTOR) / + (port_params[port_id].num_active_phys_tcs * + BTB_PURE_LB_FACTOR + BTB_PURE_LB_RATIO); + pure_lb_blocks = + OSAL_MAX_T(u32, BTB_JUMBO_PKT_BLOCKS, + pure_lb_blocks / BTB_PURE_LB_FACTOR); + phys_blocks = + (usable_blocks - + pure_lb_blocks) / + port_params[port_id].num_active_phys_tcs; + /* init physical TCs */ + for (tc = 0; + tc < port_params[port_id].num_active_phys_tcs; + tc++) { + voq = + PHYS_VOQ(port_id, tc, + max_phys_tcs_per_port); + STORE_RT_REG(p_hwfn, + PBF_BTB_GUARANTEED_RT_OFFSET(voq), + phys_blocks); + } + /* init pure LB TC */ + STORE_RT_REG(p_hwfn, + PBF_BTB_GUARANTEED_RT_OFFSET(LB_VOQ + (port_id)), + pure_lb_blocks); + /* init eagle workaround */ + if (eagle_workaround) { + voq = + PHYS_VOQ(port_id, EAGLE_WORKAROUND_TC, + max_phys_tcs_per_port); + STORE_RT_REG(p_hwfn, + PBF_BTB_GUARANTEED_RT_OFFSET(voq), + BTB_EAGLE_WORKAROUND_BLOCKS); + } + } + } +} + +/* Prepare Tx PQ mapping runtime init values for the specified PF */ +static void ecore_tx_pq_map_rt_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 port_id, + u8 pf_id, + u8 max_phys_tcs_per_port, + bool is_first_pf, + u32 num_pf_cids, + u32 num_vf_cids, + u16 start_pq, + u16 num_pf_pqs, + u16 num_vf_pqs, + u8 start_vport, + u32 base_mem_addr_4kb, + struct init_qm_pq_params *pq_params, + struct init_qm_vport_params *vport_params) +{ + u16 i, pq_id, pq_group; + u16 num_pqs = num_pf_pqs + num_vf_pqs; + u16 first_pq_group = start_pq / QM_PF_QUEUE_GROUP_SIZE; + u16 last_pq_group = (start_pq + num_pqs - 1) / QM_PF_QUEUE_GROUP_SIZE; + bool is_bb_a0 = ECORE_IS_BB_A0(p_hwfn->p_dev); + /* a bit per Tx PQ indicating if the PQ is associated with a VF */ + u32 tx_pq_vf_mask[MAX_QM_TX_QUEUES / QM_PF_QUEUE_GROUP_SIZE] = { 0 }; + u32 tx_pq_vf_mask_width = is_bb_a0 ? 32 : QM_PF_QUEUE_GROUP_SIZE; + u32 num_tx_pq_vf_masks = MAX_QM_TX_QUEUES / tx_pq_vf_mask_width; + u32 pq_mem_4kb = QM_PQ_MEM_4KB(num_pf_cids); + u32 vport_pq_mem_4kb = QM_PQ_MEM_4KB(num_vf_cids); + u32 mem_addr_4kb = base_mem_addr_4kb; + /* set mapping from PQ group to PF */ + for (pq_group = first_pq_group; pq_group <= last_pq_group; pq_group++) + STORE_RT_REG(p_hwfn, QM_REG_PQTX2PF_0_RT_OFFSET + pq_group, + (u32)(pf_id)); + /* set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_0_RT_OFFSET, + QM_PQ_SIZE_256B(num_pf_cids)); + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_1_RT_OFFSET, + QM_PQ_SIZE_256B(num_vf_cids)); + /* go over all Tx PQs */ + for (i = 0, pq_id = start_pq; i < num_pqs; i++, pq_id++) { + struct qm_rf_pq_map tx_pq_map; + u8 voq = + VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port); + bool is_vf_pq = (i >= num_pf_pqs); + /* update first Tx PQ of VPORT/TC */ + u8 vport_id_in_pf = pq_params[i].vport_id - start_vport; + u16 first_tx_pq_id = + vport_params[vport_id_in_pf].first_tx_pq_id[pq_params[i]. 
+ tc_id]; + if (first_tx_pq_id == QM_INVALID_PQ_ID) { + /* create new VP PQ */ + vport_params[vport_id_in_pf]. + first_tx_pq_id[pq_params[i].tc_id] = pq_id; + first_tx_pq_id = pq_id; + /* map VP PQ to VOQ and PF */ + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPMAP_RT_OFFSET + first_tx_pq_id, + (voq << QM_WFQ_VP_PQ_VOQ_SHIFT) | (pf_id << + QM_WFQ_VP_PQ_PF_SHIFT)); + } + /* fill PQ map entry */ + OSAL_MEMSET(&tx_pq_map, 0, sizeof(tx_pq_map)); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_PQ_VALID, 1); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_VALID, + is_vf_pq ? 1 : 0); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VP_PQ_ID, first_tx_pq_id); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_RL_ID, + is_vf_pq ? pq_params[i].vport_id : 0); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_VOQ, voq); + SET_FIELD(tx_pq_map.reg, QM_RF_PQ_MAP_WRR_WEIGHT_GROUP, + pq_params[i].wrr_group); + /* write PQ map entry to CAM */ + STORE_RT_REG(p_hwfn, QM_REG_TXPQMAP_RT_OFFSET + pq_id, + *((u32 *)&tx_pq_map)); + /* set base address */ + STORE_RT_REG(p_hwfn, QM_REG_BASEADDRTXPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + /* check if VF PQ */ + if (is_vf_pq) { + tx_pq_vf_mask[pq_id / tx_pq_vf_mask_width] |= + (1 << (pq_id % tx_pq_vf_mask_width)); + mem_addr_4kb += vport_pq_mem_4kb; + } else { + mem_addr_4kb += pq_mem_4kb; + } + } + /* store Tx PQ VF mask to size select register */ + for (i = 0; i < num_tx_pq_vf_masks; i++) { + if (tx_pq_vf_mask[i]) { + if (is_bb_a0) { + u32 curr_mask = + is_first_pf ? 0 : ecore_rd(p_hwfn, p_ptt, + QM_REG_MAXPQSIZETXSEL_0 + + i * 4); + STORE_RT_REG(p_hwfn, + QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + + i, curr_mask | tx_pq_vf_mask[i]); + } else + STORE_RT_REG(p_hwfn, + QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET + + i, tx_pq_vf_mask[i]); + } + } +} + +/* Prepare Other PQ mapping runtime init values for the specified PF */ +static void ecore_other_pq_map_rt_init(struct ecore_hwfn *p_hwfn, + u8 port_id, + u8 pf_id, + u32 num_pf_cids, + u32 num_tids, u32 base_mem_addr_4kb) +{ + u16 i, pq_id; + u16 pq_group = pf_id; + u32 pq_size = num_pf_cids + num_tids; + u32 pq_mem_4kb = QM_PQ_MEM_4KB(pq_size); + u32 mem_addr_4kb = base_mem_addr_4kb; + /* map PQ group to PF */ + STORE_RT_REG(p_hwfn, QM_REG_PQOTHER2PF_0_RT_OFFSET + pq_group, + (u32)(pf_id)); + /* set PQ sizes */ + STORE_RT_REG(p_hwfn, QM_REG_MAXPQSIZE_2_RT_OFFSET, + QM_PQ_SIZE_256B(pq_size)); + /* set base address */ + for (i = 0, pq_id = pf_id * QM_PF_QUEUE_GROUP_SIZE; + i < QM_OTHER_PQS_PER_PF; i++, pq_id++) { + STORE_RT_REG(p_hwfn, QM_REG_BASEADDROTHERPQ_RT_OFFSET + pq_id, + mem_addr_4kb); + mem_addr_4kb += pq_mem_4kb; + } +} + +static int ecore_pf_wfq_rt_init(struct ecore_hwfn *p_hwfn, + u8 port_id, + u8 pf_id, + u16 pf_wfq, + u8 max_phys_tcs_per_port, + u16 num_tx_pqs, + struct init_qm_pq_params *pq_params) +{ + u16 i; + u32 inc_val; + u32 crd_reg_offset = + (pf_id < + MAX_NUM_PFS_BB ? 
QM_REG_WFQPFCRD_RT_OFFSET : + QM_REG_WFQPFCRD_MSB_RT_OFFSET) + (pf_id % MAX_NUM_PFS_BB); + inc_val = QM_WFQ_INC_VAL(pf_wfq); + if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration"); + return -1; + } + for (i = 0; i < num_tx_pqs; i++) { + u8 voq = + VOQ(port_id, pq_params[i].tc_id, max_phys_tcs_per_port); + OVERWRITE_RT_REG(p_hwfn, crd_reg_offset + voq * MAX_NUM_PFS_BB, + QM_WFQ_CRD_REG_SIGN_BIT); + } + STORE_RT_REG(p_hwfn, QM_REG_WFQPFUPPERBOUND_RT_OFFSET + pf_id, + QM_WFQ_UPPER_BOUND | QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_WFQPFWEIGHT_RT_OFFSET + pf_id, inc_val); + return 0; +} + +/* Prepare PF RL runtime init values for the specified PF. Return -1 on err */ +static int ecore_pf_rl_rt_init(struct ecore_hwfn *p_hwfn, u8 pf_id, u32 pf_rl) +{ + u32 inc_val = QM_RL_INC_VAL(pf_rl); + if (inc_val > QM_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration"); + return -1; + } + STORE_RT_REG(p_hwfn, QM_REG_RLPFCRD_RT_OFFSET + pf_id, + QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLPFUPPERBOUND_RT_OFFSET + pf_id, + QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLPFINCVAL_RT_OFFSET + pf_id, inc_val); + return 0; +} + +static int ecore_vp_wfq_rt_init(struct ecore_hwfn *p_hwfn, + u8 num_vports, + struct init_qm_vport_params *vport_params) +{ + u8 tc, i; + u32 inc_val; + /* go over all PF VPORTs */ + for (i = 0; i < num_vports; i++) { + if (vport_params[i].vport_wfq) { + inc_val = QM_WFQ_INC_VAL(vport_params[i].vport_wfq); + if (inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid VPORT WFQ weight config"); + return -1; + } + for (tc = 0; tc < NUM_OF_TCS; tc++) { + u16 vport_pq_id = + vport_params[i].first_tx_pq_id[tc]; + if (vport_pq_id != QM_INVALID_PQ_ID) { + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPCRD_RT_OFFSET + + vport_pq_id, + QM_WFQ_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_WFQVPWEIGHT_RT_OFFSET + + vport_pq_id, inc_val); + } + } + } + } + return 0; +} + +/* Prepare VPORT RL runtime init values for specified VPORT. Ret -1 on error. 
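As a quick sanity check on the WFQ weight checks enforced in the functions above, using only the macros defined at the top of this file:

/* QM_WFQ_INC_VAL(w) = w * 0x9000 = w * 36864, so the
 * QM_WFQ_MAX_INC_VAL limit of 43750000 allows weights 1..1186:
 *   QM_WFQ_INC_VAL(1186) = 43720704  (accepted)
 *   QM_WFQ_INC_VAL(1187) = 43757568  (rejected as invalid)
 */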
*/ +static int ecore_vport_rl_rt_init(struct ecore_hwfn *p_hwfn, + u8 start_vport, + u8 num_vports, + struct init_qm_vport_params *vport_params) +{ + u8 i, vport_id; + /* go over all PF VPORTs */ + for (i = 0, vport_id = start_vport; i < num_vports; i++, vport_id++) { + u32 inc_val = QM_RL_INC_VAL(vport_params[i].vport_rl); + if (inc_val > QM_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid VPORT rate-limit configuration"); + return -1; + } + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLCRD_RT_OFFSET + vport_id, + QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, + QM_REG_RLGLBLUPPERBOUND_RT_OFFSET + vport_id, + QM_RL_UPPER_BOUND | QM_RL_CRD_REG_SIGN_BIT); + STORE_RT_REG(p_hwfn, QM_REG_RLGLBLINCVAL_RT_OFFSET + vport_id, + inc_val); + } + return 0; +} + +static bool ecore_poll_on_qm_cmd_ready(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 reg_val, i; + for (i = 0, reg_val = 0; i < QM_STOP_CMD_MAX_POLL_COUNT && reg_val == 0; + i++) { + OSAL_UDELAY(QM_STOP_CMD_POLL_PERIOD_US); + reg_val = ecore_rd(p_hwfn, p_ptt, QM_REG_SDMCMDREADY); + } + /* check if timeout while waiting for SDM command ready */ + if (i == QM_STOP_CMD_MAX_POLL_COUNT) { + DP_VERBOSE(p_hwfn, ECORE_MSG_HW, + "Timeout waiting for QM SDM cmd ready signal\n"); + return false; + } + return true; +} + +static bool ecore_send_qm_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd_addr, u32 cmd_data_lsb, u32 cmd_data_msb) +{ + if (!ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt)) + return false; + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDADDR, cmd_addr); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATALSB, cmd_data_lsb); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDDATAMSB, cmd_data_msb); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 1); + ecore_wr(p_hwfn, p_ptt, QM_REG_SDMCMDGO, 0); + return ecore_poll_on_qm_cmd_ready(p_hwfn, p_ptt); +} + +/******************** INTERFACE IMPLEMENTATION *********************/ +u32 ecore_qm_pf_mem_size(u8 pf_id, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs) +{ + return QM_PQ_MEM_4KB(num_pf_cids) * num_pf_pqs + + QM_PQ_MEM_4KB(num_vf_cids) * num_vf_pqs + + QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; +} + +int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + bool pf_rl_en, + bool pf_wfq_en, + bool vport_rl_en, + bool vport_wfq_en, + struct init_qm_port_params + port_params[MAX_NUM_PORTS]) +{ + u8 port_id; + /* init AFullOprtnstcCrdMask */ + u32 mask = + (QM_OPPOR_LINE_VOQ_DEF << QM_RF_OPPORTUNISTIC_MASK_LINEVOQ_SHIFT) | + (QM_BYTE_CRD_EN << QM_RF_OPPORTUNISTIC_MASK_BYTEVOQ_SHIFT) | + (pf_wfq_en << QM_RF_OPPORTUNISTIC_MASK_PFWFQ_SHIFT) | + (vport_wfq_en << QM_RF_OPPORTUNISTIC_MASK_VPWFQ_SHIFT) | + (pf_rl_en << QM_RF_OPPORTUNISTIC_MASK_PFRL_SHIFT) | + (vport_rl_en << QM_RF_OPPORTUNISTIC_MASK_VPQCNRL_SHIFT) | + (QM_OPPOR_FW_STOP_DEF << QM_RF_OPPORTUNISTIC_MASK_FWPAUSE_SHIFT) | + (QM_OPPOR_PQ_EMPTY_DEF << + QM_RF_OPPORTUNISTIC_MASK_QUEUEEMPTY_SHIFT); + STORE_RT_REG(p_hwfn, QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET, mask); + /* check eagle workaround */ + for (port_id = 0; port_id < max_ports_per_engine; port_id++) { + if (port_params[port_id].active && + port_params[port_id].num_active_phys_tcs > + EAGLE_WORKAROUND_TC && + ENABLE_EAGLE_ENG1_WORKAROUND(p_hwfn)) { + DP_NOTICE(p_hwfn, true, + "Can't config 8 TCs with Eagle" + " eng1 workaround"); + return -1; + } + } + /* enable/disable PF RL */ + ecore_enable_pf_rl(p_hwfn, pf_rl_en); + /* enable/disable PF WFQ */ + ecore_enable_pf_wfq(p_hwfn, 
pf_wfq_en); + /* enable/disable VPORT RL */ + ecore_enable_vport_rl(p_hwfn, vport_rl_en); + /* enable/disable VPORT WFQ */ + ecore_enable_vport_wfq(p_hwfn, vport_wfq_en); + /* init PBF CMDQ line credit */ + ecore_cmdq_lines_rt_init(p_hwfn, max_ports_per_engine, + max_phys_tcs_per_port, port_params); + /* init BTB blocks in PBF */ + ecore_btb_blocks_rt_init(p_hwfn, max_ports_per_engine, + max_phys_tcs_per_port, port_params); + return 0; +} + +int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 port_id, + u8 pf_id, + u8 max_phys_tcs_per_port, + bool is_first_pf, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 start_pq, + u16 num_pf_pqs, + u16 num_vf_pqs, + u8 start_vport, + u8 num_vports, + u16 pf_wfq, + u32 pf_rl, + struct init_qm_pq_params *pq_params, + struct init_qm_vport_params *vport_params) +{ + u8 tc, i; + u32 other_mem_size_4kb = + QM_PQ_MEM_4KB(num_pf_cids + num_tids) * QM_OTHER_PQS_PER_PF; + /* clear first Tx PQ ID array for each VPORT */ + for (i = 0; i < num_vports; i++) + for (tc = 0; tc < NUM_OF_TCS; tc++) + vport_params[i].first_tx_pq_id[tc] = QM_INVALID_PQ_ID; + /* map Other PQs (if any) */ +#if QM_OTHER_PQS_PER_PF > 0 + ecore_other_pq_map_rt_init(p_hwfn, port_id, pf_id, num_pf_cids, + num_tids, 0); +#endif + /* map Tx PQs */ + ecore_tx_pq_map_rt_init(p_hwfn, p_ptt, port_id, pf_id, + max_phys_tcs_per_port, is_first_pf, num_pf_cids, + num_vf_cids, start_pq, num_pf_pqs, num_vf_pqs, + start_vport, other_mem_size_4kb, pq_params, + vport_params); + /* init PF WFQ */ + if (pf_wfq) + if (ecore_pf_wfq_rt_init + (p_hwfn, port_id, pf_id, pf_wfq, max_phys_tcs_per_port, + num_pf_pqs + num_vf_pqs, pq_params) != 0) + return -1; + /* init PF RL */ + if (ecore_pf_rl_rt_init(p_hwfn, pf_id, pf_rl) != 0) + return -1; + /* set VPORT WFQ */ + if (ecore_vp_wfq_rt_init(p_hwfn, num_vports, vport_params) != 0) + return -1; + /* set VPORT RL */ + if (ecore_vport_rl_rt_init + (p_hwfn, start_vport, num_vports, vport_params) != 0) + return -1; + return 0; +} + +int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq) +{ + u32 inc_val = QM_WFQ_INC_VAL(pf_wfq); + if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, "Invalid PF WFQ weight configuration"); + return -1; + } + ecore_wr(p_hwfn, p_ptt, QM_REG_WFQPFWEIGHT + pf_id * 4, inc_val); + return 0; +} + +int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl) +{ + u32 inc_val = QM_RL_INC_VAL(pf_rl); + if (inc_val > QM_RL_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, "Invalid PF rate limit configuration"); + return -1; + } + ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFCRD + pf_id * 4, + QM_RL_CRD_REG_SIGN_BIT); + ecore_wr(p_hwfn, p_ptt, QM_REG_RLPFINCVAL + pf_id * 4, inc_val); + return 0; +} + +int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq) +{ + u8 tc; + u32 inc_val = QM_WFQ_INC_VAL(vport_wfq); + if (inc_val == 0 || inc_val > QM_WFQ_MAX_INC_VAL) { + DP_NOTICE(p_hwfn, true, + "Invalid VPORT WFQ weight configuration"); + return -1; + } + for (tc = 0; tc < NUM_OF_TCS; tc++) { + u16 vport_pq_id = first_tx_pq_id[tc]; + if (vport_pq_id != QM_INVALID_PQ_ID) { + ecore_wr(p_hwfn, p_ptt, + QM_REG_WFQVPWEIGHT + vport_pq_id * 4, inc_val); + } + } + return 0; +} + +int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl) +{ + u32 inc_val = QM_RL_INC_VAL(vport_rl); + if (inc_val > QM_RL_MAX_INC_VAL) { + 
DP_NOTICE(p_hwfn, true, + "Invalid VPORT rate-limit configuration"); + return -1; + } + ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLCRD + vport_id * 4, + QM_RL_CRD_REG_SIGN_BIT); + ecore_wr(p_hwfn, p_ptt, QM_REG_RLGLBLINCVAL + vport_id * 4, inc_val); + return 0; +} + +bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, u16 start_pq, u16 num_pqs) +{ + u32 cmd_arr[QM_CMD_STRUCT_SIZE(QM_STOP_CMD)] = { 0 }; + u32 pq_mask = 0, last_pq = start_pq + num_pqs - 1, pq_id; + /* set command's PQ type */ + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PQ_TYPE, is_tx_pq ? 0 : 1); + /* go over requested PQs */ + for (pq_id = start_pq; pq_id <= last_pq; pq_id++) { + /* set PQ bit in mask (stop command only) */ + if (!is_release_cmd) + pq_mask |= (1 << (pq_id % QM_STOP_PQ_MASK_WIDTH)); + /* if last PQ or end of PQ mask, write command */ + if ((pq_id == last_pq) || + (pq_id % QM_STOP_PQ_MASK_WIDTH == + (QM_STOP_PQ_MASK_WIDTH - 1))) { + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, PAUSE_MASK, + pq_mask); + QM_CMD_SET_FIELD(cmd_arr, QM_STOP_CMD, GROUP_ID, + pq_id / QM_STOP_PQ_MASK_WIDTH); + if (!ecore_send_qm_cmd + (p_hwfn, p_ptt, QM_STOP_CMD_ADDR, cmd_arr[0], + cmd_arr[1])) + return false; + pq_mask = 0; + } + } + return true; +} + +/* NIG: ETS configuration constants */ +#define NIG_TX_ETS_CLIENT_OFFSET 4 +#define NIG_LB_ETS_CLIENT_OFFSET 1 +#define NIG_ETS_MIN_WFQ_BYTES 1600 +/* NIG: ETS constants */ +#define NIG_ETS_UP_BOUND(weight, mtu) \ +(2 * ((weight) > (mtu) ? (weight) : (mtu))) +/* NIG: RL constants */ +#define NIG_RL_BASE_TYPE 1 /* byte base type */ +#define NIG_RL_PERIOD 1 /* in us */ +#define NIG_RL_PERIOD_CLK_25M (25 * NIG_RL_PERIOD) +#define NIG_RL_INC_VAL(rate) (((rate) * NIG_RL_PERIOD) / 8) +#define NIG_RL_MAX_VAL(inc_val, mtu) \ +(2 * ((inc_val) > (mtu) ? (inc_val) : (mtu))) +/* NIG: packet prioritry configuration constants */ +#define NIG_PRIORITY_MAP_TC_BITS 4 +void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_ets_req *req, bool is_lb) +{ + u8 tc, sp_tc_map = 0, wfq_tc_map = 0; + u8 num_tc = is_lb ? NUM_OF_TCS : NUM_OF_PHYS_TCS; + u8 tc_client_offset = + is_lb ? NIG_LB_ETS_CLIENT_OFFSET : NIG_TX_ETS_CLIENT_OFFSET; + u32 min_weight = 0xffffffff; + u32 tc_weight_base_addr = + is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : + NIG_REG_TX_ARB_CREDIT_WEIGHT_0; + u32 tc_weight_addr_diff = + is_lb ? NIG_REG_LB_ARB_CREDIT_WEIGHT_1 - + NIG_REG_LB_ARB_CREDIT_WEIGHT_0 : NIG_REG_TX_ARB_CREDIT_WEIGHT_1 - + NIG_REG_TX_ARB_CREDIT_WEIGHT_0; + u32 tc_bound_base_addr = + is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : + NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0; + u32 tc_bound_addr_diff = + is_lb ? NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 - + NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 : + NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 - + NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0; + for (tc = 0; tc < num_tc; tc++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + /* update SP map */ + if (tc_req->use_sp) + sp_tc_map |= (1 << tc); + if (tc_req->use_wfq) { + /* update WFQ map */ + wfq_tc_map |= (1 << tc); + /* find minimal weight */ + if (tc_req->weight < min_weight) + min_weight = tc_req->weight; + } + } + /* write SP map */ + ecore_wr(p_hwfn, p_ptt, + is_lb ? NIG_REG_LB_ARB_CLIENT_IS_STRICT : + NIG_REG_TX_ARB_CLIENT_IS_STRICT, + (sp_tc_map << tc_client_offset)); + /* write WFQ map */ + ecore_wr(p_hwfn, p_ptt, + is_lb ? 
NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ : + NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ, + (wfq_tc_map << tc_client_offset)); + /* write WFQ weights */ + for (tc = 0; tc < num_tc; tc++, tc_client_offset++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + if (tc_req->use_wfq) { + /* translate weight to bytes */ + u32 byte_weight = + (NIG_ETS_MIN_WFQ_BYTES * tc_req->weight) / + min_weight; + /* write WFQ weight */ + ecore_wr(p_hwfn, p_ptt, + tc_weight_base_addr + + tc_weight_addr_diff * tc_client_offset, + byte_weight); + /* write WFQ upper bound */ + ecore_wr(p_hwfn, p_ptt, + tc_bound_base_addr + + tc_bound_addr_diff * tc_client_offset, + NIG_ETS_UP_BOUND(byte_weight, req->mtu)); + } + } +} + +void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_lb_rl_req *req) +{ + u8 tc; + u32 ctrl, inc_val, reg_offset; + /* disable global MAC+LB RL */ + ctrl = + NIG_RL_BASE_TYPE << + NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl); + /* configure and enable global MAC+LB RL */ + if (req->lb_mac_rate) { + /* configure */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD, + NIG_RL_PERIOD_CLK_25M); + inc_val = NIG_RL_INC_VAL(req->lb_mac_rate); + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE, + inc_val); + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE, + NIG_RL_MAX_VAL(inc_val, req->mtu)); + /* enable */ + ctrl |= + 1 << + NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_TX_LB_GLBRATELIMIT_CTRL, ctrl); + } + /* disable global LB-only RL */ + ctrl = + NIG_RL_BASE_TYPE << + NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl); + /* configure and enable global LB-only RL */ + if (req->lb_rate) { + /* configure */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_PERIOD, + NIG_RL_PERIOD_CLK_25M); + inc_val = NIG_RL_INC_VAL(req->lb_rate); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_INC_VALUE, + inc_val); + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_MAX_VALUE, + NIG_RL_MAX_VAL(inc_val, req->mtu)); + /* enable */ + ctrl |= + 1 << NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT; + ecore_wr(p_hwfn, p_ptt, NIG_REG_LB_BRBRATELIMIT_CTRL, ctrl); + } + /* per-TC RLs */ + for (tc = 0, reg_offset = 0; tc < NUM_OF_PHYS_TCS; + tc++, reg_offset += 4) { + /* disable TC RL */ + ctrl = + NIG_RL_BASE_TYPE << + NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT; + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, ctrl); + /* configure and enable TC RL */ + if (req->tc_rate[tc]) { + /* configure */ + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 + + reg_offset, NIG_RL_PERIOD_CLK_25M); + inc_val = NIG_RL_INC_VAL(req->tc_rate[tc]); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 + + reg_offset, inc_val); + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 + + reg_offset, NIG_RL_MAX_VAL(inc_val, req->mtu)); + /* enable */ + ctrl |= + 1 << + NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT; + ecore_wr(p_hwfn, p_ptt, + NIG_REG_LB_TCRATELIMIT_CTRL_0 + reg_offset, + ctrl); + } + } +} + +void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_pri_tc_map_req *req) +{ + u8 pri, tc; + u32 pri_tc_mask = 0; + u8 tc_pri_mask[NUM_OF_PHYS_TCS] = { 0 }; + for (pri = 0; pri < NUM_OF_VLAN_PRIORITIES; pri++) { + if 
(req->pri[pri].valid) { + pri_tc_mask |= + (req->pri[pri]. + tc_id << (pri * NIG_PRIORITY_MAP_TC_BITS)); + tc_pri_mask[req->pri[pri].tc_id] |= (1 << pri); + } + } + /* write priority -> TC mask */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_PKT_PRIORITY_TO_TC, pri_tc_mask); + /* write TC -> priority mask */ + for (tc = 0; tc < NUM_OF_PHYS_TCS; tc++) { + ecore_wr(p_hwfn, p_ptt, NIG_REG_PRIORITY_FOR_TC_0 + tc * 4, + tc_pri_mask[tc]); + ecore_wr(p_hwfn, p_ptt, NIG_REG_RX_TC0_PRIORITY_MASK + tc * 4, + tc_pri_mask[tc]); + } +} + +/* PRS: ETS configuration constants */ +#define PRS_ETS_MIN_WFQ_BYTES 1600 +#define PRS_ETS_UP_BOUND(weight, mtu) \ +(2 * ((weight) > (mtu) ? (weight) : (mtu))) +void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_ets_req *req) +{ + u8 tc, sp_tc_map = 0, wfq_tc_map = 0; + u32 min_weight = 0xffffffff; + u32 tc_weight_addr_diff = + PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 - PRS_REG_ETS_ARB_CREDIT_WEIGHT_0; + u32 tc_bound_addr_diff = + PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 - + PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0; + for (tc = 0; tc < NUM_OF_TCS; tc++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + /* update SP map */ + if (tc_req->use_sp) + sp_tc_map |= (1 << tc); + if (tc_req->use_wfq) { + /* update WFQ map */ + wfq_tc_map |= (1 << tc); + /* find minimal weight */ + if (tc_req->weight < min_weight) + min_weight = tc_req->weight; + } + } + /* write SP map */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_STRICT, sp_tc_map); + /* write WFQ map */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ, + wfq_tc_map); + /* write WFQ weights */ + for (tc = 0; tc < NUM_OF_TCS; tc++) { + struct init_ets_tc_req *tc_req = &req->tc_req[tc]; + if (tc_req->use_wfq) { + /* translate weight to bytes */ + u32 byte_weight = + (PRS_ETS_MIN_WFQ_BYTES * tc_req->weight) / + min_weight; + /* write WFQ weight */ + ecore_wr(p_hwfn, p_ptt, + PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 + + tc * tc_weight_addr_diff, byte_weight); + /* write WFQ upper bound */ + ecore_wr(p_hwfn, p_ptt, + PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 + + tc * tc_bound_addr_diff, + PRS_ETS_UP_BOUND(byte_weight, req->mtu)); + } + } +} + +/* BRB: RAM configuration constants */ +#define BRB_TOTAL_RAM_BLOCKS_BB 4800 +#define BRB_TOTAL_RAM_BLOCKS_K2 5632 +#define BRB_BLOCK_SIZE 128 /* in bytes */ +#define BRB_MIN_BLOCKS_PER_TC 9 +#define BRB_HYST_BYTES 10240 +#define BRB_HYST_BLOCKS (BRB_HYST_BYTES / BRB_BLOCK_SIZE) +/* + * temporary big RAM allocation - should be updated + */ +void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_brb_ram_req *req) +{ + u8 port, active_ports = 0; + u32 active_port_blocks, reg_offset = 0; + u32 tc_headroom_blocks = + (u32)DIV_ROUND_UP(req->headroom_per_tc, BRB_BLOCK_SIZE); + u32 min_pkt_size_blocks = + (u32)DIV_ROUND_UP(req->min_pkt_size, BRB_BLOCK_SIZE); + u32 total_blocks = + ECORE_IS_K2(p_hwfn-> + p_dev) ? BRB_TOTAL_RAM_BLOCKS_K2 : + BRB_TOTAL_RAM_BLOCKS_BB; + /* find number of active ports */ + for (port = 0; port < MAX_NUM_PORTS; port++) + if (req->num_active_tcs[port]) + active_ports++; + active_port_blocks = (u32)(total_blocks / active_ports); + for (port = 0; port < req->max_ports_per_engine; port++) { + /* calculate per-port sizes */ + u32 tc_guaranteed_blocks = + (u32)DIV_ROUND_UP(req->guranteed_per_tc, BRB_BLOCK_SIZE); + u32 port_blocks = + req->num_active_tcs[port] ? 
active_port_blocks : 0; + u32 port_guaranteed_blocks = + req->num_active_tcs[port] * tc_guaranteed_blocks; + u32 port_shared_blocks = port_blocks - port_guaranteed_blocks; + u32 full_xoff_th = + req->num_active_tcs[port] * BRB_MIN_BLOCKS_PER_TC; + u32 full_xon_th = full_xoff_th + min_pkt_size_blocks; + u32 pause_xoff_th = tc_headroom_blocks; + u32 pause_xon_th = pause_xoff_th + min_pkt_size_blocks; + u8 tc; + /* init total size per port */ + ecore_wr(p_hwfn, p_ptt, BRB_REG_TOTAL_MAC_SIZE + port * 4, + port_blocks); + /* init shared size per port */ + ecore_wr(p_hwfn, p_ptt, BRB_REG_SHARED_HR_AREA + port * 4, + port_shared_blocks); + for (tc = 0; tc < NUM_OF_TCS; tc++, reg_offset += 4) { + /* clear init values for non-active TCs */ + if (tc == req->num_active_tcs[port]) { + tc_guaranteed_blocks = 0; + full_xoff_th = 0; + full_xon_th = 0; + pause_xoff_th = 0; + pause_xon_th = 0; + } + /* init guaranteed size per TC */ + ecore_wr(p_hwfn, p_ptt, + BRB_REG_TC_GUARANTIED_0 + reg_offset, + tc_guaranteed_blocks); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_GUARANTIED_HYST_0 + reg_offset, + BRB_HYST_BLOCKS); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 + + reg_offset, full_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 + + reg_offset, full_xon_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 + + reg_offset, pause_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 + + reg_offset, pause_xon_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 + + reg_offset, full_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 + + reg_offset, full_xon_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 + + reg_offset, pause_xoff_th); + ecore_wr(p_hwfn, p_ptt, + BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 + + reg_offset, pause_xon_th); + } + } +} + +/*In MF should be called once per engine to set EtherType of OuterTag*/ +void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 eth_type) +{ + /* update PRS register */ + STORE_RT_REG(p_hwfn, PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type); + /* update NIG register */ + STORE_RT_REG(p_hwfn, NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type); + /* update PBF register */ + STORE_RT_REG(p_hwfn, PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET, eth_type); +} + +/*In MF should be called once per port to set EtherType of OuterTag*/ +void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 eth_type) +{ + /* update DORQ register */ + STORE_RT_REG(p_hwfn, DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET, eth_type); +} + +#define SET_TUNNEL_TYPE_ENABLE_BIT(var, offset, enable) \ +(var = ((var) & ~(1 << (offset))) | ((enable) ? 
(1 << (offset)) : 0)) +#define PRS_ETH_TUNN_FIC_FORMAT -188897008 +void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 dest_port) +{ + /* update PRS register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_VXLAN_PORT, dest_port); + /* update NIG register */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_VXLAN_PORT, dest_port); + /* update PBF register */ + ecore_wr(p_hwfn, p_ptt, PBF_REG_VXLAN_PORT, dest_port); +} + +void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, bool vxlan_enable) +{ + u32 reg_val; + /* update PRS register */ + reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_VXLAN_ENABLE_SHIFT, + vxlan_enable); + ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) { + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + } + /* update NIG register */ + reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT, + vxlan_enable); + ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + /* update DORQ register */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN, + vxlan_enable ? 1 : 0); +} + +void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool eth_gre_enable, bool ip_gre_enable) +{ + u32 reg_val; + /* update PRS register */ + reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GRE_ENABLE_SHIFT, + eth_gre_enable); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GRE_ENABLE_SHIFT, + ip_gre_enable); + ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) { + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + } + /* update NIG register */ + reg_val = ecore_rd(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT, + eth_gre_enable); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT, + ip_gre_enable); + ecore_wr(p_hwfn, p_ptt, NIG_REG_ENC_TYPE_ENABLE, reg_val); + /* update DORQ registers */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN, + eth_gre_enable ? 1 : 0); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN, + ip_gre_enable ? 
1 : 0); +} + +void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 dest_port) +{ + /* geneve tunnel not supported in BB_A0 */ + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) + return; + /* update PRS register */ + ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_PORT, dest_port); + /* update NIG register */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_PORT, dest_port); + /* update PBF register */ + ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_PORT, dest_port); +} + +void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool eth_geneve_enable, bool ip_geneve_enable) +{ + u32 reg_val; + /* geneve tunnel not supported in BB_A0 */ + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) + return; + /* update PRS register */ + reg_val = ecore_rd(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_ETH_OVER_GENEVE_ENABLE_SHIFT, + eth_geneve_enable); + SET_TUNNEL_TYPE_ENABLE_BIT(reg_val, + PRS_REG_ENCAPSULATION_TYPE_EN_IP_OVER_GENEVE_ENABLE_SHIFT, + ip_geneve_enable); + ecore_wr(p_hwfn, p_ptt, PRS_REG_ENCAPSULATION_TYPE_EN, reg_val); + if (reg_val) { + ecore_wr(p_hwfn, p_ptt, PRS_REG_OUTPUT_FORMAT_4_0, + PRS_ETH_TUNN_FIC_FORMAT); + } + /* update NIG register */ + ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_ETH_ENABLE, + eth_geneve_enable ? 1 : 0); + ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_IP_ENABLE, + ip_geneve_enable ? 1 : 0); + /* comp ver */ + reg_val = (ip_geneve_enable || eth_geneve_enable) ? 1 : 0; + ecore_wr(p_hwfn, p_ptt, NIG_REG_NGE_COMP_VER, reg_val); + ecore_wr(p_hwfn, p_ptt, PBF_REG_NGE_COMP_VER, reg_val); + ecore_wr(p_hwfn, p_ptt, PRS_REG_NGE_COMP_VER, reg_val); + /* EDPM with geneve tunnel not supported in BB_B0 */ + if (ECORE_IS_BB_B0(p_hwfn->p_dev)) + return; + /* update DORQ registers */ + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN, + eth_geneve_enable ? 1 : 0); + ecore_wr(p_hwfn, p_ptt, DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN, + ip_geneve_enable ? 1 : 0); +} diff --git a/drivers/net/qede/base/ecore_init_fw_funcs.h b/drivers/net/qede/base/ecore_init_fw_funcs.h new file mode 100644 index 00000000..5280cd7a --- /dev/null +++ b/drivers/net/qede/base/ecore_init_fw_funcs.h @@ -0,0 +1,263 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef _INIT_FW_FUNCS_H +#define _INIT_FW_FUNCS_H +/* forward declarations */ +struct init_qm_pq_params; +/** + * @brief ecore_qm_pf_mem_size - prepare QM ILT sizes + * + * Returns the required host memory size in 4KB units. + * Must be called before all QM init HSI functions. + * + * @param pf_id - physical function ID + * @param num_pf_cids - number of connections used by this PF + * @param num_vf_cids - number of connections used by VFs of this PF + * @param num_tids - number of tasks used by this PF + * @param num_pf_pqs - number of PQs used by this PF + * @param num_vf_pqs - number of PQs used by VFs of this PF + * + * @return The required host memory size in 4KB units. 
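A hedged usage sketch for this sizing helper (every count below is a made-up example value, not a real PF configuration):

/* size the PF's QM PQ memory before allocating its ILT backing pages */
u32 num_4kb_pages = ecore_qm_pf_mem_size(0,	/* pf_id */
					 2048,	/* num_pf_cids */
					 1024,	/* num_vf_cids */
					 512,	/* num_tids */
					 16,	/* num_pf_pqs */
					 32);	/* num_vf_pqs */
/* the backing allocation must cover num_4kb_pages * 4096 bytes */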
+ */ +u32 ecore_qm_pf_mem_size(u8 pf_id, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, u16 num_pf_pqs, u16 num_vf_pqs); +/** + * @brief ecore_qm_common_rt_init - + * Prepare QM runtime init values for the engine phase + * + * @param p_hwfn + * @param max_ports_per_engine - max number of ports per engine in HW + * @param max_phys_tcs_per_port - max number of physical TCs per port in HW + * @param pf_rl_en - enable per-PF rate limiters + * @param pf_wfq_en - enable per-PF WFQ + * @param vport_rl_en - enable per-VPORT rate limiters + * @param vport_wfq_en - enable per-VPORT WFQ + * @param port_params- array of size MAX_NUM_PORTS with parameters for each port + * + * @return 0 on success, -1 on error. + */ +int ecore_qm_common_rt_init(struct ecore_hwfn *p_hwfn, + u8 max_ports_per_engine, + u8 max_phys_tcs_per_port, + bool pf_rl_en, + bool pf_wfq_en, + bool vport_rl_en, + bool vport_wfq_en, + struct init_qm_port_params + port_params[MAX_NUM_PORTS]); + +int ecore_qm_pf_rt_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 port_id, + u8 pf_id, + u8 max_phys_tcs_per_port, + bool is_first_pf, + u32 num_pf_cids, + u32 num_vf_cids, + u32 num_tids, + u16 start_pq, + u16 num_pf_pqs, + u16 num_vf_pqs, + u8 start_vport, + u8 num_vports, + u16 pf_wfq, + u32 pf_rl, + struct init_qm_pq_params *pq_params, + struct init_qm_vport_params *vport_params); +/** + * @brief ecore_init_pf_wfq Initializes the WFQ weight of the specified PF + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param pf_id - PF ID + * @param pf_wfq - WFQ weight. Must be non-zero. + * + * @return 0 on success, -1 on error. + */ +int ecore_init_pf_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 pf_id, u16 pf_wfq); +/** + * @brief ecore_init_pf_rl Initializes the rate limit of the specified PF + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param pf_id - PF ID + * @param pf_rl - rate limit in Mb/sec units + * + * @return 0 on success, -1 on error. + */ +int ecore_init_pf_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 pf_id, u32 pf_rl); +/** + * @brief ecore_init_vport_wfq Initializes the WFQ weight of the specified VPORT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param first_tx_pq_id- An array containing the first Tx PQ ID associated + * with the VPORT for each TC. This array is filled by + * ecore_qm_pf_rt_init + * @param vport_wfq - WFQ weight. Must be non-zero. + * + * @return 0 on success, -1 on error. + */ +int ecore_init_vport_wfq(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 first_tx_pq_id[NUM_OF_TCS], u16 vport_wfq); +/** + * @brief ecore_init_vport_rl Initializes the rate limit of the specified VPORT + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param vport_id - VPORT ID + * @param vport_rl - rate limit in Mb/sec units + * + * @return 0 on success, -1 on error. + */ +int ecore_init_vport_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 vport_id, u32 vport_rl); +/** + * @brief ecore_send_qm_stop_cmd Sends a stop command to the QM + * + * @param p_hwfn + * @param p_ptt - ptt window used for writing the registers + * @param is_release_cmd - true for release, false for stop. + * @param is_tx_pq - true for Tx PQs, false for Other PQs. + * @param start_pq - first PQ ID to stop + * @param num_pqs - Number of PQs to stop, starting from start_pq. 
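The stop/release pairing of ecore_send_qm_stop_cmd() reads most clearly as a usage sketch; everything here is illustrative, with start_pq/num_pqs assumed to come from the PF's QM configuration:

static bool example_pause_and_release_tx_pqs(struct ecore_hwfn *p_hwfn,
					     struct ecore_ptt *p_ptt,
					     u16 start_pq, u16 num_pqs)
{
	/* pause the Tx PQs (is_release_cmd = false, is_tx_pq = true) */
	if (!ecore_send_qm_stop_cmd(p_hwfn, p_ptt, false, true,
				    start_pq, num_pqs))
		return false;	/* timed out waiting for the QM SDM */

	/* ... drain or reconfigure the queues here ... */

	/* release them again (is_release_cmd = true) */
	return ecore_send_qm_stop_cmd(p_hwfn, p_ptt, true, true,
				      start_pq, num_pqs);
}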
+ * + * @return bool, true if successful, false if timeout occurred while + * waiting for QM command done. + */ +bool ecore_send_qm_stop_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool is_release_cmd, + bool is_tx_pq, u16 start_pq, u16 num_pqs); +/** + * @brief ecore_init_nig_ets - initializes the NIG ETS arbiter + * + * Based on weight/priority requirements per-TC. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the NIG ETS initialization requirements. + * @param is_lb - if set, the loopback port arbiter is initialized, otherwise + * the physical port arbiter is initialized. The pure-LB TC + * requirements are ignored when is_lb is cleared. + */ +void ecore_init_nig_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_ets_req *req, bool is_lb); +/** + * @brief ecore_init_nig_lb_rl - initializes the NIG LB RLs + * + * Based on global and per-TC rate requirements + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the NIG LB RLs initialization requirements. + */ +void ecore_init_nig_lb_rl(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_lb_rl_req *req); +/** + * @brief ecore_init_nig_pri_tc_map - initializes the NIG priority to TC map. + * + * Assumes valid arguments. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - required mapping from prioirties to TCs. + */ +void ecore_init_nig_pri_tc_map(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_nig_pri_tc_map_req *req); +/** + * @brief ecore_init_prs_ets - initializes the PRS Rx ETS arbiter + * + * Based on weight/priority requirements per-TC. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the PRS ETS initialization requirements. + */ +void ecore_init_prs_ets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_ets_req *req); +/** + * @brief ecore_init_brb_ram - initializes BRB RAM sizes per TC + * + * Based on weight/priority requirements per-TC. + * + * @param p_ptt - ptt window used for writing the registers. + * @param req - the BRB RAM initialization requirements. + */ +void ecore_init_brb_ram(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_brb_ram_req *req); +/** + * @brief ecore_set_engine_mf_ovlan_eth_type - initializes Nig,Prs,Pbf + * and llh ethType Regs to input ethType + * should Be called once per engine if engine is in BD mode. + * + * @param p_ptt - ptt window used for writing the registers. + * @param ethType - etherType to configure + */ +void ecore_set_engine_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 eth_type); +/** + * @brief ecore_set_port_mf_ovlan_eth_type - initializes DORQ ethType Regs + * to input ethType + * should Be called once per port. + * + * @param p_ptt - ptt window used for writing the registers. + * @param ethType - etherType to configure + */ +void ecore_set_port_mf_ovlan_eth_type(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 eth_type); +/** + * @brief ecore_set_vxlan_dest_port - init vxlan tunnel destination udp port + * + * @param p_ptt - ptt window used for writing the registers. + * @param dest_port - vxlan destination udp port. + */ +void ecore_set_vxlan_dest_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 dest_port); +/** + * @brief ecore_set_vxlan_enable - enable or disable VXLAN tunnel in HW + * + * @param p_ptt - ptt window used for writing the registers. + * @param vxlan_enable - vxlan enable flag. 
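A minimal usage sketch for the two VXLAN helpers; 4789 is simply the IANA-assigned VXLAN UDP port, used here as an example value rather than anything mandated by this code:

static void example_enable_vxlan_offload(struct ecore_hwfn *p_hwfn,
					 struct ecore_ptt *p_ptt)
{
	/* program the UDP destination port first, then enable parsing */
	ecore_set_vxlan_dest_port(p_hwfn, p_ptt, 4789);
	ecore_set_vxlan_enable(p_hwfn, p_ptt, true);
}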
+ */ +void ecore_set_vxlan_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, bool vxlan_enable); +/** + * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW + * + * @param p_ptt - ptt window used for writing the registers. + * @param eth_gre_enable - eth GRE enable enable flag. + * @param ip_gre_enable - IP GRE enable enable flag. + */ +void ecore_set_gre_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool eth_gre_enable, bool ip_gre_enable); +/** + * @brief ecore_set_geneve_dest_port - init geneve tunnel destination udp port + * + * @param p_ptt - ptt window used for writing the registers. + * @param dest_port - geneve destination udp port. + */ +void ecore_set_geneve_dest_port(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 dest_port); +/** + * @brief ecore_set_gre_enable - enable or disable GRE tunnel in HW + * + * @param p_ptt - ptt window used for writing the registers. + * @param eth_geneve_enable - eth GENEVE enable enable flag. + * @param ip_geneve_enable - IP GENEVE enable enable flag. + */ +void ecore_set_geneve_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool eth_geneve_enable, bool ip_geneve_enable); +#endif diff --git a/drivers/net/qede/base/ecore_init_ops.c b/drivers/net/qede/base/ecore_init_ops.c new file mode 100644 index 00000000..326eb926 --- /dev/null +++ b/drivers/net/qede/base/ecore_init_ops.c @@ -0,0 +1,599 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +/* include the precompiled configuration values - only once */ +#include "bcm_osal.h" +#include "ecore_hsi_common.h" +#include "ecore.h" +#include "ecore_hw.h" +#include "ecore_status.h" +#include "ecore_rt_defs.h" +#include "ecore_init_fw_funcs.h" + +#include "ecore_iro_values.h" +#include "ecore_sriov.h" +#include "ecore_gtt_values.h" +#include "reg_addr.h" +#include "ecore_init_ops.h" + +#define ECORE_INIT_MAX_POLL_COUNT 100 +#define ECORE_INIT_POLL_PERIOD_US 500 + +void ecore_init_iro_array(struct ecore_dev *p_dev) +{ + p_dev->iro_arr = iro_arr; +} + +/* Runtime configuration helpers */ +void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn) +{ + int i; + + for (i = 0; i < RUNTIME_ARRAY_SIZE; i++) + p_hwfn->rt_data.b_valid[i] = false; +} + +void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val) +{ + p_hwfn->rt_data.init_val[rt_offset] = val; + p_hwfn->rt_data.b_valid[rt_offset] = true; +} + +void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn, + u32 rt_offset, u32 *p_val, osal_size_t size) +{ + osal_size_t i; + + for (i = 0; i < size / sizeof(u32); i++) { + p_hwfn->rt_data.init_val[rt_offset + i] = p_val[i]; + p_hwfn->rt_data.b_valid[rt_offset + i] = true; + } +} + +static enum _ecore_status_t ecore_init_rt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, + u16 rt_offset, + u16 size, bool b_must_dmae) +{ + u32 *p_init_val = &p_hwfn->rt_data.init_val[rt_offset]; + bool *p_valid = &p_hwfn->rt_data.b_valid[rt_offset]; + enum _ecore_status_t rc = ECORE_SUCCESS; + u16 i, segment; + + /* Since not all RT entries are initialized, go over the RT and + * for each segment of initialized values use DMA. + */ + for (i = 0; i < size; i++) { + if (!p_valid[i]) + continue; + + /* In case there isn't any wide-bus configuration here, + * simply write the data instead of using dmae. 
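To make the link between the STORE_RT_REG users earlier in this patch and this runtime machinery explicit, a small hedged sketch (the offset is borrowed from the QM code purely as an example):

static void example_stage_rt_value(struct ecore_hwfn *p_hwfn)
{
	/* stage one runtime value; nothing is written to the chip yet */
	ecore_init_store_rt_reg(p_hwfn, QM_REG_RLPFENABLE_RT_OFFSET, 1);

	/* the value reaches the device only when ecore_init_rt() walks
	 * the valid entries during the init phase, choosing between
	 * plain register writes and DMAE for wide-bus segments */
}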
+ */ + if (!b_must_dmae) { + ecore_wr(p_hwfn, p_ptt, addr + (i << 2), p_init_val[i]); + continue; + } + + /* Start of a new segment */ + for (segment = 1; i + segment < size; segment++) + if (!p_valid[i + segment]) + break; + + rc = ecore_dmae_host2grc(p_hwfn, p_ptt, + (osal_uintptr_t)(p_init_val + i), + addr + (i << 2), segment, 0); + if (rc != ECORE_SUCCESS) + return rc; + + /* Jump over the entire segment, including invalid entry */ + i += segment; + } + + return rc; +} + +enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_rt_data *rt_data = &p_hwfn->rt_data; + + if (IS_VF(p_hwfn->p_dev)) + return ECORE_SUCCESS; + + rt_data->b_valid = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(bool) * RUNTIME_ARRAY_SIZE); + if (!rt_data->b_valid) + return ECORE_NOMEM; + + rt_data->init_val = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(u32) * RUNTIME_ARRAY_SIZE); + if (!rt_data->init_val) { + OSAL_FREE(p_hwfn->p_dev, rt_data->b_valid); + return ECORE_NOMEM; + } + + return ECORE_SUCCESS; +} + +void ecore_init_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.init_val); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->rt_data.b_valid); +} + +static enum _ecore_status_t ecore_init_array_dmae(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, + u32 dmae_data_offset, + u32 size, const u32 *p_buf, + bool b_must_dmae, + bool b_can_dmae) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Perform DMAE only for lengthy enough sections or for wide-bus */ +#ifndef ASIC_ONLY + if ((CHIP_REV_IS_SLOW(p_hwfn->p_dev) && (size < 16)) || + !b_can_dmae || (!b_must_dmae && (size < 16))) { +#else + if (!b_can_dmae || (!b_must_dmae && (size < 16))) { +#endif + const u32 *data = p_buf + dmae_data_offset; + u32 i; + + for (i = 0; i < size; i++) + ecore_wr(p_hwfn, p_ptt, addr + (i << 2), data[i]); + } else { + rc = ecore_dmae_host2grc(p_hwfn, p_ptt, + (osal_uintptr_t)(p_buf + + dmae_data_offset), + addr, size, 0); + } + + return rc; +} + +static enum _ecore_status_t ecore_init_fill_dmae(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, u32 fill, + u32 fill_count) +{ + static u32 zero_buffer[DMAE_MAX_RW_SIZE]; + + OSAL_MEMSET(zero_buffer, 0, sizeof(u32) * DMAE_MAX_RW_SIZE); + + return ecore_dmae_host2grc(p_hwfn, p_ptt, + (osal_uintptr_t)&zero_buffer[0], + addr, fill_count, + ECORE_DMAE_FLAG_RW_REPL_SRC); +} + +static void ecore_init_fill(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 addr, u32 fill, u32 fill_count) +{ + u32 i; + + for (i = 0; i < fill_count; i++, addr += sizeof(u32)) + ecore_wr(p_hwfn, p_ptt, addr, fill); +} + +static enum _ecore_status_t ecore_init_cmd_array(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_write_op *cmd, + bool b_must_dmae, + bool b_can_dmae) +{ +#ifdef CONFIG_ECORE_ZIPPED_FW + u32 offset, output_len, input_len, max_size; +#endif + u32 dmae_array_offset = OSAL_LE32_TO_CPU(cmd->args.array_offset); + struct ecore_dev *p_dev = p_hwfn->p_dev; + enum _ecore_status_t rc = ECORE_SUCCESS; + union init_array_hdr *hdr; + const u32 *array_data; + u32 size, addr, data; + + array_data = p_dev->fw_data->arr_data; + data = OSAL_LE32_TO_CPU(cmd->data); + addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + + hdr = (union init_array_hdr *) + (uintptr_t)(array_data + dmae_array_offset); + data = OSAL_LE32_TO_CPU(hdr->raw.data); + switch (GET_FIELD(data, INIT_ARRAY_RAW_HDR_TYPE)) { + case INIT_ARR_ZIPPED: +#ifdef CONFIG_ECORE_ZIPPED_FW + offset = dmae_array_offset + 1; + input_len = 
GET_FIELD(data, INIT_ARRAY_ZIPPED_HDR_ZIPPED_SIZE); + max_size = MAX_ZIPPED_SIZE * 4; + OSAL_MEMSET(p_hwfn->unzip_buf, 0, max_size); + + output_len = OSAL_UNZIP_DATA(p_hwfn, input_len, + (u8 *)(uintptr_t)&array_data[offset], + max_size, + (u8 *)p_hwfn->unzip_buf); + if (output_len) { + rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, 0, + output_len, + p_hwfn->unzip_buf, + b_must_dmae, b_can_dmae); + } else { + DP_NOTICE(p_hwfn, true, "Failed to unzip dmae data\n"); + rc = ECORE_INVAL; + } +#else + DP_NOTICE(p_hwfn, true, + "Using zipped firmware without config enabled\n"); + rc = ECORE_INVAL; +#endif + break; + case INIT_ARR_PATTERN: + { + u32 repeats = GET_FIELD(data, + INIT_ARRAY_PATTERN_HDR_REPETITIONS); + u32 i; + + size = GET_FIELD(data, + INIT_ARRAY_PATTERN_HDR_PATTERN_SIZE); + + for (i = 0; i < repeats; i++, addr += size << 2) { + rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, + dmae_array_offset + + 1, size, array_data, + b_must_dmae, + b_can_dmae); + if (rc) + break; + } + break; + } + case INIT_ARR_STANDARD: + size = GET_FIELD(data, INIT_ARRAY_STANDARD_HDR_SIZE); + rc = ecore_init_array_dmae(p_hwfn, p_ptt, addr, + dmae_array_offset + 1, + size, array_data, + b_must_dmae, b_can_dmae); + break; + } + + return rc; +} + +/* init_ops write command */ +static enum _ecore_status_t ecore_init_cmd_wr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_write_op *p_cmd, + bool b_can_dmae) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + bool b_must_dmae; + u32 addr, data; + + data = OSAL_LE32_TO_CPU(p_cmd->data); + b_must_dmae = GET_FIELD(data, INIT_WRITE_OP_WIDE_BUS); + addr = GET_FIELD(data, INIT_WRITE_OP_ADDRESS) << 2; + + /* Sanitize */ + if (b_must_dmae && !b_can_dmae) { + DP_NOTICE(p_hwfn, true, + "Need to write to %08x for Wide-bus but DMAE isn't" + " allowed\n", + addr); + return ECORE_INVAL; + } + + switch (GET_FIELD(data, INIT_WRITE_OP_SOURCE)) { + case INIT_SRC_INLINE: + data = OSAL_LE32_TO_CPU(p_cmd->args.inline_val); + ecore_wr(p_hwfn, p_ptt, addr, data); + break; + case INIT_SRC_ZEROS: + data = OSAL_LE32_TO_CPU(p_cmd->args.zeros_count); + if (b_must_dmae || (b_can_dmae && (data >= 64))) + rc = ecore_init_fill_dmae(p_hwfn, p_ptt, addr, 0, data); + else + ecore_init_fill(p_hwfn, p_ptt, addr, 0, data); + break; + case INIT_SRC_ARRAY: + rc = ecore_init_cmd_array(p_hwfn, p_ptt, p_cmd, + b_must_dmae, b_can_dmae); + break; + case INIT_SRC_RUNTIME: + ecore_init_rt(p_hwfn, p_ptt, addr, + OSAL_LE16_TO_CPU(p_cmd->args.runtime.offset), + OSAL_LE16_TO_CPU(p_cmd->args.runtime.size), + b_must_dmae); + break; + } + + return rc; +} + +static OSAL_INLINE bool comp_eq(u32 val, u32 expected_val) +{ + return (val == expected_val); +} + +static OSAL_INLINE bool comp_and(u32 val, u32 expected_val) +{ + return (val & expected_val) == expected_val; +} + +static OSAL_INLINE bool comp_or(u32 val, u32 expected_val) +{ + return (val | expected_val) > 0; +} + +/* init_ops read/poll commands */ +static void ecore_init_cmd_rd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct init_read_op *cmd) +{ + bool (*comp_check)(u32 val, u32 expected_val); + u32 delay = ECORE_INIT_POLL_PERIOD_US, val; + u32 data, addr, poll; + int i; + + data = OSAL_LE32_TO_CPU(cmd->op_data); + addr = GET_FIELD(data, INIT_READ_OP_ADDRESS) << 2; + poll = GET_FIELD(data, INIT_READ_OP_POLL_TYPE); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + delay *= 100; +#endif + + val = ecore_rd(p_hwfn, p_ptt, addr); + + if (poll == INIT_POLL_NONE) + return; + + switch (poll) { + case INIT_POLL_EQ: + comp_check 
= comp_eq; + break; + case INIT_POLL_OR: + comp_check = comp_or; + break; + case INIT_POLL_AND: + comp_check = comp_and; + break; + default: + DP_ERR(p_hwfn, "Invalid poll comparison type %08x\n", + cmd->op_data); + return; + } + + data = OSAL_LE32_TO_CPU(cmd->expected_val); + for (i = 0; + i < ECORE_INIT_MAX_POLL_COUNT && !comp_check(val, data); i++) { + OSAL_UDELAY(delay); + val = ecore_rd(p_hwfn, p_ptt, addr); + } + + if (i == ECORE_INIT_MAX_POLL_COUNT) + DP_ERR(p_hwfn, + "Timeout when polling reg: 0x%08x [ Waiting-for: %08x" + " Got: %08x (comparsion %08x)]\n", + addr, OSAL_LE32_TO_CPU(cmd->expected_val), val, + OSAL_LE32_TO_CPU(cmd->op_data)); +} + +/* init_ops callbacks entry point */ +static void ecore_init_cmd_cb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct init_callback_op *p_cmd) +{ + DP_NOTICE(p_hwfn, true, + "Currently init values have no need of callbacks\n"); +} + +static u8 ecore_init_cmd_mode_match(struct ecore_hwfn *p_hwfn, + u16 *p_offset, int modes) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + const u8 *modes_tree_buf; + u8 arg1, arg2, tree_val; + + modes_tree_buf = p_dev->fw_data->modes_tree_buf; + tree_val = modes_tree_buf[(*p_offset)++]; + switch (tree_val) { + case INIT_MODE_OP_NOT: + return ecore_init_cmd_mode_match(p_hwfn, p_offset, modes) ^ 1; + case INIT_MODE_OP_OR: + arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + return arg1 | arg2; + case INIT_MODE_OP_AND: + arg1 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + arg2 = ecore_init_cmd_mode_match(p_hwfn, p_offset, modes); + return arg1 & arg2; + default: + tree_val -= MAX_INIT_MODE_OPS; + return (modes & (1 << tree_val)) ? 1 : 0; + } +} + +static u32 ecore_init_cmd_mode(struct ecore_hwfn *p_hwfn, + struct init_if_mode_op *p_cmd, int modes) +{ + u16 offset = OSAL_LE16_TO_CPU(p_cmd->modes_buf_offset); + + if (ecore_init_cmd_mode_match(p_hwfn, &offset, modes)) + return 0; + else + return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data), + INIT_IF_MODE_OP_CMD_OFFSET); +} + +static u32 ecore_init_cmd_phase(struct ecore_hwfn *p_hwfn, + struct init_if_phase_op *p_cmd, + u32 phase, u32 phase_id) +{ + u32 data = OSAL_LE32_TO_CPU(p_cmd->phase_data); + + if (!(GET_FIELD(data, INIT_IF_PHASE_OP_PHASE) == phase && + (GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == ANY_PHASE_ID || + GET_FIELD(data, INIT_IF_PHASE_OP_PHASE_ID) == phase_id))) + return GET_FIELD(OSAL_LE32_TO_CPU(p_cmd->op_data), + INIT_IF_PHASE_OP_CMD_OFFSET); + else + return 0; +} + +enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int phase, int phase_id, int modes) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 cmd_num, num_init_ops; + union init_op *init_ops; + bool b_dmae = false; + + num_init_ops = p_dev->fw_data->init_ops_size; + init_ops = p_dev->fw_data->init_ops; + +#ifdef CONFIG_ECORE_ZIPPED_FW + p_hwfn->unzip_buf = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, + MAX_ZIPPED_SIZE * 4); + if (!p_hwfn->unzip_buf) { + DP_NOTICE(p_hwfn, true, "Failed to allocate unzip buffer\n"); + return ECORE_NOMEM; + } +#endif + + for (cmd_num = 0; cmd_num < num_init_ops; cmd_num++) { + union init_op *cmd = &init_ops[cmd_num]; + u32 data = OSAL_LE32_TO_CPU(cmd->raw.op_data); + + switch (GET_FIELD(data, INIT_CALLBACK_OP_OP)) { + case INIT_OP_WRITE: + rc = ecore_init_cmd_wr(p_hwfn, p_ptt, &cmd->write, + b_dmae); + break; + + case INIT_OP_READ: + ecore_init_cmd_rd(p_hwfn, p_ptt, &cmd->read); + break; 
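The read/poll helper above selects one of three comparison predicates and then retries the register read a bounded number of times. The following standalone sketch shows the same shape with hypothetical names (read_reg, delay_us, poll_register and MAX_POLL are all invented for illustration); it is not the driver's API, only the pattern it follows.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-ins for the real register-read and delay primitives. */
static uint32_t read_reg(uint32_t addr) { (void)addr; return 0xffffffffu; }
static void delay_us(uint32_t us) { (void)us; }

typedef bool (*cmp_fn)(uint32_t val, uint32_t expected);

static bool cmp_eq(uint32_t val, uint32_t expected)  { return val == expected; }
static bool cmp_and(uint32_t val, uint32_t expected) { return (val & expected) == expected; }
static bool cmp_or(uint32_t val, uint32_t expected)  { return (val | expected) > 0; }

#define MAX_POLL 100

/* Poll 'addr' until the predicate accepts the value or the retry budget runs
 * out; returns -1 on timeout, mirroring the timeout log path in the driver.
 */
static int poll_register(uint32_t addr, uint32_t expected, cmp_fn cmp, uint32_t delay)
{
    uint32_t val = read_reg(addr);
    int i;

    for (i = 0; i < MAX_POLL && !cmp(val, expected); i++) {
        delay_us(delay);
        val = read_reg(addr);
    }

    return (i == MAX_POLL) ? -1 : 0;
}

int main(void)
{
    printf("eq:  %d\n", poll_register(0x1000, 0xffffffffu, cmp_eq, 10));
    printf("and: %d\n", poll_register(0x1000, 0x00000003u, cmp_and, 10));
    printf("or:  %d\n", poll_register(0x1000, 0x00000001u, cmp_or, 10));
    return 0;
}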
+ + case INIT_OP_IF_MODE: + cmd_num += ecore_init_cmd_mode(p_hwfn, &cmd->if_mode, + modes); + break; + case INIT_OP_IF_PHASE: + cmd_num += ecore_init_cmd_phase(p_hwfn, &cmd->if_phase, + phase, phase_id); + b_dmae = GET_FIELD(data, INIT_IF_PHASE_OP_DMAE_ENABLE); + break; + case INIT_OP_DELAY: + /* ecore_init_run is always invoked from + * sleep-able context + */ + OSAL_UDELAY(cmd->delay.delay); + break; + + case INIT_OP_CALLBACK: + ecore_init_cmd_cb(p_hwfn, p_ptt, &cmd->callback); + break; + } + + if (rc) + break; + } +#ifdef CONFIG_ECORE_ZIPPED_FW + OSAL_FREE(p_hwfn->p_dev, p_hwfn->unzip_buf); +#endif + return rc; +} + +void ecore_gtt_init(struct ecore_hwfn *p_hwfn) +{ + u32 gtt_base; + u32 i; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + /* This is done by MFW on ASIC; regardless, this should only + * be done once per chip [i.e., common]. Implementation is + * not too bright, but it should work on the simple FPGA/EMUL + * scenarios. + */ + bool initialized = false; /* @DPDK */ + int poll_cnt = 500; + u32 val; + + /* initialize PTT/GTT (poll for completion) */ + if (!initialized) { + ecore_wr(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_START_INIT_PTT_GTT, 1); + initialized = true; + } + + do { + /* ptt might be overrided by HW until this is done */ + OSAL_UDELAY(10); + ecore_ptt_invalidate(p_hwfn); + val = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, + PGLUE_B_REG_INIT_DONE_PTT_GTT); + } while ((val != 1) && --poll_cnt); + + if (!poll_cnt) + DP_ERR(p_hwfn, + "PGLUE_B_REG_INIT_DONE didn't complete\n"); + } +#endif + + /* Set the global windows */ + gtt_base = PXP_PF_WINDOW_ADMIN_START + PXP_PF_WINDOW_ADMIN_GLOBAL_START; + + for (i = 0; i < OSAL_ARRAY_SIZE(pxp_global_win); i++) + if (pxp_global_win[i]) + REG_WR(p_hwfn, gtt_base + i * PXP_GLOBAL_ENTRY_SIZE, + pxp_global_win[i]); +} + +enum _ecore_status_t ecore_init_fw_data(struct ecore_dev *p_dev, + const u8 *data) +{ + struct ecore_fw_data *fw = p_dev->fw_data; + +#ifdef CONFIG_ECORE_BINARY_FW + struct bin_buffer_hdr *buf_hdr; + u32 offset, len; + + if (!data) { + DP_NOTICE(p_dev, true, "Invalid fw data\n"); + return ECORE_INVAL; + } + + buf_hdr = (struct bin_buffer_hdr *)(uintptr_t)data; + + offset = buf_hdr[BIN_BUF_FW_VER_INFO].offset; + fw->fw_ver_info = (struct fw_ver_info *)((uintptr_t)(data + offset)); + + offset = buf_hdr[BIN_BUF_INIT_CMD].offset; + fw->init_ops = (union init_op *)((uintptr_t)(data + offset)); + + offset = buf_hdr[BIN_BUF_INIT_VAL].offset; + fw->arr_data = (u32 *)((uintptr_t)(data + offset)); + + offset = buf_hdr[BIN_BUF_INIT_MODE_TREE].offset; + fw->modes_tree_buf = (u8 *)((uintptr_t)(data + offset)); + len = buf_hdr[BIN_BUF_INIT_CMD].length; + fw->init_ops_size = len / sizeof(struct init_raw_op); +#else + fw->init_ops = (union init_op *)init_ops; + fw->arr_data = (u32 *)init_val; + fw->modes_tree_buf = (u8 *)modes_tree_buf; + fw->init_ops_size = init_ops_size; +#endif + + return ECORE_SUCCESS; +} diff --git a/drivers/net/qede/base/ecore_init_ops.h b/drivers/net/qede/base/ecore_init_ops.h new file mode 100644 index 00000000..8a6fce41 --- /dev/null +++ b/drivers/net/qede/base/ecore_init_ops.h @@ -0,0 +1,103 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_INIT_OPS__ +#define __ECORE_INIT_OPS__ + +#include "ecore.h" + +/** + * @brief ecore_init_iro_array - init iro_arr. 
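The INIT_OP_IF_MODE handling relies on ecore_init_cmd_mode_match, which evaluates a prefix-encoded boolean expression stored in the firmware's modes-tree byte buffer: NOT/OR/AND opcodes consume sub-expressions recursively, and any other byte is a leaf testing one mode bit. A minimal sketch of that evaluation scheme follows; the opcode values and buffer contents here are made up (the real encoding comes from the firmware tooling), only the recursion shape matches.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical opcode values; the real ones live in the firmware headers. */
enum { OP_NOT = 0, OP_OR = 1, OP_AND = 2, OP_LEAF_BASE = 3 };

/* Recursively evaluate a prefix-encoded expression over a byte buffer.
 * *offset advances past every byte consumed, like modes_buf_offset above.
 */
static uint8_t eval_modes_tree(const uint8_t *buf, uint16_t *offset, int modes)
{
    uint8_t op = buf[(*offset)++];

    switch (op) {
    case OP_NOT:
        return eval_modes_tree(buf, offset, modes) ^ 1;
    case OP_OR: {
        uint8_t a = eval_modes_tree(buf, offset, modes);
        uint8_t b = eval_modes_tree(buf, offset, modes);
        return a | b;
    }
    case OP_AND: {
        uint8_t a = eval_modes_tree(buf, offset, modes);
        uint8_t b = eval_modes_tree(buf, offset, modes);
        return a & b;
    }
    default:
        /* Leaf: test a single mode bit. */
        return (modes & (1 << (op - OP_LEAF_BASE))) ? 1 : 0;
    }
}

int main(void)
{
    /* AND(mode0, NOT(mode2)) in the made-up encoding above. */
    const uint8_t tree[] = { OP_AND, OP_LEAF_BASE + 0, OP_NOT, OP_LEAF_BASE + 2 };
    uint16_t off = 0;

    printf("match = %d\n", eval_modes_tree(tree, &off, 0x1)); /* prints 1 */
    return 0;
}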
+ * + * + * @param p_dev + */ +void ecore_init_iro_array(struct ecore_dev *p_dev); + +/** + * @brief ecore_init_run - Run the init-sequence. + * + * + * @param p_hwfn + * @param p_ptt + * @param phase + * @param phase_id + * @param modes + * @return _ecore_status_t + */ +enum _ecore_status_t ecore_init_run(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int phase, int phase_id, int modes); + +/** + * @brief ecore_init_hwfn_allocate - Allocate RT array, Store 'values' ptrs. + * + * + * @param p_hwfn + * + * @return _ecore_status_t + */ +enum _ecore_status_t ecore_init_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_init_hwfn_deallocate + * + * + * @param p_hwfn + */ +void ecore_init_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_init_clear_rt_data - Clears the runtime init array. + * + * + * @param p_hwfn + */ +void ecore_init_clear_rt_data(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_init_store_rt_reg - Store a configuration value in the RT array. + * + * + * @param p_hwfn + * @param rt_offset + * @param val + */ +void ecore_init_store_rt_reg(struct ecore_hwfn *p_hwfn, u32 rt_offset, u32 val); + +#define STORE_RT_REG(hwfn, offset, val) \ + ecore_init_store_rt_reg(hwfn, offset, val) + +#define OVERWRITE_RT_REG(hwfn, offset, val) \ + ecore_init_store_rt_reg(hwfn, offset, val) + +/** +* @brief +* +* +* @param p_hwfn +* @param rt_offset +* @param val +* @param size +*/ + +void ecore_init_store_rt_agg(struct ecore_hwfn *p_hwfn, + u32 rt_offset, u32 *val, osal_size_t size); + +#define STORE_RT_REG_AGG(hwfn, offset, val) \ + ecore_init_store_rt_agg(hwfn, offset, (u32 *)&val, sizeof(val)) + +/** + * @brief + * Initialize GTT global windows and set admin window + * related params of GTT/PTT to default values. + * + * @param p_hwfn + */ +void ecore_gtt_init(struct ecore_hwfn *p_hwfn); +#endif /* __ECORE_INIT_OPS__ */ diff --git a/drivers/net/qede/base/ecore_int.c b/drivers/net/qede/base/ecore_int.c new file mode 100644 index 00000000..ea0fd7aa --- /dev/null +++ b/drivers/net/qede/base/ecore_int.c @@ -0,0 +1,2225 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
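The STORE_RT_REG / STORE_RT_REG_AGG macros above stage configuration values into a per-function runtime ("RT") array; the values only reach the chip later, when the init sequence executes a runtime-sourced write command. A toy model of that staging step is sketched below; struct rt_data, store_rt_reg, store_rt_reg_agg and the validity flag are invented for the sketch and simplified relative to the driver.

#include <stdint.h>
#include <stdbool.h>
#include <string.h>
#include <stdio.h>

#define RT_ARRAY_SIZE 16 /* arbitrary size for the sketch */

/* Hypothetical runtime-array state: staged values plus a validity flag so a
 * later apply step can skip offsets that were never configured.
 */
struct rt_data {
    uint32_t init_val[RT_ARRAY_SIZE];
    bool     b_valid[RT_ARRAY_SIZE];
};

static void store_rt_reg(struct rt_data *rt, uint32_t rt_offset, uint32_t val)
{
    rt->init_val[rt_offset] = val;
    rt->b_valid[rt_offset] = true;
}

/* Aggregate store: a contiguous run of u32 slots, e.g. a 64-bit address split
 * across two offsets. The word order follows host layout, as with the macro.
 */
static void store_rt_reg_agg(struct rt_data *rt, uint32_t rt_offset,
                             const void *val, size_t size)
{
    size_t i;

    for (i = 0; i < size / sizeof(uint32_t); i++)
        store_rt_reg(rt, rt_offset + i, ((const uint32_t *)val)[i]);
}

int main(void)
{
    struct rt_data rt;
    uint64_t phys = 0x12345678abcdULL;

    memset(&rt, 0, sizeof(rt));
    store_rt_reg(&rt, 3, 0x7f);
    store_rt_reg_agg(&rt, 4, &phys, sizeof(phys));

    printf("rt[3]=%08x rt[4]=%08x rt[5]=%08x\n",
           (unsigned)rt.init_val[3], (unsigned)rt.init_val[4],
           (unsigned)rt.init_val[5]);
    return 0;
}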
+ */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_spq.h" +#include "reg_addr.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_init_ops.h" +#include "ecore_rt_defs.h" +#include "ecore_int.h" +#include "reg_addr.h" +#include "ecore_hw.h" +#include "ecore_sriov.h" +#include "ecore_vf.h" +#include "ecore_hw_defs.h" +#include "ecore_hsi_common.h" +#include "ecore_mcp.h" +#include "ecore_attn_values.h" + +struct ecore_pi_info { + ecore_int_comp_cb_t comp_cb; + void *cookie; /* Will be sent to the compl cb function */ +}; + +struct ecore_sb_sp_info { + struct ecore_sb_info sb_info; + /* per protocol index data */ + struct ecore_pi_info pi_info_arr[PIS_PER_SB]; +}; + +enum ecore_attention_type { + ECORE_ATTN_TYPE_ATTN, + ECORE_ATTN_TYPE_PARITY, +}; + +#define SB_ATTN_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct atten_status_block, p_hwfn) + +struct aeu_invert_reg_bit { + char bit_name[30]; + +#define ATTENTION_PARITY (1 << 0) + +#define ATTENTION_LENGTH_MASK (0x00000ff0) +#define ATTENTION_LENGTH_SHIFT (4) +#define ATTENTION_LENGTH(flags) (((flags) & ATTENTION_LENGTH_MASK) >> \ + ATTENTION_LENGTH_SHIFT) +#define ATTENTION_SINGLE (1 << ATTENTION_LENGTH_SHIFT) +#define ATTENTION_PAR (ATTENTION_SINGLE | ATTENTION_PARITY) +#define ATTENTION_PAR_INT ((2 << ATTENTION_LENGTH_SHIFT) | \ + ATTENTION_PARITY) + +/* Multiple bits start with this offset */ +#define ATTENTION_OFFSET_MASK (0x000ff000) +#define ATTENTION_OFFSET_SHIFT (12) + +#define ATTENTION_CLEAR_ENABLE (1 << 28) +#define ATTENTION_FW_DUMP (1 << 29) +#define ATTENTION_PANIC_DUMP (1 << 30) + unsigned int flags; + + /* Callback to call if attention will be triggered */ + enum _ecore_status_t (*cb)(struct ecore_hwfn *p_hwfn); + + enum block_id block_index; +}; + +struct aeu_invert_reg { + struct aeu_invert_reg_bit bits[32]; +}; + +#define MAX_ATTN_GRPS (8) +#define NUM_ATTN_REGS (9) + +static enum _ecore_status_t ecore_mcp_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_STATE); + + DP_INFO(p_hwfn->p_dev, "MCP_REG_CPU_STATE: %08x - Masking...\n", tmp); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, MCP_REG_CPU_EVENT_MASK, 0xffffffff); + + return ECORE_SUCCESS; +} + +#define ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK (0x3c000) +#define ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT (14) +#define ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK (0x03fc0) +#define ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT (6) +#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK (0x00020) +#define ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT (5) +#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK (0x0001e) +#define ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT (1) +#define ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK (0x1) +#define ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT (0) +#define ECORE_PSWHST_ATTENTION_VF_DISABLED (0x1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS (0x1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK (0x1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT (0) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK (0x1e) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT (1) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK (0x20) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT (5) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK (0x3fc0) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT (6) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK (0x3c000) +#define 
ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT (14) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK (0x3fc0000) +#define ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT (18) +static enum _ecore_status_t ecore_pswhst_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 tmp = + ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_VF_DISABLED_ERROR_VALID); + + /* Disabled VF access */ + if (tmp & ECORE_PSWHST_ATTENTION_VF_DISABLED) { + u32 addr, data; + + addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_VF_DISABLED_ERROR_ADDRESS); + data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_VF_DISABLED_ERROR_DATA); + DP_INFO(p_hwfn->p_dev, + "PF[0x%02x] VF [0x%02x] [Valid 0x%02x] Client [0x%02x]" + " Write [0x%02x] Addr [0x%08x]\n", + (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_PF_MASK) + >> ECORE_PSWHST_ATTENTION_DISABLED_PF_SHIFT), + (u8)((data & ECORE_PSWHST_ATTENTION_DISABLED_VF_MASK) + >> ECORE_PSWHST_ATTENTION_DISABLED_VF_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_DISABLED_VALID_MASK) >> + ECORE_PSWHST_ATTENTION_DISABLED_VALID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_MASK) >> + ECORE_PSWHST_ATTENTION_DISABLED_CLIENT_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_DISABLED_WRITE_MASK) >> + ECORE_PSWHST_ATTNETION_DISABLED_WRITE_SHIFT), + addr); + } + + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_VALID); + if (tmp & ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS) { + u32 addr, data, length; + + addr = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_ADDRESS); + data = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_DATA); + length = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PSWHST_REG_INCORRECT_ACCESS_LENGTH); + + DP_INFO(p_hwfn->p_dev, + "Incorrect access to %08x of length %08x - PF [%02x]" + " VF [%04x] [valid %02x] client [%02x] write [%02x]" + " Byte-Enable [%04x] [%08x]\n", + addr, length, + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_PF_ID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_ID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_VF_VALID_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_CLIENT_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_WR_SHIFT), + (u8)((data & + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_MASK) >> + ECORE_PSWHST_ATTENTION_INCORRECT_ACCESS_BYTE_EN_SHIFT), + data); + } + + /* TODO - We know 'some' of these are legal due to virtualization, + * but is it true for all of them? 
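The PSWHST handler above decodes a single packed "details" register with hand-written MASK/SHIFT pairs. The pattern itself is generic; a compact version with one extract helper is shown here purely to make the decode step easier to follow. get_field and the DET_* layout below are illustrative, and the example value is not captured from hardware.

#include <stdint.h>
#include <stdio.h>

/* Generic extract: isolate a field with its mask, then shift it down. */
static uint32_t get_field(uint32_t reg, uint32_t mask, uint32_t shift)
{
    return (reg & mask) >> shift;
}

/* Hypothetical layout loosely modelled on the "disabled VF access" details
 * register decoded above (write flag, client id, valid bit, VF id, PF id).
 */
#define DET_WRITE_MASK   0x00001u
#define DET_WRITE_SHIFT  0
#define DET_CLIENT_MASK  0x0001eu
#define DET_CLIENT_SHIFT 1
#define DET_VALID_MASK   0x00020u
#define DET_VALID_SHIFT  5
#define DET_VF_MASK      0x03fc0u
#define DET_VF_SHIFT     6
#define DET_PF_MASK      0x3c000u
#define DET_PF_SHIFT     14

int main(void)
{
    uint32_t details = 0x0802d; /* example value only */

    printf("PF %u VF %u valid %u client %u write %u\n",
           (unsigned)get_field(details, DET_PF_MASK, DET_PF_SHIFT),
           (unsigned)get_field(details, DET_VF_MASK, DET_VF_SHIFT),
           (unsigned)get_field(details, DET_VALID_MASK, DET_VALID_SHIFT),
           (unsigned)get_field(details, DET_CLIENT_MASK, DET_CLIENT_SHIFT),
           (unsigned)get_field(details, DET_WRITE_MASK, DET_WRITE_SHIFT));
    return 0;
}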
+ */ + return ECORE_SUCCESS; +} + +#define ECORE_GRC_ATTENTION_VALID_BIT (1 << 0) +#define ECORE_GRC_ATTENTION_ADDRESS_MASK (0x7fffff << 0) +#define ECORE_GRC_ATTENTION_RDWR_BIT (1 << 23) +#define ECORE_GRC_ATTENTION_MASTER_MASK (0xf << 24) +#define ECORE_GRC_ATTENTION_MASTER_SHIFT (24) +#define ECORE_GRC_ATTENTION_PF_MASK (0xf) +#define ECORE_GRC_ATTENTION_VF_MASK (0xff << 4) +#define ECORE_GRC_ATTENTION_VF_SHIFT (4) +#define ECORE_GRC_ATTENTION_PRIV_MASK (0x3 << 14) +#define ECORE_GRC_ATTENTION_PRIV_SHIFT (14) +#define ECORE_GRC_ATTENTION_PRIV_VF (0) +static const char *grc_timeout_attn_master_to_str(u8 master) +{ + switch (master) { + case 1: + return "PXP"; + case 2: + return "MCP"; + case 3: + return "MSDM"; + case 4: + return "PSDM"; + case 5: + return "YSDM"; + case 6: + return "USDM"; + case 7: + return "TSDM"; + case 8: + return "XSDM"; + case 9: + return "DBU"; + case 10: + return "DMAE"; + default: + return "Unknown"; + } +} + +static enum _ecore_status_t ecore_grc_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 tmp, tmp2; + + /* We've already cleared the timeout interrupt register, so we learn + * of interrupts via the validity register + */ + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID); + if (!(tmp & ECORE_GRC_ATTENTION_VALID_BIT)) + goto out; + + /* Read the GRC timeout information */ + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0); + tmp2 = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1); + + DP_INFO(p_hwfn->p_dev, + "GRC timeout [%08x:%08x] - %s Address [%08x] [Master %s]" + " [PF: %02x %s %02x]\n", + tmp2, tmp, + (tmp & ECORE_GRC_ATTENTION_RDWR_BIT) ? "Write to" : "Read from", + (tmp & ECORE_GRC_ATTENTION_ADDRESS_MASK) << 2, + grc_timeout_attn_master_to_str((tmp & + ECORE_GRC_ATTENTION_MASTER_MASK) >> + ECORE_GRC_ATTENTION_MASTER_SHIFT), + (tmp2 & ECORE_GRC_ATTENTION_PF_MASK), + (((tmp2 & ECORE_GRC_ATTENTION_PRIV_MASK) >> + ECORE_GRC_ATTENTION_PRIV_SHIFT) == + ECORE_GRC_ATTENTION_PRIV_VF) ? "VF" : "(Irrelevant:)", + (tmp2 & ECORE_GRC_ATTENTION_VF_MASK) >> + ECORE_GRC_ATTENTION_VF_SHIFT); + +out: + /* Regardles of anything else, clean the validity bit */ + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, + GRC_REG_TIMEOUT_ATTN_ACCESS_VALID, 0); + return ECORE_SUCCESS; +} + +#define ECORE_PGLUE_ATTENTION_VALID (1 << 29) +#define ECORE_PGLUE_ATTENTION_RD_VALID (1 << 26) +#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK (0xf << 20) +#define ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT (20) +#define ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID (1 << 19) +#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK (0xff << 24) +#define ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT (24) +#define ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR (1 << 21) +#define ECORE_PGLUE_ATTENTION_DETAILS2_BME (1 << 22) +#define ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN (1 << 23) +#define ECORE_PGLUE_ATTENTION_ICPL_VALID (1 << 23) +#define ECORE_PGLUE_ATTENTION_ZLR_VALID (1 << 25) +#define ECORE_PGLUE_ATTENTION_ILT_VALID (1 << 23) +static enum _ecore_status_t ecore_pglub_rbc_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 tmp, reg_addr; + + reg_addr = + attn_blocks[BLOCK_PGLUE_B].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)]. 
+ int_regs[0]->mask_addr; + + /* Mask unnecessary attentions -@TBD move to MFW */ + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr); + tmp |= (1 << 19); /* Was PGL_PCIE_ATTN */ + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp); + + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS2); + if (tmp & ECORE_PGLUE_ATTENTION_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_ADD_63_32); + details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS); + + DP_INFO(p_hwfn, + "Illegal write by chip to [%08x:%08x] blocked." + "Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]" + " Details2 %08x [Was_error %02x BME deassert %02x" + " FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), + (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) + ? 1 : 0), tmp, + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 + : 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : + 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 1 + : 0)); + } + + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS2); + if (tmp & ECORE_PGLUE_ATTENTION_RD_VALID) { + u32 addr_lo, addr_hi, details; + + addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_ADD_63_32); + details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_RD_DETAILS); + + DP_INFO(p_hwfn, + "Illegal read by chip from [%08x:%08x] blocked." + " Details: %08x [PFID %02x, VFID %02x, VF_VALID %02x]" + " Details2 %08x [Was_error %02x BME deassert %02x" + " FID_enable deassert %02x]\n", + addr_hi, addr_lo, details, + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_PFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_PFID_SHIFT), + (u8)((details & + ECORE_PGLUE_ATTENTION_DETAILS_VFID_MASK) >> + ECORE_PGLUE_ATTENTION_DETAILS_VFID_SHIFT), + (u8)((details & ECORE_PGLUE_ATTENTION_DETAILS_VF_VALID) + ? 1 : 0), tmp, + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_WAS_ERR) ? 1 + : 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_BME) ? 1 : + 0), + (u8)((tmp & ECORE_PGLUE_ATTENTION_DETAILS2_FID_EN) ? 
1 + : 0)); + } + + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL); + if (tmp & ECORE_PGLUE_ATTENTION_ICPL_VALID) + DP_INFO(p_hwfn, "ICPL error - %08x\n", tmp); + + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS); + if (tmp & ECORE_PGLUE_ATTENTION_ZLR_VALID) { + u32 addr_hi, addr_lo; + + addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32); + + DP_INFO(p_hwfn, "ICPL error - %08x [Address %08x:%08x]\n", + tmp, addr_hi, addr_lo); + } + + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS2); + if (tmp & ECORE_PGLUE_ATTENTION_ILT_VALID) { + u32 addr_hi, addr_lo, details; + + addr_lo = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_31_0); + addr_hi = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_ADD_63_32); + details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_VF_ILT_ERR_DETAILS); + + DP_INFO(p_hwfn, + "ILT error - Details %08x Details2 %08x" + " [Address %08x:%08x]\n", + details, tmp, addr_hi, addr_lo); + } + + /* Clear the indications */ + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, + PGLUE_B_REG_LATCHED_ERRORS_CLR, (1 << 2)); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_nig_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 tmp, reg_addr; + + /* Mask unnecessary attentions -@TBD move to MFW */ + reg_addr = + attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)]. + int_regs[3]->mask_addr; + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr); + tmp |= (1 << 0); /* Was 3_P0_TX_PAUSE_TOO_LONG_INT */ + tmp |= NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT; + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp); + + reg_addr = + attn_blocks[BLOCK_NIG].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)]. + int_regs[5]->mask_addr; + tmp = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr); + tmp |= (1 << 0); /* Was 5_P1_TX_PAUSE_TOO_LONG_INT */ + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, reg_addr, tmp); + + /* TODO - a bit risky to return success here; But alternative is to + * actually read the multitdue of interrupt register of the block. 
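Several of the error captures above report a faulting address as two 32-bit halves (..._ADD_31_0 / ..._ADD_63_32), and the handlers simply log both words. Stitching them back into one 64-bit value, as a caller might want to, is a one-liner; combine_addr and the sample values below are invented for illustration.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

/* Combine the low and high 32-bit capture registers into one address. */
static uint64_t combine_addr(uint32_t lo, uint32_t hi)
{
    return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
    /* Example captures; real values would come from the ..._ADD_31_0 and
     * ..._ADD_63_32 reads shown above.
     */
    uint32_t addr_lo = 0xdeadbeefu;
    uint32_t addr_hi = 0x00000001u;

    printf("faulting address: 0x%016" PRIx64 "\n",
           combine_addr(addr_lo, addr_hi));
    return 0;
}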
+ */ + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_fw_assertion(struct ecore_hwfn *p_hwfn) +{ + DP_NOTICE(p_hwfn, false, "FW assertion!\n"); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FW_ASSERT); + + return ECORE_INVAL; +} + +static enum _ecore_status_t +ecore_general_attention_35(struct ecore_hwfn *p_hwfn) +{ + DP_INFO(p_hwfn, "General attention 35!\n"); + + return ECORE_SUCCESS; +} + +#define ECORE_DORQ_ATTENTION_REASON_MASK (0xfffff) +#define ECORE_DORQ_ATTENTION_OPAQUE_MASK (0xffff) +#define ECORE_DORQ_ATTENTION_SIZE_MASK (0x7f) +#define ECORE_DORQ_ATTENTION_SIZE_SHIFT (16) + +static enum _ecore_status_t ecore_dorq_attn_cb(struct ecore_hwfn *p_hwfn) +{ + u32 reason; + + reason = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, DORQ_REG_DB_DROP_REASON) & + ECORE_DORQ_ATTENTION_REASON_MASK; + if (reason) { + u32 details = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + DORQ_REG_DB_DROP_DETAILS); + + DP_INFO(p_hwfn->p_dev, + "DORQ db_drop: address 0x%08x Opaque FID 0x%04x" + " Size [bytes] 0x%08x Reason: 0x%08x\n", + ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + DORQ_REG_DB_DROP_DETAILS_ADDRESS), + (u16)(details & ECORE_DORQ_ATTENTION_OPAQUE_MASK), + ((details & ECORE_DORQ_ATTENTION_SIZE_MASK) >> + ECORE_DORQ_ATTENTION_SIZE_SHIFT) * 4, reason); + } + + return ECORE_INVAL; +} + +static enum _ecore_status_t ecore_tm_attn_cb(struct ecore_hwfn *p_hwfn) +{ +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL_B0(p_hwfn->p_dev)) { + u32 val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + TM_REG_INT_STS_1); + + if (val & ~(TM_REG_INT_STS_1_PEND_TASK_SCAN | + TM_REG_INT_STS_1_PEND_CONN_SCAN)) + return ECORE_INVAL; + + if (val & (TM_REG_INT_STS_1_PEND_TASK_SCAN | + TM_REG_INT_STS_1_PEND_CONN_SCAN)) + DP_INFO(p_hwfn, + "TM attention on emulation - most likely" + " results of clock-ratios\n"); + val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1); + val |= TM_REG_INT_MASK_1_PEND_CONN_SCAN | + TM_REG_INT_MASK_1_PEND_TASK_SCAN; + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, TM_REG_INT_MASK_1, val); + + return ECORE_SUCCESS; + } +#endif + + return ECORE_INVAL; +} + +/* Notice aeu_invert_reg must be defined in the same order of bits as HW; */ +static struct aeu_invert_reg aeu_descs[NUM_ATTN_REGS] = { + { + { /* After Invert 1 */ + {"GPIO0 function%d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 2 */ + {"PGLUE config_space", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"PGLUE misc_flr", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"PGLUE B RBC", ATTENTION_PAR_INT, ecore_pglub_rbc_attn_cb, + BLOCK_PGLUE_B}, + {"PGLUE misc_mctp", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"Flash event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"SMB event", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"Main Power", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"SW timers #%d", + (8 << ATTENTION_LENGTH_SHIFT) | (1 << ATTENTION_OFFSET_SHIFT), + OSAL_NULL, MAX_BLOCK_ID}, + {"PCIE glue/PXP VPD %d", (16 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + BLOCK_PGLCS}, + } + }, + + { + { /* After Invert 3 */ + {"General Attention %d", (32 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 4 */ + {"General Attention 32", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, + ecore_fw_assertion, MAX_BLOCK_ID}, + {"General Attention %d", + (2 << ATTENTION_LENGTH_SHIFT) | (33 << ATTENTION_OFFSET_SHIFT), + OSAL_NULL, MAX_BLOCK_ID}, + {"General Attention 35", ATTENTION_SINGLE | ATTENTION_CLEAR_ENABLE, + ecore_general_attention_35, MAX_BLOCK_ID}, + {"CNIG port %d", (4 << 
ATTENTION_LENGTH_SHIFT), OSAL_NULL, + BLOCK_CNIG}, + {"MCP CPU", ATTENTION_SINGLE, ecore_mcp_attn_cb, MAX_BLOCK_ID}, + {"MCP Watchdog timer", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"MCP M2P", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"AVS stop status ready", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"MSTAT", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, + {"MSTAT per-path", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, + {"Reserved %d", (6 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + {"NIG", ATTENTION_PAR_INT, ecore_nig_attn_cb, BLOCK_NIG}, + {"BMB/OPTE/MCP", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BMB}, + {"BTB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BTB}, + {"BRB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_BRB}, + {"PRS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRS}, + } + }, + + { + { /* After Invert 5 */ + {"SRC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_SRC}, + {"PB Client1", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB1}, + {"PB Client2", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF_PB2}, + {"RPB", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RPB}, + {"PBF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PBF}, + {"QM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_QM}, + {"TM", ATTENTION_PAR_INT, ecore_tm_attn_cb, BLOCK_TM}, + {"MCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MCM}, + {"MSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSDM}, + {"MSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MSEM}, + {"PCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PCM}, + {"PSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSDM}, + {"PSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSEM}, + {"TCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCM}, + {"TSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSDM}, + {"TSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TSEM}, + } + }, + + { + { /* After Invert 6 */ + {"UCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_UCM}, + {"USDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USDM}, + {"USEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_USEM}, + {"XCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XCM}, + {"XSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSDM}, + {"XSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XSEM}, + {"YCM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YCM}, + {"YSDM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSDM}, + {"YSEM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YSEM}, + {"XYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_XYLD}, + {"TMLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TMLD}, + {"MYLD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MULD}, + {"YULD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_YULD}, + {"DORQ", ATTENTION_PAR_INT, ecore_dorq_attn_cb, BLOCK_DORQ}, + {"DBG", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DBG}, + {"IPC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IPC}, + } + }, + + { + { /* After Invert 7 */ + {"CCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CCFC}, + {"CDU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CDU}, + {"DMAE", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_DMAE}, + {"IGU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_IGU}, + {"ATC", ATTENTION_PAR_INT, OSAL_NULL, MAX_BLOCK_ID}, + {"CAU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CAU}, + {"PTU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PTU}, + {"PRM", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PRM}, + {"TCFC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TCFC}, + {"RDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RDIF}, + {"TDIF", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_TDIF}, + {"RSS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_RSS}, + {"MISC", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISC}, + {"MISCS", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_MISCS}, + {"PCIE", ATTENTION_PAR, OSAL_NULL, BLOCK_PCIE}, + {"Vaux PCI core", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PSWRQ", ATTENTION_PAR_INT, OSAL_NULL, 
BLOCK_PSWRQ}, + } + }, + + { + { /* After Invert 8 */ + {"PSWRQ (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRQ2}, + {"PSWWR", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR}, + {"PSWWR (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWWR2}, + {"PSWRD", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD}, + {"PSWRD (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWRD2}, + {"PSWHST", ATTENTION_PAR_INT, ecore_pswhst_attn_cb, BLOCK_PSWHST}, + {"PSWHST (pci_clk)", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_PSWHST2}, + {"GRC", ATTENTION_PAR_INT, ecore_grc_attn_cb, BLOCK_GRC}, + {"CPMU", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_CPMU}, + {"NCSI", ATTENTION_PAR_INT, OSAL_NULL, BLOCK_NCSI}, + {"MSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"PSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"TSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"USEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"XSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"YSEM PRAM", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"pxp_misc_mps", ATTENTION_PAR, OSAL_NULL, BLOCK_PGLCS}, + {"PCIE glue/PXP Exp. ROM", ATTENTION_SINGLE, OSAL_NULL, BLOCK_PGLCS}, + {"PERST_B assertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"PERST_B deassertion", ATTENTION_SINGLE, OSAL_NULL, MAX_BLOCK_ID}, + {"Reserved %d", (2 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + + { + { /* After Invert 9 */ + {"MCP Latched memory", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad cache", ATTENTION_SINGLE, OSAL_NULL, + MAX_BLOCK_ID}, + {"MCP Latched ump_tx", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"MCP Latched scratchpad", ATTENTION_PAR, OSAL_NULL, MAX_BLOCK_ID}, + {"Reserved %d", (28 << ATTENTION_LENGTH_SHIFT), OSAL_NULL, + MAX_BLOCK_ID}, + } + }, + +}; + +#define ATTN_STATE_BITS (0xfff) +#define ATTN_BITS_MASKABLE (0x3ff) +struct ecore_sb_attn_info { + /* Virtual & Physical address of the SB */ + struct atten_status_block *sb_attn; + dma_addr_t sb_phys; + + /* Last seen running index */ + u16 index; + + /* A mask of the AEU bits resulting in a parity error */ + u32 parity_mask[NUM_ATTN_REGS]; + + /* A pointer to the attention description structure */ + struct aeu_invert_reg *p_aeu_desc; + + /* Previously asserted attentions, which are still unasserted */ + u16 known_attn; + + /* Cleanup address for the link's general hw attention */ + u32 mfw_attn_addr; +}; + +static u16 ecore_attn_update_idx(struct ecore_hwfn *p_hwfn, + struct ecore_sb_attn_info *p_sb_desc) +{ + u16 rc = 0, index; + + OSAL_MMIOWB(p_hwfn->p_dev); + + index = OSAL_LE16_TO_CPU(p_sb_desc->sb_attn->sb_index); + if (p_sb_desc->index != index) { + p_sb_desc->index = index; + rc = ECORE_SB_ATT_IDX; + } + + OSAL_MMIOWB(p_hwfn->p_dev); + + return rc; +} + +/** + * @brief ecore_int_assertion - handles asserted attention bits + * + * @param p_hwfn + * @param asserted_bits newly asserted bits + * @return enum _ecore_status_t + */ +static enum _ecore_status_t ecore_int_assertion(struct ecore_hwfn *p_hwfn, + u16 asserted_bits) +{ + struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 igu_mask; + + /* Mask the source of the attention in the IGU */ + igu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "IGU mask: 0x%08x --> 0x%08x\n", + igu_mask, igu_mask & ~(asserted_bits & ATTN_BITS_MASKABLE)); + igu_mask &= ~(asserted_bits & ATTN_BITS_MASKABLE); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, igu_mask); + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "inner 
known ATTN state: 0x%04x --> 0x%04x\n", + sb_attn_sw->known_attn, + sb_attn_sw->known_attn | asserted_bits); + sb_attn_sw->known_attn |= asserted_bits; + + /* Handle MCP events */ + if (asserted_bits & 0x100) { + ecore_mcp_handle_events(p_hwfn, p_hwfn->p_dpc_ptt); + /* Clean the MCP attention */ + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, + sb_attn_sw->mfw_attn_addr, 0); + } + + /* FIXME - this will change once we'll have GOOD gtt definitions */ + DIRECT_REG_WR(p_hwfn, + (u8 OSAL_IOMEM *) p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_SET_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), (u32)asserted_bits); + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "set cmd IGU: 0x%04x\n", + asserted_bits); + + return ECORE_SUCCESS; +} + +static void ecore_int_deassertion_print_bit(struct ecore_hwfn *p_hwfn, + struct attn_hw_reg *p_reg_desc, + struct attn_hw_block *p_block, + enum ecore_attention_type type, + u32 val, u32 mask) +{ + int j; +#ifdef ATTN_DESC + const char **description; + + if (type == ECORE_ATTN_TYPE_ATTN) + description = p_block->int_desc; + else + description = p_block->prty_desc; +#endif + + for (j = 0; j < p_reg_desc->num_of_bits; j++) { + if (val & (1 << j)) { +#ifdef ATTN_DESC + DP_NOTICE(p_hwfn, false, + "%s (%s): %s [reg %d [0x%08x], bit %d]%s\n", + p_block->name, + type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" : + "Parity", + description[p_reg_desc->bit_attn_idx[j]], + p_reg_desc->reg_idx, + p_reg_desc->sts_addr, j, + (mask & (1 << j)) ? " [MASKED]" : ""); +#else + DP_NOTICE(p_hwfn->p_dev, false, + "%s (%s): [reg %d [0x%08x], bit %d]%s\n", + p_block->name, + type == ECORE_ATTN_TYPE_ATTN ? "Interrupt" : + "Parity", + p_reg_desc->reg_idx, + p_reg_desc->sts_addr, j, + (mask & (1 << j)) ? " [MASKED]" : ""); +#endif + } + } +} + +/** + * @brief ecore_int_deassertion_aeu_bit - handles the effects of a single + * cause of the attention + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the attention + * @param aeu_en_reg - register offset of the AEU enable reg. which configured + * this bit to this group. + * @param bit_index - index of this bit in the aeu_en_reg + * + * @return enum _ecore_status_t + */ +static enum _ecore_status_t +ecore_int_deassertion_aeu_bit(struct ecore_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u32 aeu_en_reg, u32 bitmask) +{ + enum _ecore_status_t rc = ECORE_INVAL; + u32 val, mask; + +#ifndef REMOVE_DBG + u32 interrupts[20]; /* TODO- change into HSI define once supplied */ + + OSAL_MEMSET(interrupts, 0, sizeof(u32) * 20); /* FIXME real size) */ +#endif + + DP_INFO(p_hwfn, "Deasserted attention `%s'[%08x]\n", + p_aeu->bit_name, bitmask); + + /* Call callback before clearing the interrupt status */ + if (p_aeu->cb) { + DP_INFO(p_hwfn, "`%s (attention)': Calling Callback function\n", + p_aeu->bit_name); + rc = p_aeu->cb(p_hwfn); + } + + /* Handle HW block interrupt registers */ + if (p_aeu->block_index != MAX_BLOCK_ID) { + u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev); + struct attn_hw_block *p_block; + int i; + + p_block = &attn_blocks[p_aeu->block_index]; + + /* Handle each interrupt register */ + for (i = 0; + i < p_block->chip_regs[chip_type].num_of_int_regs; i++) { + struct attn_hw_reg *p_reg_desc; + u32 sts_addr; + + p_reg_desc = p_block->chip_regs[chip_type].int_regs[i]; + + /* In case of fatal attention, don't clear the status + * so it would appear in idle check. 
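ecore_int_assertion above does two pieces of bookkeeping: it clears the newly asserted, maskable bits out of the IGU attention-enable register and records them in known_attn so the matching deassertion can be recognised later. A self-contained sketch of that state handling follows; struct attn_state stands in for the real registers, and the IGU command write and the MCP-event special case are deliberately omitted.

#include <stdint.h>
#include <stdio.h>

#define ATTN_BITS_MASKABLE 0x3ffu /* same split as above: only the low bits can be masked */

struct attn_state {
    uint32_t igu_enable; /* stand-in for the attention-enable register */
    uint16_t known_attn; /* previously asserted, not yet deasserted */
};

static void handle_assertion(struct attn_state *s, uint16_t asserted)
{
    /* Mask the source in the (simulated) enable register so it stops re-firing. */
    s->igu_enable &= ~((uint32_t)asserted & ATTN_BITS_MASKABLE);

    /* Remember it so the matching deassertion can be recognised later. */
    s->known_attn |= asserted;
}

static void handle_deassertion(struct attn_state *s, uint16_t deasserted)
{
    /* Re-enable the source and drop it from the known set. */
    s->igu_enable |= (uint32_t)deasserted & ATTN_BITS_MASKABLE;
    s->known_attn &= ~deasserted;
}

int main(void)
{
    struct attn_state s = { .igu_enable = 0xfffu, .known_attn = 0 };

    handle_assertion(&s, 0x104);
    printf("after assert:   enable=%03x known=%03x\n",
           (unsigned)s.igu_enable, (unsigned)s.known_attn);

    handle_deassertion(&s, 0x104);
    printf("after deassert: enable=%03x known=%03x\n",
           (unsigned)s.igu_enable, (unsigned)s.known_attn);
    return 0;
}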
+ */ + if (rc == ECORE_SUCCESS) + sts_addr = p_reg_desc->sts_clr_addr; + else + sts_addr = p_reg_desc->sts_addr; + + val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, sts_addr); + mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->mask_addr); + ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc, + p_block, + ECORE_ATTN_TYPE_ATTN, + val, mask); + +#ifndef REMOVE_DBG + interrupts[i] = val; +#endif + } + } + + /* Reach assertion if attention is fatal */ + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, "`%s': Fatal attention\n", + p_aeu->bit_name); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_HW_ATTN); + } + + /* Prevent this Attention from being asserted in the future */ + if (p_aeu->flags & ATTENTION_CLEAR_ENABLE) { + u32 val; + u32 mask = ~bitmask; + val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en_reg, (val & mask)); + DP_INFO(p_hwfn, "`%s' - Disabled future attentions\n", + p_aeu->bit_name); + } + + if (p_aeu->flags & (ATTENTION_FW_DUMP | ATTENTION_PANIC_DUMP)) { + /* @@@TODO - what to dump? <yuvalmin 04/02/13> */ + DP_ERR(p_hwfn->p_dev, "`%s' - Dumps aren't implemented yet\n", + p_aeu->bit_name); + return ECORE_NOTIMPL; + } + + return rc; +} + +static void ecore_int_parity_print(struct ecore_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + struct attn_hw_block *p_block, u8 bit_index) +{ + u16 chip_type = ECORE_GET_TYPE(p_hwfn->p_dev); + int i; + + for (i = 0; i < p_block->chip_regs[chip_type].num_of_prty_regs; i++) { + struct attn_hw_reg *p_reg_desc; + u32 val, mask; + + p_reg_desc = p_block->chip_regs[chip_type].prty_regs[i]; + + val = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->sts_clr_addr); + mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + p_reg_desc->mask_addr); + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "%s[%d] - parity register[%d] is %08x [mask is %08x]\n", + p_aeu->bit_name, bit_index, i, val, mask); + ecore_int_deassertion_print_bit(p_hwfn, p_reg_desc, + p_block, + ECORE_ATTN_TYPE_PARITY, + val, mask); + } +} + +/** + * @brief ecore_int_deassertion_parity - handle a single parity AEU source + * + * @param p_hwfn + * @param p_aeu - descriptor of an AEU bit which caused the + * parity + * @param bit_index + */ +static void ecore_int_deassertion_parity(struct ecore_hwfn *p_hwfn, + struct aeu_invert_reg_bit *p_aeu, + u8 bit_index) +{ + u32 block_id = p_aeu->block_index; + + DP_INFO(p_hwfn->p_dev, "%s[%d] parity attention is set\n", + p_aeu->bit_name, bit_index); + + if (block_id != MAX_BLOCK_ID) { + ecore_int_parity_print(p_hwfn, p_aeu, &attn_blocks[block_id], + bit_index); + + /* In A0, there's a single parity bit for several blocks */ + if (block_id == BLOCK_BTB) { + ecore_int_parity_print(p_hwfn, p_aeu, + &attn_blocks[BLOCK_OPTE], + bit_index); + ecore_int_parity_print(p_hwfn, p_aeu, + &attn_blocks[BLOCK_MCP], + bit_index); + } + } +} + +/** + * @brief - handles deassertion of previously asserted attentions. 
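Each AEU bit descriptor carries an optional callback, and ecore_int_deassertion_aeu_bit above calls it before deciding whether the attention is fatal: any non-success return, including the default when no callback exists, escalates. The dispatch idea in isolation, with hypothetical descriptor and callback types:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical per-bit descriptor: a name plus an optional handler. */
struct attn_bit_desc {
    const char *name;
    int (*cb)(void *ctx); /* returns 0 when the attention was benign */
};

static int mcp_attn_cb(void *ctx)
{
    (void)ctx;
    printf("MCP attention handled, masking further events\n");
    return 0;
}

/* Returns true when the attention must be escalated as fatal. */
static bool handle_attn_bit(const struct attn_bit_desc *d, void *ctx)
{
    int rc = -1; /* no callback == unhandled, like the driver's error default */

    printf("Deasserted attention '%s'\n", d->name);
    if (d->cb)
        rc = d->cb(ctx);

    return rc != 0;
}

int main(void)
{
    struct attn_bit_desc mcp  = { "MCP CPU", mcp_attn_cb };
    struct attn_bit_desc misc = { "MISC",    NULL };

    printf("fatal=%d\n", handle_attn_bit(&mcp, NULL));
    printf("fatal=%d\n", handle_attn_bit(&misc, NULL));
    return 0;
}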
+ * + * @param p_hwfn + * @param deasserted_bits - newly deasserted bits + * @return enum _ecore_status_t + * + */ +static enum _ecore_status_t ecore_int_deassertion(struct ecore_hwfn *p_hwfn, + u16 deasserted_bits) +{ + struct ecore_sb_attn_info *sb_attn_sw = p_hwfn->p_sb_attn; + u32 aeu_inv_arr[NUM_ATTN_REGS], aeu_mask; + bool b_parity = false; + u8 i, j, k, bit_idx; + enum _ecore_status_t rc = ECORE_SUCCESS; + + /* Read the attention registers in the AEU */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + aeu_inv_arr[i] = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_AFTER_INVERT_1_IGU + + i * 0x4); + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Deasserted bits [%d]: %08x\n", i, aeu_inv_arr[i]); + } + + /* Handle parity attentions first */ + for (i = 0; i < NUM_ATTN_REGS; i++) { + struct aeu_invert_reg *p_aeu = &sb_attn_sw->p_aeu_desc[i]; + u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32)); + + u32 parities = sb_attn_sw->parity_mask[i] & aeu_inv_arr[i] & en; + + /* Skip register in which no parity bit is currently set */ + if (!parities) + continue; + + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + struct aeu_invert_reg_bit *p_bit = &p_aeu->bits[j]; + + if ((p_bit->flags & ATTENTION_PARITY) && + !!(parities & (1 << bit_idx))) { + ecore_int_deassertion_parity(p_hwfn, p_bit, + bit_idx); + b_parity = true; + } + + bit_idx += ATTENTION_LENGTH(p_bit->flags); + } + } + + /* Find non-parity cause for attention and act */ + for (k = 0; k < MAX_ATTN_GRPS; k++) { + struct aeu_invert_reg_bit *p_aeu; + + /* Handle only groups whose attention is currently deasserted */ + if (!(deasserted_bits & (1 << k))) + continue; + + for (i = 0; i < NUM_ATTN_REGS; i++) { + u32 aeu_en = MISC_REG_AEU_ENABLE1_IGU_OUT_0 + + i * sizeof(u32) + k * sizeof(u32) * NUM_ATTN_REGS; + u32 en = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, aeu_en); + u32 bits = aeu_inv_arr[i] & en; + + /* Skip if no bit from this group is currently set */ + if (!bits) + continue; + + /* Find all set bits from current register which belong + * to current group, making them responsible for the + * previous assertion. 
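The parity pass above, and the interrupt-group scan that follows it, walk each 32-bit AEU register with two cursors: one indexes the descriptor array while the other advances by ATTENTION_LENGTH(flags), so a single descriptor can cover 1, 2, 8 or 32 consecutive bits; for parity-plus-interrupt descriptors the first bit is the parity bit and is skipped when looking for interrupt causes. A stripped-down version of that walk over an invented descriptor list:

#include <stdint.h>
#include <stdio.h>

#define FLAG_PARITY   (1u << 0)
#define LENGTH_SHIFT  4
#define LENGTH(flags) (((flags) >> LENGTH_SHIFT) & 0xffu)
#define PAR_INT       ((2u << LENGTH_SHIFT) | FLAG_PARITY) /* parity bit + interrupt bit */

struct desc {
    const char *name;
    uint32_t flags;
};

/* Invented descriptor list covering 32 bits with mixed widths. */
static const struct desc descs[] = {
    { "GPIO",     8u << LENGTH_SHIFT },  /* bits 0..7, plain */
    { "BLOCK_A",  PAR_INT },             /* bit 8 parity, bit 9 interrupt */
    { "BLOCK_B",  PAR_INT },             /* bit 10 parity, bit 11 interrupt */
    { "Reserved", 20u << LENGTH_SHIFT }, /* bits 12..31 */
};

int main(void)
{
    uint32_t status = (1u << 9) | (1u << 10); /* BLOCK_A interrupt + BLOCK_B parity */
    uint32_t bit_idx = 0;
    size_t j;

    for (j = 0; j < sizeof(descs) / sizeof(descs[0]) && bit_idx < 32; j++) {
        uint32_t len = LENGTH(descs[j].flags);
        uint32_t bit = bit_idx, span = len;

        /* For parity-capable descriptors the first bit is parity; skip it
         * when looking for interrupt causes (the parity pass handles it).
         */
        if (descs[j].flags & FLAG_PARITY) {
            bit++;
            span--;
        }

        if (span && (status & (((1u << span) - 1u) << bit)))
            printf("interrupt cause in '%s' (bits %u..%u)\n",
                   descs[j].name, (unsigned)bit, (unsigned)(bit + span - 1));

        bit_idx += len;
    }
    return 0;
}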
+ */ + for (j = 0, bit_idx = 0; bit_idx < 32; j++) { + u8 bit, bit_len; + u32 bitmask; + + p_aeu = &sb_attn_sw->p_aeu_desc[i].bits[j]; + + /* No need to handle attention-only bits */ + if (p_aeu->flags == ATTENTION_PAR) + continue; + + bit = bit_idx; + bit_len = ATTENTION_LENGTH(p_aeu->flags); + if (p_aeu->flags & ATTENTION_PAR_INT) { + /* Skip Parity */ + bit++; + bit_len--; + } + + bitmask = bits & (((1 << bit_len) - 1) << bit); + if (bitmask) { + /* Handle source of the attention */ + ecore_int_deassertion_aeu_bit(p_hwfn, + p_aeu, + aeu_en, + bitmask); + } + + bit_idx += ATTENTION_LENGTH(p_aeu->flags); + } + } + } + + /* Clear IGU indication for the deasserted bits */ + /* FIXME - this will change once we'll have GOOD gtt definitions */ + DIRECT_REG_WR(p_hwfn, + (u8 OSAL_IOMEM *) p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + + ((IGU_CMD_ATTN_BIT_CLR_UPPER - + IGU_CMD_INT_ACK_BASE) << 3), ~((u32)deasserted_bits)); + + /* Unmask deasserted attentions in IGU */ + aeu_mask = ecore_rd(p_hwfn, p_hwfn->p_dpc_ptt, + IGU_REG_ATTENTION_ENABLE); + aeu_mask |= (deasserted_bits & ATTN_BITS_MASKABLE); + ecore_wr(p_hwfn, p_hwfn->p_dpc_ptt, IGU_REG_ATTENTION_ENABLE, aeu_mask); + + /* Clear deassertion from inner state */ + sb_attn_sw->known_attn &= ~deasserted_bits; + + return rc; +} + +static enum _ecore_status_t ecore_int_attentions(struct ecore_hwfn *p_hwfn) +{ + struct ecore_sb_attn_info *p_sb_attn_sw = p_hwfn->p_sb_attn; + struct atten_status_block *p_sb_attn = p_sb_attn_sw->sb_attn; + u16 index = 0, asserted_bits, deasserted_bits; + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 attn_bits = 0, attn_acks = 0; + + /* Read current attention bits/acks - safeguard against attentions + * by guaranting work on a synchronized timeframe + */ + do { + index = OSAL_LE16_TO_CPU(p_sb_attn->sb_index); + attn_bits = OSAL_LE32_TO_CPU(p_sb_attn->atten_bits); + attn_acks = OSAL_LE32_TO_CPU(p_sb_attn->atten_ack); + } while (index != OSAL_LE16_TO_CPU(p_sb_attn->sb_index)); + p_sb_attn->sb_index = index; + + /* Attention / Deassertion are meaningful (and in correct state) + * only when they differ and consistent with known state - deassertion + * when previous attention & current ack, and assertion when current + * attention with no previous attention + */ + asserted_bits = (attn_bits & ~attn_acks & ATTN_STATE_BITS) & + ~p_sb_attn_sw->known_attn; + deasserted_bits = (~attn_bits & attn_acks & ATTN_STATE_BITS) & + p_sb_attn_sw->known_attn; + + if ((asserted_bits & ~0x100) || (deasserted_bits & ~0x100)) + DP_INFO(p_hwfn, + "Attention: Index: 0x%04x, Bits: 0x%08x, Acks: 0x%08x, asserted: 0x%04x, De-asserted 0x%04x [Prev. 
known: 0x%04x]\n", + index, attn_bits, attn_acks, asserted_bits, + deasserted_bits, p_sb_attn_sw->known_attn); + else if (asserted_bits == 0x100) + DP_INFO(p_hwfn, "MFW indication via attention\n"); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "MFW indication [deassertion]\n"); + + if (asserted_bits) { + rc = ecore_int_assertion(p_hwfn, asserted_bits); + if (rc) + return rc; + } + + if (deasserted_bits) + rc = ecore_int_deassertion(p_hwfn, deasserted_bits); + + return rc; +} + +static void ecore_sb_ack_attn(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM *igu_addr, u32 ack_cons) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((ack_cons << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (1 << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (IGU_INT_NOP << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_ATTN << + IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + + DIRECT_REG_WR(p_hwfn, igu_addr, igu_ack.sb_id_and_flags); + + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. + */ + OSAL_MMIOWB(p_hwfn->p_dev); + OSAL_BARRIER(p_hwfn->p_dev); +} + +void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie) +{ + struct ecore_hwfn *p_hwfn = (struct ecore_hwfn *)hwfn_cookie; + struct ecore_pi_info *pi_info = OSAL_NULL; + struct ecore_sb_attn_info *sb_attn; + struct ecore_sb_info *sb_info; + static int arr_size; + u16 rc = 0; + + if (!p_hwfn) { + DP_ERR(p_hwfn->p_dev, "DPC called - no hwfn!\n"); + return; + } + + if (!p_hwfn->p_sp_sb) { + DP_ERR(p_hwfn->p_dev, "DPC called - no p_sp_sb\n"); + return; + } + + sb_info = &p_hwfn->p_sp_sb->sb_info; + arr_size = OSAL_ARRAY_SIZE(p_hwfn->p_sp_sb->pi_info_arr); + if (!sb_info) { + DP_ERR(p_hwfn->p_dev, + "Status block is NULL - cannot ack interrupts\n"); + return; + } + + if (!p_hwfn->p_sb_attn) { + DP_ERR(p_hwfn->p_dev, "DPC called - no p_sb_attn"); + return; + } + sb_attn = p_hwfn->p_sb_attn; + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, "DPC Called! (hwfn %p %d)\n", + p_hwfn, p_hwfn->my_id); + + /* Disable ack for def status block. Required both for msix + + * inta in non-mask mode, in inta does no harm. + */ + ecore_sb_ack(sb_info, IGU_INT_DISABLE, 0); + + /* Gather Interrupts/Attentions information */ + if (!sb_info->sb_virt) { + DP_ERR(p_hwfn->p_dev, + "Interrupt Status block is NULL -" + " cannot check for new interrupts!\n"); + } else { + u32 tmp_index = sb_info->sb_ack; + rc = ecore_sb_update_sb_idx(sb_info); + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, + "Interrupt indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_info->sb_ack); + } + + if (!sb_attn || !sb_attn->sb_attn) { + DP_ERR(p_hwfn->p_dev, + "Attentions Status block is NULL -" + " cannot check for new attentions!\n"); + } else { + u16 tmp_index = sb_attn->index; + + rc |= ecore_attn_update_idx(p_hwfn, sb_attn); + DP_VERBOSE(p_hwfn->p_dev, ECORE_MSG_INTR, + "Attention indices: 0x%08x --> 0x%08x\n", + tmp_index, sb_attn->index); + } + + /* Check if we expect interrupts at this time. if not just ack them */ + if (!(rc & ECORE_SB_EVENT_MASK)) { + ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + + /* Check the validity of the DPC ptt. 
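ecore_int_attentions above guards against a torn read of the DMA-updated attention status block by re-reading until the sequence index is stable, then derives the newly asserted and newly deasserted sets from the raw bits, the acks and the previously known state. The derivation on its own, with plain variables standing in for the status block (hw_sb, read_stable and the sample values are invented):

#include <stdint.h>
#include <stdio.h>

#define ATTN_STATE_BITS 0xfffu

struct snapshot {
    uint16_t index;
    uint32_t attn_bits;
    uint32_t attn_acks;
};

/* Hypothetical status block that hardware would update via DMA. */
static volatile struct snapshot hw_sb = { 7, 0x105, 0x001 };

/* Re-read until the index is stable so bits/acks belong to one update. */
static struct snapshot read_stable(void)
{
    struct snapshot s;

    do {
        s.index = hw_sb.index;
        s.attn_bits = hw_sb.attn_bits;
        s.attn_acks = hw_sb.attn_acks;
    } while (s.index != hw_sb.index);

    return s;
}

int main(void)
{
    uint16_t known_attn = 0x001; /* previously asserted and acked */
    struct snapshot s = read_stable();

    /* Asserted: set and unacked now, and not previously known.
     * Deasserted: clear but still acked, and previously known.
     */
    uint16_t asserted =
        (uint16_t)(s.attn_bits & ~s.attn_acks & ATTN_STATE_BITS) & ~known_attn;
    uint16_t deasserted =
        (uint16_t)(~s.attn_bits & s.attn_acks & ATTN_STATE_BITS) & known_attn;

    printf("asserted=0x%03x deasserted=0x%03x\n",
           (unsigned)asserted, (unsigned)deasserted);
    return 0;
}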
If not ack interrupts and fail */ + if (!p_hwfn->p_dpc_ptt) { + DP_NOTICE(p_hwfn->p_dev, true, "Failed to allocate PTT\n"); + ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); + return; + } + + if (rc & ECORE_SB_ATT_IDX) + ecore_int_attentions(p_hwfn); + + if (rc & ECORE_SB_IDX) { + int pi; + + /* Since we only looked at the SB index, it's possible more + * than a single protocol-index on the SB incremented. + * Iterate over all configured protocol indices and check + * whether something happened for each. + */ + for (pi = 0; pi < arr_size; pi++) { + pi_info = &p_hwfn->p_sp_sb->pi_info_arr[pi]; + if (pi_info->comp_cb != OSAL_NULL) + pi_info->comp_cb(p_hwfn, pi_info->cookie); + } + } + + if (sb_attn && (rc & ECORE_SB_ATT_IDX)) { + /* This should be done before the interrupts are enabled, + * since otherwise a new attention will be generated. + */ + ecore_sb_ack_attn(p_hwfn, sb_info->igu_addr, sb_attn->index); + } + + ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1); +} + +static void ecore_int_sb_attn_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_sb_attn_info *p_sb = p_hwfn->p_sb_attn; + + if (!p_sb) + return; + + if (p_sb->sb_attn) { + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sb->sb_attn, + p_sb->sb_phys, + SB_ATTN_ALIGNED_SIZE(p_hwfn)); + } + OSAL_FREE(p_hwfn->p_dev, p_sb); +} + +static void ecore_int_sb_attn_setup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + + OSAL_MEMSET(sb_info->sb_attn, 0, sizeof(*sb_info->sb_attn)); + + sb_info->index = 0; + sb_info->known_attn = 0; + + /* Configure Attention Status Block in IGU */ + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_L, + DMA_LO(p_hwfn->p_sb_attn->sb_phys)); + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTN_MSG_ADDR_H, + DMA_HI(p_hwfn->p_sb_attn->sb_phys)); +} + +static void ecore_int_sb_attn_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + void *sb_virt_addr, dma_addr_t sb_phy_addr) +{ + struct ecore_sb_attn_info *sb_info = p_hwfn->p_sb_attn; + int i, j, k; + + sb_info->sb_attn = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + /* Set the pointer to the AEU descriptors */ + sb_info->p_aeu_desc = aeu_descs; + + /* Calculate Parity Masks */ + OSAL_MEMSET(sb_info->parity_mask, 0, sizeof(u32) * NUM_ATTN_REGS); + for (i = 0; i < NUM_ATTN_REGS; i++) { + /* j is array index, k is bit index */ + for (j = 0, k = 0; k < 32; j++) { + unsigned int flags = aeu_descs[i].bits[j].flags; + + if (flags & ATTENTION_PARITY) + sb_info->parity_mask[i] |= 1 << k; + + k += ATTENTION_LENGTH(flags); + } + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Attn Mask [Reg %d]: 0x%08x\n", + i, sb_info->parity_mask[i]); + } + + /* Set the address of cleanup for the mcp attention */ + sb_info->mfw_attn_addr = (p_hwfn->rel_pf_id << 3) + + MISC_REG_AEU_GENERAL_ATTN_0; + + ecore_int_sb_attn_setup(p_hwfn, p_ptt); +} + +static enum _ecore_status_t ecore_int_sb_attn_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + struct ecore_sb_attn_info *p_sb; + dma_addr_t p_phys = 0; + void *p_virt; + + /* SB struct */ + p_sb = OSAL_ALLOC(p_dev, GFP_KERNEL, sizeof(struct ecore_sb_attn_info)); + if (!p_sb) { + DP_NOTICE(p_dev, true, + "Failed to allocate `struct ecore_sb_attn_info'"); + return ECORE_NOMEM; + } + + /* SB ring */ + p_virt = OSAL_DMA_ALLOC_COHERENT(p_dev, &p_phys, + SB_ATTN_ALIGNED_SIZE(p_hwfn)); + if (!p_virt) { + DP_NOTICE(p_dev, true, + "Failed to allocate status block (attentions)"); + OSAL_FREE(p_dev, p_sb); + return ECORE_NOMEM; + } + + /* Attention setup */ + 
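ecore_int_sb_attn_init above precomputes, for each AEU register, a mask of the bit positions that carry parity indications by walking the descriptor table with the same length-stride logic. The same computation in isolation, again over an invented descriptor layout:

#include <stdint.h>
#include <stdio.h>

#define FLAG_PARITY   (1u << 0)
#define LENGTH_SHIFT  4
#define LENGTH(flags) (((flags) >> LENGTH_SHIFT) & 0xffu)

struct desc {
    uint32_t flags;
};

/* Invented 32-bit register layout: 4 plain bits, two parity-capable
 * two-bit entries, then reserved bits up to 32.
 */
static const struct desc descs[] = {
    { 4u << LENGTH_SHIFT },
    { (2u << LENGTH_SHIFT) | FLAG_PARITY },
    { (2u << LENGTH_SHIFT) | FLAG_PARITY },
    { 24u << LENGTH_SHIFT },
};

int main(void)
{
    uint32_t parity_mask = 0;
    uint32_t k = 0;
    size_t j;

    /* j walks descriptors, k walks bit positions; a parity-capable entry
     * contributes its first bit to the mask.
     */
    for (j = 0; j < sizeof(descs) / sizeof(descs[0]) && k < 32; j++) {
        if (descs[j].flags & FLAG_PARITY)
            parity_mask |= 1u << k;
        k += LENGTH(descs[j].flags);
    }

    printf("parity mask: 0x%08x\n", (unsigned)parity_mask); /* 0x00000050 here */
    return 0;
}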
p_hwfn->p_sb_attn = p_sb; + ecore_int_sb_attn_init(p_hwfn, p_ptt, p_virt, p_phys); + + return ECORE_SUCCESS; +} + +/* coalescing timeout = timeset << (timer_res + 1) */ +#ifdef RTE_LIBRTE_QEDE_RX_COAL_US +#define ECORE_CAU_DEF_RX_USECS RTE_LIBRTE_QEDE_RX_COAL_US +#else +#define ECORE_CAU_DEF_RX_USECS 24 +#endif + +#ifdef RTE_LIBRTE_QEDE_TX_COAL_US +#define ECORE_CAU_DEF_TX_USECS RTE_LIBRTE_QEDE_TX_COAL_US +#else +#define ECORE_CAU_DEF_TX_USECS 48 +#endif + +void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, + u8 pf_id, u16 vf_number, u8 vf_valid) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u32 cau_state; + + OSAL_MEMSET(p_sb_entry, 0, sizeof(*p_sb_entry)); + + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_PF_NUMBER, pf_id); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_NUMBER, vf_number); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_VF_VALID, vf_valid); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET0, 0x7F); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_SB_TIMESET1, 0x7F); + + /* setting the time resultion to a fixed value ( = 1) */ + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES0, + ECORE_CAU_DEF_RX_TIMER_RES); + SET_FIELD(p_sb_entry->params, CAU_SB_ENTRY_TIMER_RES1, + ECORE_CAU_DEF_TX_TIMER_RES); + + cau_state = CAU_HC_DISABLE_STATE; + + if (p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { + cau_state = CAU_HC_ENABLE_STATE; + if (!p_dev->rx_coalesce_usecs) { + p_dev->rx_coalesce_usecs = ECORE_CAU_DEF_RX_USECS; + DP_INFO(p_dev, "Coalesce params rx-usecs=%u\n", + p_dev->rx_coalesce_usecs); + } + if (!p_dev->tx_coalesce_usecs) { + p_dev->tx_coalesce_usecs = ECORE_CAU_DEF_TX_USECS; + DP_INFO(p_dev, "Coalesce params tx-usecs=%u\n", + p_dev->tx_coalesce_usecs); + } + } + + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE0, cau_state); + SET_FIELD(p_sb_entry->data, CAU_SB_ENTRY_STATE1, cau_state); +} + +void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t sb_phys, u16 igu_sb_id, + u16 vf_number, u8 vf_valid) +{ + struct cau_sb_entry sb_entry; + + ecore_init_cau_sb_entry(p_hwfn, &sb_entry, p_hwfn->rel_pf_id, + vf_number, vf_valid); + + if (p_hwfn->hw_init_done) { + /* Wide-bus, initialize via DMAE */ + u64 phys_addr = (u64)sb_phys; + + ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&phys_addr, + CAU_REG_SB_ADDR_MEMORY + + igu_sb_id * sizeof(u64), 2, 0); + ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_sb_id * sizeof(u64), 2, 0); + } else { + /* Initialize Status Block Address */ + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_ADDR_MEMORY_RT_OFFSET + + igu_sb_id * 2, sb_phys); + + STORE_RT_REG_AGG(p_hwfn, + CAU_REG_SB_VAR_MEMORY_RT_OFFSET + + igu_sb_id * 2, sb_entry); + } + + /* Configure pi coalescing if set */ + if (p_hwfn->p_dev->int_coalescing_mode == ECORE_COAL_MODE_ENABLE) { + u8 num_tc = 1; /* @@@TBD aelior ECORE_MULTI_COS */ + u8 timeset = p_hwfn->p_dev->rx_coalesce_usecs >> + (ECORE_CAU_DEF_RX_TIMER_RES + 1); + u8 i; + + ecore_int_cau_conf_pi(p_hwfn, p_ptt, igu_sb_id, RX_PI, + ECORE_COAL_RX_STATE_MACHINE, timeset); + + timeset = p_hwfn->p_dev->tx_coalesce_usecs >> + (ECORE_CAU_DEF_TX_TIMER_RES + 1); + + for (i = 0; i < num_tc; i++) { + ecore_int_cau_conf_pi(p_hwfn, p_ptt, + igu_sb_id, TX_PI(i), + ECORE_COAL_TX_STATE_MACHINE, + timeset); + } + } +} + +void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 igu_sb_id, u32 pi_index, + enum ecore_coalescing_fsm coalescing_fsm, u8 timeset) +{ + struct 
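Per the comment above, the CAU expresses a coalescing interval as timeset << (timer_res + 1), so the driver derives a timeset from the requested microsecond value with the inverse shift. A small worked example of that arithmetic follows; RX_TIMER_RES/TX_TIMER_RES are assumed to be 0 here purely for illustration, since the real ECORE_CAU_DEF_*_TIMER_RES values are defined elsewhere in the driver.

#include <stdint.h>
#include <stdio.h>

/* Assumed resolution values for the sketch only. */
#define RX_TIMER_RES 0
#define TX_TIMER_RES 0

static uint8_t usecs_to_timeset(uint32_t usecs, uint8_t timer_res)
{
    /* Inverse of: coalescing timeout = timeset << (timer_res + 1) */
    return (uint8_t)(usecs >> (timer_res + 1));
}

int main(void)
{
    uint32_t rx_usecs = 24, tx_usecs = 48; /* the default usec values above */

    printf("rx timeset = %u -> timeout %u us\n",
           (unsigned)usecs_to_timeset(rx_usecs, RX_TIMER_RES),
           (unsigned)(usecs_to_timeset(rx_usecs, RX_TIMER_RES) << (RX_TIMER_RES + 1)));
    printf("tx timeset = %u -> timeout %u us\n",
           (unsigned)usecs_to_timeset(tx_usecs, TX_TIMER_RES),
           (unsigned)(usecs_to_timeset(tx_usecs, TX_TIMER_RES) << (TX_TIMER_RES + 1)));
    return 0;
}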
cau_pi_entry pi_entry; + u32 sb_offset, pi_offset; + + if (IS_VF(p_hwfn->p_dev)) + return; /* @@@TBD MichalK- VF CAU... */ + + sb_offset = igu_sb_id * PIS_PER_SB; + OSAL_MEMSET(&pi_entry, 0, sizeof(struct cau_pi_entry)); + + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_PI_TIMESET, timeset); + if (coalescing_fsm == ECORE_COAL_RX_STATE_MACHINE) + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 0); + else + SET_FIELD(pi_entry.prod, CAU_PI_ENTRY_FSM_SEL, 1); + + pi_offset = sb_offset + pi_index; + if (p_hwfn->hw_init_done) { + ecore_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + pi_offset * sizeof(u32), + *((u32 *)&(pi_entry))); + } else { + STORE_RT_REG(p_hwfn, + CAU_REG_PI_MEMORY_RT_OFFSET + pi_offset, + *((u32 *)&(pi_entry))); + } +} + +void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info) +{ + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); + + if (IS_PF(p_hwfn->p_dev)) + ecore_int_cau_conf_sb(p_hwfn, p_ptt, sb_info->sb_phys, + sb_info->igu_sb_id, 0, 0); +} + +/** + * @brief ecore_get_igu_sb_id - given a sw sb_id return the + * igu_sb_id + * + * @param p_hwfn + * @param sb_id + * + * @return u16 + */ +static u16 ecore_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) +{ + u16 igu_sb_id; + + /* Assuming continuous set of IGU SBs dedicated for given PF */ + if (sb_id == ECORE_SP_SB_ID) + igu_sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + else if (IS_PF(p_hwfn->p_dev)) + igu_sb_id = sb_id + p_hwfn->hw_info.p_igu_info->igu_base_sb; + else + igu_sb_id = ecore_vf_get_igu_sb_id(p_hwfn, sb_id); + + if (sb_id == ECORE_SP_SB_ID) + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "Slowpath SB index in IGU is 0x%04x\n", igu_sb_id); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "SB [%04x] <--> IGU SB [%04x]\n", sb_id, igu_sb_id); + + return igu_sb_id; +} + +enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, u16 sb_id) +{ + sb_info->sb_virt = sb_virt_addr; + sb_info->sb_phys = sb_phy_addr; + + sb_info->igu_sb_id = ecore_get_igu_sb_id(p_hwfn, sb_id); + + if (sb_id != ECORE_SP_SB_ID) { + p_hwfn->sbs_info[sb_id] = sb_info; + p_hwfn->num_sbs++; + } +#ifdef ECORE_CONFIG_DIRECT_HWFN + sb_info->p_hwfn = p_hwfn; +#endif + sb_info->p_dev = p_hwfn->p_dev; + + /* The igu address will hold the absolute address that needs to be + * written to for a specific status block + */ + if (IS_PF(p_hwfn->p_dev)) { + sb_info->igu_addr = (u8 OSAL_IOMEM *)p_hwfn->regview + + GTT_BAR0_MAP_REG_IGU_CMD + (sb_info->igu_sb_id << 3); + + } else { + sb_info->igu_addr = + (u8 OSAL_IOMEM *)p_hwfn->regview + + PXP_VF_BAR0_START_IGU + + ((IGU_CMD_INT_ACK_BASE + sb_info->igu_sb_id) << 3); + } + + sb_info->flags |= ECORE_SB_INFO_INIT; + + ecore_int_sb_setup(p_hwfn, p_ptt, sb_info); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn, + struct ecore_sb_info *sb_info, + u16 sb_id) +{ + if (sb_id == ECORE_SP_SB_ID) { + DP_ERR(p_hwfn, "Do Not free sp sb using this function"); + return ECORE_INVAL; + } + + /* zero status block and ack counter */ + sb_info->sb_ack = 0; + OSAL_MEMSET(sb_info->sb_virt, 0, sizeof(*sb_info->sb_virt)); + + if (p_hwfn->sbs_info[sb_id] != OSAL_NULL) { + p_hwfn->sbs_info[sb_id] = OSAL_NULL; + p_hwfn->num_sbs--; + } + + return ECORE_SUCCESS; +} + +static void ecore_int_sp_sb_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_sb_sp_info *p_sb = 
p_hwfn->p_sp_sb; + + if (!p_sb) + return; + + if (p_sb->sb_info.sb_virt) { + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_sb->sb_info.sb_virt, + p_sb->sb_info.sb_phys, + SB_ALIGNED_SIZE(p_hwfn)); + } + + OSAL_FREE(p_hwfn->p_dev, p_sb); +} + +static enum _ecore_status_t ecore_int_sp_sb_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_sb_sp_info *p_sb; + dma_addr_t p_phys = 0; + void *p_virt; + + /* SB struct */ + p_sb = + OSAL_ALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(struct ecore_sb_sp_info)); + if (!p_sb) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_sb_info'"); + return ECORE_NOMEM; + } + + /* SB ring */ + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_phys, SB_ALIGNED_SIZE(p_hwfn)); + if (!p_virt) { + DP_NOTICE(p_hwfn, true, "Failed to allocate status block"); + OSAL_FREE(p_hwfn->p_dev, p_sb); + return ECORE_NOMEM; + } + + /* Status Block setup */ + p_hwfn->p_sp_sb = p_sb; + ecore_int_sb_init(p_hwfn, p_ptt, &p_sb->sb_info, + p_virt, p_phys, ECORE_SP_SB_ID); + + OSAL_MEMSET(p_sb->pi_info_arr, 0, sizeof(p_sb->pi_info_arr)); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, + ecore_int_comp_cb_t comp_cb, + void *cookie, + u8 *sb_idx, __le16 **p_fw_cons) +{ + struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + enum _ecore_status_t rc = ECORE_NOMEM; + u8 pi; + + /* Look for a free index */ + for (pi = 0; pi < OSAL_ARRAY_SIZE(p_sp_sb->pi_info_arr); pi++) { + if (p_sp_sb->pi_info_arr[pi].comp_cb != OSAL_NULL) + continue; + + p_sp_sb->pi_info_arr[pi].comp_cb = comp_cb; + p_sp_sb->pi_info_arr[pi].cookie = cookie; + *sb_idx = pi; + *p_fw_cons = &p_sp_sb->sb_info.sb_virt->pi_array[pi]; + rc = ECORE_SUCCESS; + break; + } + + return rc; +} + +enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi) +{ + struct ecore_sb_sp_info *p_sp_sb = p_hwfn->p_sp_sb; + + if (p_sp_sb->pi_info_arr[pi].comp_cb == OSAL_NULL) + return ECORE_NOMEM; + + p_sp_sb->pi_info_arr[pi].comp_cb = OSAL_NULL; + p_sp_sb->pi_info_arr[pi].cookie = OSAL_NULL; + return ECORE_SUCCESS; +} + +u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn) +{ + return p_hwfn->p_sp_sb->sb_info.igu_sb_id; +} + +void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode) +{ + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) + DP_INFO(p_hwfn, "FPGA - don't enable ATTN generation in IGU\n"); + else +#endif + igu_pf_conf |= IGU_PF_CONF_ATTN_BIT_EN; + + p_hwfn->p_dev->int_mode = int_mode; + switch (p_hwfn->p_dev->int_mode) { + case ECORE_INT_MODE_INTA: + igu_pf_conf |= IGU_PF_CONF_INT_LINE_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case ECORE_INT_MODE_MSI: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + igu_pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN; + break; + + case ECORE_INT_MODE_MSIX: + igu_pf_conf |= IGU_PF_CONF_MSI_MSIX_EN; + break; + case ECORE_INT_MODE_POLL: + break; + } + + ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, igu_pf_conf); +} + +static void ecore_int_igu_enable_attn(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, + "FPGA - Don't enable Attentions in IGU and MISC\n"); + return; + } +#endif + + /* Configure AEU signal change to produce attentions */ + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0); + ecore_wr(p_hwfn, p_ptt, IGU_REG_LEADING_EDGE_LATCH, 0xfff); + ecore_wr(p_hwfn, p_ptt, 
IGU_REG_TRAILING_EDGE_LATCH, 0xfff); + ecore_wr(p_hwfn, p_ptt, IGU_REG_ATTENTION_ENABLE, 0xfff); + + OSAL_MMIOWB(p_hwfn->p_dev); + + /* Unmask AEU signals toward IGU */ + ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_MASK_ATTN_IGU, 0xff); +} + +enum _ecore_status_t +ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 tmp, reg_addr; + + /* @@@tmp - Mask General HW attentions 0-31, Enable 32-36 */ + tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0); + tmp |= 0xf; + ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE3_IGU_OUT_0, 0); + ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp); + + /* @@@tmp - Starting with MFW 8.2.1.0 we've started hitting AVS stop + * attentions. Since we're waiting for BRCM answer regarding this + * attention, in the meanwhile we simply mask it. + */ + tmp = ecore_rd(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0); + tmp &= ~0x800; + ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_ENABLE4_IGU_OUT_0, tmp); + + /* @@@tmp - Mask interrupt sources - should move to init tool; + * Also, correct for A0 [might still change in B0. + */ + reg_addr = + attn_blocks[BLOCK_BRB].chip_regs[ECORE_GET_TYPE(p_hwfn->p_dev)]. + int_regs[0]->mask_addr; + tmp = ecore_rd(p_hwfn, p_ptt, reg_addr); + tmp |= (1 << 21); /* Was PKT4_LEN_ERROR */ + ecore_wr(p_hwfn, p_ptt, reg_addr, tmp); + + ecore_int_igu_enable_attn(p_hwfn, p_ptt); + + if ((int_mode != ECORE_INT_MODE_INTA) || IS_LEAD_HWFN(p_hwfn)) { + rc = OSAL_SLOWPATH_IRQ_REQ(p_hwfn); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, + "Slowpath IRQ request failed\n"); + return ECORE_NORESOURCES; + } + p_hwfn->b_int_requested = true; + } + + /* Enable interrupt Generation */ + ecore_int_igu_enable_int(p_hwfn, p_ptt, int_mode); + + p_hwfn->b_int_enabled = 1; + + return rc; +} + +void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + p_hwfn->b_int_enabled = 0; + + if (IS_VF(p_hwfn->p_dev)) + return; + + ecore_wr(p_hwfn, p_ptt, IGU_REG_PF_CONFIGURATION, 0); +} + +#define IGU_CLEANUP_SLEEP_LENGTH (1000) +void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 sb_id, bool cleanup_set, u16 opaque_fid) +{ + u32 cmd_ctrl = 0, val = 0, sb_bit = 0, sb_bit_addr = 0, data = 0; + u32 pxp_addr = IGU_CMD_INT_ACK_BASE + sb_id; + u32 sleep_cnt = IGU_CLEANUP_SLEEP_LENGTH; + u8 type = 0; /* FIXME MichalS type??? */ + + OSAL_BUILD_BUG_ON((IGU_REG_CLEANUP_STATUS_4 - + IGU_REG_CLEANUP_STATUS_0) != 0x200); + + /* USE Control Command Register to perform cleanup. There is an + * option to do this using IGU bar, but then it can't be used for VFs. + */ + + /* Set the data field */ + SET_FIELD(data, IGU_CLEANUP_CLEANUP_SET, cleanup_set ? 
1 : 0); + SET_FIELD(data, IGU_CLEANUP_CLEANUP_TYPE, type); + SET_FIELD(data, IGU_CLEANUP_COMMAND_TYPE, IGU_COMMAND_TYPE_SET); + + /* Set the control register */ + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_PXP_ADDR, pxp_addr); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_FID, opaque_fid); + SET_FIELD(cmd_ctrl, IGU_CTRL_REG_TYPE, IGU_CTRL_CMD_TYPE_WR); + + ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_32LSB_DATA, data); + + OSAL_BARRIER(p_hwfn->p_dev); + + ecore_wr(p_hwfn, p_ptt, IGU_REG_COMMAND_REG_CTRL, cmd_ctrl); + + OSAL_MMIOWB(p_hwfn->p_dev); + + /* calculate where to read the status bit from */ + sb_bit = 1 << (sb_id % 32); + sb_bit_addr = sb_id / 32 * sizeof(u32); + + sb_bit_addr += IGU_REG_CLEANUP_STATUS_0 + (0x80 * type); + + /* Now wait for the command to complete */ + while (--sleep_cnt) { + val = ecore_rd(p_hwfn, p_ptt, sb_bit_addr); + if ((val & sb_bit) == (cleanup_set ? sb_bit : 0)) + break; + OSAL_MSLEEP(5); + } + + if (!sleep_cnt) + DP_NOTICE(p_hwfn, true, + "Timeout waiting for clear status 0x%08x [for sb %d]\n", + val, sb_id); +} + +void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 sb_id, u16 opaque, bool b_set) +{ + int pi; + + /* Set */ + if (b_set) + ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 1, opaque); + + /* Clear */ + ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, sb_id, 0, opaque); + + /* Clear the CAU for the SB */ + for (pi = 0; pi < 12; pi++) + ecore_wr(p_hwfn, p_ptt, + CAU_REG_PI_MEMORY + (sb_id * 12 + pi) * 4, 0); +} + +void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_set, bool b_slowpath) +{ + u32 igu_base_sb = p_hwfn->hw_info.p_igu_info->igu_base_sb; + u32 igu_sb_cnt = p_hwfn->hw_info.p_igu_info->igu_sb_cnt; + u32 sb_id = 0, val = 0; + + /* @@@TBD MichalK temporary... should be moved to init-tool... 
*/ + val = ecore_rd(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION); + val |= IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN; + val &= ~IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN; + ecore_wr(p_hwfn, p_ptt, IGU_REG_BLOCK_CONFIGURATION, val); + /* end temporary */ + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "IGU cleaning SBs [%d,...,%d]\n", + igu_base_sb, igu_base_sb + igu_sb_cnt - 1); + + for (sb_id = igu_base_sb; sb_id < igu_base_sb + igu_sb_cnt; sb_id++) + ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + p_hwfn->hw_info.opaque_fid, + b_set); + + if (!b_slowpath) + return; + + sb_id = p_hwfn->hw_info.p_igu_info->igu_dsb_id; + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "IGU cleaning slowpath SB [%d]\n", sb_id); + ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, sb_id, + p_hwfn->hw_info.opaque_fid, b_set); +} + +static u32 ecore_int_igu_read_cam_block(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 sb_id) +{ + u32 val = ecore_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * sb_id); + struct ecore_igu_block *p_block; + + p_block = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + + /* stop scanning when hit first invalid PF entry */ + if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && + GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) + goto out; + + /* Fill the block information */ + p_block->status = ECORE_IGU_STATUS_VALID; + p_block->function_id = GET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER); + p_block->is_pf = GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID); + p_block->vector_number = GET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER); + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "IGU_BLOCK: [SB 0x%04x, Value in CAM 0x%08x] func_id = %d" + " is_pf = %d vector_num = 0x%x\n", + sb_id, val, p_block->function_id, p_block->is_pf, + p_block->vector_number); + +out: + return val; +} + +enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_igu_info *p_igu_info; + struct ecore_igu_block *p_block; + u16 sb_id, last_iov_sb_id = 0; + u32 min_vf, max_vf, val; + u16 prev_sb_id = 0xFF; + + p_hwfn->hw_info.p_igu_info = OSAL_ALLOC(p_hwfn->p_dev, + GFP_KERNEL, + sizeof(*p_igu_info)); + if (!p_hwfn->hw_info.p_igu_info) + return ECORE_NOMEM; + + OSAL_MEMSET(p_hwfn->hw_info.p_igu_info, 0, sizeof(*p_igu_info)); + + p_igu_info = p_hwfn->hw_info.p_igu_info; + + /* Initialize base sb / sb cnt for PFs and VFs */ + p_igu_info->igu_base_sb = 0xffff; + p_igu_info->igu_sb_cnt = 0; + p_igu_info->igu_dsb_id = 0xffff; + p_igu_info->igu_base_sb_iov = 0xffff; + +#ifdef CONFIG_ECORE_SRIOV + min_vf = p_hwfn->hw_info.first_vf_in_pf; + max_vf = p_hwfn->hw_info.first_vf_in_pf + + p_hwfn->p_dev->sriov_info.total_vfs; +#else + min_vf = 0; + max_vf = 0; +#endif + + for (sb_id = 0; sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); + sb_id++) { + p_block = &p_igu_info->igu_map.igu_blocks[sb_id]; + val = ecore_int_igu_read_cam_block(p_hwfn, p_ptt, sb_id); + if (!GET_FIELD(val, IGU_MAPPING_LINE_VALID) && + GET_FIELD(val, IGU_MAPPING_LINE_PF_VALID)) + break; + + if (p_block->is_pf) { + if (p_block->function_id == p_hwfn->rel_pf_id) { + p_block->status |= ECORE_IGU_STATUS_PF; + + if (p_block->vector_number == 0) { + if (p_igu_info->igu_dsb_id == 0xffff) + p_igu_info->igu_dsb_id = sb_id; + } else { + if (p_igu_info->igu_base_sb == 0xffff) { + p_igu_info->igu_base_sb = sb_id; + } else if (prev_sb_id != sb_id - 1) { + DP_NOTICE(p_hwfn->p_dev, false, + "consecutive igu" + " vectors for HWFN" + " %x broken", + p_hwfn->rel_pf_id); + break; + } + prev_sb_id = sb_id; + /* we 
don't count the default */ + (p_igu_info->igu_sb_cnt)++; + } + } + } else { + if ((p_block->function_id >= min_vf) && + (p_block->function_id < max_vf)) { + /* Available for VFs of this PF */ + if (p_igu_info->igu_base_sb_iov == 0xffff) { + p_igu_info->igu_base_sb_iov = sb_id; + } else if (last_iov_sb_id != sb_id - 1) { + if (!val) + DP_VERBOSE(p_hwfn->p_dev, + ECORE_MSG_INTR, + "First uninited IGU" + " CAM entry at" + " index 0x%04x\n", + sb_id); + else + DP_NOTICE(p_hwfn->p_dev, false, + "Consecutive igu" + " vectors for HWFN" + " %x vfs is broken" + " [jumps from %04x" + " to %04x]\n", + p_hwfn->rel_pf_id, + last_iov_sb_id, + sb_id); + break; + } + p_block->status |= ECORE_IGU_STATUS_FREE; + p_hwfn->hw_info.p_igu_info->free_blks++; + last_iov_sb_id = sb_id; + } + } + } + p_igu_info->igu_sb_cnt_iov = p_igu_info->free_blks; + + DP_VERBOSE(p_hwfn, ECORE_MSG_INTR, + "IGU igu_base_sb=0x%x [IOV 0x%x] igu_sb_cnt=%d [IOV 0x%x] " + "igu_dsb_id=0x%x\n", + p_igu_info->igu_base_sb, p_igu_info->igu_base_sb_iov, + p_igu_info->igu_sb_cnt, p_igu_info->igu_sb_cnt_iov, + p_igu_info->igu_dsb_id); + + if (p_igu_info->igu_base_sb == 0xffff || + p_igu_info->igu_dsb_id == 0xffff || p_igu_info->igu_sb_cnt == 0) { + DP_NOTICE(p_hwfn, true, + "IGU CAM returned invalid values igu_base_sb=0x%x " + "igu_sb_cnt=%d igu_dsb_id=0x%x\n", + p_igu_info->igu_base_sb, p_igu_info->igu_sb_cnt, + p_igu_info->igu_dsb_id); + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +/** + * @brief Initialize igu runtime registers + * + * @param p_hwfn + */ +void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn) +{ + u32 igu_pf_conf = IGU_PF_CONF_FUNC_EN; + + STORE_RT_REG(p_hwfn, IGU_REG_PF_CONFIGURATION_RT_OFFSET, igu_pf_conf); +} + +#define LSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_LSB_UPPER - \ + IGU_CMD_INT_ACK_BASE) +#define MSB_IGU_CMD_ADDR (IGU_REG_SISR_MDPC_WMASK_MSB_UPPER - \ + IGU_CMD_INT_ACK_BASE) +u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn) +{ + u32 intr_status_hi = 0, intr_status_lo = 0; + u64 intr_status = 0; + + intr_status_lo = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + LSB_IGU_CMD_ADDR * 8); + intr_status_hi = REG_RD(p_hwfn, + GTT_BAR0_MAP_REG_IGU_CMD + + MSB_IGU_CMD_ADDR * 8); + intr_status = ((u64)intr_status_hi << 32) + (u64)intr_status_lo; + + return intr_status; +} + +static void ecore_int_sp_dpc_setup(struct ecore_hwfn *p_hwfn) +{ + OSAL_DPC_INIT(p_hwfn->sp_dpc, p_hwfn); + p_hwfn->b_sp_dpc_enabled = true; +} + +static enum _ecore_status_t ecore_int_sp_dpc_alloc(struct ecore_hwfn *p_hwfn) +{ + p_hwfn->sp_dpc = OSAL_DPC_ALLOC(p_hwfn); + if (!p_hwfn->sp_dpc) + return ECORE_NOMEM; + + return ECORE_SUCCESS; +} + +static void ecore_int_sp_dpc_free(struct ecore_hwfn *p_hwfn) +{ + OSAL_FREE(p_hwfn->p_dev, p_hwfn->sp_dpc); +} + +enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + rc = ecore_int_sp_dpc_alloc(p_hwfn); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn->p_dev, "Failed to allocate sp dpc mem\n"); + return rc; + } + + rc = ecore_int_sp_sb_alloc(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn->p_dev, "Failed to allocate sp sb mem\n"); + return rc; + } + + rc = ecore_int_sb_attn_alloc(p_hwfn, p_ptt); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn->p_dev, "Failed to allocate sb attn mem\n"); + + return rc; +} + +void ecore_int_free(struct ecore_hwfn *p_hwfn) +{ + ecore_int_sp_sb_free(p_hwfn); + ecore_int_sb_attn_free(p_hwfn); + ecore_int_sp_dpc_free(p_hwfn); +} + +void ecore_int_setup(struct ecore_hwfn 
*p_hwfn, struct ecore_ptt *p_ptt) +{ + if (!p_hwfn || !p_hwfn->p_sp_sb || !p_hwfn->p_sb_attn) + return; + + ecore_int_sb_setup(p_hwfn, p_ptt, &p_hwfn->p_sp_sb->sb_info); + ecore_int_sb_attn_setup(p_hwfn, p_ptt); + ecore_int_sp_dpc_setup(p_hwfn); +} + +void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn, + struct ecore_sb_cnt_info *p_sb_cnt_info) +{ + struct ecore_igu_info *info = p_hwfn->hw_info.p_igu_info; + + if (!info || !p_sb_cnt_info) + return; + + p_sb_cnt_info->sb_cnt = info->igu_sb_cnt; + p_sb_cnt_info->sb_iov_cnt = info->igu_sb_cnt_iov; + p_sb_cnt_info->sb_free_blk = info->free_blks; +} + +u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) +{ + struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + + /* Determine origin of SB id */ + if ((sb_id >= p_info->igu_base_sb) && + (sb_id < p_info->igu_base_sb + p_info->igu_sb_cnt)) { + return sb_id - p_info->igu_base_sb; + } else if ((sb_id >= p_info->igu_base_sb_iov) && + (sb_id < p_info->igu_base_sb_iov + p_info->igu_sb_cnt_iov)) { + return sb_id - p_info->igu_base_sb_iov + p_info->igu_sb_cnt; + } + + DP_NOTICE(p_hwfn, true, "SB %d not in range for function\n", + sb_id); + return 0; +} + +void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev) +{ + int i; + + for_each_hwfn(p_dev, i) + p_dev->hwfns[i].b_int_requested = false; +} diff --git a/drivers/net/qede/base/ecore_int.h b/drivers/net/qede/base/ecore_int.h new file mode 100644 index 00000000..17c95213 --- /dev/null +++ b/drivers/net/qede/base/ecore_int.h @@ -0,0 +1,234 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_INT_H__ +#define __ECORE_INT_H__ + +#include "ecore.h" +#include "ecore_int_api.h" + +#define ECORE_CAU_DEF_RX_TIMER_RES 0 +#define ECORE_CAU_DEF_TX_TIMER_RES 0 + +#define ECORE_SB_ATT_IDX 0x0001 +#define ECORE_SB_EVENT_MASK 0x0003 + +#define SB_ALIGNED_SIZE(p_hwfn) \ + ALIGNED_TYPE_SIZE(struct status_block, p_hwfn) + +struct ecore_igu_block { + u8 status; +#define ECORE_IGU_STATUS_FREE 0x01 +#define ECORE_IGU_STATUS_VALID 0x02 +#define ECORE_IGU_STATUS_PF 0x04 + + u8 vector_number; + u8 function_id; + u8 is_pf; +}; + +struct ecore_igu_map { + struct ecore_igu_block igu_blocks[MAX_TOT_SB_PER_PATH]; +}; + +struct ecore_igu_info { + struct ecore_igu_map igu_map; + u16 igu_dsb_id; + u16 igu_base_sb; + u16 igu_base_sb_iov; + u16 igu_sb_cnt; + u16 igu_sb_cnt_iov; + u16 free_blks; +}; + +/* TODO Names of function may change... */ +void ecore_int_igu_init_pure_rt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + bool b_set, bool b_slowpath); + +void ecore_int_igu_init_rt(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_int_igu_read_cam - Reads the IGU CAM. + * This function needs to be called during hardware + * prepare. It reads the info from igu cam to know which + * status block is the default / base status block etc. + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_igu_read_cam(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +typedef enum _ecore_status_t (*ecore_int_comp_cb_t) (struct ecore_hwfn *p_hwfn, + void *cookie); +/** + * @brief ecore_int_register_cb - Register callback func for + * slowhwfn statusblock. + * + * Every protocol that uses the slowhwfn status block + * should register a callback function that will be called + * once there is an update of the sp status block. 
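+ *
+ * Illustrative registration sketch (my_proto_cb and my_cookie are
+ * placeholder names, not part of this API):
+ *
+ *   u8 sb_idx;
+ *   __le16 *p_fw_cons;
+ *
+ *   rc = ecore_int_register_cb(p_hwfn, my_proto_cb, my_cookie,
+ *                              &sb_idx, &p_fw_cons);
+ *
+ * On success the protocol polls *p_fw_cons and, when no longer needed,
+ * releases the slot with ecore_int_unregister_cb(p_hwfn, sb_idx).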
+ * + * @param p_hwfn + * @param comp_cb - function to be called when there is an + * interrupt on the sp sb + * + * @param cookie - passed to the callback function + * @param sb_idx - OUT parameter which gives the chosen index + * for this protocol. + * @param p_fw_cons - pointer to the actual address of the + * consumer for this protocol. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_register_cb(struct ecore_hwfn *p_hwfn, + ecore_int_comp_cb_t comp_cb, + void *cookie, + u8 *sb_idx, __le16 **p_fw_cons); +/** + * @brief ecore_int_unregister_cb - Unregisters callback + * function from sp sb. + * Partner of ecore_int_register_cb -> should be called + * when no longer required. + * + * @param p_hwfn + * @param pi + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_unregister_cb(struct ecore_hwfn *p_hwfn, u8 pi); + +/** + * @brief ecore_int_get_sp_sb_id - Get the slowhwfn sb id. + * + * @param p_hwfn + * + * @return u16 + */ +u16 ecore_int_get_sp_sb_id(struct ecore_hwfn *p_hwfn); + +/** + * @brief Status block cleanup. Should be called for each status + * block that will be used -> both PF / VF + * + * @param p_hwfn + * @param p_ptt + * @param sb_id - igu status block id + * @param cleanup_set - set(1) / clear(0) + * @param opaque_fid - the function for which to perform + * cleanup, for example a PF on behalf of + * its VFs. + */ +void ecore_int_igu_cleanup_sb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 sb_id, bool cleanup_set, u16 opaque_fid); + +/** + * @brief Status block cleanup. Should be called for each status + * block that will be used -> both PF / VF + * + * @param p_hwfn + * @param p_ptt + * @param sb_id - igu status block id + * @param opaque - opaque fid of the sb owner. + * @param cleanup_set - set(1) / clear(0) + */ +void ecore_int_igu_init_pure_rt_single(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 sb_id, u16 opaque, bool b_set); + +/** + * @brief ecore_int_cau_conf - configure cau for a given status + * block + * + * @param p_hwfn + * @param ptt + * @param sb_phys + * @param igu_sb_id + * @param vf_number + * @param vf_valid + */ +void ecore_int_cau_conf_sb(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + dma_addr_t sb_phys, + u16 igu_sb_id, u16 vf_number, u8 vf_valid); + +/** +* @brief ecore_int_alloc +* +* @param p_hwfn + * @param p_ptt +* +* @return enum _ecore_status_t +*/ +enum _ecore_status_t ecore_int_alloc(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** +* @brief ecore_int_free +* +* @param p_hwfn +*/ +void ecore_int_free(struct ecore_hwfn *p_hwfn); + +/** +* @brief ecore_int_setup +* +* @param p_hwfn +* @param p_ptt +*/ +void ecore_int_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +/** + * @brief - Returns an Rx queue index appropriate for usage with given SB. 
+ * + * @param p_hwfn + * @param sb_id - absolute index of SB + * + * @return index of Rx queue + */ +u16 ecore_int_queue_id_from_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id); + +/** + * @brief - Enable Interrupt & Attention for hw function + * + * @param p_hwfn + * @param p_ptt + * @param int_mode + * +* @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_int_igu_enable(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode); + +/** + * @brief - Initialize CAU status block entry + * + * @param p_hwfn + * @param p_sb_entry + * @param pf_id + * @param vf_number + * @param vf_valid + */ +void ecore_init_cau_sb_entry(struct ecore_hwfn *p_hwfn, + struct cau_sb_entry *p_sb_entry, u8 pf_id, + u16 vf_number, u8 vf_valid); + +#ifndef ASIC_ONLY +#define ECORE_MAPPING_MEMORY_SIZE(dev) \ + ((CHIP_REV_IS_SLOW(dev) && (!(dev)->b_is_emul_full)) ? \ + 136 : NUM_OF_SBS(dev)) +#else +#define ECORE_MAPPING_MEMORY_SIZE(dev) NUM_OF_SBS(dev) +#endif + +#endif /* __ECORE_INT_H__ */ diff --git a/drivers/net/qede/base/ecore_int_api.h b/drivers/net/qede/base/ecore_int_api.h new file mode 100644 index 00000000..f6db807e --- /dev/null +++ b/drivers/net/qede/base/ecore_int_api.h @@ -0,0 +1,277 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_INT_API_H__ +#define __ECORE_INT_API_H__ + +#ifndef __EXTRACT__LINUX__ +#define ECORE_SB_IDX 0x0002 + +#define RX_PI 0 +#define TX_PI(tc) (RX_PI + 1 + tc) + +#ifndef ECORE_INT_MODE +#define ECORE_INT_MODE +enum ecore_int_mode { + ECORE_INT_MODE_INTA, + ECORE_INT_MODE_MSIX, + ECORE_INT_MODE_MSI, + ECORE_INT_MODE_POLL, +}; +#endif + +struct ecore_sb_info { + struct status_block *sb_virt; + dma_addr_t sb_phys; + u32 sb_ack; /* Last given ack */ + u16 igu_sb_id; + void OSAL_IOMEM *igu_addr; + u8 flags; +#define ECORE_SB_INFO_INIT 0x1 +#define ECORE_SB_INFO_SETUP 0x2 + +#ifdef ECORE_CONFIG_DIRECT_HWFN + struct ecore_hwfn *p_hwfn; +#endif + struct ecore_dev *p_dev; +}; + +struct ecore_sb_cnt_info { + int sb_cnt; + int sb_iov_cnt; + int sb_free_blk; +}; + +static OSAL_INLINE u16 ecore_sb_update_sb_idx(struct ecore_sb_info *sb_info) +{ + u32 prod = 0; + u16 rc = 0; + + /* barrier(); status block is written to by the chip */ + /* FIXME: need some sort of barrier. */ + prod = OSAL_LE32_TO_CPU(sb_info->sb_virt->prod_index) & + STATUS_BLOCK_PROD_INDEX_MASK; + if (sb_info->sb_ack != prod) { + sb_info->sb_ack = prod; + rc |= ECORE_SB_IDX; + } + + OSAL_MMIOWB(sb_info->p_dev); + return rc; +} + +/** + * + * @brief This function creates an update command for interrupts that is + * written to the IGU. + * + * @param sb_info - This is the structure allocated and + * initialized per status block. Assumption is + * that it was initialized using ecore_sb_init + * @param int_cmd - Enable/Disable/Nop + * @param upd_flg - whether igu consumer should be + * updated. 
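+ *
+ * A typical acknowledge sequence from a polling/interrupt context is
+ * sketched below (assumes sb_info was initialized beforehand;
+ * IGU_INT_ENABLE stands for the 'enable' command of enum igu_int_cmd):
+ *
+ *   if (ecore_sb_update_sb_idx(sb_info) & ECORE_SB_IDX)
+ *       handle the updated producer values;
+ *   ecore_sb_ack(sb_info, IGU_INT_ENABLE, 1);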
+ * + * @return OSAL_INLINE void + */ +static OSAL_INLINE void ecore_sb_ack(struct ecore_sb_info *sb_info, + enum igu_int_cmd int_cmd, u8 upd_flg) +{ + struct igu_prod_cons_update igu_ack = { 0 }; + + igu_ack.sb_id_and_flags = + ((sb_info->sb_ack << IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT) | + (upd_flg << IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT) | + (int_cmd << IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT) | + (IGU_SEG_ACCESS_REG << IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT)); + +#ifdef ECORE_CONFIG_DIRECT_HWFN + DIRECT_REG_WR(sb_info->p_hwfn, sb_info->igu_addr, + igu_ack.sb_id_and_flags); +#else + DIRECT_REG_WR(OSAL_NULL, sb_info->igu_addr, igu_ack.sb_id_and_flags); +#endif + /* Both segments (interrupts & acks) are written to same place address; + * Need to guarantee all commands will be received (in-order) by HW. + */ + OSAL_MMIOWB(sb_info->p_dev); + OSAL_BARRIER(sb_info->p_dev); +} + +#ifdef ECORE_CONFIG_DIRECT_HWFN +static OSAL_INLINE void __internal_ram_wr(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM *addr, + int size, u32 *data) +#else +static OSAL_INLINE void __internal_ram_wr(void *p_hwfn, + void OSAL_IOMEM *addr, + int size, u32 *data) +#endif +{ + unsigned int i; + + for (i = 0; i < size / sizeof(*data); i++) + DIRECT_REG_WR(p_hwfn, &((u32 OSAL_IOMEM *)addr)[i], data[i]); +} + +#ifdef ECORE_CONFIG_DIRECT_HWFN +static OSAL_INLINE void internal_ram_wr(struct ecore_hwfn *p_hwfn, + void OSAL_IOMEM *addr, + int size, u32 *data) +{ + __internal_ram_wr(p_hwfn, addr, size, data); +} +#else +static OSAL_INLINE void internal_ram_wr(void OSAL_IOMEM *addr, + int size, u32 *data) +{ + __internal_ram_wr(OSAL_NULL, addr, size, data); +} +#endif +#endif + +struct ecore_hwfn; +struct ecore_ptt; + +enum ecore_coalescing_fsm { + ECORE_COAL_RX_STATE_MACHINE, + ECORE_COAL_TX_STATE_MACHINE +}; + +/** + * @brief ecore_int_cau_conf_pi - configure cau for a given + * status block + * + * @param p_hwfn + * @param p_ptt + * @param igu_sb_id + * @param pi_index + * @param state + * @param timeset + */ +void ecore_int_cau_conf_pi(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 igu_sb_id, + u32 pi_index, + enum ecore_coalescing_fsm coalescing_fsm, + u8 timeset); + +/** + * + * @brief ecore_int_igu_enable_int - enable device interrupts + * + * @param p_hwfn + * @param p_ptt + * @param int_mode - interrupt mode to use + */ +void ecore_int_igu_enable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_int_mode int_mode); + +/** + * + * @brief ecore_int_igu_disable_int - disable device interrupts + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_int_igu_disable_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * + * @brief ecore_int_igu_read_sisr_reg - Reads the single isr multiple dpc + * register from igu. + * + * @param p_hwfn + * + * @return u64 + */ +u64 ecore_int_igu_read_sisr_reg(struct ecore_hwfn *p_hwfn); + +#define ECORE_SP_SB_ID 0xffff +/** + * @brief ecore_int_sb_init - Initializes the sb_info structure. + * + * once the structure is initialized it can be passed to sb related functions. 
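+ *
+ * Minimal allocation/init sketch (illustrative only; PF code in
+ * ecore_int.c sizes the block with SB_ALIGNED_SIZE()):
+ *
+ *   p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys,
+ *                                    SB_ALIGNED_SIZE(p_hwfn));
+ *   rc = ecore_int_sb_init(p_hwfn, p_ptt, &sb_info,
+ *                          p_virt, p_phys, sb_id);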
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      points to an uninitialized (but
+ *                     allocated) sb_info structure
+ * @param sb_virt_addr
+ * @param sb_phy_addr
+ * @param sb_id        the sb_id to be used (zero based in driver)
+ *                     should use ECORE_SP_SB_ID for SP Status block
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_init(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       struct ecore_sb_info *sb_info,
+                                       void *sb_virt_addr,
+                                       dma_addr_t sb_phy_addr, u16 sb_id);
+/**
+ * @brief ecore_int_sb_setup - Setup the sb.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param sb_info      initialized sb_info structure
+ */
+void ecore_int_sb_setup(struct ecore_hwfn *p_hwfn,
+                        struct ecore_ptt *p_ptt, struct ecore_sb_info *sb_info);
+
+/**
+ * @brief ecore_int_sb_release - releases the sb_info structure.
+ *
+ * once the structure is released, its memory can be freed
+ *
+ * @param p_hwfn
+ * @param sb_info      points to an allocated sb_info structure
+ * @param sb_id        the sb_id to be used (zero based in driver)
+ *                     should never be equal to ECORE_SP_SB_ID
+ *                     (SP Status block)
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_int_sb_release(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_sb_info *sb_info,
+                                          u16 sb_id);
+
+/**
+ * @brief ecore_int_sp_dpc - To be called when an interrupt is received on the
+ *        default status block.
+ *
+ * @param p_hwfn - pointer to hwfn
+ *
+ */
+void ecore_int_sp_dpc(osal_int_ptr_t hwfn_cookie);
+
+/**
+ * @brief ecore_int_get_num_sbs - get the number of status
+ *        blocks configured for this function in the igu.
+ *
+ * @param p_hwfn
+ * @param p_sb_cnt_info
+ *
+ * @return
+ */
+void ecore_int_get_num_sbs(struct ecore_hwfn *p_hwfn,
+                           struct ecore_sb_cnt_info *p_sb_cnt_info);
+
+/**
+ * @brief ecore_int_disable_post_isr_release - performs the cleanup post ISR
+ *        release. The API needs to be called after releasing all slowpath IRQs
+ *        of the device.
+ *
+ * @param p_dev
+ *
+ */
+void ecore_int_disable_post_isr_release(struct ecore_dev *p_dev);
+
+#endif
diff --git a/drivers/net/qede/base/ecore_iov_api.h b/drivers/net/qede/base/ecore_iov_api.h
new file mode 100644
index 00000000..b34a9c6b
--- /dev/null
+++ b/drivers/net/qede/base/ecore_iov_api.h
@@ -0,0 +1,933 @@
+/*
+ * Copyright (c) 2016 QLogic Corporation.
+ * All rights reserved.
+ * www.qlogic.com
+ *
+ * See LICENSE.qede_pmd for copyright and licensing details.
+ */ + +#ifndef __ECORE_SRIOV_API_H__ +#define __ECORE_SRIOV_API_H__ + +#include "ecore_status.h" + +#define ECORE_VF_ARRAY_LENGTH (3) + +#define IS_VF(p_dev) ((p_dev)->b_is_vf) +#define IS_PF(p_dev) (!((p_dev)->b_is_vf)) +#ifdef CONFIG_ECORE_SRIOV +#define IS_PF_SRIOV(p_hwfn) (!!((p_hwfn)->p_dev->sriov_info.total_vfs)) +#else +#define IS_PF_SRIOV(p_hwfn) (0) +#endif +#define IS_PF_SRIOV_ALLOC(p_hwfn) (!!((p_hwfn)->pf_iov_info)) +#define IS_PF_PDA(p_hwfn) 0 /* @@TBD Michalk */ + +/* @@@ TBD MichalK - what should this number be*/ +#define ECORE_MAX_VF_CHAINS_PER_PF 16 + +/* vport update extended feature tlvs flags */ +enum ecore_iov_vport_update_flag { + ECORE_IOV_VP_UPDATE_ACTIVATE = 0, + ECORE_IOV_VP_UPDATE_VLAN_STRIP = 1, + ECORE_IOV_VP_UPDATE_TX_SWITCH = 2, + ECORE_IOV_VP_UPDATE_MCAST = 3, + ECORE_IOV_VP_UPDATE_ACCEPT_PARAM = 4, + ECORE_IOV_VP_UPDATE_RSS = 5, + ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN = 6, + ECORE_IOV_VP_UPDATE_SGE_TPA = 7, + ECORE_IOV_VP_UPDATE_MAX = 8, +}; + +struct ecore_mcp_link_params; +struct ecore_mcp_link_state; +struct ecore_mcp_link_capabilities; + +/* These defines are used by the hw-channel; should never change order */ +#define VFPF_ACQUIRE_OS_LINUX (0) +#define VFPF_ACQUIRE_OS_WINDOWS (1) +#define VFPF_ACQUIRE_OS_ESX (2) +#define VFPF_ACQUIRE_OS_SOLARIS (3) +#define VFPF_ACQUIRE_OS_LINUX_USERSPACE (4) + +struct ecore_vf_acquire_sw_info { + u32 driver_version; + u8 os_type; + bool override_fw_version; +}; + +struct ecore_public_vf_info { + /* These copies will later be reflected in the bulletin board, + * but this copy should be newer. + */ + u8 forced_mac[ETH_ALEN]; + u16 forced_vlan; +}; + +#ifdef CONFIG_ECORE_SW_CHANNEL +/* This is SW channel related only... */ +enum mbx_state { + VF_PF_UNKNOWN_STATE = 0, + VF_PF_WAIT_FOR_START_REQUEST = 1, + VF_PF_WAIT_FOR_NEXT_CHUNK_OF_REQUEST = 2, + VF_PF_REQUEST_IN_PROCESSING = 3, + VF_PF_RESPONSE_READY = 4, +}; + +struct ecore_iov_sw_mbx { + enum mbx_state mbx_state; + + u32 request_size; + u32 request_offset; + + u32 response_size; + u32 response_offset; +}; + +/** + * @brief Get the vf sw mailbox params + * + * @param p_hwfn + * @param rel_vf_id + * + * @return struct ecore_iov_sw_mbx* + */ +struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id); +#endif + +#ifdef CONFIG_ECORE_SRIOV +/** + * @brief mark/clear all VFs before/after an incoming PCIe sriov + * disable. + * + * @param p_hwfn + * @param to_disable + */ +void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable); + +/** + * @brief mark/clear chosen VFs before/after an incoming PCIe + * sriov disable. + * + * @param p_hwfn + * @param to_disable + */ +void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, u8 to_disable); + +/** + * + * @brief ecore_iov_init_hw_for_vf - initialize the HW for + * enabling access of a VF. Also includes preparing the + * IGU for VF access. This needs to be called AFTER hw is + * initialized and BEFORE VF is loaded inside the VM. 
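+ *
+ * Expected ordering (sketch, details elided):
+ *
+ *   bring the PF/hw up first;
+ *   ecore_iov_init_hw_for_vf(p_hwfn, p_ptt, rel_vf_id, num_rx_queues);
+ *   only then let the VM load the VF driver.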
+ * + * @param p_hwfn + * @param p_ptt + * @param rel_vf_id + * @param num_rx_queues + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id, u16 num_rx_queues); + +/** + * @brief ecore_iov_process_mbx_req - process a request received + * from the VF + * + * @param p_hwfn + * @param p_ptt + * @param vfid + */ +void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, int vfid); + +/** + * @brief ecore_iov_release_hw_for_vf - called once upper layer + * knows VF is done with - can release any resources + * allocated for VF at this point. this must be done once + * we know VF is no longer loaded in VM. + * + * @param p_hwfn + * @param p_ptt + * @param rel_vf_id + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id); + +#ifndef LINUX_REMOVE +/** + * @brief ecore_iov_set_vf_ctx - set a context for a given VF + * + * @param p_hwfn + * @param vf_id + * @param ctx + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, + u16 vf_id, void *ctx); +#endif + +/** + * @brief FLR cleanup for all VFs + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief FLR cleanup for single VF + * + * @param p_hwfn + * @param p_ptt + * @param rel_vf_id + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 rel_vf_id); + +/** + * @brief Update the bulletin with link information. Notice this does NOT + * send a bulletin update, only updates the PF's bulletin. + * + * @param p_hwfn + * @param p_vf + * @param params - the link params to use for the VF link configuration + * @param link - the link output to use for the VF link configuration + * @param p_caps - the link default capabilities. + */ +void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities *p_caps); + +/** + * @brief Returns link information as perceived by VF. + * + * @param p_hwfn + * @param p_vf + * @param p_params - the link params visible to vf. + * @param p_link - the link state visible to vf. + * @param p_caps - the link default capabilities visible to vf. + */ +void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities *p_caps); + +/** + * @brief return if the VF is pending FLR + * + * @param p_hwfn + * @param rel_vf_id + * + * @return bool + */ +bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id); + +/** + * @brief Check if given VF ID @vfid is valid + * w.r.t. 
@b_enabled_only value + * if b_enabled_only = true - only enabled VF id is valid + * else any VF id less than max_vfs is valid + * + * @param p_hwfn + * @param rel_vf_id - Relative VF ID + * @param b_enabled_only - consider only enabled VF + * + * @return bool - true for valid VF ID + */ +bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, + int rel_vf_id, bool b_enabled_only); + +/** + * @brief Get VF's public info structure + * + * @param p_hwfn + * @param vfid - Relative VF ID + * @param b_enabled_only - false if want to access even if vf is disabled + * + * @return struct ecore_public_vf_info * + */ +struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn + *p_hwfn, u16 vfid, + bool b_enabled_only); + +/** + * @brief Set pending events bitmap for given @vfid + * + * @param p_hwfn + * @param vfid + */ +void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid); + +/** + * @brief Copy pending events bitmap in @events and clear + * original copy of events + * + * @param p_hwfn + */ +void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn, + u64 *events); + +/** + * @brief Copy VF's message to PF's buffer + * + * @param p_hwfn + * @param ptt + * @param vfid + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *ptt, int vfid); +/** + * @brief Set forced MAC address in PFs copy of bulletin board + * and configures FW/HW to support the configuration. + * + * @param p_hwfn + * @param mac + * @param vfid + */ +void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, + u8 *mac, int vfid); + +/** + * @brief Set MAC address in PFs copy of bulletin board without + * configuring FW/HW. + * + * @param p_hwfn + * @param mac + * @param vfid + */ +enum _ecore_status_t ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, + u8 *mac, int vfid); + +/** + * @brief Set forced VLAN [pvid] in PFs copy of bulletin board + * and configures FW/HW to support the configuration. + * Setting of pvid 0 would clear the feature. + * @param p_hwfn + * @param pvid + * @param vfid + */ +void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, + u16 pvid, int vfid); + +/** + * @brief Set default behaviour of VF in case no vlans are configured for it + * whether to accept only untagged traffic or all. + * Must be called prior to the VF vport-start. + * + * @param p_hwfn + * @param b_untagged_only + * @param vfid + * + * @return ECORE_SUCCESS if configuration would stick. + */ +enum _ecore_status_t +ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, + bool b_untagged_only, int vfid); +/** + * @brief Get VFs opaque fid. + * + * @param p_hwfn + * @param vfid + * @param opaque_fid + */ +void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, + u16 *opaque_fid); + +/** + * @brief Get VFs VPORT id. + * + * @param p_hwfn + * @param vfid + * @param vport id + */ +void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid, + u8 *p_vport_id); + +/** + * @brief Check if VF has VPORT instance. This can be used + * to check if VPORT is active. 
+ * + * @param p_hwfn + */ +bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid); + +/** + * @brief PF posts the bulletin to the VF + * + * @param p_hwfn + * @param p_vf + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn, + int vfid, + struct ecore_ptt *p_ptt); + +/** + * @brief Check if given VF (@vfid) is marked as stopped + * + * @param p_hwfn + * @param vfid + * + * @return bool : true if stopped + */ +bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid); + +/** + * @brief Configure VF anti spoofing + * + * @param p_hwfn + * @param vfid + * @param val - spoofchk value - true/false + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, + int vfid, bool val); + +/** + * @brief Get VF's configured spoof value. + * + * @param p_hwfn + * @param vfid + * + * @return bool - spoofchk value - true/false + */ +bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid); + +/** + * @brief Check for SRIOV sanity by PF. + * + * @param p_hwfn + * @param vfid + * + * @return bool - true if sanity checks passes, else false + */ +bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid); + +/** + * @brief Get the num of VF chains. + * + * @param p_hwfn + * + * @return u8 + */ +u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn); + +/** + * @brief Get vf request mailbox params + * + * @param p_hwfn + * @param rel_vf_id + * @param pp_req_virt_addr + * @param p_req_virt_size + */ +void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, + void **pp_req_virt_addr, + u16 *p_req_virt_size); + +/** + * @brief Get vf mailbox params + * + * @param p_hwfn + * @param rel_vf_id + * @param pp_reply_virt_addr + * @param p_reply_virt_size + */ +void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, + void **pp_reply_virt_addr, + u16 *p_reply_virt_size); + +/** + * @brief Validate if the given length is a valid vfpf message + * length + * + * @param length + * + * @return bool + */ +bool ecore_iov_is_valid_vfpf_msg_length(u32 length); + +/** + * @brief Return the max pfvf message length + * + * @return u32 + */ +u32 ecore_iov_pfvf_msg_length(void); + +/** + * @brief Returns forced MAC address if one is configured + * + * @parm p_hwfn + * @parm rel_vf_id + * + * @return OSAL_NULL if mac isn't forced; Otherwise, returns MAC. + */ +u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id); + +/** + * @brief Returns pvid if one is configured + * + * @parm p_hwfn + * @parm rel_vf_id + * + * @return 0 if no pvid is configured, otherwise the pvid. + */ +u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id); +/** + * @brief Configure VFs tx rate + * + * @param p_hwfn + * @param p_ptt + * @param vfid + * @param val - tx rate value in Mb/sec. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int vfid, int val); + +/** + * @brief - Retrieves the statistics associated with a VF + * + * @param p_hwfn + * @param p_ptt + * @param vfid + * @param p_stats - this will be filled with the VF statistics + * + * @return ECORE_SUCCESS iff statistics were retrieved. Error otherwise. 
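+ *
+ * Illustrative use ('stats' is a placeholder local):
+ *
+ *   struct ecore_eth_stats stats;
+ *
+ *   if (ecore_iov_get_vf_stats(p_hwfn, p_ptt, vfid, &stats) ==
+ *       ECORE_SUCCESS)
+ *       report the per-VF counters held in stats;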
+ */
+enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn,
+                                            struct ecore_ptt *p_ptt,
+                                            int vfid,
+                                            struct ecore_eth_stats *p_stats);
+
+/**
+ * @brief - Retrieves num of rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return num of rxqs chains.
+ */
+u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves num of active rxqs chains
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves ctx pointer
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Retrieves VF's num sbs
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is waiting for acquire
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired but not initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn,
+                                              u16 rel_vf_id);
+
+/**
+ * @brief - Return true if VF is acquired and initialized
+ *
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return
+ */
+bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id);
+
+/**
+ * @brief - Get VF's vport min rate configured.
+ * @param p_hwfn
+ * @param rel_vf_id
+ *
+ * @return - rate in Mbps
+ */
+int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid);
+
+/**
+ * @brief - Configure min rate for VF's vport.
+ * @param p_dev + * @param vfid + * @param - rate in Mbps + * + * @return + */ +enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev, + int vfid, u32 rate); +#else +static OSAL_INLINE void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, + u8 to_disable) +{ +} + +static OSAL_INLINE void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, + u8 to_disable) +{ +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_init_hw_for_vf(struct + ecore_hwfn + * p_hwfn, + struct + ecore_ptt + * p_ptt, + u16 rel_vf_id, + u16 + num_rx_queues) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int vfid) +{ +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_release_hw_for_vf(struct + ecore_hwfn + * p_hwfn, + struct + ecore_ptt + * p_ptt, + u16 + rel_vf_id) +{ + return ECORE_SUCCESS; +} + +#ifndef LINUX_REMOVE +static OSAL_INLINE enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn + *p_hwfn, u16 vf_id, + void *ctx) +{ + return ECORE_INVAL; +} +#endif +static OSAL_INLINE enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct + ecore_hwfn + * p_hwfn, + struct + ecore_ptt + * p_ptt) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_single_vf_flr_cleanup( + struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, u16 rel_vf_id) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities + *p_caps) +{ +} + +static OSAL_INLINE void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities + *p_caps) +{ +} + +static OSAL_INLINE bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return false; +} + +static OSAL_INLINE bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, + int rel_vf_id, + bool b_enabled_only) +{ + return false; +} + +static OSAL_INLINE struct ecore_public_vf_info * + ecore_iov_get_public_vf_info(struct ecore_hwfn *p_hwfn, u16 vfid, + bool b_enabled_only) +{ + return OSAL_NULL; +} + +static OSAL_INLINE void ecore_iov_pf_add_pending_events(struct ecore_hwfn + *p_hwfn, u8 vfid) +{ +} + +static OSAL_INLINE void ecore_iov_pf_get_and_clear_pending_events(struct + ecore_hwfn + * p_hwfn, + u64 *events) +{ +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn + *p_hwfn, + struct ecore_ptt + *ptt, int vfid) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn + *p_hwfn, u8 *mac, + int vfid) +{ +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_bulletin_set_mac(struct + ecore_hwfn + * p_hwfn, + u8 *mac, + int vfid) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn + p_hwfn, u16 pvid, + int vfid) +{ +} + +static OSAL_INLINE void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, + int vfid, u16 *opaque_fid) +{ +} + +static OSAL_INLINE void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, + int vfid, u8 *p_vport_id) +{ +} + +static OSAL_INLINE bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn + *p_hwfn, int vfid) +{ + return false; +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_post_vf_bulletin(struct + ecore_hwfn + * p_hwfn, + int vfid, + struct + ecore_ptt + * 
p_ptt) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, + int vfid) +{ + return false; +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn + *p_hwfn, + int vfid, + bool val) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, + int vfid) +{ + return false; +} + +static OSAL_INLINE bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, + int vfid) +{ + return false; +} + +static OSAL_INLINE u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn) +{ + return 0; +} + +static OSAL_INLINE void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn + *p_hwfn, + u16 rel_vf_id, + void + **pp_req_virt_addr, + u16 * + p_req_virt_size) +{ +} + +static OSAL_INLINE void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn + *p_hwfn, + u16 rel_vf_id, + void + **pp_reply_virt_addr, + u16 * + p_reply_virt_size) +{ +} + +static OSAL_INLINE bool ecore_iov_is_valid_vfpf_msg_length(u32 length) +{ + return false; +} + +static OSAL_INLINE u32 ecore_iov_pfvf_msg_length(void) +{ + return 0; +} + +static OSAL_INLINE u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn + *p_hwfn, u16 rel_vf_id) +{ + return OSAL_NULL; +} + +static OSAL_INLINE u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn + *p_hwfn, + u16 rel_vf_id) +{ + return 0; +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_tx_rate(struct + ecore_hwfn + * p_hwfn, + struct + ecore_ptt + * p_ptt, + int vfid, + int val) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return 0; +} + +static OSAL_INLINE u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn + *p_hwfn, u16 rel_vf_id) +{ + return 0; +} + +static OSAL_INLINE void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return OSAL_NULL; +} + +static OSAL_INLINE u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return 0; +} + +static OSAL_INLINE bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn + *p_hwfn, u16 rel_vf_id) +{ + return false; +} + +static OSAL_INLINE bool ecore_iov_is_vf_acquired_not_initialized(struct + ecore_hwfn + * p_hwfn, + u16 rel_vf_id) +{ + return false; +} + +static OSAL_INLINE bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + return false; +} + +static OSAL_INLINE int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, + int vfid) +{ + return 0; +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_configure_min_tx_rate( + struct ecore_dev *p_dev, int vfid, u32 rate) +{ + return ECORE_INVAL; +} +#endif +#endif diff --git a/drivers/net/qede/base/ecore_iro.h b/drivers/net/qede/base/ecore_iro.h new file mode 100644 index 00000000..dd53ea9c --- /dev/null +++ b/drivers/net/qede/base/ecore_iro.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __IRO_H__ +#define __IRO_H__ + +/* Ystorm flow control mode. 
Use enum fw_flow_ctrl_mode */ +#define YSTORM_FLOW_CONTROL_MODE_OFFSET (IRO[0].base) +#define YSTORM_FLOW_CONTROL_MODE_SIZE (IRO[0].size) +/* Tstorm port statistics */ +#define TSTORM_PORT_STAT_OFFSET(port_id) \ +(IRO[1].base + ((port_id) * IRO[1].m1)) +#define TSTORM_PORT_STAT_SIZE (IRO[1].size) +/* Ustorm VF-PF Channel ready flag */ +#define USTORM_VF_PF_CHANNEL_READY_OFFSET(vf_id) \ +(IRO[3].base + ((vf_id) * IRO[3].m1)) +#define USTORM_VF_PF_CHANNEL_READY_SIZE (IRO[3].size) +/* Ustorm Final flr cleanup ack */ +#define USTORM_FLR_FINAL_ACK_OFFSET(pf_id) \ +(IRO[4].base + ((pf_id) * IRO[4].m1)) +#define USTORM_FLR_FINAL_ACK_SIZE (IRO[4].size) +/* Ustorm Event ring consumer */ +#define USTORM_EQE_CONS_OFFSET(pf_id) \ +(IRO[5].base + ((pf_id) * IRO[5].m1)) +#define USTORM_EQE_CONS_SIZE (IRO[5].size) +/* Ustorm Common Queue ring consumer */ +#define USTORM_COMMON_QUEUE_CONS_OFFSET(global_queue_id) \ +(IRO[6].base + ((global_queue_id) * IRO[6].m1)) +#define USTORM_COMMON_QUEUE_CONS_SIZE (IRO[6].size) +/* Xstorm Integration Test Data */ +#define XSTORM_INTEG_TEST_DATA_OFFSET (IRO[7].base) +#define XSTORM_INTEG_TEST_DATA_SIZE (IRO[7].size) +/* Ystorm Integration Test Data */ +#define YSTORM_INTEG_TEST_DATA_OFFSET (IRO[8].base) +#define YSTORM_INTEG_TEST_DATA_SIZE (IRO[8].size) +/* Pstorm Integration Test Data */ +#define PSTORM_INTEG_TEST_DATA_OFFSET (IRO[9].base) +#define PSTORM_INTEG_TEST_DATA_SIZE (IRO[9].size) +/* Tstorm Integration Test Data */ +#define TSTORM_INTEG_TEST_DATA_OFFSET (IRO[10].base) +#define TSTORM_INTEG_TEST_DATA_SIZE (IRO[10].size) +/* Mstorm Integration Test Data */ +#define MSTORM_INTEG_TEST_DATA_OFFSET (IRO[11].base) +#define MSTORM_INTEG_TEST_DATA_SIZE (IRO[11].size) +/* Ustorm Integration Test Data */ +#define USTORM_INTEG_TEST_DATA_OFFSET (IRO[12].base) +#define USTORM_INTEG_TEST_DATA_SIZE (IRO[12].size) +/* Mstorm queue statistics */ +#define MSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ +(IRO[17].base + ((stat_counter_id) * IRO[17].m1)) +#define MSTORM_QUEUE_STAT_SIZE (IRO[17].size) +/* Mstorm producers */ +#define MSTORM_PRODS_OFFSET(queue_id) \ +(IRO[18].base + ((queue_id) * IRO[18].m1)) +#define MSTORM_PRODS_SIZE (IRO[18].size) +/* TPA agregation timeout in us resolution (on ASIC) */ +#define MSTORM_TPA_TIMEOUT_US_OFFSET (IRO[19].base) +#define MSTORM_TPA_TIMEOUT_US_SIZE (IRO[19].size) +/* Ustorm queue statistics */ +#define USTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ +(IRO[20].base + ((stat_counter_id) * IRO[20].m1)) +#define USTORM_QUEUE_STAT_SIZE (IRO[20].size) +/* Ustorm queue zone */ +#define USTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ +(IRO[21].base + ((queue_id) * IRO[21].m1)) +#define USTORM_ETH_QUEUE_ZONE_SIZE (IRO[21].size) +/* Pstorm queue statistics */ +#define PSTORM_QUEUE_STAT_OFFSET(stat_counter_id) \ +(IRO[22].base + ((stat_counter_id) * IRO[22].m1)) +#define PSTORM_QUEUE_STAT_SIZE (IRO[22].size) +/* Tstorm last parser message */ +#define TSTORM_ETH_PRS_INPUT_OFFSET (IRO[23].base) +#define TSTORM_ETH_PRS_INPUT_SIZE (IRO[23].size) +/* Tstorm Eth limit Rx rate */ +#define ETH_RX_RATE_LIMIT_OFFSET(pf_id) \ +(IRO[24].base + ((pf_id) * IRO[24].m1)) +#define ETH_RX_RATE_LIMIT_SIZE (IRO[24].size) +/* Ystorm queue zone */ +#define YSTORM_ETH_QUEUE_ZONE_OFFSET(queue_id) \ +(IRO[25].base + ((queue_id) * IRO[25].m1)) +#define YSTORM_ETH_QUEUE_ZONE_SIZE (IRO[25].size) +/* Ystorm cqe producer */ +#define YSTORM_TOE_CQ_PROD_OFFSET(rss_id) \ +(IRO[26].base + ((rss_id) * IRO[26].m1)) +#define YSTORM_TOE_CQ_PROD_SIZE (IRO[26].size) +/* Ustorm cqe producer */ +#define 
USTORM_TOE_CQ_PROD_OFFSET(rss_id) \ +(IRO[27].base + ((rss_id) * IRO[27].m1)) +#define USTORM_TOE_CQ_PROD_SIZE (IRO[27].size) +/* Ustorm grq producer */ +#define USTORM_TOE_GRQ_PROD_OFFSET(pf_id) \ +(IRO[28].base + ((pf_id) * IRO[28].m1)) +#define USTORM_TOE_GRQ_PROD_SIZE (IRO[28].size) +/* Tstorm cmdq-cons of given command queue-id */ +#define TSTORM_SCSI_CMDQ_CONS_OFFSET(cmdq_queue_id) \ +(IRO[29].base + ((cmdq_queue_id) * IRO[29].m1)) +#define TSTORM_SCSI_CMDQ_CONS_SIZE (IRO[29].size) +#define TSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ +(IRO[30].base + ((func_id) * IRO[30].m1) + ((bdq_id) * IRO[30].m2)) +#define TSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[30].size) +/* Mstorm rq-cons of given queue-id */ +#define MSTORM_SCSI_RQ_CONS_OFFSET(rq_queue_id) \ +(IRO[31].base + ((rq_queue_id) * IRO[31].m1)) +#define MSTORM_SCSI_RQ_CONS_SIZE (IRO[31].size) +/* Mstorm bdq-external-producer of given BDQ function ID, BDqueue-id */ +#define MSTORM_SCSI_BDQ_EXT_PROD_OFFSET(func_id, bdq_id) \ +(IRO[32].base + ((func_id) * IRO[32].m1) + ((bdq_id) * IRO[32].m2)) +#define MSTORM_SCSI_BDQ_EXT_PROD_SIZE (IRO[32].size) + +#endif /* __IRO_H__ */ diff --git a/drivers/net/qede/base/ecore_iro_values.h b/drivers/net/qede/base/ecore_iro_values.h new file mode 100644 index 00000000..c818b580 --- /dev/null +++ b/drivers/net/qede/base/ecore_iro_values.h @@ -0,0 +1,59 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __IRO_VALUES_H__ +#define __IRO_VALUES_H__ + +static const struct iro iro_arr[44] = { + {0x0, 0x0, 0x0, 0x0, 0x8}, + {0x4db0, 0x60, 0x0, 0x0, 0x60}, + {0x6418, 0x20, 0x0, 0x0, 0x20}, + {0x500, 0x8, 0x0, 0x0, 0x4}, + {0x480, 0x8, 0x0, 0x0, 0x4}, + {0x0, 0x8, 0x0, 0x0, 0x2}, + {0x80, 0x8, 0x0, 0x0, 0x2}, + {0x4938, 0x0, 0x0, 0x0, 0x78}, + {0x3df0, 0x0, 0x0, 0x0, 0x78}, + {0x29b0, 0x0, 0x0, 0x0, 0x78}, + {0x4d38, 0x0, 0x0, 0x0, 0x78}, + {0x56c8, 0x0, 0x0, 0x0, 0x78}, + {0x7e48, 0x0, 0x0, 0x0, 0x78}, + {0xa28, 0x8, 0x0, 0x0, 0x8}, + {0x61f8, 0x10, 0x0, 0x0, 0x10}, + {0xb500, 0x30, 0x0, 0x0, 0x30}, + {0x95b8, 0x30, 0x0, 0x0, 0x30}, + {0x5898, 0x40, 0x0, 0x0, 0x40}, + {0x1f8, 0x10, 0x0, 0x0, 0x8}, + {0xa228, 0x0, 0x0, 0x0, 0x4}, + {0x8050, 0x40, 0x0, 0x0, 0x30}, + {0xcf8, 0x8, 0x0, 0x0, 0x8}, + {0x2b48, 0x80, 0x0, 0x0, 0x38}, + {0xadf0, 0x0, 0x0, 0x0, 0xf0}, + {0xaee0, 0x8, 0x0, 0x0, 0x8}, + {0x80, 0x8, 0x0, 0x0, 0x8}, + {0xac0, 0x8, 0x0, 0x0, 0x8}, + {0x2578, 0x8, 0x0, 0x0, 0x8}, + {0x24f8, 0x8, 0x0, 0x0, 0x8}, + {0x0, 0x8, 0x0, 0x0, 0x8}, + {0x200, 0x10, 0x8, 0x0, 0x8}, + {0x17f8, 0x8, 0x0, 0x0, 0x2}, + {0x19f8, 0x10, 0x8, 0x0, 0x2}, + {0xd988, 0x38, 0x0, 0x0, 0x24}, + {0x11040, 0x10, 0x0, 0x0, 0x8}, + {0x11670, 0x38, 0x0, 0x0, 0x18}, + {0xaeb8, 0x30, 0x0, 0x0, 0x10}, + {0x86f8, 0x28, 0x0, 0x0, 0x18}, + {0xebf8, 0x10, 0x0, 0x0, 0x10}, + {0xde08, 0x40, 0x0, 0x0, 0x30}, + {0x121a0, 0x38, 0x0, 0x0, 0x8}, + {0xf060, 0x20, 0x0, 0x0, 0x20}, + {0x2b80, 0x80, 0x0, 0x0, 0x10}, + {0x50a0, 0x10, 0x0, 0x0, 0x10}, +}; + +#endif /* __IRO_VALUES_H__ */ diff --git a/drivers/net/qede/base/ecore_l2.c b/drivers/net/qede/base/ecore_l2.c new file mode 100644 index 00000000..9e6ef5a4 --- /dev/null +++ b/drivers/net/qede/base/ecore_l2.c @@ -0,0 +1,1807 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#include "bcm_osal.h" + +#include "ecore.h" +#include "ecore_status.h" +#include "ecore_hsi_eth.h" +#include "ecore_chain.h" +#include "ecore_spq.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_l2.h" +#include "ecore_sp_commands.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "reg_addr.h" +#include "ecore_int.h" +#include "ecore_hw.h" +#include "ecore_vf.h" +#include "ecore_sriov.h" +#include "ecore_mcp.h" + +#define ECORE_MAX_SGES_NUM 16 +#define CRC32_POLY 0x1edc6f41 + +enum _ecore_status_t +ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params) +{ + struct vport_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + u8 abs_vport_id = 0; + u16 rx_mode = 0; + + rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_START, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vport_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->mtu = OSAL_CPU_TO_LE16(p_params->mtu); + p_ramrod->inner_vlan_removal_en = p_params->remove_inner_vlan; + p_ramrod->handle_ptp_pkts = p_params->handle_ptp_pkts; + p_ramrod->drop_ttl0_en = p_params->drop_ttl0; + p_ramrod->untagged = p_params->only_untagged; + p_ramrod->zero_placement_offset = p_params->zero_placement_offset; + + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, 1); + SET_FIELD(rx_mode, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, 1); + + p_ramrod->rx_mode.state = OSAL_CPU_TO_LE16(rx_mode); + + /* TPA related fields */ + OSAL_MEMSET(&p_ramrod->tpa_param, 0, + sizeof(struct eth_vport_tpa_param)); + p_ramrod->tpa_param.max_buff_num = p_params->max_buffers_per_cqe; + + switch (p_params->tpa_mode) { + case ECORE_TPA_MODE_GRO: + p_ramrod->tpa_param.tpa_max_aggs_num = ETH_TPA_MAX_AGGS_NUM; + p_ramrod->tpa_param.tpa_max_size = (u16)-1; + p_ramrod->tpa_param.tpa_min_size_to_cont = p_params->mtu / 2; + p_ramrod->tpa_param.tpa_min_size_to_start = p_params->mtu / 2; + p_ramrod->tpa_param.tpa_ipv4_en_flg = 1; + p_ramrod->tpa_param.tpa_ipv6_en_flg = 1; + p_ramrod->tpa_param.tpa_pkt_split_flg = 1; + p_ramrod->tpa_param.tpa_gro_consistent_flg = 1; + break; + default: + break; + } + + p_ramrod->tx_switching_en = p_params->tx_switching; +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + p_ramrod->tx_switching_en = 0; +#endif + + /* Software Function ID in hwfn (PFs are 0 - 15, VFs are 16 - 135) */ + p_ramrod->sw_fid = ecore_concrete_to_sw_fid(p_hwfn->p_dev, + p_params->concrete_fid); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t +ecore_sp_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params) +{ + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_vport_start(p_hwfn, p_params->vport_id, + p_params->mtu, + p_params->remove_inner_vlan, + p_params->tpa_mode, + p_params->max_buffers_per_cqe, + p_params->only_untagged); + + return ecore_sp_eth_vport_start(p_hwfn, p_params); +} + +static enum _ecore_status_t +ecore_sp_vport_update_rss(struct ecore_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct 
ecore_rss_params *p_rss) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct eth_vport_rss_config *p_config; + u16 abs_l2_queue = 0; + int i; + + if (!p_rss) { + p_ramrod->common.update_rss_flg = 0; + return rc; + } + p_config = &p_ramrod->rss_config; + + OSAL_BUILD_BUG_ON(ECORE_RSS_IND_TABLE_SIZE != + ETH_RSS_IND_TABLE_ENTRIES_NUM); + + rc = ecore_fw_rss_eng(p_hwfn, p_rss->rss_eng_id, &p_config->rss_id); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod->common.update_rss_flg = p_rss->update_rss_config; + p_config->update_rss_capabilities = p_rss->update_rss_capabilities; + p_config->update_rss_ind_table = p_rss->update_rss_ind_table; + p_config->update_rss_key = p_rss->update_rss_key; + + p_config->rss_mode = p_rss->rss_enable ? + ETH_VPORT_RSS_MODE_REGULAR : ETH_VPORT_RSS_MODE_DISABLED; + + p_config->capabilities = 0; + + SET_FIELD(p_config->capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV4)); + SET_FIELD(p_config->capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV6)); + SET_FIELD(p_config->capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_TCP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV4_TCP)); + SET_FIELD(p_config->capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_TCP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV6_TCP)); + SET_FIELD(p_config->capabilities, + ETH_VPORT_RSS_CONFIG_IPV4_UDP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV4_UDP)); + SET_FIELD(p_config->capabilities, + ETH_VPORT_RSS_CONFIG_IPV6_UDP_CAPABILITY, + !!(p_rss->rss_caps & ECORE_RSS_IPV6_UDP)); + p_config->tbl_size = p_rss->rss_table_size_log; + p_config->capabilities = OSAL_CPU_TO_LE16(p_config->capabilities); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, + "update rss flag %d, rss_mode = %d, update_caps = %d, capabilities = %d, update_ind = %d, update_rss_key = %d\n", + p_ramrod->common.update_rss_flg, + p_config->rss_mode, + p_config->update_rss_capabilities, + p_config->capabilities, + p_config->update_rss_ind_table, p_config->update_rss_key); + + for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) { + rc = ecore_fw_l2_queue(p_hwfn, + (u8)p_rss->rss_ind_table[i], + &abs_l2_queue); + if (rc != ECORE_SUCCESS) + return rc; + + p_config->indirection_table[i] = OSAL_CPU_TO_LE16(abs_l2_queue); + DP_VERBOSE(p_hwfn, ECORE_MSG_IFUP, "i= %d, queue = %d\n", + i, p_config->indirection_table[i]); + } + + for (i = 0; i < 10; i++) + p_config->rss_key[i] = OSAL_CPU_TO_LE32(p_rss->rss_key[i]); + + return rc; +} + +static void +ecore_sp_update_accept_mode(struct ecore_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct ecore_filter_accept_flags flags) +{ + p_ramrod->common.update_rx_mode_flg = flags.update_rx_mode_config; + p_ramrod->common.update_tx_mode_flg = flags.update_tx_mode_config; + +#ifndef ASIC_ONLY + /* On B0 emulation we cannot enable Tx, since this would cause writes + * to PVFC HW block which isn't implemented in emulation. 
+ */ + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Non-Asic - prevent Tx mode in vport update\n"); + p_ramrod->common.update_tx_mode_flg = 0; + } +#endif + + /* Set Rx mode accept flags */ + if (p_ramrod->common.update_rx_mode_flg) { + __le16 *state = &p_ramrod->rx_mode.state; + u8 accept_filter = flags.rx_accept_filter; + +/* + * SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, + * !!(accept_filter & ECORE_ACCEPT_NONE)); + */ + + SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_DROP_ALL, + !(!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) || + !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); + + SET_FIELD(*state, ETH_VPORT_RX_MODE_UCAST_ACCEPT_UNMATCHED, + !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED)); +/* + * SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, + * !!(accept_filter & ECORE_ACCEPT_NONE)); + */ + SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_DROP_ALL, + !(!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) || + !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(*state, ETH_VPORT_RX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(*state, ETH_VPORT_RX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & ECORE_ACCEPT_BCAST)); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "p_ramrod->rx_mode.state = 0x%x\n", + p_ramrod->rx_mode.state); + } + + /* Set Tx mode accept flags */ + if (p_ramrod->common.update_tx_mode_flg) { + __le16 *state = &p_ramrod->tx_mode.state; + u8 accept_filter = flags.tx_accept_filter; + + SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_DROP_ALL, + !!(accept_filter & ECORE_ACCEPT_NONE)); + + SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_DROP_ALL, + !!(accept_filter & ECORE_ACCEPT_NONE)); + + SET_FIELD(*state, ETH_VPORT_TX_MODE_MCAST_ACCEPT_ALL, + (!!(accept_filter & ECORE_ACCEPT_MCAST_MATCHED) && + !!(accept_filter & ECORE_ACCEPT_MCAST_UNMATCHED))); + + SET_FIELD(*state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL, + !!(accept_filter & ECORE_ACCEPT_BCAST)); + /* @DPDK */ + /* ETH_VPORT_RX_MODE_UCAST_ACCEPT_ALL and + * ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL + * needs to be set for VF-VF communication to work + * when dest macaddr is unknown. 
+ */ + SET_FIELD(*state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL, + (!!(accept_filter & ECORE_ACCEPT_UCAST_MATCHED) && + !!(accept_filter & ECORE_ACCEPT_UCAST_UNMATCHED))); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "p_ramrod->tx_mode.state = 0x%x\n", + p_ramrod->tx_mode.state); + } +} + +static void +ecore_sp_vport_update_sge_tpa(struct ecore_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct ecore_sge_tpa_params *p_params) +{ + struct eth_vport_tpa_param *p_tpa; + + if (!p_params) { + p_ramrod->common.update_tpa_param_flg = 0; + p_ramrod->common.update_tpa_en_flg = 0; + p_ramrod->common.update_tpa_param_flg = 0; + return; + } + + p_ramrod->common.update_tpa_en_flg = p_params->update_tpa_en_flg; + p_tpa = &p_ramrod->tpa_param; + p_tpa->tpa_ipv4_en_flg = p_params->tpa_ipv4_en_flg; + p_tpa->tpa_ipv6_en_flg = p_params->tpa_ipv6_en_flg; + p_tpa->tpa_ipv4_tunn_en_flg = p_params->tpa_ipv4_tunn_en_flg; + p_tpa->tpa_ipv6_tunn_en_flg = p_params->tpa_ipv6_tunn_en_flg; + + p_ramrod->common.update_tpa_param_flg = p_params->update_tpa_param_flg; + p_tpa->max_buff_num = p_params->max_buffers_per_cqe; + p_tpa->tpa_pkt_split_flg = p_params->tpa_pkt_split_flg; + p_tpa->tpa_hdr_data_split_flg = p_params->tpa_hdr_data_split_flg; + p_tpa->tpa_gro_consistent_flg = p_params->tpa_gro_consistent_flg; + p_tpa->tpa_max_aggs_num = p_params->tpa_max_aggs_num; + p_tpa->tpa_max_size = p_params->tpa_max_size; + p_tpa->tpa_min_size_to_start = p_params->tpa_min_size_to_start; + p_tpa->tpa_min_size_to_cont = p_params->tpa_min_size_to_cont; +} + +static void +ecore_sp_update_mcast_bin(struct ecore_hwfn *p_hwfn, + struct vport_update_ramrod_data *p_ramrod, + struct ecore_sp_vport_update_params *p_params) +{ + int i; + + OSAL_MEMSET(&p_ramrod->approx_mcast.bins, 0, + sizeof(p_ramrod->approx_mcast.bins)); + + if (!p_params->update_approx_mcast_flg) + return; + + p_ramrod->common.update_approx_mcast_flg = 1; + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + u32 *p_bins = (u32 *)p_params->bins; + + p_ramrod->approx_mcast.bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); + } +} + +enum _ecore_status_t +ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct ecore_rss_params *p_rss_params = p_params->rss_params; + struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + u8 abs_vport_id = 0, val; + u16 wordval; + + if (IS_VF(p_hwfn->p_dev)) { + rc = ecore_vf_pf_vport_update(p_hwfn, p_params); + return rc; + } + + rc = ecore_fw_vport(p_hwfn, p_params->vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_params->opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + /* Copy input params to ramrod according to FW struct */ + p_ramrod = &p_ent->ramrod.vport_update; + + p_ramrod->common.vport_id = abs_vport_id; + + p_ramrod->common.rx_active_flg = p_params->vport_active_rx_flg; + p_ramrod->common.tx_active_flg = p_params->vport_active_tx_flg; + val = p_params->update_vport_active_rx_flg; + p_ramrod->common.update_rx_active_flg = val; + val = 
p_params->update_vport_active_tx_flg; + p_ramrod->common.update_tx_active_flg = val; + val = p_params->update_inner_vlan_removal_flg; + p_ramrod->common.update_inner_vlan_removal_en_flg = val; + val = p_params->inner_vlan_removal_flg; + p_ramrod->common.inner_vlan_removal_en = val; + val = p_params->silent_vlan_removal_flg; + p_ramrod->common.silent_vlan_removal_en = val; + val = p_params->update_tx_switching_flg; + p_ramrod->common.update_tx_switching_en_flg = val; + val = p_params->update_default_vlan_enable_flg; + p_ramrod->common.update_default_vlan_en_flg = val; + p_ramrod->common.default_vlan_en = p_params->default_vlan_enable_flg; + val = p_params->update_default_vlan_flg; + p_ramrod->common.update_default_vlan_flg = val; + wordval = p_params->default_vlan; + p_ramrod->common.default_vlan = OSAL_CPU_TO_LE16(wordval); + + p_ramrod->common.tx_switching_en = p_params->tx_switching_flg; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) + if (p_ramrod->common.tx_switching_en || + p_ramrod->common.update_tx_switching_en_flg) { + DP_NOTICE(p_hwfn, false, + "FPGA - why are we seeing tx-switching? Overriding it\n"); + p_ramrod->common.tx_switching_en = 0; + p_ramrod->common.update_tx_switching_en_flg = 1; + } +#endif + + val = p_params->update_anti_spoofing_en_flg; + p_ramrod->common.update_anti_spoofing_en_flg = val; + p_ramrod->common.anti_spoofing_en = p_params->anti_spoofing_en; + p_ramrod->common.accept_any_vlan = p_params->accept_any_vlan; + val = p_params->update_accept_any_vlan_flg; + p_ramrod->common.update_accept_any_vlan_flg = val; + + rc = ecore_sp_vport_update_rss(p_hwfn, p_ramrod, p_rss_params); + if (rc != ECORE_SUCCESS) { + /* Return spq entry which is taken in ecore_sp_init_request() */ + ecore_spq_return_entry(p_hwfn, p_ent); + return rc; + } + + /* Update mcast bins for VFs, PF doesn't use this functionality */ + ecore_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params); + + ecore_sp_update_accept_mode(p_hwfn, p_ramrod, p_params->accept_flags); + ecore_sp_vport_update_sge_tpa(p_hwfn, p_ramrod, + p_params->sge_tpa_params); + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, u8 vport_id) +{ + struct vport_stop_ramrod_data *p_ramrod; + struct ecore_sp_init_data init_data; + struct ecore_spq_entry *p_ent; + enum _ecore_status_t rc; + u8 abs_vport_id = 0; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_vport_stop(p_hwfn); + + rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_STOP, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vport_stop; + p_ramrod->vport_id = abs_vport_id; + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +static enum _ecore_status_t +ecore_vf_pf_accept_flags(struct ecore_hwfn *p_hwfn, + struct ecore_filter_accept_flags *p_accept_flags) +{ + struct ecore_sp_vport_update_params s_params; + + OSAL_MEMSET(&s_params, 0, sizeof(s_params)); + OSAL_MEMCPY(&s_params.accept_flags, p_accept_flags, + sizeof(struct ecore_filter_accept_flags)); + + return ecore_vf_pf_vport_update(p_hwfn, &s_params); +} + +enum _ecore_status_t +ecore_filter_accept_cmd(struct ecore_dev *p_dev, + u8 vport, + struct 
ecore_filter_accept_flags accept_flags, + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct ecore_sp_vport_update_params update_params; + int i, rc; + + /* Prepare and send the vport rx_mode change */ + OSAL_MEMSET(&update_params, 0, sizeof(update_params)); + update_params.vport_id = vport; + update_params.accept_flags = accept_flags; + update_params.update_accept_any_vlan_flg = update_accept_any_vlan; + update_params.accept_any_vlan = accept_any_vlan; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + update_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + + if (IS_VF(p_dev)) { + rc = ecore_vf_pf_accept_flags(p_hwfn, &accept_flags); + if (rc != ECORE_SUCCESS) + return rc; + continue; + } + + rc = ecore_sp_vport_update(p_hwfn, &update_params, + comp_mode, p_comp_data); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_dev, "Update rx_mode failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Accept filter configured, flags = [Rx]%x [Tx]%x\n", + accept_flags.rx_accept_filter, + accept_flags.tx_accept_filter); + + if (update_accept_any_vlan) + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "accept_any_vlan=%d configured\n", + accept_any_vlan); + } + + return 0; +} + +static void ecore_sp_release_queue_cid(struct ecore_hwfn *p_hwfn, + struct ecore_hw_cid_data *p_cid_data) +{ + if (!p_cid_data->b_cid_allocated) + return; + + ecore_cxt_release_cid(p_hwfn, p_cid_data->cid); + p_cid_data->b_cid_allocated = false; +} + +enum _ecore_status_t +ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + u16 rx_queue_id, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size) +{ + struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; + struct rx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + u16 abs_rx_q_id = 0; + u8 abs_vport_id = 0; + + /* Store information for the stop */ + p_rx_cid->cid = cid; + p_rx_cid->opaque_fid = opaque_fid; + p_rx_cid->vport_id = vport_id; + + rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); + if (rc != ECORE_SUCCESS) + return rc; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, rx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", + opaque_fid, cid, rx_queue_id, vport_id, sb); + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = cid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_START, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_start; + + p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb); + p_ramrod->sb_index = sb_index; + p_ramrod->vport_id = abs_vport_id; + p_ramrod->stats_counter_id = stats_id; + p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = 0; + p_ramrod->complete_event_flg = 1; + + p_ramrod->bd_max_bytes = OSAL_CPU_TO_LE16(bd_max_bytes); + DMA_REGPAIR_LE(p_ramrod->bd_base, bd_chain_phys_addr); + + p_ramrod->num_of_pbl_pages = OSAL_CPU_TO_LE16(cqe_pbl_size); + DMA_REGPAIR_LE(p_ramrod->cqe_pbl_addr, cqe_pbl_addr); + + return ecore_spq_post(p_hwfn, 
p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u8 rx_queue_id, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void OSAL_IOMEM * *pp_prod) +{ + struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; + u8 abs_stats_id = 0; + u16 abs_l2_queue = 0; + enum _ecore_status_t rc; + u64 init_prod_val = 0; + + if (IS_VF(p_hwfn->p_dev)) { + return ecore_vf_pf_rxq_start(p_hwfn, + rx_queue_id, + sb, + sb_index, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, + cqe_pbl_size, pp_prod); + } + + rc = ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_l2_queue); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id); + if (rc != ECORE_SUCCESS) + return rc; + + *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + + GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_PRODS_OFFSET(abs_l2_queue); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + (u32 *)(&init_prod_val)); + + /* Allocate a CID for the queue */ + rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_rx_cid->cid); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n"); + return rc; + } + p_rx_cid->b_cid_allocated = true; + + rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, + opaque_fid, + p_rx_cid->cid, + rx_queue_id, + vport_id, + abs_stats_id, + sb, + sb_index, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size); + + if (rc != ECORE_SUCCESS) + ecore_sp_release_queue_cid(p_hwfn, p_rx_cid); + + return rc; +} + +enum _ecore_status_t +ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct rx_queue_update_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + struct ecore_hw_cid_data *p_rx_cid; + u16 qid, abs_rx_q_id = 0; + u8 i; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_rxqs_update(p_hwfn, + rx_queue_id, + num_rxqs, + complete_cqe_flg, + complete_event_flg); + + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + for (i = 0; i < num_rxqs; i++) { + qid = rx_queue_id + i; + p_rx_cid = &p_hwfn->p_rx_cids[qid]; + + /* Get SPQ entry */ + init_data.cid = p_rx_cid->cid; + init_data.opaque_fid = p_rx_cid->opaque_fid; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_update; + + ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); + ecore_fw_l2_queue(p_hwfn, qid, &abs_rx_q_id); + p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id); + p_ramrod->complete_cqe_flg = complete_cqe_flg; + p_ramrod->complete_event_flg = complete_event_flg; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc) + return rc; + } + + return rc; +} + +enum _ecore_status_t +ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, bool cqe_completion) +{ + struct ecore_hw_cid_data *p_rx_cid = &p_hwfn->p_rx_cids[rx_queue_id]; + struct rx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + 
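For reference, the *pp_prod computation in ecore_sp_eth_rx_queue_start() above (GTT_BAR0_MAP_REG_MSDM_RAM + MSTORM_PRODS_OFFSET(abs_l2_queue)) is a direct application of the IRO tables introduced earlier in this patch: each IRO entry supplies a base offset, per-index strides and an element size. Below is a minimal standalone sketch of that arithmetic; the field layout of the entry is assumed for illustration (only the members the macro actually references - base, m1, size - matter), and the values are taken from IRO[18] in ecore_iro_values.h.

/* Editor's sketch, not part of the patch: how MSTORM_PRODS_OFFSET() resolves
 * a per-queue producer address from an IRO table entry.
 */
#include <stdint.h>
#include <stdio.h>

struct iro_sketch {
	uint32_t base;	/* RAM-relative base of the data item */
	uint16_t m1;	/* stride for the first index (e.g. queue id) */
	uint16_t m2;	/* stride for an optional second index */
	uint16_t m3;	/* not used by the macros shown above */
	uint16_t size;	/* size of one element in bytes */
};

/* IRO[18] from ecore_iro_values.h: Mstorm producers, base 0x1f8, stride 0x10 */
static const struct iro_sketch mstorm_prods = { 0x1f8, 0x10, 0x0, 0x0, 0x8 };

static uint32_t mstorm_prods_offset(uint16_t queue_id)
{
	return mstorm_prods.base + (uint32_t)queue_id * mstorm_prods.m1;
}

int main(void)
{
	/* Queue 3 lands 3 * 0x10 bytes past the base, i.e. 0x228 */
	printf("MSTORM_PRODS_OFFSET(3) = 0x%x\n",
	       (unsigned)mstorm_prods_offset(3));
	return 0;
}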
enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + u16 abs_rx_q_id = 0; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_rxq_stop(p_hwfn, rx_queue_id, + cqe_completion); + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = p_rx_cid->cid; + init_data.opaque_fid = p_rx_cid->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_RX_QUEUE_STOP, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.rx_queue_stop; + + ecore_fw_vport(p_hwfn, p_rx_cid->vport_id, &p_ramrod->vport_id); + ecore_fw_l2_queue(p_hwfn, rx_queue_id, &abs_rx_q_id); + p_ramrod->rx_queue_id = OSAL_CPU_TO_LE16(abs_rx_q_id); + + /* Cleaning the queue requires the completion to arrive there. + * In addition, VFs require the answer to come as eqe to PF. + */ + p_ramrod->complete_cqe_flg = (!!(p_rx_cid->opaque_fid == + p_hwfn->hw_info.opaque_fid) && + !eq_completion_only) || cqe_completion; + p_ramrod->complete_event_flg = !(p_rx_cid->opaque_fid == + p_hwfn->hw_info.opaque_fid) || + eq_completion_only; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_sp_release_queue_cid(p_hwfn, p_rx_cid); + + return rc; +} + +enum _ecore_status_t +ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u16 tx_queue_id, + u32 cid, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, + union ecore_qm_pq_params *p_pq_params) +{ + struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; + struct tx_queue_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + u16 pq_id, abs_tx_q_id = 0; + u8 abs_vport_id; + + /* Store information for the stop */ + p_tx_cid->cid = cid; + p_tx_cid->opaque_fid = opaque_fid; + + rc = ecore_fw_vport(p_hwfn, vport_id, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_fw_l2_queue(p_hwfn, tx_queue_id, &abs_tx_q_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = cid; + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_TX_QUEUE_START, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.tx_queue_start; + p_ramrod->vport_id = abs_vport_id; + + p_ramrod->sb_id = OSAL_CPU_TO_LE16(sb); + p_ramrod->sb_index = sb_index; + p_ramrod->stats_counter_id = stats_id; + + p_ramrod->queue_zone_id = OSAL_CPU_TO_LE16(abs_tx_q_id); + + p_ramrod->pbl_size = OSAL_CPU_TO_LE16(pbl_size); + p_ramrod->pbl_base_addr.hi = DMA_HI_LE(pbl_addr); + p_ramrod->pbl_base_addr.lo = DMA_LO_LE(pbl_addr); + + pq_id = ecore_get_qm_pq(p_hwfn, PROTOCOLID_ETH, p_pq_params); + p_ramrod->qm_pq_id = OSAL_CPU_TO_LE16(pq_id); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u16 tx_queue_id, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, + void OSAL_IOMEM * *pp_doorbell) +{ + struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; + union ecore_qm_pq_params pq_params; + enum _ecore_status_t rc; + u8 abs_stats_id = 0; + + if (IS_VF(p_hwfn->p_dev)) { + 
return ecore_vf_pf_txq_start(p_hwfn, + tx_queue_id, + sb, + sb_index, + pbl_addr, pbl_size, pp_doorbell); + } + + rc = ecore_fw_vport(p_hwfn, stats_id, &abs_stats_id); + if (rc != ECORE_SUCCESS) + return rc; + + OSAL_MEMSET(p_tx_cid, 0, sizeof(*p_tx_cid)); + OSAL_MEMSET(&pq_params, 0, sizeof(pq_params)); + + /* Allocate a CID for the queue */ + rc = ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_ETH, &p_tx_cid->cid); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, true, "Failed to acquire cid\n"); + return rc; + } + p_tx_cid->b_cid_allocated = true; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "opaque_fid=0x%x, cid=0x%x, tx_qid=0x%x, vport_id=0x%x, sb_id=0x%x\n", + opaque_fid, p_tx_cid->cid, tx_queue_id, vport_id, sb); + + /* TODO - set tc in the pq_params for multi-cos */ + rc = ecore_sp_eth_txq_start_ramrod(p_hwfn, + opaque_fid, + tx_queue_id, + p_tx_cid->cid, + vport_id, + abs_stats_id, + sb, + sb_index, + pbl_addr, pbl_size, &pq_params); + + *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells + + DB_ADDR(p_tx_cid->cid, DQ_DEMS_LEGACY); + + if (rc != ECORE_SUCCESS) + ecore_sp_release_queue_cid(p_hwfn, p_tx_cid); + + return rc; +} + +enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn) +{ + return ECORE_NOTIMPL; +} + +enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, + u16 tx_queue_id) +{ + struct ecore_hw_cid_data *p_tx_cid = &p_hwfn->p_tx_cids[tx_queue_id]; + struct tx_queue_stop_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + + if (IS_VF(p_hwfn->p_dev)) + return ecore_vf_pf_txq_stop(p_hwfn, tx_queue_id); + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = p_tx_cid->cid; + init_data.opaque_fid = p_tx_cid->opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_TX_QUEUE_STOP, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.tx_queue_stop; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_sp_release_queue_cid(p_hwfn, p_tx_cid); + return rc; +} + +static enum eth_filter_action +ecore_filter_action(enum ecore_filter_opcode opcode) +{ + enum eth_filter_action action = MAX_ETH_FILTER_ACTION; + + switch (opcode) { + case ECORE_FILTER_ADD: + action = ETH_FILTER_ACTION_ADD; + break; + case ECORE_FILTER_REMOVE: + action = ETH_FILTER_ACTION_REMOVE; + break; + case ECORE_FILTER_FLUSH: + action = ETH_FILTER_ACTION_REMOVE_ALL; + break; + default: + action = MAX_ETH_FILTER_ACTION; + } + + return action; +} + +static void ecore_set_fw_mac_addr(__le16 *fw_msb, + __le16 *fw_mid, __le16 *fw_lsb, u8 *mac) +{ + ((u8 *)fw_msb)[0] = mac[1]; + ((u8 *)fw_msb)[1] = mac[0]; + ((u8 *)fw_mid)[0] = mac[3]; + ((u8 *)fw_mid)[1] = mac[2]; + ((u8 *)fw_lsb)[0] = mac[5]; + ((u8 *)fw_lsb)[1] = mac[4]; +} + +static enum _ecore_status_t +ecore_filter_ucast_common(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_filter_ucast *p_filter_cmd, + struct vport_filter_update_ramrod_data **pp_ramrod, + struct ecore_spq_entry **pp_ent, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct vport_filter_update_ramrod_data *p_ramrod; + u8 vport_to_add_to = 0, vport_to_remove_from = 0; + struct eth_filter_cmd *p_first_filter; + struct eth_filter_cmd *p_second_filter; + struct ecore_sp_init_data init_data; + enum eth_filter_action action; + enum 
_ecore_status_t rc; + + rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_remove_from, + &vport_to_remove_from); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_fw_vport(p_hwfn, p_filter_cmd->vport_to_add_to, + &vport_to_add_to); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, pp_ent, + ETH_RAMROD_FILTERS_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + *pp_ramrod = &(*pp_ent)->ramrod.vport_filter_update; + p_ramrod = *pp_ramrod; + p_ramrod->filter_cmd_hdr.rx = p_filter_cmd->is_rx_filter ? 1 : 0; + p_ramrod->filter_cmd_hdr.tx = p_filter_cmd->is_tx_filter ? 1 : 0; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Non-Asic - prevent Tx filters\n"); + p_ramrod->filter_cmd_hdr.tx = 0; + } +#endif + + switch (p_filter_cmd->opcode) { + case ECORE_FILTER_REPLACE: + case ECORE_FILTER_MOVE: + p_ramrod->filter_cmd_hdr.cmd_cnt = 2; + break; + default: + p_ramrod->filter_cmd_hdr.cmd_cnt = 1; + break; + } + + p_first_filter = &p_ramrod->filter_cmds[0]; + p_second_filter = &p_ramrod->filter_cmds[1]; + + switch (p_filter_cmd->type) { + case ECORE_FILTER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_MAC; + break; + case ECORE_FILTER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_VLAN; + break; + case ECORE_FILTER_MAC_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_PAIR; + break; + case ECORE_FILTER_INNER_MAC: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC; + break; + case ECORE_FILTER_INNER_VLAN: + p_first_filter->type = ETH_FILTER_TYPE_INNER_VLAN; + break; + case ECORE_FILTER_INNER_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_PAIR; + break; + case ECORE_FILTER_INNER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR; + break; + case ECORE_FILTER_MAC_VNI_PAIR: + p_first_filter->type = ETH_FILTER_TYPE_MAC_VNI_PAIR; + break; + case ECORE_FILTER_VNI: + p_first_filter->type = ETH_FILTER_TYPE_VNI; + break; + } + + if ((p_first_filter->type == ETH_FILTER_TYPE_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR)) + ecore_set_fw_mac_addr(&p_first_filter->mac_msb, + &p_first_filter->mac_mid, + &p_first_filter->mac_lsb, + (u8 *)p_filter_cmd->mac); + + if ((p_first_filter->type == ETH_FILTER_TYPE_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_VLAN) || + (p_first_filter->type == ETH_FILTER_TYPE_INNER_PAIR)) + p_first_filter->vlan_id = OSAL_CPU_TO_LE16(p_filter_cmd->vlan); + + if ((p_first_filter->type == ETH_FILTER_TYPE_INNER_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_MAC_VNI_PAIR) || + (p_first_filter->type == ETH_FILTER_TYPE_VNI)) + p_first_filter->vni = OSAL_CPU_TO_LE32(p_filter_cmd->vni); + + if (p_filter_cmd->opcode == ECORE_FILTER_MOVE) { + p_second_filter->type = p_first_filter->type; + p_second_filter->mac_msb = p_first_filter->mac_msb; + p_second_filter->mac_mid = p_first_filter->mac_mid; + p_second_filter->mac_lsb = p_first_filter->mac_lsb; + p_second_filter->vlan_id = p_first_filter->vlan_id; + 
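The filter path above relies on ecore_set_fw_mac_addr() to split a 6-byte MAC address into the three 16-bit fields the firmware filter command expects, swapping the bytes of each pair so the little-endian field carries them in wire order. A small self-contained sketch of the same packing follows; plain uint16_t stands in for __le16, and the expected output assumes a little-endian host.

/* Editor's sketch, not part of the patch: byte ordering produced by the
 * ecore_set_fw_mac_addr() helper shown above.
 */
#include <stdint.h>
#include <stdio.h>

static void set_fw_mac_addr_sketch(uint16_t *fw_msb, uint16_t *fw_mid,
				   uint16_t *fw_lsb, const uint8_t *mac)
{
	((uint8_t *)fw_msb)[0] = mac[1];
	((uint8_t *)fw_msb)[1] = mac[0];
	((uint8_t *)fw_mid)[0] = mac[3];
	((uint8_t *)fw_mid)[1] = mac[2];
	((uint8_t *)fw_lsb)[0] = mac[5];
	((uint8_t *)fw_lsb)[1] = mac[4];
}

int main(void)
{
	const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint16_t msb, mid, lsb;

	set_fw_mac_addr_sketch(&msb, &mid, &lsb, mac);
	/* On a little-endian host: msb=0x0011 mid=0x2233 lsb=0x4455 */
	printf("msb=0x%04x mid=0x%04x lsb=0x%04x\n",
	       (unsigned)msb, (unsigned)mid, (unsigned)lsb);
	return 0;
}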
p_second_filter->vni = p_first_filter->vni; + + p_first_filter->action = ETH_FILTER_ACTION_REMOVE; + + p_first_filter->vport_id = vport_to_remove_from; + + p_second_filter->action = ETH_FILTER_ACTION_ADD; + p_second_filter->vport_id = vport_to_add_to; + } else if (p_filter_cmd->opcode == ECORE_FILTER_REPLACE) { + p_first_filter->vport_id = vport_to_add_to; + OSAL_MEMCPY(p_second_filter, p_first_filter, + sizeof(*p_second_filter)); + p_first_filter->action = ETH_FILTER_ACTION_REMOVE_ALL; + p_second_filter->action = ETH_FILTER_ACTION_ADD; + } else { + action = ecore_filter_action(p_filter_cmd->opcode); + + if (action == MAX_ETH_FILTER_ACTION) { + DP_NOTICE(p_hwfn, true, + "%d is not supported yet\n", + p_filter_cmd->opcode); + return ECORE_NOTIMPL; + } + + p_first_filter->action = action; + p_first_filter->vport_id = + (p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? + vport_to_remove_from : vport_to_add_to; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t +ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct vport_filter_update_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct eth_filter_cmd_header *p_header; + enum _ecore_status_t rc; + + rc = ecore_filter_ucast_common(p_hwfn, opaque_fid, p_filter_cmd, + &p_ramrod, &p_ent, + comp_mode, p_comp_data); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "Uni. filter command failed %d\n", rc); + return rc; + } + p_header = &p_ramrod->filter_cmd_hdr; + p_header->assert_on_error = p_filter_cmd->assert_on_error; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "Unicast filter ADD command failed %d\n", rc); + return rc; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Unicast filter configured, opcode = %s, type = %s, cmd_cnt = %d, is_rx_filter = %d, is_tx_filter = %d\n", + (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? "ADD" : + ((p_filter_cmd->opcode == ECORE_FILTER_REMOVE) ? + "REMOVE" : + ((p_filter_cmd->opcode == ECORE_FILTER_MOVE) ? + "MOVE" : "REPLACE")), + (p_filter_cmd->type == ECORE_FILTER_MAC) ? "MAC" : + ((p_filter_cmd->type == ECORE_FILTER_VLAN) ? 
+ "VLAN" : "MAC & VLAN"), + p_ramrod->filter_cmd_hdr.cmd_cnt, + p_filter_cmd->is_rx_filter, p_filter_cmd->is_tx_filter); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "vport_to_add_to = %d, vport_to_remove_from = %d, mac = %2x:%2x:%2x:%2x:%2x:%2x, vlan = %d\n", + p_filter_cmd->vport_to_add_to, + p_filter_cmd->vport_to_remove_from, + p_filter_cmd->mac[0], p_filter_cmd->mac[1], + p_filter_cmd->mac[2], p_filter_cmd->mac[3], + p_filter_cmd->mac[4], p_filter_cmd->mac[5], + p_filter_cmd->vlan); + + return ECORE_SUCCESS; +} + +/******************************************************************************* + * Description: + * Calculates crc 32 on a buffer + * Note: crc32_length MUST be aligned to 8 + * Return: + ******************************************************************************/ +static u32 ecore_calc_crc32c(u8 *crc32_packet, + u32 crc32_length, u32 crc32_seed, u8 complement) +{ + u32 byte = 0, bit = 0, crc32_result = crc32_seed; + u8 msb = 0, current_byte = 0; + + if ((crc32_packet == OSAL_NULL) || + (crc32_length == 0) || ((crc32_length % 8) != 0)) { + return crc32_result; + } + + for (byte = 0; byte < crc32_length; byte++) { + current_byte = crc32_packet[byte]; + for (bit = 0; bit < 8; bit++) { + msb = (u8)(crc32_result >> 31); + crc32_result = crc32_result << 1; + if (msb != (0x1 & (current_byte >> bit))) { + crc32_result = crc32_result ^ CRC32_POLY; + crc32_result |= 1; + } + } + } + + return crc32_result; +} + +static OSAL_INLINE u32 ecore_crc32c_le(u32 seed, u8 *mac, u32 len) +{ + u32 packet_buf[2] = { 0 }; + + OSAL_MEMCPY((u8 *)(&packet_buf[0]), &mac[0], 6); + return ecore_calc_crc32c((u8 *)packet_buf, 8, seed, 0); +} + +u8 ecore_mcast_bin_from_mac(u8 *mac) +{ + u32 crc = ecore_crc32c_le(ETH_MULTICAST_BIN_FROM_MAC_SEED, + mac, ETH_ALEN); + + return crc & 0xff; +} + +static enum _ecore_status_t +ecore_sp_eth_filter_mcast(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct vport_update_ramrod_data *p_ramrod = OSAL_NULL; + unsigned long bins[ETH_MULTICAST_MAC_BINS_IN_REGS]; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + enum _ecore_status_t rc; + u8 abs_vport_id = 0; + int i; + + rc = ecore_fw_vport(p_hwfn, + (p_filter_cmd->opcode == ECORE_FILTER_ADD) ? + p_filter_cmd->vport_to_add_to : + p_filter_cmd->vport_to_remove_from, &abs_vport_id); + if (rc != ECORE_SUCCESS) + return rc; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + ETH_RAMROD_VPORT_UPDATE, + PROTOCOLID_ETH, &init_data); + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "Multi-cast command failed %d\n", rc); + return rc; + } + + p_ramrod = &p_ent->ramrod.vport_update; + p_ramrod->common.update_approx_mcast_flg = 1; + + /* explicitly clear out the entire vector */ + OSAL_MEMSET(&p_ramrod->approx_mcast.bins, + 0, sizeof(p_ramrod->approx_mcast.bins)); + OSAL_MEMSET(bins, 0, sizeof(unsigned long) * + ETH_MULTICAST_MAC_BINS_IN_REGS); + + if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { + /* filter ADD op is explicit set op and it removes + * any existing filters for the vport. 
+ */ + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); + OSAL_SET_BIT(bit, bins); + } + + /* Convert to correct endianity */ + for (i = 0; i < ETH_MULTICAST_MAC_BINS_IN_REGS; i++) { + struct vport_update_ramrod_mcast *p_ramrod_bins; + u32 *p_bins = (u32 *)bins; + + p_ramrod_bins = &p_ramrod->approx_mcast; + p_ramrod_bins->bins[i] = OSAL_CPU_TO_LE32(p_bins[i]); + } + } + + p_ramrod->common.vport_id = abs_vport_id; + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "Multicast filter command failed %d\n", rc); + + return rc; +} + +enum _ecore_status_t +ecore_filter_mcast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + int i; + + /* only ADD and REMOVE operations are supported for multi-cast */ + if ((p_filter_cmd->opcode != ECORE_FILTER_ADD && + (p_filter_cmd->opcode != ECORE_FILTER_REMOVE)) || + (p_filter_cmd->num_mc_addrs > ECORE_MAX_MC_ADDRS)) { + return ECORE_INVAL; + } + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (IS_VF(p_dev)) { + ecore_vf_pf_filter_mcast(p_hwfn, p_filter_cmd); + continue; + } + + rc = ecore_sp_eth_filter_mcast(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_filter_cmd, + comp_mode, p_comp_data); + if (rc != ECORE_SUCCESS) + break; + } + + return rc; +} + +enum _ecore_status_t +ecore_filter_ucast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (IS_VF(p_dev)) { + rc = ecore_vf_pf_filter_ucast(p_hwfn, p_filter_cmd); + continue; + } + + rc = ecore_sp_eth_filter_ucast(p_hwfn, + p_hwfn->hw_info.opaque_fid, + p_filter_cmd, + comp_mode, p_comp_data); + if (rc != ECORE_SUCCESS) + break; + } + + return rc; +} + +/* IOV related */ +enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid) +{ + struct vf_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_START, + PROTOCOLID_COMMON, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vf_start; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + p_ramrod->opaque_fid = OSAL_CPU_TO_LE16(opaque_vfid); + + switch (p_hwfn->hw_info.personality) { + case ECORE_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown VF personality %d\n", + p_hwfn->hw_info.personality); + return ECORE_INVAL; + } + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn) +{ + return ECORE_NOTIMPL; +} + +enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid) +{ + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct vf_stop_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct 
ecore_sp_init_data init_data; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = opaque_vfid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_VF_STOP, + PROTOCOLID_COMMON, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + p_ramrod = &p_ent->ramrod.vf_stop; + + p_ramrod->vf_id = GET_FIELD(concrete_vfid, PXP_CONCRETE_FID_VFID); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +/* Statistics related code */ +static void __ecore_get_vport_pstats_addrlen(struct ecore_hwfn *p_hwfn, + u32 *p_addr, u32 *p_len, + u16 statistics_bin) +{ + if (IS_PF(p_hwfn->p_dev)) { + *p_addr = BAR0_MAP_REG_PSDM_RAM + + PSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_pstorm_per_queue_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.pstats.address; + *p_len = p_resp->pfdev_info.stats_info.pstats.len; + } +} + +static void __ecore_get_vport_pstats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_pstorm_per_queue_stat pstats; + u32 pstats_addr = 0, pstats_len = 0; + + __ecore_get_vport_pstats_addrlen(p_hwfn, &pstats_addr, &pstats_len, + statistics_bin); + + OSAL_MEMSET(&pstats, 0, sizeof(pstats)); + ecore_memcpy_from(p_hwfn, p_ptt, &pstats, pstats_addr, pstats_len); + + p_stats->tx_ucast_bytes += HILO_64_REGPAIR(pstats.sent_ucast_bytes); + p_stats->tx_mcast_bytes += HILO_64_REGPAIR(pstats.sent_mcast_bytes); + p_stats->tx_bcast_bytes += HILO_64_REGPAIR(pstats.sent_bcast_bytes); + p_stats->tx_ucast_pkts += HILO_64_REGPAIR(pstats.sent_ucast_pkts); + p_stats->tx_mcast_pkts += HILO_64_REGPAIR(pstats.sent_mcast_pkts); + p_stats->tx_bcast_pkts += HILO_64_REGPAIR(pstats.sent_bcast_pkts); + p_stats->tx_err_drop_pkts += HILO_64_REGPAIR(pstats.error_drop_pkts); +} + +static void __ecore_get_vport_tstats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats, + u16 statistics_bin) +{ + struct tstorm_per_port_stat tstats; + u32 tstats_addr, tstats_len; + + if (IS_PF(p_hwfn->p_dev)) { + tstats_addr = BAR0_MAP_REG_TSDM_RAM + + TSTORM_PORT_STAT_OFFSET(MFW_PORT(p_hwfn)); + tstats_len = sizeof(struct tstorm_per_port_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + tstats_addr = p_resp->pfdev_info.stats_info.tstats.address; + tstats_len = p_resp->pfdev_info.stats_info.tstats.len; + } + + OSAL_MEMSET(&tstats, 0, sizeof(tstats)); + ecore_memcpy_from(p_hwfn, p_ptt, &tstats, tstats_addr, tstats_len); + + p_stats->mftag_filter_discards += + HILO_64_REGPAIR(tstats.mftag_filter_discard); + p_stats->mac_filter_discards += + HILO_64_REGPAIR(tstats.eth_mac_filter_discard); +} + +static void __ecore_get_vport_ustats_addrlen(struct ecore_hwfn *p_hwfn, + u32 *p_addr, u32 *p_len, + u16 statistics_bin) +{ + if (IS_PF(p_hwfn->p_dev)) { + *p_addr = BAR0_MAP_REG_USDM_RAM + + USTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_ustorm_per_queue_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.ustats.address; + *p_len = p_resp->pfdev_info.stats_info.ustats.len; + } +} + +static void 
__ecore_get_vport_ustats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_ustorm_per_queue_stat ustats; + u32 ustats_addr = 0, ustats_len = 0; + + __ecore_get_vport_ustats_addrlen(p_hwfn, &ustats_addr, &ustats_len, + statistics_bin); + + OSAL_MEMSET(&ustats, 0, sizeof(ustats)); + ecore_memcpy_from(p_hwfn, p_ptt, &ustats, ustats_addr, ustats_len); + + p_stats->rx_ucast_bytes += HILO_64_REGPAIR(ustats.rcv_ucast_bytes); + p_stats->rx_mcast_bytes += HILO_64_REGPAIR(ustats.rcv_mcast_bytes); + p_stats->rx_bcast_bytes += HILO_64_REGPAIR(ustats.rcv_bcast_bytes); + p_stats->rx_ucast_pkts += HILO_64_REGPAIR(ustats.rcv_ucast_pkts); + p_stats->rx_mcast_pkts += HILO_64_REGPAIR(ustats.rcv_mcast_pkts); + p_stats->rx_bcast_pkts += HILO_64_REGPAIR(ustats.rcv_bcast_pkts); +} + +static void __ecore_get_vport_mstats_addrlen(struct ecore_hwfn *p_hwfn, + u32 *p_addr, u32 *p_len, + u16 statistics_bin) +{ + if (IS_PF(p_hwfn->p_dev)) { + *p_addr = BAR0_MAP_REG_MSDM_RAM + + MSTORM_QUEUE_STAT_OFFSET(statistics_bin); + *p_len = sizeof(struct eth_mstorm_per_queue_stat); + } else { + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *p_resp = &p_iov->acquire_resp; + + *p_addr = p_resp->pfdev_info.stats_info.mstats.address; + *p_len = p_resp->pfdev_info.stats_info.mstats.len; + } +} + +static void __ecore_get_vport_mstats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats, + u16 statistics_bin) +{ + struct eth_mstorm_per_queue_stat mstats; + u32 mstats_addr = 0, mstats_len = 0; + + __ecore_get_vport_mstats_addrlen(p_hwfn, &mstats_addr, &mstats_len, + statistics_bin); + + OSAL_MEMSET(&mstats, 0, sizeof(mstats)); + ecore_memcpy_from(p_hwfn, p_ptt, &mstats, mstats_addr, mstats_len); + + p_stats->no_buff_discards += HILO_64_REGPAIR(mstats.no_buff_discard); + p_stats->packet_too_big_discard += + HILO_64_REGPAIR(mstats.packet_too_big_discard); + p_stats->ttl0_discard += HILO_64_REGPAIR(mstats.ttl0_discard); + p_stats->tpa_coalesced_pkts += + HILO_64_REGPAIR(mstats.tpa_coalesced_pkts); + p_stats->tpa_coalesced_events += + HILO_64_REGPAIR(mstats.tpa_coalesced_events); + p_stats->tpa_aborts_num += HILO_64_REGPAIR(mstats.tpa_aborts_num); + p_stats->tpa_coalesced_bytes += + HILO_64_REGPAIR(mstats.tpa_coalesced_bytes); +} + +static void __ecore_get_vport_port_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *p_stats) +{ + struct port_stats port_stats; + int j; + + OSAL_MEMSET(&port_stats, 0, sizeof(port_stats)); + + ecore_memcpy_from(p_hwfn, p_ptt, &port_stats, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, stats), + sizeof(port_stats)); + + p_stats->rx_64_byte_packets += port_stats.pmm.r64; + p_stats->rx_65_to_127_byte_packets += port_stats.pmm.r127; + p_stats->rx_128_to_255_byte_packets += port_stats.pmm.r255; + p_stats->rx_256_to_511_byte_packets += port_stats.pmm.r511; + p_stats->rx_512_to_1023_byte_packets += port_stats.pmm.r1023; + p_stats->rx_1024_to_1518_byte_packets += port_stats.pmm.r1518; + p_stats->rx_1519_to_1522_byte_packets += port_stats.pmm.r1522; + p_stats->rx_1519_to_2047_byte_packets += port_stats.pmm.r2047; + p_stats->rx_2048_to_4095_byte_packets += port_stats.pmm.r4095; + p_stats->rx_4096_to_9216_byte_packets += port_stats.pmm.r9216; + p_stats->rx_9217_to_16383_byte_packets += port_stats.pmm.r16383; + p_stats->rx_crc_errors += port_stats.pmm.rfcs; + p_stats->rx_mac_crtl_frames += port_stats.pmm.rxcf; + 
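The __ecore_get_vport_*stats() readers above all follow the same pattern: each counter sits in storm RAM as a 64-bit value exposed through a pair of 32-bit halves, and HILO_64_REGPAIR() recombines the halves before the result is added to the running totals. A minimal sketch of that recombination is below; the regpair layout and the exact behaviour of HILO_64_REGPAIR() are assumptions made for illustration, not taken from this patch.

/* Editor's sketch, not part of the patch: combining a hi/lo register pair
 * into a 64-bit counter before accumulating it into the stats structure.
 */
#include <stdint.h>
#include <stdio.h>

struct regpair_sketch {
	uint32_t lo;
	uint32_t hi;
};

static uint64_t hilo_64_regpair_sketch(struct regpair_sketch rp)
{
	return ((uint64_t)rp.hi << 32) | rp.lo;
}

int main(void)
{
	/* e.g. a sent_ucast_pkts counter that has wrapped past 2^32 */
	struct regpair_sketch sent_ucast_pkts = { .lo = 0x10, .hi = 0x1 };
	uint64_t total = 0;

	total += hilo_64_regpair_sketch(sent_ucast_pkts);
	printf("accumulated = %llu\n", (unsigned long long)total); /* 4294967312 */
	return 0;
}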
p_stats->rx_pause_frames += port_stats.pmm.rxpf; + p_stats->rx_pfc_frames += port_stats.pmm.rxpp; + p_stats->rx_align_errors += port_stats.pmm.raln; + p_stats->rx_carrier_errors += port_stats.pmm.rfcr; + p_stats->rx_oversize_packets += port_stats.pmm.rovr; + p_stats->rx_jabbers += port_stats.pmm.rjbr; + p_stats->rx_undersize_packets += port_stats.pmm.rund; + p_stats->rx_fragments += port_stats.pmm.rfrg; + p_stats->tx_64_byte_packets += port_stats.pmm.t64; + p_stats->tx_65_to_127_byte_packets += port_stats.pmm.t127; + p_stats->tx_128_to_255_byte_packets += port_stats.pmm.t255; + p_stats->tx_256_to_511_byte_packets += port_stats.pmm.t511; + p_stats->tx_512_to_1023_byte_packets += port_stats.pmm.t1023; + p_stats->tx_1024_to_1518_byte_packets += port_stats.pmm.t1518; + p_stats->tx_1519_to_2047_byte_packets += port_stats.pmm.t2047; + p_stats->tx_2048_to_4095_byte_packets += port_stats.pmm.t4095; + p_stats->tx_4096_to_9216_byte_packets += port_stats.pmm.t9216; + p_stats->tx_9217_to_16383_byte_packets += port_stats.pmm.t16383; + p_stats->tx_pause_frames += port_stats.pmm.txpf; + p_stats->tx_pfc_frames += port_stats.pmm.txpp; + p_stats->tx_lpi_entry_count += port_stats.pmm.tlpiec; + p_stats->tx_total_collisions += port_stats.pmm.tncl; + p_stats->rx_mac_bytes += port_stats.pmm.rbyte; + p_stats->rx_mac_uc_packets += port_stats.pmm.rxuca; + p_stats->rx_mac_mc_packets += port_stats.pmm.rxmca; + p_stats->rx_mac_bc_packets += port_stats.pmm.rxbca; + p_stats->rx_mac_frames_ok += port_stats.pmm.rxpok; + p_stats->tx_mac_bytes += port_stats.pmm.tbyte; + p_stats->tx_mac_uc_packets += port_stats.pmm.txuca; + p_stats->tx_mac_mc_packets += port_stats.pmm.txmca; + p_stats->tx_mac_bc_packets += port_stats.pmm.txbca; + p_stats->tx_mac_ctrl_frames += port_stats.pmm.txcf; + for (j = 0; j < 8; j++) { + p_stats->brb_truncates += port_stats.brb.brb_truncate[j]; + p_stats->brb_discards += port_stats.brb.brb_discard[j]; + } +} + +void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *stats, + u16 statistics_bin, bool b_get_port_stats) +{ + __ecore_get_vport_mstats(p_hwfn, p_ptt, stats, statistics_bin); + __ecore_get_vport_ustats(p_hwfn, p_ptt, stats, statistics_bin); + __ecore_get_vport_tstats(p_hwfn, p_ptt, stats, statistics_bin); + __ecore_get_vport_pstats(p_hwfn, p_ptt, stats, statistics_bin); + +#ifndef ASIC_ONLY + /* Avoid getting PORT stats for emulation. */ + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + return; +#endif + + if (b_get_port_stats && p_hwfn->mcp_info) + __ecore_get_vport_port_stats(p_hwfn, p_ptt, stats); +} + +static void _ecore_get_vport_stats(struct ecore_dev *p_dev, + struct ecore_eth_stats *stats) +{ + u8 fw_vport = 0; + int i; + + OSAL_MEMSET(stats, 0, sizeof(*stats)); + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct ecore_ptt *p_ptt = IS_PF(p_dev) ? + ecore_ptt_acquire(p_hwfn) : OSAL_NULL; + + if (IS_PF(p_dev)) { + /* The main vport index is relative first */ + if (ecore_fw_vport(p_hwfn, 0, &fw_vport)) { + DP_ERR(p_hwfn, "No vport available!\n"); + goto out; + } + } + + if (IS_PF(p_dev) && !p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + __ecore_get_vport_stats(p_hwfn, p_ptt, stats, fw_vport, + IS_PF(p_dev) ? 
true : false); + +out: + if (IS_PF(p_dev)) + ecore_ptt_release(p_hwfn, p_ptt); + } +} + +void ecore_get_vport_stats(struct ecore_dev *p_dev, + struct ecore_eth_stats *stats) +{ + u32 i; + + if (!p_dev) { + OSAL_MEMSET(stats, 0, sizeof(*stats)); + return; + } + + _ecore_get_vport_stats(p_dev, stats); + + if (!p_dev->reset_stats) + return; + + /* Reduce the statistics baseline */ + for (i = 0; i < sizeof(struct ecore_eth_stats) / sizeof(u64); i++) + ((u64 *)stats)[i] -= ((u64 *)p_dev->reset_stats)[i]; +} + +/* zeroes V-PORT specific portion of stats (Port stats remain untouched) */ +void ecore_reset_vport_stats(struct ecore_dev *p_dev) +{ + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + struct eth_mstorm_per_queue_stat mstats; + struct eth_ustorm_per_queue_stat ustats; + struct eth_pstorm_per_queue_stat pstats; + struct ecore_ptt *p_ptt = IS_PF(p_dev) ? + ecore_ptt_acquire(p_hwfn) : OSAL_NULL; + u32 addr = 0, len = 0; + + if (IS_PF(p_dev) && !p_ptt) { + DP_ERR(p_hwfn, "Failed to acquire ptt\n"); + continue; + } + + OSAL_MEMSET(&mstats, 0, sizeof(mstats)); + __ecore_get_vport_mstats_addrlen(p_hwfn, &addr, &len, 0); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &mstats, len); + + OSAL_MEMSET(&ustats, 0, sizeof(ustats)); + __ecore_get_vport_ustats_addrlen(p_hwfn, &addr, &len, 0); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &ustats, len); + + OSAL_MEMSET(&pstats, 0, sizeof(pstats)); + __ecore_get_vport_pstats_addrlen(p_hwfn, &addr, &len, 0); + ecore_memcpy_to(p_hwfn, p_ptt, addr, &pstats, len); + + if (IS_PF(p_dev)) + ecore_ptt_release(p_hwfn, p_ptt); + } + + /* PORT statistics are not necessarily reset, so we need to + * read and create a baseline for future statistics. + */ + if (!p_dev->reset_stats) + DP_INFO(p_dev, "Reset stats not allocated\n"); + else + _ecore_get_vport_stats(p_dev, p_dev->reset_stats); +} diff --git a/drivers/net/qede/base/ecore_l2.h b/drivers/net/qede/base/ecore_l2.h new file mode 100644 index 00000000..b0850ca4 --- /dev/null +++ b/drivers/net/qede/base/ecore_l2.h @@ -0,0 +1,151 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_L2_H__ +#define __ECORE_L2_H__ + +#include "ecore.h" +#include "ecore_hw.h" +#include "ecore_spq.h" +#include "ecore_l2_api.h" + +/** + * @brief ecore_sp_vf_start - VF Function Start + * + * This ramrod is sent to initialize a virtual function (VF) when it is loaded. + * It configures the function-related parameters. + * + * @note Final phase API. + * + * @param p_hwfn + * @param concrete_vfid VF ID + * @param opaque_vfid + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_vf_start(struct ecore_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid); + +/** + * @brief ecore_sp_vf_update - VF Function Update Ramrod + * + * This ramrod performs updates of a virtual function (VF). + * It currently contains no functionality. + * + * @note Final phase API. + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_vf_update(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_sp_vf_stop - VF Function Stop Ramrod + * + * This ramrod is sent to unload a virtual function (VF). + * + * @note Final phase API. 
+ * + * @param p_hwfn + * @param concrete_vfid + * @param opaque_vfid + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_vf_stop(struct ecore_hwfn *p_hwfn, + u32 concrete_vfid, u16 opaque_vfid); + +/** + * @brief ecore_sp_eth_tx_queue_update - + * + * This ramrod updates a TX queue. It is used for setting the active + * state of the queue. + * + * @note Final phase API. + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_eth_tx_queue_update(struct ecore_hwfn *p_hwfn); + +enum _ecore_status_t +ecore_sp_eth_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params); + +/** + * @brief - Starts an Rx queue; Should be used where contexts are handled + * outside of the ramrod area [specifically iov scenarios] + * + * @param p_hwfn + * @param opaque_fid + * @param cid + * @param rx_queue_id + * @param vport_id + * @param stats_id + * @param sb + * @param sb_index + * @param bd_max_bytes + * @param bd_chain_phys_addr + * @param cqe_pbl_addr + * @param cqe_pbl_size + * @param leading + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_sp_eth_rxq_start_ramrod(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u32 cid, + u16 rx_queue_id, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size); + +/** + * @brief - Starts a Tx queue; Should be used where contexts are handled + * outside of the ramrod area [specifically iov scenarios] + * + * @param p_hwfn + * @param opaque_fid + * @param tx_queue_id + * @param cid + * @param vport_id + * @param stats_id + * @param sb + * @param sb_index + * @param pbl_addr + * @param pbl_size + * @param p_pq_params - parameters for choosing the PQ for this Tx queue + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_sp_eth_txq_start_ramrod(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u16 tx_queue_id, + u32 cid, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, + union ecore_qm_pq_params *p_pq_params); + +u8 ecore_mcast_bin_from_mac(u8 *mac); + +#endif diff --git a/drivers/net/qede/base/ecore_l2_api.h b/drivers/net/qede/base/ecore_l2_api.h new file mode 100644 index 00000000..b41dd7fe --- /dev/null +++ b/drivers/net/qede/base/ecore_l2_api.h @@ -0,0 +1,401 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ECORE_L2_API_H__ +#define __ECORE_L2_API_H__ + +#include "ecore_status.h" +#include "ecore_sp_api.h" + +#ifndef __EXTRACT__LINUX__ +enum ecore_rss_caps { + ECORE_RSS_IPV4 = 0x1, + ECORE_RSS_IPV6 = 0x2, + ECORE_RSS_IPV4_TCP = 0x4, + ECORE_RSS_IPV6_TCP = 0x8, + ECORE_RSS_IPV4_UDP = 0x10, + ECORE_RSS_IPV6_UDP = 0x20, +}; + +/* Should be the same as ETH_RSS_IND_TABLE_ENTRIES_NUM */ +#define ECORE_RSS_IND_TABLE_SIZE 128 +#define ECORE_RSS_KEY_SIZE 10 /* size in 32b chunks */ +#endif + +struct ecore_rss_params { + u8 update_rss_config; + u8 rss_enable; + u8 rss_eng_id; + u8 update_rss_capabilities; + u8 update_rss_ind_table; + u8 update_rss_key; + u8 rss_caps; + u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ + u16 rss_ind_table[ECORE_RSS_IND_TABLE_SIZE]; + u32 rss_key[ECORE_RSS_KEY_SIZE]; +}; + +struct ecore_sge_tpa_params { + u8 max_buffers_per_cqe; + + u8 update_tpa_en_flg; + u8 tpa_ipv4_en_flg; + u8 tpa_ipv6_en_flg; + u8 tpa_ipv4_tunn_en_flg; + u8 tpa_ipv6_tunn_en_flg; + + u8 update_tpa_param_flg; + u8 tpa_pkt_split_flg; + u8 tpa_hdr_data_split_flg; + u8 tpa_gro_consistent_flg; + u8 tpa_max_aggs_num; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; +}; + +enum ecore_filter_opcode { + ECORE_FILTER_ADD, + ECORE_FILTER_REMOVE, + ECORE_FILTER_MOVE, + ECORE_FILTER_REPLACE, /* Delete all MACs and add new one instead */ + ECORE_FILTER_FLUSH, /* Removes all filters */ +}; + +enum ecore_filter_ucast_type { + ECORE_FILTER_MAC, + ECORE_FILTER_VLAN, + ECORE_FILTER_MAC_VLAN, + ECORE_FILTER_INNER_MAC, + ECORE_FILTER_INNER_VLAN, + ECORE_FILTER_INNER_PAIR, + ECORE_FILTER_INNER_MAC_VNI_PAIR, + ECORE_FILTER_MAC_VNI_PAIR, + ECORE_FILTER_VNI, +}; + +struct ecore_filter_ucast { + enum ecore_filter_opcode opcode; + enum ecore_filter_ucast_type type; + u8 is_rx_filter; + u8 is_tx_filter; + u8 vport_to_add_to; + u8 vport_to_remove_from; + unsigned char mac[ETH_ALEN]; + u8 assert_on_error; + u16 vlan; + u32 vni; +}; + +struct ecore_filter_mcast { + /* MOVE is not supported for multicast */ + enum ecore_filter_opcode opcode; + u8 vport_to_add_to; + u8 vport_to_remove_from; + u8 num_mc_addrs; +#define ECORE_MAX_MC_ADDRS 64 + unsigned char mac[ECORE_MAX_MC_ADDRS][ETH_ALEN]; +}; + +struct ecore_filter_accept_flags { + u8 update_rx_mode_config; + u8 update_tx_mode_config; + u8 rx_accept_filter; + u8 tx_accept_filter; +#define ECORE_ACCEPT_NONE 0x01 +#define ECORE_ACCEPT_UCAST_MATCHED 0x02 +#define ECORE_ACCEPT_UCAST_UNMATCHED 0x04 +#define ECORE_ACCEPT_MCAST_MATCHED 0x08 +#define ECORE_ACCEPT_MCAST_UNMATCHED 0x10 +#define ECORE_ACCEPT_BCAST 0x20 +}; + +/* Add / remove / move / remove-all unicast MAC-VLAN filters. + * FW will assert in the following cases, so driver should take care...: + * 1. Adding a filter to a full table. + * 2. Adding a filter which already exists on that vport. + * 3. Removing a filter which doesn't exist. + */ + +enum _ecore_status_t +ecore_filter_ucast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/* Add / remove / move multicast MAC filters. 
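+ *
+ * Illustrative usage (editor's sketch; the completion mode and the mc_mac,
+ * vport_id and rc variables are assumptions, not taken from this patch):
+ *
+ *   struct ecore_filter_mcast mcast;
+ *
+ *   OSAL_MEMSET(&mcast, 0, sizeof(mcast));
+ *   mcast.opcode = ECORE_FILTER_ADD;
+ *   mcast.vport_to_add_to = vport_id;
+ *   mcast.num_mc_addrs = 1;
+ *   OSAL_MEMCPY(mcast.mac[0], mc_mac, ETH_ALEN);
+ *   rc = ecore_filter_mcast_cmd(p_dev, &mcast, ECORE_SPQ_MODE_CB,
+ *                               OSAL_NULL);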
*/ +enum _ecore_status_t +ecore_filter_mcast_cmd(struct ecore_dev *p_dev, + struct ecore_filter_mcast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/* Set "accept" filters */ +enum _ecore_status_t +ecore_filter_accept_cmd(struct ecore_dev *p_dev, + u8 vport, + struct ecore_filter_accept_flags accept_flags, + u8 update_accept_any_vlan, + u8 accept_any_vlan, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/** + * @brief ecore_sp_eth_rx_queue_start - RX Queue Start Ramrod + * + * This ramrod initializes an RX Queue for a VPort. An Assert is generated if + * the VPort ID is not currently initialized. + * + * @param p_hwfn + * @param opaque_fid + * @param rx_queue_id RX Queue ID: Zero based, per VPort, allocated + * by assignment (=rssId) + * @param vport_id VPort ID + * @param u8 stats_id VPort ID which the queue stats + * will be added to + * @param sb Status Block of the Function Event Ring + * @param sb_index Index into the status block of the + * Function Event Ring + * @param bd_max_bytes Maximum bytes that can be placed on a BD + * @param bd_chain_phys_addr Physical address of BDs for receive. + * @param cqe_pbl_addr Physical address of the CQE PBL Table. + * @param cqe_pbl_size Size of the CQE PBL Table + * @param pp_prod Pointer to place producer's + * address for the Rx Q (May be + * NULL). + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_eth_rx_queue_start(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u8 rx_queue_id, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void OSAL_IOMEM * *pp_prod); + +/** + * @brief ecore_sp_eth_rx_queue_stop - + * + * This ramrod closes an RX queue. It sends RX queue stop ramrod + * + CFC delete ramrod + * + * @param p_hwfn + * @param rx_queue_id RX Queue ID + * @param eq_completion_only If True completion will be on + * EQe, if False completion will be + * on EQe if p_hwfn opaque + * different from the RXQ opaque + * otherwise on CQe. + * @param cqe_completion If True completion will be + * receive on CQe. + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_sp_eth_rx_queue_stop(struct ecore_hwfn *p_hwfn, + u16 rx_queue_id, + bool eq_completion_only, bool cqe_completion); + +/** + * @brief ecore_sp_eth_tx_queue_start - TX Queue Start Ramrod + * + * This ramrod initializes a TX Queue for a VPort. An Assert is generated if + * the VPort is not currently initialized. + * + * @param p_hwfn + * @param opaque_fid + * @param tx_queue_id TX Queue ID + * @param vport_id VPort ID + * @param stats_id VPort ID which the queue stats + * will be added to + * @param sb Status Block of the Function Event Ring + * @param sb_index Index into the status block of the Function + * Event Ring + * @param pbl_addr address of the pbl array + * @param pbl_size number of entries in pbl + * @param pp_doorbell Pointer to place doorbell pointer (May be NULL). + * This address should be used with the + * DIRECT_REG_WR macro. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_eth_tx_queue_start(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + u16 tx_queue_id, + u8 vport_id, + u8 stats_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, + void OSAL_IOMEM * * + pp_doorbell); + +/** + * @brief ecore_sp_eth_tx_queue_stop - + * + * This ramrod closes a TX queue. 
It sends TX queue stop ramrod + * + CFC delete ramrod + * + * @param p_hwfn + * @param tx_queue_id TX Queue ID + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_eth_tx_queue_stop(struct ecore_hwfn *p_hwfn, + u16 tx_queue_id); + +enum ecore_tpa_mode { + ECORE_TPA_MODE_NONE, + ECORE_TPA_MODE_RSC, + ECORE_TPA_MODE_GRO, + ECORE_TPA_MODE_MAX +}; + +struct ecore_sp_vport_start_params { + enum ecore_tpa_mode tpa_mode; + bool remove_inner_vlan; /* Inner VLAN removal is enabled */ + bool tx_switching; /* Vport supports tx-switching */ + bool handle_ptp_pkts; /* Handle PTP packets */ + bool only_untagged; /* Untagged pkt control */ + bool drop_ttl0; /* Drop packets with TTL = 0 */ + u8 max_buffers_per_cqe; + u32 concrete_fid; + u16 opaque_fid; + u8 vport_id; /* VPORT ID */ + u16 mtu; /* VPORT MTU */ + bool zero_placement_offset; +}; + +/** + * @brief ecore_sp_vport_start - + * + * This ramrod initializes a VPort. An Assert is generated if the Function ID + * of the VPort is not enabled. + * + * @param p_hwfn + * @param p_params VPORT start params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_sp_vport_start(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_start_params *p_params); + +struct ecore_sp_vport_update_params { + u16 opaque_fid; + u8 vport_id; + u8 update_vport_active_rx_flg; + u8 vport_active_rx_flg; + u8 update_vport_active_tx_flg; + u8 vport_active_tx_flg; + u8 update_inner_vlan_removal_flg; + u8 inner_vlan_removal_flg; + u8 silent_vlan_removal_flg; + u8 update_default_vlan_enable_flg; + u8 default_vlan_enable_flg; + u8 update_default_vlan_flg; + u16 default_vlan; + u8 update_tx_switching_flg; + u8 tx_switching_flg; + u8 update_approx_mcast_flg; + u8 update_anti_spoofing_en_flg; + u8 anti_spoofing_en; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + unsigned long bins[8]; + struct ecore_rss_params *rss_params; + struct ecore_filter_accept_flags accept_flags; + struct ecore_sge_tpa_params *sge_tpa_params; +}; + +/** + * @brief ecore_sp_vport_update - + * + * This ramrod updates the parameters of the VPort. Every field can be updated + * independently, according to flags. + * + * This ramrod is also used to set the VPort state to active after creation. + * An Assert is generated if the VPort does not contain an RX queue. + * + * @param p_hwfn + * @param p_params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_sp_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); +/** + * @brief ecore_sp_vport_stop - + * + * This ramrod closes a VPort after all its RX and TX queues are terminated. + * An Assert is generated if any queues are left open. + * + * @param p_hwfn + * @param opaque_fid + * @param vport_id VPort ID + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_sp_vport_stop(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, u8 vport_id); + +enum _ecore_status_t +ecore_sp_eth_filter_ucast(struct ecore_hwfn *p_hwfn, + u16 opaque_fid, + struct ecore_filter_ucast *p_filter_cmd, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/** + * @brief ecore_sp_eth_rx_queues_update - + * + * This ramrod updates an RX queue. It is used for setting the active state + * of the queue and updating the TPA and SGE parameters. + * + * @note Final phase API.
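+ *
+ * Illustrative call (editor's sketch; the queue range, completion flags and
+ * num_rxqs value are assumptions, not taken from this patch) - refresh all
+ * RX queues of a vport after a TPA/SGE parameter change, completing on the
+ * event ring:
+ *
+ *   rc = ecore_sp_eth_rx_queues_update(p_hwfn, 0, num_rxqs, 0, 1,
+ *                                      ECORE_SPQ_MODE_EBLOCK, OSAL_NULL);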
+ * + * @param p_hwfn + * @param rx_queue_id RX Queue ID + * @param num_rxqs Allow to update multiple rx + * queues, from rx_queue_id to + * (rx_queue_id + num_rxqs) + * @param complete_cqe_flg Post completion to the CQE Ring if set + * @param complete_event_flg Post completion to the Event Ring if set + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t +ecore_sp_eth_rx_queues_update(struct ecore_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 complete_cqe_flg, + u8 complete_event_flg, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +void __ecore_get_vport_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_eth_stats *stats, + u16 statistics_bin, bool b_get_port_stats); + +void ecore_get_vport_stats(struct ecore_dev *p_dev, + struct ecore_eth_stats *stats); + +void ecore_reset_vport_stats(struct ecore_dev *p_dev); + +#endif diff --git a/drivers/net/qede/base/ecore_mcp.c b/drivers/net/qede/base/ecore_mcp.c new file mode 100644 index 00000000..9dd2eed3 --- /dev/null +++ b/drivers/net/qede/base/ecore_mcp.c @@ -0,0 +1,1932 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_status.h" +#include "ecore_mcp.h" +#include "mcp_public.h" +#include "reg_addr.h" +#include "ecore_hw.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_sriov.h" +#include "ecore_iov_api.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "ecore_dcbx.h" + +#define CHIP_MCP_RESP_ITER_US 10 +#define EMUL_MCP_RESP_ITER_US (1000 * 1000) + +#define ECORE_DRV_MB_MAX_RETRIES (500 * 1000) /* Account for 5 sec */ +#define ECORE_MCP_RESET_RETRIES (50 * 1000) /* Account for 500 msec */ + +#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val) \ + ecore_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \ + _val) + +#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \ + ecore_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset)) + +#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \ + DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \ + OFFSETOF(struct public_drv_mb, _field), _val) + +#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \ + DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \ + OFFSETOF(struct public_drv_mb, _field)) + +#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \ + DRV_ID_PDA_COMP_VER_SHIFT) + +#define MCP_BYTES_PER_MBIT_SHIFT 17 + +#ifndef ASIC_ONLY +static int loaded; +static int loaded_port[MAX_NUM_PORTS] = { 0 }; +#endif + +bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base) + return false; + return true; +} + +void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PORT); + u32 mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, addr); + + p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize, + MFW_PORT(p_hwfn)); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "port_addr = 0x%x, port_id 0x%02x\n", + p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn)); +} + +void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length); + OSAL_BE32 tmp; + u32 i; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_TEDIBEAR(p_hwfn->p_dev)) + return; +#endif + + if (!p_hwfn->mcp_info->public_base) + return; + + for (i = 0; i < length; i++) { + tmp = 
ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->mfw_mb_addr + + (i << 2) + sizeof(u32)); + + ((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] = + OSAL_BE32_TO_CPU(tmp); + } +} + +enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn) +{ + if (p_hwfn->mcp_info) { + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_cur); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info->mfw_mb_shadow); + OSAL_SPIN_LOCK_DEALLOC(&p_hwfn->mcp_info->lock); + } + OSAL_FREE(p_hwfn->p_dev, p_hwfn->mcp_info); + p_hwfn->mcp_info = OSAL_NULL; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_load_mcp_offsets(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_info *p_info = p_hwfn->mcp_info; + u32 drv_mb_offsize, mfw_mb_offsize; + u32 mcp_pf_id = MCP_PF_ID(p_hwfn); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, "Emulation - assume no MFW\n"); + p_info->public_base = 0; + return ECORE_INVAL; + } +#endif + + p_info->public_base = ecore_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR); + if (!p_info->public_base) + return ECORE_INVAL; + + p_info->public_base |= GRCBASE_MCP; + + /* Calculate the driver and MFW mailbox address */ + drv_mb_offsize = ecore_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_DRV_MB)); + p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id); + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n", + drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id); + + /* Set the MFW MB address */ + mfw_mb_offsize = ecore_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_info->public_base, + PUBLIC_MFW_MB)); + p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id); + p_info->mfw_mb_length = (u16)ecore_rd(p_hwfn, p_ptt, + p_info->mfw_mb_addr); + + /* Get the current driver mailbox sequence before sending + * the first command + */ + p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + + /* Get current FW pulse sequence */ + p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) & + DRV_PULSE_SEQ_MASK; + + p_info->mcp_hist = (u16)ecore_rd(p_hwfn, p_ptt, + MISCS_REG_GENERIC_POR_0); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_info *p_info; + u32 size; + + /* Allocate mcp_info structure */ + p_hwfn->mcp_info = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(*p_hwfn->mcp_info)); + if (!p_hwfn->mcp_info) + goto err; + p_info = p_hwfn->mcp_info; + + if (ecore_load_mcp_offsets(p_hwfn, p_ptt) != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, false, "MCP is not initialized\n"); + /* Do not free mcp_info here, since public_base indicate that + * the MCP is not initialized + */ + return ECORE_SUCCESS; + } + + size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32); + p_info->mfw_mb_cur = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); + p_info->mfw_mb_shadow = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, size); + if (!p_info->mfw_mb_shadow || !p_info->mfw_mb_addr) + goto err; + + /* Initialize the MFW spinlock */ + OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_info->lock); + OSAL_SPIN_LOCK_INIT(&p_info->lock); + + return ECORE_SUCCESS; + +err: + DP_NOTICE(p_hwfn, true, "Failed to allocate mcp memory\n"); + ecore_mcp_free(p_hwfn); + return ECORE_NOMEM; +} + +enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 seq = ++p_hwfn->mcp_info->drv_mb_seq; + u32 delay = CHIP_MCP_RESP_ITER_US; + u32 org_mcp_reset_seq, cnt = 
0; + enum _ecore_status_t rc = ECORE_SUCCESS; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + delay = EMUL_MCP_RESP_ITER_US; +#endif + + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock); + + /* Set drv command along with the updated sequence */ + org_mcp_reset_seq = ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0); + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq)); + + do { + /* Wait for MFW response */ + OSAL_UDELAY(delay); + /* Give the FW up to 500 msec (50*1000*10usec) */ + } while ((org_mcp_reset_seq == ecore_rd(p_hwfn, p_ptt, + MISCS_REG_GENERIC_POR_0)) && + (cnt++ < ECORE_MCP_RESET_RETRIES)); + + if (org_mcp_reset_seq != + ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "MCP was reset after %d usec\n", cnt * delay); + } else { + DP_ERR(p_hwfn, "Failed to reset MCP\n"); + rc = ECORE_AGAIN; + } + + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock); + + return rc; +} + +/* Should be called while the dedicated spinlock is acquired */ +static enum _ecore_status_t ecore_do_mcp_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + u32 delay = CHIP_MCP_RESP_ITER_US; + u32 seq, cnt = 1, actual_mb_seq; + enum _ecore_status_t rc = ECORE_SUCCESS; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + delay = EMUL_MCP_RESP_ITER_US; +#endif + + /* Get actual driver mailbox sequence */ + actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) & + DRV_MSG_SEQ_NUMBER_MASK; + + /* Use MCP history register to check if MCP reset occurred between + * init time and now. + */ + if (p_hwfn->mcp_info->mcp_hist != + ecore_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Rereading MCP offsets\n"); + ecore_load_mcp_offsets(p_hwfn, p_ptt); + ecore_mcp_cmd_port_init(p_hwfn, p_ptt); + } + seq = ++p_hwfn->mcp_info->drv_mb_seq; + + /* Set drv param */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param); + + /* Set drv command along with the updated sequence */ + DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq)); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "wrote command (%x) to MFW MB param 0x%08x\n", + (cmd | seq), param); + + do { + /* Wait for MFW response */ + OSAL_UDELAY(delay); + *o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header); + + /* Give the FW up to 5 seconds (500*1000*10usec) */ + } while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) && + (cnt++ < ECORE_DRV_MB_MAX_RETRIES)); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "[after %d ms] read (%x) seq is (%x) from FW MB\n", + cnt * delay, *o_mcp_resp, seq); + + /* Is this a reply to our command? */ + if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) { + *o_mcp_resp &= FW_MSG_CODE_MASK; + /* Get the MCP param */ + *o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param); + } else { + /* FW BUG!
*/ + DP_ERR(p_hwfn, "MFW failed to respond [cmd 0x%x param 0x%x]\n", + cmd, param); + *o_mcp_resp = 0; + rc = ECORE_AGAIN; + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_MFW_RESP_FAIL); + } + return rc; +} + +enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 cmd, u32 param, + u32 *o_mcp_resp, u32 *o_mcp_param) +{ +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + if (cmd == DRV_MSG_CODE_UNLOAD_REQ) { + loaded--; + loaded_port[p_hwfn->port_id]--; + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Unload cnt: 0x%x\n", + loaded); + } + return ECORE_SUCCESS; + } +#endif + + return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, OSAL_NULL, + o_mcp_resp, o_mcp_param); +} + +enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, u32 param, + union drv_union_data *p_union_data, + u32 *o_mcp_resp, + u32 *o_mcp_param) +{ + u32 union_data_addr; + enum _ecore_status_t rc; + + /* MCP not initialized */ + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + return ECORE_BUSY; + } + + /* Acquiring a spinlock is needed to ensure that only a single thread + * is accessing the mailbox at a certain time. + */ + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock); + + if (p_union_data != OSAL_NULL) { + union_data_addr = p_hwfn->mcp_info->drv_mb_addr + + OFFSETOF(struct public_drv_mb, union_data); + ecore_memcpy_to(p_hwfn, p_ptt, union_data_addr, p_union_data, + sizeof(*p_union_data)); + } + + rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp, + o_mcp_param); + + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock); + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 i_txn_size, u32 *i_buf) +{ + union drv_union_data union_data; + + OSAL_MEMCPY((u32 *)&union_data.raw_data, i_buf, i_txn_size); + + return ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, param, &union_data, + o_mcp_resp, o_mcp_param); +} + +enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf) +{ + enum _ecore_status_t rc; + u32 i; + + /* MCP not initialized */ + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + return ECORE_BUSY; + } + + OSAL_SPIN_LOCK(&p_hwfn->mcp_info->lock); + rc = ecore_do_mcp_cmd(p_hwfn, p_ptt, cmd, param, o_mcp_resp, + o_mcp_param); + if (rc != ECORE_SUCCESS) + goto out; + + /* Get payload after operation completes successfully */ + *o_txn_size = *o_mcp_param; + for (i = 0; i < *o_txn_size; i += 4) + o_buf[i / sizeof(u32)] = DRV_MB_RD(p_hwfn, p_ptt, + union_data.raw_data[i]); + +out: + OSAL_SPIN_UNLOCK(&p_hwfn->mcp_info->lock); + return rc; +} + +#ifndef ASIC_ONLY +static void ecore_mcp_mf_workaround(struct ecore_hwfn *p_hwfn, + u32 *p_load_code) +{ + static int load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE; + + if (!loaded) + load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE; + else if (!loaded_port[p_hwfn->port_id]) + load_phase = FW_MSG_CODE_DRV_LOAD_PORT; + else + load_phase = FW_MSG_CODE_DRV_LOAD_FUNCTION; + + /* On CMT, always tell that it's engine */ + if (p_hwfn->p_dev->num_hwfns > 1) + load_phase = FW_MSG_CODE_DRV_LOAD_ENGINE; + + *p_load_code = load_phase; + loaded++; + loaded_port[p_hwfn->port_id]++; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Load phase: %x load cnt: 0x%x port id=%d port_load=%d\n", + 
*p_load_code, loaded, p_hwfn->port_id, + loaded_port[p_hwfn->port_id]); +} +#endif + +enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_load_code) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + union drv_union_data union_data; + u32 param; + enum _ecore_status_t rc; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + ecore_mcp_mf_workaround(p_hwfn, p_load_code); + return ECORE_SUCCESS; + } +#endif + + OSAL_MEMCPY(&union_data.ver_str, p_dev->ver_str, MCP_DRV_VER_STR_SIZE); + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ, + (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT | + p_dev->drv_type), + &union_data, p_load_code, ¶m); + + /* if mcp fails to respond we must abort */ + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + /* If MFW refused (e.g. other port is in diagnostic mode) we + * must abort. This can happen in the following cases: + * - Other port is in diagnostic mode + * - Previously loaded function on the engine is not compliant with + * the requester. + * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION. + * - + */ + if (!(*p_load_code) || + ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) || + ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) || + ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) { + DP_ERR(p_hwfn, "MCP refused load request, aborting\n"); + return ECORE_BUSY; + } + + return ECORE_SUCCESS; +} + +static void ecore_mcp_handle_vf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); + u32 path_addr = SECTION_ADDR(mfw_path_offsize, + ECORE_PATH_ID(p_hwfn)); + u32 disabled_vfs[VF_MAX_STATIC / 32]; + int i; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, + "Reading Disabled VF information from [offset %08x]," + " path_addr %08x\n", + mfw_path_offsize, path_addr); + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) { + disabled_vfs[i] = ecore_rd(p_hwfn, p_ptt, + path_addr + + OFFSETOF(struct public_path, + mcp_vf_disabled) + + sizeof(u32) * i); + DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV), + "FLR-ed VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, disabled_vfs[i]); + } + + if (ecore_iov_mark_vf_flr(p_hwfn, disabled_vfs)) + OSAL_VF_FLR_UPDATE(p_hwfn); +} + +enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *vfs_to_ack) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_func_offsize = ecore_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_func_offsize, + MCP_PF_ID(p_hwfn)); + union drv_union_data union_data; + u32 resp, param; + enum _ecore_status_t rc; + int i; + + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV), + "Acking VFs [%08x,...,%08x] - %08x\n", + i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]); + + OSAL_MEMCPY(&union_data.ack_vf_disabled, vfs_to_ack, VF_MAX_STATIC / 8); + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, + DRV_MSG_CODE_VF_DISABLED_DONE, 0, + &union_data, &resp, ¶m); + if (rc != ECORE_SUCCESS) { + DP_NOTICE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IOV), + "Failed to pass ACK for VF flr to MFW\n"); + return ECORE_TIMEOUT; + } + + /* TMP - clear the ACK bits; should be done by MFW */ + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + ecore_wr(p_hwfn, p_ptt, + func_addr + + OFFSETOF(struct public_func, drv_ack_vf_disabled) + + i 
* sizeof(u32), 0); + + return rc; +} + +static void ecore_mcp_handle_transceiver_change(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 transceiver_state; + + transceiver_state = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + transceiver_data)); + + DP_VERBOSE(p_hwfn, (ECORE_MSG_HW | ECORE_MSG_SP), + "Received transceiver state update [0x%08x] from mfw" + "[Addr 0x%x]\n", + transceiver_state, (u32)(p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + transceiver_data))); + + transceiver_state = GET_FIELD(transceiver_state, PMM_TRANSCEIVER_STATE); + + if (transceiver_state == PMM_TRANSCEIVER_STATE_PRESENT) + DP_NOTICE(p_hwfn, false, "Transceiver is present.\n"); + else + DP_NOTICE(p_hwfn, false, "Transceiver is unplugged.\n"); +} + +static void ecore_mcp_handle_link_change(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, bool b_reset) +{ + struct ecore_mcp_link_state *p_link; + u32 status = 0; + + p_link = &p_hwfn->mcp_info->link_output; + OSAL_MEMSET(p_link, 0, sizeof(*p_link)); + if (!b_reset) { + status = ecore_rd(p_hwfn, p_ptt, + p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, link_status)); + DP_VERBOSE(p_hwfn, (ECORE_MSG_LINK | ECORE_MSG_SP), + "Received link update [0x%08x] from mfw" + " [Addr 0x%x]\n", + status, (u32)(p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, + link_status))); + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Resetting link indications\n"); + return; + } + + if (p_hwfn->b_drv_link_init) + p_link->link_up = !!(status & LINK_STATUS_LINK_UP); + else + p_link->link_up = false; + + p_link->full_duplex = true; + switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) { + case LINK_STATUS_SPEED_AND_DUPLEX_100G: + p_link->speed = 100000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_50G: + p_link->speed = 50000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_40G: + p_link->speed = 40000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_25G: + p_link->speed = 25000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_20G: + p_link->speed = 20000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_10G: + p_link->speed = 10000; + break; + case LINK_STATUS_SPEED_AND_DUPLEX_1000THD: + p_link->full_duplex = false; + /* Fall-through */ + case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD: + p_link->speed = 1000; + break; + default: + p_link->speed = 0; + } + + /* We never store total line speed as p_link->speed is + * again changes according to bandwidth allocation. + */ + if (p_link->link_up && p_link->speed) + p_link->line_speed = p_link->speed; + else + p_link->line_speed = 0; + + /* Correct speed according to bandwidth allocation */ + if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) { + u8 max_bw = p_hwfn->mcp_info->func_info.bandwidth_max; + + __ecore_configure_pf_max_bandwidth(p_hwfn, p_ptt, + p_link, max_bw); + } + + if (p_hwfn->mcp_info->func_info.bandwidth_min && p_link->speed) { + u8 min_bw = p_hwfn->mcp_info->func_info.bandwidth_min; + + __ecore_configure_pf_min_bandwidth(p_hwfn, p_ptt, + p_link, min_bw); + + ecore_configure_vp_wfq_on_link_change(p_hwfn->p_dev, + p_link->min_pf_rate); + } + + p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED); + p_link->an_complete = !!(status & LINK_STATUS_AUTO_NEGOTIATE_COMPLETE); + p_link->parallel_detection = !!(status & + LINK_STATUS_PARALLEL_DETECTION_USED); + p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED); + + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ? 
+ ECORE_LINK_PARTNER_SPEED_1G_FD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_1G_HD : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_10G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_20G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_25G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_40G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_50G : 0; + p_link->partner_adv_speed |= + (status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ? + ECORE_LINK_PARTNER_SPEED_100G : 0; + + p_link->partner_tx_flow_ctrl_en = + !!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED); + p_link->partner_rx_flow_ctrl_en = + !!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED); + + switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) { + case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE: + p_link->partner_adv_pause = ECORE_LINK_PARTNER_SYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE: + p_link->partner_adv_pause = ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE; + break; + case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE: + p_link->partner_adv_pause = ECORE_LINK_PARTNER_BOTH_PAUSE; + break; + default: + p_link->partner_adv_pause = 0; + } + + p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT); + + if (p_link->link_up) + ecore_dcbx_eagle_workaround(p_hwfn, p_ptt, p_link->pfc_enabled); + + OSAL_LINK_UPDATE(p_hwfn); +} + +enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, bool b_up) +{ + struct ecore_mcp_link_params *params = &p_hwfn->mcp_info->link_input; + union drv_union_data union_data; + struct pmm_phy_cfg *p_phy_cfg; + u32 param = 0, reply = 0, cmd; + enum _ecore_status_t rc = ECORE_SUCCESS; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) + return ECORE_SUCCESS; +#endif + + /* Set the shmem configuration according to params */ + p_phy_cfg = &union_data.drv_phy_cfg; + OSAL_MEMSET(p_phy_cfg, 0, sizeof(*p_phy_cfg)); + cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET; + if (!params->speed.autoneg) + p_phy_cfg->speed = params->speed.forced_speed; + p_phy_cfg->pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0; + p_phy_cfg->pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0; + p_phy_cfg->pause |= (params->pause.forced_tx) ? 
PMM_PAUSE_TX : 0; + p_phy_cfg->adv_speed = params->speed.advertised_speeds; + p_phy_cfg->loopback_mode = params->loopback_mode; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, + "Link on FPGA - Ask for loopback mode '5' at 10G\n"); + p_phy_cfg->loopback_mode = 5; + p_phy_cfg->speed = 10000; + } +#endif + + p_hwfn->b_drv_link_init = b_up; + + if (b_up) + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Configuring Link: Speed 0x%08x, Pause 0x%08x," + " adv_speed 0x%08x, loopback 0x%08x," + " features 0x%08x\n", + p_phy_cfg->speed, p_phy_cfg->pause, + p_phy_cfg->adv_speed, p_phy_cfg->loopback_mode, + p_phy_cfg->feature_config_flags); + else + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, "Resetting link\n"); + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, cmd, 0, &union_data, &reply, + ¶m); + + /* if mcp fails to respond we must abort */ + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + return rc; + } + + /* Reset the link status if needed */ + if (!b_up) + ecore_mcp_handle_link_change(p_hwfn, p_ptt, true); + + return rc; +} + +u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt; + + /* TODO - Add support for VFs */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_PATH); + path_offsize = ecore_rd(p_hwfn, p_ptt, path_offsize_addr); + path_addr = SECTION_ADDR(path_offsize, ECORE_PATH_ID(p_hwfn)); + + proc_kill_cnt = ecore_rd(p_hwfn, p_ptt, + path_addr + + OFFSETOF(struct public_path, process_kill)) & + PROCESS_KILL_COUNTER_MASK; + + return proc_kill_cnt; +} + +static void ecore_mcp_handle_process_kill(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + u32 proc_kill_cnt; + + /* Prevent possible attentions/interrupts during the recovery handling + * and till its load phase, during which they will be re-enabled. + */ + ecore_int_igu_disable_int(p_hwfn, p_ptt); + + DP_NOTICE(p_hwfn, false, "Received a process kill indication\n"); + + /* The following operations should be done once, and thus in CMT mode + * are carried out by only the first HW function. 
+ */ + if (p_hwfn != ECORE_LEADING_HWFN(p_dev)) + return; + + if (p_dev->recov_in_prog) { + DP_NOTICE(p_hwfn, false, + "Ignoring the indication since a recovery" + " process is already in progress\n"); + return; + } + + p_dev->recov_in_prog = true; + + proc_kill_cnt = ecore_get_process_kill_counter(p_hwfn, p_ptt); + DP_NOTICE(p_hwfn, false, "Process kill counter: %d\n", proc_kill_cnt); + + OSAL_SCHEDULE_RECOVERY_HANDLER(p_hwfn); +} + +static void ecore_mcp_send_protocol_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum MFW_DRV_MSG_TYPE type) +{ + enum ecore_mcp_protocol_type stats_type; + union ecore_mcp_protocol_stats stats; + u32 hsi_param, param = 0, reply = 0; + union drv_union_data union_data; + + switch (type) { + case MFW_DRV_MSG_GET_LAN_STATS: + stats_type = ECORE_MCP_LAN_STATS; + hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN; + break; + default: + DP_NOTICE(p_hwfn, false, "Invalid protocol type %d\n", type); + return; + } + + OSAL_GET_PROTOCOL_STATS(p_hwfn->p_dev, stats_type, &stats); + + OSAL_MEMCPY(&union_data, &stats, sizeof(stats)); + + ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_GET_STATS, + hsi_param, &union_data, &reply, ¶m); +} + +static u32 ecore_mcp_get_shmem_func(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct public_func *p_data, int pfid) +{ + u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base, + PUBLIC_FUNC); + u32 mfw_path_offsize = ecore_rd(p_hwfn, p_ptt, addr); + u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid); + u32 i, size; + + OSAL_MEM_ZERO(p_data, sizeof(*p_data)); + + size = OSAL_MIN_T(u32, sizeof(*p_data), SECTION_SIZE(mfw_path_offsize)); + for (i = 0; i < size / sizeof(u32); i++) + ((u32 *)p_data)[i] = ecore_rd(p_hwfn, p_ptt, + func_addr + (i << 2)); + + return size; +} + +static void +ecore_read_pf_bandwidth(struct ecore_hwfn *p_hwfn, + struct public_func *p_shmem_info) +{ + struct ecore_mcp_function_info *p_info; + + p_info = &p_hwfn->mcp_info->func_info; + + /* TODO - bandwidth min/max should have valid values of 1-100, + * as well as some indication that the feature is disabled. + * Until MFW/qlediag enforce those limitations, Assume THERE IS ALWAYS + * limit and correct value to min `1' and max `100' if limit isn't in + * range. + */ + p_info->bandwidth_min = (p_shmem_info->config & + FUNC_MF_CFG_MIN_BW_MASK) >> + FUNC_MF_CFG_MIN_BW_SHIFT; + if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) { + DP_INFO(p_hwfn, + "bandwidth minimum out of bounds [%02x]. Set to 1\n", + p_info->bandwidth_min); + p_info->bandwidth_min = 1; + } + + p_info->bandwidth_max = (p_shmem_info->config & + FUNC_MF_CFG_MAX_BW_MASK) >> + FUNC_MF_CFG_MAX_BW_SHIFT; + if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) { + DP_INFO(p_hwfn, + "bandwidth maximum out of bounds [%02x]. 
Set to 100\n", + p_info->bandwidth_max); + p_info->bandwidth_max = 100; + } +} + +static void +ecore_mcp_update_bw(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_function_info *p_info; + struct public_func shmem_info; + u32 resp = 0, param = 0; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + + ecore_read_pf_bandwidth(p_hwfn, &shmem_info); + + p_info = &p_hwfn->mcp_info->func_info; + + ecore_configure_pf_min_bandwidth(p_hwfn->p_dev, p_info->bandwidth_min); + + ecore_configure_pf_max_bandwidth(p_hwfn->p_dev, p_info->bandwidth_max); + + /* Acknowledge the MFW */ + ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp, + ¶m); +} + +static void ecore_mcp_handle_fan_failure(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + /* A single notification should be sent to upper driver in CMT mode */ + if (p_hwfn != ECORE_LEADING_HWFN(p_hwfn->p_dev)) + return; + + DP_NOTICE(p_hwfn, false, + "Fan failure was detected on the network interface card" + " and it's going to be shut down.\n"); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_FAN_FAIL); +} + +enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_info *info = p_hwfn->mcp_info; + enum _ecore_status_t rc = ECORE_SUCCESS; + bool found = false; + u16 i; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SP, "Received message from MFW\n"); + + /* Read Messages from MFW */ + ecore_mcp_read_mb(p_hwfn, p_ptt); + + /* Compare current messages to old ones */ + for (i = 0; i < info->mfw_mb_length; i++) { + if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i]) + continue; + + found = true; + + DP_VERBOSE(p_hwfn, ECORE_MSG_LINK, + "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n", + i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]); + + switch (i) { + case MFW_DRV_MSG_LINK_CHANGE: + ecore_mcp_handle_link_change(p_hwfn, p_ptt, false); + break; + case MFW_DRV_MSG_VF_DISABLED: + ecore_mcp_handle_vf_flr(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_LLDP_DATA_UPDATED: + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_REMOTE_LLDP_MIB); + break; + case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED: + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_REMOTE_MIB); + break; + case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED: + ecore_dcbx_mib_update_event(p_hwfn, p_ptt, + ECORE_DCBX_OPERATIONAL_MIB); + break; + case MFW_DRV_MSG_ERROR_RECOVERY: + ecore_mcp_handle_process_kill(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_GET_LAN_STATS: + case MFW_DRV_MSG_GET_FCOE_STATS: + case MFW_DRV_MSG_GET_ISCSI_STATS: + case MFW_DRV_MSG_GET_RDMA_STATS: + ecore_mcp_send_protocol_stats(p_hwfn, p_ptt, i); + break; + case MFW_DRV_MSG_BW_UPDATE: + ecore_mcp_update_bw(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE: + ecore_mcp_handle_transceiver_change(p_hwfn, p_ptt); + break; + case MFW_DRV_MSG_FAILURE_DETECTED: + ecore_mcp_handle_fan_failure(p_hwfn, p_ptt); + break; + default: + /* @DPDK */ + DP_NOTICE(p_hwfn, false, + "Unimplemented MFW message %d\n", i); + rc = ECORE_INVAL; + } + } + + /* ACK everything */ + for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) { + OSAL_BE32 val = OSAL_CPU_TO_BE32(((u32 *)info->mfw_mb_cur)[i]); + + /* MFW expect answer in BE, so we force write in that format */ + ecore_wr(p_hwfn, p_ptt, + info->mfw_mb_addr + sizeof(u32) + + MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) * + sizeof(u32) + i * sizeof(u32), val); + } + + if (!found) { + DP_NOTICE(p_hwfn, false, + "Received an MFW message indication but no" + " new 
message!\n"); + rc = ECORE_INVAL; + } + + /* Copy the new mfw messages into the shadow */ + OSAL_MEMCPY(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length); + + return rc; +} + +enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev, + struct ecore_ptt *p_ptt, + u32 *p_mfw_ver, + u32 *p_running_bundle_id) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + u32 global_offsize; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_dev)) { + DP_NOTICE(p_dev, false, "Emulation - can't get MFW version\n"); + return ECORE_SUCCESS; + } +#endif + + if (IS_VF(p_dev)) { + if (p_hwfn->vf_iov_info) { + struct pfvf_acquire_resp_tlv *p_resp; + + p_resp = &p_hwfn->vf_iov_info->acquire_resp; + *p_mfw_ver = p_resp->pfdev_info.mfw_ver; + return ECORE_SUCCESS; + } + + DP_VERBOSE(p_dev, ECORE_MSG_IOV, + "VF requested MFW vers prior to ACQUIRE\n"); + return ECORE_INVAL; + } + + global_offsize = ecore_rd(p_hwfn, p_ptt, + SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info-> + public_base, + PUBLIC_GLOBAL)); + *p_mfw_ver = + ecore_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, + 0) + OFFSETOF(struct public_global, mfw_ver)); + + if (p_running_bundle_id != OSAL_NULL) { + *p_running_bundle_id = ecore_rd(p_hwfn, p_ptt, + SECTION_ADDR(global_offsize, + 0) + + OFFSETOF(struct public_global, + running_bundle_id)); + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev, + u32 *p_media_type) +{ + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0]; + struct ecore_ptt *p_ptt; + + /* TODO - Add support for VFs */ + if (IS_VF(p_dev)) + return ECORE_INVAL; + + if (!ecore_mcp_is_init(p_hwfn)) { + DP_NOTICE(p_hwfn, true, "MFW is not initialized !\n"); + return ECORE_BUSY; + } + + *p_media_type = MEDIA_UNSPECIFIED; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + *p_media_type = ecore_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr + + OFFSETOF(struct public_port, media_type)); + + ecore_ptt_release(p_hwfn, p_ptt); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_mcp_get_shmem_proto(struct ecore_hwfn *p_hwfn, + struct public_func *p_info, + enum ecore_pci_personality *p_proto) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) { + case FUNC_MF_CFG_PROTOCOL_ETHERNET: + *p_proto = ECORE_PCI_ETH; + break; + default: + rc = ECORE_INVAL; + } + + return rc; +} + +enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_mcp_function_info *info; + struct public_func shmem_info; + + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn)); + info = &p_hwfn->mcp_info->func_info; + + info->pause_on_host = (shmem_info.config & + FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0; + + if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, &info->protocol)) { + DP_ERR(p_hwfn, "Unknown personality %08x\n", + (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK)); + return ECORE_INVAL; + } + + ecore_read_pf_bandwidth(p_hwfn, &shmem_info); + + if (shmem_info.mac_upper || shmem_info.mac_lower) { + info->mac[0] = (u8)(shmem_info.mac_upper >> 8); + info->mac[1] = (u8)(shmem_info.mac_upper); + info->mac[2] = (u8)(shmem_info.mac_lower >> 24); + info->mac[3] = (u8)(shmem_info.mac_lower >> 16); + info->mac[4] = (u8)(shmem_info.mac_lower >> 8); + info->mac[5] = (u8)(shmem_info.mac_lower); + } else { + /* TODO - are there protocols for which there's no MAC? 
*/ + DP_NOTICE(p_hwfn, false, "MAC is 0 in shmem\n"); + } + + info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK); + + DP_VERBOSE(p_hwfn, (ECORE_MSG_SP | ECORE_MSG_IFUP), + "Read configuration from shmem: pause_on_host %02x" + " protocol %02x BW [%02x - %02x]" + " MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %" PRIx64 + " node %" PRIx64 " ovlan %04x\n", + info->pause_on_host, info->protocol, + info->bandwidth_min, info->bandwidth_max, + info->mac[0], info->mac[1], info->mac[2], + info->mac[3], info->mac[4], info->mac[5], + info->wwn_port, info->wwn_node, info->ovlan); + + return ECORE_SUCCESS; +} + +struct ecore_mcp_link_params +*ecore_mcp_get_link_params(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + return &p_hwfn->mcp_info->link_input; +} + +struct ecore_mcp_link_state +*ecore_mcp_get_link_state(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) { + DP_INFO(p_hwfn, "Non-ASIC - always notify that link is up\n"); + p_hwfn->mcp_info->link_output.link_up = true; + } +#endif + + return &p_hwfn->mcp_info->link_output; +} + +struct ecore_mcp_link_capabilities +*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + return &p_hwfn->mcp_info->link_capabilities; +} + +enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc; + u32 resp = 0, param = 0; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, + DRV_MSG_CODE_NIG_DRAIN, 100, &resp, ¶m); + + /* Wait for the drain to complete before returning */ + OSAL_MSLEEP(120); + + return rc; +} + +const struct ecore_mcp_function_info +*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn || !p_hwfn->mcp_info) + return OSAL_NULL; + return &p_hwfn->mcp_info->func_info; +} + +enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_nvm_params *params) +{ + enum _ecore_status_t rc; + + switch (params->type) { + case ECORE_MCP_NVM_RD: + rc = ecore_mcp_nvm_rd_cmd(p_hwfn, p_ptt, params->nvm_common.cmd, + params->nvm_common.offset, + ¶ms->nvm_common.resp, + ¶ms->nvm_common.param, + params->nvm_rd.buf_size, + params->nvm_rd.buf); + break; + case ECORE_MCP_CMD: + rc = ecore_mcp_cmd(p_hwfn, p_ptt, params->nvm_common.cmd, + params->nvm_common.offset, + ¶ms->nvm_common.resp, + ¶ms->nvm_common.param); + break; + case ECORE_MCP_NVM_WR: + rc = ecore_mcp_nvm_wr_cmd(p_hwfn, p_ptt, params->nvm_common.cmd, + params->nvm_common.offset, + ¶ms->nvm_common.resp, + ¶ms->nvm_common.param, + params->nvm_wr.buf_size, + params->nvm_wr.buf); + break; + default: + rc = ECORE_NOTIMPL; + break; + } + return rc; +} + +int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 personalities) +{ + enum ecore_pci_personality protocol = ECORE_PCI_DEFAULT; + struct public_func shmem_info; + int i, count = 0, num_pfs; + + num_pfs = NUM_OF_ENG_PFS(p_hwfn->p_dev); + + for (i = 0; i < num_pfs; i++) { + ecore_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, + MCP_PF_ID_BY_REL(p_hwfn, i)); + if (shmem_info.config & FUNC_MF_CFG_FUNC_HIDE) + continue; + + if (ecore_mcp_get_shmem_proto(p_hwfn, &shmem_info, + &protocol) != ECORE_SUCCESS) + continue; + + if ((1 << ((u32)protocol)) & personalities) + count++; + } + + return count; +} + +enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, 
+ u32 *p_flash_size) +{ + u32 flash_size; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_EMUL(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, "Emulation - can't get flash size\n"); + return ECORE_INVAL; + } +#endif + + if (IS_VF(p_hwfn->p_dev)) + return ECORE_INVAL; + + flash_size = ecore_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4); + flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >> + MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT; + flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT)); + + *p_flash_size = flash_size; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_dev *p_dev = p_hwfn->p_dev; + + if (p_dev->recov_in_prog) { + DP_NOTICE(p_hwfn, false, + "Avoid triggering a recovery since such a process" + " is already in progress\n"); + return ECORE_AGAIN; + } + + DP_NOTICE(p_hwfn, false, "Triggering a recovery process\n"); + ecore_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vf_id, u8 num) +{ + u32 resp = 0, param = 0, rc_param = 0; + enum _ecore_status_t rc; + + param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK; + param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) & + DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param, + &resp, &rc_param); + + if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) { + DP_NOTICE(p_hwfn, true, "VF[%d]: MFW failed to set MSI-X\n", + vf_id); + rc = ECORE_INVAL; + } + + return rc; +} + +enum _ecore_status_t +ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_mcp_drv_version *p_ver) +{ + u32 param = 0, reply = 0, num_words, i; + struct drv_version_stc *p_drv_version; + union drv_union_data union_data; + void *p_name; + OSAL_BE32 val; + enum _ecore_status_t rc; + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_SLOW(p_hwfn->p_dev)) + return ECORE_SUCCESS; +#endif + + p_drv_version = &union_data.drv_version; + p_drv_version->version = p_ver->version; + num_words = (MCP_DRV_VER_STR_SIZE - 4) / 4; + for (i = 0; i < num_words; i++) { + p_name = &p_ver->name[i * sizeof(u32)]; + val = OSAL_CPU_TO_BE32(*(u32 *)p_name); + *(u32 *)&p_drv_version->name[i * sizeof(u32)] = val; + } + + rc = ecore_mcp_cmd_and_union(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, + &union_data, &reply, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc; + u32 resp = 0, param = 0; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp, + ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 value, cpu_mode; + + ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff); + + value = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + value &= ~MCP_REG_CPU_MODE_SOFT_HALT; + ecore_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value); + cpu_mode = ecore_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE); + + return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? 
-1 : 0; +} + +enum _ecore_status_t +ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_ov_config_method config, + enum ecore_ov_client client) +{ + enum _ecore_status_t rc; + u32 resp = 0, param = 0; + u32 drv_mb_param; + + switch (config) { + case ECORE_OV_CLIENT_DRV: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS; + break; + case ECORE_OV_CLIENT_USER: + drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid client type %d\n", config); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +enum _ecore_status_t +ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_ov_driver_state drv_state) +{ + enum _ecore_status_t rc; + u32 resp = 0, param = 0; + u32 drv_mb_param; + + switch (drv_state) { + case ECORE_OV_DRIVER_STATE_NOT_LOADED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED; + break; + case ECORE_OV_DRIVER_STATE_DISABLED: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED; + break; + case ECORE_OV_DRIVER_STATE_ACTIVE: + drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid driver state %d\n", drv_state); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE, + drv_state, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +enum _ecore_status_t +ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt, + struct ecore_fc_npiv_tbl *p_table) +{ + return 0; +} + +enum _ecore_status_t +ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 mtu) +{ + return 0; +} + +enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_led_mode mode) +{ + u32 resp = 0, param = 0, drv_mb_param; + enum _ecore_status_t rc; + + switch (mode) { + case ECORE_LED_MODE_ON: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON; + break; + case ECORE_LED_MODE_OFF: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF; + break; + case ECORE_LED_MODE_RESTORE: + drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER; + break; + default: + DP_NOTICE(p_hwfn, true, "Invalid LED mode %d\n", mode); + return ECORE_INVAL; + } + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE, + drv_mb_param, &resp, ¶m); + if (rc != ECORE_SUCCESS) + DP_ERR(p_hwfn, "MCP response failure, aborting\n"); + + return rc; +} + +enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 mask_parities) +{ + enum _ecore_status_t rc; + u32 resp = 0, param = 0; + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES, + mask_parities, &resp, ¶m); + + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, + "MCP response failure for mask parities, aborting\n"); + } else if (resp != FW_MSG_CODE_OK) { + DP_ERR(p_hwfn, + "MCP did not ack mask parity request. 
Old MFW?\n"); + rc = ECORE_INVAL; + } + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, + u8 *p_buf, u32 len) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + u32 bytes_left, offset, bytes_to_copy, buf_size; + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc = ECORE_SUCCESS; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + bytes_left = len; + offset = 0; + params.type = ECORE_MCP_NVM_RD; + params.nvm_rd.buf_size = &buf_size; + params.nvm_common.cmd = DRV_MSG_CODE_NVM_READ_NVRAM; + while (bytes_left > 0) { + bytes_to_copy = OSAL_MIN_T(u32, bytes_left, + MCP_DRV_NVM_BUF_LEN); + params.nvm_common.offset = (addr + offset) | + (bytes_to_copy << DRV_MB_PARAM_NVM_LEN_SHIFT); + params.nvm_rd.buf = (u32 *)(p_buf + offset); + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + if (rc != ECORE_SUCCESS || (params.nvm_common.resp != + FW_MSG_CODE_NVM_OK)) { + DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + break; + } + offset += *params.nvm_rd.buf_size; + bytes_left -= *params.nvm_rd.buf_size; + } + + p_dev->mcp_nvm_resp = params.nvm_common.resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + params.type = ECORE_MCP_NVM_RD; + params.nvm_rd.buf_size = &len; + params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_READ) ? 
+ DRV_MSG_CODE_PHY_CORE_READ : DRV_MSG_CODE_PHY_RAW_READ; + params.nvm_common.offset = addr; + params.nvm_rd.buf = (u32 *)p_buf; + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + + p_dev->mcp_nvm_resp = params.nvm_common.resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + OSAL_MEMCPY(p_buf, &p_dev->mcp_nvm_resp, sizeof(p_dev->mcp_nvm_resp)); + ecore_ptt_release(p_hwfn, p_ptt); + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + params.type = ECORE_MCP_CMD; + params.nvm_common.cmd = DRV_MSG_CODE_NVM_DEL_FILE; + params.nvm_common.offset = addr; + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + p_dev->mcp_nvm_resp = params.nvm_common.resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, + u32 addr) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + params.type = ECORE_MCP_CMD; + params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN; + params.nvm_common.offset = addr; + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + p_dev->mcp_nvm_resp = params.nvm_common.resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +/* rc receives ECORE_INVAL as default parameter because + * it might not enter the while loop if the len is 0 + */ +enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + u32 buf_idx, buf_size; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + params.type = ECORE_MCP_NVM_WR; + if (cmd == ECORE_PUT_FILE_DATA) + params.nvm_common.cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA; + else + params.nvm_common.cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM; + buf_idx = 0; + while (buf_idx < len) { + buf_size = OSAL_MIN_T(u32, (len - buf_idx), + MCP_DRV_NVM_BUF_LEN); + params.nvm_common.offset = ((buf_size << + DRV_MB_PARAM_NVM_LEN_SHIFT) + | addr) + buf_idx; + params.nvm_wr.buf_size = buf_size; + params.nvm_wr.buf = (u32 *)&p_buf[buf_idx]; + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + if (rc != ECORE_SUCCESS || + ((params.nvm_common.resp != FW_MSG_CODE_NVM_OK) && + (params.nvm_common.resp != + FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK))) + DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + + buf_idx += buf_size; + } + + p_dev->mcp_nvm_resp = params.nvm_common.resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; 
+} + +enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + params.type = ECORE_MCP_NVM_WR; + params.nvm_wr.buf_size = len; + params.nvm_common.cmd = (cmd == ECORE_PHY_CORE_WRITE) ? + DRV_MSG_CODE_PHY_CORE_WRITE : DRV_MSG_CODE_PHY_RAW_WRITE; + params.nvm_common.offset = addr; + params.nvm_wr.buf = (u32 *)p_buf; + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_dev, false, "MCP command rc = %d\n", rc); + p_dev->mcp_nvm_resp = params.nvm_common.resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, + u32 addr) +{ + struct ecore_hwfn *p_hwfn = ECORE_LEADING_HWFN(p_dev); + struct ecore_mcp_nvm_params params; + struct ecore_ptt *p_ptt; + enum _ecore_status_t rc; + + p_ptt = ecore_ptt_acquire(p_hwfn); + if (!p_ptt) + return ECORE_BUSY; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + params.type = ECORE_MCP_CMD; + params.nvm_common.cmd = DRV_MSG_CODE_SET_SECURE_MODE; + params.nvm_common.offset = addr; + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + p_dev->mcp_nvm_resp = params.nvm_common.resp; + ecore_ptt_release(p_hwfn, p_ptt); + + return rc; +} + +enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf) +{ + struct ecore_mcp_nvm_params params; + enum _ecore_status_t rc; + u32 bytes_left, bytes_to_copy, buf_size; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_PORT, port); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr); + addr = offset; + offset = 0; + bytes_left = len; + params.type = ECORE_MCP_NVM_RD; + params.nvm_rd.buf_size = &buf_size; + params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_READ; + while (bytes_left > 0) { + bytes_to_copy = OSAL_MIN_T(u32, bytes_left, + MAX_I2C_TRANSACTION_SIZE); + params.nvm_rd.buf = (u32 *)(p_buf + offset); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_OFFSET, addr + offset); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_SIZE, bytes_to_copy); + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + if ((params.nvm_common.resp & FW_MSG_CODE_MASK) == + FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) { + return ECORE_NODEV; + } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) != + FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return ECORE_UNKNOWN_ERROR; + + offset += *params.nvm_rd.buf_size; + bytes_left -= *params.nvm_rd.buf_size; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf) +{ + struct ecore_mcp_nvm_params params; + enum _ecore_status_t rc; + u32 buf_idx, buf_size; + + OSAL_MEMSET(¶ms, 0, sizeof(struct ecore_mcp_nvm_params)); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_PORT, port); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS, addr); + params.type = ECORE_MCP_NVM_WR; + params.nvm_common.cmd = DRV_MSG_CODE_TRANSCEIVER_WRITE; + buf_idx = 0; + while (buf_idx < len) { 
+ buf_size = OSAL_MIN_T(u32, (len - buf_idx), + MAX_I2C_TRANSACTION_SIZE); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_OFFSET, offset + buf_idx); + SET_FIELD(params.nvm_common.offset, + DRV_MB_PARAM_TRANSCEIVER_SIZE, buf_size); + params.nvm_wr.buf_size = buf_size; + params.nvm_wr.buf = (u32 *)&p_buf[buf_idx]; + rc = ecore_mcp_nvm_command(p_hwfn, p_ptt, ¶ms); + if ((params.nvm_common.resp & FW_MSG_CODE_MASK) == + FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT) { + return ECORE_NODEV; + } else if ((params.nvm_common.resp & FW_MSG_CODE_MASK) != + FW_MSG_CODE_TRANSCEIVER_DIAG_OK) + return ECORE_UNKNOWN_ERROR; + + buf_idx += buf_size; + } + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u32 *gpio_val) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 drv_mb_param = 0, rsp; + + SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_READ, + drv_mb_param, &rsp, gpio_val); + + if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) + return ECORE_UNKNOWN_ERROR; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u16 gpio_val) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + u32 drv_mb_param = 0, param, rsp; + + SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_NUMBER, gpio); + SET_FIELD(drv_mb_param, DRV_MB_PARAM_GPIO_VALUE, gpio_val); + + rc = ecore_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GPIO_WRITE, + drv_mb_param, &rsp, ¶m); + + if ((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_GPIO_OK) + return ECORE_UNKNOWN_ERROR; + + return ECORE_SUCCESS; +} diff --git a/drivers/net/qede/base/ecore_mcp.h b/drivers/net/qede/base/ecore_mcp.h new file mode 100644 index 00000000..448c30bb --- /dev/null +++ b/drivers/net/qede/base/ecore_mcp.h @@ -0,0 +1,304 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_MCP_H__ +#define __ECORE_MCP_H__ + +#include "bcm_osal.h" +#include "mcp_public.h" +#include "ecore_mcp_api.h" + +/* Using hwfn number (and not pf_num) is required since in CMT mode, + * same pf_num may be used by two different hwfn + * TODO - this shouldn't really be in .h file, but until all fields + * required during hw-init will be placed in their correct place in shmem + * we need it in ecore_dev.c [for readin the nvram reflection in shmem]. + */ +#define MCP_PF_ID_BY_REL(p_hwfn, rel_pfid) (ECORE_IS_BB((p_hwfn)->p_dev) ? \ + ((rel_pfid) | \ + ((p_hwfn)->abs_pf_id & 1) << 3) : \ + rel_pfid) +#define MCP_PF_ID(p_hwfn) MCP_PF_ID_BY_REL(p_hwfn, (p_hwfn)->rel_pf_id) + +/* TODO - this is only correct as long as only BB is supported, and + * no port-swapping is implemented; Afterwards we'll need to fix it. 
+ */
+#define MFW_PORT(_p_hwfn) ((_p_hwfn)->abs_pf_id % \
+ ((_p_hwfn)->p_dev->num_ports_in_engines * 2))
+struct ecore_mcp_info {
+ osal_spinlock_t lock; /* Spinlock used for accessing MCP mailbox */
+ u32 public_base; /* Address of the MCP public area */
+ u32 drv_mb_addr; /* Address of the driver mailbox */
+ u32 mfw_mb_addr; /* Address of the MFW mailbox */
+ u32 port_addr; /* Address of the port configuration (link) */
+ u16 drv_mb_seq; /* Current driver mailbox sequence */
+ u16 drv_pulse_seq; /* Current driver pulse sequence */
+ struct ecore_mcp_link_params link_input;
+ struct ecore_mcp_link_state link_output;
+ struct ecore_mcp_link_capabilities link_capabilities;
+ struct ecore_mcp_function_info func_info;
+
+ u8 *mfw_mb_cur;
+ u8 *mfw_mb_shadow;
+ u16 mfw_mb_length;
+ u16 mcp_hist;
+};
+
+/**
+ * @brief Initialize the interface with the MCP
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_mcp_cmd_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Initialize the port interface with the MCP
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * Can only be called after `num_ports_in_engines' is set
+ */
+void ecore_mcp_cmd_port_init(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+/**
+ * @brief Releases resources allocated during the init process.
+ *
+ * @param p_hwfn - HW func
+ * @param p_ptt - PTT required for register access
+ *
+ * @return enum _ecore_status_t
+ */
+
+enum _ecore_status_t ecore_mcp_free(struct ecore_hwfn *p_hwfn);
+
+/**
+ * @brief This function is called from the DPC context. After
+ * pointing PTT to the mfw mb, check for events sent by the MCP
+ * to the driver and ack them. In case a critical event
+ * is detected, it will be handled here; otherwise the work will be
+ * queued to a sleepable work-queue.
+ *
+ * @param p_hwfn - HW function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_mcp_handle_events(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief When the MFW doesn't get a driver pulse for a couple of seconds, at
+ * some threshold before the timeout expires, it will generate an interrupt
+ * through a dedicated status block (DPSB - Driver Pulse Status
+ * Block), to which the driver should respond immediately by
+ * providing a keepalive indication after setting the PTT to the
+ * driver-MFW mailbox. This function is called directly from the
+ * DPC upon receiving the DPSB attention.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation
+ * was successful.
+ */
+enum _ecore_status_t ecore_issue_pulse(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Sends a LOAD_REQ to the MFW, and in case the operation
+ * succeeds, returns whether this PF is the first on the
+ * chip/engine/port or function. This function should be
+ * called when the driver is ready to accept MFW events after
+ * Storm initializations are done.
+ *
+ * @param p_hwfn - hw function
+ * @param p_ptt - PTT required for register access
+ * @param p_load_code - The MCP response param containing one
+ * of the following:
+ * FW_MSG_CODE_DRV_LOAD_ENGINE
+ * FW_MSG_CODE_DRV_LOAD_PORT
+ * FW_MSG_CODE_DRV_LOAD_FUNCTION
+ * @return enum _ecore_status_t -
+ * ECORE_SUCCESS - Operation was successful. 
+ * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_load_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *p_load_code); + +/** + * @brief Read the MFW mailbox into Current buffer. + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_mcp_read_mb(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +/** + * @brief Ack to mfw that driver finished FLR process for VFs + * + * @param p_hwfn + * @param p_ptt + * @param vfs_to_ack - bit mask of all engine VFs for which the PF acks. + * + * @param return enum _ecore_status_t - ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_ack_vf_flr(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 *vfs_to_ack); + +/** + * @brief - calls during init to read shmem of all function-related info. + * + * @param p_hwfn + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_fill_shmem_func_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Reset the MCP using mailbox command. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_reset(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Sets the union data in the MCP mailbox and sends a mailbox command. + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param cmd - command to be sent to the MCP + * @param param - optional param + * @param p_union_data - pointer to a drv_union_data + * @param o_mcp_resp - the MCP response code (exclude sequence) + * @param o_mcp_param - optional parameter provided by the MCP response + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - operation was successful + * ECORE_BUSY - operation failed + */ +enum _ecore_status_t ecore_mcp_cmd_and_union(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, u32 param, + union drv_union_data *p_union_data, + u32 *o_mcp_resp, + u32 *o_mcp_param); + +/** + * @brief - Sends an NVM write command request to the MFW with + * payload. + * + * @param p_hwfn + * @param p_ptt + * @param cmd - Command: Either DRV_MSG_CODE_NVM_WRITE_NVRAM or + * DRV_MSG_CODE_NVM_PUT_FILE_DATA + * @param param - [0:23] - Offset [24:31] - Size + * @param o_mcp_resp - MCP response + * @param o_mcp_param - MCP response param + * @param i_txn_size - Buffer size + * @param i_buf - Pointer to the buffer + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_nvm_wr_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 i_txn_size, u32 *i_buf); + +/** + * @brief - Sends an NVM read command request to the MFW to get + * a buffer. + * + * @param p_hwfn + * @param p_ptt + * @param cmd - Command: DRV_MSG_CODE_NVM_GET_FILE_DATA or + * DRV_MSG_CODE_NVM_READ_NVRAM commands + * @param param - [0:23] - Offset [24:31] - Size + * @param o_mcp_resp - MCP response + * @param o_mcp_param - MCP response param + * @param o_txn_size - Buffer size output + * @param o_buf - Pointer to the buffer returned by the MFW. + * + * @param return ECORE_SUCCESS upon success. 
+ */ +enum _ecore_status_t ecore_mcp_nvm_rd_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 cmd, + u32 param, + u32 *o_mcp_resp, + u32 *o_mcp_param, + u32 *o_txn_size, u32 *o_buf); + +/** + * @brief indicates whether the MFW objects [under mcp_info] are accessible + * + * @param p_hwfn + * + * @return true iff MFW is running and mcp_info is initialized + */ +bool ecore_mcp_is_init(struct ecore_hwfn *p_hwfn); + +/** + * @brief request MFW to configure MSI-X for a VF + * + * @param p_hwfn + * @param p_ptt + * @param vf_id - absolute inside engine + * @param num_sbs - number of entries to request + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_mcp_config_vf_msix(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u8 vf_id, u8 num); + +/** + * @brief - Halt the MCP. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_halt(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - Wake up the MCP. + * + * @param p_hwfn + * @param p_ptt + * + * @param return ECORE_SUCCESS upon success. + */ +enum _ecore_status_t ecore_mcp_resume(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); +int __ecore_configure_pf_max_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 max_bw); +int __ecore_configure_pf_min_bandwidth(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_mcp_link_state *p_link, + u8 min_bw); +enum _ecore_status_t ecore_mcp_mask_parities(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 mask_parities); +#endif /* __ECORE_MCP_H__ */ diff --git a/drivers/net/qede/base/ecore_mcp_api.h b/drivers/net/qede/base/ecore_mcp_api.h new file mode 100644 index 00000000..7360b356 --- /dev/null +++ b/drivers/net/qede/base/ecore_mcp_api.h @@ -0,0 +1,611 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ECORE_MCP_API_H__ +#define __ECORE_MCP_API_H__ + +#include "ecore_status.h" + +struct ecore_mcp_link_speed_params { + bool autoneg; + u32 advertised_speeds; /* bitmask of DRV_SPEED_CAPABILITY */ + u32 forced_speed; /* In Mb/s */ +}; + +struct ecore_mcp_link_pause_params { + bool autoneg; + bool forced_rx; + bool forced_tx; +}; + +struct ecore_mcp_link_params { + struct ecore_mcp_link_speed_params speed; + struct ecore_mcp_link_pause_params pause; + u32 loopback_mode; /* in PMM_LOOPBACK values */ +}; + +struct ecore_mcp_link_capabilities { + u32 speed_capabilities; +}; + +struct ecore_mcp_link_state { + bool link_up; + + u32 line_speed; /* In Mb/s */ + u32 min_pf_rate; /* In Mb/s */ + u32 speed; /* In Mb/s */ + bool full_duplex; + + bool an; + bool an_complete; + bool parallel_detection; + bool pfc_enabled; + +#define ECORE_LINK_PARTNER_SPEED_1G_HD (1 << 0) +#define ECORE_LINK_PARTNER_SPEED_1G_FD (1 << 1) +#define ECORE_LINK_PARTNER_SPEED_10G (1 << 2) +#define ECORE_LINK_PARTNER_SPEED_20G (1 << 3) +#define ECORE_LINK_PARTNER_SPEED_25G (1 << 4) +#define ECORE_LINK_PARTNER_SPEED_40G (1 << 5) +#define ECORE_LINK_PARTNER_SPEED_50G (1 << 6) +#define ECORE_LINK_PARTNER_SPEED_100G (1 << 7) + u32 partner_adv_speed; + + bool partner_tx_flow_ctrl_en; + bool partner_rx_flow_ctrl_en; + +#define ECORE_LINK_PARTNER_SYMMETRIC_PAUSE (1) +#define ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE (2) +#define ECORE_LINK_PARTNER_BOTH_PAUSE (3) + u8 partner_adv_pause; + + bool sfp_tx_fault; +}; + +struct ecore_mcp_function_info { + u8 pause_on_host; + + enum ecore_pci_personality protocol; + + u8 bandwidth_min; + u8 bandwidth_max; + + u8 mac[ETH_ALEN]; + + u64 wwn_port; + u64 wwn_node; + +#define ECORE_MCP_VLAN_UNSET (0xffff) + u16 ovlan; +}; + +struct ecore_mcp_nvm_common { + u32 offset; + u32 param; + u32 resp; + u32 cmd; +}; + +struct ecore_mcp_nvm_rd { + u32 *buf_size; + u32 *buf; +}; + +struct ecore_mcp_nvm_wr { + u32 buf_size; + u32 *buf; +}; + +struct ecore_mcp_nvm_params { +#define ECORE_MCP_CMD (1 << 0) +#define ECORE_MCP_NVM_RD (1 << 1) +#define ECORE_MCP_NVM_WR (1 << 2) + u8 type; + + struct ecore_mcp_nvm_common nvm_common; + + union { + struct ecore_mcp_nvm_rd nvm_rd; + struct ecore_mcp_nvm_wr nvm_wr; + }; +}; + +struct ecore_mcp_drv_version { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +struct ecore_mcp_lan_stats { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; +}; + +#ifndef ECORE_PROTO_STATS +#define ECORE_PROTO_STATS + +enum ecore_mcp_protocol_type { + ECORE_MCP_LAN_STATS, +}; + +union ecore_mcp_protocol_stats { + struct ecore_mcp_lan_stats lan_stats; +}; +#endif + +enum ecore_ov_config_method { + ECORE_OV_CONFIG_MTU, + ECORE_OV_CONFIG_MAC, + ECORE_OV_CONFIG_WOL +}; + +enum ecore_ov_client { + ECORE_OV_CLIENT_DRV, + ECORE_OV_CLIENT_USER +}; + +enum ecore_ov_driver_state { + ECORE_OV_DRIVER_STATE_NOT_LOADED, + ECORE_OV_DRIVER_STATE_DISABLED, + ECORE_OV_DRIVER_STATE_ACTIVE +}; + +#define ECORE_MAX_NPIV_ENTRIES 128 +#define ECORE_WWN_SIZE 8 +struct ecore_fc_npiv_tbl { + u32 count; + u8 wwpn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE]; + u8 wwnn[ECORE_MAX_NPIV_ENTRIES][ECORE_WWN_SIZE]; +}; + +#ifndef __EXTRACT__LINUX__ +enum ecore_led_mode { + ECORE_LED_MODE_OFF, + ECORE_LED_MODE_ON, + ECORE_LED_MODE_RESTORE +}; +#endif + +/** + * @brief - returns the link params of the hw function + * + * @param p_hwfn + * + * @returns pointer to link params + */ +struct ecore_mcp_link_params *ecore_mcp_get_link_params(struct ecore_hwfn *); + +/** + * @brief - return the link state of the 
hw function + * + * @param p_hwfn + * + * @returns pointer to link state + */ +struct ecore_mcp_link_state *ecore_mcp_get_link_state(struct ecore_hwfn *); + +/** + * @brief - return the link capabilities of the hw function + * + * @param p_hwfn + * + * @returns pointer to link capabilities + */ +struct ecore_mcp_link_capabilities +*ecore_mcp_get_link_capabilities(struct ecore_hwfn *p_hwfn); + +/** + * @brief Request the MFW to set the the link according to 'link_input'. + * + * @param p_hwfn + * @param p_ptt + * @param b_up - raise link if `true'. Reset link if `false'. + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_mcp_set_link(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, bool b_up); + +/** + * @brief Get the management firmware version value + * + * @param p_dev - ecore dev pointer + * @param p_ptt + * @param p_mfw_ver - mfw version value + * @param p_running_bundle_id - image id in nvram; Optional. + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_get_mfw_ver(struct ecore_dev *p_dev, + struct ecore_ptt *p_ptt, + u32 *p_mfw_ver, + u32 *p_running_bundle_id); + +/** + * @brief Get media type value of the port. + * + * @param p_dev - ecore dev pointer + * @param mfw_ver - media type value + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - Operation was successful. + * ECORE_BUSY - Operation failed + */ +enum _ecore_status_t ecore_mcp_get_media_type(struct ecore_dev *p_dev, + u32 *media_type); + +/** + * @brief - Sends a command to the MCP mailbox. + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param cmd - command to be sent to the MCP + * @param param - optional param + * @param o_mcp_resp - the MCP response code (exclude sequence) + * @param o_mcp_param - optional parameter provided by the MCP response + * + * @return enum _ecore_status_t - + * ECORE_SUCCESS - operation was successful + * ECORE_BUSY - operation failed + */ +enum _ecore_status_t ecore_mcp_cmd(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u32 cmd, u32 param, + u32 *o_mcp_resp, u32 *o_mcp_param); + +/** + * @brief - drains the nig, allowing completion to pass in case of pauses. + * (Should be called only from sleepable context) + * + * @param p_hwfn + * @param p_ptt + */ +enum _ecore_status_t ecore_mcp_drain(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief - return the mcp function info of the hw function + * + * @param p_hwfn + * + * @returns pointer to mcp function info + */ +const struct ecore_mcp_function_info +*ecore_mcp_get_function_info(struct ecore_hwfn *p_hwfn); + +/** + * @brief - Function for reading/manipulating the nvram. Following are supported + * functionalities. + * 1. Read: Read the specified nvram offset. + * input values: + * type - ECORE_MCP_NVM_RD + * cmd - command code (e.g. DRV_MSG_CODE_NVM_READ_NVRAM) + * offset - nvm offset + * + * output values: + * buf - buffer + * buf_size - buffer size + * + * 2. Write: Write the data at the specified nvram offset + * input values: + * type - ECORE_MCP_NVM_WR + * cmd - command code (e.g. DRV_MSG_CODE_NVM_WRITE_NVRAM) + * offset - nvm offset + * buf - buffer + * buf_size - buffer size + * + * 3. Command: Send the NVM command to MCP. + * input values: + * type - ECORE_MCP_CMD + * cmd - command code (e.g. DRV_MSG_CODE_NVM_DEL_FILE) + * offset - nvm offset + * + * + * @param p_hwfn + * @param p_ptt + * @param params + * + * @return ECORE_SUCCESS - operation was successful. 
+ */
+enum _ecore_status_t ecore_mcp_nvm_command(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ struct ecore_mcp_nvm_params *params);
+
+/**
+ * @brief - count the number of functions with a matching personality on the
+ * engine.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param personalities - a bitmask of ecore_pci_personality values
+ *
+ * @returns the count of all devices on the engine whose personality matches
+ * one of the bitmasks.
+ */
+int ecore_mcp_get_personality_cnt(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt, u32 personalities);
+
+/**
+ * @brief Get the flash size value
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_flash_size - flash size in bytes to be filled.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t ecore_mcp_get_flash_size(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ u32 *p_flash_size);
+
+/**
+ * @brief Send driver version to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param version - Version value
+ * @param name - Protocol driver name
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_send_drv_version(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_mcp_drv_version *p_ver);
+
+/**
+ * @brief Read the MFW process kill counter
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return u32
+ */
+u32 ecore_get_process_kill_counter(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Trigger a recovery process
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return enum _ecore_status_t
+ */
+enum _ecore_status_t ecore_start_recovery_process(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt);
+
+/**
+ * @brief Notify MFW about the change in base device properties
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param config - Configuration that has been updated
+ * @param client - ecore client type
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_current_config(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_config_method config,
+ enum ecore_ov_client client);
+
+/**
+ * @brief Notify MFW about the driver state
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param drv_state - Driver state
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_update_driver_state(struct ecore_hwfn *p_hwfn,
+ struct ecore_ptt *p_ptt,
+ enum ecore_ov_driver_state drv_state);
+
+/**
+ * @brief Read NPIV settings from the MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param p_table - Array to hold the FC NPIV data. The client needs to
+ * allocate the required buffer. The field 'count' specifies the number of
+ * NPIV entries. A value of 0 means the table was not populated.
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful.
+ */
+enum _ecore_status_t
+ecore_mcp_ov_get_fc_npiv(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt,
+ struct ecore_fc_npiv_tbl *p_table);
+
+/**
+ * @brief Send MTU size to MFW
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ * @param mtu - MTU size
+ *
+ * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. 
+ */ +enum _ecore_status_t ecore_mcp_ov_update_mtu(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 mtu); + +/** + * @brief Set LED status + * + * @param p_hwfn + * @param p_ptt + * @param mode - LED mode + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_set_led(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + enum ecore_led_mode mode); + +/** + * @brief Set secure mode + * + * @param p_dev + * @param addr - nvm offset + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_set_secure_mode(struct ecore_dev *p_dev, + u32 addr); + +/** + * @brief Write to phy + * + * @param p_dev + * @param addr - nvm offset + * @param cmd - nvm command + * @param p_buf - nvm write buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_phy_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len); + +/** + * @brief Write to nvm + * + * @param p_dev + * @param addr - nvm offset + * @param cmd - nvm command + * @param p_buf - nvm write buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_write(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len); + +/** + * @brief Put file begin + * + * @param p_dev + * @param addr - nvm offset + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_put_file_begin(struct ecore_dev *p_dev, + u32 addr); + +/** + * @brief Delete file + * + * @param p_dev + * @param addr - nvm offset + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_del_file(struct ecore_dev *p_dev, u32 addr); + +/** + * @brief Check latest response + * + * @param p_dev + * @param p_buf - nvm write buffer + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_resp(struct ecore_dev *p_dev, u8 *p_buf); + +/** + * @brief Read from phy + * + * @param p_dev + * @param addr - nvm offset + * @param cmd - nvm command + * @param p_buf - nvm write buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_phy_read(struct ecore_dev *p_dev, u32 cmd, + u32 addr, u8 *p_buf, u32 len); + +/** + * @brief Read from nvm + * + * @param p_dev + * @param addr - nvm offset + * @param p_buf - nvm write buffer + * @param len - buffer len + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_nvm_read(struct ecore_dev *p_dev, u32 addr, + u8 *p_buf, u32 len); + +/** + * @brief Read from sfp + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param port - transceiver port + * @param addr - I2C address + * @param offset - offset in sfp + * @param len - buffer length + * @param p_buf - buffer to read into + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. 
+ */ +enum _ecore_status_t ecore_mcp_phy_sfp_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf); + +/** + * @brief Write to sfp + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param port - transceiver port + * @param addr - I2C address + * @param offset - offset in sfp + * @param len - buffer length + * @param p_buf - buffer to write from + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_phy_sfp_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u32 port, u32 addr, u32 offset, + u32 len, u8 *p_buf); + +/** + * @brief Gpio read + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param gpio - gpio number + * @param gpio_val - value read from gpio + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_gpio_read(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u32 *gpio_val); + +/** + * @brief Gpio write + * + * @param p_hwfn - hw function + * @param p_ptt - PTT required for register access + * @param gpio - gpio number + * @param gpio_val - value to write to gpio + * + * @return enum _ecore_status_t - ECORE_SUCCESS - operation was successful. + */ +enum _ecore_status_t ecore_mcp_gpio_write(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 gpio, u16 gpio_val); + +#endif diff --git a/drivers/net/qede/base/ecore_proto_if.h b/drivers/net/qede/base/ecore_proto_if.h new file mode 100644 index 00000000..2fecbc86 --- /dev/null +++ b/drivers/net/qede/base/ecore_proto_if.h @@ -0,0 +1,28 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_PROTO_IF_H__ +#define __ECORE_PROTO_IF_H__ + +/* + * PF parameters (according to personality/protocol) + */ + +struct ecore_eth_pf_params { + /* The following parameters are used during HW-init + * and these parameters need to be passed as arguments + * to update_pf_params routine invoked before slowpath start + */ + u16 num_cons; +}; + +struct ecore_pf_params { + struct ecore_eth_pf_params eth_pf_params; +}; + +#endif diff --git a/drivers/net/qede/base/ecore_rt_defs.h b/drivers/net/qede/base/ecore_rt_defs.h new file mode 100644 index 00000000..1f5139ea --- /dev/null +++ b/drivers/net/qede/base/ecore_rt_defs.h @@ -0,0 +1,446 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __RT_DEFS_H__ +#define __RT_DEFS_H__ + +/* Runtime array offsets */ +#define DORQ_REG_PF_MAX_ICID_0_RT_OFFSET 0 +#define DORQ_REG_PF_MAX_ICID_1_RT_OFFSET 1 +#define DORQ_REG_PF_MAX_ICID_2_RT_OFFSET 2 +#define DORQ_REG_PF_MAX_ICID_3_RT_OFFSET 3 +#define DORQ_REG_PF_MAX_ICID_4_RT_OFFSET 4 +#define DORQ_REG_PF_MAX_ICID_5_RT_OFFSET 5 +#define DORQ_REG_PF_MAX_ICID_6_RT_OFFSET 6 +#define DORQ_REG_PF_MAX_ICID_7_RT_OFFSET 7 +#define DORQ_REG_VF_MAX_ICID_0_RT_OFFSET 8 +#define DORQ_REG_VF_MAX_ICID_1_RT_OFFSET 9 +#define DORQ_REG_VF_MAX_ICID_2_RT_OFFSET 10 +#define DORQ_REG_VF_MAX_ICID_3_RT_OFFSET 11 +#define DORQ_REG_VF_MAX_ICID_4_RT_OFFSET 12 +#define DORQ_REG_VF_MAX_ICID_5_RT_OFFSET 13 +#define DORQ_REG_VF_MAX_ICID_6_RT_OFFSET 14 +#define DORQ_REG_VF_MAX_ICID_7_RT_OFFSET 15 +#define DORQ_REG_PF_WAKE_ALL_RT_OFFSET 16 +#define DORQ_REG_TAG1_ETHERTYPE_RT_OFFSET 17 +#define IGU_REG_PF_CONFIGURATION_RT_OFFSET 18 +#define IGU_REG_VF_CONFIGURATION_RT_OFFSET 19 +#define IGU_REG_ATTN_MSG_ADDR_L_RT_OFFSET 20 +#define IGU_REG_ATTN_MSG_ADDR_H_RT_OFFSET 21 +#define IGU_REG_LEADING_EDGE_LATCH_RT_OFFSET 22 +#define IGU_REG_TRAILING_EDGE_LATCH_RT_OFFSET 23 +#define CAU_REG_CQE_AGG_UNIT_SIZE_RT_OFFSET 24 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_VAR_MEMORY_RT_OFFSET 761 +#define CAU_REG_SB_VAR_MEMORY_RT_SIZE 736 +#define CAU_REG_SB_ADDR_MEMORY_RT_OFFSET 1497 +#define CAU_REG_SB_ADDR_MEMORY_RT_SIZE 736 +#define CAU_REG_PI_MEMORY_RT_OFFSET 2233 +#define CAU_REG_PI_MEMORY_RT_SIZE 4416 +#define PRS_REG_SEARCH_RESP_INITIATOR_TYPE_RT_OFFSET 6649 +#define PRS_REG_TASK_ID_MAX_INITIATOR_PF_RT_OFFSET 6650 +#define PRS_REG_TASK_ID_MAX_INITIATOR_VF_RT_OFFSET 6651 +#define PRS_REG_TASK_ID_MAX_TARGET_PF_RT_OFFSET 6652 +#define PRS_REG_TASK_ID_MAX_TARGET_VF_RT_OFFSET 6653 +#define PRS_REG_SEARCH_TCP_RT_OFFSET 6654 +#define PRS_REG_SEARCH_OPENFLOW_RT_OFFSET 6659 +#define PRS_REG_SEARCH_NON_IP_AS_OPENFLOW_RT_OFFSET 6660 +#define PRS_REG_OPENFLOW_SUPPORT_ONLY_KNOWN_OVER_IP_RT_OFFSET 6661 +#define PRS_REG_OPENFLOW_SEARCH_KEY_MASK_RT_OFFSET 6662 +#define PRS_REG_TAG_ETHERTYPE_0_RT_OFFSET 6663 +#define PRS_REG_LIGHT_L2_ETHERTYPE_EN_RT_OFFSET 6664 +#define SRC_REG_FIRSTFREE_RT_OFFSET 6665 +#define SRC_REG_FIRSTFREE_RT_SIZE 2 +#define SRC_REG_LASTFREE_RT_OFFSET 6667 +#define SRC_REG_LASTFREE_RT_SIZE 2 +#define SRC_REG_COUNTFREE_RT_OFFSET 6669 +#define SRC_REG_NUMBER_HASH_BITS_RT_OFFSET 6670 +#define PSWRQ2_REG_CDUT_P_SIZE_RT_OFFSET 6671 +#define PSWRQ2_REG_CDUC_P_SIZE_RT_OFFSET 6672 +#define PSWRQ2_REG_TM_P_SIZE_RT_OFFSET 6673 +#define PSWRQ2_REG_QM_P_SIZE_RT_OFFSET 6674 +#define PSWRQ2_REG_SRC_P_SIZE_RT_OFFSET 6675 +#define PSWRQ2_REG_TSDM_P_SIZE_RT_OFFSET 6676 +#define PSWRQ2_REG_TM_FIRST_ILT_RT_OFFSET 6677 +#define PSWRQ2_REG_TM_LAST_ILT_RT_OFFSET 6678 +#define PSWRQ2_REG_QM_FIRST_ILT_RT_OFFSET 6679 +#define PSWRQ2_REG_QM_LAST_ILT_RT_OFFSET 6680 +#define PSWRQ2_REG_SRC_FIRST_ILT_RT_OFFSET 6681 +#define PSWRQ2_REG_SRC_LAST_ILT_RT_OFFSET 6682 +#define PSWRQ2_REG_CDUC_FIRST_ILT_RT_OFFSET 6683 +#define PSWRQ2_REG_CDUC_LAST_ILT_RT_OFFSET 6684 +#define PSWRQ2_REG_CDUT_FIRST_ILT_RT_OFFSET 6685 +#define PSWRQ2_REG_CDUT_LAST_ILT_RT_OFFSET 6686 +#define PSWRQ2_REG_TSDM_FIRST_ILT_RT_OFFSET 6687 +#define PSWRQ2_REG_TSDM_LAST_ILT_RT_OFFSET 6688 +#define PSWRQ2_REG_TM_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6689 +#define PSWRQ2_REG_CDUT_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6690 +#define PSWRQ2_REG_CDUC_NUMBER_OF_PF_BLOCKS_RT_OFFSET 6691 +#define PSWRQ2_REG_TM_VF_BLOCKS_RT_OFFSET 
6692 +#define PSWRQ2_REG_CDUT_VF_BLOCKS_RT_OFFSET 6693 +#define PSWRQ2_REG_CDUC_VF_BLOCKS_RT_OFFSET 6694 +#define PSWRQ2_REG_TM_BLOCKS_FACTOR_RT_OFFSET 6695 +#define PSWRQ2_REG_CDUT_BLOCKS_FACTOR_RT_OFFSET 6696 +#define PSWRQ2_REG_CDUC_BLOCKS_FACTOR_RT_OFFSET 6697 +#define PSWRQ2_REG_VF_BASE_RT_OFFSET 6698 +#define PSWRQ2_REG_VF_LAST_ILT_RT_OFFSET 6699 +#define PSWRQ2_REG_WR_MBS0_RT_OFFSET 6700 +#define PSWRQ2_REG_RD_MBS0_RT_OFFSET 6701 +#define PSWRQ2_REG_DRAM_ALIGN_WR_RT_OFFSET 6702 +#define PSWRQ2_REG_DRAM_ALIGN_RD_RT_OFFSET 6703 +#define PSWRQ2_REG_ILT_MEMORY_RT_OFFSET 6704 +#define PSWRQ2_REG_ILT_MEMORY_RT_SIZE 22000 +#define PGLUE_REG_B_VF_BASE_RT_OFFSET 28704 +#define PGLUE_REG_B_CACHE_LINE_SIZE_RT_OFFSET 28705 +#define PGLUE_REG_B_PF_BAR0_SIZE_RT_OFFSET 28706 +#define PGLUE_REG_B_PF_BAR1_SIZE_RT_OFFSET 28707 +#define PGLUE_REG_B_VF_BAR1_SIZE_RT_OFFSET 28708 +#define TM_REG_VF_ENABLE_CONN_RT_OFFSET 28709 +#define TM_REG_PF_ENABLE_CONN_RT_OFFSET 28710 +#define TM_REG_PF_ENABLE_TASK_RT_OFFSET 28711 +#define TM_REG_GROUP_SIZE_RESOLUTION_CONN_RT_OFFSET 28712 +#define TM_REG_GROUP_SIZE_RESOLUTION_TASK_RT_OFFSET 28713 +#define TM_REG_CONFIG_CONN_MEM_RT_OFFSET 28714 +#define TM_REG_CONFIG_CONN_MEM_RT_SIZE 416 +#define TM_REG_CONFIG_TASK_MEM_RT_OFFSET 29130 +#define TM_REG_CONFIG_TASK_MEM_RT_SIZE 512 +#define QM_REG_MAXPQSIZE_0_RT_OFFSET 29642 +#define QM_REG_MAXPQSIZE_1_RT_OFFSET 29643 +#define QM_REG_MAXPQSIZE_2_RT_OFFSET 29644 +#define QM_REG_MAXPQSIZETXSEL_0_RT_OFFSET 29645 +#define QM_REG_MAXPQSIZETXSEL_1_RT_OFFSET 29646 +#define QM_REG_MAXPQSIZETXSEL_2_RT_OFFSET 29647 +#define QM_REG_MAXPQSIZETXSEL_3_RT_OFFSET 29648 +#define QM_REG_MAXPQSIZETXSEL_4_RT_OFFSET 29649 +#define QM_REG_MAXPQSIZETXSEL_5_RT_OFFSET 29650 +#define QM_REG_MAXPQSIZETXSEL_6_RT_OFFSET 29651 +#define QM_REG_MAXPQSIZETXSEL_7_RT_OFFSET 29652 +#define QM_REG_MAXPQSIZETXSEL_8_RT_OFFSET 29653 +#define QM_REG_MAXPQSIZETXSEL_9_RT_OFFSET 29654 +#define QM_REG_MAXPQSIZETXSEL_10_RT_OFFSET 29655 +#define QM_REG_MAXPQSIZETXSEL_11_RT_OFFSET 29656 +#define QM_REG_MAXPQSIZETXSEL_12_RT_OFFSET 29657 +#define QM_REG_MAXPQSIZETXSEL_13_RT_OFFSET 29658 +#define QM_REG_MAXPQSIZETXSEL_14_RT_OFFSET 29659 +#define QM_REG_MAXPQSIZETXSEL_15_RT_OFFSET 29660 +#define QM_REG_MAXPQSIZETXSEL_16_RT_OFFSET 29661 +#define QM_REG_MAXPQSIZETXSEL_17_RT_OFFSET 29662 +#define QM_REG_MAXPQSIZETXSEL_18_RT_OFFSET 29663 +#define QM_REG_MAXPQSIZETXSEL_19_RT_OFFSET 29664 +#define QM_REG_MAXPQSIZETXSEL_20_RT_OFFSET 29665 +#define QM_REG_MAXPQSIZETXSEL_21_RT_OFFSET 29666 +#define QM_REG_MAXPQSIZETXSEL_22_RT_OFFSET 29667 +#define QM_REG_MAXPQSIZETXSEL_23_RT_OFFSET 29668 +#define QM_REG_MAXPQSIZETXSEL_24_RT_OFFSET 29669 +#define QM_REG_MAXPQSIZETXSEL_25_RT_OFFSET 29670 +#define QM_REG_MAXPQSIZETXSEL_26_RT_OFFSET 29671 +#define QM_REG_MAXPQSIZETXSEL_27_RT_OFFSET 29672 +#define QM_REG_MAXPQSIZETXSEL_28_RT_OFFSET 29673 +#define QM_REG_MAXPQSIZETXSEL_29_RT_OFFSET 29674 +#define QM_REG_MAXPQSIZETXSEL_30_RT_OFFSET 29675 +#define QM_REG_MAXPQSIZETXSEL_31_RT_OFFSET 29676 +#define QM_REG_MAXPQSIZETXSEL_32_RT_OFFSET 29677 +#define QM_REG_MAXPQSIZETXSEL_33_RT_OFFSET 29678 +#define QM_REG_MAXPQSIZETXSEL_34_RT_OFFSET 29679 +#define QM_REG_MAXPQSIZETXSEL_35_RT_OFFSET 29680 +#define QM_REG_MAXPQSIZETXSEL_36_RT_OFFSET 29681 +#define QM_REG_MAXPQSIZETXSEL_37_RT_OFFSET 29682 +#define QM_REG_MAXPQSIZETXSEL_38_RT_OFFSET 29683 +#define QM_REG_MAXPQSIZETXSEL_39_RT_OFFSET 29684 +#define QM_REG_MAXPQSIZETXSEL_40_RT_OFFSET 29685 +#define QM_REG_MAXPQSIZETXSEL_41_RT_OFFSET 29686 +#define 
QM_REG_MAXPQSIZETXSEL_42_RT_OFFSET 29687 +#define QM_REG_MAXPQSIZETXSEL_43_RT_OFFSET 29688 +#define QM_REG_MAXPQSIZETXSEL_44_RT_OFFSET 29689 +#define QM_REG_MAXPQSIZETXSEL_45_RT_OFFSET 29690 +#define QM_REG_MAXPQSIZETXSEL_46_RT_OFFSET 29691 +#define QM_REG_MAXPQSIZETXSEL_47_RT_OFFSET 29692 +#define QM_REG_MAXPQSIZETXSEL_48_RT_OFFSET 29693 +#define QM_REG_MAXPQSIZETXSEL_49_RT_OFFSET 29694 +#define QM_REG_MAXPQSIZETXSEL_50_RT_OFFSET 29695 +#define QM_REG_MAXPQSIZETXSEL_51_RT_OFFSET 29696 +#define QM_REG_MAXPQSIZETXSEL_52_RT_OFFSET 29697 +#define QM_REG_MAXPQSIZETXSEL_53_RT_OFFSET 29698 +#define QM_REG_MAXPQSIZETXSEL_54_RT_OFFSET 29699 +#define QM_REG_MAXPQSIZETXSEL_55_RT_OFFSET 29700 +#define QM_REG_MAXPQSIZETXSEL_56_RT_OFFSET 29701 +#define QM_REG_MAXPQSIZETXSEL_57_RT_OFFSET 29702 +#define QM_REG_MAXPQSIZETXSEL_58_RT_OFFSET 29703 +#define QM_REG_MAXPQSIZETXSEL_59_RT_OFFSET 29704 +#define QM_REG_MAXPQSIZETXSEL_60_RT_OFFSET 29705 +#define QM_REG_MAXPQSIZETXSEL_61_RT_OFFSET 29706 +#define QM_REG_MAXPQSIZETXSEL_62_RT_OFFSET 29707 +#define QM_REG_MAXPQSIZETXSEL_63_RT_OFFSET 29708 +#define QM_REG_BASEADDROTHERPQ_RT_OFFSET 29709 +#define QM_REG_BASEADDROTHERPQ_RT_SIZE 128 +#define QM_REG_VOQCRDLINE_RT_OFFSET 29837 +#define QM_REG_VOQCRDLINE_RT_SIZE 20 +#define QM_REG_VOQINITCRDLINE_RT_OFFSET 29857 +#define QM_REG_VOQINITCRDLINE_RT_SIZE 20 +#define QM_REG_AFULLQMBYPTHRPFWFQ_RT_OFFSET 29877 +#define QM_REG_AFULLQMBYPTHRVPWFQ_RT_OFFSET 29878 +#define QM_REG_AFULLQMBYPTHRPFRL_RT_OFFSET 29879 +#define QM_REG_AFULLQMBYPTHRGLBLRL_RT_OFFSET 29880 +#define QM_REG_AFULLOPRTNSTCCRDMASK_RT_OFFSET 29881 +#define QM_REG_WRROTHERPQGRP_0_RT_OFFSET 29882 +#define QM_REG_WRROTHERPQGRP_1_RT_OFFSET 29883 +#define QM_REG_WRROTHERPQGRP_2_RT_OFFSET 29884 +#define QM_REG_WRROTHERPQGRP_3_RT_OFFSET 29885 +#define QM_REG_WRROTHERPQGRP_4_RT_OFFSET 29886 +#define QM_REG_WRROTHERPQGRP_5_RT_OFFSET 29887 +#define QM_REG_WRROTHERPQGRP_6_RT_OFFSET 29888 +#define QM_REG_WRROTHERPQGRP_7_RT_OFFSET 29889 +#define QM_REG_WRROTHERPQGRP_8_RT_OFFSET 29890 +#define QM_REG_WRROTHERPQGRP_9_RT_OFFSET 29891 +#define QM_REG_WRROTHERPQGRP_10_RT_OFFSET 29892 +#define QM_REG_WRROTHERPQGRP_11_RT_OFFSET 29893 +#define QM_REG_WRROTHERPQGRP_12_RT_OFFSET 29894 +#define QM_REG_WRROTHERPQGRP_13_RT_OFFSET 29895 +#define QM_REG_WRROTHERPQGRP_14_RT_OFFSET 29896 +#define QM_REG_WRROTHERPQGRP_15_RT_OFFSET 29897 +#define QM_REG_WRROTHERGRPWEIGHT_0_RT_OFFSET 29898 +#define QM_REG_WRROTHERGRPWEIGHT_1_RT_OFFSET 29899 +#define QM_REG_WRROTHERGRPWEIGHT_2_RT_OFFSET 29900 +#define QM_REG_WRROTHERGRPWEIGHT_3_RT_OFFSET 29901 +#define QM_REG_WRRTXGRPWEIGHT_0_RT_OFFSET 29902 +#define QM_REG_WRRTXGRPWEIGHT_1_RT_OFFSET 29903 +#define QM_REG_PQTX2PF_0_RT_OFFSET 29904 +#define QM_REG_PQTX2PF_1_RT_OFFSET 29905 +#define QM_REG_PQTX2PF_2_RT_OFFSET 29906 +#define QM_REG_PQTX2PF_3_RT_OFFSET 29907 +#define QM_REG_PQTX2PF_4_RT_OFFSET 29908 +#define QM_REG_PQTX2PF_5_RT_OFFSET 29909 +#define QM_REG_PQTX2PF_6_RT_OFFSET 29910 +#define QM_REG_PQTX2PF_7_RT_OFFSET 29911 +#define QM_REG_PQTX2PF_8_RT_OFFSET 29912 +#define QM_REG_PQTX2PF_9_RT_OFFSET 29913 +#define QM_REG_PQTX2PF_10_RT_OFFSET 29914 +#define QM_REG_PQTX2PF_11_RT_OFFSET 29915 +#define QM_REG_PQTX2PF_12_RT_OFFSET 29916 +#define QM_REG_PQTX2PF_13_RT_OFFSET 29917 +#define QM_REG_PQTX2PF_14_RT_OFFSET 29918 +#define QM_REG_PQTX2PF_15_RT_OFFSET 29919 +#define QM_REG_PQTX2PF_16_RT_OFFSET 29920 +#define QM_REG_PQTX2PF_17_RT_OFFSET 29921 +#define QM_REG_PQTX2PF_18_RT_OFFSET 29922 +#define QM_REG_PQTX2PF_19_RT_OFFSET 29923 +#define 
QM_REG_PQTX2PF_20_RT_OFFSET 29924 +#define QM_REG_PQTX2PF_21_RT_OFFSET 29925 +#define QM_REG_PQTX2PF_22_RT_OFFSET 29926 +#define QM_REG_PQTX2PF_23_RT_OFFSET 29927 +#define QM_REG_PQTX2PF_24_RT_OFFSET 29928 +#define QM_REG_PQTX2PF_25_RT_OFFSET 29929 +#define QM_REG_PQTX2PF_26_RT_OFFSET 29930 +#define QM_REG_PQTX2PF_27_RT_OFFSET 29931 +#define QM_REG_PQTX2PF_28_RT_OFFSET 29932 +#define QM_REG_PQTX2PF_29_RT_OFFSET 29933 +#define QM_REG_PQTX2PF_30_RT_OFFSET 29934 +#define QM_REG_PQTX2PF_31_RT_OFFSET 29935 +#define QM_REG_PQTX2PF_32_RT_OFFSET 29936 +#define QM_REG_PQTX2PF_33_RT_OFFSET 29937 +#define QM_REG_PQTX2PF_34_RT_OFFSET 29938 +#define QM_REG_PQTX2PF_35_RT_OFFSET 29939 +#define QM_REG_PQTX2PF_36_RT_OFFSET 29940 +#define QM_REG_PQTX2PF_37_RT_OFFSET 29941 +#define QM_REG_PQTX2PF_38_RT_OFFSET 29942 +#define QM_REG_PQTX2PF_39_RT_OFFSET 29943 +#define QM_REG_PQTX2PF_40_RT_OFFSET 29944 +#define QM_REG_PQTX2PF_41_RT_OFFSET 29945 +#define QM_REG_PQTX2PF_42_RT_OFFSET 29946 +#define QM_REG_PQTX2PF_43_RT_OFFSET 29947 +#define QM_REG_PQTX2PF_44_RT_OFFSET 29948 +#define QM_REG_PQTX2PF_45_RT_OFFSET 29949 +#define QM_REG_PQTX2PF_46_RT_OFFSET 29950 +#define QM_REG_PQTX2PF_47_RT_OFFSET 29951 +#define QM_REG_PQTX2PF_48_RT_OFFSET 29952 +#define QM_REG_PQTX2PF_49_RT_OFFSET 29953 +#define QM_REG_PQTX2PF_50_RT_OFFSET 29954 +#define QM_REG_PQTX2PF_51_RT_OFFSET 29955 +#define QM_REG_PQTX2PF_52_RT_OFFSET 29956 +#define QM_REG_PQTX2PF_53_RT_OFFSET 29957 +#define QM_REG_PQTX2PF_54_RT_OFFSET 29958 +#define QM_REG_PQTX2PF_55_RT_OFFSET 29959 +#define QM_REG_PQTX2PF_56_RT_OFFSET 29960 +#define QM_REG_PQTX2PF_57_RT_OFFSET 29961 +#define QM_REG_PQTX2PF_58_RT_OFFSET 29962 +#define QM_REG_PQTX2PF_59_RT_OFFSET 29963 +#define QM_REG_PQTX2PF_60_RT_OFFSET 29964 +#define QM_REG_PQTX2PF_61_RT_OFFSET 29965 +#define QM_REG_PQTX2PF_62_RT_OFFSET 29966 +#define QM_REG_PQTX2PF_63_RT_OFFSET 29967 +#define QM_REG_PQOTHER2PF_0_RT_OFFSET 29968 +#define QM_REG_PQOTHER2PF_1_RT_OFFSET 29969 +#define QM_REG_PQOTHER2PF_2_RT_OFFSET 29970 +#define QM_REG_PQOTHER2PF_3_RT_OFFSET 29971 +#define QM_REG_PQOTHER2PF_4_RT_OFFSET 29972 +#define QM_REG_PQOTHER2PF_5_RT_OFFSET 29973 +#define QM_REG_PQOTHER2PF_6_RT_OFFSET 29974 +#define QM_REG_PQOTHER2PF_7_RT_OFFSET 29975 +#define QM_REG_PQOTHER2PF_8_RT_OFFSET 29976 +#define QM_REG_PQOTHER2PF_9_RT_OFFSET 29977 +#define QM_REG_PQOTHER2PF_10_RT_OFFSET 29978 +#define QM_REG_PQOTHER2PF_11_RT_OFFSET 29979 +#define QM_REG_PQOTHER2PF_12_RT_OFFSET 29980 +#define QM_REG_PQOTHER2PF_13_RT_OFFSET 29981 +#define QM_REG_PQOTHER2PF_14_RT_OFFSET 29982 +#define QM_REG_PQOTHER2PF_15_RT_OFFSET 29983 +#define QM_REG_RLGLBLPERIOD_0_RT_OFFSET 29984 +#define QM_REG_RLGLBLPERIOD_1_RT_OFFSET 29985 +#define QM_REG_RLGLBLPERIODTIMER_0_RT_OFFSET 29986 +#define QM_REG_RLGLBLPERIODTIMER_1_RT_OFFSET 29987 +#define QM_REG_RLGLBLPERIODSEL_0_RT_OFFSET 29988 +#define QM_REG_RLGLBLPERIODSEL_1_RT_OFFSET 29989 +#define QM_REG_RLGLBLPERIODSEL_2_RT_OFFSET 29990 +#define QM_REG_RLGLBLPERIODSEL_3_RT_OFFSET 29991 +#define QM_REG_RLGLBLPERIODSEL_4_RT_OFFSET 29992 +#define QM_REG_RLGLBLPERIODSEL_5_RT_OFFSET 29993 +#define QM_REG_RLGLBLPERIODSEL_6_RT_OFFSET 29994 +#define QM_REG_RLGLBLPERIODSEL_7_RT_OFFSET 29995 +#define QM_REG_RLGLBLINCVAL_RT_OFFSET 29996 +#define QM_REG_RLGLBLINCVAL_RT_SIZE 256 +#define QM_REG_RLGLBLUPPERBOUND_RT_OFFSET 30252 +#define QM_REG_RLGLBLUPPERBOUND_RT_SIZE 256 +#define QM_REG_RLGLBLCRD_RT_OFFSET 30508 +#define QM_REG_RLGLBLCRD_RT_SIZE 256 +#define QM_REG_RLGLBLENABLE_RT_OFFSET 30764 +#define QM_REG_RLPFPERIOD_RT_OFFSET 30765 
+#define QM_REG_RLPFPERIODTIMER_RT_OFFSET 30766 +#define QM_REG_RLPFINCVAL_RT_OFFSET 30767 +#define QM_REG_RLPFINCVAL_RT_SIZE 16 +#define QM_REG_RLPFUPPERBOUND_RT_OFFSET 30783 +#define QM_REG_RLPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_RLPFCRD_RT_OFFSET 30799 +#define QM_REG_RLPFCRD_RT_SIZE 16 +#define QM_REG_RLPFENABLE_RT_OFFSET 30815 +#define QM_REG_RLPFVOQENABLE_RT_OFFSET 30816 +#define QM_REG_WFQPFWEIGHT_RT_OFFSET 30817 +#define QM_REG_WFQPFWEIGHT_RT_SIZE 16 +#define QM_REG_WFQPFUPPERBOUND_RT_OFFSET 30833 +#define QM_REG_WFQPFUPPERBOUND_RT_SIZE 16 +#define QM_REG_WFQPFCRD_RT_OFFSET 30849 +#define QM_REG_WFQPFCRD_RT_SIZE 160 +#define QM_REG_WFQPFENABLE_RT_OFFSET 31009 +#define QM_REG_WFQVPENABLE_RT_OFFSET 31010 +#define QM_REG_BASEADDRTXPQ_RT_OFFSET 31011 +#define QM_REG_BASEADDRTXPQ_RT_SIZE 512 +#define QM_REG_TXPQMAP_RT_OFFSET 31523 +#define QM_REG_TXPQMAP_RT_SIZE 512 +#define QM_REG_WFQVPWEIGHT_RT_OFFSET 32035 +#define QM_REG_WFQVPWEIGHT_RT_SIZE 512 +#define QM_REG_WFQVPCRD_RT_OFFSET 32547 +#define QM_REG_WFQVPCRD_RT_SIZE 512 +#define QM_REG_WFQVPMAP_RT_OFFSET 33059 +#define QM_REG_WFQVPMAP_RT_SIZE 512 +#define QM_REG_WFQPFCRD_MSB_RT_OFFSET 33571 +#define QM_REG_WFQPFCRD_MSB_RT_SIZE 160 +#define NIG_REG_TAG_ETHERTYPE_0_RT_OFFSET 33731 +#define NIG_REG_OUTER_TAG_VALUE_LIST0_RT_OFFSET 33732 +#define NIG_REG_OUTER_TAG_VALUE_LIST1_RT_OFFSET 33733 +#define NIG_REG_OUTER_TAG_VALUE_LIST2_RT_OFFSET 33734 +#define NIG_REG_OUTER_TAG_VALUE_LIST3_RT_OFFSET 33735 +#define NIG_REG_OUTER_TAG_VALUE_MASK_RT_OFFSET 33736 +#define NIG_REG_LLH_FUNC_TAGMAC_CLS_TYPE_RT_OFFSET 33737 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_OFFSET 33738 +#define NIG_REG_LLH_FUNC_TAG_EN_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_OFFSET 33742 +#define NIG_REG_LLH_FUNC_TAG_HDR_SEL_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_OFFSET 33746 +#define NIG_REG_LLH_FUNC_TAG_VALUE_RT_SIZE 4 +#define NIG_REG_LLH_FUNC_NO_TAG_RT_OFFSET 33750 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_OFFSET 33751 +#define NIG_REG_LLH_FUNC_FILTER_VALUE_RT_SIZE 32 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_OFFSET 33783 +#define NIG_REG_LLH_FUNC_FILTER_EN_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_OFFSET 33799 +#define NIG_REG_LLH_FUNC_FILTER_MODE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_OFFSET 33815 +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE_RT_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_OFFSET 33831 +#define NIG_REG_LLH_FUNC_FILTER_HDR_SEL_RT_SIZE 16 +#define NIG_REG_TX_EDPM_CTRL_RT_OFFSET 33847 +#define CDU_REG_CID_ADDR_PARAMS_RT_OFFSET 33848 +#define CDU_REG_SEGMENT0_PARAMS_RT_OFFSET 33849 +#define CDU_REG_SEGMENT1_PARAMS_RT_OFFSET 33850 +#define CDU_REG_PF_SEG0_TYPE_OFFSET_RT_OFFSET 33851 +#define CDU_REG_PF_SEG1_TYPE_OFFSET_RT_OFFSET 33852 +#define CDU_REG_PF_SEG2_TYPE_OFFSET_RT_OFFSET 33853 +#define CDU_REG_PF_SEG3_TYPE_OFFSET_RT_OFFSET 33854 +#define CDU_REG_PF_FL_SEG0_TYPE_OFFSET_RT_OFFSET 33855 +#define CDU_REG_PF_FL_SEG1_TYPE_OFFSET_RT_OFFSET 33856 +#define CDU_REG_PF_FL_SEG2_TYPE_OFFSET_RT_OFFSET 33857 +#define CDU_REG_PF_FL_SEG3_TYPE_OFFSET_RT_OFFSET 33858 +#define CDU_REG_VF_SEG_TYPE_OFFSET_RT_OFFSET 33859 +#define CDU_REG_VF_FL_SEG_TYPE_OFFSET_RT_OFFSET 33860 +#define PBF_REG_TAG_ETHERTYPE_0_RT_OFFSET 33861 +#define PBF_REG_BTB_SHARED_AREA_SIZE_RT_OFFSET 33862 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ0_RT_OFFSET 33863 +#define PBF_REG_BTB_GUARANTEED_VOQ0_RT_OFFSET 33864 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ0_RT_OFFSET 33865 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ1_RT_OFFSET 33866 +#define 
PBF_REG_BTB_GUARANTEED_VOQ1_RT_OFFSET 33867 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ1_RT_OFFSET 33868 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ2_RT_OFFSET 33869 +#define PBF_REG_BTB_GUARANTEED_VOQ2_RT_OFFSET 33870 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ2_RT_OFFSET 33871 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ3_RT_OFFSET 33872 +#define PBF_REG_BTB_GUARANTEED_VOQ3_RT_OFFSET 33873 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ3_RT_OFFSET 33874 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ4_RT_OFFSET 33875 +#define PBF_REG_BTB_GUARANTEED_VOQ4_RT_OFFSET 33876 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ4_RT_OFFSET 33877 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ5_RT_OFFSET 33878 +#define PBF_REG_BTB_GUARANTEED_VOQ5_RT_OFFSET 33879 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ5_RT_OFFSET 33880 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ6_RT_OFFSET 33881 +#define PBF_REG_BTB_GUARANTEED_VOQ6_RT_OFFSET 33882 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ6_RT_OFFSET 33883 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ7_RT_OFFSET 33884 +#define PBF_REG_BTB_GUARANTEED_VOQ7_RT_OFFSET 33885 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ7_RT_OFFSET 33886 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ8_RT_OFFSET 33887 +#define PBF_REG_BTB_GUARANTEED_VOQ8_RT_OFFSET 33888 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ8_RT_OFFSET 33889 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ9_RT_OFFSET 33890 +#define PBF_REG_BTB_GUARANTEED_VOQ9_RT_OFFSET 33891 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ9_RT_OFFSET 33892 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ10_RT_OFFSET 33893 +#define PBF_REG_BTB_GUARANTEED_VOQ10_RT_OFFSET 33894 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ10_RT_OFFSET 33895 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ11_RT_OFFSET 33896 +#define PBF_REG_BTB_GUARANTEED_VOQ11_RT_OFFSET 33897 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ11_RT_OFFSET 33898 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ12_RT_OFFSET 33899 +#define PBF_REG_BTB_GUARANTEED_VOQ12_RT_OFFSET 33900 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ12_RT_OFFSET 33901 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ13_RT_OFFSET 33902 +#define PBF_REG_BTB_GUARANTEED_VOQ13_RT_OFFSET 33903 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ13_RT_OFFSET 33904 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ14_RT_OFFSET 33905 +#define PBF_REG_BTB_GUARANTEED_VOQ14_RT_OFFSET 33906 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ14_RT_OFFSET 33907 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ15_RT_OFFSET 33908 +#define PBF_REG_BTB_GUARANTEED_VOQ15_RT_OFFSET 33909 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ15_RT_OFFSET 33910 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ16_RT_OFFSET 33911 +#define PBF_REG_BTB_GUARANTEED_VOQ16_RT_OFFSET 33912 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ16_RT_OFFSET 33913 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ17_RT_OFFSET 33914 +#define PBF_REG_BTB_GUARANTEED_VOQ17_RT_OFFSET 33915 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ17_RT_OFFSET 33916 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ18_RT_OFFSET 33917 +#define PBF_REG_BTB_GUARANTEED_VOQ18_RT_OFFSET 33918 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ18_RT_OFFSET 33919 +#define PBF_REG_YCMD_QS_NUM_LINES_VOQ19_RT_OFFSET 33920 +#define PBF_REG_BTB_GUARANTEED_VOQ19_RT_OFFSET 33921 +#define PBF_REG_BTB_SHARED_AREA_SETUP_VOQ19_RT_OFFSET 33922 +#define XCM_REG_CON_PHY_Q3_RT_OFFSET 33923 + +#define RUNTIME_ARRAY_SIZE 33924 + +#endif /* __RT_DEFS_H__ */ diff --git a/drivers/net/qede/base/ecore_sp_api.h b/drivers/net/qede/base/ecore_sp_api.h new file mode 100644 index 00000000..e80f5ef3 --- /dev/null +++ b/drivers/net/qede/base/ecore_sp_api.h @@ -0,0 +1,42 @@ +/* + * Copyright (c) 2016 QLogic Corporation. 
+ * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_SP_API_H__ +#define __ECORE_SP_API_H__ + +#include "ecore_status.h" + +enum spq_mode { + ECORE_SPQ_MODE_BLOCK, /* Client will poll a designated mem. address */ + ECORE_SPQ_MODE_CB, /* Client supplies a callback */ + ECORE_SPQ_MODE_EBLOCK, /* ECORE should block until completion */ +}; + +struct ecore_hwfn; +union event_ring_data; +struct eth_slow_path_rx_cqe; + +struct ecore_spq_comp_cb { + void (*function)(struct ecore_hwfn *, + void *, union event_ring_data *, u8 fw_return_code); + void *cookie; +}; + +/** + * @brief ecore_eth_cqe_completion - handles the completion of a + * ramrod on the cqe ring + * + * @param p_hwfn + * @param cqe + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe); + +#endif diff --git a/drivers/net/qede/base/ecore_sp_commands.c b/drivers/net/qede/base/ecore_sp_commands.c new file mode 100644 index 00000000..e9ac8988 --- /dev/null +++ b/drivers/net/qede/base/ecore_sp_commands.c @@ -0,0 +1,525 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" + +#include "ecore.h" +#include "ecore_status.h" +#include "ecore_chain.h" +#include "ecore_spq.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_sp_commands.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "reg_addr.h" +#include "ecore_int.h" +#include "ecore_hw.h" +#include "ecore_dcbx.h" + +enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry **pp_ent, + u8 cmd, + u8 protocol, + struct ecore_sp_init_data *p_data) +{ + u32 opaque_cid = p_data->opaque_fid << 16 | p_data->cid; + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + + /* Get an SPQ entry */ + rc = ecore_spq_get_entry(p_hwfn, pp_ent); + if (rc != ECORE_SUCCESS) + return rc; + + /* Fill the SPQ entry */ + p_ent = *pp_ent; + p_ent->elem.hdr.cid = OSAL_CPU_TO_LE32(opaque_cid); + p_ent->elem.hdr.cmd_id = cmd; + p_ent->elem.hdr.protocol_id = protocol; + p_ent->priority = ECORE_SPQ_PRIORITY_NORMAL; + p_ent->comp_mode = p_data->comp_mode; + p_ent->comp_done.done = 0; + + switch (p_ent->comp_mode) { + case ECORE_SPQ_MODE_EBLOCK: + p_ent->comp_cb.cookie = &p_ent->comp_done; + break; + + case ECORE_SPQ_MODE_BLOCK: + if (!p_data->p_comp_data) + return ECORE_INVAL; + + p_ent->comp_cb.cookie = p_data->p_comp_data->cookie; + break; + + case ECORE_SPQ_MODE_CB: + if (!p_data->p_comp_data) + p_ent->comp_cb.function = OSAL_NULL; + else + p_ent->comp_cb = *p_data->p_comp_data; + break; + + default: + DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Initialized: CID %08x cmd %02x protocol %02x data_addr %lu comp_mode [%s]\n", + opaque_cid, cmd, protocol, + (unsigned long)&p_ent->ramrod, + D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK, + ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + + OSAL_MEMSET(&p_ent->ramrod, 0, sizeof(p_ent->ramrod)); + + return ECORE_SUCCESS; +} + +static enum tunnel_clss ecore_tunn_get_clss_type(u8 type) +{ + switch (type) { + case ECORE_TUNN_CLSS_MAC_VLAN: + return TUNNEL_CLSS_MAC_VLAN; + case ECORE_TUNN_CLSS_MAC_VNI: + return TUNNEL_CLSS_MAC_VNI; + 
case ECORE_TUNN_CLSS_INNER_MAC_VLAN: + return TUNNEL_CLSS_INNER_MAC_VLAN; + case ECORE_TUNN_CLSS_INNER_MAC_VNI: + return TUNNEL_CLSS_INNER_MAC_VNI; + default: + return TUNNEL_CLSS_MAC_VLAN; + } +} + +static void +ecore_tunn_set_pf_fix_tunn_mode(struct ecore_hwfn *p_hwfn, + struct ecore_tunn_update_params *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + unsigned long cached_tunn_mode = p_hwfn->p_dev->tunn_mode; + unsigned long update_mask = p_src->tunn_mode_update_mask; + unsigned long tunn_mode = p_src->tunn_mode; + unsigned long new_tunn_mode = 0; + + if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &update_mask)) { + if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode); + } else { + if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &cached_tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_L2GRE_TUNN, &new_tunn_mode); + } + + if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &update_mask)) { + if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode); + } else { + if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &cached_tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_IPGRE_TUNN, &new_tunn_mode); + } + + if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &update_mask)) { + if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode); + } else { + if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &cached_tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_VXLAN_TUNN, &new_tunn_mode); + } + + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) { + if (p_src->update_geneve_udp_port) + DP_NOTICE(p_hwfn, true, "Geneve not supported\n"); + p_src->update_geneve_udp_port = 0; + p_src->tunn_mode = new_tunn_mode; + return; + } + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + OSAL_CPU_TO_LE16(p_src->geneve_udp_port); + } + + if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &update_mask)) { + if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode); + } else { + if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &cached_tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_L2GENEVE_TUNN, &new_tunn_mode); + } + + if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &update_mask)) { + if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode); + } else { + if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &cached_tunn_mode)) + OSAL_SET_BIT(ECORE_MODE_IPGENEVE_TUNN, &new_tunn_mode); + } + + p_src->tunn_mode = new_tunn_mode; +} + +static void +ecore_tunn_set_pf_update_params(struct ecore_hwfn *p_hwfn, + struct ecore_tunn_update_params *p_src, + struct pf_update_tunnel_config *p_tunn_cfg) +{ + unsigned long tunn_mode = p_src->tunn_mode; + enum tunnel_clss type; + + ecore_tunn_set_pf_fix_tunn_mode(p_hwfn, p_src, p_tunn_cfg); + p_tunn_cfg->update_rx_pf_clss = p_src->update_rx_pf_clss; + p_tunn_cfg->update_tx_pf_clss = p_src->update_tx_pf_clss; + + type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan); + p_tunn_cfg->tunnel_clss_vxlan = type; + type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre); + p_tunn_cfg->tunnel_clss_l2gre = type; + type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre); + p_tunn_cfg->tunnel_clss_ipgre = type; + + if (p_src->update_vxlan_udp_port) { + p_tunn_cfg->set_vxlan_udp_port_flg = 1; + p_tunn_cfg->vxlan_udp_port = + OSAL_CPU_TO_LE16(p_src->vxlan_udp_port); + } + + if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2gre = 1; + + if 
(OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgre = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_vxlan = 1; + + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) { + if (p_src->update_geneve_udp_port) + DP_NOTICE(p_hwfn, true, "Geneve not supported\n"); + p_src->update_geneve_udp_port = 0; + return; + } + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + OSAL_CPU_TO_LE16(p_src->geneve_udp_port); + } + + if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2geneve = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgeneve = 1; + + type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve); + p_tunn_cfg->tunnel_clss_l2geneve = type; + type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); + p_tunn_cfg->tunnel_clss_ipgeneve = type; +} + +static void ecore_set_hw_tunn_mode(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + unsigned long tunn_mode) +{ + u8 l2gre_enable = 0, ipgre_enable = 0, vxlan_enable = 0; + u8 l2geneve_enable = 0, ipgeneve_enable = 0; + + if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode)) + l2gre_enable = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode)) + ipgre_enable = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode)) + vxlan_enable = 1; + + ecore_set_gre_enable(p_hwfn, p_ptt, l2gre_enable, ipgre_enable); + ecore_set_vxlan_enable(p_hwfn, p_ptt, vxlan_enable); + + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) + return; + + if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode)) + l2geneve_enable = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode)) + ipgeneve_enable = 1; + + ecore_set_geneve_enable(p_hwfn, p_ptt, l2geneve_enable, + ipgeneve_enable); +} + +static void +ecore_tunn_set_pf_start_params(struct ecore_hwfn *p_hwfn, + struct ecore_tunn_start_params *p_src, + struct pf_start_tunnel_config *p_tunn_cfg) +{ + unsigned long tunn_mode; + enum tunnel_clss type; + + if (!p_src) + return; + + tunn_mode = p_src->tunn_mode; + type = ecore_tunn_get_clss_type(p_src->tunn_clss_vxlan); + p_tunn_cfg->tunnel_clss_vxlan = type; + type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2gre); + p_tunn_cfg->tunnel_clss_l2gre = type; + type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgre); + p_tunn_cfg->tunnel_clss_ipgre = type; + + if (p_src->update_vxlan_udp_port) { + p_tunn_cfg->set_vxlan_udp_port_flg = 1; + p_tunn_cfg->vxlan_udp_port = + OSAL_CPU_TO_LE16(p_src->vxlan_udp_port); + } + + if (OSAL_TEST_BIT(ECORE_MODE_L2GRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2gre = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_IPGRE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgre = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_VXLAN_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_vxlan = 1; + + if (ECORE_IS_BB_A0(p_hwfn->p_dev)) { + if (p_src->update_geneve_udp_port) + DP_NOTICE(p_hwfn, true, "Geneve not supported\n"); + p_src->update_geneve_udp_port = 0; + return; + } + + if (p_src->update_geneve_udp_port) { + p_tunn_cfg->set_geneve_udp_port_flg = 1; + p_tunn_cfg->geneve_udp_port = + OSAL_CPU_TO_LE16(p_src->geneve_udp_port); + } + + if (OSAL_TEST_BIT(ECORE_MODE_L2GENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_l2geneve = 1; + + if (OSAL_TEST_BIT(ECORE_MODE_IPGENEVE_TUNN, &tunn_mode)) + p_tunn_cfg->tx_enable_ipgeneve = 1; + + type = ecore_tunn_get_clss_type(p_src->tunn_clss_l2geneve); + p_tunn_cfg->tunnel_clss_l2geneve = type; + type = ecore_tunn_get_clss_type(p_src->tunn_clss_ipgeneve); + 
p_tunn_cfg->tunnel_clss_ipgeneve = type; +} + +enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, + struct ecore_tunn_start_params *p_tunn, + enum ecore_mf_mode mode, + bool allow_npar_tx_switch) +{ + struct pf_start_ramrod_data *p_ramrod = OSAL_NULL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + u16 sb = ecore_int_get_sp_sb_id(p_hwfn); + u8 sb_index = p_hwfn->p_eq->eq_sb_index; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + u8 page_cnt; + + /* update initial eq producer */ + ecore_eq_prod_update(p_hwfn, + ecore_chain_get_prod_idx(&p_hwfn->p_eq->chain)); + + /* Initialize the SPQ entry for the ramrod */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_START, + PROTOCOLID_COMMON, &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + /* Fill the ramrod data */ + p_ramrod = &p_ent->ramrod.pf_start; + p_ramrod->event_ring_sb_id = OSAL_CPU_TO_LE16(sb); + p_ramrod->event_ring_sb_index = sb_index; + p_ramrod->path_id = ECORE_PATH_ID(p_hwfn); + p_ramrod->outer_tag = p_hwfn->hw_info.ovlan; + + /* For easier debugging */ + p_ramrod->dont_log_ramrods = 0; + p_ramrod->log_type_mask = OSAL_CPU_TO_LE16(0xf); + + switch (mode) { + case ECORE_MF_DEFAULT: + case ECORE_MF_NPAR: + p_ramrod->mf_mode = MF_NPAR; + break; + case ECORE_MF_OVLAN: + p_ramrod->mf_mode = MF_OVLAN; + break; + default: + DP_NOTICE(p_hwfn, true, + "Unsupported MF mode, init as DEFAULT\n"); + p_ramrod->mf_mode = MF_NPAR; + } + + /* Place EQ address in RAMROD */ + DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, + p_hwfn->p_eq->chain.pbl.p_phys_table); + page_cnt = (u8)ecore_chain_get_page_cnt(&p_hwfn->p_eq->chain); + p_ramrod->event_ring_num_pages = page_cnt; + DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, + p_hwfn->p_consq->chain.pbl.p_phys_table); + + ecore_tunn_set_pf_start_params(p_hwfn, p_tunn, + &p_ramrod->tunnel_config); + + if (IS_MF_SI(p_hwfn)) + p_ramrod->allow_npar_tx_switching = allow_npar_tx_switch; + + switch (p_hwfn->hw_info.personality) { + case ECORE_PCI_ETH: + p_ramrod->personality = PERSONALITY_ETH; + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown personality %d\n", + p_hwfn->hw_info.personality); + p_ramrod->personality = PERSONALITY_ETH; + } + + p_ramrod->base_vf_id = (u8)p_hwfn->hw_info.first_vf_in_pf; + p_ramrod->num_vfs = (u8)p_hwfn->p_dev->sriov_info.total_vfs; + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Setting event_ring_sb [id %04x index %02x], outer_tag [%d]\n", + sb, sb_index, p_ramrod->outer_tag); + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + + if (p_tunn) { + ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->tunn_mode); + p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode; + } + + return rc; +} + +enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_CB; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_dcbx_set_pf_update_params(&p_hwfn->p_dcbx_info->results, + 
&p_ent->ramrod.pf_update); + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +/* Set pf update ramrod command params */ +enum _ecore_status_t +ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn, + struct ecore_tunn_update_params *p_tunn, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = comp_mode; + init_data.p_comp_data = p_comp_data; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_UPDATE, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + ecore_tunn_set_pf_update_params(p_hwfn, p_tunn, + &p_ent->ramrod.pf_update.tunnel_config); + + rc = ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); + + if ((rc == ECORE_SUCCESS) && p_tunn) { + if (p_tunn->update_vxlan_udp_port) + ecore_set_vxlan_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->vxlan_udp_port); + if (p_tunn->update_geneve_udp_port) + ecore_set_geneve_dest_port(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->geneve_udp_port); + + ecore_set_hw_tunn_mode(p_hwfn, p_hwfn->p_main_ptt, + p_tunn->tunn_mode); + p_hwfn->p_dev->tunn_mode = p_tunn->tunn_mode; + } + + return rc; +} + +enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_sp_init_data init_data; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_PF_STOP, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} + +enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_ent = OSAL_NULL; + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_sp_init_data init_data; + + /* Get SPQ entry */ + OSAL_MEMSET(&init_data, 0, sizeof(init_data)); + init_data.cid = ecore_spq_get_cid(p_hwfn); + init_data.opaque_fid = p_hwfn->hw_info.opaque_fid; + init_data.comp_mode = ECORE_SPQ_MODE_EBLOCK; + + rc = ecore_sp_init_request(p_hwfn, &p_ent, + COMMON_RAMROD_EMPTY, PROTOCOLID_COMMON, + &init_data); + if (rc != ECORE_SUCCESS) + return rc; + + return ecore_spq_post(p_hwfn, p_ent, OSAL_NULL); +} diff --git a/drivers/net/qede/base/ecore_sp_commands.h b/drivers/net/qede/base/ecore_sp_commands.h new file mode 100644 index 00000000..e281ab0e --- /dev/null +++ b/drivers/net/qede/base/ecore_sp_commands.h @@ -0,0 +1,137 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_SP_COMMANDS_H__ +#define __ECORE_SP_COMMANDS_H__ + +#include "ecore.h" +#include "ecore_spq.h" +#include "ecore_sp_api.h" + +#define ECORE_SP_EQ_COMPLETION 0x01 +#define ECORE_SP_CQE_COMPLETION 0x02 + +struct ecore_sp_init_data { + /* The CID and FID aren't necessarily derived from hwfn, + * e.g., in IOV scenarios. CID might defer between SPQ and + * other elements. 
+ */ + u32 cid; + u16 opaque_fid; + + /* Information regarding operation upon sending & completion */ + enum spq_mode comp_mode; + struct ecore_spq_comp_cb *p_comp_data; + +}; + +/** + * @brief Acquire and initialize and SPQ entry for a given ramrod. + * + * @param p_hwfn + * @param pp_ent - will be filled with a pointer to an entry upon success + * @param cmd - dependent upon protocol + * @param protocol + * @param p_data - various configuration required for ramrod + * + * @return ECORE_SUCCESS upon success, otherwise failure. + */ +enum _ecore_status_t ecore_sp_init_request(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry **pp_ent, + u8 cmd, + u8 protocol, + struct ecore_sp_init_data *p_data); + +/** + * @brief ecore_sp_pf_start - PF Function Start Ramrod + * + * This ramrod is sent to initialize a physical function (PF). It will + * configure the function related parameters and write its completion to the + * event ring specified in the parameters. + * + * Ramrods complete on the common event ring for the PF. This ring is + * allocated by the driver on host memory and its parameters are written + * to the internal RAM of the UStorm by the Function Start Ramrod. + * + * @param p_hwfn + * @param p_tunn - pf start tunneling configuration + * @param mode + * @param allow_npar_tx_switch - npar tx switching to be used + * for vports configured for tx-switching. + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_pf_start(struct ecore_hwfn *p_hwfn, + struct ecore_tunn_start_params *p_tunn, + enum ecore_mf_mode mode, + bool allow_npar_tx_switch); + +/** + * @brief ecore_sp_pf_update_tunn_cfg - PF Function Tunnel configuration + * update Ramrod + * + * This ramrod is sent to update a tunneling configuration + * for a physical function (PF). + * + * @param p_hwfn + * @param p_tunn - pf update tunneling parameters + * @param comp_mode - completion mode + * @param p_comp_data - callback function + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t +ecore_sp_pf_update_tunn_cfg(struct ecore_hwfn *p_hwfn, + struct ecore_tunn_update_params *p_tunn, + enum spq_mode comp_mode, + struct ecore_spq_comp_cb *p_comp_data); + +/** + * @brief ecore_sp_pf_update - PF Function Update Ramrod + * + * This ramrod updates function-related parameters. Every parameter can be + * updated independently, according to configuration flags. + * + * @note Final phase API. + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_pf_update(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_sp_pf_stop - PF Function Stop Ramrod + * + * This ramrod is sent to close a Physical Function (PF). It is the last ramrod + * sent and the last completion written to the PFs Event Ring. This ramrod also + * deletes the context for the Slowhwfn connection on this PF. + * + * @note Not required for first packet. + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_pf_stop(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_sp_heartbeat_ramrod - Send empty Ramrod + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ + +enum _ecore_status_t ecore_sp_heartbeat_ramrod(struct ecore_hwfn *p_hwfn); + +#endif /*__ECORE_SP_COMMANDS_H__*/ diff --git a/drivers/net/qede/base/ecore_spq.c b/drivers/net/qede/base/ecore_spq.c new file mode 100644 index 00000000..b263693f --- /dev/null +++ b/drivers/net/qede/base/ecore_spq.c @@ -0,0 +1,943 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. 
+ * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" +#include "reg_addr.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_hsi_common.h" +#include "ecore.h" +#include "ecore_sp_api.h" +#include "ecore_spq.h" +#include "ecore_iro.h" +#include "ecore_init_fw_funcs.h" +#include "ecore_cxt.h" +#include "ecore_int.h" +#include "ecore_dev_api.h" +#include "ecore_mcp.h" +#include "ecore_hw.h" +#include "ecore_sriov.h" + +/*************************************************************************** + * Structures & Definitions + ***************************************************************************/ + +#define SPQ_HIGH_PRI_RESERVE_DEFAULT (1) +#define SPQ_BLOCK_SLEEP_LENGTH (1000) + +/*************************************************************************** + * Blocking Imp. (BLOCK/EBLOCK mode) + ***************************************************************************/ +static void ecore_spq_blocking_cb(struct ecore_hwfn *p_hwfn, + void *cookie, + union event_ring_data *data, + u8 fw_return_code) +{ + struct ecore_spq_comp_done *comp_done; + + comp_done = (struct ecore_spq_comp_done *)cookie; + + comp_done->done = 0x1; + comp_done->fw_return_code = fw_return_code; + + /* make update visible to waiting thread */ + OSAL_SMP_WMB(p_hwfn->p_dev); +} + +static enum _ecore_status_t ecore_spq_block(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, + u8 *p_fw_ret) +{ + int sleep_count = SPQ_BLOCK_SLEEP_LENGTH; + struct ecore_spq_comp_done *comp_done; + enum _ecore_status_t rc; + + comp_done = (struct ecore_spq_comp_done *)p_ent->comp_cb.cookie; + while (sleep_count) { + OSAL_POLL_MODE_DPC(p_hwfn); + /* validate we receive completion update */ + OSAL_SMP_RMB(p_hwfn->p_dev); + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return ECORE_SUCCESS; + } + OSAL_MSLEEP(5); + sleep_count--; + } + + DP_INFO(p_hwfn, "Ramrod is stuck, requesting MCP drain\n"); + rc = ecore_mcp_drain(p_hwfn, p_hwfn->p_main_ptt); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, true, "MCP drain failed\n"); + + /* Retry after drain */ + sleep_count = SPQ_BLOCK_SLEEP_LENGTH; + while (sleep_count) { + /* validate we receive completion update */ + OSAL_SMP_RMB(p_hwfn->p_dev); + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return ECORE_SUCCESS; + } + OSAL_MSLEEP(5); + sleep_count--; + } + + if (comp_done->done == 1) { + if (p_fw_ret) + *p_fw_ret = comp_done->fw_return_code; + return ECORE_SUCCESS; + } + + DP_NOTICE(p_hwfn, true, + "Ramrod is stuck [CID %08x cmd %02x proto %02x echo %04x]\n", + OSAL_LE32_TO_CPU(p_ent->elem.hdr.cid), + p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id, + OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo)); + + ecore_hw_err_notify(p_hwfn, ECORE_HW_ERR_RAMROD_FAIL); + + return ECORE_BUSY; +} + +/*************************************************************************** + * SPQ entries inner API + ***************************************************************************/ +static enum _ecore_status_t +ecore_spq_fill_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry *p_ent) +{ + p_ent->flags = 0; + + switch (p_ent->comp_mode) { + case ECORE_SPQ_MODE_EBLOCK: + case ECORE_SPQ_MODE_BLOCK: + p_ent->comp_cb.function = ecore_spq_blocking_cb; + break; + case ECORE_SPQ_MODE_CB: + break; + default: + DP_NOTICE(p_hwfn, true, "Unknown SPQE completion mode %d\n", + p_ent->comp_mode); + return ECORE_INVAL; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Ramrod header: 
[CID 0x%08x CMD 0x%02x protocol 0x%02x]" + " Data pointer: [%08x:%08x] Completion Mode: %s\n", + p_ent->elem.hdr.cid, p_ent->elem.hdr.cmd_id, + p_ent->elem.hdr.protocol_id, + p_ent->elem.data_ptr.hi, p_ent->elem.data_ptr.lo, + D_TRINE(p_ent->comp_mode, ECORE_SPQ_MODE_EBLOCK, + ECORE_SPQ_MODE_BLOCK, "MODE_EBLOCK", "MODE_BLOCK", + "MODE_CB")); + + return ECORE_SUCCESS; +} + +/*************************************************************************** + * HSI access + ***************************************************************************/ +static void ecore_spq_hw_initialize(struct ecore_hwfn *p_hwfn, + struct ecore_spq *p_spq) +{ + u16 pq; + struct ecore_cxt_info cxt_info; + struct core_conn_context *p_cxt; + union ecore_qm_pq_params pq_params; + enum _ecore_status_t rc; + + cxt_info.iid = p_spq->cid; + + rc = ecore_cxt_get_cid_info(p_hwfn, &cxt_info); + + if (rc < 0) { + DP_NOTICE(p_hwfn, true, "Cannot find context info for cid=%d", + p_spq->cid); + return; + } + + p_cxt = cxt_info.p_cxt; + + SET_FIELD(p_cxt->xstorm_ag_context.flags10, + XSTORM_CORE_CONN_AG_CTX_DQ_CF_EN, 1); + SET_FIELD(p_cxt->xstorm_ag_context.flags1, + XSTORM_CORE_CONN_AG_CTX_DQ_CF_ACTIVE, 1); + /* SET_FIELD(p_cxt->xstorm_ag_context.flags10, + * XSTORM_CORE_CONN_AG_CTX_SLOW_PATH_EN, 1); + */ + SET_FIELD(p_cxt->xstorm_ag_context.flags9, + XSTORM_CORE_CONN_AG_CTX_CONSOLID_PROD_CF_EN, 1); + + /* CDU validation - FIXME currently disabled */ + + /* QM physical queue */ + OSAL_MEMSET(&pq_params, 0, sizeof(pq_params)); + pq_params.core.tc = LB_TC; + pq = ecore_get_qm_pq(p_hwfn, PROTOCOLID_CORE, &pq_params); + p_cxt->xstorm_ag_context.physical_q0 = OSAL_CPU_TO_LE16(pq); + + p_cxt->xstorm_st_context.spq_base_lo = + DMA_LO_LE(p_spq->chain.p_phys_addr); + p_cxt->xstorm_st_context.spq_base_hi = + DMA_HI_LE(p_spq->chain.p_phys_addr); + + p_cxt->xstorm_st_context.consolid_base_addr.lo = + DMA_LO_LE(p_hwfn->p_consq->chain.p_phys_addr); + p_cxt->xstorm_st_context.consolid_base_addr.hi = + DMA_HI_LE(p_hwfn->p_consq->chain.p_phys_addr); +} + +static enum _ecore_status_t ecore_spq_hw_post(struct ecore_hwfn *p_hwfn, + struct ecore_spq *p_spq, + struct ecore_spq_entry *p_ent) +{ + struct ecore_chain *p_chain = &p_hwfn->p_spq->chain; + u16 echo = ecore_chain_get_prod_idx(p_chain); + struct slow_path_element *elem; + struct core_db_data db; + + p_ent->elem.hdr.echo = OSAL_CPU_TO_LE16(echo); + elem = ecore_chain_produce(p_chain); + if (!elem) { + DP_NOTICE(p_hwfn, true, "Failed to produce from SPQ chain\n"); + return ECORE_INVAL; + } + + *elem = p_ent->elem; /* struct assignment */ + + /* send a doorbell on the slow hwfn session */ + OSAL_MEMSET(&db, 0, sizeof(db)); + SET_FIELD(db.params, CORE_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(db.params, CORE_DB_DATA_AGG_CMD, DB_AGG_CMD_SET); + SET_FIELD(db.params, CORE_DB_DATA_AGG_VAL_SEL, + DQ_XCM_CORE_SPQ_PROD_CMD); + db.agg_flags = DQ_XCM_CORE_DQ_CF_CMD; + + /* validate producer is up to-date */ + OSAL_RMB(p_hwfn->p_dev); + + db.spq_prod = OSAL_CPU_TO_LE16(ecore_chain_get_prod_idx(p_chain)); + + /* do not reorder */ + OSAL_BARRIER(p_hwfn->p_dev); + + DOORBELL(p_hwfn, DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), *(u32 *)&db); + + /* make sure doorbell is rang */ + OSAL_MMIOWB(p_hwfn->p_dev); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Doorbelled [0x%08x, CID 0x%08x] with Flags: %02x" + " agg_params: %02x, prod: %04x\n", + DB_ADDR(p_spq->cid, DQ_DEMS_LEGACY), p_spq->cid, db.params, + db.agg_flags, ecore_chain_get_prod_idx(p_chain)); + + return ECORE_SUCCESS; +} + 
+/*************************************************************************** + * Asynchronous events + ***************************************************************************/ + +static enum _ecore_status_t +ecore_async_event_completion(struct ecore_hwfn *p_hwfn, + struct event_ring_entry *p_eqe) +{ + switch (p_eqe->protocol_id) { + case PROTOCOLID_COMMON: + return ecore_sriov_eqe_event(p_hwfn, + p_eqe->opcode, + p_eqe->echo, &p_eqe->data); + default: + DP_NOTICE(p_hwfn, + true, "Unknown Async completion for protocol: %d\n", + p_eqe->protocol_id); + return ECORE_INVAL; + } +} + +/*************************************************************************** + * EQ API + ***************************************************************************/ +void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod) +{ + u32 addr = GTT_BAR0_MAP_REG_USDM_RAM + + USTORM_EQE_CONS_OFFSET(p_hwfn->rel_pf_id); + + REG_WR16(p_hwfn, addr, prod); + + /* keep prod updates ordered */ + OSAL_MMIOWB(p_hwfn->p_dev); +} + +enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn, + void *cookie) +{ + struct ecore_eq *p_eq = cookie; + struct ecore_chain *p_chain = &p_eq->chain; + enum _ecore_status_t rc = 0; + + /* take a snapshot of the FW consumer */ + u16 fw_cons_idx = OSAL_LE16_TO_CPU(*p_eq->p_fw_cons); + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, "fw_cons_idx %x\n", fw_cons_idx); + + /* Need to guarantee the fw_cons index we use points to a usuable + * element (to comply with our chain), so our macros would comply + */ + if ((fw_cons_idx & ecore_chain_get_usable_per_page(p_chain)) == + ecore_chain_get_usable_per_page(p_chain)) { + fw_cons_idx += ecore_chain_get_unusable_per_page(p_chain); + } + + /* Complete current segment of eq entries */ + while (fw_cons_idx != ecore_chain_get_cons_idx(p_chain)) { + struct event_ring_entry *p_eqe = ecore_chain_consume(p_chain); + if (!p_eqe) { + rc = ECORE_INVAL; + break; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "op %x prot %x res0 %x echo %x " + "fwret %x flags %x\n", p_eqe->opcode, + p_eqe->protocol_id, /* Event Protocol ID */ + p_eqe->reserved0, /* Reserved */ + OSAL_LE16_TO_CPU(p_eqe->echo), + p_eqe->fw_return_code, /* FW return code for SP + * ramrods + */ + p_eqe->flags); + + if (GET_FIELD(p_eqe->flags, EVENT_RING_ENTRY_ASYNC)) { + if (ecore_async_event_completion(p_hwfn, p_eqe)) + rc = ECORE_INVAL; + } else if (ecore_spq_completion(p_hwfn, + p_eqe->echo, + p_eqe->fw_return_code, + &p_eqe->data)) { + rc = ECORE_INVAL; + } + + ecore_chain_recycle_consumed(p_chain); + } + + ecore_eq_prod_update(p_hwfn, ecore_chain_get_prod_idx(p_chain)); + + return rc; +} + +struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem) +{ + struct ecore_eq *p_eq; + + /* Allocate EQ struct */ + p_eq = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_eq)); + if (!p_eq) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_eq'\n"); + return OSAL_NULL; + } + + /* Allocate and initialize EQ chain */ + if (ecore_chain_alloc(p_hwfn->p_dev, + ECORE_CHAIN_USE_TO_PRODUCE, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + num_elem, + sizeof(union event_ring_element), &p_eq->chain)) { + DP_NOTICE(p_hwfn, true, "Failed to allocate eq chain"); + goto eq_allocate_fail; + } + + /* register EQ completion on the SP SB */ + ecore_int_register_cb(p_hwfn, + ecore_eq_completion, + p_eq, &p_eq->eq_sb_index, &p_eq->p_fw_cons); + + return p_eq; + +eq_allocate_fail: + ecore_eq_free(p_hwfn, p_eq); + return OSAL_NULL; +} + +void ecore_eq_setup(struct ecore_hwfn *p_hwfn, 
struct ecore_eq *p_eq) +{ + ecore_chain_reset(&p_eq->chain); +} + +void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq) +{ + if (!p_eq) + return; + ecore_chain_free(p_hwfn->p_dev, &p_eq->chain); + OSAL_FREE(p_hwfn->p_dev, p_eq); +} + +/*************************************************************************** +* CQE API - manipulate EQ functionality +***************************************************************************/ +static enum _ecore_status_t ecore_cqe_completion(struct ecore_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe + *cqe, + enum protocol_type protocol) +{ + if (IS_VF(p_hwfn->p_dev)) + return OSAL_VF_CQE_COMPLETION(p_hwfn, cqe, protocol); + + /* @@@tmp - it's possible we'll eventually want to handle some + * actual commands that can arrive here, but for now this is only + * used to complete the ramrod using the echo value on the cqe + */ + return ecore_spq_completion(p_hwfn, cqe->echo, 0, OSAL_NULL); +} + +enum _ecore_status_t ecore_eth_cqe_completion(struct ecore_hwfn *p_hwfn, + struct eth_slow_path_rx_cqe *cqe) +{ + enum _ecore_status_t rc; + + rc = ecore_cqe_completion(p_hwfn, cqe, PROTOCOLID_ETH); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed to handle RXQ CQE [cmd 0x%02x]\n", + cqe->ramrod_cmd_id); + } + + return rc; +} + +/*************************************************************************** + * Slow hwfn Queue (spq) + ***************************************************************************/ +void ecore_spq_setup(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_virt = OSAL_NULL; + struct ecore_spq *p_spq = p_hwfn->p_spq; + dma_addr_t p_phys = 0; + u32 i, capacity; + + OSAL_LIST_INIT(&p_spq->pending); + OSAL_LIST_INIT(&p_spq->completion_pending); + OSAL_LIST_INIT(&p_spq->free_pool); + OSAL_LIST_INIT(&p_spq->unlimited_pending); + OSAL_SPIN_LOCK_INIT(&p_spq->lock); + + /* SPQ empty pool */ + p_phys = p_spq->p_phys + OFFSETOF(struct ecore_spq_entry, ramrod); + p_virt = p_spq->p_virt; + + capacity = ecore_chain_get_capacity(&p_spq->chain); + for (i = 0; i < capacity; i++) { + p_virt->elem.data_ptr.hi = DMA_HI_LE(p_phys); + p_virt->elem.data_ptr.lo = DMA_LO_LE(p_phys); + + OSAL_LIST_PUSH_TAIL(&p_virt->list, &p_spq->free_pool); + + p_virt++; + p_phys += sizeof(struct ecore_spq_entry); + } + + /* Statistics */ + p_spq->normal_count = 0; + p_spq->comp_count = 0; + p_spq->comp_sent_count = 0; + p_spq->unlimited_pending_count = 0; + + OSAL_MEM_ZERO(p_spq->p_comp_bitmap, + SPQ_COMP_BMAP_SIZE * sizeof(unsigned long)); + p_spq->comp_bitmap_idx = 0; + + /* SPQ cid, cannot fail */ + ecore_cxt_acquire_cid(p_hwfn, PROTOCOLID_CORE, &p_spq->cid); + ecore_spq_hw_initialize(p_hwfn, p_spq); + + /* reset the chain itself */ + ecore_chain_reset(&p_spq->chain); +} + +enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq_entry *p_virt = OSAL_NULL; + struct ecore_spq *p_spq = OSAL_NULL; + dma_addr_t p_phys = 0; + u32 capacity; + + /* SPQ struct */ + p_spq = + OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_spq)); + if (!p_spq) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_spq'"); + return ECORE_NOMEM; + } + + /* SPQ ring */ + if (ecore_chain_alloc(p_hwfn->p_dev, ECORE_CHAIN_USE_TO_PRODUCE, + ECORE_CHAIN_MODE_SINGLE, ECORE_CHAIN_CNT_TYPE_U16, 0, + /* N/A when the mode is SINGLE */ + sizeof(struct slow_path_element), &p_spq->chain)) { + DP_NOTICE(p_hwfn, true, "Failed to allocate spq chain"); + goto spq_allocate_fail; + } + + /* allocate and fill the SPQ elements (incl. 
ramrod data list) */ + capacity = ecore_chain_get_capacity(&p_spq->chain); + p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, &p_phys, + capacity * + sizeof(struct ecore_spq_entry)); + if (!p_virt) + goto spq_allocate_fail; + + p_spq->p_virt = p_virt; + p_spq->p_phys = p_phys; + + OSAL_SPIN_LOCK_ALLOC(p_hwfn, &p_spq->lock); + + p_hwfn->p_spq = p_spq; + return ECORE_SUCCESS; + +spq_allocate_fail: + ecore_chain_free(p_hwfn->p_dev, &p_spq->chain); + OSAL_FREE(p_hwfn->p_dev, p_spq); + return ECORE_NOMEM; +} + +void ecore_spq_free(struct ecore_hwfn *p_hwfn) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + u32 capacity; + + if (!p_spq) + return; + + if (p_spq->p_virt) { + capacity = ecore_chain_get_capacity(&p_spq->chain); + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_spq->p_virt, + p_spq->p_phys, + capacity * + sizeof(struct ecore_spq_entry)); + } + + ecore_chain_free(p_hwfn->p_dev, &p_spq->chain); + OSAL_SPIN_LOCK_DEALLOC(&p_spq->lock); + OSAL_FREE(p_hwfn->p_dev, p_spq); +} + +enum _ecore_status_t +ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + struct ecore_spq_entry *p_ent = OSAL_NULL; + + OSAL_SPIN_LOCK(&p_spq->lock); + + if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { + p_ent = OSAL_ZALLOC(p_hwfn->p_dev, GFP_ATOMIC, + sizeof(struct ecore_spq_entry)); + if (!p_ent) { + OSAL_SPIN_UNLOCK(&p_spq->lock); + DP_NOTICE(p_hwfn, true, + "Failed to allocate an SPQ entry" + " for a pending ramrod\n"); + return ECORE_NOMEM; + } + p_ent->queue = &p_spq->unlimited_pending; + } else { + p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool, + struct ecore_spq_entry, list); + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->free_pool); + p_ent->queue = &p_spq->pending; + } + + *pp_ent = p_ent; + + OSAL_SPIN_UNLOCK(&p_spq->lock); + + return ECORE_SUCCESS; +} + +/* Locked variant; Should be called while the SPQ lock is taken */ +static void __ecore_spq_return_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent) +{ + OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_hwfn->p_spq->free_pool); +} + +void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent) +{ + OSAL_SPIN_LOCK(&p_hwfn->p_spq->lock); + __ecore_spq_return_entry(p_hwfn, p_ent); + OSAL_SPIN_UNLOCK(&p_hwfn->p_spq->lock); +} + +/** + * @brief ecore_spq_add_entry - adds a new entry to the pending + * list. Should be used while lock is being held. + * + * Addes an entry to the pending list is there is room (en empty + * element is available in the free_pool), or else places the + * entry in the unlimited_pending pool. + * + * @param p_hwfn + * @param p_ent + * @param priority + * + * @return enum _ecore_status_t + */ +static enum _ecore_status_t +ecore_spq_add_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, enum spq_priority priority) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + + if (p_ent->queue == &p_spq->unlimited_pending) { + if (OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { + OSAL_LIST_PUSH_TAIL(&p_ent->list, + &p_spq->unlimited_pending); + p_spq->unlimited_pending_count++; + + return ECORE_SUCCESS; + } + + struct ecore_spq_entry *p_en2; + + p_en2 = OSAL_LIST_FIRST_ENTRY(&p_spq->free_pool, + struct ecore_spq_entry, + list); + OSAL_LIST_REMOVE_ENTRY(&p_en2->list, &p_spq->free_pool); + + /* Copy the ring element physical pointer to the new + * entry, since we are about to override the entire ring + * entry and don't want to lose the pointer. 
+ */ + p_ent->elem.data_ptr = p_en2->elem.data_ptr; + + /* Setting the cookie to the comp_done of the + * new element. + */ + if (p_ent->comp_cb.cookie == &p_ent->comp_done) + p_ent->comp_cb.cookie = &p_en2->comp_done; + + *p_en2 = *p_ent; + + OSAL_FREE(p_hwfn->p_dev, p_ent); + + p_ent = p_en2; + } + + /* entry is to be placed in 'pending' queue */ + switch (priority) { + case ECORE_SPQ_PRIORITY_NORMAL: + OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->pending); + p_spq->normal_count++; + break; + case ECORE_SPQ_PRIORITY_HIGH: + OSAL_LIST_PUSH_HEAD(&p_ent->list, &p_spq->pending); + p_spq->high_count++; + break; + default: + return ECORE_INVAL; + } + + return ECORE_SUCCESS; +} + +/*************************************************************************** + * Accessor + ***************************************************************************/ + +u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn) +{ + if (!p_hwfn->p_spq) + return 0xffffffff; /* illegal */ + return p_hwfn->p_spq->cid; +} + +/*************************************************************************** + * Posting new Ramrods + ***************************************************************************/ + +static enum _ecore_status_t ecore_spq_post_list(struct ecore_hwfn *p_hwfn, + osal_list_t *head, + u32 keep_reserve) +{ + struct ecore_spq *p_spq = p_hwfn->p_spq; + enum _ecore_status_t rc; + + /* TODO - implementation might be wasteful; will always keep room + * for an additional high priority ramrod (even if one is already + * pending FW) + */ + while (ecore_chain_get_elem_left(&p_spq->chain) > keep_reserve && + !OSAL_LIST_IS_EMPTY(head)) { + struct ecore_spq_entry *p_ent = + OSAL_LIST_FIRST_ENTRY(head, struct ecore_spq_entry, list); + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, head); + OSAL_LIST_PUSH_TAIL(&p_ent->list, &p_spq->completion_pending); + p_spq->comp_sent_count++; + + rc = ecore_spq_hw_post(p_hwfn, p_spq, p_ent); + if (rc) { + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, + &p_spq->completion_pending); + __ecore_spq_return_entry(p_hwfn, p_ent); + return rc; + } + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_spq_pend_post(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t rc = ECORE_NOTIMPL; + struct ecore_spq *p_spq = p_hwfn->p_spq; + struct ecore_spq_entry *p_ent = OSAL_NULL; + + while (!OSAL_LIST_IS_EMPTY(&p_spq->free_pool)) { + if (OSAL_LIST_IS_EMPTY(&p_spq->unlimited_pending)) + break; + + p_ent = OSAL_LIST_FIRST_ENTRY(&p_spq->unlimited_pending, + struct ecore_spq_entry, list); + if (!p_ent) + return ECORE_INVAL; + + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->unlimited_pending); + + ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + } + + rc = ecore_spq_post_list(p_hwfn, + &p_spq->pending, SPQ_HIGH_PRI_RESERVE_DEFAULT); + if (rc) + return rc; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, + u8 *fw_return_code) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_spq *p_spq = p_hwfn ? p_hwfn->p_spq : OSAL_NULL; + bool b_ret_ent = true; + + if (!p_hwfn) + return ECORE_INVAL; + + if (!p_ent) { + DP_NOTICE(p_hwfn, true, "Got a NULL pointer\n"); + return ECORE_INVAL; + } + + if (p_hwfn->p_dev->recov_in_prog) { + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Recovery is in progress -> skip spq post" + " [cmd %02x protocol %02x]", + p_ent->elem.hdr.cmd_id, p_ent->elem.hdr.protocol_id); + /* Return success to let the flows to be completed successfully + * w/o any error handling. 
+ */ + return ECORE_SUCCESS; + } + + OSAL_SPIN_LOCK(&p_spq->lock); + + /* Complete the entry */ + rc = ecore_spq_fill_entry(p_hwfn, p_ent); + + /* Check return value after LOCK is taken for cleaner error flow */ + if (rc) + goto spq_post_fail; + + /* Add the request to the pending queue */ + rc = ecore_spq_add_entry(p_hwfn, p_ent, p_ent->priority); + if (rc) + goto spq_post_fail; + + rc = ecore_spq_pend_post(p_hwfn); + if (rc) { + /* Since it's possible that pending failed for a different + * entry [although unlikely], the failed entry was already + * dealt with; No need to return it here. + */ + b_ret_ent = false; + goto spq_post_fail; + } + + OSAL_SPIN_UNLOCK(&p_spq->lock); + + if (p_ent->comp_mode == ECORE_SPQ_MODE_EBLOCK) { + /* For entries in ECORE BLOCK mode, the completion code cannot + * perform the necessary cleanup - if it did, we couldn't + * access p_ent here to see whether it's successful or not. + * Thus, after gaining the answer perform the cleanup here. + */ + rc = ecore_spq_block(p_hwfn, p_ent, fw_return_code); + if (rc) + goto spq_post_fail2; + + /* return to pool */ + ecore_spq_return_entry(p_hwfn, p_ent); + } + return rc; + +spq_post_fail2: + OSAL_SPIN_LOCK(&p_spq->lock); + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, &p_spq->completion_pending); + ecore_chain_return_produced(&p_spq->chain); + +spq_post_fail: + /* return to the free pool */ + if (b_ret_ent) + __ecore_spq_return_entry(p_hwfn, p_ent); + OSAL_SPIN_UNLOCK(&p_spq->lock); + + return rc; +} + +enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data) +{ + struct ecore_spq *p_spq; + struct ecore_spq_entry *p_ent = OSAL_NULL; + struct ecore_spq_entry *tmp; + struct ecore_spq_entry *found = OSAL_NULL; + enum _ecore_status_t rc; + + if (!p_hwfn) + return ECORE_INVAL; + + p_spq = p_hwfn->p_spq; + if (!p_spq) + return ECORE_INVAL; + + OSAL_SPIN_LOCK(&p_spq->lock); + OSAL_LIST_FOR_EACH_ENTRY_SAFE(p_ent, + tmp, + &p_spq->completion_pending, + list, struct ecore_spq_entry) { + if (p_ent->elem.hdr.echo == echo) { + OSAL_LIST_REMOVE_ENTRY(&p_ent->list, + &p_spq->completion_pending); + + /* Avoid overriding of SPQ entries when getting + * out-of-order completions, by marking the completions + * in a bitmap and increasing the chain consumer only + * for the first successive completed entries. + */ + SPQ_COMP_BMAP_SET_BIT(p_spq, echo); + while (SPQ_COMP_BMAP_TEST_BIT(p_spq, + p_spq->comp_bitmap_idx)) { + SPQ_COMP_BMAP_CLEAR_BIT(p_spq, + p_spq->comp_bitmap_idx); + p_spq->comp_bitmap_idx++; + ecore_chain_return_produced(&p_spq->chain); + } + + p_spq->comp_count++; + found = p_ent; + break; + } + + /* This is debug and should be relatively uncommon - depends + * on scenarios which have mutliple per-PF sent ramrods. + */ + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Got completion for echo %04x - doesn't match" + " echo %04x in completion pending list\n", + OSAL_LE16_TO_CPU(echo), + OSAL_LE16_TO_CPU(p_ent->elem.hdr.echo)); + } + + /* Release lock before callback, as callback may post + * an additional ramrod. 
+ */ + OSAL_SPIN_UNLOCK(&p_spq->lock); + + if (!found) { + DP_NOTICE(p_hwfn, true, + "Failed to find an entry this" + " EQE [echo %04x] completes\n", + OSAL_LE16_TO_CPU(echo)); + return ECORE_EXISTS; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_SPQ, + "Complete EQE [echo %04x]: func %p cookie %p)\n", + OSAL_LE16_TO_CPU(echo), + p_ent->comp_cb.function, p_ent->comp_cb.cookie); + if (found->comp_cb.function) + found->comp_cb.function(p_hwfn, found->comp_cb.cookie, p_data, + fw_return_code); + + if (found->comp_mode != ECORE_SPQ_MODE_EBLOCK) { + /* EBLOCK is responsible for freeing its own entry */ + ecore_spq_return_entry(p_hwfn, found); + } + + /* Attempt to post pending requests */ + OSAL_SPIN_LOCK(&p_spq->lock); + rc = ecore_spq_pend_post(p_hwfn); + OSAL_SPIN_UNLOCK(&p_spq->lock); + + return rc; +} + +struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn) +{ + struct ecore_consq *p_consq; + + /* Allocate ConsQ struct */ + p_consq = + OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(struct ecore_consq)); + if (!p_consq) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_consq'\n"); + return OSAL_NULL; + } + + /* Allocate and initialize EQ chain */ + if (ecore_chain_alloc(p_hwfn->p_dev, + ECORE_CHAIN_USE_TO_PRODUCE, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + ECORE_CHAIN_PAGE_SIZE / 0x80, + 0x80, &p_consq->chain)) { + DP_NOTICE(p_hwfn, true, "Failed to allocate consq chain"); + goto consq_allocate_fail; + } + + return p_consq; + +consq_allocate_fail: + ecore_consq_free(p_hwfn, p_consq); + return OSAL_NULL; +} + +void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq) +{ + ecore_chain_reset(&p_consq->chain); +} + +void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq) +{ + if (!p_consq) + return; + ecore_chain_free(p_hwfn->p_dev, &p_consq->chain); + OSAL_FREE(p_hwfn->p_dev, p_consq); +} diff --git a/drivers/net/qede/base/ecore_spq.h b/drivers/net/qede/base/ecore_spq.h new file mode 100644 index 00000000..5c168654 --- /dev/null +++ b/drivers/net/qede/base/ecore_spq.h @@ -0,0 +1,284 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ECORE_SPQ_H__ +#define __ECORE_SPQ_H__ + +#include "ecore_hsi_common.h" +#include "ecore_status.h" +#include "ecore_hsi_eth.h" +#include "ecore_chain.h" +#include "ecore_sp_api.h" + +union ramrod_data { + struct pf_start_ramrod_data pf_start; + struct pf_update_ramrod_data pf_update; + struct rx_queue_start_ramrod_data rx_queue_start; + struct rx_queue_update_ramrod_data rx_queue_update; + struct rx_queue_stop_ramrod_data rx_queue_stop; + struct tx_queue_start_ramrod_data tx_queue_start; + struct tx_queue_stop_ramrod_data tx_queue_stop; + struct vport_start_ramrod_data vport_start; + struct vport_stop_ramrod_data vport_stop; + struct vport_update_ramrod_data vport_update; + struct core_rx_start_ramrod_data core_rx_queue_start; + struct core_rx_stop_ramrod_data core_rx_queue_stop; + struct core_tx_start_ramrod_data core_tx_queue_start; + struct core_tx_stop_ramrod_data core_tx_queue_stop; + struct vport_filter_update_ramrod_data vport_filter_update; + + struct vf_start_ramrod_data vf_start; + struct vf_stop_ramrod_data vf_stop; +}; + +#define EQ_MAX_CREDIT 0xffffffff + +enum spq_priority { + ECORE_SPQ_PRIORITY_NORMAL, + ECORE_SPQ_PRIORITY_HIGH, +}; + +union ecore_spq_req_comp { + struct ecore_spq_comp_cb cb; + u64 *done_addr; +}; + +/* SPQ_MODE_EBLOCK */ +struct ecore_spq_comp_done { + u64 done; + u8 fw_return_code; +}; + +struct ecore_spq_entry { + osal_list_entry_t list; + + u8 flags; + + /* HSI slow path element */ + struct slow_path_element elem; + + union ramrod_data ramrod; + + enum spq_priority priority; + + /* pending queue for this entry */ + osal_list_t *queue; + + enum spq_mode comp_mode; + struct ecore_spq_comp_cb comp_cb; + struct ecore_spq_comp_done comp_done; /* SPQ_MODE_EBLOCK */ +}; + +struct ecore_eq { + struct ecore_chain chain; + u8 eq_sb_index; /* index within the SB */ + __le16 *p_fw_cons; /* ptr to index value */ +}; + +struct ecore_consq { + struct ecore_chain chain; +}; + +struct ecore_spq { + osal_spinlock_t lock; + + osal_list_t unlimited_pending; + osal_list_t pending; + osal_list_t completion_pending; + osal_list_t free_pool; + + struct ecore_chain chain; + + /* allocated dma-able memory for spq entries (+ramrod data) */ + dma_addr_t p_phys; + struct ecore_spq_entry *p_virt; + + /* Bitmap for handling out-of-order completions */ +#define SPQ_RING_SIZE \ + (CORE_SPQE_PAGE_SIZE_BYTES / sizeof(struct slow_path_element)) +#define SPQ_COMP_BMAP_SIZE \ +(SPQ_RING_SIZE / (sizeof(unsigned long) * 8 /* BITS_PER_LONG */)) + unsigned long p_comp_bitmap[SPQ_COMP_BMAP_SIZE]; + u8 comp_bitmap_idx; +#define SPQ_COMP_BMAP_SET_BIT(p_spq, idx) \ +(OSAL_SET_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap)) + +#define SPQ_COMP_BMAP_CLEAR_BIT(p_spq, idx) \ +(OSAL_CLEAR_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap)) + +#define SPQ_COMP_BMAP_TEST_BIT(p_spq, idx) \ +(OSAL_TEST_BIT(((idx) % SPQ_RING_SIZE), (p_spq)->p_comp_bitmap)) + + /* Statistics */ + u32 unlimited_pending_count; + u32 normal_count; + u32 high_count; + u32 comp_sent_count; + u32 comp_count; + + u32 cid; +}; + +struct ecore_port; +struct ecore_hwfn; + +/** + * @brief ecore_spq_post - Posts a Slow hwfn request to FW, or lacking that + * Pends it to the future list. + * + * @param p_hwfn + * @param p_req + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_spq_post(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent, + u8 *fw_return_code); + +/** + * @brief ecore_spq_allocate - Alloocates & initializes the SPQ and EQ. 
+ * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_spq_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_spq_setup - Reset the SPQ to its start state. + * + * @param p_hwfn + */ +void ecore_spq_setup(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_spq_deallocate - Deallocates the given SPQ struct. + * + * @param p_hwfn + */ +void ecore_spq_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_spq_get_entry - Obtain an entrry from the spq + * free pool list. + * + * + * + * @param p_hwfn + * @param pp_ent + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_spq_get_entry(struct ecore_hwfn *p_hwfn, struct ecore_spq_entry **pp_ent); + +/** + * @brief ecore_spq_return_entry - Return an entry to spq free + * pool list + * + * @param p_hwfn + * @param p_ent + */ +void ecore_spq_return_entry(struct ecore_hwfn *p_hwfn, + struct ecore_spq_entry *p_ent); +/** + * @brief ecore_eq_allocate - Allocates & initializes an EQ struct + * + * @param p_hwfn + * @param num_elem number of elements in the eq + * + * @return struct ecore_eq* - a newly allocated structure; NULL upon error. + */ +struct ecore_eq *ecore_eq_alloc(struct ecore_hwfn *p_hwfn, u16 num_elem); + +/** + * @brief ecore_eq_setup - Reset the SPQ to its start state. + * + * @param p_hwfn + * @param p_eq + */ +void ecore_eq_setup(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq); + +/** + * @brief ecore_eq_deallocate - deallocates the given EQ struct. + * + * @param p_hwfn + * @param p_eq + */ +void ecore_eq_free(struct ecore_hwfn *p_hwfn, struct ecore_eq *p_eq); + +/** + * @brief ecore_eq_prod_update - update the FW with default EQ producer + * + * @param p_hwfn + * @param prod + */ +void ecore_eq_prod_update(struct ecore_hwfn *p_hwfn, u16 prod); + +/** + * @brief ecore_eq_completion - Completes currently pending EQ elements + * + * @param p_hwfn + * @param cookie + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_eq_completion(struct ecore_hwfn *p_hwfn, + void *cookie); + +/** + * @brief ecore_spq_completion - Completes a single event + * + * @param p_hwfn + * @param echo - echo value from cookie (used for determining completion) + * @param p_data - data from cookie (used in callback function if applicable) + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_spq_completion(struct ecore_hwfn *p_hwfn, + __le16 echo, + u8 fw_return_code, + union event_ring_data *p_data); + +/** + * @brief ecore_spq_get_cid - Given p_hwfn, return cid for the hwfn's SPQ + * + * @param p_hwfn + * + * @return u32 - SPQ CID + */ +u32 ecore_spq_get_cid(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_consq_alloc - Allocates & initializes an ConsQ + * struct + * + * @param p_hwfn + * + * @return struct ecore_eq* - a newly allocated structure; NULL upon error. + */ +struct ecore_consq *ecore_consq_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_consq_setup - Reset the ConsQ to its start + * state. + * + * @param p_hwfn + * @param p_eq + */ +void ecore_consq_setup(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq); + +/** + * @brief ecore_consq_free - deallocates the given ConsQ struct. 
+ * + * @param p_hwfn + * @param p_eq + */ +void ecore_consq_free(struct ecore_hwfn *p_hwfn, struct ecore_consq *p_consq); + +#endif /* __ECORE_SPQ_H__ */ diff --git a/drivers/net/qede/base/ecore_sriov.c b/drivers/net/qede/base/ecore_sriov.c new file mode 100644 index 00000000..1b3119d2 --- /dev/null +++ b/drivers/net/qede/base/ecore_sriov.c @@ -0,0 +1,3422 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "reg_addr.h" +#include "ecore_sriov.h" +#include "ecore_status.h" +#include "ecore_hw.h" +#include "ecore_hw_defs.h" +#include "ecore_int.h" +#include "ecore_hsi_eth.h" +#include "ecore_l2.h" +#include "ecore_vfpf_if.h" +#include "ecore_rt_defs.h" +#include "ecore_init_ops.h" +#include "ecore_gtt_reg_addr.h" +#include "ecore_iro.h" +#include "ecore_mcp.h" +#include "ecore_cxt.h" +#include "ecore_vf.h" +#include "ecore_init_fw_funcs.h" + +/* TEMPORARY until we implement print_enums... */ +const char *ecore_channel_tlvs_string[] = { + "CHANNEL_TLV_NONE", /* ends tlv sequence */ + "CHANNEL_TLV_ACQUIRE", + "CHANNEL_TLV_VPORT_START", + "CHANNEL_TLV_VPORT_UPDATE", + "CHANNEL_TLV_VPORT_TEARDOWN", + "CHANNEL_TLV_START_RXQ", + "CHANNEL_TLV_START_TXQ", + "CHANNEL_TLV_STOP_RXQ", + "CHANNEL_TLV_STOP_TXQ", + "CHANNEL_TLV_UPDATE_RXQ", + "CHANNEL_TLV_INT_CLEANUP", + "CHANNEL_TLV_CLOSE", + "CHANNEL_TLV_RELEASE", + "CHANNEL_TLV_LIST_END", + "CHANNEL_TLV_UCAST_FILTER", + "CHANNEL_TLV_VPORT_UPDATE_ACTIVATE", + "CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH", + "CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP", + "CHANNEL_TLV_VPORT_UPDATE_MCAST", + "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM", + "CHANNEL_TLV_VPORT_UPDATE_RSS", + "CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN", + "CHANNEL_TLV_VPORT_UPDATE_SGE_TPA", + "CHANNEL_TLV_MAX" +}; + +/* TODO - this is linux crc32; Need a way to ifdef it out for linux */ +u32 ecore_crc32(u32 crc, u8 *ptr, u32 length) +{ + int i; + + while (length--) { + crc ^= *ptr++; + for (i = 0; i < 8; i++) + crc = (crc >> 1) ^ ((crc & 1) ? 
0xedb88320 : 0); + } + return crc; +} + +enum _ecore_status_t ecore_iov_post_vf_bulletin(struct ecore_hwfn *p_hwfn, + int vfid, + struct ecore_ptt *p_ptt) +{ + struct ecore_bulletin_content *p_bulletin; + struct ecore_dmae_params params; + struct ecore_vf_info *p_vf; + int crc_size = sizeof(p_bulletin->crc); + + p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf) + return ECORE_INVAL; + + /* TODO - check VF is in a state where it can accept message */ + if (!p_vf->vf_bulletin) + return ECORE_INVAL; + + p_bulletin = p_vf->bulletin.p_virt; + + /* Increment bulletin board version and compute crc */ + p_bulletin->version++; + p_bulletin->crc = ecore_crc32(0, (u8 *)p_bulletin + crc_size, + p_vf->bulletin.size - crc_size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Posting Bulletin 0x%08x to VF[%d] (CRC 0x%08x)\n", + p_bulletin->version, p_vf->relative_vf_id, p_bulletin->crc); + + /* propagate bulletin board via dmae to vm memory */ + OSAL_MEMSET(¶ms, 0, sizeof(params)); + params.flags = ECORE_DMAE_FLAG_VF_DST; + params.dst_vfid = p_vf->abs_vf_id; + return ecore_dmae_host2host(p_hwfn, p_ptt, p_vf->bulletin.phys, + p_vf->vf_bulletin, p_vf->bulletin.size / 4, + ¶ms); +} + +static enum _ecore_status_t ecore_iov_pci_cfg_info(struct ecore_dev *p_dev) +{ + struct ecore_hw_sriov_info *iov = &p_dev->sriov_info; + int pos = iov->pos; + + DP_VERBOSE(p_dev, ECORE_MSG_IOV, "sriov ext pos %d\n", pos); + OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_CTRL, &iov->ctrl); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_TOTAL_VF, &iov->total_vfs); + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_INITIAL_VF, + &iov->initial_vfs); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, pos + PCI_SRIOV_NUM_VF, &iov->num_vfs); + if (iov->num_vfs) { + /* @@@TODO - in future we might want to add an OSAL here to + * allow each OS to decide on its own how to act. + */ + DP_VERBOSE(p_dev, ECORE_MSG_IOV, + "Number of VFs are already set to non-zero value." + " Ignoring PCI configuration value\n"); + iov->num_vfs = 0; + } + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_VF_OFFSET, &iov->offset); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_VF_STRIDE, &iov->stride); + + OSAL_PCI_READ_CONFIG_WORD(p_dev, + pos + PCI_SRIOV_VF_DID, &iov->vf_device_id); + + OSAL_PCI_READ_CONFIG_DWORD(p_dev, + pos + PCI_SRIOV_SUP_PGSIZE, &iov->pgsz); + + OSAL_PCI_READ_CONFIG_DWORD(p_dev, pos + PCI_SRIOV_CAP, &iov->cap); + + OSAL_PCI_READ_CONFIG_BYTE(p_dev, pos + PCI_SRIOV_FUNC_LINK, &iov->link); + + DP_VERBOSE(p_dev, ECORE_MSG_IOV, "IOV info[%d]: nres %d, cap 0x%x," + "ctrl 0x%x, total %d, initial %d, num vfs %d, offset %d," + " stride %d, page size 0x%x\n", 0, + iov->nres, iov->cap, iov->ctrl, + iov->total_vfs, iov->initial_vfs, iov->nr_virtfn, + iov->offset, iov->stride, iov->pgsz); + + /* Some sanity checks */ + if (iov->num_vfs > NUM_OF_VFS(p_dev) || + iov->total_vfs > NUM_OF_VFS(p_dev)) { + /* This can happen only due to a bug. 
In this case we set + * num_vfs to zero to avoid memory corruption in the code that + * assumes max number of vfs + */ + DP_NOTICE(p_dev, false, + "IOV: Unexpected number of vfs set: %d" + " setting num_vf to zero\n", + iov->num_vfs); + + iov->num_vfs = 0; + iov->total_vfs = 0; + } + + return ECORE_SUCCESS; +} + +static void ecore_iov_clear_vf_igu_blocks(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + struct ecore_igu_block *p_sb; + u16 sb_id; + u32 val; + + if (!p_hwfn->hw_info.p_igu_info) { + DP_ERR(p_hwfn, + "ecore_iov_clear_vf_igu_blocks IGU Info not inited\n"); + return; + } + + for (sb_id = 0; + sb_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev); sb_id++) { + p_sb = &p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks[sb_id]; + if ((p_sb->status & ECORE_IGU_STATUS_FREE) && + !(p_sb->status & ECORE_IGU_STATUS_PF)) { + val = ecore_rd(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sb_id * 4); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + ecore_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + 4 * sb_id, val); + } + } +} + +static void ecore_iov_setup_vfdb(struct ecore_hwfn *p_hwfn) +{ + u16 num_vfs = p_hwfn->p_dev->sriov_info.total_vfs; + union pfvf_tlvs *p_reply_virt_addr; + union vfpf_tlvs *p_req_virt_addr; + struct ecore_bulletin_content *p_bulletin_virt; + struct ecore_pf_iov *p_iov_info; + dma_addr_t req_p, rply_p, bulletin_p; + u8 idx = 0; + + p_iov_info = p_hwfn->pf_iov_info; + + OSAL_MEMSET(p_iov_info->vfs_array, 0, sizeof(p_iov_info->vfs_array)); + + p_req_virt_addr = p_iov_info->mbx_msg_virt_addr; + req_p = p_iov_info->mbx_msg_phys_addr; + p_reply_virt_addr = p_iov_info->mbx_reply_virt_addr; + rply_p = p_iov_info->mbx_reply_phys_addr; + p_bulletin_virt = p_iov_info->p_bulletins; + bulletin_p = p_iov_info->bulletins_phys; + if (!p_req_virt_addr || !p_reply_virt_addr || !p_bulletin_virt) { + DP_ERR(p_hwfn, + "ecore_iov_setup_vfdb called without alloc mem first\n"); + return; + } + + p_iov_info->base_vport_id = 1; /* @@@TBD resource allocation */ + + for (idx = 0; idx < num_vfs; idx++) { + struct ecore_vf_info *vf = &p_iov_info->vfs_array[idx]; + u32 concrete; + + vf->vf_mbx.req_virt = p_req_virt_addr + idx; + vf->vf_mbx.req_phys = req_p + idx * sizeof(union vfpf_tlvs); + vf->vf_mbx.reply_virt = p_reply_virt_addr + idx; + vf->vf_mbx.reply_phys = rply_p + idx * sizeof(union pfvf_tlvs); + +#ifdef CONFIG_ECORE_SW_CHANNEL + vf->vf_mbx.sw_mbx.request_size = sizeof(union vfpf_tlvs); + vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST; +#endif + vf->state = VF_STOPPED; + + vf->bulletin.phys = idx * + sizeof(struct ecore_bulletin_content) + bulletin_p; + vf->bulletin.p_virt = p_bulletin_virt + idx; + vf->bulletin.size = sizeof(struct ecore_bulletin_content); + + vf->relative_vf_id = idx; + vf->abs_vf_id = idx + p_hwfn->hw_info.first_vf_in_pf; + concrete = ecore_vfid_to_concrete(p_hwfn, vf->abs_vf_id); + vf->concrete_fid = concrete; + /* TODO - need to devise a better way of getting opaque */ + vf->opaque_fid = (p_hwfn->hw_info.opaque_fid & 0xff) | + (vf->abs_vf_id << 8); + /* @@TBD MichalK - add base vport_id of VFs to equation */ + vf->vport_id = p_iov_info->base_vport_id + idx; + } +} + +static enum _ecore_status_t ecore_iov_allocate_vfdb(struct ecore_hwfn *p_hwfn) +{ + struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + void **p_v_addr; + u16 num_vfs = 0; + + num_vfs = p_hwfn->p_dev->sriov_info.total_vfs; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "ecore_iov_allocate_vfdb for %d VFs\n", num_vfs); + + /* Allocate PF Mailbox buffer (per-VF) */ + p_iov_info->mbx_msg_size = 
sizeof(union vfpf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_msg_virt_addr; + *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_msg_size); + if (!*p_v_addr) + return ECORE_NOMEM; + + /* Allocate PF Mailbox Reply buffer (per-VF) */ + p_iov_info->mbx_reply_size = sizeof(union pfvf_tlvs) * num_vfs; + p_v_addr = &p_iov_info->mbx_reply_virt_addr; + *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov_info->mbx_reply_phys_addr, + p_iov_info->mbx_reply_size); + if (!*p_v_addr) + return ECORE_NOMEM; + + p_iov_info->bulletins_size = sizeof(struct ecore_bulletin_content) * + num_vfs; + p_v_addr = &p_iov_info->p_bulletins; + *p_v_addr = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_iov_info->bulletins_phys, + p_iov_info->bulletins_size); + if (!*p_v_addr) + return ECORE_NOMEM; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "PF's Requests mailbox [%p virt 0x%" PRIx64 " phys], " + "Response mailbox [%p virt 0x%" PRIx64 " phys] Bulletins" + " [%p virt 0x%" PRIx64 " phys]\n", + p_iov_info->mbx_msg_virt_addr, + (u64)p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_reply_virt_addr, + (u64)p_iov_info->mbx_reply_phys_addr, + p_iov_info->p_bulletins, (u64)p_iov_info->bulletins_phys); + + /* @@@TBD MichalK - statistics / RSS */ + + return ECORE_SUCCESS; +} + +static void ecore_iov_free_vfdb(struct ecore_hwfn *p_hwfn) +{ + struct ecore_pf_iov *p_iov_info = p_hwfn->pf_iov_info; + + if (p_hwfn->pf_iov_info->mbx_msg_virt_addr) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov_info->mbx_msg_virt_addr, + p_iov_info->mbx_msg_phys_addr, + p_iov_info->mbx_msg_size); + + if (p_hwfn->pf_iov_info->mbx_reply_virt_addr) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov_info->mbx_reply_virt_addr, + p_iov_info->mbx_reply_phys_addr, + p_iov_info->mbx_reply_size); + + if (p_iov_info->p_bulletins) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov_info->p_bulletins, + p_iov_info->bulletins_phys, + p_iov_info->bulletins_size); + + /* @@@TBD MichalK - statistics / RSS */ +} + +enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_pf_iov *p_sriov; + + if (!IS_PF_SRIOV(p_hwfn)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No SR-IOV - no need for IOV db\n"); + return rc; + } + + p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); + if (!p_sriov) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_sriov'"); + return ECORE_NOMEM; + } + + p_hwfn->pf_iov_info = p_sriov; + + rc = ecore_iov_allocate_vfdb(p_hwfn); + + return rc; +} + +void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt) +{ + if (!IS_PF_SRIOV(p_hwfn) || !p_hwfn->pf_iov_info) + return; + + ecore_iov_setup_vfdb(p_hwfn); + ecore_iov_clear_vf_igu_blocks(p_hwfn, p_ptt); +} + +void ecore_iov_free(struct ecore_hwfn *p_hwfn) +{ + if (p_hwfn->pf_iov_info) { + ecore_iov_free_vfdb(p_hwfn); + OSAL_FREE(p_hwfn->p_dev, p_hwfn->pf_iov_info); + } +} + +enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc; + + /* @@@ TBD get this information from shmem / pci cfg */ + if (IS_VF(p_hwfn->p_dev)) + return ECORE_SUCCESS; + + /* First hwfn should learn the PCI configuration */ + if (IS_LEAD_HWFN(p_hwfn)) { + struct ecore_dev *p_dev = p_hwfn->p_dev; + int *pos = &p_hwfn->p_dev->sriov_info.pos; + + *pos = OSAL_PCI_FIND_EXT_CAPABILITY(p_hwfn->p_dev, + PCI_EXT_CAP_ID_SRIOV); + if (!*pos) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No PCIe IOV support\n"); + return 
ECORE_SUCCESS; + } + + rc = ecore_iov_pci_cfg_info(p_dev); + if (rc) + return rc; + } else if (!p_hwfn->p_dev->sriov_info.pos) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "No PCIe IOV support\n"); + return ECORE_SUCCESS; + } + + /* Calculate the first VF index - this is a bit tricky; Basically, + * VFs start at offset 16 relative to PF0, and 2nd engine VFs begin + * after the first engine's VFs. + */ + p_hwfn->hw_info.first_vf_in_pf = p_hwfn->p_dev->sriov_info.offset + + p_hwfn->abs_pf_id - 16; + if (ECORE_PATH_ID(p_hwfn)) + p_hwfn->hw_info.first_vf_in_pf -= MAX_NUM_VFS_BB; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "First VF in hwfn 0x%08x\n", p_hwfn->hw_info.first_vf_in_pf); + + return ECORE_SUCCESS; +} + +struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct ecore_vf_info *vf = OSAL_NULL; + + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n"); + return OSAL_NULL; + } + + if (ecore_iov_is_valid_vfid(p_hwfn, relative_vf_id, b_enabled_only)) + vf = &p_hwfn->pf_iov_info->vfs_array[relative_vf_id]; + else + DP_ERR(p_hwfn, "ecore_iov_get_vf_info: VF[%d] is not enabled\n", + relative_vf_id); + + return vf; +} + +void ecore_iov_set_vf_to_disable(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, u8 to_disable) +{ + struct ecore_vf_info *vf; + + vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) + return; + + vf->to_disable = to_disable; +} + +void ecore_iov_set_vfs_to_disable(struct ecore_hwfn *p_hwfn, u8 to_disable) +{ + u16 i; + + for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) + ecore_iov_set_vf_to_disable(p_hwfn, i, to_disable); +} + +#ifndef LINUX_REMOVE +/* @@@TBD Consider taking outside of ecore... */ +enum _ecore_status_t ecore_iov_set_vf_ctx(struct ecore_hwfn *p_hwfn, + u16 vf_id, void *ctx) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_vf_info *vf = ecore_iov_get_vf_info(p_hwfn, vf_id, true); + + if (vf != OSAL_NULL) { + vf->ctx = ctx; +#ifdef CONFIG_ECORE_SW_CHANNEL + vf->vf_mbx.sw_mbx.mbx_state = VF_PF_WAIT_FOR_START_REQUEST; +#endif + } else { + rc = ECORE_UNKNOWN_ERROR; + } + return rc; +} +#endif + +/** + * VF enable primitives + * + * when pretend is required the caller is reponsible + * for calling pretend prioir to calling these routines + */ + +/* clears vf error in all semi blocks + * Assumption: called under VF pretend... 
+ */ +static OSAL_INLINE void ecore_iov_vf_semi_clear_err(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + ecore_wr(p_hwfn, p_ptt, TSEM_REG_VF_ERROR, 1); + ecore_wr(p_hwfn, p_ptt, USEM_REG_VF_ERROR, 1); + ecore_wr(p_hwfn, p_ptt, MSEM_REG_VF_ERROR, 1); + ecore_wr(p_hwfn, p_ptt, XSEM_REG_VF_ERROR, 1); + ecore_wr(p_hwfn, p_ptt, YSEM_REG_VF_ERROR, 1); + ecore_wr(p_hwfn, p_ptt, PSEM_REG_VF_ERROR, 1); +} + +static void ecore_iov_vf_pglue_clear_err(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u8 abs_vfid) +{ + ecore_wr(p_hwfn, p_ptt, + PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR + (abs_vfid >> 5) * 4, + 1 << (abs_vfid & 0x1f)); +} + +static void ecore_iov_vf_igu_reset(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + int i; + u16 igu_sb_id; + + /* Set VF masks and configuration - pretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + ecore_wr(p_hwfn, p_ptt, IGU_REG_STATISTIC_NUM_VF_MSG_SENT, 0); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "value in VF_CONFIGURATION of vf %d after write %x\n", + vf->abs_vf_id, + ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION)); + + /* unpretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + /* iterate ove all queues, clear sb consumer */ + for (i = 0; i < vf->num_sbs; i++) { + igu_sb_id = vf->igu_sbs[i]; + /* Set then clear... */ + ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 1, + vf->opaque_fid); + ecore_int_igu_cleanup_sb(p_hwfn, p_ptt, igu_sb_id, 0, + vf->opaque_fid); + } +} + +static void ecore_iov_vf_igu_set_int(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf, bool enable) +{ + u32 igu_vf_conf; + + ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + igu_vf_conf = ecore_rd(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION); + + if (enable) + igu_vf_conf |= IGU_VF_CONF_MSI_MSIX_EN; + else + igu_vf_conf &= ~IGU_VF_CONF_MSI_MSIX_EN; + + ecore_wr(p_hwfn, p_ptt, IGU_REG_VF_CONFIGURATION, igu_vf_conf); + + /* unpretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); +} + +static enum _ecore_status_t +ecore_iov_enable_vf_access(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, struct ecore_vf_info *vf) +{ + u32 igu_vf_conf = IGU_VF_CONF_FUNC_EN; + enum _ecore_status_t rc; + + if (vf->to_disable) + return ECORE_SUCCESS; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Enable internal access for vf %x [abs %x]\n", vf->abs_vf_id, + ECORE_VF_ABS_ID(p_hwfn, vf)); + + ecore_iov_vf_pglue_clear_err(p_hwfn, p_ptt, + ECORE_VF_ABS_ID(p_hwfn, vf)); + + rc = ecore_mcp_config_vf_msix(p_hwfn, p_ptt, + vf->abs_vf_id, vf->num_sbs); + if (rc) + return rc; + + ecore_fid_pretend(p_hwfn, p_ptt, (u16)vf->concrete_fid); + + SET_FIELD(igu_vf_conf, IGU_VF_CONF_PARENT, p_hwfn->rel_pf_id); + STORE_RT_REG(p_hwfn, IGU_REG_VF_CONFIGURATION_RT_OFFSET, igu_vf_conf); + + ecore_init_run(p_hwfn, p_ptt, PHASE_VF, vf->abs_vf_id, + p_hwfn->hw_info.hw_mode); + + /* unpretend */ + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + if (vf->state != VF_STOPPED) { + DP_NOTICE(p_hwfn, true, "VF[%02x] is already started\n", + vf->abs_vf_id); + return ECORE_INVAL; + } + + /* Start VF */ + rc = ecore_sp_vf_start(p_hwfn, vf->concrete_fid, vf->opaque_fid); + if (rc != ECORE_SUCCESS) + DP_NOTICE(p_hwfn, true, "Failed to start VF[%02x]\n", + vf->abs_vf_id); + + vf->state = VF_FREE; + + return rc; +} + +/** + * + * @brief ecore_iov_config_perm_table - configure the permission + * zone table. + * In E4, queue zone permission table size is 320x9. 
There + * are 320 VF queues for single engine device (256 for dual + * engine device), and each entry has the following format: + * {Valid, VF[7:0]} + * @param p_hwfn + * @param p_ptt + * @param vf + * @param enable + */ +static void ecore_iov_config_perm_table(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf, u8 enable) +{ + u32 reg_addr; + u32 val; + u16 qzone_id = 0; + int qid; + + for (qid = 0; qid < vf->num_rxqs; qid++) { + ecore_fw_l2_queue(p_hwfn, vf->vf_queues[qid].fw_rx_qid, + &qzone_id); + + reg_addr = PSWHST_REG_ZONE_PERMISSION_TABLE + qzone_id * 4; + val = enable ? (vf->abs_vf_id | (1 << 8)) : 0; + ecore_wr(p_hwfn, p_ptt, reg_addr, val); + } +} + +static void ecore_iov_enable_vf_traffic(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + /* Reset vf in IGU interrupts are still disabled */ + ecore_iov_vf_igu_reset(p_hwfn, p_ptt, vf); + + ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 1 /* enable */); + + /* Permission Table */ + ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, true /* enable */); +} + +static u8 ecore_iov_alloc_vf_igu_sbs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf, + u16 num_rx_queues) +{ + int igu_id = 0; + int qid = 0; + u32 val = 0; + struct ecore_igu_block *igu_blocks = + p_hwfn->hw_info.p_igu_info->igu_map.igu_blocks; + + if (num_rx_queues > p_hwfn->hw_info.p_igu_info->free_blks) + num_rx_queues = p_hwfn->hw_info.p_igu_info->free_blks; + + p_hwfn->hw_info.p_igu_info->free_blks -= num_rx_queues; + + SET_FIELD(val, IGU_MAPPING_LINE_FUNCTION_NUMBER, vf->abs_vf_id); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 1); + SET_FIELD(val, IGU_MAPPING_LINE_PF_VALID, 0); + + while ((qid < num_rx_queues) && + (igu_id < ECORE_MAPPING_MEMORY_SIZE(p_hwfn->p_dev))) { + if (igu_blocks[igu_id].status & ECORE_IGU_STATUS_FREE) { + struct cau_sb_entry sb_entry; + + vf->igu_sbs[qid] = (u16)igu_id; + igu_blocks[igu_id].status &= ~ECORE_IGU_STATUS_FREE; + + SET_FIELD(val, IGU_MAPPING_LINE_VECTOR_NUMBER, qid); + + ecore_wr(p_hwfn, p_ptt, + IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id, + val); + + /* Configure igu sb in CAU which were marked valid */ + ecore_init_cau_sb_entry(p_hwfn, &sb_entry, + p_hwfn->rel_pf_id, + vf->abs_vf_id, 1); + ecore_dmae_host2grc(p_hwfn, p_ptt, + (u64)(osal_uintptr_t)&sb_entry, + CAU_REG_SB_VAR_MEMORY + + igu_id * sizeof(u64), 2, 0); + qid++; + } + igu_id++; + } + + vf->num_sbs = (u8)num_rx_queues; + + return vf->num_sbs; +} + +/** + * + * @brief The function invalidates all the VF entries, + * technically this isn't required, but added for + * cleaness and ease of debugging incase a VF attempts to + * produce an interrupt after it has been taken down. 
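+ *        The freed CAM lines are also returned to the IGU free pool, so a
+ *        later VF initialization can claim them again.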
+ * + * @param p_hwfn + * @param p_ptt + * @param vf + */ +static void ecore_iov_free_vf_igu_sbs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_igu_info *p_info = p_hwfn->hw_info.p_igu_info; + int idx, igu_id; + u32 addr, val; + + /* Invalidate igu CAM lines and mark them as free */ + for (idx = 0; idx < vf->num_sbs; idx++) { + igu_id = vf->igu_sbs[idx]; + addr = IGU_REG_MAPPING_MEMORY + sizeof(u32) * igu_id; + + val = ecore_rd(p_hwfn, p_ptt, addr); + SET_FIELD(val, IGU_MAPPING_LINE_VALID, 0); + ecore_wr(p_hwfn, p_ptt, addr, val); + + p_info->igu_map.igu_blocks[igu_id].status |= + ECORE_IGU_STATUS_FREE; + + p_hwfn->hw_info.p_igu_info->free_blks++; + } + + vf->num_sbs = 0; +} + +enum _ecore_status_t ecore_iov_init_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id, u16 num_rx_queues) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_vf_info *vf = OSAL_NULL; + u8 num_of_vf_available_chains = 0; + u32 cids; + u8 i; + + if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) { + DP_NOTICE(p_hwfn, true, "VF[%d] is already active.\n", + rel_vf_id); + return ECORE_INVAL; + } + + vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!vf) { + DP_ERR(p_hwfn, "ecore_iov_init_hw_for_vf : vf is OSAL_NULL\n"); + return ECORE_UNKNOWN_ERROR; + } + + /* Limit number of queues according to number of CIDs */ + ecore_cxt_get_proto_cid_count(p_hwfn, PROTOCOLID_ETH, &cids); + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - requesting to initialize for 0x%04x queues" + " [0x%04x CIDs available]\n", + vf->relative_vf_id, num_rx_queues, (u16)cids); + num_rx_queues = OSAL_MIN_T(u16, num_rx_queues, ((u16)cids)); + + num_of_vf_available_chains = ecore_iov_alloc_vf_igu_sbs(p_hwfn, + p_ptt, + vf, + num_rx_queues); + if (num_of_vf_available_chains == 0) { + DP_ERR(p_hwfn, "no available igu sbs\n"); + return ECORE_NOMEM; + } + + /* Choose queue number and index ranges */ + vf->num_rxqs = num_of_vf_available_chains; + vf->num_txqs = num_of_vf_available_chains; + + for (i = 0; i < vf->num_rxqs; i++) { + u16 queue_id = ecore_int_queue_id_from_sb_id(p_hwfn, + vf->igu_sbs[i]); + + if (queue_id > RESC_NUM(p_hwfn, ECORE_L2_QUEUE)) { + DP_NOTICE(p_hwfn, true, + "VF[%d] will require utilizing of" + " out-of-bounds queues - %04x\n", + vf->relative_vf_id, queue_id); + /* TODO - cleanup the already allocate SBs */ + return ECORE_INVAL; + } + + /* CIDs are per-VF, so no problem having them 0-based. 
*/ + vf->vf_queues[i].fw_rx_qid = queue_id; + vf->vf_queues[i].fw_tx_qid = queue_id; + vf->vf_queues[i].fw_cid = i; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - [%d] SB %04x, Tx/Rx queue %04x CID %04x\n", + vf->relative_vf_id, i, vf->igu_sbs[i], queue_id, i); + } + + rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, vf); + + if (rc == ECORE_SUCCESS) { + struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info; + u16 vf_id = vf->relative_vf_id; + + p_iov->num_vfs++; + p_iov->active_vfs[vf_id / 64] |= (1ULL << (vf_id % 64)); + } + + return rc; +} + +enum _ecore_status_t ecore_iov_release_hw_for_vf(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id) +{ + struct ecore_vf_info *vf = OSAL_NULL; + enum _ecore_status_t rc = ECORE_SUCCESS; + + vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!vf) { + DP_ERR(p_hwfn, "ecore_iov_release_hw_for_vf : vf is NULL\n"); + return ECORE_UNKNOWN_ERROR; + } + + if (vf->state != VF_STOPPED) { + /* Stopping the VF */ + rc = ecore_sp_vf_stop(p_hwfn, vf->concrete_fid, vf->opaque_fid); + + if (rc != ECORE_SUCCESS) { + DP_ERR(p_hwfn, "ecore_sp_vf_stop returned error %d\n", + rc); + return rc; + } + + vf->state = VF_STOPPED; + } + + /* disablng interrupts and resetting permission table was done during + * vf-close, however, we could get here without going through vf_close + */ + /* Disable Interrupts for VF */ + ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */); + + /* Reset Permission table */ + ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */); + + vf->num_rxqs = 0; + vf->num_txqs = 0; + ecore_iov_free_vf_igu_sbs(p_hwfn, p_ptt, vf); + + if (ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id)) { + struct ecore_hw_sriov_info *p_iov = &p_hwfn->p_dev->sriov_info; + u16 vf_id = vf->relative_vf_id; + + p_iov->num_vfs--; + p_iov->active_vfs[vf_id / 64] &= ~(1ULL << (vf_id % 64)); + } + + return ECORE_SUCCESS; +} + +static bool ecore_iov_tlv_supported(u16 tlvtype) +{ + return tlvtype > CHANNEL_TLV_NONE && tlvtype < CHANNEL_TLV_MAX; +} + +static void ecore_iov_lock_vf_pf_channel(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, u16 tlv) +{ + /* we don't lock the channel for unsupported tlvs */ + if (!ecore_iov_tlv_supported(tlv)) + return; + + /* lock the channel */ + /* mutex_lock(&vf->op_mutex); @@@TBD MichalK - add lock... 
+    */
+
+    /* record the locking op */
+    /* vf->op_current = tlv; @@@TBD MichalK */
+
+    /* log the lock */
+    DP_VERBOSE(p_hwfn,
+               ECORE_MSG_IOV,
+               "VF[%d]: vf pf channel locked by %s\n",
+               vf->abs_vf_id, ecore_channel_tlvs_string[tlv]);
+}
+
+static void ecore_iov_unlock_vf_pf_channel(struct ecore_hwfn *p_hwfn,
+                                           struct ecore_vf_info *vf,
+                                           u16 expected_tlv)
+{
+    /* we don't unlock the channel for unsupported tlvs */
+    if (!ecore_iov_tlv_supported(expected_tlv))
+        return;
+
+    /* WARN(expected_tlv != vf->op_current,
+     * "lock mismatch: expected %s found %s",
+     * channel_tlvs_string[expected_tlv],
+     * channel_tlvs_string[vf->op_current]);
+     * @@@TBD MichalK
+     */
+
+    /* unlock the channel */
+    /* mutex_unlock(&vf->op_mutex); @@@TBD MichalK add the lock */
+
+    /* log the unlock */
+    DP_VERBOSE(p_hwfn,
+               ECORE_MSG_IOV,
+               "VF[%d]: vf pf channel unlocked by %s\n",
+               vf->abs_vf_id, ecore_channel_tlvs_string[expected_tlv]);
+
+    /* record the locking op */
+    /* vf->op_current = CHANNEL_TLV_NONE; */
+}
+
+/* place a given tlv on the tlv buffer, continuing current tlv list */
+void *ecore_add_tlv(struct ecore_hwfn *p_hwfn,
+                    u8 **offset, u16 type, u16 length)
+{
+    struct channel_tlv *tl = (struct channel_tlv *)*offset;
+
+    tl->type = type;
+    tl->length = length;
+
+    /* Offset should keep pointing to next TLV (the end of the last) */
+    *offset += length;
+
+    /* Return a pointer to the start of the added tlv */
+    return *offset - length;
+}
+
+/* list the types and lengths of the tlvs on the buffer */
+void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list)
+{
+    u16 i = 1, total_length = 0;
+    struct channel_tlv *tlv;
+
+    do {
+        /* cast current tlv list entry to channel tlv header */
+        tlv = (struct channel_tlv *)((u8 *)tlvs_list + total_length);
+
+        /* output tlv */
+        if (ecore_iov_tlv_supported(tlv->type))
+            DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                       "TLV number %d: type %s, length %d\n",
+                       i, ecore_channel_tlvs_string[tlv->type],
+                       tlv->length);
+        else
+            DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                       "TLV number %d: type %d, length %d\n",
+                       i, tlv->type, tlv->length);
+
+        if (tlv->type == CHANNEL_TLV_LIST_END)
+            return;
+
+        /* Validate entry - protect against malicious VFs */
+        if (!tlv->length) {
+            DP_NOTICE(p_hwfn, false, "TLV of length 0 found\n");
+            return;
+        }
+        total_length += tlv->length;
+        if (total_length >= sizeof(struct tlv_buffer_size)) {
+            DP_NOTICE(p_hwfn, false, "TLV ==> Buffer overflow\n");
+            return;
+        }
+
+        i++;
+    } while (1);
+}
+
+static void ecore_iov_send_response(struct ecore_hwfn *p_hwfn,
+                                    struct ecore_ptt *p_ptt,
+                                    struct ecore_vf_info *p_vf,
+                                    u16 length, u8 status)
+{
+    struct ecore_iov_vf_mbx *mbx = &p_vf->vf_mbx;
+    struct ecore_dmae_params params;
+    u8 eng_vf_id;
+
+    mbx->reply_virt->default_resp.hdr.status = status;
+
+#ifdef CONFIG_ECORE_SW_CHANNEL
+    mbx->sw_mbx.response_size =
+        length + sizeof(struct channel_list_end_tlv);
+#endif
+
+    ecore_dp_tlv_list(p_hwfn, mbx->reply_virt);
+
+    if (!p_hwfn->p_dev->sriov_info.b_hw_channel)
+        return;
+
+    eng_vf_id = p_vf->abs_vf_id;
+
+    OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params));
+    params.flags = ECORE_DMAE_FLAG_VF_DST;
+    params.dst_vfid = eng_vf_id;
+
+    ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys + sizeof(u64),
+                         mbx->req_virt->first_tlv.reply_address +
+                         sizeof(u64),
+                         (sizeof(union pfvf_tlvs) - sizeof(u64)) / 4,
+                         &params);
+
+    ecore_dmae_host2host(p_hwfn, p_ptt, mbx->reply_phys,
+                         mbx->req_virt->first_tlv.reply_address,
+                         sizeof(u64) / 4, &params);
+
+    REG_WR(p_hwfn,
+           GTT_BAR0_MAP_REG_USDM_RAM + +
USTORM_VF_PF_CHANNEL_READY_OFFSET(eng_vf_id), 1); +} + +static u16 ecore_iov_vport_to_tlv(struct ecore_hwfn *p_hwfn, + enum ecore_iov_vport_update_flag flag) +{ + switch (flag) { + case ECORE_IOV_VP_UPDATE_ACTIVATE: + return CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + case ECORE_IOV_VP_UPDATE_VLAN_STRIP: + return CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + case ECORE_IOV_VP_UPDATE_TX_SWITCH: + return CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + case ECORE_IOV_VP_UPDATE_MCAST: + return CHANNEL_TLV_VPORT_UPDATE_MCAST; + case ECORE_IOV_VP_UPDATE_ACCEPT_PARAM: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + case ECORE_IOV_VP_UPDATE_RSS: + return CHANNEL_TLV_VPORT_UPDATE_RSS; + case ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN: + return CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + case ECORE_IOV_VP_UPDATE_SGE_TPA: + return CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + default: + return 0; + } +} + +static u16 ecore_iov_prep_vp_update_resp_tlvs(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + struct ecore_iov_vf_mbx *p_mbx, + u8 status, u16 tlvs_mask, + u16 tlvs_accepted) +{ + struct pfvf_def_resp_tlv *resp; + u16 size, total_len, i; + + OSAL_MEMSET(p_mbx->reply_virt, 0, sizeof(union pfvf_tlvs)); + p_mbx->offset = (u8 *)(p_mbx->reply_virt); + size = sizeof(struct pfvf_def_resp_tlv); + total_len = size; + + ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_VPORT_UPDATE, size); + + /* Prepare response for all extended tlvs if they are found by PF */ + for (i = 0; i < ECORE_IOV_VP_UPDATE_MAX; i++) { + if (!(tlvs_mask & (1 << i))) + continue; + + resp = ecore_add_tlv(p_hwfn, &p_mbx->offset, + ecore_iov_vport_to_tlv(p_hwfn, i), size); + + if (tlvs_accepted & (1 << i)) + resp->hdr.status = status; + else + resp->hdr.status = PFVF_STATUS_NOT_SUPPORTED; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - vport_update resp: TLV %d, status %02x\n", + p_vf->relative_vf_id, + ecore_iov_vport_to_tlv(p_hwfn, i), resp->hdr.status); + + total_len += size; + } + + ecore_add_tlv(p_hwfn, &p_mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + return total_len; +} + +static void ecore_iov_prepare_resp(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf_info, + u16 type, u16 length, u8 status) +{ + struct ecore_iov_vf_mbx *mbx = &vf_info->vf_mbx; + + mbx->offset = (u8 *)(mbx->reply_virt); + + ecore_add_tlv(p_hwfn, &mbx->offset, type, length); + ecore_add_tlv(p_hwfn, &mbx->offset, CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + ecore_iov_send_response(p_hwfn, p_ptt, vf_info, length, status); +} + +static void ecore_iov_vf_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf) +{ + p_vf->vf_bulletin = 0; + p_vf->vport_instance = 0; + p_vf->num_mac_filters = 0; + p_vf->num_vlan_filters = 0; + p_vf->num_mc_filters = 0; + p_vf->configured_features = 0; + + /* If VF previously requested less resources, go back to default */ + p_vf->num_rxqs = p_vf->num_sbs; + p_vf->num_txqs = p_vf->num_sbs; + + p_vf->num_active_rxqs = 0; + + OSAL_MEMSET(&p_vf->shadow_config, 0, sizeof(p_vf->shadow_config)); + OSAL_IOV_VF_CLEANUP(p_hwfn, p_vf->relative_vf_id); +} + +static void ecore_iov_vf_mbx_acquire(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_acquire_tlv *req = &mbx->req_virt->acquire; + struct pfvf_acquire_resp_tlv *resp = &mbx->reply_virt->acquire_resp; + struct pf_vf_resc *resc = &resp->resc; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + u16 length; + u8 i, 
vfpf_status = PFVF_STATUS_SUCCESS; + + /* Validate FW compatibility */ + if (req->vfdev_info.fw_major != FW_MAJOR_VERSION || + req->vfdev_info.fw_minor != FW_MINOR_VERSION || + req->vfdev_info.fw_revision != FW_REVISION_VERSION || + req->vfdev_info.fw_engineering != FW_ENGINEERING_VERSION) { + DP_INFO(p_hwfn, + "VF[%d] is running an incompatible driver [VF needs" + " FW %02x:%02x:%02x:%02x but Hypervisor is" + " using %02x:%02x:%02x:%02x]\n", + vf->abs_vf_id, req->vfdev_info.fw_major, + req->vfdev_info.fw_minor, req->vfdev_info.fw_revision, + req->vfdev_info.fw_engineering, FW_MAJOR_VERSION, + FW_MINOR_VERSION, FW_REVISION_VERSION, + FW_ENGINEERING_VERSION); + vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } +#ifndef __EXTRACT__LINUX__ + if (OSAL_IOV_VF_ACQUIRE(p_hwfn, vf->relative_vf_id) != ECORE_SUCCESS) { + vfpf_status = PFVF_STATUS_NOT_SUPPORTED; + goto out; + } +#endif + + OSAL_MEMSET(resp, 0, sizeof(*resp)); + + /* Fill in vf info stuff : @@@TBD MichalK Hard Coded for now... */ + vf->opaque_fid = req->vfdev_info.opaque_fid; + vf->num_mac_filters = 1; + vf->num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS; + vf->num_mc_filters = ECORE_MAX_MC_ADDRS; + + vf->vf_bulletin = req->bulletin_addr; + vf->bulletin.size = (vf->bulletin.size < req->bulletin_size) ? + vf->bulletin.size : req->bulletin_size; + + /* fill in pfdev info */ + pfdev_info->chip_num = p_hwfn->p_dev->chip_num; + pfdev_info->db_size = 0; /* @@@ TBD MichalK Vf Doorbells */ + pfdev_info->indices_per_sb = PIS_PER_SB; + pfdev_info->capabilities = PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED; + + pfdev_info->stats_info.mstats.address = + PXP_VF_BAR0_START_MSDM_ZONE_B + + OFFSETOF(struct mstorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.mstats.len = + sizeof(struct eth_mstorm_per_queue_stat); + + pfdev_info->stats_info.ustats.address = + PXP_VF_BAR0_START_USDM_ZONE_B + + OFFSETOF(struct ustorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.ustats.len = + sizeof(struct eth_ustorm_per_queue_stat); + + pfdev_info->stats_info.pstats.address = + PXP_VF_BAR0_START_PSDM_ZONE_B + + OFFSETOF(struct pstorm_vf_zone, non_trigger.eth_queue_stat); + pfdev_info->stats_info.pstats.len = + sizeof(struct eth_pstorm_per_queue_stat); + + pfdev_info->stats_info.tstats.address = 0; + pfdev_info->stats_info.tstats.len = 0; + + OSAL_MEMCPY(pfdev_info->port_mac, p_hwfn->hw_info.hw_mac_addr, + ETH_ALEN); + + pfdev_info->fw_major = FW_MAJOR_VERSION; + pfdev_info->fw_minor = FW_MINOR_VERSION; + pfdev_info->fw_rev = FW_REVISION_VERSION; + pfdev_info->fw_eng = FW_ENGINEERING_VERSION; + pfdev_info->os_type = OSAL_IOV_GET_OS_TYPE(); + ecore_mcp_get_mfw_ver(p_hwfn->p_dev, p_ptt, &pfdev_info->mfw_ver, + OSAL_NULL); + + pfdev_info->dev_type = p_hwfn->p_dev->type; + pfdev_info->chip_rev = p_hwfn->p_dev->chip_rev; + + /* Fill in resc : @@@TBD MichalK Hard Coded for now... 
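+     * Filter counts handed back below are capped at the minimum of the
+     * PF reservation and the VF's request.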
+     */
+    resc->num_rxqs = vf->num_rxqs;
+    resc->num_txqs = vf->num_txqs;
+    resc->num_sbs = vf->num_sbs;
+    for (i = 0; i < resc->num_sbs; i++) {
+        resc->hw_sbs[i].hw_sb_id = vf->igu_sbs[i];
+        resc->hw_sbs[i].sb_qid = 0;
+    }
+
+    for (i = 0; i < resc->num_rxqs; i++) {
+        ecore_fw_l2_queue(p_hwfn, vf->vf_queues[i].fw_rx_qid,
+                          (u16 *)&resc->hw_qid[i]);
+        resc->cid[i] = vf->vf_queues[i].fw_cid;
+    }
+
+    resc->num_mac_filters = OSAL_MIN_T(u8, vf->num_mac_filters,
+                                       req->resc_request.num_mac_filters);
+    resc->num_vlan_filters = OSAL_MIN_T(u8, vf->num_vlan_filters,
+                                        req->resc_request.num_vlan_filters);
+    resc->num_mc_filters = OSAL_MIN_T(u8, vf->num_mc_filters,
+                                      req->resc_request.num_mc_filters);
+
+    /* Fill agreed size of bulletin board in response, and post
+     * an initial image to the bulletin board.
+     */
+    resp->bulletin_size = vf->bulletin.size;
+    ecore_iov_post_vf_bulletin(p_hwfn, vf->relative_vf_id, p_ptt);
+
+    DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+               "VF[%d] ACQUIRE_RESPONSE: pfdev_info- chip_num=0x%x,"
+               " db_size=%d, idx_per_sb=%d, pf_cap=0x%" PRIx64 "\n"
+               "resources- n_rxq-%d, n_txq-%d, n_sbs-%d, n_macs-%d,"
+               " n_vlans-%d, n_mcs-%d\n",
+               vf->abs_vf_id, resp->pfdev_info.chip_num,
+               resp->pfdev_info.db_size, resp->pfdev_info.indices_per_sb,
+               resp->pfdev_info.capabilities, resc->num_rxqs,
+               resc->num_txqs, resc->num_sbs, resc->num_mac_filters,
+               resc->num_vlan_filters, resc->num_mc_filters);
+
+    vf->state = VF_ACQUIRED;
+
+    /* Prepare Response */
+    length = sizeof(struct pfvf_acquire_resp_tlv);
+
+out:
+    ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_ACQUIRE,
+                           length, vfpf_status);
+
+    /* @@@TBD Bulletin */
+}
+
+static enum _ecore_status_t
+__ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn,
+                         struct ecore_vf_info *p_vf, bool val)
+{
+    struct ecore_sp_vport_update_params params;
+    enum _ecore_status_t rc;
+
+    if (val == p_vf->spoof_chk) {
+        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                   "Spoofchk value[%d] is already configured\n", val);
+        return ECORE_SUCCESS;
+    }
+
+    OSAL_MEMSET(&params, 0, sizeof(struct ecore_sp_vport_update_params));
+    params.opaque_fid = p_vf->opaque_fid;
+    params.vport_id = p_vf->vport_id;
+    params.update_anti_spoofing_en_flg = 1;
+    params.anti_spoofing_en = val;
+
+    rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+                               OSAL_NULL);
+    if (rc == ECORE_SUCCESS) {
+        p_vf->spoof_chk = val;
+        p_vf->req_spoofchk_val = p_vf->spoof_chk;
+        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                   "Spoofchk val[%d] configured\n", val);
+    } else {
+        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                   "Spoofchk configuration[val:%d] failed for VF[%d]\n",
+                   val, p_vf->relative_vf_id);
+    }
+
+    return rc;
+}
+
+static enum _ecore_status_t
+ecore_iov_reconfigure_unicast_vlan(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_vf_info *p_vf)
+{
+    enum _ecore_status_t rc = ECORE_SUCCESS;
+    struct ecore_filter_ucast filter;
+    int i;
+
+    OSAL_MEMSET(&filter, 0, sizeof(filter));
+    filter.is_rx_filter = 1;
+    filter.is_tx_filter = 1;
+    filter.vport_to_add_to = p_vf->vport_id;
+    filter.opcode = ECORE_FILTER_ADD;
+
+    /* Reconfigure vlans */
+    for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++) {
+        if (p_vf->shadow_config.vlans[i].used) {
+            filter.type = ECORE_FILTER_VLAN;
+            filter.vlan = p_vf->shadow_config.vlans[i].vid;
+            DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                       "Reconfig VLAN [0x%04x] for VF [%04x]\n",
+                       filter.vlan, p_vf->relative_vf_id);
+            rc = ecore_sp_eth_filter_ucast(p_hwfn,
+                                           p_vf->opaque_fid,
+                                           &filter,
+                                           ECORE_SPQ_MODE_CB,
+                                           OSAL_NULL);
+            if (rc) {
+                DP_NOTICE(p_hwfn, true,
+                          "Failed to configure VLAN [%04x]"
+                          " to VF [%04x]\n", +
filter.vlan, p_vf->relative_vf_id); + break; + } + } + } + + return rc; +} + +static enum _ecore_status_t +ecore_iov_reconfigure_unicast_shadow(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, u64 events) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + + /*TODO - what about MACs? */ + + if ((events & (1 << VLAN_ADDR_FORCED)) && + !(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) + rc = ecore_iov_reconfigure_unicast_vlan(p_hwfn, p_vf); + + return rc; +} + +static int ecore_iov_configure_vport_forced(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + u64 events) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_filter_ucast filter; + + if (!p_vf->vport_instance) + return ECORE_INVAL; + + if (events & (1 << MAC_ADDR_FORCED)) { + /* Since there's no way [currently] of removing the MAC, + * we can always assume this means we need to force it. + */ + OSAL_MEMSET(&filter, 0, sizeof(filter)); + filter.type = ECORE_FILTER_MAC; + filter.opcode = ECORE_FILTER_REPLACE; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + OSAL_MEMCPY(filter.mac, p_vf->bulletin.p_virt->mac, ETH_ALEN); + + rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, + ECORE_SPQ_MODE_CB, OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "PF failed to configure MAC for VF\n"); + return rc; + } + + p_vf->configured_features |= 1 << MAC_ADDR_FORCED; + } + + if (events & (1 << VLAN_ADDR_FORCED)) { + struct ecore_sp_vport_update_params vport_update; + u8 removal; + int i; + + OSAL_MEMSET(&filter, 0, sizeof(filter)); + filter.type = ECORE_FILTER_VLAN; + filter.is_rx_filter = 1; + filter.is_tx_filter = 1; + filter.vport_to_add_to = p_vf->vport_id; + filter.vlan = p_vf->bulletin.p_virt->pvid; + filter.opcode = filter.vlan ? ECORE_FILTER_REPLACE : + ECORE_FILTER_FLUSH; + + /* Send the ramrod */ + rc = ecore_sp_eth_filter_ucast(p_hwfn, p_vf->opaque_fid, + &filter, + ECORE_SPQ_MODE_CB, OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "PF failed to configure VLAN for VF\n"); + return rc; + } + + /* Update the default-vlan & silent vlan stripping */ + OSAL_MEMSET(&vport_update, 0, sizeof(vport_update)); + vport_update.opaque_fid = p_vf->opaque_fid; + vport_update.vport_id = p_vf->vport_id; + vport_update.update_default_vlan_enable_flg = 1; + vport_update.default_vlan_enable_flg = filter.vlan ? 1 : 0; + vport_update.update_default_vlan_flg = 1; + vport_update.default_vlan = filter.vlan; + + vport_update.update_inner_vlan_removal_flg = 1; + removal = filter.vlan ? + 1 : p_vf->shadow_config.inner_vlan_removal; + vport_update.inner_vlan_removal_flg = removal; + vport_update.silent_vlan_removal_flg = filter.vlan ? 
1 : 0; + rc = ecore_sp_vport_update(p_hwfn, &vport_update, + ECORE_SPQ_MODE_EBLOCK, OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "PF failed to configure VF vport for vlan\n"); + return rc; + } + + /* Update all the Rx queues */ + for (i = 0; i < ECORE_MAX_VF_CHAINS_PER_PF; i++) { + u16 qid; + + if (!p_vf->vf_queues[i].rxq_active) + continue; + + qid = p_vf->vf_queues[i].fw_rx_qid; + + rc = ecore_sp_eth_rx_queues_update(p_hwfn, qid, + 1, 0, 1, + ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + if (rc) { + DP_NOTICE(p_hwfn, true, + "Failed to send Rx update" + " queue[0x%04x]\n", + qid); + return rc; + } + } + + if (filter.vlan) + p_vf->configured_features |= 1 << VLAN_ADDR_FORCED; + else + p_vf->configured_features &= ~(1 << VLAN_ADDR_FORCED); + } + + /* If forced features are terminated, we need to configure the shadow + * configuration back again. + */ + if (events) + ecore_iov_reconfigure_unicast_shadow(p_hwfn, p_vf, events); + + return rc; +} + +static void ecore_iov_vf_mbx_start_vport(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_vport_start_tlv *start = &mbx->req_virt->start_vport; + struct ecore_sp_vport_start_params params = { 0 }; + u8 status = PFVF_STATUS_SUCCESS; + struct ecore_vf_info *vf_info; + enum _ecore_status_t rc; + u64 *p_bitmap; + int sb_id; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vf->relative_vf_id, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Failed to get VF info, invalid vfid [%d]\n", + vf->relative_vf_id); + return; + } + + vf->state = VF_ENABLED; + + /* Initialize Status block in CAU */ + for (sb_id = 0; sb_id < vf->num_sbs; sb_id++) { + if (!start->sb_addr[sb_id]) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] did not fill the address of SB %d\n", + vf->relative_vf_id, sb_id); + break; + } + + ecore_int_cau_conf_sb(p_hwfn, p_ptt, + start->sb_addr[sb_id], + vf->igu_sbs[sb_id], + vf->abs_vf_id, 1 /* VF Valid */); + } + ecore_iov_enable_vf_traffic(p_hwfn, p_ptt, vf); + + vf->mtu = start->mtu; + vf->shadow_config.inner_vlan_removal = start->inner_vlan_removal; + + /* Take into consideration configuration forced by hypervisor; + * If none is configured, use the supplied VF values [for old + * vfs that would still be fine, since they passed '0' as padding]. 
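+     * Forced settings are advertised through the bulletin valid_bitmap
+     * that is read below.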
+     */
+    p_bitmap = &vf_info->bulletin.p_virt->valid_bitmap;
+    if (!(*p_bitmap & (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED))) {
+        u8 vf_req = start->only_untagged;
+
+        vf_info->bulletin.p_virt->default_only_untagged = vf_req;
+        *p_bitmap |= 1 << VFPF_BULLETIN_UNTAGGED_DEFAULT;
+    }
+
+    params.tpa_mode = start->tpa_mode;
+    params.remove_inner_vlan = start->inner_vlan_removal;
+    params.tx_switching = true;
+
+#ifndef ASIC_ONLY
+    if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) {
+        DP_NOTICE(p_hwfn, false,
+                  "FPGA: Don't config VF for Tx-switching [no pVFC]\n");
+        params.tx_switching = false;
+    }
+#endif
+
+    params.only_untagged = vf_info->bulletin.p_virt->default_only_untagged;
+    params.drop_ttl0 = false;
+    params.concrete_fid = vf->concrete_fid;
+    params.opaque_fid = vf->opaque_fid;
+    params.vport_id = vf->vport_id;
+    params.max_buffers_per_cqe = start->max_buffers_per_cqe;
+    params.mtu = vf->mtu;
+
+    rc = ecore_sp_eth_vport_start(p_hwfn, &params);
+    if (rc != ECORE_SUCCESS) {
+        DP_ERR(p_hwfn,
+               "ecore_iov_vf_mbx_start_vport returned error %d\n", rc);
+        status = PFVF_STATUS_FAILURE;
+    } else {
+        vf->vport_instance++;
+
+        /* Force configuration if needed on the newly opened vport */
+        ecore_iov_configure_vport_forced(p_hwfn, vf, *p_bitmap);
+        OSAL_IOV_POST_START_VPORT(p_hwfn, vf->relative_vf_id,
+                                  vf->vport_id, vf->opaque_fid);
+        __ecore_iov_spoofchk_set(p_hwfn, vf, vf->req_spoofchk_val);
+    }
+
+    ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_START,
+                           sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_stop_vport(struct ecore_hwfn *p_hwfn,
+                                        struct ecore_ptt *p_ptt,
+                                        struct ecore_vf_info *vf)
+{
+    u8 status = PFVF_STATUS_SUCCESS;
+    enum _ecore_status_t rc;
+
+    vf->vport_instance--;
+    vf->spoof_chk = false;
+
+    rc = ecore_sp_vport_stop(p_hwfn, vf->opaque_fid, vf->vport_id);
+    if (rc != ECORE_SUCCESS) {
+        DP_ERR(p_hwfn,
+               "ecore_iov_vf_mbx_stop_vport returned error %d\n", rc);
+        status = PFVF_STATUS_FAILURE;
+    }
+
+    /* Forget the configuration on the vport */
+    vf->configured_features = 0;
+    OSAL_MEMSET(&vf->shadow_config, 0, sizeof(vf->shadow_config));
+
+    ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_VPORT_TEARDOWN,
+                           sizeof(struct pfvf_def_resp_tlv), status);
+}
+
+static void ecore_iov_vf_mbx_start_rxq(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       struct ecore_vf_info *vf)
+{
+    struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+    struct vfpf_start_rxq_tlv *req = &mbx->req_virt->start_rxq;
+    u16 length = sizeof(struct pfvf_def_resp_tlv);
+    u8 status = PFVF_STATUS_SUCCESS;
+    enum _ecore_status_t rc;
+
+    rc = ecore_sp_eth_rxq_start_ramrod(p_hwfn, vf->opaque_fid,
+                                       vf->vf_queues[req->rx_qid].fw_cid,
+                                       vf->vf_queues[req->rx_qid].fw_rx_qid,
+                                       vf->vport_id,
+                                       vf->abs_vf_id + 0x10,
+                                       req->hw_sb,
+                                       req->sb_index,
+                                       req->bd_max_bytes,
+                                       req->rxq_addr,
+                                       req->cqe_pbl_addr,
+                                       req->cqe_pbl_size);
+
+    if (rc) {
+        status = PFVF_STATUS_FAILURE;
+    } else {
+        vf->vf_queues[req->rx_qid].rxq_active = true;
+        vf->num_active_rxqs++;
+    }
+
+    ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_RXQ,
+                           length, status);
+}
+
+static void ecore_iov_vf_mbx_start_txq(struct ecore_hwfn *p_hwfn,
+                                       struct ecore_ptt *p_ptt,
+                                       struct ecore_vf_info *vf)
+{
+    struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+    struct vfpf_start_txq_tlv *req = &mbx->req_virt->start_txq;
+    u16 length = sizeof(struct pfvf_def_resp_tlv);
+    union ecore_qm_pq_params pq_params;
+    u8 status = PFVF_STATUS_SUCCESS;
+    enum _ecore_status_t rc;
+
+    /* Prepare the parameters which would choose the right PQ */ +
OSAL_MEMSET(&pq_params, 0, sizeof(pq_params)); + pq_params.eth.is_vf = 1; + pq_params.eth.vf_id = vf->relative_vf_id; + + rc = ecore_sp_eth_txq_start_ramrod(p_hwfn, + vf->opaque_fid, + vf->vf_queues[req->tx_qid].fw_tx_qid, + vf->vf_queues[req->tx_qid].fw_cid, + vf->vport_id, + vf->abs_vf_id + 0x10, + req->hw_sb, + req->sb_index, + req->pbl_addr, + req->pbl_size, &pq_params); + + if (rc) + status = PFVF_STATUS_FAILURE; + else + vf->vf_queues[req->tx_qid].txq_active = true; + + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_START_TXQ, + length, status); +} + +static enum _ecore_status_t ecore_iov_vf_stop_rxqs(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + u16 rxq_id, + u8 num_rxqs, + bool cqe_completion) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + int qid; + + if (rxq_id + num_rxqs > OSAL_ARRAY_SIZE(vf->vf_queues)) + return ECORE_INVAL; + + for (qid = rxq_id; qid < rxq_id + num_rxqs; qid++) { + if (vf->vf_queues[qid].rxq_active) { + rc = ecore_sp_eth_rx_queue_stop(p_hwfn, + vf->vf_queues[qid]. + fw_rx_qid, false, + cqe_completion); + + if (rc) + return rc; + } + vf->vf_queues[qid].rxq_active = false; + vf->num_active_rxqs--; + } + + return rc; +} + +static enum _ecore_status_t ecore_iov_vf_stop_txqs(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + u16 txq_id, u8 num_txqs) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + int qid; + + if (txq_id + num_txqs > OSAL_ARRAY_SIZE(vf->vf_queues)) + return ECORE_INVAL; + + for (qid = txq_id; qid < txq_id + num_txqs; qid++) { + if (vf->vf_queues[qid].txq_active) { + rc = ecore_sp_eth_tx_queue_stop(p_hwfn, + vf->vf_queues[qid]. + fw_tx_qid); + + if (rc) + return rc; + } + vf->vf_queues[qid].txq_active = false; + } + return rc; +} + +static void ecore_iov_vf_mbx_stop_rxqs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_stop_rxqs_tlv *req = &mbx->req_virt->stop_rxqs; + u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + enum _ecore_status_t rc; + + /* We give the option of starting from qid != 0, in this case we + * need to make sure that qid + num_qs doesn't exceed the actual + * amount of queues that exist. + */ + rc = ecore_iov_vf_stop_rxqs(p_hwfn, vf, req->rx_qid, + req->num_rxqs, req->cqe_completion); + if (rc) + status = PFVF_STATUS_FAILURE; + + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_RXQS, + length, status); +} + +static void ecore_iov_vf_mbx_stop_txqs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_stop_txqs_tlv *req = &mbx->req_virt->stop_txqs; + u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + enum _ecore_status_t rc; + + /* We give the option of starting from qid != 0, in this case we + * need to make sure that qid + num_qs doesn't exceed the actual + * amount of queues that exist. 
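+     * ecore_iov_vf_stop_txqs() rejects a range that runs past vf->vf_queues.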
+ */ + rc = ecore_iov_vf_stop_txqs(p_hwfn, vf, req->tx_qid, req->num_txqs); + if (rc) + status = PFVF_STATUS_FAILURE; + + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_STOP_TXQS, + length, status); +} + +static void ecore_iov_vf_mbx_update_rxqs(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_update_rxq_tlv *req = &mbx->req_virt->update_rxq; + u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + u8 complete_event_flg; + u8 complete_cqe_flg; + enum _ecore_status_t rc; + u16 qid; + u8 i; + + complete_cqe_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_CQE_FLAG); + complete_event_flg = !!(req->flags & VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG); + + for (i = 0; i < req->num_rxqs; i++) { + qid = req->rx_qid + i; + + if (!vf->vf_queues[qid].rxq_active) { + DP_NOTICE(p_hwfn, true, + "VF rx_qid = %d isn`t active!\n", qid); + status = PFVF_STATUS_FAILURE; + break; + } + + rc = ecore_sp_eth_rx_queues_update(p_hwfn, + vf->vf_queues[qid].fw_rx_qid, + 1, + complete_cqe_flg, + complete_event_flg, + ECORE_SPQ_MODE_EBLOCK, + OSAL_NULL); + + if (rc) { + status = PFVF_STATUS_FAILURE; + break; + } + } + + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UPDATE_RXQ, + length, status); +} + +void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type) +{ + struct channel_tlv *p_tlv = (struct channel_tlv *)p_tlvs_list; + int len = 0; + + do { + if (!p_tlv->length) { + DP_NOTICE(p_hwfn, true, "Zero length TLV found\n"); + return OSAL_NULL; + } + + if (p_tlv->type == req_type) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Extended tlv type %s, length %d found\n", + ecore_channel_tlvs_string[p_tlv->type], + p_tlv->length); + return p_tlv; + } + + len += p_tlv->length; + p_tlv = (struct channel_tlv *)((u8 *)p_tlv + p_tlv->length); + + if ((len + p_tlv->length) > TLV_BUFFER_SIZE) { + DP_NOTICE(p_hwfn, true, + "TLVs has overrun the buffer size\n"); + return OSAL_NULL; + } + } while (p_tlv->type != CHANNEL_TLV_LIST_END); + + return OSAL_NULL; +} + +static void +ecore_iov_vp_update_act_param(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_activate_tlv *p_act_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + + p_act_tlv = (struct vfpf_vport_update_activate_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (p_act_tlv) { + p_data->update_vport_active_rx_flg = p_act_tlv->update_rx; + p_data->vport_active_rx_flg = p_act_tlv->active_rx; + p_data->update_vport_active_tx_flg = p_act_tlv->update_tx; + p_data->vport_active_tx_flg = p_act_tlv->active_tx; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACTIVATE; + } +} + +static void +ecore_iov_vp_update_vlan_param(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_vf_info *p_vf, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + + p_vlan_tlv = (struct vfpf_vport_update_vlan_strip_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (!p_vlan_tlv) + return; + + p_vf->shadow_config.inner_vlan_removal = p_vlan_tlv->remove_vlan; + + /* Ignore the VF request if we're forcing a vlan */ + if (!(p_vf->configured_features & (1 << VLAN_ADDR_FORCED))) { + p_data->update_inner_vlan_removal_flg = 1; + p_data->inner_vlan_removal_flg = 
p_vlan_tlv->remove_vlan; + } + + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_VLAN_STRIP; +} + +static void +ecore_iov_vp_update_tx_switch(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + + p_tx_switch_tlv = (struct vfpf_vport_update_tx_switch_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + +#ifndef ASIC_ONLY + if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) { + DP_NOTICE(p_hwfn, false, + "FPGA: Ignore tx-switching configuration originating from VFs\n"); + return; + } +#endif + + if (p_tx_switch_tlv) { + p_data->update_tx_switching_flg = 1; + p_data->tx_switching_flg = p_tx_switch_tlv->tx_switching; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_TX_SWITCH; + } +} + +static void +ecore_iov_vp_update_mcast_bin_param(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, + u16 *tlvs_mask) +{ + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; + + p_mcast_tlv = (struct vfpf_vport_update_mcast_bin_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + + if (p_mcast_tlv) { + p_data->update_approx_mcast_flg = 1; + OSAL_MEMCPY(p_data->bins, p_mcast_tlv->bins, + sizeof(unsigned long) * + ETH_MULTICAST_MAC_BINS_IN_REGS); + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_MCAST; + } +} + +static void +ecore_iov_vp_update_accept_flag(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + + p_accept_tlv = (struct vfpf_vport_update_accept_param_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + + if (p_accept_tlv) { + p_data->accept_flags.update_rx_mode_config = + p_accept_tlv->update_rx_mode; + p_data->accept_flags.rx_accept_filter = + p_accept_tlv->rx_accept_filter; + p_data->accept_flags.update_tx_mode_config = + p_accept_tlv->update_tx_mode; + p_data->accept_flags.tx_accept_filter = + p_accept_tlv->tx_accept_filter; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_PARAM; + } +} + +static void +ecore_iov_vp_update_accept_any_vlan(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data, + struct ecore_iov_vf_mbx *p_mbx, + u16 *tlvs_mask) +{ + struct vfpf_vport_update_accept_any_vlan_tlv *p_accept_any_vlan; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + + p_accept_any_vlan = (struct vfpf_vport_update_accept_any_vlan_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + + if (p_accept_any_vlan) { + p_data->accept_any_vlan = p_accept_any_vlan->accept_any_vlan; + p_data->update_accept_any_vlan_flg = + p_accept_any_vlan->update_accept_any_vlan_flg; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_ACCEPT_ANY_VLAN; + } +} + +static void +ecore_iov_vp_update_rss_param(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + struct ecore_sp_vport_update_params *p_data, + struct ecore_rss_params *p_rss, + struct ecore_iov_vf_mbx *p_mbx, u16 *tlvs_mask) +{ + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; + u16 table_size; + u16 i, q_idx, max_q_idx; + + p_rss_tlv = (struct vfpf_vport_update_rss_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + if (p_rss_tlv) { + OSAL_MEMSET(p_rss, 0, sizeof(struct ecore_rss_params)); + + 
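+        /* Translate the VF's VFPF_UPDATE_RSS_* flags into the
+         * ecore_rss_params update_* fields consumed by the vport update.
+         */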
p_rss->update_rss_config = + !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CONFIG_FLAG); + p_rss->update_rss_capabilities = + !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_CAPS_FLAG); + p_rss->update_rss_ind_table = + !!(p_rss_tlv->update_rss_flags & + VFPF_UPDATE_RSS_IND_TABLE_FLAG); + p_rss->update_rss_key = + !!(p_rss_tlv->update_rss_flags & VFPF_UPDATE_RSS_KEY_FLAG); + + p_rss->rss_enable = p_rss_tlv->rss_enable; + p_rss->rss_eng_id = vf->relative_vf_id + 1; + p_rss->rss_caps = p_rss_tlv->rss_caps; + p_rss->rss_table_size_log = p_rss_tlv->rss_table_size_log; + OSAL_MEMCPY(p_rss->rss_ind_table, p_rss_tlv->rss_ind_table, + sizeof(p_rss->rss_ind_table)); + OSAL_MEMCPY(p_rss->rss_key, p_rss_tlv->rss_key, + sizeof(p_rss->rss_key)); + + table_size = OSAL_MIN_T(u16, + OSAL_ARRAY_SIZE(p_rss->rss_ind_table), + (1 << p_rss_tlv->rss_table_size_log)); + + max_q_idx = OSAL_ARRAY_SIZE(vf->vf_queues); + + for (i = 0; i < table_size; i++) { + q_idx = p_rss->rss_ind_table[i]; + if (q_idx >= max_q_idx) { + DP_NOTICE(p_hwfn, true, + "rss_ind_table[%d] = %d, rxq is out of range\n", + i, q_idx); + /* TBD: fail the request mark VF as malicious */ + p_rss->rss_ind_table[i] = + vf->vf_queues[0].fw_rx_qid; + } else if (!vf->vf_queues[q_idx].rxq_active) { + DP_NOTICE(p_hwfn, true, + "rss_ind_table[%d] = %d, rxq is not active\n", + i, q_idx); + /* TBD: fail the request mark VF as malicious */ + p_rss->rss_ind_table[i] = + vf->vf_queues[0].fw_rx_qid; + } else { + p_rss->rss_ind_table[i] = + vf->vf_queues[q_idx].fw_rx_qid; + } + } + + p_data->rss_params = p_rss; + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_RSS; + } else { + p_data->rss_params = OSAL_NULL; + } +} + +static void +ecore_iov_vp_update_sge_tpa_param(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *vf, + struct ecore_sp_vport_update_params *p_data, + struct ecore_sge_tpa_params *p_sge_tpa, + struct ecore_iov_vf_mbx *p_mbx, + u16 *tlvs_mask) +{ + struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; + u16 tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + + p_sge_tpa_tlv = (struct vfpf_vport_update_sge_tpa_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_mbx->req_virt, tlv); + + if (!p_sge_tpa_tlv) { + p_data->sge_tpa_params = OSAL_NULL; + return; + } + + OSAL_MEMSET(p_sge_tpa, 0, sizeof(struct ecore_sge_tpa_params)); + + p_sge_tpa->update_tpa_en_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & VFPF_UPDATE_TPA_EN_FLAG); + p_sge_tpa->update_tpa_param_flg = + !!(p_sge_tpa_tlv->update_sge_tpa_flags & + VFPF_UPDATE_TPA_PARAM_FLAG); + + p_sge_tpa->tpa_ipv4_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV4_EN_FLAG); + p_sge_tpa->tpa_ipv6_en_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_IPV6_EN_FLAG); + p_sge_tpa->tpa_pkt_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_PKT_SPLIT_FLAG); + p_sge_tpa->tpa_hdr_data_split_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_HDR_DATA_SPLIT_FLAG); + p_sge_tpa->tpa_gro_consistent_flg = + !!(p_sge_tpa_tlv->sge_tpa_flags & VFPF_TPA_GRO_CONSIST_FLAG); + + p_sge_tpa->tpa_max_aggs_num = p_sge_tpa_tlv->tpa_max_aggs_num; + p_sge_tpa->tpa_max_size = p_sge_tpa_tlv->tpa_max_size; + p_sge_tpa->tpa_min_size_to_start = p_sge_tpa_tlv->tpa_min_size_to_start; + p_sge_tpa->tpa_min_size_to_cont = p_sge_tpa_tlv->tpa_min_size_to_cont; + p_sge_tpa->max_buffers_per_cqe = p_sge_tpa_tlv->max_buffers_per_cqe; + + p_data->sge_tpa_params = p_sge_tpa; + + *tlvs_mask |= 1 << ECORE_IOV_VP_UPDATE_SGE_TPA; +} + +static void ecore_iov_vf_mbx_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) 
+{
+    struct ecore_sp_vport_update_params params;
+    struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+    struct ecore_sge_tpa_params sge_tpa_params;
+    struct ecore_rss_params rss_params;
+    u8 status = PFVF_STATUS_SUCCESS;
+    enum _ecore_status_t rc;
+    u16 tlvs_mask = 0, tlvs_accepted;
+    u16 length;
+
+    OSAL_MEMSET(&params, 0, sizeof(params));
+    params.opaque_fid = vf->opaque_fid;
+    params.vport_id = vf->vport_id;
+    params.rss_params = OSAL_NULL;
+
+    /* Search for extended tlvs list and update values
+     * from VF in struct ecore_sp_vport_update_params.
+     */
+    ecore_iov_vp_update_act_param(p_hwfn, &params, mbx, &tlvs_mask);
+    ecore_iov_vp_update_vlan_param(p_hwfn, &params, vf, mbx, &tlvs_mask);
+    ecore_iov_vp_update_tx_switch(p_hwfn, &params, mbx, &tlvs_mask);
+    ecore_iov_vp_update_mcast_bin_param(p_hwfn, &params, mbx, &tlvs_mask);
+    ecore_iov_vp_update_accept_flag(p_hwfn, &params, mbx, &tlvs_mask);
+    ecore_iov_vp_update_rss_param(p_hwfn, vf, &params, &rss_params,
+                                  mbx, &tlvs_mask);
+    ecore_iov_vp_update_accept_any_vlan(p_hwfn, &params, mbx, &tlvs_mask);
+    ecore_iov_vp_update_sge_tpa_param(p_hwfn, vf, &params,
+                                      &sge_tpa_params, mbx, &tlvs_mask);
+
+    /* Just log a message if no extended TLV is present in the buffer.
+     * Once the VF requests every vport-update ramrod feature as an
+     * extended TLV, a missing extended TLV can be treated as an error
+     * in the response.
+     */
+    tlvs_accepted = tlvs_mask;
+
+#ifndef __EXTRACT__LINUX__
+    if (OSAL_IOV_VF_VPORT_UPDATE(p_hwfn, vf->relative_vf_id,
+                                 &params, &tlvs_accepted) !=
+        ECORE_SUCCESS) {
+        tlvs_accepted = 0;
+        status = PFVF_STATUS_NOT_SUPPORTED;
+        goto out;
+    }
+#endif
+
+    if (!tlvs_accepted) {
+        if (tlvs_mask)
+            DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                       "Upper-layer prevents said VF configuration\n");
+        else
+            DP_NOTICE(p_hwfn, true,
+                      "No feature tlvs found for vport update\n");
+        status = PFVF_STATUS_NOT_SUPPORTED;
+        goto out;
+    }
+
+    rc = ecore_sp_vport_update(p_hwfn, &params, ECORE_SPQ_MODE_EBLOCK,
+                               OSAL_NULL);
+
+    if (rc)
+        status = PFVF_STATUS_FAILURE;
+
+out:
+    length = ecore_iov_prep_vp_update_resp_tlvs(p_hwfn, vf, mbx, status,
+                                                tlvs_mask, tlvs_accepted);
+    ecore_iov_send_response(p_hwfn, p_ptt, vf, length, status);
+}
+
+static enum _ecore_status_t
+ecore_iov_vf_update_unicast_shadow(struct ecore_hwfn *p_hwfn,
+                                   struct ecore_vf_info *p_vf,
+                                   struct ecore_filter_ucast *p_params)
+{
+    int i;
+
+    /* TODO - do we need a MAC shadow registry? */
+    if (p_params->type == ECORE_FILTER_MAC)
+        return ECORE_SUCCESS;
+
+    /* First remove entries and then add new ones */
+    if (p_params->opcode == ECORE_FILTER_REMOVE) {
+        for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+            if (p_vf->shadow_config.vlans[i].used &&
+                p_vf->shadow_config.vlans[i].vid ==
+                p_params->vlan) {
+                p_vf->shadow_config.vlans[i].used = false;
+                break;
+            }
+        if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+            DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                       "VF [%d] - Tries to remove a non-existing vlan\n",
+                       p_vf->relative_vf_id);
+            return ECORE_INVAL;
+        }
+    } else if (p_params->opcode == ECORE_FILTER_REPLACE ||
+               p_params->opcode == ECORE_FILTER_FLUSH) {
+        for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+            p_vf->shadow_config.vlans[i].used = false;
+    }
+
+    /* In forced mode, we're willing to remove entries - but we don't add
+     * new ones.
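+     * (With VLAN_ADDR_FORCED set, the PVID pushed through the bulletin
+     * owns the VF's VLAN configuration.)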
+     */
+    if (p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))
+        return ECORE_SUCCESS;
+
+    if (p_params->opcode == ECORE_FILTER_ADD ||
+        p_params->opcode == ECORE_FILTER_REPLACE) {
+        for (i = 0; i < ECORE_ETH_VF_NUM_VLAN_FILTERS + 1; i++)
+            if (!p_vf->shadow_config.vlans[i].used) {
+                p_vf->shadow_config.vlans[i].used = true;
+                p_vf->shadow_config.vlans[i].vid =
+                    p_params->vlan;
+                break;
+            }
+        if (i == ECORE_ETH_VF_NUM_VLAN_FILTERS + 1) {
+            DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                       "VF [%d] - Tries to configure more than %d vlan filters\n",
+                       p_vf->relative_vf_id,
+                       ECORE_ETH_VF_NUM_VLAN_FILTERS + 1);
+            return ECORE_INVAL;
+        }
+    }
+
+    return ECORE_SUCCESS;
+}
+
+static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn,
+                                          struct ecore_ptt *p_ptt,
+                                          struct ecore_vf_info *vf)
+{
+    struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx;
+    struct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter;
+    struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt;
+    struct ecore_filter_ucast params;
+    u8 status = PFVF_STATUS_SUCCESS;
+    enum _ecore_status_t rc;
+
+    /* Prepare the unicast filter params */
+    OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast));
+    params.opcode = (enum ecore_filter_opcode)req->opcode;
+    params.type = (enum ecore_filter_ucast_type)req->type;
+
+    /* @@@TBD - We might need logic on HV side in determining this */
+    params.is_rx_filter = 1;
+    params.is_tx_filter = 1;
+    params.vport_to_remove_from = vf->vport_id;
+    params.vport_to_add_to = vf->vport_id;
+    OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN);
+    params.vlan = req->vlan;
+
+    DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+               "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n",
+               vf->abs_vf_id, params.opcode, params.type,
+               params.is_rx_filter ? "RX" : "",
+               params.is_tx_filter ? "TX" : "",
+               params.vport_to_add_to,
+               params.mac[0], params.mac[1], params.mac[2],
+               params.mac[3], params.mac[4], params.mac[5], params.vlan);
+
+    if (!vf->vport_instance) {
+        DP_VERBOSE(p_hwfn, ECORE_MSG_IOV,
+                   "No VPORT instance available for VF[%d], failing ucast MAC configuration\n",
+                   vf->abs_vf_id);
+        status = PFVF_STATUS_FAILURE;
+        goto out;
+    }
+
+    /* Update shadow copy of the VF configuration */
+    if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) !=
+        ECORE_SUCCESS) {
+        status = PFVF_STATUS_FAILURE;
+        goto out;
+    }
+
+    /* Determine if the unicast filtering is acceptable by PF */
+    if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) &&
+        (params.type == ECORE_FILTER_VLAN ||
+         params.type == ECORE_FILTER_MAC_VLAN)) {
+        /* Once VLAN is forced or PVID is set, do not allow
+         * to add/replace any further VLANs.
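+         * Such requests are answered with PFVF_STATUS_FORCED rather
+         * than a failure.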
+ +static void ecore_iov_vf_mbx_ucast_filter(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + struct ecore_iov_vf_mbx *mbx = &vf->vf_mbx; + struct vfpf_ucast_filter_tlv *req = &mbx->req_virt->ucast_filter; + struct ecore_bulletin_content *p_bulletin = vf->bulletin.p_virt; + struct ecore_filter_ucast params; + u8 status = PFVF_STATUS_SUCCESS; + enum _ecore_status_t rc; + + /* Prepare the unicast filter params */ + OSAL_MEMSET(&params, 0, sizeof(struct ecore_filter_ucast)); + params.opcode = (enum ecore_filter_opcode)req->opcode; + params.type = (enum ecore_filter_ucast_type)req->type; + + /* @@@TBD - We might need logic on HV side in determining this */ + params.is_rx_filter = 1; + params.is_tx_filter = 1; + params.vport_to_remove_from = vf->vport_id; + params.vport_to_add_to = vf->vport_id; + OSAL_MEMCPY(params.mac, req->mac, ETH_ALEN); + params.vlan = req->vlan; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d]: opcode 0x%02x type 0x%02x [%s %s] [vport 0x%02x] MAC %02x:%02x:%02x:%02x:%02x:%02x, vlan 0x%04x\n", + vf->abs_vf_id, params.opcode, params.type, + params.is_rx_filter ? "RX" : "", + params.is_tx_filter ? "TX" : "", + params.vport_to_add_to, + params.mac[0], params.mac[1], params.mac[2], + params.mac[3], params.mac[4], params.mac[5], params.vlan); + + if (!vf->vport_instance) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "No VPORT instance available for VF[%d], failing ucast MAC configuration\n", + vf->abs_vf_id); + status = PFVF_STATUS_FAILURE; + goto out; + } + + /* Update shadow copy of the VF configuration */ + if (ecore_iov_vf_update_unicast_shadow(p_hwfn, vf, &params) != + ECORE_SUCCESS) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + /* Determine if the unicast filtering is acceptible by PF */ + if ((p_bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED)) && + (params.type == ECORE_FILTER_VLAN || + params.type == ECORE_FILTER_MAC_VLAN)) { + /* Once VLAN is forced or PVID is set, do not allow + * to add/replace any further VLANs.
+ */ + if (params.opcode == ECORE_FILTER_ADD || + params.opcode == ECORE_FILTER_REPLACE) + status = PFVF_STATUS_FORCED; + goto out; + } + + if ((p_bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) && + (params.type == ECORE_FILTER_MAC || + params.type == ECORE_FILTER_MAC_VLAN)) { + if (OSAL_MEMCMP(p_bulletin->mac, params.mac, ETH_ALEN) || + (params.opcode != ECORE_FILTER_ADD && + params.opcode != ECORE_FILTER_REPLACE)) + status = PFVF_STATUS_FORCED; + goto out; + } + + rc = OSAL_IOV_CHK_UCAST(p_hwfn, vf->relative_vf_id, &params); + if (rc == ECORE_EXISTS) { + goto out; + } else if (rc == ECORE_INVAL) { + status = PFVF_STATUS_FAILURE; + goto out; + } + + rc = ecore_sp_eth_filter_ucast(p_hwfn, vf->opaque_fid, &params, + ECORE_SPQ_MODE_CB, OSAL_NULL); + if (rc) + status = PFVF_STATUS_FAILURE; + +out: + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_UCAST_FILTER, + sizeof(struct pfvf_def_resp_tlv), status); +} + +static void ecore_iov_vf_mbx_int_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + int i; + + /* Reset the SBs */ + for (i = 0; i < vf->num_sbs; i++) + ecore_int_igu_init_pure_rt_single(p_hwfn, p_ptt, + vf->igu_sbs[i], + vf->opaque_fid, false); + + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct pfvf_def_resp_tlv), + PFVF_STATUS_SUCCESS); +} + +static void ecore_iov_vf_mbx_close(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + u8 status = PFVF_STATUS_SUCCESS; + + /* Disable Interrupts for VF */ + ecore_iov_vf_igu_set_int(p_hwfn, p_ptt, vf, 0 /* disable */); + + /* Reset Permission table */ + ecore_iov_config_perm_table(p_hwfn, p_ptt, vf, 0 /* disable */); + + ecore_iov_prepare_resp(p_hwfn, p_ptt, vf, CHANNEL_TLV_CLOSE, + length, status); +} + +static void ecore_iov_vf_mbx_release(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + struct ecore_vf_info *p_vf) +{ + u16 length = sizeof(struct pfvf_def_resp_tlv); + + ecore_iov_vf_cleanup(p_hwfn, p_vf); + + ecore_iov_prepare_resp(p_hwfn, p_ptt, p_vf, CHANNEL_TLV_RELEASE, + length, PFVF_STATUS_SUCCESS); +} + +static enum _ecore_status_t +ecore_iov_vf_flr_poll_dorq(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt) +{ + int cnt; + u32 val; + + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_vf->concrete_fid); + + for (cnt = 0; cnt < 50; cnt++) { + val = ecore_rd(p_hwfn, p_ptt, DORQ_REG_VF_USAGE_CNT); + if (!val) + break; + OSAL_MSLEEP(20); + } + ecore_fid_pretend(p_hwfn, p_ptt, (u16)p_hwfn->hw_info.concrete_fid); + + if (cnt == 50) { + DP_ERR(p_hwfn, + "VF[%d] - dorq failed to cleanup [usage 0x%08x]\n", + p_vf->abs_vf_id, val); + return ECORE_TIMEOUT; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_iov_vf_flr_poll_pbf(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt) +{ + u32 cons[MAX_NUM_VOQS], distance[MAX_NUM_VOQS]; + int i, cnt; + + /* Read initial consumers & producers */ + for (i = 0; i < MAX_NUM_VOQS; i++) { + u32 prod; + + cons[i] = ecore_rd(p_hwfn, p_ptt, + PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + + i * 0x40); + prod = ecore_rd(p_hwfn, p_ptt, + PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 + + i * 0x40); + distance[i] = prod - cons[i]; + } + + /* Wait for consumers to pass the producers */ + i = 0; + for (cnt = 0; cnt < 50; cnt++) { + for (; i < MAX_NUM_VOQS; i++) { + u32 tmp; + + tmp = ecore_rd(p_hwfn, p_ptt, + PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 + + i * 0x40); + if (distance[i] > tmp -
cons[i]) + break; + } + + if (i == MAX_NUM_VOQS) + break; + + OSAL_MSLEEP(20); + } + + if (cnt == 50) { + DP_ERR(p_hwfn, "VF[%d] - pbf polling failed on VOQ %d\n", + p_vf->abs_vf_id, i); + return ECORE_TIMEOUT; + } + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_iov_vf_flr_poll_prs(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, struct ecore_ptt *p_ptt) +{ + u16 tc_cons[NUM_OF_TCS], tc_lb_cons[NUM_OF_TCS]; + u16 prod[NUM_OF_TCS]; + int i, cnt; + + /* Read initial consumers & producers */ + for (i = 0; i < NUM_OF_TCS; i++) { + tc_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt, + PRS_REG_MSG_CT_MAIN_0 + i * 0x4); + tc_lb_cons[i] = (u16)ecore_rd(p_hwfn, p_ptt, + PRS_REG_MSG_CT_LB_0 + i * 0x4); + prod[i] = (u16)ecore_rd(p_hwfn, p_ptt, + BRB_REG_PER_TC_COUNTERS + + p_hwfn->port_id * 0x20 + i * 0x4); + } + + /* Wait for consumers to pass the producers */ + i = 0; + for (cnt = 0; cnt < 50; cnt++) { + for (; i < NUM_OF_TCS; i++) { + u16 cons; + + cons = (u16)ecore_rd(p_hwfn, p_ptt, + PRS_REG_MSG_CT_MAIN_0 + i * 0x4); + if (prod[i] - tc_cons[i] > cons - tc_cons[i]) + break; + + cons = (u16)ecore_rd(p_hwfn, p_ptt, + PRS_REG_MSG_CT_LB_0 + i * 0x4); + if (prod[i] - tc_lb_cons[i] > cons - tc_lb_cons[i]) + break; + } + + if (i == NUM_OF_TCS) + break; + + /* 16-bit counters; Delay instead of sleep... */ + OSAL_UDELAY(10); + } + + /* This is only optional polling for BB, since registers are only + * 16-bit wide and guarantee is not good enough. Don't fail things + * if polling didn't return the expected results. + */ + if (cnt == 50) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - prs polling failed on TC %d\n", + p_vf->abs_vf_id, i); + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t ecore_iov_vf_flr_poll(struct ecore_hwfn *p_hwfn, + struct ecore_vf_info *p_vf, + struct ecore_ptt *p_ptt) +{ + enum _ecore_status_t rc; + + /* TODO - add SRC and TM polling once we add storage IOV */ + + rc = ecore_iov_vf_flr_poll_dorq(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + rc = ecore_iov_vf_flr_poll_pbf(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + rc = ecore_iov_vf_flr_poll_prs(p_hwfn, p_vf, p_ptt); + if (rc) + return rc; + + return ECORE_SUCCESS; +} + +static enum _ecore_status_t +ecore_iov_execute_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + u16 rel_vf_id, u32 *ack_vfs) +{ + enum _ecore_status_t rc = ECORE_SUCCESS; + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, false); + if (!p_vf) + return ECORE_SUCCESS; + + if (p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & + (1ULL << (rel_vf_id % 64))) { + u16 vfid = p_vf->abs_vf_id; + + /* TODO - should we lock channel? */ + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] - Handling FLR\n", vfid); + + ecore_iov_vf_cleanup(p_hwfn, p_vf); + + /* If VF isn't active, no need for anything but SW */ + if (!ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, p_vf->relative_vf_id)) + goto cleanup; + + /* TODO - what to do in case of failure? */ + rc = ecore_iov_vf_flr_poll(p_hwfn, p_vf, p_ptt); + if (rc != ECORE_SUCCESS) + goto cleanup; + + rc = ecore_final_cleanup(p_hwfn, p_ptt, vfid, true); + if (rc) { + /* TODO - what's now? What a mess.... */ + DP_ERR(p_hwfn, "Failed handle FLR of VF[%d]\n", vfid); + return rc; + } + + /* VF_STOPPED has to be set only after final cleanup + * but prior to re-enabling the VF. + */ + p_vf->state = VF_STOPPED; + + rc = ecore_iov_enable_vf_access(p_hwfn, p_ptt, p_vf); + if (rc) { + /* TODO - again, a mess... 
*/ + DP_ERR(p_hwfn, "Failed to re-enable VF[%d] acces\n", + vfid); + return rc; + } +cleanup: + /* Mark VF for ack and clean pending state */ + if (p_vf->state == VF_RESET) + p_vf->state = VF_STOPPED; + ack_vfs[vfid / 32] |= (1 << (vfid % 32)); + p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] &= + ~(1ULL << (rel_vf_id % 64)); + p_hwfn->pf_iov_info->pending_events[rel_vf_id / 64] &= + ~(1ULL << (rel_vf_id % 64)); + } + + return rc; +} + +enum _ecore_status_t ecore_iov_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ + u32 ack_vfs[VF_MAX_STATIC / 32]; + enum _ecore_status_t rc = ECORE_SUCCESS; + u16 i; + + OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); + + for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) + ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, i, ack_vfs); + + rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); + return rc; +} + +enum _ecore_status_t +ecore_iov_single_vf_flr_cleanup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, u16 rel_vf_id) +{ + u32 ack_vfs[VF_MAX_STATIC / 32]; + enum _ecore_status_t rc = ECORE_SUCCESS; + + OSAL_MEMSET(ack_vfs, 0, sizeof(u32) * (VF_MAX_STATIC / 32)); + + ecore_iov_execute_vf_flr_cleanup(p_hwfn, p_ptt, rel_vf_id, ack_vfs); + + rc = ecore_mcp_ack_vf_flr(p_hwfn, p_ptt, ack_vfs); + return rc; +} + +int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *p_disabled_vfs) +{ + u16 i, found = 0; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, "Marking FLR-ed VFs\n"); + for (i = 0; i < (VF_MAX_STATIC / 32); i++) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "[%08x,...,%08x]: %08x\n", + i * 32, (i + 1) * 32 - 1, p_disabled_vfs[i]); + + /* Mark VFs */ + for (i = 0; i < p_hwfn->p_dev->sriov_info.total_vfs; i++) { + struct ecore_vf_info *p_vf; + u8 vfid; + + p_vf = ecore_iov_get_vf_info(p_hwfn, i, false); + if (!p_vf) + continue; + + vfid = p_vf->abs_vf_id; + if ((1 << (vfid % 32)) & p_disabled_vfs[vfid / 32]) { + u64 *p_flr = p_hwfn->pf_iov_info->pending_flr; + u16 rel_vf_id = p_vf->relative_vf_id; + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF[%d] [rel %d] got FLR-ed\n", + vfid, rel_vf_id); + + p_vf->state = VF_RESET; + + /* No need to lock here, since pending_flr should + * only change here and before ACKing MFw. Since + * MFW will not trigger an additional attention for + * VF flr until ACKs, we're safe. 
+ */ + p_flr[rel_vf_id / 64] |= 1ULL << (rel_vf_id % 64); + found = 1; + } + } + + return found; +} + +void ecore_iov_set_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *params, + struct ecore_mcp_link_state *link, + struct ecore_mcp_link_capabilities *p_caps) +{ + struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false); + struct ecore_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + p_bulletin->req_autoneg = params->speed.autoneg; + p_bulletin->req_adv_speed = params->speed.advertised_speeds; + p_bulletin->req_forced_speed = params->speed.forced_speed; + p_bulletin->req_autoneg_pause = params->pause.autoneg; + p_bulletin->req_forced_rx = params->pause.forced_rx; + p_bulletin->req_forced_tx = params->pause.forced_tx; + p_bulletin->req_loopback = params->loopback_mode; + + p_bulletin->link_up = link->link_up; + p_bulletin->speed = link->speed; + p_bulletin->full_duplex = link->full_duplex; + p_bulletin->autoneg = link->an; + p_bulletin->autoneg_complete = link->an_complete; + p_bulletin->parallel_detection = link->parallel_detection; + p_bulletin->pfc_enabled = link->pfc_enabled; + p_bulletin->partner_adv_speed = link->partner_adv_speed; + p_bulletin->partner_tx_flow_ctrl_en = link->partner_tx_flow_ctrl_en; + p_bulletin->partner_rx_flow_ctrl_en = link->partner_rx_flow_ctrl_en; + p_bulletin->partner_adv_pause = link->partner_adv_pause; + p_bulletin->sfp_tx_fault = link->sfp_tx_fault; + + p_bulletin->capability_speed = p_caps->speed_capabilities; +} + +void ecore_iov_get_link(struct ecore_hwfn *p_hwfn, + u16 vfid, + struct ecore_mcp_link_params *p_params, + struct ecore_mcp_link_state *p_link, + struct ecore_mcp_link_capabilities *p_caps) +{ + struct ecore_vf_info *p_vf = ecore_iov_get_vf_info(p_hwfn, vfid, false); + struct ecore_bulletin_content *p_bulletin; + + if (!p_vf) + return; + + p_bulletin = p_vf->bulletin.p_virt; + + if (p_params) + __ecore_vf_get_link_params(p_hwfn, p_params, p_bulletin); + if (p_link) + __ecore_vf_get_link_state(p_hwfn, p_link, p_bulletin); + if (p_caps) + __ecore_vf_get_link_caps(p_hwfn, p_caps, p_bulletin); +} + +void ecore_iov_process_mbx_req(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, int vfid) +{ + struct ecore_iov_vf_mbx *mbx; + struct ecore_vf_info *p_vf; + int i; + + p_vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf) + return; + + mbx = &p_vf->vf_mbx; + + /* ecore_iov_process_mbx_request */ + DP_VERBOSE(p_hwfn, + ECORE_MSG_IOV, + "ecore_iov_process_mbx_req vfid %d\n", p_vf->abs_vf_id); + + mbx->first_tlv = mbx->req_virt->first_tlv; + + /* check if tlv type is known */ + if (ecore_iov_tlv_supported(mbx->first_tlv.tl.type)) { + /* Lock the per vf op mutex and note the locker's identity. + * The unlock will take place in mbx response. 
+ */ + ecore_iov_lock_vf_pf_channel(p_hwfn, + p_vf, mbx->first_tlv.tl.type); + + /* switch on the opcode */ + switch (mbx->first_tlv.tl.type) { + case CHANNEL_TLV_ACQUIRE: + ecore_iov_vf_mbx_acquire(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_START: + ecore_iov_vf_mbx_start_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_TEARDOWN: + ecore_iov_vf_mbx_stop_vport(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_RXQ: + ecore_iov_vf_mbx_start_rxq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_START_TXQ: + ecore_iov_vf_mbx_start_txq(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_RXQS: + ecore_iov_vf_mbx_stop_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_STOP_TXQS: + ecore_iov_vf_mbx_stop_txqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UPDATE_RXQ: + ecore_iov_vf_mbx_update_rxqs(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_VPORT_UPDATE: + ecore_iov_vf_mbx_vport_update(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_UCAST_FILTER: + ecore_iov_vf_mbx_ucast_filter(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_CLOSE: + ecore_iov_vf_mbx_close(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_INT_CLEANUP: + ecore_iov_vf_mbx_int_cleanup(p_hwfn, p_ptt, p_vf); + break; + case CHANNEL_TLV_RELEASE: + ecore_iov_vf_mbx_release(p_hwfn, p_ptt, p_vf); + break; + } + + ecore_iov_unlock_vf_pf_channel(p_hwfn, + p_vf, mbx->first_tlv.tl.type); + + } else { + /* unknown TLV - this may belong to a VF driver from the future + * - a version written after this PF driver was written, which + * supports features unknown as of yet. Too bad since we don't + * support them. Or this may be because someone wrote a crappy + * VF driver and is sending garbage over the channel. + */ + DP_ERR(p_hwfn, + "unknown TLV. type %d length %d. first 20 bytes of mailbox buffer:\n", + mbx->first_tlv.tl.type, mbx->first_tlv.tl.length); + + for (i = 0; i < 20; i++) { + DP_VERBOSE(p_hwfn, + ECORE_MSG_IOV, + "%x ", + mbx->req_virt->tlv_buf_size.tlv_buffer[i]); + } + + /* test whether we can respond to the VF (do we have an address + * for it?) + */ + if (p_vf->state == VF_ACQUIRED) + DP_ERR(p_hwfn, "UNKNOWN TLV Not supported yet\n"); + } + +#ifdef CONFIG_ECORE_SW_CHANNEL + mbx->sw_mbx.mbx_state = VF_PF_RESPONSE_READY; + mbx->sw_mbx.response_offset = 0; +#endif +} + +static enum _ecore_status_t ecore_sriov_vfpf_msg(struct ecore_hwfn *p_hwfn, + __le16 vfid, + struct regpair *vf_msg) +{ + struct ecore_vf_info *p_vf; + u8 min, max; + + if (!p_hwfn->pf_iov_info || !p_hwfn->pf_iov_info->vfs_array) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Got a message from VF while PF is not initialized for IOV support\n"); + return ECORE_SUCCESS; + } + + /* Find the VF record - message comes with realtive [engine] vfid */ + min = (u8)p_hwfn->hw_info.first_vf_in_pf; + max = min + p_hwfn->p_dev->sriov_info.total_vfs; + /* @@@TBD - for BE machines, should echo field be reversed? */ + if ((u8)vfid < min || (u8)vfid >= max) { + DP_INFO(p_hwfn, + "Got a message from VF with relative id 0x%08x, but PF's range is [0x%02x,...,0x%02x)\n", + (u8)vfid, min, max); + return ECORE_INVAL; + } + p_vf = &p_hwfn->pf_iov_info->vfs_array[(u8)vfid - min]; + + /* List the physical address of the request so that handler + * could later on copy the message from it. 
+ */ + p_vf->vf_mbx.pending_req = (((u64)vf_msg->hi) << 32) | vf_msg->lo; + + return OSAL_PF_VF_MSG(p_hwfn, p_vf->relative_vf_id); +} + +enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data) +{ + switch (opcode) { + case COMMON_EVENT_VF_PF_CHANNEL: + return ecore_sriov_vfpf_msg(p_hwfn, echo, + &data->vf_pf_channel.msg_addr); + case COMMON_EVENT_VF_FLR: + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF-FLR is still not supported\n"); + return ECORE_SUCCESS; + default: + DP_INFO(p_hwfn->p_dev, "Unknown sriov eqe event 0x%02x\n", + opcode); + return ECORE_INVAL; + } +} + +bool ecore_iov_is_vf_pending_flr(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + return !!(p_hwfn->pf_iov_info->pending_flr[rel_vf_id / 64] & + (1ULL << (rel_vf_id % 64))); +} + +bool ecore_iov_is_valid_vfid(struct ecore_hwfn *p_hwfn, int rel_vf_id, + bool b_enabled_only) +{ + if (!p_hwfn->pf_iov_info) { + DP_NOTICE(p_hwfn->p_dev, true, "No iov info\n"); + return false; + } + + return b_enabled_only ? ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, rel_vf_id) : + (rel_vf_id < p_hwfn->p_dev->sriov_info.total_vfs); +} + +struct ecore_public_vf_info *ecore_iov_get_public_vf_info(struct ecore_hwfn + *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only) +{ + struct ecore_vf_info *vf = OSAL_NULL; + + vf = ecore_iov_get_vf_info(p_hwfn, relative_vf_id, b_enabled_only); + if (!vf) + return OSAL_NULL; + + return &vf->p_vf_info; +} + +void ecore_iov_pf_add_pending_events(struct ecore_hwfn *p_hwfn, u8 vfid) +{ + u64 add_bit = 1ULL << (vfid % 64); + + /* TODO - add locking mechanisms [no atomics in ecore, so we can't + * add the lock inside the ecore_pf_iov struct]. + */ + p_hwfn->pf_iov_info->pending_events[vfid / 64] |= add_bit; +} + +void ecore_iov_pf_get_and_clear_pending_events(struct ecore_hwfn *p_hwfn, + u64 *events) +{ + u64 *p_pending_events = p_hwfn->pf_iov_info->pending_events; + + /* TODO - Take a lock */ + OSAL_MEMCPY(events, p_pending_events, + sizeof(u64) * ECORE_VF_ARRAY_LENGTH); + OSAL_MEMSET(p_pending_events, 0, sizeof(u64) * ECORE_VF_ARRAY_LENGTH); +} + +enum _ecore_status_t ecore_iov_copy_vf_msg(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *ptt, int vfid) +{ + struct ecore_dmae_params params; + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return ECORE_INVAL; + + OSAL_MEMSET(&params, 0, sizeof(struct ecore_dmae_params)); + params.flags = ECORE_DMAE_FLAG_VF_SRC | ECORE_DMAE_FLAG_COMPLETION_DST; + params.src_vfid = vf_info->abs_vf_id; + + if (ecore_dmae_host2host(p_hwfn, ptt, + vf_info->vf_mbx.pending_req, + vf_info->vf_mbx.req_phys, + sizeof(union vfpf_tlvs) / 4, &params)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Failed to copy message from VF 0x%02x\n", vfid); + + return ECORE_IO; + } + + return ECORE_SUCCESS; +} + +void ecore_iov_bulletin_set_forced_mac(struct ecore_hwfn *p_hwfn, + u8 *mac, int vfid) +{ + struct ecore_vf_info *vf_info; + u64 feature; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Can not set forced MAC, invalid vfid [%d]\n", vfid); + return; + } + + feature = 1 << MAC_ADDR_FORCED; + OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + /* Forced MAC will disable MAC_ADDR */ + vf_info->bulletin.p_virt->valid_bitmap &= + ~(1 << VFPF_BULLETIN_MAC_ADDR); + + ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); +} + +enum _ecore_status_t
ecore_iov_bulletin_set_mac(struct ecore_hwfn *p_hwfn, + u8 *mac, int vfid) +{ + struct ecore_vf_info *vf_info; + u64 feature; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Can not set MAC, invalid vfid [%d]\n", vfid); + return ECORE_INVAL; + } + + if (vf_info->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED)) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Can not set MAC, Forced MAC is configured\n"); + return ECORE_INVAL; + } + + feature = 1 << VFPF_BULLETIN_MAC_ADDR; + OSAL_MEMCPY(vf_info->bulletin.p_virt->mac, mac, ETH_ALEN); + + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + return ECORE_SUCCESS; +} + +void ecore_iov_bulletin_set_forced_vlan(struct ecore_hwfn *p_hwfn, + u16 pvid, int vfid) +{ + struct ecore_vf_info *vf_info; + u64 feature; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Can not set forced MAC, invalid vfid [%d]\n", vfid); + return; + } + + feature = 1 << VLAN_ADDR_FORCED; + vf_info->bulletin.p_virt->pvid = pvid; + if (pvid) + vf_info->bulletin.p_virt->valid_bitmap |= feature; + else + vf_info->bulletin.p_virt->valid_bitmap &= ~feature; + + ecore_iov_configure_vport_forced(p_hwfn, vf_info, feature); +} + +enum _ecore_status_t +ecore_iov_bulletin_set_forced_untagged_default(struct ecore_hwfn *p_hwfn, + bool b_untagged_only, int vfid) +{ + struct ecore_vf_info *vf_info; + u64 feature; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) { + DP_NOTICE(p_hwfn->p_dev, true, + "Can not set forced MAC, invalid vfid [%d]\n", vfid); + return ECORE_INVAL; + } + + /* Since this is configurable only during vport-start, don't take it + * if we're past that point. + */ + if (vf_info->state == VF_ENABLED) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Can't support untagged change for vfid[%d] - VF is already active\n", + vfid); + return ECORE_INVAL; + } + + /* Set configuration; This will later be taken into account during the + * VF initialization. + */ + feature = (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT) | + (1 << VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED); + vf_info->bulletin.p_virt->valid_bitmap |= feature; + + vf_info->bulletin.p_virt->default_only_untagged = b_untagged_only ? 
1 + : 0; + + return ECORE_SUCCESS; +} + +void ecore_iov_get_vfs_opaque_fid(struct ecore_hwfn *p_hwfn, int vfid, + u16 *opaque_fid) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return; + + *opaque_fid = vf_info->opaque_fid; +} + +void ecore_iov_get_vfs_vport_id(struct ecore_hwfn *p_hwfn, int vfid, + u8 *p_vort_id) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return; + + *p_vort_id = vf_info->vport_id; +} + +bool ecore_iov_vf_has_vport_instance(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_vf_info *p_vf_info; + + p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!p_vf_info) + return false; + + return !!p_vf_info->vport_instance; +} + +bool ecore_iov_is_vf_stopped(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_vf_info *p_vf_info; + + p_vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + + return p_vf_info->state == VF_STOPPED; +} + +bool ecore_iov_spoofchk_get(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return false; + + return vf_info->spoof_chk; +} + +bool ecore_iov_pf_sanity_check(struct ecore_hwfn *p_hwfn, int vfid) +{ + if (IS_VF(p_hwfn->p_dev) || !IS_ECORE_SRIOV(p_hwfn->p_dev) || + !IS_PF_SRIOV_ALLOC(p_hwfn) || + !ECORE_IS_VF_ACTIVE(p_hwfn->p_dev, vfid)) + return false; + else + return true; +} + +enum _ecore_status_t ecore_iov_spoofchk_set(struct ecore_hwfn *p_hwfn, + int vfid, bool val) +{ + enum _ecore_status_t rc = ECORE_INVAL; + struct ecore_vf_info *vf; + + if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, true, + "SR-IOV sanity check failed, can't set spoofchk\n"); + goto out; + } + + vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf) + goto out; + + if (!ecore_iov_vf_has_vport_instance(p_hwfn, vfid)) { + /* After VF VPORT start PF will configure spoof check */ + vf->req_spoofchk_val = val; + rc = ECORE_SUCCESS; + goto out; + } + + rc = __ecore_iov_spoofchk_set(p_hwfn, vf, val); + +out: + return rc; +} + +u8 ecore_iov_vf_chains_per_pf(struct ecore_hwfn *p_hwfn) +{ + u8 max_chains_per_vf = p_hwfn->hw_info.max_chains_per_vf; + + max_chains_per_vf = (max_chains_per_vf) ? 
max_chains_per_vf + : ECORE_MAX_VF_CHAINS_PER_PF; + + return max_chains_per_vf; +} + +void ecore_iov_get_vf_req_virt_mbx_params(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, + void **pp_req_virt_addr, + u16 *p_req_virt_size) +{ + struct ecore_vf_info *vf_info = + ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + + if (!vf_info) + return; + + if (pp_req_virt_addr) + *pp_req_virt_addr = vf_info->vf_mbx.req_virt; + + if (p_req_virt_size) + *p_req_virt_size = sizeof(*vf_info->vf_mbx.req_virt); +} + +void ecore_iov_get_vf_reply_virt_mbx_params(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id, + void **pp_reply_virt_addr, + u16 *p_reply_virt_size) +{ + struct ecore_vf_info *vf_info = + ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + + if (!vf_info) + return; + + if (pp_reply_virt_addr) + *pp_reply_virt_addr = vf_info->vf_mbx.reply_virt; + + if (p_reply_virt_size) + *p_reply_virt_size = sizeof(*vf_info->vf_mbx.reply_virt); +} + +#ifdef CONFIG_ECORE_SW_CHANNEL +struct ecore_iov_sw_mbx *ecore_iov_get_vf_sw_mbx(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *vf_info = + ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + + if (!vf_info) + return OSAL_NULL; + + return &vf_info->vf_mbx.sw_mbx; +} +#endif + +bool ecore_iov_is_valid_vfpf_msg_length(u32 length) +{ + return (length >= sizeof(struct vfpf_first_tlv) && + (length <= sizeof(union vfpf_tlvs))); +} + +u32 ecore_iov_pfvf_msg_length(void) +{ + return sizeof(union pfvf_tlvs); +} + +u8 *ecore_iov_bulletin_get_forced_mac(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return OSAL_NULL; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return OSAL_NULL; + + return p_vf->bulletin.p_virt->mac; +} + +u16 ecore_iov_bulletin_get_forced_vlan(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf || !p_vf->bulletin.p_virt) + return 0; + + if (!(p_vf->bulletin.p_virt->valid_bitmap & (1 << VLAN_ADDR_FORCED))) + return 0; + + return p_vf->bulletin.p_virt->pvid; +} + +enum _ecore_status_t ecore_iov_configure_tx_rate(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int vfid, int val) +{ + struct ecore_vf_info *vf; + enum _ecore_status_t rc; + u8 abs_vp_id = 0; + + vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + + if (!vf) + return ECORE_INVAL; + + rc = ecore_fw_vport(p_hwfn, vf->vport_id, &abs_vp_id); + if (rc != ECORE_SUCCESS) + return rc; + + rc = ecore_init_vport_rl(p_hwfn, p_ptt, abs_vp_id, (u32)val); + + return rc; +} + +enum _ecore_status_t ecore_iov_configure_min_tx_rate(struct ecore_dev *p_dev, + int vfid, u32 rate) +{ + struct ecore_vf_info *vf; + enum _ecore_status_t rc; + u8 vport_id; + int i; + + for_each_hwfn(p_dev, i) { + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[i]; + + if (!ecore_iov_pf_sanity_check(p_hwfn, vfid)) { + DP_NOTICE(p_hwfn, true, + "SR-IOV sanity check failed, can't set min rate\n"); + return ECORE_INVAL; + } + } + + vf = ecore_iov_get_vf_info(ECORE_LEADING_HWFN(p_dev), (u16)vfid, true); + vport_id = vf->vport_id; + + rc = ecore_configure_vport_wfq(p_dev, vport_id, rate); + + return rc; +} + +enum _ecore_status_t ecore_iov_get_vf_stats(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt, + int vfid, + struct ecore_eth_stats *p_stats) +{ + struct ecore_vf_info *vf; + + vf = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf) + return ECORE_INVAL; + + if 
(vf->state != VF_ENABLED) + return ECORE_INVAL; + + __ecore_get_vport_stats(p_hwfn, p_ptt, p_stats, + vf->abs_vf_id + 0x10, false); + + return ECORE_SUCCESS; +} + +u8 ecore_iov_get_vf_num_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return 0; + + return p_vf->num_rxqs; +} + +u8 ecore_iov_get_vf_num_active_rxqs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return 0; + + return p_vf->num_active_rxqs; +} + +void *ecore_iov_get_vf_ctx(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return OSAL_NULL; + + return p_vf->ctx; +} + +u8 ecore_iov_get_vf_num_sbs(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return 0; + + return p_vf->num_sbs; +} + +bool ecore_iov_is_vf_wait_for_acquire(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return false; + + return (p_vf->state == VF_FREE); +} + +bool ecore_iov_is_vf_acquired_not_initialized(struct ecore_hwfn *p_hwfn, + u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return false; + + return (p_vf->state == VF_ACQUIRED); +} + +bool ecore_iov_is_vf_initialized(struct ecore_hwfn *p_hwfn, u16 rel_vf_id) +{ + struct ecore_vf_info *p_vf; + + p_vf = ecore_iov_get_vf_info(p_hwfn, rel_vf_id, true); + if (!p_vf) + return false; + + return (p_vf->state == VF_ENABLED); +} + +int ecore_iov_get_vf_min_rate(struct ecore_hwfn *p_hwfn, int vfid) +{ + struct ecore_wfq_data *vf_vp_wfq; + struct ecore_vf_info *vf_info; + + vf_info = ecore_iov_get_vf_info(p_hwfn, (u16)vfid, true); + if (!vf_info) + return 0; + + vf_vp_wfq = &p_hwfn->qm_info.wfq_data[vf_info->vport_id]; + + if (vf_vp_wfq->configured) + return vf_vp_wfq->min_speed; + else + return 0; +} diff --git a/drivers/net/qede/base/ecore_sriov.h b/drivers/net/qede/base/ecore_sriov.h new file mode 100644 index 00000000..3471e5c6 --- /dev/null +++ b/drivers/net/qede/base/ecore_sriov.h @@ -0,0 +1,390 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_SRIOV_H__ +#define __ECORE_SRIOV_H__ + +#include "ecore_status.h" +#include "ecore_vfpf_if.h" +#include "ecore_iov_api.h" +#include "ecore_hsi_common.h" + +#define ECORE_ETH_VF_NUM_VLAN_FILTERS 2 + +#define ECORE_ETH_MAX_VF_NUM_VLAN_FILTERS \ + (MAX_NUM_VFS * ECORE_ETH_VF_NUM_VLAN_FILTERS) + +/* Represents a full message. Both the request filled by VF + * and the response filled by the PF. The VF needs one copy + * of this message, it fills the request part and sends it to + * the PF. The PF will copy the response to the response part for + * the VF to later read it. The PF needs to hold a message like this + * per VF, the request that is copied to the PF is placed in the + * request size, and the response is filled by the PF before sending + * it to the VF. + */ +struct ecore_vf_mbx_msg { + union vfpf_tlvs req; + union pfvf_tlvs resp; +}; + +/* This data is held in the ecore_hwfn structure for VFs only. 
*/ +struct ecore_vf_iov { + union vfpf_tlvs *vf2pf_request; + dma_addr_t vf2pf_request_phys; + union pfvf_tlvs *pf2vf_reply; + dma_addr_t pf2vf_reply_phys; + + /* Should be taken whenever the mailbox buffers are accessed */ + osal_mutex_t mutex; + u8 *offset; + + /* Bulletin Board */ + struct ecore_bulletin bulletin; + struct ecore_bulletin_content bulletin_shadow; + + /* we set aside a copy of the acquire response */ + struct pfvf_acquire_resp_tlv acquire_resp; +}; + +/* This mailbox is maintained per VF in its PF + * contains all information required for sending / receiving + * a message + */ +struct ecore_iov_vf_mbx { + union vfpf_tlvs *req_virt; + dma_addr_t req_phys; + union pfvf_tlvs *reply_virt; + dma_addr_t reply_phys; + + /* Address in VF where a pending message is located */ + dma_addr_t pending_req; + + u8 *offset; + +#ifdef CONFIG_ECORE_SW_CHANNEL + struct ecore_iov_sw_mbx sw_mbx; +#endif + + /* VF GPA address */ + u32 vf_addr_lo; + u32 vf_addr_hi; + + struct vfpf_first_tlv first_tlv; /* saved VF request header */ + + u8 flags; +#define VF_MSG_INPROCESS 0x1 /* failsafe - the FW should prevent + * more then one pending msg + */ +}; + +struct ecore_vf_q_info { + u16 fw_rx_qid; + u16 fw_tx_qid; + u8 fw_cid; + u8 rxq_active; + u8 txq_active; +}; + +enum int_mod { + VPORT_INT_MOD_UNDEFINED = 0, + VPORT_INT_MOD_ADAPTIVE = 1, + VPORT_INT_MOD_OFF = 2, + VPORT_INT_MOD_LOW = 100, + VPORT_INT_MOD_MEDIUM = 200, + VPORT_INT_MOD_HIGH = 300 +}; + +enum vf_state { + VF_FREE = 0, /* VF ready to be acquired holds no resc */ + VF_ACQUIRED = 1, /* VF, acquired, but not initalized */ + VF_ENABLED = 2, /* VF, Enabled */ + VF_RESET = 3, /* VF, FLR'd, pending cleanup */ + VF_STOPPED = 4 /* VF, Stopped */ +}; + +struct ecore_vf_vlan_shadow { + bool used; + u16 vid; +}; + +struct ecore_vf_shadow_config { + /* Shadow copy of all guest vlans */ + struct ecore_vf_vlan_shadow vlans[ECORE_ETH_VF_NUM_VLAN_FILTERS + 1]; + + u8 inner_vlan_removal; +}; + +/* PFs maintain an array of this structure, per VF */ +struct ecore_vf_info { + struct ecore_iov_vf_mbx vf_mbx; + enum vf_state state; + u8 to_disable; + + struct ecore_bulletin bulletin; + dma_addr_t vf_bulletin; + + u32 concrete_fid; + u16 opaque_fid; + u16 mtu; + + u8 vport_id; + u8 relative_vf_id; + u8 abs_vf_id; +#define ECORE_VF_ABS_ID(p_hwfn, p_vf) (ECORE_PATH_ID(p_hwfn) ? \ + (p_vf)->abs_vf_id + MAX_NUM_VFS_BB : \ + (p_vf)->abs_vf_id) + + u8 vport_instance; /* Number of active vports */ + u8 num_rxqs; + u8 num_txqs; + + u8 num_sbs; + + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + + struct ecore_vf_q_info vf_queues[ECORE_MAX_VF_CHAINS_PER_PF]; + u16 igu_sbs[ECORE_MAX_VF_CHAINS_PER_PF]; + + /* TODO - Only windows is using it - should be removed */ + u8 was_malicious; + u8 num_active_rxqs; + void *ctx; + struct ecore_public_vf_info p_vf_info; + bool spoof_chk; /* Current configured on HW */ + bool req_spoofchk_val; /* Requested value */ + + /* Stores the configuration requested by VF */ + struct ecore_vf_shadow_config shadow_config; + + /* A bitfield using bulletin's valid-map bits, used to indicate + * which of the bulletin board features have been configured. + */ + u64 configured_features; +#define ECORE_IOV_CONFIGURED_FEATURES_MASK ((1 << MAC_ADDR_FORCED) | \ + (1 << VLAN_ADDR_FORCED)) +}; + +/* This structure is part of ecore_hwfn and used only for PFs that have sriov + * capability enabled. 
+ */ +struct ecore_pf_iov { + struct ecore_vf_info vfs_array[MAX_NUM_VFS]; + u64 pending_events[ECORE_VF_ARRAY_LENGTH]; + u64 pending_flr[ECORE_VF_ARRAY_LENGTH]; + u16 base_vport_id; + + /* Allocate message address continuosuly and split to each VF */ + void *mbx_msg_virt_addr; + dma_addr_t mbx_msg_phys_addr; + u32 mbx_msg_size; + void *mbx_reply_virt_addr; + dma_addr_t mbx_reply_phys_addr; + u32 mbx_reply_size; + void *p_bulletins; + dma_addr_t bulletins_phys; + u32 bulletins_size; +}; + +#ifdef CONFIG_ECORE_SRIOV +/** + * @brief Read sriov related information and allocated resources + * reads from configuraiton space, shmem, and allocates the VF + * database in the PF. + * + * @param p_hwfn + * @param p_ptt + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt); + +/** + * @brief ecore_add_tlv - place a given tlv on the tlv buffer at next offset + * + * @param p_hwfn + * @param p_iov + * @param type + * @param length + * + * @return pointer to the newly placed tlv + */ +void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, + u8 **offset, u16 type, u16 length); + +/** + * @brief list the types and lengths of the tlvs on the buffer + * + * @param p_hwfn + * @param tlvs_list + */ +void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, void *tlvs_list); + +/** + * @brief ecore_iov_alloc - allocate sriov related resources + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_iov_setup - setup sriov related resources + * + * @param p_hwfn + * @param p_ptt + */ +void ecore_iov_setup(struct ecore_hwfn *p_hwfn, struct ecore_ptt *p_ptt); + +/** + * @brief ecore_iov_free - free sriov related resources + * + * @param p_hwfn + */ +void ecore_iov_free(struct ecore_hwfn *p_hwfn); + +/** + * @brief ecore_sriov_eqe_event - handle async sriov event arrived on eqe. + * + * @param p_hwfn + * @param opcode + * @param echo + * @param data + */ +enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn *p_hwfn, + u8 opcode, + __le16 echo, + union event_ring_data *data); + +/** + * @brief calculate CRC for bulletin board validation + * + * @param basic crc seed + * @param ptr to beginning of buffer + * @length in bytes of buffer + * + * @return calculated crc over buffer [with respect to seed]. + */ +u32 ecore_crc32(u32 crc, u8 *ptr, u32 length); + +/** + * @brief Mark structs of vfs that have been FLR-ed. + * + * @param p_hwfn + * @param disabled_vfs - bitmask of all VFs on path that were FLRed + * + * @return 1 iff one of the PF's vfs got FLRed. 0 otherwise. + */ +int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, u32 *disabled_vfs); + +/** + * @brief Search extended TLVs in request/reply buffer. + * + * @param p_hwfn + * @param p_tlvs_list - Pointer to tlvs list + * @param req_type - Type of TLV + * + * @return pointer to tlv type if found, otherwise returns NULL. 
+ */ +void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, + void *p_tlvs_list, u16 req_type); + +/** + * @brief ecore_iov_get_vf_info - return the database of a + * specific VF + * + * @param p_hwfn + * @param relative_vf_id - relative id of the VF for which info + * is requested + * @param b_enabled_only - false iff want to access even if vf is disabled + * + * @return struct ecore_vf_info* + */ +struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn *p_hwfn, + u16 relative_vf_id, + bool b_enabled_only); +#else +static OSAL_INLINE enum _ecore_status_t ecore_iov_hw_info(struct ecore_hwfn + *p_hwfn, + struct ecore_ptt + *p_ptt) +{ + return ECORE_SUCCESS; +} + +static OSAL_INLINE void *ecore_add_tlv(struct ecore_hwfn *p_hwfn, u8 **offset, + u16 type, u16 length) +{ + return OSAL_NULL; +} + +static OSAL_INLINE void ecore_dp_tlv_list(struct ecore_hwfn *p_hwfn, + void *tlvs_list) +{ +} + +static OSAL_INLINE enum _ecore_status_t ecore_iov_alloc(struct ecore_hwfn + *p_hwfn) +{ + return ECORE_SUCCESS; +} + +static OSAL_INLINE void ecore_iov_setup(struct ecore_hwfn *p_hwfn, + struct ecore_ptt *p_ptt) +{ +} + +static OSAL_INLINE void ecore_iov_free(struct ecore_hwfn *p_hwfn) +{ +} + +static OSAL_INLINE enum _ecore_status_t ecore_sriov_eqe_event(struct ecore_hwfn + *p_hwfn, + u8 opcode, + __le16 echo, + union + event_ring_data + * data) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE u32 ecore_crc32(u32 crc, u8 *ptr, u32 length) +{ + return 0; +} + +static OSAL_INLINE int ecore_iov_mark_vf_flr(struct ecore_hwfn *p_hwfn, + u32 *disabled_vfs) +{ + return 0; +} + +static OSAL_INLINE void *ecore_iov_search_list_tlvs(struct ecore_hwfn *p_hwfn, + void *p_tlvs_list, + u16 req_type) +{ + return OSAL_NULL; +} + +static OSAL_INLINE struct ecore_vf_info *ecore_iov_get_vf_info(struct ecore_hwfn + *p_hwfn, + u16 + relative_vf_id, + bool + b_enabled_only) +{ + return OSAL_NULL; +} + +#endif +#endif /* __ECORE_SRIOV_H__ */ diff --git a/drivers/net/qede/base/ecore_status.h b/drivers/net/qede/base/ecore_status.h new file mode 100644 index 00000000..98d40bb5 --- /dev/null +++ b/drivers/net/qede/base/ecore_status.h @@ -0,0 +1,30 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_STATUS_H__ +#define __ECORE_STATUS_H__ + +enum _ecore_status_t { + ECORE_UNKNOWN_ERROR = -12, + ECORE_NORESOURCES = -11, + ECORE_NODEV = -10, + ECORE_ABORTED = -9, + ECORE_AGAIN = -8, + ECORE_NOTIMPL = -7, + ECORE_EXISTS = -6, + ECORE_IO = -5, + ECORE_TIMEOUT = -4, + ECORE_INVAL = -3, + ECORE_BUSY = -2, + ECORE_NOMEM = -1, + ECORE_SUCCESS = 0, + /* PENDING is not an error and should be positive */ + ECORE_PENDING = 1, +}; + +#endif /* __ECORE_STATUS_H__ */ diff --git a/drivers/net/qede/base/ecore_utils.h b/drivers/net/qede/base/ecore_utils.h new file mode 100644 index 00000000..616b44c2 --- /dev/null +++ b/drivers/net/qede/base/ecore_utils.h @@ -0,0 +1,31 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ECORE_UTILS_H__ +#define __ECORE_UTILS_H__ + +/* dma_addr_t manip */ +#define DMA_LO(x) ((u32)(((dma_addr_t)(x)) & 0xffffffff)) +#define DMA_HI(x) ((u32)(((dma_addr_t)(x)) >> 32)) + +#define DMA_LO_LE(x) OSAL_CPU_TO_LE32(DMA_LO(x)) +#define DMA_HI_LE(x) OSAL_CPU_TO_LE32(DMA_HI(x)) + +/* It's assumed that whoever includes this has previously included an hsi + * file defining the regpair. + */ +#define DMA_REGPAIR_LE(x, val) (x).hi = DMA_HI_LE((val)); \ + (x).lo = DMA_LO_LE((val)) + +#define HILO_GEN(hi, lo, type) ((((type)(hi)) << 32) + (lo)) +#define HILO_DMA(hi, lo) HILO_GEN(hi, lo, dma_addr_t) +#define HILO_64(hi, lo) HILO_GEN(hi, lo, u64) +#define HILO_DMA_REGPAIR(regpair) (HILO_DMA(regpair.hi, regpair.lo)) +#define HILO_64_REGPAIR(regpair) (HILO_64(regpair.hi, regpair.lo)) + +#endif diff --git a/drivers/net/qede/base/ecore_vf.c b/drivers/net/qede/base/ecore_vf.c new file mode 100644 index 00000000..d32fb35a --- /dev/null +++ b/drivers/net/qede/base/ecore_vf.c @@ -0,0 +1,1332 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "bcm_osal.h" +#include "ecore.h" +#include "ecore_hsi_eth.h" +#include "ecore_sriov.h" +#include "ecore_l2_api.h" +#include "ecore_vf.h" +#include "ecore_vfpf_if.h" +#include "ecore_status.h" +#include "reg_addr.h" +#include "ecore_int.h" +#include "ecore_l2.h" +#include "ecore_mcp_api.h" +#include "ecore_vf_api.h" + +static void *ecore_vf_pf_prep(struct ecore_hwfn *p_hwfn, u16 type, u16 length) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + void *p_tlv; + + /* This lock is released when we receive PF's response + * in ecore_send_msg2pf(). + * So, ecore_vf_pf_prep() and ecore_send_msg2pf() + * must come in sequence. + */ + OSAL_MUTEX_ACQUIRE(&p_iov->mutex); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "preparing to send %s tlv over vf pf channel\n", + ecore_channel_tlvs_string[type]); + + /* Reset Request offset */ + p_iov->offset = (u8 *)(p_iov->vf2pf_request); + + /* Clear mailbox - both request and reply */ + OSAL_MEMSET(p_iov->vf2pf_request, 0, sizeof(union vfpf_tlvs)); + OSAL_MEMSET(p_iov->pf2vf_reply, 0, sizeof(union pfvf_tlvs)); + + /* Init type and length */ + p_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, type, length); + + /* Init first tlv header */ + ((struct vfpf_first_tlv *)p_tlv)->reply_address = + (u64)p_iov->pf2vf_reply_phys; + + return p_tlv; +} + +static int ecore_send_msg2pf(struct ecore_hwfn *p_hwfn, + u8 *done, u32 resp_size) +{ + struct ustorm_vf_zone *zone_data = (struct ustorm_vf_zone *) + ((u8 *)PXP_VF_BAR0_START_USDM_ZONE_B); + union vfpf_tlvs *p_req = p_hwfn->vf_iov_info->vf2pf_request; + struct ustorm_trigger_vf_zone trigger; + int rc = ECORE_SUCCESS, time = 100; + u8 pf_id; + + /* output tlvs list */ + ecore_dp_tlv_list(p_hwfn, p_req); + + /* need to add the END TLV to the message size */ + resp_size += sizeof(struct channel_list_end_tlv); + + if (!p_hwfn->p_dev->sriov_info.b_hw_channel) { + rc = OSAL_VF_SEND_MSG2PF(p_hwfn->p_dev, + done, + p_req, + p_hwfn->vf_iov_info->pf2vf_reply, + sizeof(union vfpf_tlvs), resp_size); + /* TODO - no prints about message ? 
*/ + goto exit; + } + + /* Send TLVs over HW channel */ + OSAL_MEMSET(&trigger, 0, sizeof(struct ustorm_trigger_vf_zone)); + trigger.vf_pf_msg_valid = 1; + /* TODO - FW should remove this requirement */ + pf_id = GET_FIELD(p_hwfn->hw_info.concrete_fid, PXP_CONCRETE_FID_PFID); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF -> PF [%02x] message: [%08x, %08x] --> %p, %08x --> %p\n", + pf_id, + U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys), + U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys), + &zone_data->non_trigger.vf_pf_msg_addr, + *((u32 *)&trigger), &zone_data->trigger); + + REG_WR(p_hwfn, + (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.lo, + U64_LO(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + REG_WR(p_hwfn, + (osal_uintptr_t)&zone_data->non_trigger.vf_pf_msg_addr.hi, + U64_HI(p_hwfn->vf_iov_info->vf2pf_request_phys)); + + /* The message data must be written first, to prevent trigger before + * data is written. + */ + OSAL_WMB(p_hwfn->p_dev); + + REG_WR(p_hwfn, (osal_uintptr_t)&zone_data->trigger, + *((u32 *)&trigger)); + + while ((!*done) && time) { + OSAL_MSLEEP(25); + time--; + } + + if (!*done) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF <-- PF Timeout [Type %d]\n", + p_req->first_tlv.tl.type); + rc = ECORE_TIMEOUT; + goto exit; + } else { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "PF response: %d [Type %d]\n", + *done, p_req->first_tlv.tl.type); + } + +exit: + OSAL_MUTEX_RELEASE(&p_hwfn->vf_iov_info->mutex); + + return rc; +} + +#define VF_ACQUIRE_THRESH 3 +#define VF_ACQUIRE_MAC_FILTERS 1 +#define VF_ACQUIRE_MC_FILTERS 10 + +static enum _ecore_status_t ecore_vf_pf_acquire(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp; + struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info; + struct ecore_vf_acquire_sw_info vf_sw_info; + struct vfpf_acquire_tlv *req; + int rc = 0, attempts = 0; + bool resources_acquired = false; + + /* @@@ TBD: MichalK take this from somewhere else... */ + u8 rx_count = 1, tx_count = 1, num_sbs = 1; + u8 num_mac = VF_ACQUIRE_MAC_FILTERS, num_mc = VF_ACQUIRE_MC_FILTERS; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_ACQUIRE, sizeof(*req)); + + /* @@@ TBD: PF may not be ready bnx2x_get_vf_id... 
*/ + req->vfdev_info.opaque_fid = p_hwfn->hw_info.opaque_fid; + + req->resc_request.num_rxqs = rx_count; + req->resc_request.num_txqs = tx_count; + req->resc_request.num_sbs = num_sbs; + req->resc_request.num_mac_filters = num_mac; + req->resc_request.num_mc_filters = num_mc; + req->resc_request.num_vlan_filters = ECORE_ETH_VF_NUM_VLAN_FILTERS; + + OSAL_MEMSET(&vf_sw_info, 0, sizeof(vf_sw_info)); + OSAL_VF_FILL_ACQUIRE_RESC_REQ(p_hwfn, &req->resc_request, &vf_sw_info); + + req->vfdev_info.os_type = vf_sw_info.os_type; + req->vfdev_info.driver_version = vf_sw_info.driver_version; + req->vfdev_info.fw_major = FW_MAJOR_VERSION; + req->vfdev_info.fw_minor = FW_MINOR_VERSION; + req->vfdev_info.fw_revision = FW_REVISION_VERSION; + req->vfdev_info.fw_engineering = FW_ENGINEERING_VERSION; + + if (vf_sw_info.override_fw_version) + req->vfdev_info.capabilties |= VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER; + + /* pf 2 vf bulletin board address */ + req->bulletin_addr = p_iov->bulletin.phys; + req->bulletin_size = p_iov->bulletin.size; + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + while (!resources_acquired) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "attempting to acquire resources\n"); + + /* send acquire request */ + rc = ecore_send_msg2pf(p_hwfn, + &resp->hdr.status, sizeof(*resp)); + + /* PF timeout */ + if (rc) + return rc; + + /* copy acquire response from buffer to p_hwfn */ + OSAL_MEMCPY(&p_iov->acquire_resp, + resp, sizeof(p_iov->acquire_resp)); + + attempts++; + + /* PF agrees to allocate our resources */ + if (resp->hdr.status == PFVF_STATUS_SUCCESS) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "resources acquired\n"); + resources_acquired = true; + } /* PF refuses to allocate our resources */ + else if (resp->hdr.status == + PFVF_STATUS_NO_RESOURCE && + attempts < VF_ACQUIRE_THRESH) { + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "PF unwilling to fullfill resource request. Try PF recommended amount\n"); + + /* humble our request */ + req->resc_request.num_txqs = resp->resc.num_txqs; + req->resc_request.num_rxqs = resp->resc.num_rxqs; + req->resc_request.num_sbs = resp->resc.num_sbs; + req->resc_request.num_mac_filters = + resp->resc.num_mac_filters; + req->resc_request.num_vlan_filters = + resp->resc.num_vlan_filters; + req->resc_request.num_mc_filters = + resp->resc.num_mc_filters; + + /* Clear response buffer */ + OSAL_MEMSET(p_iov->pf2vf_reply, 0, + sizeof(union pfvf_tlvs)); + } else { + DP_ERR(p_hwfn, + "PF returned error %d to VF acquisition request\n", + resp->hdr.status); + return ECORE_AGAIN; + } + } + + rc = OSAL_VF_UPDATE_ACQUIRE_RESC_RESP(p_hwfn, &resp->resc); + if (rc) { + DP_NOTICE(p_hwfn, true, + "VF_UPDATE_ACQUIRE_RESC_RESP Failed: status = 0x%x.\n", + rc); + return ECORE_AGAIN; + } + + /* Update bulletin board size with response from PF */ + p_iov->bulletin.size = resp->bulletin_size; + + /* get HW info */ + p_hwfn->p_dev->type = resp->pfdev_info.dev_type; + p_hwfn->p_dev->chip_rev = resp->pfdev_info.chip_rev; + + DP_INFO(p_hwfn, "Chip details - %s%d\n", + ECORE_IS_BB(p_hwfn->p_dev) ? "BB" : "AH", + CHIP_REV_IS_A0(p_hwfn->p_dev) ? 0 : 1); + + /* @@@TBD MichalK: Fw ver... 
*/ + /* strlcpy(p_hwfn->fw_ver, p_hwfn->acquire_resp.pfdev_info.fw_ver, + * sizeof(p_hwfn->fw_ver)); + */ + + p_hwfn->p_dev->chip_num = pfdev_info->chip_num & 0xffff; + + return 0; +} + +enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev) +{ + enum _ecore_status_t rc = ECORE_NOMEM; + struct ecore_vf_iov *p_sriov; + struct ecore_hwfn *p_hwfn = &p_dev->hwfns[0]; /* @@@TBD CMT */ + + p_dev->num_hwfns = 1; /* @@@TBD CMT must be fixed... */ + + p_hwfn->regview = p_dev->regview; + if (p_hwfn->regview == OSAL_NULL) { + DP_ERR(p_hwfn, + "regview should be initialized before" + " ecore_vf_hw_prepare is called\n"); + return ECORE_INVAL; + } + + /* Set the doorbell bar. Assumption: regview is set */ + p_hwfn->doorbells = (u8 OSAL_IOMEM *)p_hwfn->regview + + PXP_VF_BAR0_START_DQ; + + p_hwfn->hw_info.opaque_fid = (u16)REG_RD(p_hwfn, + PXP_VF_BAR0_ME_OPAQUE_ADDRESS); + + p_hwfn->hw_info.concrete_fid = REG_RD(p_hwfn, + PXP_VF_BAR0_ME_CONCRETE_ADDRESS); + + /* Allocate vf sriov info */ + p_sriov = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, sizeof(*p_sriov)); + if (!p_sriov) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `struct ecore_sriov'\n"); + return ECORE_NOMEM; + } + + OSAL_MEMSET(p_sriov, 0, sizeof(*p_sriov)); + + /* Allocate vf2pf msg */ + p_sriov->vf2pf_request = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_sriov-> + vf2pf_request_phys, + sizeof(union + vfpf_tlvs)); + if (!p_sriov->vf2pf_request) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `vf2pf_request' DMA memory\n"); + goto free_p_sriov; + } + + p_sriov->pf2vf_reply = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_sriov-> + pf2vf_reply_phys, + sizeof(union pfvf_tlvs)); + if (!p_sriov->pf2vf_reply) { + DP_NOTICE(p_hwfn, true, + "Failed to allocate `pf2vf_reply' DMA memory\n"); + goto free_vf2pf_request; + } + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF's Request mailbox [%p virt 0x%" PRIx64 " phys], " + "Response mailbox [%p virt 0x%" PRIx64 " phys]\n", + p_sriov->vf2pf_request, + (u64)p_sriov->vf2pf_request_phys, + p_sriov->pf2vf_reply, (u64)p_sriov->pf2vf_reply_phys); + + /* Allocate Bulletin board */ + p_sriov->bulletin.size = sizeof(struct ecore_bulletin_content); + p_sriov->bulletin.p_virt = OSAL_DMA_ALLOC_COHERENT(p_hwfn->p_dev, + &p_sriov->bulletin. + phys, + p_sriov->bulletin. 
+ size); + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VF's bulletin Board [%p virt 0x%" PRIx64 " phys 0x%08x bytes]\n", + p_sriov->bulletin.p_virt, (u64)p_sriov->bulletin.phys, + p_sriov->bulletin.size); + + OSAL_MUTEX_ALLOC(p_hwfn, &p_sriov->mutex); + OSAL_MUTEX_INIT(&p_sriov->mutex); + + p_hwfn->vf_iov_info = p_sriov; + + p_hwfn->hw_info.personality = ECORE_PCI_ETH; + + /* First VF needs to query for information from PF */ + if (!p_hwfn->my_id) + rc = ecore_vf_pf_acquire(p_hwfn); + + return rc; + +free_vf2pf_request: + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, p_sriov->vf2pf_request, + p_sriov->vf2pf_request_phys, + sizeof(union vfpf_tlvs)); +free_p_sriov: + OSAL_FREE(p_hwfn->p_dev, p_sriov); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn) +{ + p_hwfn->b_int_enabled = 1; + + return 0; +} + +/* TEMP TEMP until in HSI */ +#define TSTORM_QZONE_START PXP_VF_BAR0_START_SDM_ZONE_A +#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ + (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) +#define USTORM_QZONE_START(dev) (MSTORM_QZONE_START + \ + (MSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) + +enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, + u8 rx_qid, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void OSAL_IOMEM * *pp_prod) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_start_rxq_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + u8 hw_qid; + u64 init_prod_val = 0; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_RXQ, sizeof(*req)); + + /* @@@TBD MichalK TPA */ + + req->rx_qid = rx_qid; + req->cqe_pbl_addr = cqe_pbl_addr; + req->cqe_pbl_size = cqe_pbl_size; + req->rxq_addr = bd_chain_phys_addr; + req->hw_sb = sb; + req->sb_index = sb_index; + req->hc_rate = 0; /* @@@TBD MichalK -> host coalescing! */ + req->bd_max_bytes = bd_max_bytes; + req->stat_id = -1; /* No stats at the moment */ + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + if (pp_prod) { + hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; + + *pp_prod = (u8 OSAL_IOMEM *)p_hwfn->regview + + MSTORM_QZONE_START(p_hwfn->p_dev) + + (hw_qid) * MSTORM_QZONE_SIZE + + OFFSETOF(struct mstorm_eth_queue_zone, rx_producers); + + /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ + __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u64), + (u32 *)(&init_prod_val)); + } + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, + u16 rx_qid, bool cqe_completion) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_rxqs_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); + + /* @@@TBD MichalK TPA */ + + /* @@@TBD MichalK - relevant ??? 
+ * flags VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN + */ + req->rx_qid = rx_qid; + req->num_rxqs = 1; + req->cqe_completion = cqe_completion; + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, + void OSAL_IOMEM * *pp_doorbell) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_start_txq_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); + + /* @@@TBD MichalK TPA */ + + req->tx_qid = tx_queue_id; + + /* Tx */ + req->pbl_addr = pbl_addr; + req->pbl_size = pbl_size; + req->hw_sb = sb; + req->sb_index = sb_index; + req->hc_rate = 0; /* @@@TBD MichalK -> host coalescing! */ + req->flags = 0; /* @@@TBD MichalK -> flags... */ + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + if (pp_doorbell) { + u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id]; + + *pp_doorbell = (u8 OSAL_IOMEM *)p_hwfn->doorbells + + DB_ADDR_VF(cid, DQ_DEMS_LEGACY); + } + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, u16 tx_qid) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_stop_txqs_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); + + /* @@@TBD MichalK TPA */ + + /* @@@TBD MichalK - relevant ??? 
flags + * VFPF_QUEUE_FLG_OV VFPF_QUEUE_FLG_VLAN + */ + req->tx_qid = tx_qid; + req->num_txqs = 1; + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 comp_cqe_flg, u8 comp_event_flg) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + struct vfpf_update_rxq_tlv *req; + int rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UPDATE_RXQ, sizeof(*req)); + + req->rx_qid = rx_queue_id; + req->num_rxqs = num_rxqs; + + if (comp_cqe_flg) + req->flags |= VFPF_RXQ_UPD_COMPLETE_CQE_FLAG; + if (comp_event_flg) + req->flags |= VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG; + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + return rc; +} + +enum _ecore_status_t +ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, u8 vport_id, + u16 mtu, u8 inner_vlan_removal, + enum ecore_tpa_mode tpa_mode, u8 max_buffers_per_cqe, + u8 only_untagged) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_start_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc, i; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_START, sizeof(*req)); + + req->mtu = mtu; + req->vport_id = vport_id; + req->inner_vlan_removal = inner_vlan_removal; + req->tpa_mode = tpa_mode; + req->max_buffers_per_cqe = max_buffers_per_cqe; + req->only_untagged = only_untagged; + + /* status blocks */ + for (i = 0; i < p_hwfn->vf_iov_info->acquire_resp.resc.num_sbs; i++) + if (p_hwfn->sbs_info[i]) + req->sb_addr[i] = p_hwfn->sbs_info[i]->sb_phys; + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_TEARDOWN, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + return rc; +} + +static void +ecore_vf_handle_vp_update_tlvs_resp(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_data) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *p_resp; + u16 tlv; + + if (p_data->update_vport_active_rx_flg || + p_data->update_vport_active_tx_flg) { + tlv = 
CHANNEL_TLV_VPORT_UPDATE_ACTIVATE; + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VP update activate tlv configured\n"); + else + DP_NOTICE(p_hwfn, true, + "VP update activate tlv config failed\n"); + } + + if (p_data->update_tx_switching_flg) { + tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VP update tx switch tlv configured\n"); +#ifndef ASIC_ONLY + else if (CHIP_REV_IS_FPGA(p_hwfn->p_dev)) + DP_NOTICE(p_hwfn, false, + "FPGA: Skip checking whether PF" + " replied to Tx-switching request\n"); +#endif + else + DP_NOTICE(p_hwfn, true, + "VP update tx switch tlv config failed\n"); + } + + if (p_data->update_inner_vlan_removal_flg) { + tlv = CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP; + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VP update vlan strip tlv configured\n"); + else + DP_NOTICE(p_hwfn, true, + "VP update vlan strip tlv config failed\n"); + } + + if (p_data->update_approx_mcast_flg) { + tlv = CHANNEL_TLV_VPORT_UPDATE_MCAST; + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VP update mcast tlv configured\n"); + else + DP_NOTICE(p_hwfn, true, + "VP update mcast tlv config failed\n"); + } + + if (p_data->accept_flags.update_rx_mode_config || + p_data->accept_flags.update_tx_mode_config) { + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VP update accept_mode tlv configured\n"); + else + DP_NOTICE(p_hwfn, true, + "VP update accept_mode tlv config failed\n"); + } + + if (p_data->rss_params) { + tlv = CHANNEL_TLV_VPORT_UPDATE_RSS; + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VP update rss tlv configured\n"); + else + DP_NOTICE(p_hwfn, true, + "VP update rss tlv config failed\n"); + } + + if (p_data->sge_tpa_params) { + tlv = CHANNEL_TLV_VPORT_UPDATE_SGE_TPA; + p_resp = (struct pfvf_def_resp_tlv *) + ecore_iov_search_list_tlvs(p_hwfn, p_iov->pf2vf_reply, tlv); + if (p_resp && p_resp->hdr.status) + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "VP update sge tpa tlv configured\n"); + else + DP_NOTICE(p_hwfn, true, + "VP update sge tpa tlv config failed\n"); + } +} + +enum _ecore_status_t +ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params) +{ + struct vfpf_vport_update_accept_any_vlan_tlv *p_any_vlan_tlv; + struct vfpf_vport_update_accept_param_tlv *p_accept_tlv; + struct vfpf_vport_update_tx_switch_tlv *p_tx_switch_tlv; + struct vfpf_vport_update_mcast_bin_tlv *p_mcast_tlv; + struct vfpf_vport_update_vlan_strip_tlv *p_vlan_tlv; + struct vfpf_vport_update_sge_tpa_tlv *p_sge_tpa_tlv; + struct vfpf_vport_update_activate_tlv *p_act_tlv; + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_vport_update_rss_tlv *p_rss_tlv; + struct vfpf_vport_update_tlv *req; + struct pfvf_def_resp_tlv *resp; 
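+ /* each parameter group flagged for update is encoded below as its own extended tlv; every extended tlv added also grows the expected reply (resp_size) by one default response tlv */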
+ u8 update_rx, update_tx; + u32 resp_size = 0; + u16 size, tlv; + int rc; + + resp = &p_iov->pf2vf_reply->default_resp; + resp_size = sizeof(*resp); + + update_rx = p_params->update_vport_active_rx_flg; + update_tx = p_params->update_vport_active_tx_flg; + + /* clear mailbox and prep header tlv */ + ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_VPORT_UPDATE, sizeof(*req)); + + /* Prepare extended tlvs */ + if (update_rx || update_tx) { + size = sizeof(struct vfpf_vport_update_activate_tlv); + p_act_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_act_tlv->update_rx = update_rx; + p_act_tlv->active_rx = p_params->vport_active_rx_flg; + } + + if (update_tx) { + p_act_tlv->update_tx = update_tx; + p_act_tlv->active_tx = p_params->vport_active_tx_flg; + } + } + + if (p_params->update_inner_vlan_removal_flg) { + size = sizeof(struct vfpf_vport_update_vlan_strip_tlv); + p_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + p_vlan_tlv->remove_vlan = p_params->inner_vlan_removal_flg; + } + + if (p_params->update_tx_switching_flg) { + size = sizeof(struct vfpf_vport_update_tx_switch_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH; + p_tx_switch_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, + tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + p_tx_switch_tlv->tx_switching = p_params->tx_switching_flg; + } + + if (p_params->update_approx_mcast_flg) { + size = sizeof(struct vfpf_vport_update_mcast_bin_tlv); + p_mcast_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_MCAST, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + OSAL_MEMCPY(p_mcast_tlv->bins, p_params->bins, + sizeof(unsigned long) * + ETH_MULTICAST_MAC_BINS_IN_REGS); + } + + update_rx = p_params->accept_flags.update_rx_mode_config; + update_tx = p_params->accept_flags.update_tx_mode_config; + + if (update_rx || update_tx) { + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM; + size = sizeof(struct vfpf_vport_update_accept_param_tlv); + p_accept_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, tlv, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (update_rx) { + p_accept_tlv->update_rx_mode = update_rx; + p_accept_tlv->rx_accept_filter = + p_params->accept_flags.rx_accept_filter; + } + + if (update_tx) { + p_accept_tlv->update_tx_mode = update_tx; + p_accept_tlv->tx_accept_filter = + p_params->accept_flags.tx_accept_filter; + } + } + + if (p_params->rss_params) { + struct ecore_rss_params *rss_params = p_params->rss_params; + + size = sizeof(struct vfpf_vport_update_rss_tlv); + p_rss_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_RSS, size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (rss_params->update_rss_config) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CONFIG_FLAG; + if (rss_params->update_rss_capabilities) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_CAPS_FLAG; + if (rss_params->update_rss_ind_table) + p_rss_tlv->update_rss_flags |= + VFPF_UPDATE_RSS_IND_TABLE_FLAG; + if (rss_params->update_rss_key) + p_rss_tlv->update_rss_flags |= VFPF_UPDATE_RSS_KEY_FLAG; + + p_rss_tlv->rss_enable = rss_params->rss_enable; + p_rss_tlv->rss_caps = rss_params->rss_caps; + p_rss_tlv->rss_table_size_log = rss_params->rss_table_size_log; + OSAL_MEMCPY(p_rss_tlv->rss_ind_table, rss_params->rss_ind_table, + sizeof(rss_params->rss_ind_table)); + 
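/* the indirection table above and the hash key below are copied verbatim from the caller's ecore_rss_params */ +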
OSAL_MEMCPY(p_rss_tlv->rss_key, rss_params->rss_key, + sizeof(rss_params->rss_key)); + } + + if (p_params->update_accept_any_vlan_flg) { + size = sizeof(struct vfpf_vport_update_accept_any_vlan_tlv); + tlv = CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN; + p_any_vlan_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, + tlv, size); + + resp_size += sizeof(struct pfvf_def_resp_tlv); + p_any_vlan_tlv->accept_any_vlan = p_params->accept_any_vlan; + p_any_vlan_tlv->update_accept_any_vlan_flg = + p_params->update_accept_any_vlan_flg; + } + + if (p_params->sge_tpa_params) { + struct ecore_sge_tpa_params *sge_tpa_params = + p_params->sge_tpa_params; + + size = sizeof(struct vfpf_vport_update_sge_tpa_tlv); + p_sge_tpa_tlv = ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, + size); + resp_size += sizeof(struct pfvf_def_resp_tlv); + + if (sge_tpa_params->update_tpa_en_flg) + p_sge_tpa_tlv->update_sge_tpa_flags |= + VFPF_UPDATE_TPA_EN_FLAG; + if (sge_tpa_params->update_tpa_param_flg) + p_sge_tpa_tlv->update_sge_tpa_flags |= + VFPF_UPDATE_TPA_PARAM_FLAG; + + if (sge_tpa_params->tpa_ipv4_en_flg) + p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV4_EN_FLAG; + if (sge_tpa_params->tpa_ipv6_en_flg) + p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_IPV6_EN_FLAG; + if (sge_tpa_params->tpa_pkt_split_flg) + p_sge_tpa_tlv->sge_tpa_flags |= VFPF_TPA_PKT_SPLIT_FLAG; + if (sge_tpa_params->tpa_hdr_data_split_flg) + p_sge_tpa_tlv->sge_tpa_flags |= + VFPF_TPA_HDR_DATA_SPLIT_FLAG; + if (sge_tpa_params->tpa_gro_consistent_flg) + p_sge_tpa_tlv->sge_tpa_flags |= + VFPF_TPA_GRO_CONSIST_FLAG; + + p_sge_tpa_tlv->tpa_max_aggs_num = + sge_tpa_params->tpa_max_aggs_num; + p_sge_tpa_tlv->tpa_max_size = sge_tpa_params->tpa_max_size; + p_sge_tpa_tlv->tpa_min_size_to_start = + sge_tpa_params->tpa_min_size_to_start; + p_sge_tpa_tlv->tpa_min_size_to_cont = + sge_tpa_params->tpa_min_size_to_cont; + + p_sge_tpa_tlv->max_buffers_per_cqe = + sge_tpa_params->max_buffers_per_cqe; + } + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, resp_size); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + ecore_vf_handle_vp_update_tlvs_resp(p_hwfn, p_params); + + return rc; +} + +enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_first_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_CLOSE, sizeof(*req)); + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_AGAIN; + + p_hwfn->b_int_enabled = 0; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_first_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + u32 size; + int rc; + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_RELEASE, sizeof(*req)); + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + 
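/* send the release request; the mailbox and bulletin buffers below are freed regardless of the PF's reply, and a bad reply status is reported as ECORE_AGAIN */ +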
rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + + if (rc == ECORE_SUCCESS && resp->hdr.status != PFVF_STATUS_SUCCESS) + rc = ECORE_AGAIN; + + p_hwfn->b_int_enabled = 0; + + /* TODO - might need to revise this for 100g */ + if (IS_LEAD_HWFN(p_hwfn)) + OSAL_MUTEX_DEALLOC(&p_iov->mutex); + + if (p_iov->vf2pf_request) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov->vf2pf_request, + p_iov->vf2pf_request_phys, + sizeof(union vfpf_tlvs)); + if (p_iov->pf2vf_reply) + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov->pf2vf_reply, + p_iov->pf2vf_reply_phys, + sizeof(union pfvf_tlvs)); + + if (p_iov->bulletin.p_virt) { + size = sizeof(struct ecore_bulletin_content); + OSAL_DMA_FREE_COHERENT(p_hwfn->p_dev, + p_iov->bulletin.p_virt, + p_iov->bulletin.phys, size); + } + + OSAL_FREE(p_hwfn->p_dev, p_hwfn->vf_iov_info); + p_hwfn->vf_iov_info = OSAL_NULL; + + return rc; +} + +void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_mcast *p_filter_cmd) +{ + struct ecore_sp_vport_update_params sp_params; + int i; + + OSAL_MEMSET(&sp_params, 0, sizeof(sp_params)); + sp_params.update_approx_mcast_flg = 1; + + if (p_filter_cmd->opcode == ECORE_FILTER_ADD) { + for (i = 0; i < p_filter_cmd->num_mc_addrs; i++) { + u32 bit; + + bit = ecore_mcast_bin_from_mac(p_filter_cmd->mac[i]); + OSAL_SET_BIT(bit, sp_params.bins); + } + } + + ecore_vf_pf_vport_update(p_hwfn, &sp_params); +} + +enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_ucast + *p_ucast) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct vfpf_ucast_filter_tlv *req; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* Sanitize */ + if (p_ucast->opcode == ECORE_FILTER_MOVE) { + DP_NOTICE(p_hwfn, true, + "VFs don't support Moving of filters\n"); + return ECORE_INVAL; + } + + /* clear mailbox and prep first tlv */ + req = ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_UCAST_FILTER, sizeof(*req)); + req->opcode = (u8)p_ucast->opcode; + req->type = (u8)p_ucast->type; + OSAL_MEMCPY(req->mac, p_ucast->mac, ETH_ALEN); + req->vlan = p_ucast->vlan; + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_AGAIN; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + struct pfvf_def_resp_tlv *resp = &p_iov->pf2vf_reply->default_resp; + int rc; + + /* clear mailbox and prep first tlv */ + ecore_vf_pf_prep(p_hwfn, CHANNEL_TLV_INT_CLEANUP, + sizeof(struct vfpf_first_tlv)); + + /* add list termination tlv */ + ecore_add_tlv(p_hwfn, &p_iov->offset, + CHANNEL_TLV_LIST_END, + sizeof(struct channel_list_end_tlv)); + + rc = ecore_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp)); + if (rc) + return rc; + + if (resp->hdr.status != PFVF_STATUS_SUCCESS) + return ECORE_INVAL; + + return ECORE_SUCCESS; +} + +enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, + u8 *p_change) +{ + struct ecore_bulletin_content shadow; + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + u32 crc, crc_size = sizeof(p_iov->bulletin.p_virt->crc); + + *p_change = 0; + + /* Need to guarantee PF is not in the middle of writing it */ + OSAL_MEMCPY(&shadow, p_iov->bulletin.p_virt, p_iov->bulletin.size); + + /* If version did 
not update, no need to do anything */ + if (shadow.version == p_iov->bulletin_shadow.version) + return ECORE_SUCCESS; + + /* Verify the bulletin we see is valid */ + crc = ecore_crc32(0, (u8 *)&shadow + crc_size, + p_iov->bulletin.size - crc_size); + if (crc != shadow.crc) + return ECORE_AGAIN; + + /* Set the shadow bulletin and process it */ + OSAL_MEMCPY(&p_iov->bulletin_shadow, &shadow, p_iov->bulletin.size); + + DP_VERBOSE(p_hwfn, ECORE_MSG_IOV, + "Read a bulletin update %08x\n", shadow.version); + + *p_change = 1; + + return ECORE_SUCCESS; +} + +u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id) +{ + struct ecore_vf_iov *p_iov = p_hwfn->vf_iov_info; + + if (!p_iov) { + DP_NOTICE(p_hwfn, true, "vf_sriov_info isn't initialized\n"); + return 0; + } + + return p_iov->acquire_resp.resc.hw_sbs[sb_id].hw_sb_id; +} + +void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params *p_params, + struct ecore_bulletin_content *p_bulletin) +{ + OSAL_MEMSET(p_params, 0, sizeof(*p_params)); + + p_params->speed.autoneg = p_bulletin->req_autoneg; + p_params->speed.advertised_speeds = p_bulletin->req_adv_speed; + p_params->speed.forced_speed = p_bulletin->req_forced_speed; + p_params->pause.autoneg = p_bulletin->req_autoneg_pause; + p_params->pause.forced_rx = p_bulletin->req_forced_rx; + p_params->pause.forced_tx = p_bulletin->req_forced_tx; + p_params->loopback_mode = p_bulletin->req_loopback; +} + +void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params *params) +{ + __ecore_vf_get_link_params(p_hwfn, params, + &p_hwfn->vf_iov_info->bulletin_shadow); +} + +void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state *p_link, + struct ecore_bulletin_content *p_bulletin) +{ + OSAL_MEMSET(p_link, 0, sizeof(*p_link)); + + p_link->link_up = p_bulletin->link_up; + p_link->speed = p_bulletin->speed; + p_link->full_duplex = p_bulletin->full_duplex; + p_link->an = p_bulletin->autoneg; + p_link->an_complete = p_bulletin->autoneg_complete; + p_link->parallel_detection = p_bulletin->parallel_detection; + p_link->pfc_enabled = p_bulletin->pfc_enabled; + p_link->partner_adv_speed = p_bulletin->partner_adv_speed; + p_link->partner_tx_flow_ctrl_en = p_bulletin->partner_tx_flow_ctrl_en; + p_link->partner_rx_flow_ctrl_en = p_bulletin->partner_rx_flow_ctrl_en; + p_link->partner_adv_pause = p_bulletin->partner_adv_pause; + p_link->sfp_tx_fault = p_bulletin->sfp_tx_fault; +} + +void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state *link) +{ + __ecore_vf_get_link_state(p_hwfn, link, + &p_hwfn->vf_iov_info->bulletin_shadow); +} + +void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_capabilities *p_link_caps, + struct ecore_bulletin_content *p_bulletin) +{ + OSAL_MEMSET(p_link_caps, 0, sizeof(*p_link_caps)); + p_link_caps->speed_capabilities = p_bulletin->capability_speed; +} + +void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_capabilities *p_link_caps) +{ + __ecore_vf_get_link_caps(p_hwfn, p_link_caps, + &p_hwfn->vf_iov_info->bulletin_shadow); +} + +void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs) +{ + *num_rxqs = p_hwfn->vf_iov_info->acquire_resp.resc.num_rxqs; +} + +void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac) +{ + OSAL_MEMCPY(port_mac, + p_hwfn->vf_iov_info->acquire_resp.pfdev_info.port_mac, + ETH_ALEN); +} + +void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, + u8 
*num_vlan_filters) +{ + struct ecore_vf_iov *p_vf; + + p_vf = p_hwfn->vf_iov_info; + *num_vlan_filters = p_vf->acquire_resp.resc.num_vlan_filters; +} + +/* @DPDK */ +void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, + u32 *num_mac) +{ + struct ecore_vf_iov *p_vf; + + p_vf = p_hwfn->vf_iov_info; + *num_mac = p_vf->acquire_resp.resc.num_mac_filters; +} + +bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac) +{ + struct ecore_bulletin_content *bulletin; + + bulletin = &p_hwfn->vf_iov_info->bulletin_shadow; + if (!(bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED))) + return true; + + /* Forbid VF from changing a MAC enforced by PF */ + if (OSAL_MEMCMP(bulletin->mac, mac, ETH_ALEN)) + return false; + + return false; +} + +bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, + u8 *p_is_forced) +{ + struct ecore_bulletin_content *bulletin; + + bulletin = &hwfn->vf_iov_info->bulletin_shadow; + + if (bulletin->valid_bitmap & (1 << MAC_ADDR_FORCED)) { + if (p_is_forced) + *p_is_forced = 1; + } else if (bulletin->valid_bitmap & (1 << VFPF_BULLETIN_MAC_ADDR)) { + if (p_is_forced) + *p_is_forced = 0; + } else { + return false; + } + + OSAL_MEMCPY(dst_mac, bulletin->mac, ETH_ALEN); + + return true; +} + +bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid) +{ + struct ecore_bulletin_content *bulletin; + + bulletin = &hwfn->vf_iov_info->bulletin_shadow; + + if (!(bulletin->valid_bitmap & (1 << VLAN_ADDR_FORCED))) + return false; + + if (dst_pvid) + *dst_pvid = bulletin->pvid; + + return true; +} + +void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, u16 *fw_rev, + u16 *fw_eng) +{ + struct pf_vf_pfdev_info *info; + + info = &p_hwfn->vf_iov_info->acquire_resp.pfdev_info; + + *fw_major = info->fw_major; + *fw_minor = info->fw_minor; + *fw_rev = info->fw_rev; + *fw_eng = info->fw_eng; +} diff --git a/drivers/net/qede/base/ecore_vf.h b/drivers/net/qede/base/ecore_vf.h new file mode 100644 index 00000000..334b588c --- /dev/null +++ b/drivers/net/qede/base/ecore_vf.h @@ -0,0 +1,415 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ECORE_VF_H__ +#define __ECORE_VF_H__ + +#include "ecore_status.h" +#include "ecore_vf_api.h" +#include "ecore_l2_api.h" +#include "ecore_vfpf_if.h" + +#ifdef CONFIG_ECORE_SRIOV +/** + * + * @brief hw preparation for VF + * sends ACQUIRE message + * + * @param p_dev + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev *p_dev); + +/** + * + * @brief VF init in hw (equivalent to hw_init in PF) + * mark interrupts as enabled + * + * @param p_hwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn *p_hwfn); + +/** + * + * @brief VF - start the RX Queue by sending a message to the PF + * + * @param p_hwfn + * @param cid - zero based within the VF + * @param rx_queue_id - zero based within the VF + * @param sb - VF status block for this queue + * @param sb_index - Index within the status block + * @param bd_max_bytes - maximum number of bytes per bd + * @param bd_chain_phys_addr - physical address of bd chain + * @param cqe_pbl_addr - physical address of pbl + * @param cqe_pbl_size - pbl size + * @param pp_prod - pointer to the producer to be + * used in fasthwfn + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn *p_hwfn, + u8 rx_queue_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + u16 cqe_pbl_size, + void OSAL_IOMEM * *pp_prod); + +/** + * + * @brief VF - start the TX queue by sending a message to the + * PF. + * + * @param p_hwfn + * @param tx_queue_id - zero based within the VF + * @param sb - status block for this queue + * @param sb_index - index within the status block + * @param bd_chain_phys_addr - physical address of tx chain + * @param pp_doorbell - pointer to address to which to + * write the doorbell too.. 
+ * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t pbl_addr, + u16 pbl_size, + void OSAL_IOMEM * *pp_doorbell); + +/** + * + * @brief VF - stop the RX queue by sending a message to the PF + * + * @param p_hwfn + * @param rx_qid + * @param cqe_completion + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn *p_hwfn, + u16 rx_qid, bool cqe_completion); + +/** + * + * @brief VF - stop the TX queue by sending a message to the PF + * + * @param p_hwfn + * @param tx_qid + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn *p_hwfn, + u16 tx_qid); + +/** + * @brief VF - update the RX queue by sending a message to the + * PF + * + * @param p_hwfn + * @param rx_queue_id + * @param num_rxqs + * @param init_sge_ring + * @param comp_cqe_flg + * @param comp_event_flg + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t ecore_vf_pf_rxqs_update(struct ecore_hwfn *p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 comp_cqe_flg, + u8 comp_event_flg); + +/** + * + * @brief VF - send a vport update command + * + * @param p_hwfn + * @param params + * + * @return enum _ecore_status_t + */ +enum _ecore_status_t +ecore_vf_pf_vport_update(struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params); + +/** + * + * @brief VF - send a close message to PF + * + * @param p_hwfn + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn *p_hwfn); + +/** + * + * @brief VF - free vf`s memories + * + * @param p_hwfn + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn *p_hwfn); + +/** + * + * @brief ecore_vf_get_igu_sb_id - Get the IGU SB ID for a given + * sb_id. For VFs igu sbs don't have to be contiguous + * + * @param p_hwfn + * @param sb_id + * + * @return INLINE u16 + */ +u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, u16 sb_id); + +/** + * @brief ecore_vf_pf_vport_start - perform vport start for VF. 
+ * + * @param p_hwfn + * @param vport_id + * @param mtu + * @param inner_vlan_removal + * @param tpa_mode + * @param max_buffers_per_cqe, + * @param only_untagged - default behavior regarding vlan acceptance + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_vport_start(struct ecore_hwfn *p_hwfn, + u8 vport_id, + u16 mtu, + u8 inner_vlan_removal, + enum ecore_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, + u8 only_untagged); + +/** + * @brief ecore_vf_pf_vport_stop - stop the VF's vport + * + * @param p_hwfn + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_vport_stop(struct ecore_hwfn *p_hwfn); + +enum _ecore_status_t ecore_vf_pf_filter_ucast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_ucast + *p_param); + +void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_mcast *p_filter_cmd); + +/** + * @brief ecore_vf_pf_int_cleanup - clean the SB of the VF + * + * @param p_hwfn + * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_pf_int_cleanup(struct ecore_hwfn *p_hwfn); + +/** + * @brief - return the link params in a given bulletin board + * + * @param p_hwfn + * @param p_params - pointer to a struct to fill with link params + * @param p_bulletin + */ +void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params *p_params, + struct ecore_bulletin_content *p_bulletin); + +/** + * @brief - return the link state in a given bulletin board + * + * @param p_hwfn + * @param p_link - pointer to a struct to fill with link state + * @param p_bulletin + */ +void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state *p_link, + struct ecore_bulletin_content *p_bulletin); + +/** + * @brief - return the link capabilities in a given bulletin board + * + * @param p_hwfn + * @param p_link - pointer to a struct to fill with link capabilities + * @param p_bulletin + */ +void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_capabilities *p_link_caps, + struct ecore_bulletin_content *p_bulletin); + +#else +static OSAL_INLINE enum _ecore_status_t ecore_vf_hw_prepare(struct ecore_dev + *p_dev) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_init(struct ecore_hwfn + *p_hwfn) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_start(struct ecore_hwfn + *p_hwfn, + u8 rx_queue_id, + u16 sb, + u8 sb_index, + u16 bd_max_bytes, + dma_addr_t + bd_chain_phys_adr, + dma_addr_t + cqe_pbl_addr, + u16 cqe_pbl_size, + void OSAL_IOMEM * + *pp_prod) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_start(struct ecore_hwfn + *p_hwfn, + u16 tx_queue_id, + u16 sb, + u8 sb_index, + dma_addr_t + pbl_addr, + u16 pbl_size, + void OSAL_IOMEM * + *pp_doorbell) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxq_stop(struct ecore_hwfn + *p_hwfn, + u16 rx_qid, + bool + cqe_completion) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_txq_stop(struct ecore_hwfn + *p_hwfn, + u16 tx_qid) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_rxqs_update(struct + ecore_hwfn + * p_hwfn, + u16 rx_queue_id, + u8 num_rxqs, + u8 comp_cqe_flg, + u8 + comp_event_flg) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_update( + struct ecore_hwfn *p_hwfn, + struct ecore_sp_vport_update_params *p_params) +{ + return ECORE_INVAL; +} + +static 
OSAL_INLINE enum _ecore_status_t ecore_vf_pf_reset(struct ecore_hwfn + *p_hwfn) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_release(struct ecore_hwfn + *p_hwfn) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE u16 ecore_vf_get_igu_sb_id(struct ecore_hwfn *p_hwfn, + u16 sb_id) +{ + return 0; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_start( + struct ecore_hwfn *p_hwfn, u8 vport_id, u16 mtu, + u8 inner_vlan_removal, enum ecore_tpa_mode tpa_mode, + u8 max_buffers_per_cqe, u8 only_untagged) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_vport_stop( + struct ecore_hwfn *p_hwfn) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_filter_ucast( + struct ecore_hwfn *p_hwfn, struct ecore_filter_ucast *p_param) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE void ecore_vf_pf_filter_mcast(struct ecore_hwfn *p_hwfn, + struct ecore_filter_mcast + *p_filter_cmd) +{ +} + +static OSAL_INLINE enum _ecore_status_t ecore_vf_pf_int_cleanup(struct + ecore_hwfn + * p_hwfn) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE void __ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params + *p_params, + struct ecore_bulletin_content + *p_bulletin) +{ +} + +static OSAL_INLINE void __ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state + *p_link, + struct ecore_bulletin_content + *p_bulletin) +{ +} + +static OSAL_INLINE void __ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct + ecore_mcp_link_capabilities + * p_link_caps, + struct ecore_bulletin_content + *p_bulletin) +{ +} +#endif + +#endif /* __ECORE_VF_H__ */ diff --git a/drivers/net/qede/base/ecore_vf_api.h b/drivers/net/qede/base/ecore_vf_api.h new file mode 100644 index 00000000..f28b6860 --- /dev/null +++ b/drivers/net/qede/base/ecore_vf_api.h @@ -0,0 +1,200 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ECORE_VF_API_H__ +#define __ECORE_VF_API_H__ + +#include "ecore_sp_api.h" +#include "ecore_mcp_api.h" + +#ifdef CONFIG_ECORE_SRIOV +/** + * @brief Read the VF bulletin and act on it if needed + * + * @param p_hwfn + * @param p_change - ecore fills 1 iff bulletin board has changed, 0 otherwise. 
+ * + * @return enum _ecore_status + */ +enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn *p_hwfn, + u8 *p_change); + +/** + * @brief Get link parameters for VF from ecore + * + * @param p_hwfn + * @param params - the link params structure to be filled for the VF + */ +void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params *params); + +/** + * @brief Get link state for VF from ecore + * + * @param p_hwfn + * @param link - the link state structure to be filled for the VF + */ +void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state *link); + +/** + * @brief Get link capabilities for VF from ecore + * + * @param p_hwfn + * @param p_link_caps - the link capabilities structure to be filled for the VF + */ +void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_capabilities *p_link_caps); + +/** + * @brief Get number of Rx queues allocated for VF by ecore + * + * @param p_hwfn + * @param num_rxqs - allocated RX queues + */ +void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, u8 *num_rxqs); + +/** + * @brief Get port mac address for VF + * + * @param p_hwfn + * @param port_mac - destination location for port mac + */ +void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, u8 *port_mac); + +/** + * @brief Get number of VLAN filters allocated for VF by ecore + * + * @param p_hwfn + * @param num_rxqs - allocated VLAN filters + */ +void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, + u8 *num_vlan_filters); + +/** + * @brief Get number of MAC filters allocated for VF by ecore + * + * @param p_hwfn + * @param num_mac - allocated MAC filters + */ +void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, + u32 *num_mac_filters); + +/** + * @brief Check if VF can set a MAC address + * + * @param p_hwfn + * @param mac + * + * @return bool + */ +bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac); + +/** + * @brief Copy forced MAC address from bulletin board + * + * @param hwfn + * @param dst_mac + * @param p_is_forced - out param which indicate in case mac + * exist if it forced or not. + * + * @return bool - return true if mac exist and false if + * not. 
+ */ +bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn *hwfn, u8 *dst_mac, + u8 *p_is_forced); + +/** + * @brief Check if force vlan is set and copy the forced vlan + * from bulletin board + * + * @param hwfn + * @param dst_pvid + * @return bool + */ +bool ecore_vf_bulletin_get_forced_vlan(struct ecore_hwfn *hwfn, u16 *dst_pvid); + +/** + * @brief Set firmware version information in dev_info from VFs acquire response + * tlv + * + * @param p_hwfn + * @param fw_major + * @param fw_minor + * @param fw_rev + * @param fw_eng + */ +void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, + u16 *fw_major, + u16 *fw_minor, u16 *fw_rev, u16 *fw_eng); +#else +static OSAL_INLINE enum _ecore_status_t ecore_vf_read_bulletin(struct ecore_hwfn + *p_hwfn, + u8 *p_change) +{ + return ECORE_INVAL; +} + +static OSAL_INLINE void ecore_vf_get_link_params(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_params + *params) +{ +} + +static OSAL_INLINE void ecore_vf_get_link_state(struct ecore_hwfn *p_hwfn, + struct ecore_mcp_link_state + *link) +{ +} + +static OSAL_INLINE void ecore_vf_get_link_caps(struct ecore_hwfn *p_hwfn, + struct + ecore_mcp_link_capabilities + * p_link_caps) +{ +} + +static OSAL_INLINE void ecore_vf_get_num_rxqs(struct ecore_hwfn *p_hwfn, + u8 *num_rxqs) +{ +} + +static OSAL_INLINE void ecore_vf_get_port_mac(struct ecore_hwfn *p_hwfn, + u8 *port_mac) +{ +} + +static OSAL_INLINE void ecore_vf_get_num_vlan_filters(struct ecore_hwfn *p_hwfn, + u8 *num_vlan_filters) +{ +} + +static OSAL_INLINE void ecore_vf_get_num_mac_filters(struct ecore_hwfn *p_hwfn, + u32 *num_mac) +{ +} + +static OSAL_INLINE bool ecore_vf_check_mac(struct ecore_hwfn *p_hwfn, u8 *mac) +{ + return false; +} + +static OSAL_INLINE bool ecore_vf_bulletin_get_forced_mac(struct ecore_hwfn + *hwfn, u8 *dst_mac, + u8 *p_is_forced) +{ + return false; +} + +static OSAL_INLINE void ecore_vf_get_fw_version(struct ecore_hwfn *p_hwfn, + u16 *fw_major, u16 *fw_minor, + u16 *fw_rev, u16 *fw_eng) +{ +} +#endif +#endif diff --git a/drivers/net/qede/base/ecore_vfpf_if.h b/drivers/net/qede/base/ecore_vfpf_if.h new file mode 100644 index 00000000..2fa4d155 --- /dev/null +++ b/drivers/net/qede/base/ecore_vfpf_if.h @@ -0,0 +1,590 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef __ECORE_VF_PF_IF_H__ +#define __ECORE_VF_PF_IF_H__ + +#define T_ETH_INDIRECTION_TABLE_SIZE 128 +#define T_ETH_RSS_KEY_SIZE 10 +#ifndef aligned_u64 +#define aligned_u64 u64 +#endif + +/*********************************************** + * + * Common definitions for all HVs + * + **/ +struct vf_pf_resc_request { + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; /* No limit so superfluous */ + u16 padding; +}; + +struct hw_sb_info { + u16 hw_sb_id; /* aka absolute igu id, used to ack the sb */ + u8 sb_qid; /* used to update DHC for sb */ + u8 padding[5]; +}; + +/*********************************************** + * + * HW VF-PF channel definitions + * + * A.K.A VF-PF mailbox + * + **/ +#define TLV_BUFFER_SIZE 1024 +#define TLV_ALIGN sizeof(u64) +#define PF_VF_BULLETIN_SIZE 512 + +#define VFPF_RX_MASK_ACCEPT_NONE 0x00000000 +#define VFPF_RX_MASK_ACCEPT_MATCHED_UNICAST 0x00000001 +#define VFPF_RX_MASK_ACCEPT_MATCHED_MULTICAST 0x00000002 +#define VFPF_RX_MASK_ACCEPT_ALL_UNICAST 0x00000004 +#define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST 0x00000008 +#define VFPF_RX_MASK_ACCEPT_BROADCAST 0x00000010 +/* TODO: #define VFPF_RX_MASK_ACCEPT_ANY_VLAN 0x00000020 */ + +#define BULLETIN_CONTENT_SIZE (sizeof(struct pf_vf_bulletin_content)) +#define BULLETIN_ATTEMPTS 5 /* crc failures before throwing towel */ +#define BULLETIN_CRC_SEED 0 + +enum { + PFVF_STATUS_WAITING = 0, + PFVF_STATUS_SUCCESS, + PFVF_STATUS_FAILURE, + PFVF_STATUS_NOT_SUPPORTED, + PFVF_STATUS_NO_RESOURCE, + PFVF_STATUS_FORCED, +}; + +/* vf pf channel tlvs */ +/* general tlv header (used for both vf->pf request and pf->vf response) */ +struct channel_tlv { + u16 type; + u16 length; +}; + +/* header of first vf->pf tlv carries the offset used to calculate response + * buffer address + */ +struct vfpf_first_tlv { + struct channel_tlv tl; + u32 padding; + aligned_u64 reply_address; +}; + +/* header of pf->vf tlvs, carries the status of handling the request */ +struct pfvf_tlv { + struct channel_tlv tl; + u8 status; + u8 padding[3]; +}; + +/* response tlv used for most tlvs */ +struct pfvf_def_resp_tlv { + struct pfvf_tlv hdr; +}; + +/* used to terminate and pad a tlv list */ +struct channel_list_end_tlv { + struct channel_tlv tl; + u8 padding[4]; +}; + +/* Acquire */ +struct vfpf_acquire_tlv { + struct vfpf_first_tlv first_tlv; + + struct vf_pf_vfdev_info { +#define VFPF_ACQUIRE_CAP_OVERRIDE_FW_VER (1 << 0) + aligned_u64 capabilties; + u8 fw_major; + u8 fw_minor; + u8 fw_revision; + u8 fw_engineering; + u32 driver_version; + u16 opaque_fid; /* ME register value */ + u8 os_type; /* VFPF_ACQUIRE_OS_* value */ + u8 padding[5]; + } vfdev_info; + + struct vf_pf_resc_request resc_request; + + aligned_u64 bulletin_addr; + u32 bulletin_size; + u32 padding; +}; + +/* receive side scaling tlv */ +struct vfpf_vport_update_rss_tlv { + struct channel_tlv tl; + + u8 update_rss_flags; +#define VFPF_UPDATE_RSS_CONFIG_FLAG (1 << 0) +#define VFPF_UPDATE_RSS_CAPS_FLAG (1 << 1) +#define VFPF_UPDATE_RSS_IND_TABLE_FLAG (1 << 2) +#define VFPF_UPDATE_RSS_KEY_FLAG (1 << 3) + + u8 rss_enable; + u8 rss_caps; + u8 rss_table_size_log; /* The table size is 2 ^ rss_table_size_log */ + u16 rss_ind_table[T_ETH_INDIRECTION_TABLE_SIZE]; + u32 rss_key[T_ETH_RSS_KEY_SIZE]; +}; + +struct pfvf_storm_stats { + u32 address; + u32 len; +}; + +struct pfvf_stats_info { + struct pfvf_storm_stats mstats; + struct pfvf_storm_stats pstats; + struct pfvf_storm_stats tstats; + struct pfvf_storm_stats ustats; +}; + +/* acquire 
response tlv - carries the allocated resources */ +struct pfvf_acquire_resp_tlv { + struct pfvf_tlv hdr; + + struct pf_vf_pfdev_info { + u32 chip_num; + u32 mfw_ver; + + u16 fw_major; + u16 fw_minor; + u16 fw_rev; + u16 fw_eng; + + aligned_u64 capabilities; +#define PFVF_ACQUIRE_CAP_DEFAULT_UNTAGGED (1 << 0) + + u16 db_size; + u8 indices_per_sb; + u8 os_type; + + /* Thesee should match the PF's ecore_dev values */ + u16 chip_rev; + u8 dev_type; + + u8 padding; + + struct pfvf_stats_info stats_info; + + u8 port_mac[ETH_ALEN]; + u8 padding2[2]; + } pfdev_info; + + struct pf_vf_resc { + /* in case of status NO_RESOURCE in message hdr, pf will fill + * this struct with suggested amount of resources for next + * acquire request + */ +#define PFVF_MAX_QUEUES_PER_VF 16 +#define PFVF_MAX_SBS_PER_VF 16 + struct hw_sb_info hw_sbs[PFVF_MAX_SBS_PER_VF]; + u8 hw_qid[PFVF_MAX_QUEUES_PER_VF]; + u8 cid[PFVF_MAX_QUEUES_PER_VF]; + + u8 num_rxqs; + u8 num_txqs; + u8 num_sbs; + u8 num_mac_filters; + u8 num_vlan_filters; + u8 num_mc_filters; + u8 padding[2]; + } resc; + + u32 bulletin_size; + u32 padding; +}; + +/* Init VF */ +struct vfpf_init_tlv { + struct vfpf_first_tlv first_tlv; + aligned_u64 stats_addr; + + u16 rx_mask; + u16 tx_mask; + u8 drop_ttl0_flg; + u8 padding[3]; + +}; + +/* Setup Queue */ +struct vfpf_start_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + aligned_u64 rxq_addr; + aligned_u64 deprecated_sge_addr; + aligned_u64 cqe_pbl_addr; + + u16 cqe_pbl_size; + u16 hw_sb; + u16 rx_qid; + u16 hc_rate; /* desired interrupts per sec. */ + + u16 bd_max_bytes; + u16 stat_id; + u8 sb_index; + u8 padding[3]; + +}; + +struct vfpf_start_txq_tlv { + struct vfpf_first_tlv first_tlv; + + /* physical addresses */ + aligned_u64 pbl_addr; + u16 pbl_size; + u16 stat_id; + u16 tx_qid; + u16 hw_sb; + + u32 flags; /* VFPF_QUEUE_FLG_X flags */ + u16 hc_rate; /* desired interrupts per sec. 
*/ + u8 sb_index; + u8 padding[3]; +}; + +/* Stop RX Queue */ +struct vfpf_stop_rxqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 rx_qid; + u8 num_rxqs; + u8 cqe_completion; + u8 padding[4]; +}; + +/* Stop TX Queues */ +struct vfpf_stop_txqs_tlv { + struct vfpf_first_tlv first_tlv; + + u16 tx_qid; + u8 num_txqs; + u8 padding[5]; +}; + +struct vfpf_update_rxq_tlv { + struct vfpf_first_tlv first_tlv; + + aligned_u64 deprecated_sge_addr[PFVF_MAX_QUEUES_PER_VF]; + + u16 rx_qid; + u8 num_rxqs; + u8 flags; +#define VFPF_RXQ_UPD_INIT_SGE_DEPRECATE_FLAG (1 << 0) +#define VFPF_RXQ_UPD_COMPLETE_CQE_FLAG (1 << 1) +#define VFPF_RXQ_UPD_COMPLETE_EVENT_FLAG (1 << 2) + + u8 padding[4]; +}; + +/* Set Queue Filters */ +struct vfpf_q_mac_vlan_filter { + u32 flags; +#define VFPF_Q_FILTER_DEST_MAC_VALID 0x01 +#define VFPF_Q_FILTER_VLAN_TAG_VALID 0x02 +#define VFPF_Q_FILTER_SET_MAC 0x100 /* set/clear */ + + u8 mac[ETH_ALEN]; + u16 vlan_tag; + + u8 padding[4]; +}; + +/* Start a vport */ +struct vfpf_vport_start_tlv { + struct vfpf_first_tlv first_tlv; + + aligned_u64 sb_addr[PFVF_MAX_SBS_PER_VF]; + + u32 tpa_mode; + u16 dep1; + u16 mtu; + + u8 vport_id; + u8 inner_vlan_removal; + + u8 only_untagged; + u8 max_buffers_per_cqe; + + u8 padding[4]; +}; + +/* Extended tlvs - need to add rss, mcast, accept mode tlvs */ +struct vfpf_vport_update_activate_tlv { + struct channel_tlv tl; + u8 update_rx; + u8 update_tx; + u8 active_rx; + u8 active_tx; +}; + +struct vfpf_vport_update_tx_switch_tlv { + struct channel_tlv tl; + u8 tx_switching; + u8 padding[3]; +}; + +struct vfpf_vport_update_vlan_strip_tlv { + struct channel_tlv tl; + u8 remove_vlan; + u8 padding[3]; +}; + +struct vfpf_vport_update_mcast_bin_tlv { + struct channel_tlv tl; + u8 padding[4]; + + aligned_u64 bins[8]; +}; + +struct vfpf_vport_update_accept_param_tlv { + struct channel_tlv tl; + u8 update_rx_mode; + u8 update_tx_mode; + u8 rx_accept_filter; + u8 tx_accept_filter; +}; + +struct vfpf_vport_update_accept_any_vlan_tlv { + struct channel_tlv tl; + u8 update_accept_any_vlan_flg; + u8 accept_any_vlan; + + u8 padding[2]; +}; + +struct vfpf_vport_update_sge_tpa_tlv { + struct channel_tlv tl; + + u16 sge_tpa_flags; +#define VFPF_TPA_IPV4_EN_FLAG (1 << 0) +#define VFPF_TPA_IPV6_EN_FLAG (1 << 1) +#define VFPF_TPA_PKT_SPLIT_FLAG (1 << 2) +#define VFPF_TPA_HDR_DATA_SPLIT_FLAG (1 << 3) +#define VFPF_TPA_GRO_CONSIST_FLAG (1 << 4) + + u8 update_sge_tpa_flags; +#define VFPF_UPDATE_SGE_DEPRECATED_FLAG (1 << 0) +#define VFPF_UPDATE_TPA_EN_FLAG (1 << 1) +#define VFPF_UPDATE_TPA_PARAM_FLAG (1 << 2) + + u8 max_buffers_per_cqe; + + u16 deprecated_sge_buff_size; + u16 tpa_max_size; + u16 tpa_min_size_to_start; + u16 tpa_min_size_to_cont; + + u8 tpa_max_aggs_num; + u8 padding[7]; + +}; + +/* Primary tlv as a header for various extended tlvs for + * various functionalities in vport update ramrod. 
+ */ +struct vfpf_vport_update_tlv { + struct vfpf_first_tlv first_tlv; +}; + +struct vfpf_ucast_filter_tlv { + struct vfpf_first_tlv first_tlv; + + u8 opcode; + u8 type; + + u8 mac[ETH_ALEN]; + + u16 vlan; + u16 padding[3]; +}; + +struct tlv_buffer_size { + u8 tlv_buffer[TLV_BUFFER_SIZE]; +}; + +union vfpf_tlvs { + struct vfpf_first_tlv first_tlv; + struct vfpf_acquire_tlv acquire; + struct vfpf_init_tlv init; + struct vfpf_start_rxq_tlv start_rxq; + struct vfpf_start_txq_tlv start_txq; + struct vfpf_stop_rxqs_tlv stop_rxqs; + struct vfpf_stop_txqs_tlv stop_txqs; + struct vfpf_update_rxq_tlv update_rxq; + struct vfpf_vport_start_tlv start_vport; + struct vfpf_vport_update_tlv vport_update; + struct vfpf_ucast_filter_tlv ucast_filter; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +union pfvf_tlvs { + struct pfvf_def_resp_tlv default_resp; + struct pfvf_acquire_resp_tlv acquire_resp; + struct channel_list_end_tlv list_end; + struct tlv_buffer_size tlv_buf_size; +}; + +/* This is a structure which is allocated in the VF, which the PF may update + * when it deems it necessary to do so. The bulletin board is sampled + * periodically by the VF. A copy per VF is maintained in the PF (to prevent + * loss of data upon multiple updates (or the need for read modify write)). + */ +enum ecore_bulletin_bit { + /* Alert the VF that a forced MAC was set by the PF */ + MAC_ADDR_FORCED = 0, + + /* The VF should not access the vfpf channel */ + VFPF_CHANNEL_INVALID = 1, + + /* Alert the VF that a forced VLAN was set by the PF */ + VLAN_ADDR_FORCED = 2, + + /* Indicate that `default_only_untagged' contains actual data */ + VFPF_BULLETIN_UNTAGGED_DEFAULT = 3, + VFPF_BULLETIN_UNTAGGED_DEFAULT_FORCED = 4, + + /* Alert the VF that suggested mac was sent by the PF. + * MAC_ADDR will be disabled in case MAC_ADDR_FORCED is set + */ + VFPF_BULLETIN_MAC_ADDR = 5 +}; + +struct ecore_bulletin_content { + u32 crc; /* crc of structure to ensure is not in + * mid-update + */ + u32 version; + + aligned_u64 valid_bitmap; /* bitmap indicating wich fields + * hold valid values + */ + + u8 mac[ETH_ALEN]; /* used for MAC_ADDR or MAC_ADDR_FORCED */ + + u8 default_only_untagged; /* If valid, 1 => only untagged Rx + * if no vlan filter is configured. + */ + u8 padding; + + /* The following is a 'copy' of ecore_mcp_link_state, + * ecore_mcp_link_params and ecore_mcp_link_capabilities. Since it's + * possible the structs will increase further along the road we cannot + * have it here; Instead we need to have all of its fields. + */ + u8 req_autoneg; + u8 req_autoneg_pause; + u8 req_forced_rx; + u8 req_forced_tx; + u8 padding2[4]; + + u32 req_adv_speed; + u32 req_forced_speed; + u32 req_loopback; + u32 padding3; + + u8 link_up; + u8 full_duplex; + u8 autoneg; + u8 autoneg_complete; + u8 parallel_detection; + u8 pfc_enabled; + u8 partner_tx_flow_ctrl_en; + u8 partner_rx_flow_ctrl_en; + u8 partner_adv_pause; + u8 sfp_tx_fault; + u8 padding4[6]; + + u32 speed; + u32 partner_adv_speed; + + u32 capability_speed; + + /* Forced vlan */ + u16 pvid; + u16 padding5; +}; + +struct ecore_bulletin { + dma_addr_t phys; + struct ecore_bulletin_content *p_virt; + u32 size; +}; + +#ifndef print_enum +enum { +/*!!!!! 
Make sure to update STRINGS structure accordingly !!!!!*/ + + CHANNEL_TLV_NONE, /* ends tlv sequence */ + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_VPORT_START, + CHANNEL_TLV_VPORT_UPDATE, + CHANNEL_TLV_VPORT_TEARDOWN, + CHANNEL_TLV_START_RXQ, + CHANNEL_TLV_START_TXQ, + CHANNEL_TLV_STOP_RXQS, + CHANNEL_TLV_STOP_TXQS, + CHANNEL_TLV_UPDATE_RXQ, + CHANNEL_TLV_INT_CLEANUP, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_UCAST_FILTER, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, + CHANNEL_TLV_VPORT_UPDATE_MCAST, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, + CHANNEL_TLV_VPORT_UPDATE_RSS, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, + CHANNEL_TLV_MAX +/*!!!!! Make sure to update STRINGS structure accordingly !!!!!*/ +}; +extern const char *ecore_channel_tlvs_string[]; + +#else +print_enum(channel_tlvs, CHANNEL_TLV_NONE, /* ends tlv sequence */ + CHANNEL_TLV_ACQUIRE, + CHANNEL_TLV_VPORT_START, + CHANNEL_TLV_VPORT_UPDATE, + CHANNEL_TLV_VPORT_TEARDOWN, + CHANNEL_TLV_SETUP_RXQ, + CHANNEL_TLV_SETUP_TXQ, + CHANNEL_TLV_STOP_RXQS, + CHANNEL_TLV_STOP_TXQS, + CHANNEL_TLV_UPDATE_RXQ, + CHANNEL_TLV_INT_CLEANUP, + CHANNEL_TLV_CLOSE, + CHANNEL_TLV_RELEASE, + CHANNEL_TLV_LIST_END, + CHANNEL_TLV_UCAST_FILTER, + CHANNEL_TLV_VPORT_UPDATE_ACTIVATE, + CHANNEL_TLV_VPORT_UPDATE_TX_SWITCH, + CHANNEL_TLV_VPORT_UPDATE_VLAN_STRIP, + CHANNEL_TLV_VPORT_UPDATE_MCAST, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_PARAM, + CHANNEL_TLV_VPORT_UPDATE_RSS, + CHANNEL_TLV_VPORT_UPDATE_ACCEPT_ANY_VLAN, + CHANNEL_TLV_VPORT_UPDATE_SGE_TPA, CHANNEL_TLV_MAX); +#endif + +#endif /* __ECORE_VF_PF_IF_H__ */ diff --git a/drivers/net/qede/base/eth_common.h b/drivers/net/qede/base/eth_common.h new file mode 100644 index 00000000..046bbb20 --- /dev/null +++ b/drivers/net/qede/base/eth_common.h @@ -0,0 +1,526 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef __ETH_COMMON__ +#define __ETH_COMMON__ +/********************/ +/* ETH FW CONSTANTS */ +/********************/ +#define ETH_CACHE_LINE_SIZE 64 +#define ETH_RX_CQE_GAP 32 +#define ETH_MAX_RAMROD_PER_CON 8 +#define ETH_TX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_BD_PAGE_SIZE_BYTES 4096 +#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096 +#define ETH_RX_NUM_NEXT_PAGE_BDS 2 + +#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1 +#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18 +#define ETH_TX_MAX_LSO_HDR_NBD 4 +#define ETH_TX_MIN_BDS_PER_LSO_PKT 3 +#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3 +#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2 +#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2 +#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 12 + 8)) +#define ETH_TX_MAX_LSO_HDR_BYTES 510 +#define ETH_TX_LSO_WINDOW_BDS_NUM 18 +#define ETH_TX_LSO_WINDOW_MIN_LEN 9700 +#define ETH_TX_MAX_LSO_PAYLOAD_LEN 0xFFFF + +#define ETH_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS + +#define ETH_RX_MAX_BUFF_PER_PKT 5 + +/* num of MAC/VLAN filters */ +#define ETH_NUM_MAC_FILTERS 512 +#define ETH_NUM_VLAN_FILTERS 512 + +/* approx. 
multicast constants */ +#define ETH_MULTICAST_BIN_FROM_MAC_SEED 0 +#define ETH_MULTICAST_MAC_BINS 256 +#define ETH_MULTICAST_MAC_BINS_IN_REGS (ETH_MULTICAST_MAC_BINS / 32) + +/* ethernet vport update constants */ +#define ETH_FILTER_RULES_COUNT 10 +#define ETH_RSS_IND_TABLE_ENTRIES_NUM 128 +#define ETH_RSS_KEY_SIZE_REGS 10 +#define ETH_RSS_ENGINE_NUM_K2 207 +#define ETH_RSS_ENGINE_NUM_BB 127 + +/* TPA constants */ +#define ETH_TPA_MAX_AGGS_NUM 64 +#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT +#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6 +#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4 + +/* + * Interrupt coalescing TimeSet + */ +struct coalescing_timeset { + u8 timeset; + u8 valid /* Only if this flag is set, timeset will take effect */; +}; + +/* + * Destination port mode + */ +enum dest_port_mode { + DEST_PORT_PHY /* Send to physical port. */, + DEST_PORT_LOOPBACK /* Send to loopback port. */, + DEST_PORT_PHY_LOOPBACK /* Send to physical and loopback port. */, + DEST_PORT_DROP /* Drop the packet in PBF. */, + MAX_DEST_PORT_MODE +}; + +/* + * Ethernet address type + */ +enum eth_addr_type { + BROADCAST_ADDRESS, + MULTICAST_ADDRESS, + UNICAST_ADDRESS, + UNKNOWN_ADDRESS, + MAX_ETH_ADDR_TYPE +}; + +struct eth_tx_1st_bd_flags { + u8 bitfields; +#define ETH_TX_1ST_BD_FLAGS_START_BD_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT 0 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_FORCE_VLAN_MODE_SHIFT 1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT 2 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT 3 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT 4 +#define ETH_TX_1ST_BD_FLAGS_LSO_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_LSO_SHIFT 5 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_IP_CSUM_SHIFT 6 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_MASK 0x1 +#define ETH_TX_1ST_BD_FLAGS_TUNN_L4_CSUM_SHIFT 7 +}; + +/* + * The parsing information data for the first tx bd of a given packet. + */ +struct eth_tx_data_1st_bd { + __le16 vlan /* VLAN to insert to packet (if needed). */; + /* Number of BDs in packet. Should be at least 2 in non-LSO + * packet and at least 3 in LSO (or Tunnel with IPv6+ext) packet. + */ + u8 nbds; + struct eth_tx_1st_bd_flags bd_flags; + __le16 bitfields; +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_TUNN_CFG_OVERRIDE_SHIFT 0 +#define ETH_TX_DATA_1ST_BD_RESERVED0_MASK 0x1 +#define ETH_TX_DATA_1ST_BD_RESERVED0_SHIFT 1 +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_MASK 0x3FFF +#define ETH_TX_DATA_1ST_BD_FW_USE_ONLY_SHIFT 2 +}; + +/* + * The parsing information data for the second tx bd of a given packet. 
+ */ +struct eth_tx_data_2nd_bd { + __le16 tunn_ip_size; + __le16 bitfields1; +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_MASK 0xF +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6 +#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3 +#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_SHIFT 9 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_INNER_IPV6_SHIFT 11 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_IPV6_EXT_SHIFT 12 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_TUNN_IPV6_EXT_SHIFT 13 +#define ETH_TX_DATA_2ND_BD_L4_UDP_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_UDP_SHIFT 14 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_MASK 0x1 +#define ETH_TX_DATA_2ND_BD_L4_PSEUDO_CSUM_MODE_SHIFT 15 + __le16 bitfields2; +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_MASK 0x1FFF +#define ETH_TX_DATA_2ND_BD_L4_HDR_START_OFFSET_W_SHIFT 0 +#define ETH_TX_DATA_2ND_BD_RESERVED0_MASK 0x7 +#define ETH_TX_DATA_2ND_BD_RESERVED0_SHIFT 13 +}; + +/* + * Firmware data for L2-EDPM packet. + */ +struct eth_edpm_fw_data { + struct eth_tx_data_1st_bd data_1st_bd + /* Parsing information data from the 1st BD. */; + struct eth_tx_data_2nd_bd data_2nd_bd + /* Parsing information data from the 2nd BD. */; + __le32 reserved; +}; + +/* + * FW debug. + */ +struct eth_fast_path_cqe_fw_debug { + u8 reserved0 /* FW reserved. */; + u8 reserved1 /* FW reserved. */; + __le16 reserved2 /* FW reserved. */; +}; + +struct tunnel_parsing_flags { + u8 flags; +#define TUNNEL_PARSING_FLAGS_TYPE_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_TYPE_SHIFT 0 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_TENNANT_ID_EXIST_SHIFT 2 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_MASK 0x3 +#define TUNNEL_PARSING_FLAGS_NEXT_PROTOCOL_SHIFT 3 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_FIRSTHDRIPMATCH_SHIFT 5 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_FRAGMENT_SHIFT 6 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_MASK 0x1 +#define TUNNEL_PARSING_FLAGS_IPV4_OPTIONS_SHIFT 7 +}; + +/* + * Regular ETH Rx FP CQE. + */ +struct eth_fast_path_rx_reg_cqe { + u8 type /* CQE type */; + u8 bitfields; +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_REG_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED0_SHIFT 7 + __le16 pkt_len /* Total packet length (from the parser) */; + struct parsing_and_err_flags pars_flags + /* Parsing and error flags from the parser */; + __le16 vlan_tag /* 802.1q VLAN tag */; + __le32 rss_hash /* RSS hash result */; + __le16 len_on_first_bd /* Number of bytes placed on first BD */; + u8 placement_offset /* Offset of placement from BD start */; + struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */ + ; + u8 bd_num /* Number of BDs, used for packet */; + u8 reserved[7]; + struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. 
*/; + u8 reserved1[3]; + u8 flags; +#define ETH_FAST_PATH_RX_REG_CQE_VALID_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_SHIFT 0 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_FAST_PATH_RX_REG_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_MASK 0x3F +#define ETH_FAST_PATH_RX_REG_CQE_RESERVED2_SHIFT 2 +}; + +/* + * TPA-continue ETH Rx FP CQE. + */ +struct eth_fast_path_rx_tpa_cont_cqe { + u8 type /* CQE type */; + u8 tpa_agg_index /* TPA aggregation index */; + __le16 len_list[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] + /* List of the segment sizes */; + u8 reserved[5]; + u8 reserved1 /* FW reserved. */; + __le16 reserved2[ETH_TPA_CQE_CONT_LEN_LIST_SIZE] /* FW reserved. */; +}; + +/* + * TPA-end ETH Rx FP CQE . + */ +struct eth_fast_path_rx_tpa_end_cqe { + u8 type /* CQE type */; + u8 tpa_agg_index /* TPA aggregation index */; + __le16 total_packet_len /* Total aggregated packet length */; + u8 num_of_bds /* Total number of BDs comprising the packet */; + u8 end_reason /* Aggregation end reason. Use enum eth_tpa_end_reason */ + ; + __le16 num_of_coalesced_segs /* Number of coalesced TCP segments */; + __le32 ts_delta /* TCP timestamp delta */; + __le16 len_list[ETH_TPA_CQE_END_LEN_LIST_SIZE] + /* List of the segment sizes */; + u8 reserved1[3]; + u8 reserved2 /* FW reserved. */; + __le16 reserved3[ETH_TPA_CQE_END_LEN_LIST_SIZE] /* FW reserved. */; +}; + +/* + * TPA-start ETH Rx FP CQE. + */ +struct eth_fast_path_rx_tpa_start_cqe { + u8 type /* CQE type */; + u8 bitfields; +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_MASK 0x7 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RSS_HASH_TYPE_SHIFT 0 +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_MASK 0xF +#define ETH_FAST_PATH_RX_TPA_START_CQE_TC_SHIFT 3 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_MASK 0x1 +#define ETH_FAST_PATH_RX_TPA_START_CQE_RESERVED0_SHIFT 7 + __le16 seg_len /* Segment length (packetLen from the parser) */; + struct parsing_and_err_flags pars_flags + /* Parsing and error flags from the parser */; + __le16 vlan_tag /* 802.1q VLAN tag */; + __le32 rss_hash /* RSS hash result */; + __le16 len_on_first_bd /* Number of bytes placed on first BD */; + u8 placement_offset /* Offset of placement from BD start */; + struct tunnel_parsing_flags tunnel_pars_flags /* Tunnel Parsing Flags */ + ; + u8 tpa_agg_index /* TPA aggregation index */; + u8 header_len /* Packet L2+L3+L4 header length */; + __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE] + /* Additional BDs length list. */; + struct eth_fast_path_cqe_fw_debug fw_debug /* FW reserved. */; +}; + +/* + * The L4 pseudo checksum mode for Ethernet + */ +enum eth_l4_pseudo_checksum_mode { + ETH_L4_PSEUDO_CSUM_CORRECT_LENGTH + /* Pseudo Header checksum on packet is calculated + * with the correct packet length field. + */ + , + ETH_L4_PSEUDO_CSUM_ZERO_LENGTH + /* Pseudo Hdr checksum on packet is calc with zero len field. 
*/ + , + MAX_ETH_L4_PSEUDO_CHECKSUM_MODE +}; + +struct eth_rx_bd { + struct regpair addr /* single continues buffer */; +}; + +/* + * regular ETH Rx SP CQE + */ +struct eth_slow_path_rx_cqe { + u8 type /* CQE type */; + u8 ramrod_cmd_id; + u8 error_flag; + u8 reserved[25]; + __le16 echo; + u8 reserved1; + u8 flags; +#define ETH_SLOW_PATH_RX_CQE_VALID_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_SHIFT 0 +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_MASK 0x1 +#define ETH_SLOW_PATH_RX_CQE_VALID_TOGGLE_SHIFT 1 +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_MASK 0x3F +#define ETH_SLOW_PATH_RX_CQE_RESERVED2_SHIFT 2 +}; + +/* + * union for all ETH Rx CQE types + */ +union eth_rx_cqe { + struct eth_fast_path_rx_reg_cqe fast_path_regular /* Regular FP CQE */; + struct eth_fast_path_rx_tpa_start_cqe fast_path_tpa_start + /* TPA-start CQE */; + struct eth_fast_path_rx_tpa_cont_cqe fast_path_tpa_cont + /* TPA-continue CQE */; + struct eth_fast_path_rx_tpa_end_cqe fast_path_tpa_end /* TPA-end CQE */ + ; + struct eth_slow_path_rx_cqe slow_path /* SP CQE */; +}; + +/* + * ETH Rx CQE type + */ +enum eth_rx_cqe_type { + ETH_RX_CQE_TYPE_UNUSED, + ETH_RX_CQE_TYPE_REGULAR /* Regular FP ETH Rx CQE */, + ETH_RX_CQE_TYPE_SLOW_PATH /* Slow path ETH Rx CQE */, + ETH_RX_CQE_TYPE_TPA_START /* TPA start ETH Rx CQE */, + ETH_RX_CQE_TYPE_TPA_CONT /* TPA Continue ETH Rx CQE */, + ETH_RX_CQE_TYPE_TPA_END /* TPA end ETH Rx CQE */, + MAX_ETH_RX_CQE_TYPE +}; + +/* + * Wrapp for PD RX CQE used in order to cover full cache line when writing CQE + */ +struct eth_rx_pmd_cqe { + union eth_rx_cqe cqe /* CQE data itself */; + u8 reserved[ETH_RX_CQE_GAP]; +}; + +/* + * ETH Rx producers data + */ +struct eth_rx_prod_data { + __le16 bd_prod /* BD producer */; + __le16 cqe_prod /* CQE producer */; + __le16 reserved; + __le16 reserved1 /* FW reserved. */; +}; + +/* + * Aggregation end reason. + */ +enum eth_tpa_end_reason { + ETH_AGG_END_UNUSED, + ETH_AGG_END_SP_UPDATE /* SP configuration update */, + ETH_AGG_END_MAX_LEN + /* Maximum aggregation length or maximum buffer number used. */, + ETH_AGG_END_LAST_SEG + /* TCP PSH flag or TCP payload length below continue threshold. */, + ETH_AGG_END_TIMEOUT /* Timeout expiration. */, + ETH_AGG_END_NOT_CONSISTENT, + ETH_AGG_END_OUT_OF_ORDER, + ETH_AGG_END_NON_TPA_SEG, + MAX_ETH_TPA_END_REASON +}; + +/* + * Eth Tunnel Type + */ +enum eth_tunn_type { + ETH_TUNN_GENEVE /* GENEVE Tunnel. */, + ETH_TUNN_TTAG /* T-Tag Tunnel. */, + ETH_TUNN_GRE /* GRE Tunnel. */, + ETH_TUNN_VXLAN /* VXLAN Tunnel. */, + MAX_ETH_TUNN_TYPE +}; + +/* + * The first tx bd of a given packet + */ +struct eth_tx_1st_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_1st_bd data /* Parsing information data. */; +}; + +/* + * The second tx bd of a given packet + */ +struct eth_tx_2nd_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_2nd_bd data /* Parsing information data. */; +}; + +/* + * The parsing information data for the third tx bd of a given packet. + */ +struct eth_tx_data_3rd_bd { + __le16 lso_mss /* For LSO packet - the MSS in bytes. 
*/; + __le16 bitfields; +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_MASK 0xF +#define ETH_TX_DATA_3RD_BD_TCP_HDR_LEN_DW_SHIFT 0 +#define ETH_TX_DATA_3RD_BD_HDR_NBD_MASK 0xF +#define ETH_TX_DATA_3RD_BD_HDR_NBD_SHIFT 4 +#define ETH_TX_DATA_3RD_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_3RD_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_3RD_BD_RESERVED0_MASK 0x7F +#define ETH_TX_DATA_3RD_BD_RESERVED0_SHIFT 9 + u8 tunn_l4_hdr_start_offset_w; + u8 tunn_hdr_size_w; +}; + +/* + * The third tx bd of a given packet + */ +struct eth_tx_3rd_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_3rd_bd data /* Parsing information data. */; +}; + +/* + * Complementary information for the regular tx bd of a given packet. + */ +struct eth_tx_data_bd { + __le16 reserved0; + __le16 bitfields; +#define ETH_TX_DATA_BD_RESERVED1_MASK 0xFF +#define ETH_TX_DATA_BD_RESERVED1_SHIFT 0 +#define ETH_TX_DATA_BD_START_BD_MASK 0x1 +#define ETH_TX_DATA_BD_START_BD_SHIFT 8 +#define ETH_TX_DATA_BD_RESERVED2_MASK 0x7F +#define ETH_TX_DATA_BD_RESERVED2_SHIFT 9 + __le16 reserved3; +}; + +/* + * The common regular TX BD ring element + */ +struct eth_tx_bd { + struct regpair addr /* Single continuous buffer */; + __le16 nbytes /* Number of bytes in this BD. */; + struct eth_tx_data_bd data /* Complementary information. */; +}; + +union eth_tx_bd_types { + struct eth_tx_1st_bd first_bd /* The first tx bd of a given packet */; + struct eth_tx_2nd_bd second_bd /* The second tx bd of a given packet */ + ; + struct eth_tx_3rd_bd third_bd /* The third tx bd of a given packet */; + struct eth_tx_bd reg_bd /* The common non-special bd */; +}; + +/* + * Mstorm Queue Zone + */ +struct mstorm_eth_queue_zone { + struct eth_rx_prod_data rx_producers; + __le32 reserved[2]; +}; + +/* + * Ustorm Queue Zone + */ +struct ustorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset + /* Rx interrupt coalescing TimeSet */; + __le16 reserved[3]; +}; + +/* + * Ystorm Queue Zone + */ +struct ystorm_eth_queue_zone { + struct coalescing_timeset int_coalescing_timeset + /* Tx interrupt coalescing TimeSet */; + __le16 reserved[3]; +}; + +/* + * ETH doorbell data + */ +struct eth_db_data { + u8 params; +#define ETH_DB_DATA_DEST_MASK 0x3 +#define ETH_DB_DATA_DEST_SHIFT 0 +#define ETH_DB_DATA_AGG_CMD_MASK 0x3 +#define ETH_DB_DATA_AGG_CMD_SHIFT 2 +#define ETH_DB_DATA_BYPASS_EN_MASK 0x1 +#define ETH_DB_DATA_BYPASS_EN_SHIFT 4 +#define ETH_DB_DATA_RESERVED_MASK 0x1 +#define ETH_DB_DATA_RESERVED_SHIFT 5 +#define ETH_DB_DATA_AGG_VAL_SEL_MASK 0x3 +#define ETH_DB_DATA_AGG_VAL_SEL_SHIFT 6 + u8 agg_flags; + __le16 bd_prod; +}; + +#endif /* __ETH_COMMON__ */ diff --git a/drivers/net/qede/base/mcp_public.h b/drivers/net/qede/base/mcp_public.h new file mode 100644 index 00000000..71922654 --- /dev/null +++ b/drivers/net/qede/base/mcp_public.h @@ -0,0 +1,1205 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +/**************************************************************************** + * + * Name: mcp_public.h + * + * Description: MCP public data + * + * Created: 13/01/2013 yanivr + * + ****************************************************************************/ + +#ifndef MCP_PUBLIC_H +#define MCP_PUBLIC_H + +#define VF_MAX_STATIC 192 /* In case of AH */ + +#define MCP_GLOB_PATH_MAX 2 +#define MCP_PORT_MAX 2 /* Global */ +#define MCP_GLOB_PORT_MAX 4 /* Global */ +#define MCP_GLOB_FUNC_MAX 16 /* Global */ + +typedef u32 offsize_t; /* In DWORDS !!! */ +/* Offset from the beginning of the MCP scratchpad */ +#define OFFSIZE_OFFSET_SHIFT 0 +#define OFFSIZE_OFFSET_MASK 0x0000ffff +/* Size of specific element (not the whole array if any) */ +#define OFFSIZE_SIZE_SHIFT 16 +#define OFFSIZE_SIZE_MASK 0xffff0000 + +/* SECTION_OFFSET is calculating the offset in bytes out of offsize */ +#define SECTION_OFFSET(_offsize) \ +((((_offsize & OFFSIZE_OFFSET_MASK) >> OFFSIZE_OFFSET_SHIFT) << 2)) + +/* SECTION_SIZE is calculating the size in bytes out of offsize */ +#define SECTION_SIZE(_offsize) \ +(((_offsize & OFFSIZE_SIZE_MASK) >> OFFSIZE_SIZE_SHIFT) << 2) + +#define SECTION_ADDR(_offsize, idx) \ +(MCP_REG_SCRATCH + SECTION_OFFSET(_offsize) + (SECTION_SIZE(_offsize) * idx)) + +#define SECTION_OFFSIZE_ADDR(_pub_base, _section) \ +(_pub_base + offsetof(struct mcp_public_data, sections[_section])) + +/* PHY configuration */ +struct pmm_phy_cfg { + u32 speed; /* 0 = autoneg, 1000/10000/20000/25000/40000/50000/100000 */ +#define PMM_SPEED_AUTONEG 0 +#define PMM_SPEED_SMARTLINQ 0x8 + + u32 pause; /* bitmask */ +#define PMM_PAUSE_NONE 0x0 +#define PMM_PAUSE_AUTONEG 0x1 +#define PMM_PAUSE_RX 0x2 +#define PMM_PAUSE_TX 0x4 + + u32 adv_speed; /* Default should be the speed_cap_mask */ + u32 loopback_mode; +#define PMM_LOOPBACK_NONE 0 +#define PMM_LOOPBACK_INT_PHY 1 +#define PMM_LOOPBACK_EXT_PHY 2 +#define PMM_LOOPBACK_EXT 3 +#define PMM_LOOPBACK_MAC 4 +#define PMM_LOOPBACK_CNIG_AH_ONLY_0123 5 /* Port to itself */ +#define PMM_LOOPBACK_CNIG_AH_ONLY_2301 6 /* Port to Port */ + + /* features */ + u32 feature_config_flags; + +}; + +struct port_mf_cfg { + u32 dynamic_cfg; /* device control channel */ +#define PORT_MF_CFG_OV_TAG_MASK 0x0000ffff +#define PORT_MF_CFG_OV_TAG_SHIFT 0 +#define PORT_MF_CFG_OV_TAG_DEFAULT PORT_MF_CFG_OV_TAG_MASK + + u32 reserved[1]; +}; + +/* DO NOT add new fields in the middle + * MUST be synced with struct pmm_stats_map + */ +struct pmm_stats { + u64 r64; /* 0x00 (Offset 0x00 ) RX 64-byte frame counter */ + u64 r127; /* 0x01 (Offset 0x08 ) RX 65 to 127 byte frame counter */ + u64 r255; /* 0x02 (Offset 0x10 ) RX 128 to 255 byte frame counter */ + u64 r511; /* 0x03 (Offset 0x18 ) RX 256 to 511 byte frame counter */ + u64 r1023; /* 0x04 (Offset 0x20 ) RX 512 to 1023 byte frame counter */ + u64 r1518; /* 0x05 (Offset 0x28 ) RX 1024 to 1518 byte frame counter */ + u64 r1522; /* 0x06 (Offset 0x30 ) RX 1519 to 1522 byte VLAN-tagged */ + u64 r2047; /* 0x07 (Offset 0x38 ) RX 1519 to 2047 byte frame counter */ + u64 r4095; /* 0x08 (Offset 0x40 ) RX 2048 to 4095 byte frame counter */ + u64 r9216; /* 0x09 (Offset 0x48 ) RX 4096 to 9216 byte frame counter */ + u64 r16383; /* 0x0A (Offset 0x50 ) RX 9217 to 16383 byte frame ctr */ + u64 rfcs; /* 0x0F (Offset 0x58 ) RX FCS error frame counter */ + u64 rxcf; /* 0x10 (Offset 0x60 ) RX control frame counter */ + u64 rxpf; /* 0x11 (Offset 0x68 ) RX pause frame counter */ + u64 rxpp; /* 0x12 (Offset 0x70 ) RX PFC frame counter */ + u64 raln; /* 0x16 (Offset 
0x78 ) RX alignment error counter */ + u64 rfcr; /* 0x19 (Offset 0x80 ) RX false carrier counter */ + u64 rovr; /* 0x1A (Offset 0x88 ) RX oversized frame counter */ + u64 rjbr; /* 0x1B (Offset 0x90 ) RX jabber frame counter */ + u64 rund; /* 0x34 (Offset 0x98 ) RX undersized frame counter */ + u64 rfrg; /* 0x35 (Offset 0xa0 ) RX fragment counter */ + u64 t64; /* 0x40 (Offset 0xa8 ) TX 64-byte frame counter */ + u64 t127; /* 0x41 (Offset 0xb0 ) TX 65 to 127 byte frame counter */ + u64 t255; /* 0x42 (Offset 0xb8 ) TX 128 to 255 byte frame counter */ + u64 t511; /* 0x43 (Offset 0xc0 ) TX 256 to 511 byte frame counter */ + u64 t1023; /* 0x44 (Offset 0xc8 ) TX 512 to 1023 byte frame counter */ + u64 t1518; /* 0x45 (Offset 0xd0 ) TX 1024 to 1518 byte frame counter */ + u64 t2047; /* 0x47 (Offset 0xd8 ) TX 1519 to 2047 byte frame counter */ + u64 t4095; /* 0x48 (Offset 0xe0 ) TX 2048 to 4095 byte frame counter */ + u64 t9216; /* 0x49 (Offset 0xe8 ) TX 4096 to 9216 byte frame counter */ + u64 t16383; /* 0x4A (Offset 0xf0 ) TX 9217 to 16383 byte frame ctr */ + u64 txpf; /* 0x50 (Offset 0xf8 ) TX pause frame counter */ + u64 txpp; /* 0x51 (Offset 0x100) TX PFC frame counter */ + u64 tlpiec; /* 0x6C (Offset 0x108) Transmit Logical Type LLFC */ + u64 tncl; /* 0x6E (Offset 0x110) Transmit Total Collision Counter */ + u64 rbyte; /* 0x3d (Offset 0x118) RX byte counter */ + u64 rxuca; /* 0x0c (Offset 0x120) RX UC frame counter */ + u64 rxmca; /* 0x0d (Offset 0x128) RX MC frame counter */ + u64 rxbca; /* 0x0e (Offset 0x130) RX BC frame counter */ + u64 rxpok; /* 0x22 (Offset 0x138) RX good frame */ + u64 tbyte; /* 0x6f (Offset 0x140) TX byte counter */ + u64 txuca; /* 0x4d (Offset 0x148) TX UC frame counter */ + u64 txmca; /* 0x4e (Offset 0x150) TX MC frame counter */ + u64 txbca; /* 0x4f (Offset 0x158) TX BC frame counter */ + u64 txcf; /* 0x54 (Offset 0x160) TX control frame counter */ +}; + +struct brb_stats { + u64 brb_truncate[8]; + u64 brb_discard[8]; +}; + +struct port_stats { + struct brb_stats brb; + struct pmm_stats pmm; +}; + +/*-----+----------------------------------------------------------------------- + * Chip | Number and | Ports in| Ports in|2 PHY-s |# of ports|# of engines + * | rate of physical | team #1 | team #2 |are used|per path | (paths) + * | ports | | | | | + *======+==================+=========+=========+========+====================== + * BB | 1x100G | This is special mode, where there are 2 HW func + * BB | 2x10/20Gbps | 0,1 | NA | No | 1 | 1 + * BB | 2x40 Gbps | 0,1 | NA | Yes | 1 | 1 + * BB | 2x50Gbps | 0,1 | NA | No | 1 | 1 + * BB | 4x10Gbps | 0,2 | 1,3 | No | 1/2 | 1,2 + * BB | 4x10Gbps | 0,1 | 2,3 | No | 1/2 | 1,2 + * BB | 4x10Gbps | 0,3 | 1,2 | No | 1/2 | 1,2 + * BB | 4x10Gbps | 0,1,2,3 | NA | No | 1 | 1 + * AH | 2x10/20Gbps | 0,1 | NA | NA | 1 | NA + * AH | 4x10Gbps | 0,1 | 2,3 | NA | 2 | NA + * AH | 4x10Gbps | 0,2 | 1,3 | NA | 2 | NA + * AH | 4x10Gbps | 0,3 | 1,2 | NA | 2 | NA + * AH | 4x10Gbps | 0,1,2,3 | NA | NA | 1 | NA + *======+==================+=========+=========+========+======================= + */ + +#define CMT_TEAM0 0 +#define CMT_TEAM1 1 +#define CMT_TEAM_MAX 2 + +struct couple_mode_teaming { + u8 port_cmt[MCP_GLOB_PORT_MAX]; +#define PORT_CMT_IN_TEAM (1 << 0) + +#define PORT_CMT_PORT_ROLE (1 << 1) +#define PORT_CMT_PORT_INACTIVE (0 << 1) +#define PORT_CMT_PORT_ACTIVE (1 << 1) + +#define PORT_CMT_TEAM_MASK (1 << 2) +#define PORT_CMT_TEAM0 (0 << 2) +#define PORT_CMT_TEAM1 (1 << 2) +}; + +/************************************** + * LLDP and DCBX HSI 
structures + **************************************/ +#define LLDP_CHASSIS_ID_STAT_LEN 4 +#define LLDP_PORT_ID_STAT_LEN 4 +#define DCBX_MAX_APP_PROTOCOL 32 +#define MAX_SYSTEM_LLDP_TLV_DATA 32 + +typedef enum _lldp_agent_e { + LLDP_NEAREST_BRIDGE = 0, + LLDP_NEAREST_NON_TPMR_BRIDGE, + LLDP_NEAREST_CUSTOMER_BRIDGE, + LLDP_MAX_LLDP_AGENTS +} lldp_agent_e; + +struct lldp_config_params_s { + u32 config; +#define LLDP_CONFIG_TX_INTERVAL_MASK 0x000000ff +#define LLDP_CONFIG_TX_INTERVAL_SHIFT 0 +#define LLDP_CONFIG_HOLD_MASK 0x00000f00 +#define LLDP_CONFIG_HOLD_SHIFT 8 +#define LLDP_CONFIG_MAX_CREDIT_MASK 0x0000f000 +#define LLDP_CONFIG_MAX_CREDIT_SHIFT 12 +#define LLDP_CONFIG_ENABLE_RX_MASK 0x40000000 +#define LLDP_CONFIG_ENABLE_RX_SHIFT 30 +#define LLDP_CONFIG_ENABLE_TX_MASK 0x80000000 +#define LLDP_CONFIG_ENABLE_TX_SHIFT 31 + /* Holds local Chassis ID TLV header, subtype and 9B of payload. + * If firtst byte is 0, then we will use default chassis ID + */ + u32 local_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + /* Holds local Port ID TLV header, subtype and 9B of payload. + * If firtst byte is 0, then we will use default port ID + */ + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; +}; + +struct lldp_status_params_s { + u32 prefix_seq_num; + u32 status; /* TBD */ + /* Holds remote Chassis ID TLV header, subtype and 9B of payload. + */ + u32 local_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 peer_chassis_id[LLDP_CHASSIS_ID_STAT_LEN]; + /* Holds remote Port ID TLV header, subtype and 9B of payload. + */ + u32 peer_port_id[LLDP_PORT_ID_STAT_LEN]; + u32 suffix_seq_num; +}; + +struct dcbx_ets_feature { + u32 flags; +#define DCBX_ETS_ENABLED_MASK 0x00000001 +#define DCBX_ETS_ENABLED_SHIFT 0 +#define DCBX_ETS_WILLING_MASK 0x00000002 +#define DCBX_ETS_WILLING_SHIFT 1 +#define DCBX_ETS_ERROR_MASK 0x00000004 +#define DCBX_ETS_ERROR_SHIFT 2 +#define DCBX_ETS_CBS_MASK 0x00000008 +#define DCBX_ETS_CBS_SHIFT 3 +#define DCBX_ETS_MAX_TCS_MASK 0x000000f0 +#define DCBX_ETS_MAX_TCS_SHIFT 4 + u32 pri_tc_tbl[1]; +#define DCBX_CEE_STRICT_PRIORITY 0xf +#define DCBX_CEE_STRICT_PRIORITY_TC 0x7 + u32 tc_bw_tbl[2]; + u32 tc_tsa_tbl[2]; +#define DCBX_ETS_TSA_STRICT 0 +#define DCBX_ETS_TSA_CBS 1 +#define DCBX_ETS_TSA_ETS 2 +}; + +struct dcbx_app_priority_entry { + u32 entry; +#define DCBX_APP_PRI_MAP_MASK 0x000000ff +#define DCBX_APP_PRI_MAP_SHIFT 0 +#define DCBX_APP_PRI_0 0x01 +#define DCBX_APP_PRI_1 0x02 +#define DCBX_APP_PRI_2 0x04 +#define DCBX_APP_PRI_3 0x08 +#define DCBX_APP_PRI_4 0x10 +#define DCBX_APP_PRI_5 0x20 +#define DCBX_APP_PRI_6 0x40 +#define DCBX_APP_PRI_7 0x80 +#define DCBX_APP_SF_MASK 0x00000300 +#define DCBX_APP_SF_SHIFT 8 +#define DCBX_APP_SF_ETHTYPE 0 +#define DCBX_APP_SF_PORT 1 +#define DCBX_APP_PROTOCOL_ID_MASK 0xffff0000 +#define DCBX_APP_PROTOCOL_ID_SHIFT 16 +}; + +/* FW structure in BE */ +struct dcbx_app_priority_feature { + u32 flags; +#define DCBX_APP_ENABLED_MASK 0x00000001 +#define DCBX_APP_ENABLED_SHIFT 0 +#define DCBX_APP_WILLING_MASK 0x00000002 +#define DCBX_APP_WILLING_SHIFT 1 +#define DCBX_APP_ERROR_MASK 0x00000004 +#define DCBX_APP_ERROR_SHIFT 2 + /* Not in use + * #define DCBX_APP_DEFAULT_PRI_MASK 0x00000f00 + * #define DCBX_APP_DEFAULT_PRI_SHIFT 8 + */ +#define DCBX_APP_MAX_TCS_MASK 0x0000f000 +#define DCBX_APP_MAX_TCS_SHIFT 12 +#define DCBX_APP_NUM_ENTRIES_MASK 0x00ff0000 +#define DCBX_APP_NUM_ENTRIES_SHIFT 16 + struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL]; +}; + +/* FW structure in BE */ +struct dcbx_features { + /* PG feature */ + struct dcbx_ets_feature ets; + /* PFC feature 
*/ + u32 pfc; +#define DCBX_PFC_PRI_EN_BITMAP_MASK 0x000000ff +#define DCBX_PFC_PRI_EN_BITMAP_SHIFT 0 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_0 0x01 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_1 0x02 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_2 0x04 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_3 0x08 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_4 0x10 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_5 0x20 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_6 0x40 +#define DCBX_PFC_PRI_EN_BITMAP_PRI_7 0x80 + +#define DCBX_PFC_FLAGS_MASK 0x0000ff00 +#define DCBX_PFC_FLAGS_SHIFT 8 +#define DCBX_PFC_CAPS_MASK 0x00000f00 +#define DCBX_PFC_CAPS_SHIFT 8 +#define DCBX_PFC_MBC_MASK 0x00004000 +#define DCBX_PFC_MBC_SHIFT 14 +#define DCBX_PFC_WILLING_MASK 0x00008000 +#define DCBX_PFC_WILLING_SHIFT 15 +#define DCBX_PFC_ENABLED_MASK 0x00010000 +#define DCBX_PFC_ENABLED_SHIFT 16 +#define DCBX_PFC_ERROR_MASK 0x00020000 +#define DCBX_PFC_ERROR_SHIFT 17 + + /* APP feature */ + struct dcbx_app_priority_feature app; +}; + +struct dcbx_local_params { + u32 config; +#define DCBX_CONFIG_VERSION_MASK 0x00000003 +#define DCBX_CONFIG_VERSION_SHIFT 0 +#define DCBX_CONFIG_VERSION_DISABLED 0 +#define DCBX_CONFIG_VERSION_IEEE 1 +#define DCBX_CONFIG_VERSION_CEE 2 + + u32 flags; + struct dcbx_features features; +}; + +struct dcbx_mib { + u32 prefix_seq_num; + u32 flags; + /* + * #define DCBX_CONFIG_VERSION_MASK 0x00000003 + * #define DCBX_CONFIG_VERSION_SHIFT 0 + * #define DCBX_CONFIG_VERSION_DISABLED 0 + * #define DCBX_CONFIG_VERSION_IEEE 1 + * #define DCBX_CONFIG_VERSION_CEE 2 + */ + struct dcbx_features features; + u32 suffix_seq_num; +}; + +struct lldp_system_tlvs_buffer_s { + u16 valid; + u16 length; + u32 data[MAX_SYSTEM_LLDP_TLV_DATA]; +}; + +/**************************************/ +/* */ +/* P U B L I C G L O B A L */ +/* */ +/**************************************/ +struct public_global { + u32 max_path; /* 32bit is wasty, but this will be used often */ + u32 max_ports; /* (Global) 32bit is wasty, this will be used often */ +#define MODE_1P 1 /* TBD - NEED TO THINK OF A BETTER NAME */ +#define MODE_2P 2 +#define MODE_3P 3 +#define MODE_4P 4 + u32 debug_mb_offset; + u32 phymod_dbg_mb_offset; + struct couple_mode_teaming cmt; + s32 internal_temperature; + u32 mfw_ver; + u32 running_bundle_id; + s32 external_temperature; +}; + +/**************************************/ +/* */ +/* P U B L I C P A T H */ +/* */ +/**************************************/ + +/**************************************************************************** + * Shared Memory 2 Region * + ****************************************************************************/ +/* The fw_flr_ack is actually built in the following way: */ +/* 8 bit: PF ack */ +/* 128 bit: VF ack */ +/* 8 bit: ios_dis_ack */ +/* In order to maintain endianity in the mailbox hsi, we want to keep using */ +/* u32. The fw must have the VF right after the PF since this is how it */ +/* access arrays(it expects always the VF to reside after the PF, and that */ +/* makes the calculation much easier for it. 
) */ +/* In order to answer both limitations, and keep the struct small, the code */ +/* will abuse the structure defined here to achieve the actual partition */ +/* above */ +/****************************************************************************/ +struct fw_flr_mb { + u32 aggint; + u32 opgen_addr; + u32 accum_ack; /* 0..15:PF, 16..207:VF, 256..271:IOV_DIS */ +#define ACCUM_ACK_PF_BASE 0 +#define ACCUM_ACK_PF_SHIFT 0 + +#define ACCUM_ACK_VF_BASE 8 +#define ACCUM_ACK_VF_SHIFT 3 + +#define ACCUM_ACK_IOV_DIS_BASE 256 +#define ACCUM_ACK_IOV_DIS_SHIFT 8 + +}; + +struct public_path { + struct fw_flr_mb flr_mb; + /* + * mcp_vf_disabled is set by the MCP to indicate the driver about VFs + * which were disabled/flred + */ + u32 mcp_vf_disabled[VF_MAX_STATIC / 32]; /* 0x003c */ + + u32 process_kill; + /* Reset on mcp reset, and incremented for eveny process kill event. */ +#define PROCESS_KILL_COUNTER_MASK 0x0000ffff +#define PROCESS_KILL_COUNTER_SHIFT 0 +#define PROCESS_KILL_GLOB_AEU_BIT_MASK 0xffff0000 +#define PROCESS_KILL_GLOB_AEU_BIT_SHIFT 16 +#define GLOBAL_AEU_BIT(aeu_reg_id, aeu_bit) (aeu_reg_id * 32 + aeu_bit) +}; + +/**************************************/ +/* */ +/* P U B L I C P O R T */ +/* */ +/**************************************/ +#define FC_NPIV_WWPN_SIZE 8 +#define FC_NPIV_WWNN_SIZE 8 +struct dci_npiv_settings { + u8 npiv_wwpn[FC_NPIV_WWPN_SIZE]; + u8 npiv_wwnn[FC_NPIV_WWNN_SIZE]; +}; + +struct dci_fc_npiv_cfg { + /* hdr used internally by the MFW */ + u32 hdr; + u32 num_of_npiv; +}; + +#define MAX_NUMBER_NPIV 64 +struct dci_fc_npiv_tbl { + struct dci_fc_npiv_cfg fc_npiv_cfg; + struct dci_npiv_settings settings[MAX_NUMBER_NPIV]; +}; + +/**************************************************************************** + * Driver <-> FW Mailbox * + ****************************************************************************/ + +struct public_port { + u32 validity_map; /* 0x0 (4*2 = 0x8) */ + + /* validity bits */ +#define MCP_VALIDITY_PCI_CFG 0x00100000 +#define MCP_VALIDITY_MB 0x00200000 +#define MCP_VALIDITY_DEV_INFO 0x00400000 +#define MCP_VALIDITY_RESERVED 0x00000007 + + /* One licensing bit should be set */ +#define MCP_VALIDITY_LIC_KEY_IN_EFFECT_MASK 0x00000038 /* yaniv - tbd */ +#define MCP_VALIDITY_LIC_MANUF_KEY_IN_EFFECT 0x00000008 +#define MCP_VALIDITY_LIC_UPGRADE_KEY_IN_EFFECT 0x00000010 +#define MCP_VALIDITY_LIC_NO_KEY_IN_EFFECT 0x00000020 + + /* Active MFW */ +#define MCP_VALIDITY_ACTIVE_MFW_UNKNOWN 0x00000000 +#define MCP_VALIDITY_ACTIVE_MFW_MASK 0x000001c0 +#define MCP_VALIDITY_ACTIVE_MFW_NCSI 0x00000040 +#define MCP_VALIDITY_ACTIVE_MFW_NONE 0x000001c0 + + u32 link_status; +#define LINK_STATUS_LINK_UP 0x00000001 +#define LINK_STATUS_SPEED_AND_DUPLEX_MASK 0x0000001e +#define LINK_STATUS_SPEED_AND_DUPLEX_1000THD (1 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_1000TFD (2 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_10G (3 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_20G (4 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_40G (5 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_50G (6 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_100G (7 << 1) +#define LINK_STATUS_SPEED_AND_DUPLEX_25G (8 << 1) + +#define LINK_STATUS_AUTO_NEGOTIATE_ENABLED 0x00000020 + +#define LINK_STATUS_AUTO_NEGOTIATE_COMPLETE 0x00000040 +#define LINK_STATUS_PARALLEL_DETECTION_USED 0x00000080 + +#define LINK_STATUS_PFC_ENABLED 0x00000100 +#define LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE 0x00000200 +#define LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE 0x00000400 +#define LINK_STATUS_LINK_PARTNER_10G_CAPABLE 0x00000800 
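The link_status word above is decoded purely through these single-bit and multi-bit masks; a minimal sketch of how a consumer might test a couple of them follows (the helper name and how the dword is obtained are assumptions, and the u32 type is taken from the surrounding HSI typedefs — only the LINK_STATUS_* constants come from this header):

/* Illustrative sketch only: decode a link_status value read from the
 * public_port section. Not part of the patch above.
 */
static inline int example_link_is_40g(u32 link_status)
{
        if (!(link_status & LINK_STATUS_LINK_UP))
                return 0;
        /* Speed/duplex is a multi-bit field: mask it and compare the
         * result against the encoded values (e.g. 40G is 5 << 1).
         */
        return (link_status & LINK_STATUS_SPEED_AND_DUPLEX_MASK) ==
                LINK_STATUS_SPEED_AND_DUPLEX_40G;
}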
+#define LINK_STATUS_LINK_PARTNER_20G_CAPABLE 0x00001000 +#define LINK_STATUS_LINK_PARTNER_40G_CAPABLE 0x00002000 +#define LINK_STATUS_LINK_PARTNER_50G_CAPABLE 0x00004000 +#define LINK_STATUS_LINK_PARTNER_100G_CAPABLE 0x00008000 +#define LINK_STATUS_LINK_PARTNER_25G_CAPABLE 0x00010000 + +#define LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK 0x000C0000 +#define LINK_STATUS_LINK_PARTNER_NOT_PAUSE_CAPABLE (0 << 18) +#define LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE (1 << 18) +#define LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE (2 << 18) +#define LINK_STATUS_LINK_PARTNER_BOTH_PAUSE (3 << 18) + +#define LINK_STATUS_SFP_TX_FAULT 0x00100000 +#define LINK_STATUS_TX_FLOW_CONTROL_ENABLED 0x00200000 +#define LINK_STATUS_RX_FLOW_CONTROL_ENABLED 0x00400000 +#define LINK_STATUS_RX_SIGNAL_PRESENT 0x00800000 +#define LINK_STATUS_MAC_LOCAL_FAULT 0x01000000 +#define LINK_STATUS_MAC_REMOTE_FAULT 0x02000000 +#define LINK_STATUS_UNSUPPORTED_SPD_REQ 0x04000000 + + u32 link_status1; + u32 ext_phy_fw_version; + u32 drv_phy_cfg_addr; /* Points to pmm_phy_cfg (For READ-ONLY) */ + + u32 port_stx; + + u32 stat_nig_timer; + + struct port_mf_cfg port_mf_config; + struct port_stats stats; + + u32 media_type; +#define MEDIA_UNSPECIFIED 0x0 +#define MEDIA_SFPP_10G_FIBER 0x1 +#define MEDIA_XFP_FIBER 0x2 +#define MEDIA_DA_TWINAX 0x3 +#define MEDIA_BASE_T 0x4 +#define MEDIA_SFP_1G_FIBER 0x5 +#define MEDIA_MODULE_FIBER 0x6 +#define MEDIA_KR 0xf0 +#define MEDIA_NOT_PRESENT 0xff + + u32 lfa_status; +#define LFA_LINK_FLAP_REASON_OFFSET 0 +#define LFA_LINK_FLAP_REASON_MASK 0x000000ff +#define LFA_NO_REASON (0 << 0) +#define LFA_LINK_DOWN (1 << 0) +#define LFA_FORCE_INIT (1 << 1) +#define LFA_LOOPBACK_MISMATCH (1 << 2) +#define LFA_SPEED_MISMATCH (1 << 3) +#define LFA_FLOW_CTRL_MISMATCH (1 << 4) +#define LFA_ADV_SPEED_MISMATCH (1 << 5) +#define LINK_FLAP_AVOIDANCE_COUNT_OFFSET 8 +#define LINK_FLAP_AVOIDANCE_COUNT_MASK 0x0000ff00 +#define LINK_FLAP_COUNT_OFFSET 16 +#define LINK_FLAP_COUNT_MASK 0x00ff0000 + + u32 link_change_count; + + /* LLDP params */ + struct lldp_config_params_s lldp_config_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_status_params_s lldp_status_params[LLDP_MAX_LLDP_AGENTS]; + struct lldp_system_tlvs_buffer_s system_lldp_tlvs_buf; + + /* DCBX related MIB */ + struct dcbx_local_params local_admin_dcbx_mib; + struct dcbx_mib remote_dcbx_mib; + struct dcbx_mib operational_dcbx_mib; + + /* FC_NPIV table offset & size in NVRAM value of 0 means not present */ + u32 fc_npiv_nvram_tbl_addr; + u32 fc_npiv_nvram_tbl_size; + u32 transceiver_data; +#define PMM_TRANSCEIVER_STATE_MASK 0x000000FF +#define PMM_TRANSCEIVER_STATE_SHIFT 0x00000000 +#define PMM_TRANSCEIVER_STATE_UNPLUGGED 0x00000000 +#define PMM_TRANSCEIVER_STATE_PRESENT 0x00000001 +#define PMM_TRANSCEIVER_STATE_VALID 0x00000003 +#define PMM_TRANSCEIVER_STATE_UPDATING 0x00000008 +#define PMM_TRANSCEIVER_TYPE_MASK 0x0000FF00 +#define PMM_TRANSCEIVER_TYPE_SHIFT 0x00000008 +#define PMM_TRANSCEIVER_TYPE_NONE 0x00000000 +#define PMM_TRANSCEIVER_TYPE_UNKNOWN 0x000000FF +#define PMM_TRANSCEIVER_TYPE_1G_PCC 0x01 /* 1G Passive copper cable */ +#define PMM_TRANSCEIVER_TYPE_1G_ACC 0x02 /* 1G Active copper cable */ +#define PMM_TRANSCEIVER_TYPE_1G_LX 0x03 +#define PMM_TRANSCEIVER_TYPE_1G_SX 0x04 +#define PMM_TRANSCEIVER_TYPE_10G_SR 0x05 +#define PMM_TRANSCEIVER_TYPE_10G_LR 0x06 +#define PMM_TRANSCEIVER_TYPE_10G_LRM 0x07 +#define PMM_TRANSCEIVER_TYPE_10G_ER 0x08 +#define PMM_TRANSCEIVER_TYPE_10G_PCC 0x09 /* 10G Passive copper cable */ +#define PMM_TRANSCEIVER_TYPE_10G_ACC 0x0a /* 10G 
Active copper cable */ +#define PMM_TRANSCEIVER_TYPE_XLPPI 0x0b +#define PMM_TRANSCEIVER_TYPE_40G_LR4 0x0c +#define PMM_TRANSCEIVER_TYPE_40G_SR4 0x0d +#define PMM_TRANSCEIVER_TYPE_40G_CR4 0x0e +#define PMM_TRANSCEIVER_TYPE_100G_AOC 0x0f /* Active optical cable */ +#define PMM_TRANSCEIVER_TYPE_100G_SR4 0x10 +#define PMM_TRANSCEIVER_TYPE_100G_LR4 0x11 +#define PMM_TRANSCEIVER_TYPE_100G_ER4 0x12 +#define PMM_TRANSCEIVER_TYPE_100G_ACC 0x13 /* Active copper cable */ +#define PMM_TRANSCEIVER_TYPE_100G_CR4 0x14 +#define PMM_TRANSCEIVER_TYPE_4x10G_SR 0x15 +#define PMM_TRANSCEIVER_TYPE_25G_PCC_S 0x16 +#define PMM_TRANSCEIVER_TYPE_25G_ACC_S 0x17 +#define PMM_TRANSCEIVER_TYPE_25G_PCC_M 0x18 +#define PMM_TRANSCEIVER_TYPE_25G_ACC_M 0x19 +#define PMM_TRANSCEIVER_TYPE_25G_PCC_L 0x1a +#define PMM_TRANSCEIVER_TYPE_25G_ACC_L 0x1b +#define PMM_TRANSCEIVER_TYPE_25G_SR 0x1c +#define PMM_TRANSCEIVER_TYPE_25G_LR 0x1d +#define PMM_TRANSCEIVER_TYPE_25G_AOC 0x1e + +#define PMM_TRANSCEIVER_TYPE_4x10G 0x1d +#define PMM_TRANSCEIVER_TYPE_4x25G_CR 0x1e +#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40GR 0x30 +#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR 0x31 +#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR 0x32 +#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR 0x33 +#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR 0x34 +#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR 0x35 +#define PMM_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC 0x36 +}; + +/**************************************/ +/* */ +/* P U B L I C F U N C */ +/* */ +/**************************************/ + +struct public_func { + u32 dpdk_rsvd1[2]; + + /* MTU size per funciton is needed for the OV feature */ + u32 mtu_size; + /* 9 entires for the C2S PCP map for each inner VLAN PCP + 1 default */ + /* For PCP values 0-3 use the map lower */ + /* 0xFF000000 - PCP 0, 0x00FF0000 - PCP 1, + * 0x0000FF00 - PCP 2, 0x000000FF PCP 3 + */ + u32 c2s_pcp_map_lower; + /* For PCP values 4-7 use the map upper */ + /* 0xFF000000 - PCP 4, 0x00FF0000 - PCP 5, + * 0x0000FF00 - PCP 6, 0x000000FF PCP 7 + */ + u32 c2s_pcp_map_upper; + + /* For PCP default value get the MSB byte of the map default */ + u32 c2s_pcp_map_default; + + u32 reserved[4]; + + /* replace old mf_cfg */ + u32 config; + /* E/R/I/D */ + /* function 0 of each port cannot be hidden */ +#define FUNC_MF_CFG_FUNC_HIDE 0x00000001 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING 0x00000002 +#define FUNC_MF_CFG_PAUSE_ON_HOST_RING_SHIFT 0x00000001 + +#define FUNC_MF_CFG_PROTOCOL_MASK 0x000000f0 +#define FUNC_MF_CFG_PROTOCOL_SHIFT 4 +#define FUNC_MF_CFG_PROTOCOL_ETHERNET 0x00000000 +#define FUNC_MF_CFG_PROTOCOL_MAX 0x00000000 + + /* MINBW, MAXBW */ + /* value range - 0..100, increments in 1 % */ +#define FUNC_MF_CFG_MIN_BW_MASK 0x0000ff00 +#define FUNC_MF_CFG_MIN_BW_SHIFT 8 +#define FUNC_MF_CFG_MIN_BW_DEFAULT 0x00000000 +#define FUNC_MF_CFG_MAX_BW_MASK 0x00ff0000 +#define FUNC_MF_CFG_MAX_BW_SHIFT 16 +#define FUNC_MF_CFG_MAX_BW_DEFAULT 0x00640000 + + u32 status; +#define FUNC_STATUS_VLINK_DOWN 0x00000001 + + u32 mac_upper; /* MAC */ +#define FUNC_MF_CFG_UPPERMAC_MASK 0x0000ffff +#define FUNC_MF_CFG_UPPERMAC_SHIFT 0 +#define FUNC_MF_CFG_UPPERMAC_DEFAULT FUNC_MF_CFG_UPPERMAC_MASK + u32 mac_lower; +#define FUNC_MF_CFG_LOWERMAC_DEFAULT 0xffffffff + + u32 dpdk_rsvd2[4]; + + u32 ovlan_stag; /* tags */ +#define FUNC_MF_CFG_OV_STAG_MASK 0x0000ffff +#define FUNC_MF_CFG_OV_STAG_SHIFT 0 +#define FUNC_MF_CFG_OV_STAG_DEFAULT FUNC_MF_CFG_OV_STAG_MASK + + u32 pf_allocation; /* vf per pf */ + + u32 preserve_data; /* Will be used bt CCM 
*/ + + u32 driver_last_activity_ts; + + /* + * drv_ack_vf_disabled is set by the PF driver to ack handled disabled + * VFs + */ + u32 drv_ack_vf_disabled[VF_MAX_STATIC / 32]; /* 0x0044 */ + + u32 drv_id; +#define DRV_ID_PDA_COMP_VER_MASK 0x0000ffff +#define DRV_ID_PDA_COMP_VER_SHIFT 0 + +#define DRV_ID_MCP_HSI_VER_MASK 0x00ff0000 +#define DRV_ID_MCP_HSI_VER_SHIFT 16 +#define DRV_ID_MCP_HSI_VER_CURRENT (1 << DRV_ID_MCP_HSI_VER_SHIFT) + +#define DRV_ID_DRV_TYPE_MASK 0x7f000000 +#define DRV_ID_DRV_TYPE_SHIFT 24 +#define DRV_ID_DRV_TYPE_UNKNOWN (0 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_LINUX (1 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_WINDOWS (2 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_DIAG (3 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_PREBOOT (4 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_SOLARIS (5 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_VMWARE (6 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_FREEBSD (7 << DRV_ID_DRV_TYPE_SHIFT) +#define DRV_ID_DRV_TYPE_AIX (8 << DRV_ID_DRV_TYPE_SHIFT) + +#define DRV_ID_DRV_INIT_HW_MASK 0x80000000 +#define DRV_ID_DRV_INIT_HW_SHIFT 31 +#define DRV_ID_DRV_INIT_HW_FLAG (1 << DRV_ID_DRV_INIT_HW_SHIFT) +}; + +/**************************************/ +/* */ +/* P U B L I C M B */ +/* */ +/**************************************/ +/* This is the only section that the driver can write to, and each */ +/* Basically each driver request to set feature parameters, + * will be done using a different command, which will be linked + * to a specific data structure from the union below. + * For huge strucuture, the common blank structure should be used. + */ + +struct mcp_mac { + u32 mac_upper; /* Upper 16 bits are always zeroes */ + u32 mac_lower; +}; + +struct mcp_val64 { + u32 lo; + u32 hi; +}; + +struct mcp_file_att { + u32 nvm_start_addr; + u32 len; +}; + +#define MCP_DRV_VER_STR_SIZE 16 +#define MCP_DRV_VER_STR_SIZE_DWORD (MCP_DRV_VER_STR_SIZE / sizeof(u32)) +#define MCP_DRV_NVM_BUF_LEN 32 +struct drv_version_stc { + u32 version; + u8 name[MCP_DRV_VER_STR_SIZE - 4]; +}; + +/* statistics for ncsi */ +struct lan_stats_stc { + u64 ucast_rx_pkts; + u64 ucast_tx_pkts; + u32 fcs_err; + u32 rserved; +}; + +struct ocbb_data_stc { + u32 ocbb_host_addr; + u32 ocsd_host_addr; + u32 ocsd_req_update_interval; +}; + +union drv_union_data { + u32 ver_str[MCP_DRV_VER_STR_SIZE_DWORD]; /* LOAD_REQ */ + struct mcp_mac wol_mac; /* UNLOAD_DONE */ + + struct pmm_phy_cfg drv_phy_cfg; + + struct mcp_val64 val64; /* For PHY / AVS commands */ + + u8 raw_data[MCP_DRV_NVM_BUF_LEN]; + + struct mcp_file_att file_att; + + u32 ack_vf_disabled[VF_MAX_STATIC / 32]; + + struct drv_version_stc drv_version; + + struct lan_stats_stc lan_stats; + u32 dpdk_rsvd[3]; + struct ocbb_data_stc ocbb_info; + + /* ... 
*/ +}; + +struct public_drv_mb { + u32 drv_mb_header; +#define DRV_MSG_CODE_MASK 0xffff0000 +#define DRV_MSG_CODE_LOAD_REQ 0x10000000 +#define DRV_MSG_CODE_LOAD_DONE 0x11000000 +#define DRV_MSG_CODE_INIT_HW 0x12000000 +#define DRV_MSG_CODE_UNLOAD_REQ 0x20000000 +#define DRV_MSG_CODE_UNLOAD_DONE 0x21000000 +#define DRV_MSG_CODE_INIT_PHY 0x22000000 + /* Params - FORCE - Reinitialize the link regardless of LFA */ + /* - DONT_CARE - Don't flap the link if up */ +#define DRV_MSG_CODE_LINK_RESET 0x23000000 + + /* Vitaly: LLDP commands */ +#define DRV_MSG_CODE_SET_LLDP 0x24000000 +#define DRV_MSG_CODE_SET_DCBX 0x25000000 + /* OneView feature driver HSI */ +#define DRV_MSG_CODE_OV_UPDATE_CURR_CFG 0x26000000 +#define DRV_MSG_CODE_OV_UPDATE_BUS_NUM 0x27000000 +#define DRV_MSG_CODE_OV_UPDATE_BOOT_PROGRESS 0x28000000 +#define DRV_MSG_CODE_OV_UPDATE_STORM_FW_VER 0x29000000 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE 0x31000000 +#define DRV_MSG_CODE_BW_UPDATE_ACK 0x32000000 +#define DRV_MSG_CODE_OV_UPDATE_MTU 0x33000000 + +#define DRV_MSG_CODE_NIG_DRAIN 0x30000000 + +#define DRV_MSG_CODE_INITIATE_FLR 0x02000000 +#define DRV_MSG_CODE_VF_DISABLED_DONE 0xc0000000 +#define DRV_MSG_CODE_CFG_VF_MSIX 0xc0010000 +#define DRV_MSG_CODE_NVM_PUT_FILE_BEGIN 0x00010000 +#define DRV_MSG_CODE_NVM_PUT_FILE_DATA 0x00020000 +#define DRV_MSG_CODE_NVM_GET_FILE_ATT 0x00030000 +#define DRV_MSG_CODE_NVM_READ_NVRAM 0x00050000 +#define DRV_MSG_CODE_NVM_WRITE_NVRAM 0x00060000 +#define DRV_MSG_CODE_NVM_DEL_FILE 0x00080000 +#define DRV_MSG_CODE_MCP_RESET 0x00090000 +#define DRV_MSG_CODE_SET_SECURE_MODE 0x000a0000 +#define DRV_MSG_CODE_PHY_RAW_READ 0x000b0000 +#define DRV_MSG_CODE_PHY_RAW_WRITE 0x000c0000 +#define DRV_MSG_CODE_PHY_CORE_READ 0x000d0000 +#define DRV_MSG_CODE_PHY_CORE_WRITE 0x000e0000 +#define DRV_MSG_CODE_SET_VERSION 0x000f0000 +#define DRV_MSG_CODE_MCP_HALT 0x00100000 +#define DRV_MSG_CODE_PMD_DIAG_DUMP 0x00140000 +#define DRV_MSG_CODE_PMD_DIAG_EYE 0x00150000 +#define DRV_MSG_CODE_TRANSCEIVER_READ 0x00160000 +#define DRV_MSG_CODE_TRANSCEIVER_WRITE 0x00170000 + +#define DRV_MSG_CODE_SET_VMAC 0x00110000 +#define DRV_MSG_CODE_GET_VMAC 0x00120000 +#define DRV_MSG_CODE_VMAC_TYPE_MAC 1 +#define DRV_MSG_CODE_VMAC_TYPE_WWNN 2 +#define DRV_MSG_CODE_VMAC_TYPE_WWPN 3 + +#define DRV_MSG_CODE_GET_STATS 0x00130000 +#define DRV_MSG_CODE_STATS_TYPE_LAN 1 + +#define DRV_MSG_CODE_OCBB_DATA 0x00180000 +#define DRV_MSG_CODE_SET_BW 0x00190000 +#define DRV_MSG_CODE_MASK_PARITIES 0x001a0000 +#define DRV_MSG_CODE_INDUCE_FAILURE 0x001b0000 +#define DRV_MSG_FAN_FAILURE_TYPE (1 << 0) +#define DRV_MSG_TEMPERATURE_FAILURE_TYPE (1 << 1) + +#define DRV_MSG_CODE_GPIO_READ 0x001c0000 +#define DRV_MSG_CODE_GPIO_WRITE 0x001d0000 + +#define DRV_MSG_CODE_SET_LED_MODE 0x00200000 +#define DRV_MSG_CODE_EMPTY_MB 0x00220000 + +#define DRV_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 drv_mb_param; + /* UNLOAD_REQ params */ +#define DRV_MB_PARAM_UNLOAD_WOL_UNKNOWN 0x00000000 +#define DRV_MB_PARAM_UNLOAD_WOL_MCP 0x00000001 +#define DRV_MB_PARAM_UNLOAD_WOL_DISABLED 0x00000002 +#define DRV_MB_PARAM_UNLOAD_WOL_ENABLED 0x00000003 + + /* UNLOAD_DONE_params */ +#define DRV_MB_PARAM_UNLOAD_NON_D3_POWER 0x00000001 + + /* INIT_PHY params */ +#define DRV_MB_PARAM_INIT_PHY_FORCE 0x00000001 +#define DRV_MB_PARAM_INIT_PHY_DONT_CARE 0x00000002 + + /* LLDP / DCBX params */ +#define DRV_MB_PARAM_LLDP_SEND_MASK 0x00000001 +#define DRV_MB_PARAM_LLDP_SEND_SHIFT 0 +#define DRV_MB_PARAM_LLDP_AGENT_MASK 0x00000006 +#define DRV_MB_PARAM_LLDP_AGENT_SHIFT 1 +#define 
DRV_MB_PARAM_DCBX_NOTIFY_MASK 0x00000008 +#define DRV_MB_PARAM_DCBX_NOTIFY_SHIFT 3 + +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_MASK 0x000000FF +#define DRV_MB_PARAM_NIG_DRAIN_PERIOD_MS_SHIFT 0 + +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MFW 0x1 +#define DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_IMAGE 0x2 + +#define DRV_MB_PARAM_NVM_OFFSET_SHIFT 0 +#define DRV_MB_PARAM_NVM_OFFSET_MASK 0x00FFFFFF +#define DRV_MB_PARAM_NVM_LEN_SHIFT 24 +#define DRV_MB_PARAM_NVM_LEN_MASK 0xFF000000 + +#define DRV_MB_PARAM_PHY_ADDR_SHIFT 0 +#define DRV_MB_PARAM_PHY_ADDR_MASK 0x1FF0FFFF +#define DRV_MB_PARAM_PHY_LANE_SHIFT 16 +#define DRV_MB_PARAM_PHY_LANE_MASK 0x000F0000 +#define DRV_MB_PARAM_PHY_SELECT_PORT_SHIFT 29 +#define DRV_MB_PARAM_PHY_SELECT_PORT_MASK 0x20000000 +#define DRV_MB_PARAM_PHY_PORT_SHIFT 30 +#define DRV_MB_PARAM_PHY_PORT_MASK 0xc0000000 + +#define DRV_MB_PARAM_PHYMOD_LANE_SHIFT 0 +#define DRV_MB_PARAM_PHYMOD_LANE_MASK 0x000000FF +#define DRV_MB_PARAM_PHYMOD_SIZE_SHIFT 8 +#define DRV_MB_PARAM_PHYMOD_SIZE_MASK 0x000FFF00 + /* configure vf MSIX params */ +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT 0 +#define DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK 0x000000FF +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT 8 +#define DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK 0x0000FF00 + + /* OneView configuration parametres */ +#define DRV_MB_PARAM_OV_CURR_CFG_SHIFT 0 +#define DRV_MB_PARAM_OV_CURR_CFG_MASK 0x0000000F +#define DRV_MB_PARAM_OV_CURR_CFG_NONE 0 +#define DRV_MB_PARAM_OV_CURR_CFG_OS 1 +#define DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC 2 +#define DRV_MB_PARAM_OV_CURR_CFG_OTHER 3 +#define DRV_MB_PARAM_OV_CURR_CFG_VC_CLP 4 +#define DRV_MB_PARAM_OV_CURR_CFG_CNU 5 +#define DRV_MB_PARAM_OV_CURR_CFG_DCI 6 +#define DRV_MB_PARAM_OV_CURR_CFG_HII 7 + +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_SHIFT 0 +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_MASK 0x000000FF +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_NONE (1 << 0) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_TRARGET_FOUND (1 << 2) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_LOGGED_INTO_TGT (1 << 4) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_IMG_DOWNLOADED (1 << 5) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_PROG_OS_HANDOFF (1 << 6) +#define DRV_MB_PARAM_OV_UPDATE_BOOT_COMPLETED 0 + +#define DRV_MB_PARAM_OV_PCI_BUS_NUM_SHIFT 0 +#define DRV_MB_PARAM_OV_PCI_BUS_NUM_MASK 0x000000FF + +#define DRV_MB_PARAM_OV_STORM_FW_VER_SHIFT 0 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MASK 0xFFFFFFFF +#define DRV_MB_PARAM_OV_STORM_FW_VER_MAJOR_MASK 0xFF000000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_MINOR_MASK 0x00FF0000 +#define DRV_MB_PARAM_OV_STORM_FW_VER_BUILD_MASK 0x0000FF00 +#define DRV_MB_PARAM_OV_STORM_FW_VER_DROP_MASK 0x000000FF + +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_SHIFT 0 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_MASK 0xF +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_UNKNOWN 0x1 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED 0x2 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_LOADING 0x3 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED 0x4 +#define DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE 0x5 + +#define DRV_MB_PARAM_OV_MTU_SIZE_SHIFT 0 +#define DRV_MB_PARAM_OV_MTU_SIZE_MASK 0xFFFFFFFF + +#define DRV_MB_PARAM_SET_LED_MODE_OPER 0x0 +#define DRV_MB_PARAM_SET_LED_MODE_ON 0x1 +#define DRV_MB_PARAM_SET_LED_MODE_OFF 0x2 + +#define DRV_MB_PARAM_TRANSCEIVER_PORT_SHIFT 0 +#define DRV_MB_PARAM_TRANSCEIVER_PORT_MASK 0x00000003 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_SHIFT 2 +#define DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK 0x000000FC +#define DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_SHIFT 8 +#define 
DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK 0x0000FF00 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_SHIFT 16 +#define DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK 0xFFFF0000 + +#define DRV_MB_PARAM_GPIO_NUMBER_SHIFT 0 +#define DRV_MB_PARAM_GPIO_NUMBER_MASK 0x0000FFFF +#define DRV_MB_PARAM_GPIO_VALUE_SHIFT 16 +#define DRV_MB_PARAM_GPIO_VALUE_MASK 0xFFFF0000 + + u32 fw_mb_header; +#define FW_MSG_CODE_MASK 0xffff0000 +#define FW_MSG_CODE_DRV_LOAD_ENGINE 0x10100000 +#define FW_MSG_CODE_DRV_LOAD_PORT 0x10110000 +#define FW_MSG_CODE_DRV_LOAD_FUNCTION 0x10120000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_PDA 0x10200000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_HSI 0x10210000 +#define FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG 0x10220000 +#define FW_MSG_CODE_DRV_LOAD_DONE 0x11100000 +#define FW_MSG_CODE_DRV_UNLOAD_ENGINE 0x20110000 +#define FW_MSG_CODE_DRV_UNLOAD_PORT 0x20120000 +#define FW_MSG_CODE_DRV_UNLOAD_FUNCTION 0x20130000 +#define FW_MSG_CODE_DRV_UNLOAD_DONE 0x21100000 +#define FW_MSG_CODE_INIT_PHY_DONE 0x21200000 +#define FW_MSG_CODE_INIT_PHY_ERR_INVALID_ARGS 0x21300000 +#define FW_MSG_CODE_LINK_RESET_DONE 0x23000000 +#define FW_MSG_CODE_SET_LLDP_DONE 0x24000000 +#define FW_MSG_CODE_SET_LLDP_UNSUPPORTED_AGENT 0x24010000 +#define FW_MSG_CODE_SET_DCBX_DONE 0x25000000 +#define FW_MSG_CODE_UPDATE_CURR_CFG_DONE 0x26000000 +#define FW_MSG_CODE_UPDATE_BUS_NUM_DONE 0x27000000 +#define FW_MSG_CODE_UPDATE_BOOT_PROGRESS_DONE 0x28000000 +#define FW_MSG_CODE_UPDATE_STORM_FW_VER_DONE 0x29000000 +#define FW_MSG_CODE_UPDATE_DRIVER_STATE_DONE 0x31000000 +#define FW_MSG_CODE_DRV_MSG_CODE_BW_UPDATE_DONE 0x32000000 +#define FW_MSG_CODE_DRV_MSG_CODE_MTU_SIZE_DONE 0x33000000 +#define FW_MSG_CODE_NIG_DRAIN_DONE 0x30000000 +#define FW_MSG_CODE_VF_DISABLED_DONE 0xb0000000 +#define FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE 0xb0010000 +#define FW_MSG_CODE_FLR_ACK 0x02000000 +#define FW_MSG_CODE_FLR_NACK 0x02100000 +#define FW_MSG_CODE_SET_DRIVER_DONE 0x02200000 +#define FW_MSG_CODE_SET_VMAC_SUCCESS 0x02300000 +#define FW_MSG_CODE_SET_VMAC_FAIL 0x02400000 + +#define FW_MSG_CODE_NVM_OK 0x00010000 +#define FW_MSG_CODE_NVM_INVALID_MODE 0x00020000 +#define FW_MSG_CODE_NVM_PREV_CMD_WAS_NOT_FINISHED 0x00030000 +#define FW_MSG_CODE_NVM_FAILED_TO_ALLOCATE_PAGE 0x00040000 +#define FW_MSG_CODE_NVM_INVALID_DIR_FOUND 0x00050000 +#define FW_MSG_CODE_NVM_PAGE_NOT_FOUND 0x00060000 +#define FW_MSG_CODE_NVM_FAILED_PARSING_BNDLE_HEADER 0x00070000 +#define FW_MSG_CODE_NVM_FAILED_PARSING_IMAGE_HEADER 0x00080000 +#define FW_MSG_CODE_NVM_PARSING_OUT_OF_SYNC 0x00090000 +#define FW_MSG_CODE_NVM_FAILED_UPDATING_DIR 0x000a0000 +#define FW_MSG_CODE_NVM_FAILED_TO_FREE_PAGE 0x000b0000 +#define FW_MSG_CODE_NVM_FILE_NOT_FOUND 0x000c0000 +#define FW_MSG_CODE_NVM_OPERATION_FAILED 0x000d0000 +#define FW_MSG_CODE_NVM_FAILED_UNALIGNED 0x000e0000 +#define FW_MSG_CODE_NVM_BAD_OFFSET 0x000f0000 +#define FW_MSG_CODE_NVM_BAD_SIGNATURE 0x00100000 +#define FW_MSG_CODE_NVM_FILE_READ_ONLY 0x00200000 +#define FW_MSG_CODE_NVM_UNKNOWN_FILE 0x00300000 +#define FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK 0x00400000 +#define FW_MSG_CODE_MCP_RESET_REJECT 0x00600000 +#define FW_MSG_CODE_PHY_OK 0x00110000 +#define FW_MSG_CODE_PHY_ERROR 0x00120000 +#define FW_MSG_CODE_SET_SECURE_MODE_ERROR 0x00130000 +#define FW_MSG_CODE_SET_SECURE_MODE_OK 0x00140000 +#define FW_MSG_MODE_PHY_PRIVILEGE_ERROR 0x00150000 +#define FW_MSG_CODE_OK 0x00160000 +#define FW_MSG_CODE_LED_MODE_INVALID 0x00170000 +#define FW_MSG_CODE_PHY_DIAG_OK 0x00160000 +#define FW_MSG_CODE_PHY_DIAG_ERROR 0x00170000 +#define 
FW_MSG_CODE_INIT_HW_FAILED_TO_ALLOCATE_PAGE 0x00040000 +#define FW_MSG_CODE_INIT_HW_FAILED_BAD_STATE 0x00170000 +#define FW_MSG_CODE_INIT_HW_FAILED_TO_SET_WINDOW 0x000d0000 +#define FW_MSG_CODE_INIT_HW_FAILED_NO_IMAGE 0x000c0000 +#define FW_MSG_CODE_INIT_HW_FAILED_VERSION_MISMATCH 0x00100000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_OK 0x00160000 +#define FW_MSG_CODE_TRANSCEIVER_DIAG_ERROR 0x00170000 +#define FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT 0x00020000 +#define FW_MSG_CODE_TRANSCEIVER_BAD_BUFFER_SIZE 0x000f0000 +#define FW_MSG_CODE_GPIO_OK 0x00160000 +#define FW_MSG_CODE_GPIO_DIRECTION_ERR 0x00170000 +#define FW_MSG_CODE_GPIO_CTRL_ERR 0x00020000 +#define FW_MSG_CODE_GPIO_INVALID 0x000f0000 +#define FW_MSG_CODE_GPIO_INVALID_VALUE 0x00050000 + +#define FW_MSG_SEQ_NUMBER_MASK 0x0000ffff + + u32 fw_mb_param; + + u32 drv_pulse_mb; +#define DRV_PULSE_SEQ_MASK 0x00007fff +#define DRV_PULSE_SYSTEM_TIME_MASK 0xffff0000 + /* + * The system time is in the format of + * (year-2001)*12*32 + month*32 + day. + */ +#define DRV_PULSE_ALWAYS_ALIVE 0x00008000 + /* + * Indicate to the firmware not to go into the + * OS-absent when it is not getting driver pulse. + * This is used for debugging as well for PXE(MBA). + */ + + u32 mcp_pulse_mb; +#define MCP_PULSE_SEQ_MASK 0x00007fff +#define MCP_PULSE_ALWAYS_ALIVE 0x00008000 + /* Indicates to the driver not to assert due to lack + * of MCP response + */ +#define MCP_EVENT_MASK 0xffff0000 +#define MCP_EVENT_OTHER_DRIVER_RESET_REQ 0x00010000 + + union drv_union_data union_data; +}; + +/* MFW - DRV MB */ +/********************************************************************** + * Description + * Incremental Aggregative + * 8-bit MFW counter per message + * 8-bit ack-counter per message + * Capabilities + * Provides up to 256 aggregative message per type + * Provides 4 message types in dword + * Message type pointers to byte offset + * Backward Compatibility by using sizeof for the counters. + * No lock requires for 32bit messages + * Limitations: + * In case of messages greater than 32bit, a dedicated mechanism(e.g lock) + * is required to prevent data corruption. 
+ **********************************************************************/ +enum MFW_DRV_MSG_TYPE { + MFW_DRV_MSG_LINK_CHANGE, + MFW_DRV_MSG_FLR_FW_ACK_FAILED, + MFW_DRV_MSG_VF_DISABLED, + MFW_DRV_MSG_LLDP_DATA_UPDATED, + MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED, + MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED, + MFW_DRV_MSG_ERROR_RECOVERY, + MFW_DRV_MSG_BW_UPDATE, + MFW_DRV_MSG_S_TAG_UPDATE, + MFW_DRV_MSG_GET_LAN_STATS, + MFW_DRV_MSG_GET_FCOE_STATS, + MFW_DRV_MSG_GET_ISCSI_STATS, + MFW_DRV_MSG_GET_RDMA_STATS, + MFW_DRV_MSG_FAILURE_DETECTED, + MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE, + MFW_DRV_MSG_MAX +}; + +#define MFW_DRV_MSG_MAX_DWORDS(msgs) (((msgs - 1) >> 2) + 1) +#define MFW_DRV_MSG_DWORD(msg_id) (msg_id >> 2) +#define MFW_DRV_MSG_OFFSET(msg_id) ((msg_id & 0x3) << 3) +#define MFW_DRV_MSG_MASK(msg_id) (0xff << MFW_DRV_MSG_OFFSET(msg_id)) + +#ifdef BIG_ENDIAN /* Like MFW */ +#define DRV_ACK_MSG(msg_p, msg_id) \ +((u8)((u8 *)msg_p)[msg_id]++;) +#else +#define DRV_ACK_MSG(msg_p, msg_id) \ +((u8)((u8 *)msg_p)[((msg_id & ~3) | ((~msg_id) & 3))]++;) +#endif + +#define MFW_DRV_UPDATE(shmem_func, msg_id) \ +((u8)((u8 *)(MFW_MB_P(shmem_func)->msg))[msg_id]++;) + +struct public_mfw_mb { + u32 sup_msgs; /* Assigend with MFW_DRV_MSG_MAX */ + u32 msg[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; + u32 ack[MFW_DRV_MSG_MAX_DWORDS(MFW_DRV_MSG_MAX)]; +}; + +/**************************************/ +/* */ +/* P U B L I C D A T A */ +/* */ +/**************************************/ +enum public_sections { + PUBLIC_DRV_MB, /* Points to the first drv_mb of path0 */ + PUBLIC_MFW_MB, /* Points to the first mfw_mb of path0 */ + PUBLIC_GLOBAL, + PUBLIC_PATH, + PUBLIC_PORT, + PUBLIC_FUNC, + PUBLIC_MAX_SECTIONS +}; + +struct drv_ver_info_stc { + u32 ver; + u8 name[32]; +}; + +/* Runtime data needs about 1/2K. We use 2K to be on the safe side. + * Please make sure data does not exceed this size. + */ +#define NUM_RUNTIME_DWORDS 16 +struct drv_init_hw_stc { + u32 init_hw_bitmask[NUM_RUNTIME_DWORDS]; + u32 init_hw_data[NUM_RUNTIME_DWORDS * 32]; +}; + +struct mcp_public_data { + /* The sections fields is an array */ + u32 num_sections; + offsize_t sections[PUBLIC_MAX_SECTIONS]; + struct public_drv_mb drv_mb[MCP_GLOB_FUNC_MAX]; + struct public_mfw_mb mfw_mb[MCP_GLOB_FUNC_MAX]; + struct public_global global; + struct public_path path[MCP_GLOB_PATH_MAX]; + struct public_port port[MCP_GLOB_PORT_MAX]; + struct public_func func[MCP_GLOB_FUNC_MAX]; +}; + +#define I2C_TRANSCEIVER_ADDR 0xa0 +#define MAX_I2C_TRANSACTION_SIZE 16 +#define MAX_I2C_TRANSCEIVER_PAGE_SIZE 256 + +#endif /* MCP_PUBLIC_H */ diff --git a/drivers/net/qede/base/nvm_cfg.h b/drivers/net/qede/base/nvm_cfg.h new file mode 100644 index 00000000..7f1a60dd --- /dev/null +++ b/drivers/net/qede/base/nvm_cfg.h @@ -0,0 +1,919 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +/**************************************************************************** + * + * Name: nvm_cfg.h + * + * Description: NVM config file - Generated file from nvm cfg excel. + * DO NOT MODIFY !!! 
+ * + * Created: 1/14/2016 + * + ****************************************************************************/ + +#ifndef NVM_CFG_H +#define NVM_CFG_H + +struct nvm_cfg_mac_address { + u32 mac_addr_hi; +#define NVM_CFG_MAC_ADDRESS_HI_MASK 0x0000FFFF +#define NVM_CFG_MAC_ADDRESS_HI_OFFSET 0 + u32 mac_addr_lo; +}; + +/****************************************** + * nvm_cfg1 structs + ******************************************/ +struct nvm_cfg1_glob { + u32 generic_cont0; /* 0x0 */ +#define NVM_CFG1_GLOB_BOARD_SWAP_MASK 0x0000000F +#define NVM_CFG1_GLOB_BOARD_SWAP_OFFSET 0 +#define NVM_CFG1_GLOB_BOARD_SWAP_NONE 0x0 +#define NVM_CFG1_GLOB_BOARD_SWAP_PATH 0x1 +#define NVM_CFG1_GLOB_BOARD_SWAP_PORT 0x2 +#define NVM_CFG1_GLOB_BOARD_SWAP_BOTH 0x3 +#define NVM_CFG1_GLOB_MF_MODE_MASK 0x00000FF0 +#define NVM_CFG1_GLOB_MF_MODE_OFFSET 4 +#define NVM_CFG1_GLOB_MF_MODE_MF_ALLOWED 0x0 +#define NVM_CFG1_GLOB_MF_MODE_DEFAULT 0x1 +#define NVM_CFG1_GLOB_MF_MODE_SPIO4 0x2 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_0 0x3 +#define NVM_CFG1_GLOB_MF_MODE_NPAR1_5 0x4 +#define NVM_CFG1_GLOB_MF_MODE_NPAR2_0 0x5 +#define NVM_CFG1_GLOB_MF_MODE_BD 0x6 +#define NVM_CFG1_GLOB_MF_MODE_UFP 0x7 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_MASK 0x00001000 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_OFFSET 12 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_DISABLED 0x0 +#define NVM_CFG1_GLOB_FAN_FAILURE_ENFORCEMENT_ENABLED 0x1 +#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_MASK 0x001FE000 +#define NVM_CFG1_GLOB_AVS_MARGIN_LOW_OFFSET 13 +#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_MASK 0x1FE00000 +#define NVM_CFG1_GLOB_AVS_MARGIN_HIGH_OFFSET 21 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_MASK 0x20000000 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_OFFSET 29 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_DISABLED 0x0 +#define NVM_CFG1_GLOB_ENABLE_SRIOV_ENABLED 0x1 +#define NVM_CFG1_GLOB_ENABLE_ATC_MASK 0x40000000 +#define NVM_CFG1_GLOB_ENABLE_ATC_OFFSET 30 +#define NVM_CFG1_GLOB_ENABLE_ATC_DISABLED 0x0 +#define NVM_CFG1_GLOB_ENABLE_ATC_ENABLED 0x1 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_MASK 0x80000000 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_OFFSET 31 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_DISABLED 0x0 +#define NVM_CFG1_GLOB_CLOCK_SLOWDOWN_ENABLED 0x1 + u32 engineering_change[3]; /* 0x4 */ + u32 manufacturing_id; /* 0x10 */ + u32 serial_number[4]; /* 0x14 */ + u32 pcie_cfg; /* 0x24 */ +#define NVM_CFG1_GLOB_PCI_GEN_MASK 0x00000003 +#define NVM_CFG1_GLOB_PCI_GEN_OFFSET 0 +#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN1 0x0 +#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN2 0x1 +#define NVM_CFG1_GLOB_PCI_GEN_PCI_GEN3 0x2 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_MASK 0x00000004 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_OFFSET 2 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_DISABLED 0x0 +#define NVM_CFG1_GLOB_BEACON_WOL_ENABLED_ENABLED 0x1 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_MASK 0x00000018 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_OFFSET 3 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_L0S_L1_ENABLED 0x0 +#define NVM_CFG1_GLOB_ASPM_SUPPORT_L1_DISABLED 0x2 +#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_MASK 0x00000020 +#define NVM_CFG1_GLOB_RESERVED_MPREVENT_PCIE_L1_MENTRY_OFFSET 5 +#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_MASK 0x000003C0 +#define NVM_CFG1_GLOB_PCIE_G2_TX_AMPLITUDE_OFFSET 6 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_MASK 0x00001C00 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_OFFSET 10 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_HW 0x0 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_0DB 0x1 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_3_5DB 0x2 +#define NVM_CFG1_GLOB_PCIE_PREEMPHASIS_6_0DB 0x3 +#define 
NVM_CFG1_GLOB_WWN_NODE_PREFIX0_MASK 0x001FE000 +#define NVM_CFG1_GLOB_WWN_NODE_PREFIX0_OFFSET 13 +#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_MASK 0x1FE00000 +#define NVM_CFG1_GLOB_WWN_NODE_PREFIX1_OFFSET 21 +#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_MASK 0x60000000 +#define NVM_CFG1_GLOB_NCSI_PACKAGE_ID_OFFSET 29 + /* Set the duration, in seconds, fan failure signal should be + * sampled + */ +#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_MASK 0x80000000 +#define NVM_CFG1_GLOB_RESERVED_FAN_FAILURE_DURATION_OFFSET 31 + u32 mgmt_traffic; /* 0x28 */ +#define NVM_CFG1_GLOB_RESERVED60_MASK 0x00000001 +#define NVM_CFG1_GLOB_RESERVED60_OFFSET 0 +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_MASK 0x000001FE +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX0_OFFSET 1 +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_MASK 0x0001FE00 +#define NVM_CFG1_GLOB_WWN_PORT_PREFIX1_OFFSET 9 +#define NVM_CFG1_GLOB_SMBUS_ADDRESS_MASK 0x01FE0000 +#define NVM_CFG1_GLOB_SMBUS_ADDRESS_OFFSET 17 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_MASK 0x06000000 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_OFFSET 25 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_DISABLED 0x0 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_RMII 0x1 +#define NVM_CFG1_GLOB_SIDEBAND_MODE_SGMII 0x2 +#define NVM_CFG1_GLOB_AUX_MODE_MASK 0x78000000 +#define NVM_CFG1_GLOB_AUX_MODE_OFFSET 27 +#define NVM_CFG1_GLOB_AUX_MODE_DEFAULT 0x0 +#define NVM_CFG1_GLOB_AUX_MODE_SMBUS_ONLY 0x1 + /* Indicates whether external thermal sonsor is available */ +#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_MASK 0x80000000 +#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_OFFSET 31 +#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_DISABLED 0x0 +#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ENABLED 0x1 + u32 core_cfg; /* 0x2C */ +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_MASK 0x000000FF +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_OFFSET 0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X40G 0x0 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X50G 0x1 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X100G 0x2 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_F 0x3 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X10G_E 0x4 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_4X20G 0x5 +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X40G 0xB +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_2X25G 0xC +#define NVM_CFG1_GLOB_NETWORK_PORT_MODE_DE_1X25G 0xD +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_MASK 0x00000100 +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_OFFSET 8 +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_DISABLED 0x0 +#define NVM_CFG1_GLOB_EAGLE_ENFORCE_TX_FIR_CFG_ENABLED 0x1 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_MASK 0x00000200 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_OFFSET 9 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_DISABLED 0x0 +#define NVM_CFG1_GLOB_FALCON_ENFORCE_TX_FIR_CFG_ENABLED 0x1 +#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_MASK 0x0003FC00 +#define NVM_CFG1_GLOB_EAGLE_CORE_ADDR_OFFSET 10 +#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_MASK 0x03FC0000 +#define NVM_CFG1_GLOB_FALCON_CORE_ADDR_OFFSET 18 +#define NVM_CFG1_GLOB_AVS_MODE_MASK 0x1C000000 +#define NVM_CFG1_GLOB_AVS_MODE_OFFSET 26 +#define NVM_CFG1_GLOB_AVS_MODE_CLOSE_LOOP 0x0 +#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_CFG 0x1 +#define NVM_CFG1_GLOB_AVS_MODE_OPEN_LOOP_OTP 0x2 +#define NVM_CFG1_GLOB_AVS_MODE_DISABLED 0x3 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_MASK 0x60000000 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_OFFSET 29 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_DISABLED 0x0 +#define NVM_CFG1_GLOB_OVERRIDE_SECURE_MODE_ENABLED 0x1 + u32 e_lane_cfg1; /* 0x30 */ +#define 
NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F +#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 + u32 e_lane_cfg2; /* 0x34 */ +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 +#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 +#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 +#define NVM_CFG1_GLOB_SMBUS_MODE_MASK 0x00000F00 +#define NVM_CFG1_GLOB_SMBUS_MODE_OFFSET 8 +#define NVM_CFG1_GLOB_SMBUS_MODE_DISABLED 0x0 +#define NVM_CFG1_GLOB_SMBUS_MODE_100KHZ 0x1 +#define NVM_CFG1_GLOB_SMBUS_MODE_400KHZ 0x2 +#define NVM_CFG1_GLOB_NCSI_MASK 0x0000F000 +#define NVM_CFG1_GLOB_NCSI_OFFSET 12 +#define NVM_CFG1_GLOB_NCSI_DISABLED 0x0 +#define NVM_CFG1_GLOB_NCSI_ENABLED 0x1 + /* Maximum advertised pcie link width */ +#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_MASK 0x000F0000 +#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_OFFSET 16 +#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_16_LANES 0x0 +#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_1_LANE 0x1 +#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_2_LANES 0x2 +#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_4_LANES 0x3 +#define NVM_CFG1_GLOB_MAX_LINK_WIDTH_8_LANES 0x4 + /* ASPM L1 mode */ +#define NVM_CFG1_GLOB_ASPM_L1_MODE_MASK 0x00300000 +#define NVM_CFG1_GLOB_ASPM_L1_MODE_OFFSET 20 +#define NVM_CFG1_GLOB_ASPM_L1_MODE_FORCED 0x0 +#define NVM_CFG1_GLOB_ASPM_L1_MODE_DYNAMIC_LOW_LATENCY 0x1 +#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_MASK 0x01C00000 +#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_OFFSET 22 +#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_DISABLED 0x0 +#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_I2C 0x1 +#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_ONLY 0x2 +#define NVM_CFG1_GLOB_ON_CHIP_SENSOR_MODE_INT_EXT_SMBUS 0x3 +#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_MASK 0x06000000 +#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_OFFSET 25 +#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_DISABLE 0x0 +#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_INTERNAL 0x1 +#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_EXTERNAL 0x2 +#define NVM_CFG1_GLOB_TEMPERATURE_MONITORING_MODE_BOTH 0x3 + /* Set the PLDM sensor modes */ +#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_MASK 0x38000000 +#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_OFFSET 27 
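Every nvm_cfg1 field follows the same _MASK/_OFFSET pairing, so reading any of them reduces to a single mask-and-shift. A minimal sketch using the MF_MODE field of generic_cont0 is below (the helper name is hypothetical and u32 is assumed from the surrounding HSI typedefs; the NVM_CFG1_GLOB_MF_MODE_* constants are the ones defined earlier in this struct):

/* Illustrative sketch only: extract the multi-function mode from the
 * first dword of struct nvm_cfg1_glob. Not part of the patch above.
 */
static inline u32 example_nvm_cfg_mf_mode(u32 generic_cont0)
{
        /* Compare the result against NVM_CFG1_GLOB_MF_MODE_DEFAULT,
         * NVM_CFG1_GLOB_MF_MODE_NPAR1_0, etc.
         */
        return (generic_cont0 & NVM_CFG1_GLOB_MF_MODE_MASK) >>
                NVM_CFG1_GLOB_MF_MODE_OFFSET;
}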
+#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_INTERNAL 0x0 +#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_EXTERNAL 0x1 +#define NVM_CFG1_GLOB_PLDM_SENSOR_MODE_BOTH 0x2 + u32 f_lane_cfg1; /* 0x38 */ +#define NVM_CFG1_GLOB_RX_LANE0_SWAP_MASK 0x0000000F +#define NVM_CFG1_GLOB_RX_LANE0_SWAP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_MASK 0x000000F0 +#define NVM_CFG1_GLOB_RX_LANE1_SWAP_OFFSET 4 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_MASK 0x00000F00 +#define NVM_CFG1_GLOB_RX_LANE2_SWAP_OFFSET 8 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_MASK 0x0000F000 +#define NVM_CFG1_GLOB_RX_LANE3_SWAP_OFFSET 12 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_MASK 0x000F0000 +#define NVM_CFG1_GLOB_TX_LANE0_SWAP_OFFSET 16 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_MASK 0x00F00000 +#define NVM_CFG1_GLOB_TX_LANE1_SWAP_OFFSET 20 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_MASK 0x0F000000 +#define NVM_CFG1_GLOB_TX_LANE2_SWAP_OFFSET 24 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_MASK 0xF0000000 +#define NVM_CFG1_GLOB_TX_LANE3_SWAP_OFFSET 28 + u32 f_lane_cfg2; /* 0x3C */ +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_MASK 0x00000001 +#define NVM_CFG1_GLOB_RX_LANE0_POL_FLIP_OFFSET 0 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_MASK 0x00000002 +#define NVM_CFG1_GLOB_RX_LANE1_POL_FLIP_OFFSET 1 +#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_MASK 0x00000004 +#define NVM_CFG1_GLOB_RX_LANE2_POL_FLIP_OFFSET 2 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_MASK 0x00000008 +#define NVM_CFG1_GLOB_RX_LANE3_POL_FLIP_OFFSET 3 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_MASK 0x00000010 +#define NVM_CFG1_GLOB_TX_LANE0_POL_FLIP_OFFSET 4 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_MASK 0x00000020 +#define NVM_CFG1_GLOB_TX_LANE1_POL_FLIP_OFFSET 5 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_MASK 0x00000040 +#define NVM_CFG1_GLOB_TX_LANE2_POL_FLIP_OFFSET 6 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_MASK 0x00000080 +#define NVM_CFG1_GLOB_TX_LANE3_POL_FLIP_OFFSET 7 + /* Control the period between two successive checks */ +#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_TEMPERATURE_PERIOD_BETWEEN_CHECKS_OFFSET 8 + /* Set shutdown temperature */ +#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_SHUTDOWN_THRESHOLD_TEMPERATURE_OFFSET 16 + /* Set max. 
count for over operational temperature */ +#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_MASK 0xFF000000 +#define NVM_CFG1_GLOB_MAX_COUNT_OPER_THRESHOLD_OFFSET 24 + u32 eagle_preemphasis; /* 0x40 */ +#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 + u32 eagle_driver_current; /* 0x44 */ +#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 + u32 falcon_preemphasis; /* 0x48 */ +#define NVM_CFG1_GLOB_LANE0_PREEMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_PREEMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_PREEMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_PREEMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_PREEMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_PREEMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_PREEMP_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_PREEMP_OFFSET 24 + u32 falcon_driver_current; /* 0x4C */ +#define NVM_CFG1_GLOB_LANE0_AMP_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_AMP_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_AMP_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_AMP_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_AMP_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_AMP_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_AMP_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_AMP_OFFSET 24 + u32 pci_id; /* 0x50 */ +#define NVM_CFG1_GLOB_VENDOR_ID_MASK 0x0000FFFF +#define NVM_CFG1_GLOB_VENDOR_ID_OFFSET 0 + /* Set caution temperature */ +#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_CAUTION_THRESHOLD_TEMPERATURE_OFFSET 16 + /* Set external thermal sensor I2C address */ +#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_MASK 0xFF000000 +#define NVM_CFG1_GLOB_EXTERNAL_THERMAL_SENSOR_ADDRESS_OFFSET 24 + u32 pci_subsys_id; /* 0x54 */ +#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_MASK 0x0000FFFF +#define NVM_CFG1_GLOB_SUBSYSTEM_VENDOR_ID_OFFSET 0 +#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_MASK 0xFFFF0000 +#define NVM_CFG1_GLOB_SUBSYSTEM_DEVICE_ID_OFFSET 16 + u32 bar; /* 0x58 */ +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_MASK 0x0000000F +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_OFFSET 0 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_DISABLED 0x0 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2K 0x1 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4K 0x2 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8K 0x3 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16K 0x4 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32K 0x5 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_64K 0x6 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_128K 0x7 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_256K 0x8 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_512K 0x9 +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_1M 0xA +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_2M 0xB +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_4M 0xC +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_8M 0xD +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_16M 0xE +#define NVM_CFG1_GLOB_EXPANSION_ROM_SIZE_32M 0xF +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_MASK 0x000000F0 +#define 
NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_OFFSET 4 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_DISABLED 0x0 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4K 0x1 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8K 0x2 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16K 0x3 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32K 0x4 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64K 0x5 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_128K 0x6 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_256K 0x7 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_512K 0x8 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_1M 0x9 +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_2M 0xA +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_4M 0xB +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_8M 0xC +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_16M 0xD +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_32M 0xE +#define NVM_CFG1_GLOB_VF_PCI_BAR2_SIZE_64M 0xF +#define NVM_CFG1_GLOB_BAR2_SIZE_MASK 0x00000F00 +#define NVM_CFG1_GLOB_BAR2_SIZE_OFFSET 8 +#define NVM_CFG1_GLOB_BAR2_SIZE_DISABLED 0x0 +#define NVM_CFG1_GLOB_BAR2_SIZE_64K 0x1 +#define NVM_CFG1_GLOB_BAR2_SIZE_128K 0x2 +#define NVM_CFG1_GLOB_BAR2_SIZE_256K 0x3 +#define NVM_CFG1_GLOB_BAR2_SIZE_512K 0x4 +#define NVM_CFG1_GLOB_BAR2_SIZE_1M 0x5 +#define NVM_CFG1_GLOB_BAR2_SIZE_2M 0x6 +#define NVM_CFG1_GLOB_BAR2_SIZE_4M 0x7 +#define NVM_CFG1_GLOB_BAR2_SIZE_8M 0x8 +#define NVM_CFG1_GLOB_BAR2_SIZE_16M 0x9 +#define NVM_CFG1_GLOB_BAR2_SIZE_32M 0xA +#define NVM_CFG1_GLOB_BAR2_SIZE_64M 0xB +#define NVM_CFG1_GLOB_BAR2_SIZE_128M 0xC +#define NVM_CFG1_GLOB_BAR2_SIZE_256M 0xD +#define NVM_CFG1_GLOB_BAR2_SIZE_512M 0xE +#define NVM_CFG1_GLOB_BAR2_SIZE_1G 0xF + /* Set the duration, in seconds, fan failure signal should be + * sampled + */ +#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_MASK 0x0000F000 +#define NVM_CFG1_GLOB_FAN_FAILURE_DURATION_OFFSET 12 + u32 eagle_txfir_main; /* 0x5C */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 + u32 eagle_txfir_post; /* 0x60 */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 + u32 falcon_txfir_main; /* 0x64 */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_MAIN_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_MAIN_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_MAIN_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_MASK 0xFF000000 +#define NVM_CFG1_GLOB_LANE3_TXFIR_MAIN_OFFSET 24 + u32 falcon_txfir_post; /* 0x68 */ +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_MASK 0x000000FF +#define NVM_CFG1_GLOB_LANE0_TXFIR_POST_OFFSET 0 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_LANE1_TXFIR_POST_OFFSET 8 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_LANE2_TXFIR_POST_OFFSET 16 +#define NVM_CFG1_GLOB_LANE3_TXFIR_POST_MASK 0xFF000000 +#define 
NVM_CFG1_GLOB_LANE3_TXFIR_POST_OFFSET 24 + u32 manufacture_ver; /* 0x6C */ +#define NVM_CFG1_GLOB_MANUF0_VER_MASK 0x0000003F +#define NVM_CFG1_GLOB_MANUF0_VER_OFFSET 0 +#define NVM_CFG1_GLOB_MANUF1_VER_MASK 0x00000FC0 +#define NVM_CFG1_GLOB_MANUF1_VER_OFFSET 6 +#define NVM_CFG1_GLOB_MANUF2_VER_MASK 0x0003F000 +#define NVM_CFG1_GLOB_MANUF2_VER_OFFSET 12 +#define NVM_CFG1_GLOB_MANUF3_VER_MASK 0x00FC0000 +#define NVM_CFG1_GLOB_MANUF3_VER_OFFSET 18 +#define NVM_CFG1_GLOB_MANUF4_VER_MASK 0x3F000000 +#define NVM_CFG1_GLOB_MANUF4_VER_OFFSET 24 + u32 manufacture_time; /* 0x70 */ +#define NVM_CFG1_GLOB_MANUF0_TIME_MASK 0x0000003F +#define NVM_CFG1_GLOB_MANUF0_TIME_OFFSET 0 +#define NVM_CFG1_GLOB_MANUF1_TIME_MASK 0x00000FC0 +#define NVM_CFG1_GLOB_MANUF1_TIME_OFFSET 6 +#define NVM_CFG1_GLOB_MANUF2_TIME_MASK 0x0003F000 +#define NVM_CFG1_GLOB_MANUF2_TIME_OFFSET 12 + u32 led_global_settings; /* 0x74 */ +#define NVM_CFG1_GLOB_LED_SWAP_0_MASK 0x0000000F +#define NVM_CFG1_GLOB_LED_SWAP_0_OFFSET 0 +#define NVM_CFG1_GLOB_LED_SWAP_1_MASK 0x000000F0 +#define NVM_CFG1_GLOB_LED_SWAP_1_OFFSET 4 +#define NVM_CFG1_GLOB_LED_SWAP_2_MASK 0x00000F00 +#define NVM_CFG1_GLOB_LED_SWAP_2_OFFSET 8 +#define NVM_CFG1_GLOB_LED_SWAP_3_MASK 0x0000F000 +#define NVM_CFG1_GLOB_LED_SWAP_3_OFFSET 12 + u32 generic_cont1; /* 0x78 */ +#define NVM_CFG1_GLOB_AVS_DAC_CODE_MASK 0x000003FF +#define NVM_CFG1_GLOB_AVS_DAC_CODE_OFFSET 0 + u32 mbi_version; /* 0x7C */ +#define NVM_CFG1_GLOB_MBI_VERSION_0_MASK 0x000000FF +#define NVM_CFG1_GLOB_MBI_VERSION_0_OFFSET 0 +#define NVM_CFG1_GLOB_MBI_VERSION_1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_MBI_VERSION_1_OFFSET 8 +#define NVM_CFG1_GLOB_MBI_VERSION_2_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_MBI_VERSION_2_OFFSET 16 + u32 mbi_date; /* 0x80 */ + u32 misc_sig; /* 0x84 */ + /* Define the GPIO mapping to switch i2c mux */ +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_MASK 0x000000FF +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_0_OFFSET 0 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO_1_OFFSET 8 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__NA 0x0 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO0 0x1 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO1 0x2 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO2 0x3 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO3 0x4 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO4 0x5 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO5 0x6 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO6 0x7 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO7 0x8 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO8 0x9 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO9 0xA +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO10 0xB +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO11 0xC +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO12 0xD +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO13 0xE +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO14 0xF +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO15 0x10 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO16 0x11 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO17 0x12 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO18 0x13 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO19 0x14 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO20 0x15 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO21 0x16 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO22 0x17 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO23 0x18 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO24 0x19 +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO25 0x1A +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO26 0x1B +#define 
NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO27 0x1C +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO28 0x1D +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO29 0x1E +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO30 0x1F +#define NVM_CFG1_GLOB_I2C_MUX_SEL_GPIO__GPIO31 0x20 + u32 device_capabilities; /* 0x88 */ +#define NVM_CFG1_GLOB_DEVICE_CAPABILITIES_ETHERNET 0x1 + u32 power_dissipated; /* 0x8C */ +#define NVM_CFG1_GLOB_POWER_DIS_D0_MASK 0x000000FF +#define NVM_CFG1_GLOB_POWER_DIS_D0_OFFSET 0 +#define NVM_CFG1_GLOB_POWER_DIS_D1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_POWER_DIS_D1_OFFSET 8 +#define NVM_CFG1_GLOB_POWER_DIS_D2_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_POWER_DIS_D2_OFFSET 16 +#define NVM_CFG1_GLOB_POWER_DIS_D3_MASK 0xFF000000 +#define NVM_CFG1_GLOB_POWER_DIS_D3_OFFSET 24 + u32 power_consumed; /* 0x90 */ +#define NVM_CFG1_GLOB_POWER_CONS_D0_MASK 0x000000FF +#define NVM_CFG1_GLOB_POWER_CONS_D0_OFFSET 0 +#define NVM_CFG1_GLOB_POWER_CONS_D1_MASK 0x0000FF00 +#define NVM_CFG1_GLOB_POWER_CONS_D1_OFFSET 8 +#define NVM_CFG1_GLOB_POWER_CONS_D2_MASK 0x00FF0000 +#define NVM_CFG1_GLOB_POWER_CONS_D2_OFFSET 16 +#define NVM_CFG1_GLOB_POWER_CONS_D3_MASK 0xFF000000 +#define NVM_CFG1_GLOB_POWER_CONS_D3_OFFSET 24 + u32 efi_version; /* 0x94 */ + u32 reserved[42]; /* 0x98 */ +}; + +struct nvm_cfg1_path { + u32 reserved[30]; /* 0x0 */ +}; + +struct nvm_cfg1_port { + u32 reserved__m_relocated_to_option_123; /* 0x0 */ + u32 reserved__m_relocated_to_option_124; /* 0x4 */ + u32 generic_cont0; /* 0x8 */ +#define NVM_CFG1_PORT_LED_MODE_MASK 0x000000FF +#define NVM_CFG1_PORT_LED_MODE_OFFSET 0 +#define NVM_CFG1_PORT_LED_MODE_MAC1 0x0 +#define NVM_CFG1_PORT_LED_MODE_PHY1 0x1 +#define NVM_CFG1_PORT_LED_MODE_PHY2 0x2 +#define NVM_CFG1_PORT_LED_MODE_PHY3 0x3 +#define NVM_CFG1_PORT_LED_MODE_MAC2 0x4 +#define NVM_CFG1_PORT_LED_MODE_PHY4 0x5 +#define NVM_CFG1_PORT_LED_MODE_PHY5 0x6 +#define NVM_CFG1_PORT_LED_MODE_PHY6 0x7 +#define NVM_CFG1_PORT_LED_MODE_MAC3 0x8 +#define NVM_CFG1_PORT_LED_MODE_PHY7 0x9 +#define NVM_CFG1_PORT_LED_MODE_PHY8 0xA +#define NVM_CFG1_PORT_LED_MODE_PHY9 0xB +#define NVM_CFG1_PORT_LED_MODE_MAC4 0xC +#define NVM_CFG1_PORT_LED_MODE_PHY10 0xD +#define NVM_CFG1_PORT_LED_MODE_PHY11 0xE +#define NVM_CFG1_PORT_LED_MODE_PHY12 0xF +#define NVM_CFG1_PORT_DCBX_MODE_MASK 0x000F0000 +#define NVM_CFG1_PORT_DCBX_MODE_OFFSET 16 +#define NVM_CFG1_PORT_DCBX_MODE_DISABLED 0x0 +#define NVM_CFG1_PORT_DCBX_MODE_IEEE 0x1 +#define NVM_CFG1_PORT_DCBX_MODE_CEE 0x2 +#define NVM_CFG1_PORT_DCBX_MODE_DYNAMIC 0x3 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_MASK 0x00F00000 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_OFFSET 20 +#define NVM_CFG1_PORT_DEFAULT_ENABLED_PROTOCOLS_ETHERNET 0x1 + u32 pcie_cfg; /* 0xC */ +#define NVM_CFG1_PORT_RESERVED15_MASK 0x00000007 +#define NVM_CFG1_PORT_RESERVED15_OFFSET 0 + u32 features; /* 0x10 */ +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_MASK 0x00000001 +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_OFFSET 0 +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_DISABLED 0x0 +#define NVM_CFG1_PORT_ENABLE_WOL_ON_ACPI_PATTERN_ENABLED 0x1 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_MASK 0x00000002 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_OFFSET 1 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_DISABLED 0x0 +#define NVM_CFG1_PORT_MAGIC_PACKET_WOL_ENABLED 0x1 + u32 speed_cap_mask; /* 0x14 */ +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_MASK 0x0000FFFF +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_OFFSET 0 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G 0x1 +#define 
NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G 0x40 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_MASK 0xFFFF0000 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_OFFSET 16 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_1G 0x1 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_10G 0x2 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_25G 0x8 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_40G 0x10 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_50G 0x20 +#define NVM_CFG1_PORT_MFW_SPEED_CAPABILITY_MASK_100G 0x40 + u32 link_settings; /* 0x18 */ +#define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK 0x0000000F +#define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_DRV_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_MASK 0x00000070 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_OFFSET 4 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_DRV_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_MASK 0x00000780 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_OFFSET 7 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_MFW_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_MASK 0x00003800 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_OFFSET 11 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_AUTONEG 0x1 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_RX 0x2 +#define NVM_CFG1_PORT_MFW_FLOW_CONTROL_TX 0x4 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_MASK 0x00004000 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_OFFSET 14 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_DISABLED 0x0 +#define NVM_CFG1_PORT_OPTIC_MODULE_VENDOR_ENFORCEMENT_ENABLED 0x1 +#define NVM_CFG1_PORT_AN_25G_50G_OUI_MASK 0x00018000 +#define NVM_CFG1_PORT_AN_25G_50G_OUI_OFFSET 15 +#define NVM_CFG1_PORT_AN_25G_50G_OUI_CONSORTIUM 0x0 +#define NVM_CFG1_PORT_AN_25G_50G_OUI_BAM 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_MASK 0x000E0000 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_OFFSET 17 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_NONE 0x0 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_FIRECODE 0x1 +#define NVM_CFG1_PORT_FEC_FORCE_MODE_FEC_FORCE_RS 0x2 + u32 phy_cfg; /* 0x1C */ +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_MASK 0x0000FFFF +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_OFFSET 0 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_HIGIG 0x1 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_SCRAMBLER 0x2 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_FIBER 0x4 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_CL72_AN 0x8 +#define NVM_CFG1_PORT_OPTIONAL_LINK_MODES_DISABLE_FEC_AN 0x10 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_MASK 0x00FF0000 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_OFFSET 16 +#define 
NVM_CFG1_PORT_SERDES_NET_INTERFACE_BYPASS 0x0 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR 0x2 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR2 0x3 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_KR4 0x4 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XFI 0x8 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SFI 0x9 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_1000X 0xB +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_SGMII 0xC +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLAUI 0x11 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_XLPPI 0x12 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CAUI 0x21 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_CPPI 0x22 +#define NVM_CFG1_PORT_SERDES_NET_INTERFACE_25GAUI 0x31 +#define NVM_CFG1_PORT_AN_MODE_MASK 0xFF000000 +#define NVM_CFG1_PORT_AN_MODE_OFFSET 24 +#define NVM_CFG1_PORT_AN_MODE_NONE 0x0 +#define NVM_CFG1_PORT_AN_MODE_CL73 0x1 +#define NVM_CFG1_PORT_AN_MODE_CL37 0x2 +#define NVM_CFG1_PORT_AN_MODE_CL73_BAM 0x3 +#define NVM_CFG1_PORT_AN_MODE_CL37_BAM 0x4 +#define NVM_CFG1_PORT_AN_MODE_HPAM 0x5 +#define NVM_CFG1_PORT_AN_MODE_SGMII 0x6 + u32 mgmt_traffic; /* 0x20 */ +#define NVM_CFG1_PORT_RESERVED61_MASK 0x0000000F +#define NVM_CFG1_PORT_RESERVED61_OFFSET 0 + u32 ext_phy; /* 0x24 */ +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_MASK 0x000000FF +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_NONE 0x0 +#define NVM_CFG1_PORT_EXTERNAL_PHY_TYPE_BCM84844 0x1 +#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_MASK 0x0000FF00 +#define NVM_CFG1_PORT_EXTERNAL_PHY_ADDRESS_OFFSET 8 + u32 mba_cfg1; /* 0x28 */ +#define NVM_CFG1_PORT_PREBOOT_OPROM_MASK 0x00000001 +#define NVM_CFG1_PORT_PREBOOT_OPROM_OFFSET 0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_DISABLED 0x0 +#define NVM_CFG1_PORT_PREBOOT_OPROM_ENABLED 0x1 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_MASK 0x00000006 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_TYPE_OFFSET 1 +#define NVM_CFG1_PORT_MBA_DELAY_TIME_MASK 0x00000078 +#define NVM_CFG1_PORT_MBA_DELAY_TIME_OFFSET 3 +#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_MASK 0x00000080 +#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_OFFSET 7 +#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_S 0x0 +#define NVM_CFG1_PORT_MBA_SETUP_HOT_KEY_CTRL_B 0x1 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_MASK 0x00000100 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_OFFSET 8 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_DISABLED 0x0 +#define NVM_CFG1_PORT_MBA_HIDE_SETUP_PROMPT_ENABLED 0x1 +#define NVM_CFG1_PORT_RESERVED5_MASK 0x0001FE00 +#define NVM_CFG1_PORT_RESERVED5_OFFSET 9 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_MASK 0x001E0000 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_OFFSET 17 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_AUTONEG 0x0 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_1G 0x1 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_10G 0x2 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_25G 0x4 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_40G 0x5 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_50G 0x6 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_100G 0x7 +#define NVM_CFG1_PORT_PREBOOT_LINK_SPEED_SMARTLINQ 0x8 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_MASK 0x00E00000 +#define NVM_CFG1_PORT_RESERVED__M_MBA_BOOT_RETRY_COUNT_OFFSET 21 + u32 mba_cfg2; /* 0x2C */ +#define NVM_CFG1_PORT_RESERVED65_MASK 0x0000FFFF +#define NVM_CFG1_PORT_RESERVED65_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED66_MASK 0x00010000 +#define NVM_CFG1_PORT_RESERVED66_OFFSET 16 + u32 vf_cfg; /* 0x30 */ +#define NVM_CFG1_PORT_RESERVED8_MASK 0x0000FFFF +#define NVM_CFG1_PORT_RESERVED8_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED6_MASK 
0x000F0000 +#define NVM_CFG1_PORT_RESERVED6_OFFSET 16 + struct nvm_cfg_mac_address lldp_mac_address; /* 0x34 */ + u32 led_port_settings; /* 0x3C */ +#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_MASK 0x000000FF +#define NVM_CFG1_PORT_LANE_LED_SPD_0_SEL_OFFSET 0 +#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_MASK 0x0000FF00 +#define NVM_CFG1_PORT_LANE_LED_SPD_1_SEL_OFFSET 8 +#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_MASK 0x00FF0000 +#define NVM_CFG1_PORT_LANE_LED_SPD_2_SEL_OFFSET 16 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_1G 0x1 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_10G 0x2 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_25G 0x8 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_40G 0x10 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_50G 0x20 +#define NVM_CFG1_PORT_LANE_LED_SPD__SEL_100G 0x40 + u32 transceiver_00; /* 0x40 */ + /* Define for mapping of transceiver signal module absent */ +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_MASK 0x000000FF +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_OFFSET 0 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_NA 0x0 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO0 0x1 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO1 0x2 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO2 0x3 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO3 0x4 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO4 0x5 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO5 0x6 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO6 0x7 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO7 0x8 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO8 0x9 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO9 0xA +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO10 0xB +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO11 0xC +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO12 0xD +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO13 0xE +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO14 0xF +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO15 0x10 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO16 0x11 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO17 0x12 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO18 0x13 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO19 0x14 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO20 0x15 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO21 0x16 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO22 0x17 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO23 0x18 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO24 0x19 +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO25 0x1A +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO26 0x1B +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO27 0x1C +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO28 0x1D +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO29 0x1E +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO30 0x1F +#define NVM_CFG1_PORT_TRANS_MODULE_ABS_GPIO31 0x20 + /* Define the GPIO mux settings to switch i2c mux to this port */ +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_MASK 0x00000F00 +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_0_OFFSET 8 +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_MASK 0x0000F000 +#define NVM_CFG1_PORT_I2C_MUX_SEL_VALUE_1_OFFSET 12 + u32 device_ids; /* 0x44 */ +#define NVM_CFG1_PORT_ETH_DID_SUFFIX_MASK 0x000000FF +#define NVM_CFG1_PORT_ETH_DID_SUFFIX_OFFSET 0 +#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_MASK 0xFF000000 +#define NVM_CFG1_PORT_RESERVED_DID_SUFFIX_OFFSET 24 + u32 board_cfg; /* 0x48 */ + /* This field defines the board technology + * (backpane,transceiver,external PHY) + */ +#define NVM_CFG1_PORT_PORT_TYPE_MASK 0x000000FF +#define NVM_CFG1_PORT_PORT_TYPE_OFFSET 0 +#define NVM_CFG1_PORT_PORT_TYPE_UNDEFINED 0x0 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE 0x1 +#define NVM_CFG1_PORT_PORT_TYPE_BACKPLANE 0x2 +#define 
NVM_CFG1_PORT_PORT_TYPE_EXT_PHY 0x3 +#define NVM_CFG1_PORT_PORT_TYPE_MODULE_SLAVE 0x4 + /* This field defines the GPIO mapped to tx_disable signal in SFP */ +#define NVM_CFG1_PORT_TX_DISABLE_MASK 0x0000FF00 +#define NVM_CFG1_PORT_TX_DISABLE_OFFSET 8 +#define NVM_CFG1_PORT_TX_DISABLE_NA 0x0 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO0 0x1 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO1 0x2 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO2 0x3 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO3 0x4 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO4 0x5 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO5 0x6 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO6 0x7 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO7 0x8 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO8 0x9 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO9 0xA +#define NVM_CFG1_PORT_TX_DISABLE_GPIO10 0xB +#define NVM_CFG1_PORT_TX_DISABLE_GPIO11 0xC +#define NVM_CFG1_PORT_TX_DISABLE_GPIO12 0xD +#define NVM_CFG1_PORT_TX_DISABLE_GPIO13 0xE +#define NVM_CFG1_PORT_TX_DISABLE_GPIO14 0xF +#define NVM_CFG1_PORT_TX_DISABLE_GPIO15 0x10 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO16 0x11 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO17 0x12 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO18 0x13 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO19 0x14 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO20 0x15 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO21 0x16 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO22 0x17 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO23 0x18 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO24 0x19 +#define NVM_CFG1_PORT_TX_DISABLE_GPIO25 0x1A +#define NVM_CFG1_PORT_TX_DISABLE_GPIO26 0x1B +#define NVM_CFG1_PORT_TX_DISABLE_GPIO27 0x1C +#define NVM_CFG1_PORT_TX_DISABLE_GPIO28 0x1D +#define NVM_CFG1_PORT_TX_DISABLE_GPIO29 0x1E +#define NVM_CFG1_PORT_TX_DISABLE_GPIO30 0x1F +#define NVM_CFG1_PORT_TX_DISABLE_GPIO31 0x20 + u32 reserved[131]; /* 0x4C */ +}; + +struct nvm_cfg1_func { + struct nvm_cfg_mac_address mac_address; /* 0x0 */ + u32 rsrv1; /* 0x8 */ +#define NVM_CFG1_FUNC_RESERVED1_MASK 0x0000FFFF +#define NVM_CFG1_FUNC_RESERVED1_OFFSET 0 +#define NVM_CFG1_FUNC_RESERVED2_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED2_OFFSET 16 + u32 rsrv2; /* 0xC */ +#define NVM_CFG1_FUNC_RESERVED3_MASK 0x0000FFFF +#define NVM_CFG1_FUNC_RESERVED3_OFFSET 0 +#define NVM_CFG1_FUNC_RESERVED4_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED4_OFFSET 16 + u32 device_id; /* 0x10 */ +#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_MASK 0x0000FFFF +#define NVM_CFG1_FUNC_MF_VENDOR_DEVICE_ID_OFFSET 0 +#define NVM_CFG1_FUNC_RESERVED77_MASK 0xFFFF0000 +#define NVM_CFG1_FUNC_RESERVED77_OFFSET 16 + u32 cmn_cfg; /* 0x14 */ +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_MASK 0x00000007 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_OFFSET 0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_PXE 0x0 +#define NVM_CFG1_FUNC_PREBOOT_BOOT_PROTOCOL_NONE 0x7 +#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_MASK 0x0007FFF8 +#define NVM_CFG1_FUNC_VF_PCI_DEVICE_ID_OFFSET 3 +#define NVM_CFG1_FUNC_PERSONALITY_MASK 0x00780000 +#define NVM_CFG1_FUNC_PERSONALITY_OFFSET 19 +#define NVM_CFG1_FUNC_PERSONALITY_ETHERNET 0x0 +#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_MASK 0x7F800000 +#define NVM_CFG1_FUNC_BANDWIDTH_WEIGHT_OFFSET 23 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_MASK 0x80000000 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_OFFSET 31 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_DISABLED 0x0 +#define NVM_CFG1_FUNC_PAUSE_ON_HOST_RING_ENABLED 0x1 + u32 pci_cfg; /* 0x18 */ +#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_MASK 0x0000007F +#define NVM_CFG1_FUNC_NUMBER_OF_VFS_PER_PF_OFFSET 0 +#define NVM_CFG1_FUNC_RESERVESD12_MASK 0x00003F80 +#define NVM_CFG1_FUNC_RESERVESD12_OFFSET 7 
+#define NVM_CFG1_FUNC_BAR1_SIZE_MASK 0x0003C000 +#define NVM_CFG1_FUNC_BAR1_SIZE_OFFSET 14 +#define NVM_CFG1_FUNC_BAR1_SIZE_DISABLED 0x0 +#define NVM_CFG1_FUNC_BAR1_SIZE_64K 0x1 +#define NVM_CFG1_FUNC_BAR1_SIZE_128K 0x2 +#define NVM_CFG1_FUNC_BAR1_SIZE_256K 0x3 +#define NVM_CFG1_FUNC_BAR1_SIZE_512K 0x4 +#define NVM_CFG1_FUNC_BAR1_SIZE_1M 0x5 +#define NVM_CFG1_FUNC_BAR1_SIZE_2M 0x6 +#define NVM_CFG1_FUNC_BAR1_SIZE_4M 0x7 +#define NVM_CFG1_FUNC_BAR1_SIZE_8M 0x8 +#define NVM_CFG1_FUNC_BAR1_SIZE_16M 0x9 +#define NVM_CFG1_FUNC_BAR1_SIZE_32M 0xA +#define NVM_CFG1_FUNC_BAR1_SIZE_64M 0xB +#define NVM_CFG1_FUNC_BAR1_SIZE_128M 0xC +#define NVM_CFG1_FUNC_BAR1_SIZE_256M 0xD +#define NVM_CFG1_FUNC_BAR1_SIZE_512M 0xE +#define NVM_CFG1_FUNC_BAR1_SIZE_1G 0xF +#define NVM_CFG1_FUNC_MAX_BANDWIDTH_MASK 0x03FC0000 +#define NVM_CFG1_FUNC_MAX_BANDWIDTH_OFFSET 18 + u32 preboot_generic_cfg; /* 0x2C */ +#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_MASK 0x0000FFFF +#define NVM_CFG1_FUNC_PREBOOT_VLAN_VALUE_OFFSET 0 +#define NVM_CFG1_FUNC_PREBOOT_VLAN_MASK 0x00010000 +#define NVM_CFG1_FUNC_PREBOOT_VLAN_OFFSET 16 + u32 reserved[8]; /* 0x30 */ +}; + +struct nvm_cfg1 { + struct nvm_cfg1_glob glob; /* 0x0 */ + struct nvm_cfg1_path path[MCP_GLOB_PATH_MAX]; /* 0x140 */ + struct nvm_cfg1_port port[MCP_GLOB_PORT_MAX]; /* 0x230 */ + struct nvm_cfg1_func func[MCP_GLOB_FUNC_MAX]; /* 0xB90 */ +}; + +/****************************************** + * nvm_cfg structs + ******************************************/ +enum nvm_cfg_sections { + NVM_CFG_SECTION_NVM_CFG1, + NVM_CFG_SECTION_MAX +}; + +struct nvm_cfg { + u32 num_sections; + u32 sections_offset[NVM_CFG_SECTION_MAX]; + struct nvm_cfg1 cfg1; +}; + +#endif /* NVM_CFG_H */ diff --git a/drivers/net/qede/base/reg_addr.h b/drivers/net/qede/base/reg_addr.h new file mode 100644 index 00000000..3b25e1a8 --- /dev/null +++ b/drivers/net/qede/base/reg_addr.h @@ -0,0 +1,1107 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT \ + 0 + +#define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE ( \ + 0xfff << 0) + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT \ + 12 + +#define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE ( \ + 0xfff << 12) + +#define CDU_REG_CID_ADDR_PARAMS_NCIB_SHIFT \ + 24 + +#define CDU_REG_CID_ADDR_PARAMS_NCIB ( \ + 0xff << 24) + +#define XSDM_REG_OPERATION_GEN \ + 0xf80408UL +#define NIG_REG_RX_BRB_OUT_EN \ + 0x500e18UL +#define NIG_REG_STORM_OUT_EN \ + 0x500e08UL +#define PSWRQ2_REG_L2P_VALIDATE_VFID \ + 0x240c50UL +#define PGLUE_B_REG_USE_CLIENTID_IN_TAG \ + 0x2aae04UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER \ + 0x2aa16cUL +#define BAR0_MAP_REG_MSDM_RAM \ + 0x1d00000UL +#define BAR0_MAP_REG_USDM_RAM \ + 0x1d80000UL +#define BAR0_MAP_REG_PSDM_RAM \ + 0x1f00000UL +#define BAR0_MAP_REG_TSDM_RAM \ + 0x1c80000UL +#define NIG_REG_RX_LLH_BRB_GATE_DNTFWD_PERPF \ + 0x5011f4UL +#define PRS_REG_SEARCH_TCP \ + 0x1f0400UL +#define PRS_REG_SEARCH_UDP \ + 0x1f0404UL +#define PRS_REG_SEARCH_OPENFLOW \ + 0x1f0434UL +#define TM_REG_PF_ENABLE_CONN \ + 0x2c043cUL +#define TM_REG_PF_ENABLE_TASK \ + 0x2c0444UL +#define TM_REG_PF_SCAN_ACTIVE_CONN \ + 0x2c04fcUL +#define TM_REG_PF_SCAN_ACTIVE_TASK \ + 0x2c0500UL +#define IGU_REG_LEADING_EDGE_LATCH \ + 0x18082cUL +#define IGU_REG_TRAILING_EDGE_LATCH \ + 0x180830UL +#define QM_REG_USG_CNT_PF_TX \ + 0x2f2eacUL +#define QM_REG_USG_CNT_PF_OTHER \ + 0x2f2eb0UL +#define DORQ_REG_PF_DB_ENABLE \ + 0x100508UL +#define QM_REG_PF_EN \ + 0x2f2ea4UL +#define TCFC_REG_STRONG_ENABLE_PF \ + 0x2d0708UL +#define CCFC_REG_STRONG_ENABLE_PF \ + 0x2e0708UL +#define PGLUE_B_REG_PGL_ADDR_88_F0 \ + 0x2aa404UL +#define PGLUE_B_REG_PGL_ADDR_8C_F0 \ + 0x2aa408UL +#define PGLUE_B_REG_PGL_ADDR_90_F0 \ + 0x2aa40cUL +#define PGLUE_B_REG_PGL_ADDR_94_F0 \ + 0x2aa410UL +#define PGLUE_B_REG_WAS_ERROR_PF_31_0_CLR \ + 0x2aa138UL +#define PGLUE_B_REG_INTERNAL_PFID_ENABLE_TARGET_READ \ + 0x2aa174UL +#define MISC_REG_GEN_PURP_CR0 \ + 0x008c80UL +#define MCP_REG_SCRATCH \ + 0xe20000UL +#define CNIG_REG_NW_PORT_MODE_BB_B0 \ + 0x218200UL +#define MISCS_REG_CHIP_NUM \ + 0x00976cUL +#define MISCS_REG_CHIP_REV \ + 0x009770UL +#define MISCS_REG_CMT_ENABLED_FOR_PAIR \ + 0x00971cUL +#define MISCS_REG_CHIP_TEST_REG \ + 0x009778UL +#define MISCS_REG_CHIP_METAL \ + 0x009774UL +#define BRB_REG_HEADER_SIZE \ + 0x340804UL +#define BTB_REG_HEADER_SIZE \ + 0xdb0804UL +#define CAU_REG_LONG_TIMEOUT_THRESHOLD \ + 0x1c0708UL +#define CCFC_REG_ACTIVITY_COUNTER \ + 0x2e8800UL +#define CDU_REG_CID_ADDR_PARAMS \ + 0x580900UL +#define DBG_REG_CLIENT_ENABLE \ + 0x010004UL +#define DMAE_REG_INIT \ + 0x00c000UL +#define DORQ_REG_IFEN \ + 0x100040UL +#define GRC_REG_TIMEOUT_EN \ + 0x050404UL +#define IGU_REG_BLOCK_CONFIGURATION \ + 0x180040UL +#define MCM_REG_INIT \ + 0x1200000UL +#define MCP2_REG_DBG_DWORD_ENABLE \ + 0x052404UL +#define MISC_REG_PORT_MODE \ + 0x008c00UL +#define MISC_REG_BLOCK_256B_EN \ + 0x008c14UL +#define MISCS_REG_RESET_PL_HV \ + 0x009060UL +#define MISCS_REG_CLK_100G_MODE \ + 0x009070UL +#define MISCS_REG_RESET_PL_HV_2 \ + 0x009150UL +#define MSDM_REG_ENABLE_IN1 \ + 0xfc0004UL +#define MSEM_REG_ENABLE_IN \ + 0x1800004UL +#define NIG_REG_CM_HDR \ + 0x500840UL +#define NCSI_REG_CONFIG \ + 0x040200UL +#define PSWRQ2_REG_RBC_DONE \ + 0x240000UL +#define PSWRQ2_REG_CFG_DONE \ + 0x240004UL +#define PBF_REG_INIT \ + 0xd80000UL +#define PTU_REG_ATC_INIT_ARRAY \ + 0x560000UL +#define PCM_REG_INIT \ + 0x1100000UL +#define PGLUE_B_REG_ADMIN_PER_PF_REGION \ + 0x2a9000UL 
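The register definitions in this header consistently pair a field mask with a shift (reg_addr.h uses a *_SHIFT suffix, while nvm_cfg.h pairs *_MASK with *_OFFSET). Below is a minimal sketch, under the assumption of the usual mask-then-shift convention, of how such pairs are typically consumed; the FIELD_GET/FIELD_SET helpers and the drv_link_speed() accessor are illustrative only and are not part of this patch (the #defines are copied from the hunks above purely to keep the sketch self-contained).

        /* Illustrative only -- the helper names below are not from the patch. */
        #include <stdint.h>

        /* Copied from reg_addr.h above, for self-containment. */
        #define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE_SHIFT  0
        #define CDU_REG_CID_ADDR_PARAMS_CONTEXT_SIZE        (0xfff << 0)
        #define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE_SHIFT   12
        #define CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE         (0xfff << 12)

        /* Read a field: mask first, then shift down. */
        #define FIELD_GET(reg, name) (((reg) & (name)) >> name##_SHIFT)
        /* Write a field: clear its bits, then OR in the shifted new value. */
        #define FIELD_SET(reg, name, val) \
                (((reg) & ~(uint32_t)(name)) | \
                 (((uint32_t)(val) << name##_SHIFT) & (name)))

        /* Example: double the block-waste field, leaving the rest untouched. */
        static inline uint32_t double_block_waste(uint32_t params)
        {
                uint32_t waste = FIELD_GET(params, CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE);

                return FIELD_SET(params, CDU_REG_CID_ADDR_PARAMS_BLOCK_WASTE, waste * 2);
        }

        /* nvm_cfg.h follows the same idea with an _OFFSET suffix. */
        #define NVM_CFG1_PORT_DRV_LINK_SPEED_MASK   0x0000000F
        #define NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET 0

        static inline uint32_t drv_link_speed(uint32_t link_settings)
        {
                return (link_settings & NVM_CFG1_PORT_DRV_LINK_SPEED_MASK) >>
                        NVM_CFG1_PORT_DRV_LINK_SPEED_OFFSET;
        }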
+#define PRM_REG_DISABLE_PRM \ + 0x230000UL +#define PRS_REG_SOFT_RST \ + 0x1f0000UL +#define PSDM_REG_ENABLE_IN1 \ + 0xfa0004UL +#define PSEM_REG_ENABLE_IN \ + 0x1600004UL +#define PSWRQ_REG_DBG_SELECT \ + 0x280020UL +#define PSWRQ2_REG_CDUT_P_SIZE \ + 0x24000cUL +#define PSWHST_REG_DISCARD_INTERNAL_WRITES \ + 0x2a0040UL +#define PSWHST2_REG_DBGSYN_ALMOST_FULL_THR \ + 0x29e050UL +#define PSWRD_REG_DBG_SELECT \ + 0x29c040UL +#define PSWRD2_REG_CONF11 \ + 0x29d064UL +#define PSWWR_REG_USDM_FULL_TH \ + 0x29a040UL +#define PSWWR2_REG_CDU_FULL_TH2 \ + 0x29b040UL +#define QM_REG_MAXPQSIZE_0 \ + 0x2f0434UL +#define RSS_REG_RSS_INIT_EN \ + 0x238804UL +#define RDIF_REG_STOP_ON_ERROR \ + 0x300040UL +#define SRC_REG_SOFT_RST \ + 0x23874cUL +#define TCFC_REG_ACTIVITY_COUNTER \ + 0x2d8800UL +#define TCM_REG_INIT \ + 0x1180000UL +#define TM_REG_PXP_READ_DATA_FIFO_INIT \ + 0x2c0014UL +#define TSDM_REG_ENABLE_IN1 \ + 0xfb0004UL +#define TSEM_REG_ENABLE_IN \ + 0x1700004UL +#define TDIF_REG_STOP_ON_ERROR \ + 0x310040UL +#define UCM_REG_INIT \ + 0x1280000UL +#define UMAC_REG_IPG_HD_BKP_CNTL_BB_B0 \ + 0x051004UL +#define USDM_REG_ENABLE_IN1 \ + 0xfd0004UL +#define USEM_REG_ENABLE_IN \ + 0x1900004UL +#define XCM_REG_INIT \ + 0x1000000UL +#define XSDM_REG_ENABLE_IN1 \ + 0xf80004UL +#define XSEM_REG_ENABLE_IN \ + 0x1400004UL +#define YCM_REG_INIT \ + 0x1080000UL +#define YSDM_REG_ENABLE_IN1 \ + 0xf90004UL +#define YSEM_REG_ENABLE_IN \ + 0x1500004UL +#define XYLD_REG_SCBD_STRICT_PRIO \ + 0x4c0000UL +#define TMLD_REG_SCBD_STRICT_PRIO \ + 0x4d0000UL +#define MULD_REG_SCBD_STRICT_PRIO \ + 0x4e0000UL +#define YULD_REG_SCBD_STRICT_PRIO \ + 0x4c8000UL +#define MISC_REG_SHARED_MEM_ADDR \ + 0x008c20UL +#define DMAE_REG_GO_C0 \ + 0x00c048UL +#define DMAE_REG_GO_C1 \ + 0x00c04cUL +#define DMAE_REG_GO_C2 \ + 0x00c050UL +#define DMAE_REG_GO_C3 \ + 0x00c054UL +#define DMAE_REG_GO_C4 \ + 0x00c058UL +#define DMAE_REG_GO_C5 \ + 0x00c05cUL +#define DMAE_REG_GO_C6 \ + 0x00c060UL +#define DMAE_REG_GO_C7 \ + 0x00c064UL +#define DMAE_REG_GO_C8 \ + 0x00c068UL +#define DMAE_REG_GO_C9 \ + 0x00c06cUL +#define DMAE_REG_GO_C10 \ + 0x00c070UL +#define DMAE_REG_GO_C11 \ + 0x00c074UL +#define DMAE_REG_GO_C12 \ + 0x00c078UL +#define DMAE_REG_GO_C13 \ + 0x00c07cUL +#define DMAE_REG_GO_C14 \ + 0x00c080UL +#define DMAE_REG_GO_C15 \ + 0x00c084UL +#define DMAE_REG_GO_C16 \ + 0x00c088UL +#define DMAE_REG_GO_C17 \ + 0x00c08cUL +#define DMAE_REG_GO_C18 \ + 0x00c090UL +#define DMAE_REG_GO_C19 \ + 0x00c094UL +#define DMAE_REG_GO_C20 \ + 0x00c098UL +#define DMAE_REG_GO_C21 \ + 0x00c09cUL +#define DMAE_REG_GO_C22 \ + 0x00c0a0UL +#define DMAE_REG_GO_C23 \ + 0x00c0a4UL +#define DMAE_REG_GO_C24 \ + 0x00c0a8UL +#define DMAE_REG_GO_C25 \ + 0x00c0acUL +#define DMAE_REG_GO_C26 \ + 0x00c0b0UL +#define DMAE_REG_GO_C27 \ + 0x00c0b4UL +#define DMAE_REG_GO_C28 \ + 0x00c0b8UL +#define DMAE_REG_GO_C29 \ + 0x00c0bcUL +#define DMAE_REG_GO_C30 \ + 0x00c0c0UL +#define DMAE_REG_GO_C31 \ + 0x00c0c4UL +#define DMAE_REG_CMD_MEM \ + 0x00c800UL +#define QM_REG_MAXPQSIZETXSEL_0 \ + 0x2f0440UL +#define QM_REG_SDMCMDREADY \ + 0x2f1e10UL +#define QM_REG_SDMCMDADDR \ + 0x2f1e04UL +#define QM_REG_SDMCMDDATALSB \ + 0x2f1e08UL +#define QM_REG_SDMCMDDATAMSB \ + 0x2f1e0cUL +#define QM_REG_SDMCMDGO \ + 0x2f1e14UL +#define QM_REG_RLPFCRD \ + 0x2f4d80UL +#define QM_REG_RLPFINCVAL \ + 0x2f4c80UL +#define QM_REG_RLGLBLCRD \ + 0x2f4400UL +#define QM_REG_RLGLBLINCVAL \ + 0x2f3400UL +#define IGU_REG_ATTENTION_ENABLE \ + 0x18083cUL +#define IGU_REG_ATTN_MSG_ADDR_L \ + 0x180820UL +#define 
IGU_REG_ATTN_MSG_ADDR_H \ + 0x180824UL +#define MISC_REG_AEU_GENERAL_ATTN_0 \ + 0x008400UL +#define CAU_REG_SB_ADDR_MEMORY \ + 0x1c8000UL +#define CAU_REG_SB_VAR_MEMORY \ + 0x1c6000UL +#define CAU_REG_PI_MEMORY \ + 0x1d0000UL +#define IGU_REG_PF_CONFIGURATION \ + 0x180800UL +#define MISC_REG_AEU_ENABLE1_IGU_OUT_0 \ + 0x00849cUL +#define MISC_REG_AEU_MASK_ATTN_IGU \ + 0x008494UL +#define IGU_REG_CLEANUP_STATUS_0 \ + 0x180980UL +#define IGU_REG_CLEANUP_STATUS_1 \ + 0x180a00UL +#define IGU_REG_CLEANUP_STATUS_2 \ + 0x180a80UL +#define IGU_REG_CLEANUP_STATUS_3 \ + 0x180b00UL +#define IGU_REG_CLEANUP_STATUS_4 \ + 0x180b80UL +#define IGU_REG_COMMAND_REG_32LSB_DATA \ + 0x180840UL +#define IGU_REG_COMMAND_REG_CTRL \ + 0x180848UL +#define IGU_REG_BLOCK_CONFIGURATION_VF_CLEANUP_EN ( \ + 0x1 << 1) +#define IGU_REG_BLOCK_CONFIGURATION_PXP_TPH_INTERFACE_EN ( \ + 0x1 << 0) +#define IGU_REG_MAPPING_MEMORY \ + 0x184000UL +#define MISCS_REG_GENERIC_POR_0 \ + 0x0096d4UL +#define MCP_REG_NVM_CFG4 \ + 0xe0642cUL +#define MCP_REG_NVM_CFG4_FLASH_SIZE ( \ + 0x7 << 0) +#define MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT \ + 0 +#define CCFC_REG_STRONG_ENABLE_VF 0x2e070cUL +#define CNIG_REG_PMEG_IF_CMD_BB_B0 0x21821cUL +#define CNIG_REG_PMEG_IF_ADDR_BB_B0 0x218224UL +#define CNIG_REG_PMEG_IF_WRDATA_BB_B0 0x218228UL +#define NWM_REG_MAC0 0x800400UL +#define NWM_REG_MAC0_SIZE 256 +#define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL +#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_ENABLE_0_SHIFT 0 +#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_NWM_PORT_MAP_0_SHIFT 1 +#define CNIG_REG_NIG_PORT0_CONF_NIG_PORT_RATE_0_SHIFT 3 +#define ETH_MAC_REG_XIF_MODE 0x000080UL +#define ETH_MAC_REG_XIF_MODE_XGMII_SHIFT 0 +#define ETH_MAC_REG_FRM_LENGTH 0x000014UL +#define ETH_MAC_REG_FRM_LENGTH_FRM_LENGTH_SHIFT 0 +#define ETH_MAC_REG_TX_IPG_LENGTH 0x000044UL +#define ETH_MAC_REG_TX_IPG_LENGTH_TXIPG_SHIFT 0 +#define ETH_MAC_REG_RX_FIFO_SECTIONS 0x00001cUL +#define ETH_MAC_REG_RX_FIFO_SECTIONS_RX_SECTION_FULL_SHIFT 0 +#define ETH_MAC_REG_TX_FIFO_SECTIONS 0x000020UL +#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_EMPTY_SHIFT 16 +#define ETH_MAC_REG_TX_FIFO_SECTIONS_TX_SECTION_FULL_SHIFT 0 +#define ETH_MAC_REG_COMMAND_CONFIG 0x000008UL +#define MISC_REG_RESET_PL_PDA_VAUX 0x008090UL +#define MISC_REG_XMAC_CORE_PORT_MODE 0x008c08UL +#define MISC_REG_XMAC_PHY_PORT_MODE 0x008c04UL +#define XMAC_REG_MODE 0x210008UL +#define XMAC_REG_RX_MAX_SIZE 0x210040UL +#define XMAC_REG_TX_CTRL_LO 0x210020UL +#define XMAC_REG_CTRL 0x210000UL +#define XMAC_REG_RX_CTRL 0x210030UL +#define XMAC_REG_RX_CTRL_PROCESS_VARIABLE_PREAMBLE (0x1 << 12) +#define MISC_REG_CLK_100G_MODE 0x008c10UL +#define MISC_REG_OPTE_MODE 0x008c0cUL +#define NIG_REG_LLH_ENG_CLS_TCP_4_TUPLE_SEARCH 0x501b84UL +#define NIG_REG_LLH_ENG_CLS_ENG_ID_TBL 0x501b90UL +#define PRS_REG_SEARCH_TAG1 0x1f0444UL +#define PRS_REG_SEARCH_TCP_FIRST_FRAG 0x1f0410UL +#define MISCS_REG_PLL_MAIN_CTRL_4 0x00974cUL +#define MISCS_REG_ECO_RESERVED 0x0097b4UL +#define PGLUE_B_REG_PF_BAR0_SIZE 0x2aae60UL +#define PGLUE_B_REG_PF_BAR1_SIZE 0x2aae64UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define 
NIG_REG_LLH_FUNC_FILTER_MODE 0x501ac0UL +#define NIG_REG_LLH_FUNC_FILTER_PROTOCOL_TYPE 0x501b00UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_EN 0x501a80UL +#define NIG_REG_LLH_FUNC_FILTER_EN_SIZE 16 +#define NIG_REG_LLH_FUNC_FILTER_VALUE 0x501a00UL +#define XMAC_REG_CTRL_TX_EN (0x1 << 0) +#define XMAC_REG_CTRL_RX_EN (0x1 << 1) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE (0xff << 24) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE (0xff << 16) +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_BLOCK_WASTE_SHIFT 16 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE (0xff << 16) +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE (0xff << 24) +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK (0xfff << 0) +#define CDU_REG_SEGMENT1_PARAMS_T1_NUM_TIDS_IN_BLOCK_SHIFT 0 +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK (0xfff << 0) +#define CDU_REG_SEGMENT0_PARAMS_T0_NUM_TIDS_IN_BLOCK_SHIFT 0 +#define PSWRQ2_REG_ILT_MEMORY 0x260000UL +#define QM_REG_WFQPFWEIGHT 0x2f4e80UL +#define QM_REG_WFQVPWEIGHT 0x2fa000UL +#define NIG_REG_LB_ARB_CREDIT_WEIGHT_0 0x50160cUL +#define NIG_REG_TX_ARB_CREDIT_WEIGHT_0 0x501f88UL +#define NIG_REG_LB_ARB_CREDIT_WEIGHT_1 0x501610UL +#define NIG_REG_TX_ARB_CREDIT_WEIGHT_1 0x501f8cUL +#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_0 0x5015e4UL +#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_0 0x501f58UL +#define NIG_REG_LB_ARB_CREDIT_UPPER_BOUND_1 0x5015e8UL +#define NIG_REG_TX_ARB_CREDIT_UPPER_BOUND_1 0x501f5cUL +#define NIG_REG_LB_ARB_CLIENT_IS_STRICT 0x5015c0UL +#define NIG_REG_TX_ARB_CLIENT_IS_STRICT 0x501f34UL +#define NIG_REG_LB_ARB_CLIENT_IS_SUBJECT2WFQ 0x5015c4UL +#define NIG_REG_TX_ARB_CLIENT_IS_SUBJECT2WFQ 0x501f38UL +#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_BASE_TYPE_SHIFT 1 +#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL 0x501f1cUL +#define NIG_REG_TX_LB_GLBRATELIMIT_INC_PERIOD 0x501f20UL +#define NIG_REG_TX_LB_GLBRATELIMIT_INC_VALUE 0x501f24UL +#define NIG_REG_TX_LB_GLBRATELIMIT_MAX_VALUE 0x501f28UL +#define NIG_REG_TX_LB_GLBRATELIMIT_CTRL_TX_LB_GLBRATELIMIT_EN_SHIFT 0 +#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_BASE_TYPE_SHIFT 1 +#define NIG_REG_LB_BRBRATELIMIT_CTRL 0x50150cUL +#define NIG_REG_LB_BRBRATELIMIT_INC_PERIOD 0x501510UL +#define NIG_REG_LB_BRBRATELIMIT_INC_VALUE 0x501514UL +#define NIG_REG_LB_BRBRATELIMIT_MAX_VALUE 0x501518UL +#define NIG_REG_LB_BRBRATELIMIT_CTRL_LB_BRBRATELIMIT_EN_SHIFT 0 +#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_BASE_TYPE_0_SHIFT 1 +#define NIG_REG_LB_TCRATELIMIT_CTRL_0 0x501520UL +#define NIG_REG_LB_TCRATELIMIT_INC_PERIOD_0 0x501540UL +#define NIG_REG_LB_TCRATELIMIT_INC_VALUE_0 0x501560UL +#define NIG_REG_LB_TCRATELIMIT_MAX_VALUE_0 0x501580UL +#define NIG_REG_LB_TCRATELIMIT_CTRL_0_LB_TCRATELIMIT_EN_0_SHIFT 0 +#define NIG_REG_PRIORITY_FOR_TC_0 0x501bccUL +#define NIG_REG_RX_TC0_PRIORITY_MASK 0x501becUL +#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_1 0x1f0540UL +#define PRS_REG_ETS_ARB_CREDIT_WEIGHT_0 0x1f0534UL +#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_1 0x1f053cUL +#define PRS_REG_ETS_ARB_CREDIT_UPPER_BOUND_0 0x1f0530UL +#define PRS_REG_ETS_ARB_CLIENT_IS_STRICT 0x1f0514UL +#define PRS_REG_ETS_ARB_CLIENT_IS_SUBJECT2WFQ 0x1f0518UL +#define BRB_REG_TOTAL_MAC_SIZE 0x3408c0UL +#define BRB_REG_SHARED_HR_AREA 0x340880UL +#define BRB_REG_TC_GUARANTIED_0 0x340900UL +#define BRB_REG_MAIN_TC_GUARANTIED_HYST_0 0x340978UL +#define 
BRB_REG_LB_TC_FULL_XOFF_THRESHOLD_0 0x340c60UL +#define BRB_REG_LB_TC_FULL_XON_THRESHOLD_0 0x340d38UL +#define BRB_REG_LB_TC_PAUSE_XOFF_THRESHOLD_0 0x340ab0UL +#define BRB_REG_LB_TC_PAUSE_XON_THRESHOLD_0 0x340b88UL +#define BRB_REG_MAIN_TC_FULL_XOFF_THRESHOLD_0 0x340c00UL +#define BRB_REG_MAIN_TC_FULL_XON_THRESHOLD_0 0x340cd8UL +#define BRB_REG_MAIN_TC_PAUSE_XOFF_THRESHOLD_0 0x340a50UL +#define BRB_REG_MAIN_TC_PAUSE_XON_THRESHOLD_0 0x340b28UL +#define PRS_REG_VXLAN_PORT 0x1f0738UL +#define NIG_REG_VXLAN_PORT 0x50105cUL +#define PBF_REG_VXLAN_PORT 0xd80518UL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL +#define NIG_REG_ENC_TYPE_ENABLE_VXLAN_ENABLE_SHIFT 2 +#define DORQ_REG_L2_EDPM_TUNNEL_VXLAN_EN 0x100914UL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_ENC_TYPE_ENABLE 0x501058UL +#define NIG_REG_ENC_TYPE_ENABLE_ETH_OVER_GRE_ENABLE_SHIFT 0 +#define NIG_REG_ENC_TYPE_ENABLE_IP_OVER_GRE_ENABLE_SHIFT 1 +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_ETH_EN 0x10090cUL +#define DORQ_REG_L2_EDPM_TUNNEL_GRE_IP_EN 0x100910UL +#define PRS_REG_NGE_PORT 0x1f086cUL +#define NIG_REG_NGE_PORT 0x508b38UL +#define PBF_REG_NGE_PORT 0xd8051cUL +#define PRS_REG_ENCAPSULATION_TYPE_EN 0x1f0730UL +#define PRS_REG_OUTPUT_FORMAT_4_0 0x1f099cUL +#define NIG_REG_NGE_ETH_ENABLE 0x508b2cUL +#define NIG_REG_NGE_IP_ENABLE 0x508b28UL +#define NIG_REG_NGE_COMP_VER 0x508b30UL +#define PBF_REG_NGE_COMP_VER 0xd80524UL +#define PRS_REG_NGE_COMP_VER 0x1f0878UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_ETH_EN 0x100930UL +#define DORQ_REG_L2_EDPM_TUNNEL_NGE_IP_EN 0x10092cUL +#define NIG_REG_PKT_PRIORITY_TO_TC 0x501ba4UL +#define PGLUE_B_REG_START_INIT_PTT_GTT 0x2a8008UL +#define PGLUE_B_REG_INIT_DONE_PTT_GTT 0x2a800cUL +#define MISC_REG_AEU_GENERAL_ATTN_35 0x00848cUL +#define MCP_REG_CPU_STATE 0xe05004UL +#define MCP_REG_CPU_MODE 0xe05000UL +#define MCP_REG_CPU_MODE_SOFT_HALT (0x1 << 10) +#define MCP_REG_CPU_EVENT_MASK 0xe05008UL +#define PSWHST_REG_VF_DISABLED_ERROR_VALID 0x2a0060UL +#define PSWHST_REG_VF_DISABLED_ERROR_ADDRESS 0x2a0064UL +#define PSWHST_REG_VF_DISABLED_ERROR_DATA 0x2a005cUL +#define PSWHST_REG_INCORRECT_ACCESS_VALID 0x2a0070UL +#define PSWHST_REG_INCORRECT_ACCESS_ADDRESS 0x2a0074UL +#define PSWHST_REG_INCORRECT_ACCESS_DATA 0x2a0068UL +#define PSWHST_REG_INCORRECT_ACCESS_LENGTH 0x2a006cUL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_VALID 0x050054UL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_0 0x05004cUL +#define GRC_REG_TIMEOUT_ATTN_ACCESS_DATA_1 0x050050UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS2 0x2aa150UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_31_0 0x2aa144UL +#define PGLUE_B_REG_TX_ERR_WR_ADD_63_32 0x2aa148UL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS 0x2aa14cUL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS2 0x2aa160UL +#define PGLUE_B_REG_TX_ERR_RD_ADD_31_0 0x2aa154UL +#define PGLUE_B_REG_TX_ERR_RD_ADD_63_32 0x2aa158UL +#define PGLUE_B_REG_TX_ERR_RD_DETAILS 0x2aa15cUL +#define PGLUE_B_REG_TX_ERR_WR_DETAILS_ICPL 0x2aa164UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_DETAILS 0x2aa54cUL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_31_0 0x2aa544UL +#define PGLUE_B_REG_MASTER_ZLR_ERR_ADD_63_32 0x2aa548UL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS2 0x2aae80UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_31_0 0x2aae74UL +#define PGLUE_B_REG_VF_ILT_ERR_ADD_63_32 0x2aae78UL +#define PGLUE_B_REG_VF_ILT_ERR_DETAILS 0x2aae7cUL +#define PGLUE_B_REG_LATCHED_ERRORS_CLR 0x2aa3bcUL +#define 
NIG_REG_INT_MASK_3_P0_LB_TC1_PAUSE_TOO_LONG_INT (0x1 << 10) +#define DORQ_REG_DB_DROP_REASON 0x100a2cUL +#define DORQ_REG_DB_DROP_DETAILS 0x100a24UL +#define TM_REG_INT_STS_1 0x2c0190UL +#define TM_REG_INT_STS_1_PEND_TASK_SCAN (0x1 << 6) +#define TM_REG_INT_STS_1_PEND_CONN_SCAN (0x1 << 5) +#define TM_REG_INT_MASK_1 0x2c0194UL +#define TM_REG_INT_MASK_1_PEND_CONN_SCAN (0x1 << 5) +#define TM_REG_INT_MASK_1_PEND_TASK_SCAN (0x1 << 6) +#define MISC_REG_AEU_AFTER_INVERT_1_IGU 0x0087b4UL +#define MISC_REG_AEU_ENABLE4_IGU_OUT_0 0x0084a8UL +#define MISC_REG_AEU_ENABLE3_IGU_OUT_0 0x0084a4UL +#define YSEM_REG_FAST_MEMORY 0x1540000UL +#define NIG_REG_FLOWCTRL_MODE 0x501ba0UL +#define TSEM_REG_FAST_MEMORY 0x1740000UL +#define TSEM_REG_DBG_FRAME_MODE 0x1701408UL +#define TSEM_REG_SLOW_DBG_ACTIVE 0x1701400UL +#define TSEM_REG_SLOW_DBG_MODE 0x1701404UL +#define TSEM_REG_DBG_MODE1_CFG 0x1701420UL +#define TSEM_REG_SYNC_DBG_EMPTY 0x1701160UL +#define TSEM_REG_SLOW_DBG_EMPTY 0x1701140UL +#define TCM_REG_CTX_RBC_ACCS 0x11814c0UL +#define TCM_REG_AGG_CON_CTX 0x11814c4UL +#define TCM_REG_SM_CON_CTX 0x11814ccUL +#define TCM_REG_AGG_TASK_CTX 0x11814c8UL +#define TCM_REG_SM_TASK_CTX 0x11814d0UL +#define MSEM_REG_FAST_MEMORY 0x1840000UL +#define MSEM_REG_DBG_FRAME_MODE 0x1801408UL +#define MSEM_REG_SLOW_DBG_ACTIVE 0x1801400UL +#define MSEM_REG_SLOW_DBG_MODE 0x1801404UL +#define MSEM_REG_DBG_MODE1_CFG 0x1801420UL +#define MSEM_REG_SYNC_DBG_EMPTY 0x1801160UL +#define MSEM_REG_SLOW_DBG_EMPTY 0x1801140UL +#define MCM_REG_CTX_RBC_ACCS 0x1201800UL +#define MCM_REG_AGG_CON_CTX 0x1201804UL +#define MCM_REG_SM_CON_CTX 0x120180cUL +#define MCM_REG_AGG_TASK_CTX 0x1201808UL +#define MCM_REG_SM_TASK_CTX 0x1201810UL +#define USEM_REG_FAST_MEMORY 0x1940000UL +#define USEM_REG_DBG_FRAME_MODE 0x1901408UL +#define USEM_REG_SLOW_DBG_ACTIVE 0x1901400UL +#define USEM_REG_SLOW_DBG_MODE 0x1901404UL +#define USEM_REG_DBG_MODE1_CFG 0x1901420UL +#define USEM_REG_SYNC_DBG_EMPTY 0x1901160UL +#define USEM_REG_SLOW_DBG_EMPTY 0x1901140UL +#define UCM_REG_CTX_RBC_ACCS 0x1281700UL +#define UCM_REG_AGG_CON_CTX 0x1281704UL +#define UCM_REG_SM_CON_CTX 0x128170cUL +#define UCM_REG_AGG_TASK_CTX 0x1281708UL +#define UCM_REG_SM_TASK_CTX 0x1281710UL +#define XSEM_REG_FAST_MEMORY 0x1440000UL +#define XSEM_REG_DBG_FRAME_MODE 0x1401408UL +#define XSEM_REG_SLOW_DBG_ACTIVE 0x1401400UL +#define XSEM_REG_SLOW_DBG_MODE 0x1401404UL +#define XSEM_REG_DBG_MODE1_CFG 0x1401420UL +#define XSEM_REG_SYNC_DBG_EMPTY 0x1401160UL +#define XSEM_REG_SLOW_DBG_EMPTY 0x1401140UL +#define XCM_REG_CTX_RBC_ACCS 0x1001800UL +#define XCM_REG_AGG_CON_CTX 0x1001804UL +#define XCM_REG_SM_CON_CTX 0x1001808UL +#define YSEM_REG_DBG_FRAME_MODE 0x1501408UL +#define YSEM_REG_SLOW_DBG_ACTIVE 0x1501400UL +#define YSEM_REG_SLOW_DBG_MODE 0x1501404UL +#define YSEM_REG_DBG_MODE1_CFG 0x1501420UL +#define YSEM_REG_SYNC_DBG_EMPTY 0x1501160UL +#define YCM_REG_CTX_RBC_ACCS 0x1081800UL +#define YCM_REG_AGG_CON_CTX 0x1081804UL +#define YCM_REG_SM_CON_CTX 0x108180cUL +#define YCM_REG_AGG_TASK_CTX 0x1081808UL +#define YCM_REG_SM_TASK_CTX 0x1081810UL +#define PSEM_REG_FAST_MEMORY 0x1640000UL +#define PSEM_REG_DBG_FRAME_MODE 0x1601408UL +#define PSEM_REG_SLOW_DBG_ACTIVE 0x1601400UL +#define PSEM_REG_SLOW_DBG_MODE 0x1601404UL +#define PSEM_REG_DBG_MODE1_CFG 0x1601420UL +#define PSEM_REG_SYNC_DBG_EMPTY 0x1601160UL +#define PSEM_REG_SLOW_DBG_EMPTY 0x1601140UL +#define PCM_REG_CTX_RBC_ACCS 0x1101440UL +#define PCM_REG_SM_CON_CTX 0x1101444UL +#define GRC_REG_DBG_SELECT 0x0500a4UL +#define GRC_REG_DBG_DWORD_ENABLE 
0x0500a8UL +#define GRC_REG_DBG_SHIFT 0x0500acUL +#define GRC_REG_DBG_FORCE_VALID 0x0500b0UL +#define GRC_REG_DBG_FORCE_FRAME 0x0500b4UL +#define PGLUE_B_REG_DBG_SELECT 0x2a8400UL +#define PGLUE_B_REG_DBG_DWORD_ENABLE 0x2a8404UL +#define PGLUE_B_REG_DBG_SHIFT 0x2a8408UL +#define PGLUE_B_REG_DBG_FORCE_VALID 0x2a840cUL +#define PGLUE_B_REG_DBG_FORCE_FRAME 0x2a8410UL +#define CNIG_REG_DBG_SELECT_K2 0x218254UL +#define CNIG_REG_DBG_DWORD_ENABLE_K2 0x218258UL +#define CNIG_REG_DBG_SHIFT_K2 0x21825cUL +#define CNIG_REG_DBG_FORCE_VALID_K2 0x218260UL +#define CNIG_REG_DBG_FORCE_FRAME_K2 0x218264UL +#define NCSI_REG_DBG_SELECT 0x040474UL +#define NCSI_REG_DBG_DWORD_ENABLE 0x040478UL +#define NCSI_REG_DBG_SHIFT 0x04047cUL +#define NCSI_REG_DBG_FORCE_VALID 0x040480UL +#define NCSI_REG_DBG_FORCE_FRAME 0x040484UL +#define BMB_REG_DBG_SELECT 0x540a7cUL +#define BMB_REG_DBG_DWORD_ENABLE 0x540a80UL +#define BMB_REG_DBG_SHIFT 0x540a84UL +#define BMB_REG_DBG_FORCE_VALID 0x540a88UL +#define BMB_REG_DBG_FORCE_FRAME 0x540a8cUL +#define PCIE_REG_DBG_SELECT 0x0547e8UL +#define PHY_PCIE_REG_DBG_SELECT 0x629fe8UL +#define PCIE_REG_DBG_DWORD_ENABLE 0x0547ecUL +#define PHY_PCIE_REG_DBG_DWORD_ENABLE 0x629fecUL +#define PCIE_REG_DBG_SHIFT 0x0547f0UL +#define PHY_PCIE_REG_DBG_SHIFT 0x629ff0UL +#define PCIE_REG_DBG_FORCE_VALID 0x0547f4UL +#define PHY_PCIE_REG_DBG_FORCE_VALID 0x629ff4UL +#define PCIE_REG_DBG_FORCE_FRAME 0x0547f8UL +#define PHY_PCIE_REG_DBG_FORCE_FRAME 0x629ff8UL +#define MCP2_REG_DBG_SELECT 0x052400UL +#define MCP2_REG_DBG_SHIFT 0x052408UL +#define MCP2_REG_DBG_FORCE_VALID 0x052440UL +#define MCP2_REG_DBG_FORCE_FRAME 0x052444UL +#define PSWHST_REG_DBG_SELECT 0x2a0100UL +#define PSWHST_REG_DBG_DWORD_ENABLE 0x2a0104UL +#define PSWHST_REG_DBG_SHIFT 0x2a0108UL +#define PSWHST_REG_DBG_FORCE_VALID 0x2a010cUL +#define PSWHST_REG_DBG_FORCE_FRAME 0x2a0110UL +#define PSWHST2_REG_DBG_SELECT 0x29e058UL +#define PSWHST2_REG_DBG_DWORD_ENABLE 0x29e05cUL +#define PSWHST2_REG_DBG_SHIFT 0x29e060UL +#define PSWHST2_REG_DBG_FORCE_VALID 0x29e064UL +#define PSWHST2_REG_DBG_FORCE_FRAME 0x29e068UL +#define PSWRD_REG_DBG_DWORD_ENABLE 0x29c044UL +#define PSWRD_REG_DBG_SHIFT 0x29c048UL +#define PSWRD_REG_DBG_FORCE_VALID 0x29c04cUL +#define PSWRD_REG_DBG_FORCE_FRAME 0x29c050UL +#define PSWRD2_REG_DBG_SELECT 0x29d400UL +#define PSWRD2_REG_DBG_DWORD_ENABLE 0x29d404UL +#define PSWRD2_REG_DBG_SHIFT 0x29d408UL +#define PSWRD2_REG_DBG_FORCE_VALID 0x29d40cUL +#define PSWRD2_REG_DBG_FORCE_FRAME 0x29d410UL +#define PSWWR_REG_DBG_SELECT 0x29a084UL +#define PSWWR_REG_DBG_DWORD_ENABLE 0x29a088UL +#define PSWWR_REG_DBG_SHIFT 0x29a08cUL +#define PSWWR_REG_DBG_FORCE_VALID 0x29a090UL +#define PSWWR_REG_DBG_FORCE_FRAME 0x29a094UL +#define PSWRQ_REG_DBG_DWORD_ENABLE 0x280024UL +#define PSWRQ_REG_DBG_SHIFT 0x280028UL +#define PSWRQ_REG_DBG_FORCE_VALID 0x28002cUL +#define PSWRQ_REG_DBG_FORCE_FRAME 0x280030UL +#define PSWRQ2_REG_DBG_SELECT 0x240100UL +#define PSWRQ2_REG_DBG_DWORD_ENABLE 0x240104UL +#define PSWRQ2_REG_DBG_SHIFT 0x240108UL +#define PSWRQ2_REG_DBG_FORCE_VALID 0x24010cUL +#define PSWRQ2_REG_DBG_FORCE_FRAME 0x240110UL +#define PGLCS_REG_DBG_SELECT 0x001d14UL +#define PGLCS_REG_DBG_DWORD_ENABLE 0x001d18UL +#define PGLCS_REG_DBG_SHIFT 0x001d1cUL +#define PGLCS_REG_DBG_FORCE_VALID 0x001d20UL +#define PGLCS_REG_DBG_FORCE_FRAME 0x001d24UL +#define PTU_REG_DBG_SELECT 0x560100UL +#define PTU_REG_DBG_DWORD_ENABLE 0x560104UL +#define PTU_REG_DBG_SHIFT 0x560108UL +#define PTU_REG_DBG_FORCE_VALID 0x56010cUL +#define PTU_REG_DBG_FORCE_FRAME 0x560110UL 
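Nearly every block in reg_addr.h exposes the same five-register debug group (..._DBG_SELECT, ..._DBG_DWORD_ENABLE, ..._DBG_SHIFT, ..._DBG_FORCE_VALID, ..._DBG_FORCE_FRAME), differing only in base address. The sketch below shows one way a dump routine could describe each group once and iterate over a table instead of open-coding every block; the struct and table names are hypothetical and not taken from this patch, and the addresses are repeated from the hunk above only to keep the sketch self-contained.

        /* Hypothetical grouping of the per-block debug registers. */
        #include <stdint.h>

        #define GRC_REG_DBG_SELECT           0x0500a4UL
        #define GRC_REG_DBG_DWORD_ENABLE     0x0500a8UL
        #define GRC_REG_DBG_SHIFT            0x0500acUL
        #define GRC_REG_DBG_FORCE_VALID      0x0500b0UL
        #define GRC_REG_DBG_FORCE_FRAME      0x0500b4UL
        #define PTU_REG_DBG_SELECT           0x560100UL
        #define PTU_REG_DBG_DWORD_ENABLE     0x560104UL
        #define PTU_REG_DBG_SHIFT            0x560108UL
        #define PTU_REG_DBG_FORCE_VALID      0x56010cUL
        #define PTU_REG_DBG_FORCE_FRAME      0x560110UL

        struct dbg_block_regs {
                const char *name;
                uint32_t select;
                uint32_t dword_enable;
                uint32_t shift;
                uint32_t force_valid;
                uint32_t force_frame;
        };

        /* One entry per block; a dump routine would walk this array and
         * program the five registers of each entry through the driver's
         * usual register-access layer.
         */
        static const struct dbg_block_regs dbg_blocks[] = {
                { "grc", GRC_REG_DBG_SELECT, GRC_REG_DBG_DWORD_ENABLE,
                  GRC_REG_DBG_SHIFT, GRC_REG_DBG_FORCE_VALID,
                  GRC_REG_DBG_FORCE_FRAME },
                { "ptu", PTU_REG_DBG_SELECT, PTU_REG_DBG_DWORD_ENABLE,
                  PTU_REG_DBG_SHIFT, PTU_REG_DBG_FORCE_VALID,
                  PTU_REG_DBG_FORCE_FRAME },
        };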
+#define DMAE_REG_DBG_SELECT 0x00c510UL +#define DMAE_REG_DBG_DWORD_ENABLE 0x00c514UL +#define DMAE_REG_DBG_SHIFT 0x00c518UL +#define DMAE_REG_DBG_FORCE_VALID 0x00c51cUL +#define DMAE_REG_DBG_FORCE_FRAME 0x00c520UL +#define TCM_REG_DBG_SELECT 0x1180040UL +#define TCM_REG_DBG_DWORD_ENABLE 0x1180044UL +#define TCM_REG_DBG_SHIFT 0x1180048UL +#define TCM_REG_DBG_FORCE_VALID 0x118004cUL +#define TCM_REG_DBG_FORCE_FRAME 0x1180050UL +#define MCM_REG_DBG_SELECT 0x1200040UL +#define MCM_REG_DBG_DWORD_ENABLE 0x1200044UL +#define MCM_REG_DBG_SHIFT 0x1200048UL +#define MCM_REG_DBG_FORCE_VALID 0x120004cUL +#define MCM_REG_DBG_FORCE_FRAME 0x1200050UL +#define UCM_REG_DBG_SELECT 0x1280050UL +#define UCM_REG_DBG_DWORD_ENABLE 0x1280054UL +#define UCM_REG_DBG_SHIFT 0x1280058UL +#define UCM_REG_DBG_FORCE_VALID 0x128005cUL +#define UCM_REG_DBG_FORCE_FRAME 0x1280060UL +#define XCM_REG_DBG_SELECT 0x1000040UL +#define XCM_REG_DBG_DWORD_ENABLE 0x1000044UL +#define XCM_REG_DBG_SHIFT 0x1000048UL +#define XCM_REG_DBG_FORCE_VALID 0x100004cUL +#define XCM_REG_DBG_FORCE_FRAME 0x1000050UL +#define YCM_REG_DBG_SELECT 0x1080040UL +#define YCM_REG_DBG_DWORD_ENABLE 0x1080044UL +#define YCM_REG_DBG_SHIFT 0x1080048UL +#define YCM_REG_DBG_FORCE_VALID 0x108004cUL +#define YCM_REG_DBG_FORCE_FRAME 0x1080050UL +#define PCM_REG_DBG_SELECT 0x1100040UL +#define PCM_REG_DBG_DWORD_ENABLE 0x1100044UL +#define PCM_REG_DBG_SHIFT 0x1100048UL +#define PCM_REG_DBG_FORCE_VALID 0x110004cUL +#define PCM_REG_DBG_FORCE_FRAME 0x1100050UL +#define QM_REG_DBG_SELECT 0x2f2e74UL +#define QM_REG_DBG_DWORD_ENABLE 0x2f2e78UL +#define QM_REG_DBG_SHIFT 0x2f2e7cUL +#define QM_REG_DBG_FORCE_VALID 0x2f2e80UL +#define QM_REG_DBG_FORCE_FRAME 0x2f2e84UL +#define TM_REG_DBG_SELECT 0x2c07a8UL +#define TM_REG_DBG_DWORD_ENABLE 0x2c07acUL +#define TM_REG_DBG_SHIFT 0x2c07b0UL +#define TM_REG_DBG_FORCE_VALID 0x2c07b4UL +#define TM_REG_DBG_FORCE_FRAME 0x2c07b8UL +#define DORQ_REG_DBG_SELECT 0x100ad0UL +#define DORQ_REG_DBG_DWORD_ENABLE 0x100ad4UL +#define DORQ_REG_DBG_SHIFT 0x100ad8UL +#define DORQ_REG_DBG_FORCE_VALID 0x100adcUL +#define DORQ_REG_DBG_FORCE_FRAME 0x100ae0UL +#define BRB_REG_DBG_SELECT 0x340ed0UL +#define BRB_REG_DBG_DWORD_ENABLE 0x340ed4UL +#define BRB_REG_DBG_SHIFT 0x340ed8UL +#define BRB_REG_DBG_FORCE_VALID 0x340edcUL +#define BRB_REG_DBG_FORCE_FRAME 0x340ee0UL +#define SRC_REG_DBG_SELECT 0x238700UL +#define SRC_REG_DBG_DWORD_ENABLE 0x238704UL +#define SRC_REG_DBG_SHIFT 0x238708UL +#define SRC_REG_DBG_FORCE_VALID 0x23870cUL +#define SRC_REG_DBG_FORCE_FRAME 0x238710UL +#define PRS_REG_DBG_SELECT 0x1f0b6cUL +#define PRS_REG_DBG_DWORD_ENABLE 0x1f0b70UL +#define PRS_REG_DBG_SHIFT 0x1f0b74UL +#define PRS_REG_DBG_FORCE_VALID 0x1f0ba0UL +#define PRS_REG_DBG_FORCE_FRAME 0x1f0ba4UL +#define TSDM_REG_DBG_SELECT 0xfb0e28UL +#define TSDM_REG_DBG_DWORD_ENABLE 0xfb0e2cUL +#define TSDM_REG_DBG_SHIFT 0xfb0e30UL +#define TSDM_REG_DBG_FORCE_VALID 0xfb0e34UL +#define TSDM_REG_DBG_FORCE_FRAME 0xfb0e38UL +#define MSDM_REG_DBG_SELECT 0xfc0e28UL +#define MSDM_REG_DBG_DWORD_ENABLE 0xfc0e2cUL +#define MSDM_REG_DBG_SHIFT 0xfc0e30UL +#define MSDM_REG_DBG_FORCE_VALID 0xfc0e34UL +#define MSDM_REG_DBG_FORCE_FRAME 0xfc0e38UL +#define USDM_REG_DBG_SELECT 0xfd0e28UL +#define USDM_REG_DBG_DWORD_ENABLE 0xfd0e2cUL +#define USDM_REG_DBG_SHIFT 0xfd0e30UL +#define USDM_REG_DBG_FORCE_VALID 0xfd0e34UL +#define USDM_REG_DBG_FORCE_FRAME 0xfd0e38UL +#define XSDM_REG_DBG_SELECT 0xf80e28UL +#define XSDM_REG_DBG_DWORD_ENABLE 0xf80e2cUL +#define XSDM_REG_DBG_SHIFT 0xf80e30UL +#define 
XSDM_REG_DBG_FORCE_VALID 0xf80e34UL +#define XSDM_REG_DBG_FORCE_FRAME 0xf80e38UL +#define YSDM_REG_DBG_SELECT 0xf90e28UL +#define YSDM_REG_DBG_DWORD_ENABLE 0xf90e2cUL +#define YSDM_REG_DBG_SHIFT 0xf90e30UL +#define YSDM_REG_DBG_FORCE_VALID 0xf90e34UL +#define YSDM_REG_DBG_FORCE_FRAME 0xf90e38UL +#define PSDM_REG_DBG_SELECT 0xfa0e28UL +#define PSDM_REG_DBG_DWORD_ENABLE 0xfa0e2cUL +#define PSDM_REG_DBG_SHIFT 0xfa0e30UL +#define PSDM_REG_DBG_FORCE_VALID 0xfa0e34UL +#define PSDM_REG_DBG_FORCE_FRAME 0xfa0e38UL +#define TSEM_REG_DBG_SELECT 0x1701528UL +#define TSEM_REG_DBG_DWORD_ENABLE 0x170152cUL +#define TSEM_REG_DBG_SHIFT 0x1701530UL +#define TSEM_REG_DBG_FORCE_VALID 0x1701534UL +#define TSEM_REG_DBG_FORCE_FRAME 0x1701538UL +#define MSEM_REG_DBG_SELECT 0x1801528UL +#define MSEM_REG_DBG_DWORD_ENABLE 0x180152cUL +#define MSEM_REG_DBG_SHIFT 0x1801530UL +#define MSEM_REG_DBG_FORCE_VALID 0x1801534UL +#define MSEM_REG_DBG_FORCE_FRAME 0x1801538UL +#define USEM_REG_DBG_SELECT 0x1901528UL +#define USEM_REG_DBG_DWORD_ENABLE 0x190152cUL +#define USEM_REG_DBG_SHIFT 0x1901530UL +#define USEM_REG_DBG_FORCE_VALID 0x1901534UL +#define USEM_REG_DBG_FORCE_FRAME 0x1901538UL +#define XSEM_REG_DBG_SELECT 0x1401528UL +#define XSEM_REG_DBG_DWORD_ENABLE 0x140152cUL +#define XSEM_REG_DBG_SHIFT 0x1401530UL +#define XSEM_REG_DBG_FORCE_VALID 0x1401534UL +#define XSEM_REG_DBG_FORCE_FRAME 0x1401538UL +#define YSEM_REG_DBG_SELECT 0x1501528UL +#define YSEM_REG_DBG_DWORD_ENABLE 0x150152cUL +#define YSEM_REG_DBG_SHIFT 0x1501530UL +#define YSEM_REG_DBG_FORCE_VALID 0x1501534UL +#define YSEM_REG_DBG_FORCE_FRAME 0x1501538UL +#define PSEM_REG_DBG_SELECT 0x1601528UL +#define PSEM_REG_DBG_DWORD_ENABLE 0x160152cUL +#define PSEM_REG_DBG_SHIFT 0x1601530UL +#define PSEM_REG_DBG_FORCE_VALID 0x1601534UL +#define PSEM_REG_DBG_FORCE_FRAME 0x1601538UL +#define RSS_REG_DBG_SELECT 0x238c4cUL +#define RSS_REG_DBG_DWORD_ENABLE 0x238c50UL +#define RSS_REG_DBG_SHIFT 0x238c54UL +#define RSS_REG_DBG_FORCE_VALID 0x238c58UL +#define RSS_REG_DBG_FORCE_FRAME 0x238c5cUL +#define TMLD_REG_DBG_SELECT 0x4d1600UL +#define TMLD_REG_DBG_DWORD_ENABLE 0x4d1604UL +#define TMLD_REG_DBG_SHIFT 0x4d1608UL +#define TMLD_REG_DBG_FORCE_VALID 0x4d160cUL +#define TMLD_REG_DBG_FORCE_FRAME 0x4d1610UL +#define MULD_REG_DBG_SELECT 0x4e1600UL +#define MULD_REG_DBG_DWORD_ENABLE 0x4e1604UL +#define MULD_REG_DBG_SHIFT 0x4e1608UL +#define MULD_REG_DBG_FORCE_VALID 0x4e160cUL +#define MULD_REG_DBG_FORCE_FRAME 0x4e1610UL +#define YULD_REG_DBG_SELECT 0x4c9600UL +#define YULD_REG_DBG_DWORD_ENABLE 0x4c9604UL +#define YULD_REG_DBG_SHIFT 0x4c9608UL +#define YULD_REG_DBG_FORCE_VALID 0x4c960cUL +#define YULD_REG_DBG_FORCE_FRAME 0x4c9610UL +#define XYLD_REG_DBG_SELECT 0x4c1600UL +#define XYLD_REG_DBG_DWORD_ENABLE 0x4c1604UL +#define XYLD_REG_DBG_SHIFT 0x4c1608UL +#define XYLD_REG_DBG_FORCE_VALID 0x4c160cUL +#define XYLD_REG_DBG_FORCE_FRAME 0x4c1610UL +#define PRM_REG_DBG_SELECT 0x2306a8UL +#define PRM_REG_DBG_DWORD_ENABLE 0x2306acUL +#define PRM_REG_DBG_SHIFT 0x2306b0UL +#define PRM_REG_DBG_FORCE_VALID 0x2306b4UL +#define PRM_REG_DBG_FORCE_FRAME 0x2306b8UL +#define PBF_PB1_REG_DBG_SELECT 0xda0728UL +#define PBF_PB1_REG_DBG_DWORD_ENABLE 0xda072cUL +#define PBF_PB1_REG_DBG_SHIFT 0xda0730UL +#define PBF_PB1_REG_DBG_FORCE_VALID 0xda0734UL +#define PBF_PB1_REG_DBG_FORCE_FRAME 0xda0738UL +#define PBF_PB2_REG_DBG_SELECT 0xda4728UL +#define PBF_PB2_REG_DBG_DWORD_ENABLE 0xda472cUL +#define PBF_PB2_REG_DBG_SHIFT 0xda4730UL +#define PBF_PB2_REG_DBG_FORCE_VALID 0xda4734UL +#define 
PBF_PB2_REG_DBG_FORCE_FRAME 0xda4738UL +#define RPB_REG_DBG_SELECT 0x23c728UL +#define RPB_REG_DBG_DWORD_ENABLE 0x23c72cUL +#define RPB_REG_DBG_SHIFT 0x23c730UL +#define RPB_REG_DBG_FORCE_VALID 0x23c734UL +#define RPB_REG_DBG_FORCE_FRAME 0x23c738UL +#define BTB_REG_DBG_SELECT 0xdb08c8UL +#define BTB_REG_DBG_DWORD_ENABLE 0xdb08ccUL +#define BTB_REG_DBG_SHIFT 0xdb08d0UL +#define BTB_REG_DBG_FORCE_VALID 0xdb08d4UL +#define BTB_REG_DBG_FORCE_FRAME 0xdb08d8UL +#define PBF_REG_DBG_SELECT 0xd80060UL +#define PBF_REG_DBG_DWORD_ENABLE 0xd80064UL +#define PBF_REG_DBG_SHIFT 0xd80068UL +#define PBF_REG_DBG_FORCE_VALID 0xd8006cUL +#define PBF_REG_DBG_FORCE_FRAME 0xd80070UL +#define RDIF_REG_DBG_SELECT 0x300500UL +#define RDIF_REG_DBG_DWORD_ENABLE 0x300504UL +#define RDIF_REG_DBG_SHIFT 0x300508UL +#define RDIF_REG_DBG_FORCE_VALID 0x30050cUL +#define RDIF_REG_DBG_FORCE_FRAME 0x300510UL +#define TDIF_REG_DBG_SELECT 0x310500UL +#define TDIF_REG_DBG_DWORD_ENABLE 0x310504UL +#define TDIF_REG_DBG_SHIFT 0x310508UL +#define TDIF_REG_DBG_FORCE_VALID 0x31050cUL +#define TDIF_REG_DBG_FORCE_FRAME 0x310510UL +#define CDU_REG_DBG_SELECT 0x580704UL +#define CDU_REG_DBG_DWORD_ENABLE 0x580708UL +#define CDU_REG_DBG_SHIFT 0x58070cUL +#define CDU_REG_DBG_FORCE_VALID 0x580710UL +#define CDU_REG_DBG_FORCE_FRAME 0x580714UL +#define CCFC_REG_DBG_SELECT 0x2e0500UL +#define CCFC_REG_DBG_DWORD_ENABLE 0x2e0504UL +#define CCFC_REG_DBG_SHIFT 0x2e0508UL +#define CCFC_REG_DBG_FORCE_VALID 0x2e050cUL +#define CCFC_REG_DBG_FORCE_FRAME 0x2e0510UL +#define TCFC_REG_DBG_SELECT 0x2d0500UL +#define TCFC_REG_DBG_DWORD_ENABLE 0x2d0504UL +#define TCFC_REG_DBG_SHIFT 0x2d0508UL +#define TCFC_REG_DBG_FORCE_VALID 0x2d050cUL +#define TCFC_REG_DBG_FORCE_FRAME 0x2d0510UL +#define IGU_REG_DBG_SELECT 0x181578UL +#define IGU_REG_DBG_DWORD_ENABLE 0x18157cUL +#define IGU_REG_DBG_SHIFT 0x181580UL +#define IGU_REG_DBG_FORCE_VALID 0x181584UL +#define IGU_REG_DBG_FORCE_FRAME 0x181588UL +#define CAU_REG_DBG_SELECT 0x1c0ea8UL +#define CAU_REG_DBG_DWORD_ENABLE 0x1c0eacUL +#define CAU_REG_DBG_SHIFT 0x1c0eb0UL +#define CAU_REG_DBG_FORCE_VALID 0x1c0eb4UL +#define CAU_REG_DBG_FORCE_FRAME 0x1c0eb8UL +#define UMAC_REG_DBG_SELECT 0x051094UL +#define UMAC_REG_DBG_DWORD_ENABLE 0x051098UL +#define UMAC_REG_DBG_SHIFT 0x05109cUL +#define UMAC_REG_DBG_FORCE_VALID 0x0510a0UL +#define UMAC_REG_DBG_FORCE_FRAME 0x0510a4UL +#define NIG_REG_DBG_SELECT 0x502140UL +#define NIG_REG_DBG_DWORD_ENABLE 0x502144UL +#define NIG_REG_DBG_SHIFT 0x502148UL +#define NIG_REG_DBG_FORCE_VALID 0x50214cUL +#define NIG_REG_DBG_FORCE_FRAME 0x502150UL +#define WOL_REG_DBG_SELECT 0x600140UL +#define WOL_REG_DBG_DWORD_ENABLE 0x600144UL +#define WOL_REG_DBG_SHIFT 0x600148UL +#define WOL_REG_DBG_FORCE_VALID 0x60014cUL +#define WOL_REG_DBG_FORCE_FRAME 0x600150UL +#define BMBN_REG_DBG_SELECT 0x610140UL +#define BMBN_REG_DBG_DWORD_ENABLE 0x610144UL +#define BMBN_REG_DBG_SHIFT 0x610148UL +#define BMBN_REG_DBG_FORCE_VALID 0x61014cUL +#define BMBN_REG_DBG_FORCE_FRAME 0x610150UL +#define NWM_REG_DBG_SELECT 0x8000ecUL +#define NWM_REG_DBG_DWORD_ENABLE 0x8000f0UL +#define NWM_REG_DBG_SHIFT 0x8000f4UL +#define NWM_REG_DBG_FORCE_VALID 0x8000f8UL +#define NWM_REG_DBG_FORCE_FRAME 0x8000fcUL +#define BRB_REG_BIG_RAM_ADDRESS 0x340800UL +#define BRB_REG_BIG_RAM_DATA 0x341500UL +#define BTB_REG_BIG_RAM_ADDRESS 0xdb0800UL +#define BTB_REG_BIG_RAM_DATA 0xdb0c00UL +#define BMB_REG_BIG_RAM_ADDRESS 0x540800UL +#define BMB_REG_BIG_RAM_DATA 0x540f00UL +#define MISCS_REG_RESET_PL_UA 0x009050UL +#define MISC_REG_RESET_PL_UA 
0x008050UL +#define MISC_REG_RESET_PL_HV 0x008060UL +#define MISC_REG_RESET_PL_PDA_VMAIN_1 0x008070UL +#define MISC_REG_RESET_PL_PDA_VMAIN_2 0x008080UL +#define SEM_FAST_REG_INT_RAM 0x020000UL +#define DBG_REG_DBG_BLOCK_ON 0x010454UL +#define DBG_REG_FRAMING_MODE 0x010058UL +#define SEM_FAST_REG_DEBUG_MODE 0x000744UL +#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL +#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL +#define SEM_FAST_REG_FILTER_CID 0x000754UL +#define SEM_FAST_REG_EVENT_ID_RANGE_STRT 0x000760UL +#define SEM_FAST_REG_EVENT_ID_RANGE_END 0x000764UL +#define SEM_FAST_REG_FILTER_EVENT_ID 0x000758UL +#define SEM_FAST_REG_EVENT_ID_MASK 0x00075cUL +#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL +#define SEM_FAST_REG_DBG_MODE6_SRC_DISABLE 0x000750UL +#define SEM_FAST_REG_DEBUG_ACTIVE 0x000740UL +#define SEM_FAST_REG_RECORD_FILTER_ENABLE 0x000768UL +#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL +#define DBG_REG_FILTER_ENABLE 0x0109d0UL +#define DBG_REG_TRIGGER_ENABLE 0x01054cUL +#define DBG_REG_FILTER_CNSTR_OPRTN_0 0x010a28UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OPRTN_0 0x01071cUL +#define DBG_REG_FILTER_CNSTR_DATA_0 0x0109d8UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_0 0x01059cUL +#define DBG_REG_FILTER_CNSTR_DATA_MASK_0 0x0109f8UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_DATA_MASK_0 0x01065cUL +#define DBG_REG_FILTER_CNSTR_FRAME_0 0x0109e8UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_0 0x0105fcUL +#define DBG_REG_FILTER_CNSTR_FRAME_MASK_0 0x010a08UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_FRAME_MASK_0 0x0106bcUL +#define DBG_REG_FILTER_CNSTR_OFFSET_0 0x010a18UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_OFFSET_0 0x0107dcUL +#define DBG_REG_FILTER_CNSTR_RANGE_0 0x010a38UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_RANGE_0 0x01077cUL +#define DBG_REG_FILTER_CNSTR_CYCLIC_0 0x010a68UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_CYCLIC_0 0x0108fcUL +#define DBG_REG_FILTER_CNSTR_MUST_0 0x010a48UL +#define DBG_REG_TRIGGER_STATE_SET_CNSTR_MUST_0 0x01083cUL +#define DBG_REG_INTR_BUFFER 0x014000UL +#define DBG_REG_INTR_BUFFER_WR_PTR 0x010404UL +#define DBG_REG_WRAP_ON_INT_BUFFER 0x010418UL +#define DBG_REG_INTR_BUFFER_RD_PTR 0x010400UL +#define DBG_REG_EXT_BUFFER_WR_PTR 0x010410UL +#define DBG_REG_WRAP_ON_EXT_BUFFER 0x01041cUL +#define SEM_FAST_REG_STALL_0 0x000488UL +#define SEM_FAST_REG_STALLED 0x000494UL +#define SEM_FAST_REG_STORM_REG_FILE 0x008000UL +#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL +#define SEM_FAST_REG_VFC_ADDR 0x000b44UL +#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL +#define SEM_FAST_REG_VFC_DATA_WR 0x000b40UL +#define SEM_FAST_REG_VFC_ADDR 0x000b44UL +#define SEM_FAST_REG_VFC_DATA_RD 0x000b48UL +#define RSS_REG_RSS_RAM_ADDR 0x238c30UL +#define RSS_REG_RSS_RAM_DATA 0x238c20UL +#define MISCS_REG_BLOCK_256B_EN 0x009074UL +#define MCP_REG_CPU_REG_FILE 0xe05200UL +#define MCP_REG_CPU_REG_FILE_SIZE 32 +#define DBG_REG_CALENDAR_OUT_DATA 0x010480UL +#define DBG_REG_FULL_MODE 0x010060UL +#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_LSB 0x010430UL +#define DBG_REG_PCI_EXT_BUFFER_STRT_ADDR_MSB 0x010434UL +#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL +#define DBG_REG_PCI_EXT_BUFFER_SIZE 0x010438UL +#define DBG_REG_PCI_FUNC_NUM 0x010a98UL +#define DBG_REG_PCI_LOGIC_ADDR 0x010460UL +#define DBG_REG_PCI_REQ_CREDIT 0x010440UL +#define DBG_REG_DEBUG_TARGET 0x01005cUL +#define DBG_REG_OUTPUT_ENABLE 0x01000cUL +#define DBG_REG_OUTPUT_ENABLE 0x01000cUL +#define DBG_REG_DEBUG_TARGET 0x01005cUL +#define DBG_REG_OTHER_ENGINE_MODE 0x010010UL +#define NIG_REG_DEBUG_PORT 0x5020d0UL 
+#define DBG_REG_ETHERNET_HDR_WIDTH 0x010b38UL +#define DBG_REG_ETHERNET_HDR_7 0x010b34UL +#define DBG_REG_ETHERNET_HDR_6 0x010b30UL +#define DBG_REG_ETHERNET_HDR_5 0x010b2cUL +#define DBG_REG_ETHERNET_HDR_4 0x010b28UL +#define DBG_REG_TARGET_PACKET_SIZE 0x010b3cUL +#define DBG_REG_NIG_DATA_LIMIT_SIZE 0x01043cUL +#define DBG_REG_TIMESTAMP_VALID_EN 0x010b58UL +#define DBG_REG_TIMESTAMP_FRAME_EN 0x010b54UL +#define DBG_REG_TIMESTAMP_TICK 0x010b50UL +#define DBG_REG_FILTER_ID_NUM 0x0109d4UL +#define DBG_REG_FILTER_MSG_LENGTH_ENABLE 0x010a78UL +#define DBG_REG_FILTER_MSG_LENGTH 0x010a7cUL +#define DBG_REG_RCRD_ON_WINDOW_PRE_NUM_CHUNKS 0x010a90UL +#define DBG_REG_RCRD_ON_WINDOW_POST_NUM_CYCLES 0x010a94UL +#define DBG_REG_RCRD_ON_WINDOW_PRE_TRGR_EVNT_MODE 0x010a88UL +#define DBG_REG_RCRD_ON_WINDOW_POST_TRGR_EVNT_MODE 0x010a8cUL +#define DBG_REG_TRIGGER_ENABLE 0x01054cUL +#define DBG_REG_TRIGGER_STATE_ID_0 0x010554UL +#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_ENABLE_0 0x01095cUL +#define DBG_REG_TRIGGER_STATE_MSG_LENGTH_0 0x010968UL +#define DBG_REG_TRIGGER_STATE_SET_COUNT_0 0x010584UL +#define DBG_REG_TRIGGER_STATE_SET_NXT_STATE_0 0x01056cUL +#define DBG_REG_NO_GRANT_ON_FULL 0x010458UL +#define DBG_REG_STORM_ID_NUM 0x010b14UL +#define DBG_REG_CALENDAR_SLOT0 0x010014UL +#define DBG_REG_HW_ID_NUM 0x010b10UL +#define DBG_REG_FILTER_ENABLE 0x0109d0UL +#define DBG_REG_TIMESTAMP 0x010b4cUL +#define DBG_REG_CPU_TIMEOUT 0x010450UL +#define DBG_REG_TRIGGER_STATUS_CUR_STATE 0x010b60UL +#define GRC_REG_TRACE_FIFO_VALID_DATA 0x050064UL +#define GRC_REG_TRACE_FIFO 0x050068UL +#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x181530UL +#define IGU_REG_ERROR_HANDLING_MEMORY 0x181520UL +#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL +#define GRC_REG_NUMBER_VALID_OVERRIDE_WINDOW 0x05040cUL +#define GRC_REG_PROTECTION_OVERRIDE_WINDOW 0x050500UL +#define TSEM_REG_VF_ERROR 0x1700408UL +#define USEM_REG_VF_ERROR 0x1900408UL +#define MSEM_REG_VF_ERROR 0x1800408UL +#define XSEM_REG_VF_ERROR 0x1400408UL +#define YSEM_REG_VF_ERROR 0x1500408UL +#define PSEM_REG_VF_ERROR 0x1600408UL +#define PGLUE_B_REG_WAS_ERROR_VF_31_0_CLR 0x2aa118UL +#define IGU_REG_STATISTIC_NUM_VF_MSG_SENT 0x180408UL +#define IGU_REG_VF_CONFIGURATION 0x180804UL +#define PSWHST_REG_ZONE_PERMISSION_TABLE 0x2a0800UL +#define DORQ_REG_VF_USAGE_CNT 0x1009c4UL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_CONS_VOQ0 0xd806ccUL +#define PBF_REG_NUM_BLOCKS_ALLOCATED_PROD_VOQ0 0xd806c8UL +#define PRS_REG_MSG_CT_MAIN_0 0x1f0a24UL +#define PRS_REG_MSG_CT_LB_0 0x1f0a28UL +#define BRB_REG_PER_TC_COUNTERS 0x341a00UL + +/* added */ +#define DORQ_REG_PF_DPI_BIT_SHIFT 0x100450UL +#define DORQ_REG_PF_ICID_BIT_SHIFT_NORM 0x100448UL +#define DORQ_REG_PF_MIN_ADDR_REG1 0x100400UL +#define MISCS_REG_FUNCTION_HIDE 0x0096f0UL +#define PCIE_REG_PRTY_MASK 0x0547b4UL +#define PGLUE_B_REG_VF_BAR0_SIZE 0x2aaeb4UL +#define BAR0_MAP_REG_YSDM_RAM 0x1e80000UL +#define SEM_FAST_REG_INT_RAM_SIZE 20480 +#define MCP_REG_SCRATCH_SIZE 57344 + +#define CDU_REG_SEGMENT0_PARAMS_T0_TID_SIZE_SHIFT 24 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_SIZE_SHIFT 24 +#define CDU_REG_SEGMENT1_PARAMS_T1_TID_BLOCK_WASTE_SHIFT 16 +#define DORQ_REG_DB_DROP_DETAILS_ADDRESS 0x100a1cUL diff --git a/drivers/net/qede/qede_eth_if.c b/drivers/net/qede/qede_eth_if.c new file mode 100644 index 00000000..b6f6487d --- /dev/null +++ b/drivers/net/qede/qede_eth_if.c @@ -0,0 +1,463 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. 
+ * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "qede_ethdev.h" + +static int +qed_start_vport(struct ecore_dev *edev, struct qed_start_vport_params *p_params) +{ + int rc, i; + + for_each_hwfn(edev, i) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + u8 tx_switching = 0; + struct ecore_sp_vport_start_params start = { 0 }; + + start.tpa_mode = p_params->gro_enable ? ECORE_TPA_MODE_GRO : + ECORE_TPA_MODE_NONE; + start.remove_inner_vlan = p_params->remove_inner_vlan; + start.tx_switching = tx_switching; + start.only_untagged = false; /* untagged only */ + start.drop_ttl0 = p_params->drop_ttl0; + start.concrete_fid = p_hwfn->hw_info.concrete_fid; + start.opaque_fid = p_hwfn->hw_info.opaque_fid; + start.concrete_fid = p_hwfn->hw_info.concrete_fid; + start.handle_ptp_pkts = p_params->handle_ptp_pkts; + start.vport_id = p_params->vport_id; + start.max_buffers_per_cqe = 16; /* TODO-is this right */ + start.mtu = p_params->mtu; + /* @DPDK - Disable FW placement */ + start.zero_placement_offset = 1; + + rc = ecore_sp_vport_start(p_hwfn, &start); + if (rc) { + DP_ERR(edev, "Failed to start VPORT\n"); + return rc; + } + + ecore_hw_start_fastpath(p_hwfn); + + DP_VERBOSE(edev, ECORE_MSG_SPQ, + "Started V-PORT %d with MTU %d\n", + p_params->vport_id, p_params->mtu); + } + + ecore_reset_vport_stats(edev); + + return 0; +} + +static int qed_stop_vport(struct ecore_dev *edev, uint8_t vport_id) +{ + int rc, i; + + for_each_hwfn(edev, i) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + rc = ecore_sp_vport_stop(p_hwfn, + p_hwfn->hw_info.opaque_fid, vport_id); + + if (rc) { + DP_ERR(edev, "Failed to stop VPORT\n"); + return rc; + } + } + + return 0; +} + +static int +qed_update_vport(struct ecore_dev *edev, struct qed_update_vport_params *params) +{ + struct ecore_sp_vport_update_params sp_params; + struct ecore_rss_params sp_rss_params; + int rc, i; + + memset(&sp_params, 0, sizeof(sp_params)); + memset(&sp_rss_params, 0, sizeof(sp_rss_params)); + + /* Translate protocol params into sp params */ + sp_params.vport_id = params->vport_id; + sp_params.update_vport_active_rx_flg = params->update_vport_active_flg; + sp_params.update_vport_active_tx_flg = params->update_vport_active_flg; + sp_params.vport_active_rx_flg = params->vport_active_flg; + sp_params.vport_active_tx_flg = params->vport_active_flg; + sp_params.update_inner_vlan_removal_flg = + params->update_inner_vlan_removal_flg; + sp_params.inner_vlan_removal_flg = params->inner_vlan_removal_flg; + sp_params.update_tx_switching_flg = params->update_tx_switching_flg; + sp_params.tx_switching_flg = params->tx_switching_flg; + sp_params.accept_any_vlan = params->accept_any_vlan; + sp_params.update_accept_any_vlan_flg = + params->update_accept_any_vlan_flg; + + /* RSS - is a bit tricky, since upper-layer isn't familiar with hwfns. + * We need to re-fix the rss values per engine for CMT. + */ + + if (edev->num_hwfns > 1 && params->update_rss_flg) { + struct qed_update_vport_rss_params *rss = ¶ms->rss_params; + int k, max = 0; + + /* Find largest entry, since it's possible RSS needs to + * be disabled [in case only 1 queue per-hwfn] + */ + for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++) + max = (max > rss->rss_ind_table[k]) ? 
+ max : rss->rss_ind_table[k]; + + /* Either fix RSS values or disable RSS */ + if (edev->num_hwfns < max + 1) { + int divisor = (max + edev->num_hwfns - 1) / + edev->num_hwfns; + + DP_VERBOSE(edev, ECORE_MSG_SPQ, + "CMT - fixing RSS values (modulo %02x)\n", + divisor); + + for (k = 0; k < ECORE_RSS_IND_TABLE_SIZE; k++) + rss->rss_ind_table[k] = + rss->rss_ind_table[k] % divisor; + } else { + DP_VERBOSE(edev, ECORE_MSG_SPQ, + "CMT - 1 queue per-hwfn; Disabling RSS\n"); + params->update_rss_flg = 0; + } + } + + /* Now, update the RSS configuration for actual configuration */ + if (params->update_rss_flg) { + sp_rss_params.update_rss_config = 1; + sp_rss_params.rss_enable = 1; + sp_rss_params.update_rss_capabilities = 1; + sp_rss_params.update_rss_ind_table = 1; + sp_rss_params.update_rss_key = 1; + sp_rss_params.rss_caps = ECORE_RSS_IPV4 | ECORE_RSS_IPV6 | + ECORE_RSS_IPV4_TCP | ECORE_RSS_IPV6_TCP; + sp_rss_params.rss_table_size_log = 7; /* 2^7 = 128 */ + rte_memcpy(sp_rss_params.rss_ind_table, + params->rss_params.rss_ind_table, + ECORE_RSS_IND_TABLE_SIZE * sizeof(uint16_t)); + rte_memcpy(sp_rss_params.rss_key, params->rss_params.rss_key, + ECORE_RSS_KEY_SIZE * sizeof(uint32_t)); + } + sp_params.rss_params = &sp_rss_params; + + for_each_hwfn(edev, i) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + + sp_params.opaque_fid = p_hwfn->hw_info.opaque_fid; + rc = ecore_sp_vport_update(p_hwfn, &sp_params, + ECORE_SPQ_MODE_EBLOCK, NULL); + if (rc) { + DP_ERR(edev, "Failed to update VPORT\n"); + return rc; + } + + DP_VERBOSE(edev, ECORE_MSG_SPQ, + "Updated V-PORT %d: active_flag %d [update %d]\n", + params->vport_id, params->vport_active_flg, + params->update_vport_active_flg); + } + + return 0; +} + +static int +qed_start_rxq(struct ecore_dev *edev, + uint8_t rss_id, uint8_t rx_queue_id, + uint8_t vport_id, uint16_t sb, + uint8_t sb_index, uint16_t bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod) +{ + struct ecore_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = rss_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + + rc = ecore_sp_eth_rx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + rx_queue_id / edev->num_hwfns, + vport_id, + vport_id, + sb, + sb_index, + bd_max_bytes, + bd_chain_phys_addr, + cqe_pbl_addr, cqe_pbl_size, pp_prod); + + if (rc) { + DP_ERR(edev, "Failed to start RXQ#%d\n", rx_queue_id); + return rc; + } + + DP_VERBOSE(edev, ECORE_MSG_SPQ, + "Started RX-Q %d [rss %d] on V-PORT %d and SB %d\n", + rx_queue_id, rss_id, vport_id, sb); + + return 0; +} + +static int +qed_stop_rxq(struct ecore_dev *edev, struct qed_stop_rxq_params *params) +{ + int rc, hwfn_index; + struct ecore_hwfn *p_hwfn; + + hwfn_index = params->rss_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + + rc = ecore_sp_eth_rx_queue_stop(p_hwfn, + params->rx_queue_id / edev->num_hwfns, + params->eq_completion_only, false); + if (rc) { + DP_ERR(edev, "Failed to stop RXQ#%d\n", params->rx_queue_id); + return rc; + } + + return 0; +} + +static int +qed_start_txq(struct ecore_dev *edev, + uint8_t rss_id, uint16_t tx_queue_id, + uint8_t vport_id, uint16_t sb, + uint8_t sb_index, + dma_addr_t pbl_addr, + uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell) +{ + struct ecore_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = rss_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + + rc = ecore_sp_eth_tx_queue_start(p_hwfn, + p_hwfn->hw_info.opaque_fid, + tx_queue_id / edev->num_hwfns, + vport_id, + vport_id, + sb, 
+ sb_index, + pbl_addr, pbl_size, pp_doorbell); + + if (rc) { + DP_ERR(edev, "Failed to start TXQ#%d\n", tx_queue_id); + return rc; + } + + DP_VERBOSE(edev, ECORE_MSG_SPQ, + "Started TX-Q %d [rss %d] on V-PORT %d and SB %d\n", + tx_queue_id, rss_id, vport_id, sb); + + return 0; +} + +static int +qed_stop_txq(struct ecore_dev *edev, struct qed_stop_txq_params *params) +{ + struct ecore_hwfn *p_hwfn; + int rc, hwfn_index; + + hwfn_index = params->rss_id % edev->num_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + + rc = ecore_sp_eth_tx_queue_stop(p_hwfn, + params->tx_queue_id / edev->num_hwfns); + if (rc) { + DP_ERR(edev, "Failed to stop TXQ#%d\n", params->tx_queue_id); + return rc; + } + + return 0; +} + +static int +qed_fp_cqe_completion(struct ecore_dev *edev, + uint8_t rss_id, struct eth_slow_path_rx_cqe *cqe) +{ + return ecore_eth_cqe_completion(&edev->hwfns[rss_id % edev->num_hwfns], + cqe); +} + +static int qed_fastpath_stop(struct ecore_dev *edev) +{ + ecore_hw_stop_fastpath(edev); + + return 0; +} + +static void +qed_get_vport_stats(struct ecore_dev *edev, struct ecore_eth_stats *stats) +{ + ecore_get_vport_stats(edev, stats); +} + +static int +qed_configure_filter_ucast(struct ecore_dev *edev, + struct qed_filter_ucast_params *params) +{ + struct ecore_filter_ucast ucast; + + if (!params->vlan_valid && !params->mac_valid) { + DP_NOTICE(edev, true, + "Tried configuring a unicast filter," + "but both MAC and VLAN are not set\n"); + return -EINVAL; + } + + memset(&ucast, 0, sizeof(ucast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + ucast.opcode = ECORE_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + ucast.opcode = ECORE_FILTER_REMOVE; + break; + case QED_FILTER_XCAST_TYPE_REPLACE: + ucast.opcode = ECORE_FILTER_REPLACE; + break; + default: + DP_NOTICE(edev, true, "Unknown unicast filter type %d\n", + params->type); + } + + if (params->vlan_valid && params->mac_valid) { + ucast.type = ECORE_FILTER_MAC_VLAN; + ether_addr_copy((struct ether_addr *)¶ms->mac, + (struct ether_addr *)&ucast.mac); + ucast.vlan = params->vlan; + } else if (params->mac_valid) { + ucast.type = ECORE_FILTER_MAC; + ether_addr_copy((struct ether_addr *)¶ms->mac, + (struct ether_addr *)&ucast.mac); + } else { + ucast.type = ECORE_FILTER_VLAN; + ucast.vlan = params->vlan; + } + + ucast.is_rx_filter = true; + ucast.is_tx_filter = true; + + return ecore_filter_ucast_cmd(edev, &ucast, ECORE_SPQ_MODE_CB, NULL); +} + +static int +qed_configure_filter_mcast(struct ecore_dev *edev, + struct qed_filter_mcast_params *params) +{ + struct ecore_filter_mcast mcast; + int i; + + memset(&mcast, 0, sizeof(mcast)); + switch (params->type) { + case QED_FILTER_XCAST_TYPE_ADD: + mcast.opcode = ECORE_FILTER_ADD; + break; + case QED_FILTER_XCAST_TYPE_DEL: + mcast.opcode = ECORE_FILTER_REMOVE; + break; + default: + DP_NOTICE(edev, true, "Unknown multicast filter type %d\n", + params->type); + } + + mcast.num_mc_addrs = params->num; + for (i = 0; i < mcast.num_mc_addrs; i++) + ether_addr_copy((struct ether_addr *)¶ms->mac[i], + (struct ether_addr *)&mcast.mac[i]); + + return ecore_filter_mcast_cmd(edev, &mcast, ECORE_SPQ_MODE_CB, NULL); +} + +int qed_configure_filter_rx_mode(struct ecore_dev *edev, + enum qed_filter_rx_mode_type type) +{ + struct ecore_filter_accept_flags flags; + + memset(&flags, 0, sizeof(flags)); + + flags.update_rx_mode_config = 1; + flags.update_tx_mode_config = 1; + flags.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | + ECORE_ACCEPT_MCAST_MATCHED | + ECORE_ACCEPT_BCAST; + + 
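/* The baseline built here accepts only unicast and multicast frames that
 * match a configured filter, plus broadcast; the Tx side gets the same
 * baseline just below. The promiscuous and all-multicast branches that
 * follow widen this by OR-ing in ECORE_ACCEPT_UCAST_UNMATCHED and/or
 * ECORE_ACCEPT_MCAST_UNMATCHED (and, for VFs in promiscuous mode, the Tx
 * filter as well). */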
flags.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | + ECORE_ACCEPT_MCAST_MATCHED | + ECORE_ACCEPT_BCAST; + + if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) { + flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; + if (IS_VF(edev)) { + flags.tx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED; + DP_INFO(edev, "Enabling Tx unmatched flag for VF\n"); + } + } else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) { + flags.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED; + } else if (type == (QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC | + QED_FILTER_RX_MODE_TYPE_PROMISC)) { + flags.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | + ECORE_ACCEPT_MCAST_UNMATCHED; + } + + return ecore_filter_accept_cmd(edev, 0, flags, false, false, + ECORE_SPQ_MODE_CB, NULL); +} + +static int +qed_configure_filter(struct ecore_dev *edev, struct qed_filter_params *params) +{ + switch (params->type) { + case QED_FILTER_TYPE_UCAST: + return qed_configure_filter_ucast(edev, ¶ms->filter.ucast); + case QED_FILTER_TYPE_MCAST: + return qed_configure_filter_mcast(edev, ¶ms->filter.mcast); + case QED_FILTER_TYPE_RX_MODE: + return qed_configure_filter_rx_mode(edev, + params->filter. + accept_flags); + default: + DP_NOTICE(edev, true, "Unknown filter type %d\n", + (int)params->type); + return -EINVAL; + } +} + +static const struct qed_eth_ops qed_eth_ops_pass = { + INIT_STRUCT_FIELD(common, &qed_common_ops_pass), + INIT_STRUCT_FIELD(fill_dev_info, &qed_fill_eth_dev_info), + INIT_STRUCT_FIELD(vport_start, &qed_start_vport), + INIT_STRUCT_FIELD(vport_stop, &qed_stop_vport), + INIT_STRUCT_FIELD(vport_update, &qed_update_vport), + INIT_STRUCT_FIELD(q_rx_start, &qed_start_rxq), + INIT_STRUCT_FIELD(q_tx_start, &qed_start_txq), + INIT_STRUCT_FIELD(q_rx_stop, &qed_stop_rxq), + INIT_STRUCT_FIELD(q_tx_stop, &qed_stop_txq), + INIT_STRUCT_FIELD(eth_cqe_completion, &qed_fp_cqe_completion), + INIT_STRUCT_FIELD(fastpath_stop, &qed_fastpath_stop), + INIT_STRUCT_FIELD(get_vport_stats, &qed_get_vport_stats), + INIT_STRUCT_FIELD(filter_config, &qed_configure_filter), +}; + +uint32_t qed_get_protocol_version(enum qed_protocol protocol) +{ + switch (protocol) { + case QED_PROTOCOL_ETH: + return QED_ETH_INTERFACE_VERSION; + default: + return 0; + } +} + +const struct qed_eth_ops *qed_get_eth_ops(void) +{ + return &qed_eth_ops_pass; +} diff --git a/drivers/net/qede/qede_eth_if.h b/drivers/net/qede/qede_eth_if.h new file mode 100644 index 00000000..26968eb0 --- /dev/null +++ b/drivers/net/qede/qede_eth_if.h @@ -0,0 +1,177 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#ifndef _QEDE_ETH_IF_H +#define _QEDE_ETH_IF_H + +#include "qede_if.h" + +/*forward decl */ +struct eth_slow_path_rx_cqe; + +#define INIT_STRUCT_FIELD(field, value) .field = value + +#define QED_ETH_INTERFACE_VERSION 609 + +#define QEDE_MAX_MCAST_FILTERS 64 + +enum qed_filter_rx_mode_type { + QED_FILTER_RX_MODE_TYPE_REGULAR, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, + QED_FILTER_RX_MODE_TYPE_PROMISC, +}; + +enum qed_filter_xcast_params_type { + QED_FILTER_XCAST_TYPE_ADD, + QED_FILTER_XCAST_TYPE_DEL, + QED_FILTER_XCAST_TYPE_REPLACE, +}; + +enum qed_filter_type { + QED_FILTER_TYPE_UCAST, + QED_FILTER_TYPE_MCAST, + QED_FILTER_TYPE_RX_MODE, + QED_MAX_FILTER_TYPES, +}; + +struct qed_dev_eth_info { + struct qed_dev_info common; + + uint8_t num_queues; + uint8_t num_tc; + + struct ether_addr port_mac; + uint8_t num_vlan_filters; + uint32_t num_mac_addrs; +}; + +struct qed_update_vport_rss_params { + uint16_t rss_ind_table[128]; + uint32_t rss_key[10]; + u8 rss_caps; +}; + +struct qed_stop_rxq_params { + uint8_t rss_id; + uint8_t rx_queue_id; + uint8_t vport_id; + bool eq_completion_only; +}; + +struct qed_update_vport_params { + uint8_t vport_id; + uint8_t update_vport_active_flg; + uint8_t vport_active_flg; + uint8_t update_inner_vlan_removal_flg; + uint8_t inner_vlan_removal_flg; + uint8_t update_tx_switching_flg; + uint8_t tx_switching_flg; + uint8_t update_accept_any_vlan_flg; + uint8_t accept_any_vlan; + uint8_t update_rss_flg; + struct qed_update_vport_rss_params rss_params; +}; + +struct qed_start_vport_params { + bool remove_inner_vlan; + bool handle_ptp_pkts; + bool gro_enable; + bool drop_ttl0; + uint8_t vport_id; + uint16_t mtu; + bool clear_stats; +}; + +struct qed_stop_txq_params { + uint8_t rss_id; + uint8_t tx_queue_id; +}; + +struct qed_filter_ucast_params { + enum qed_filter_xcast_params_type type; + uint8_t vlan_valid; + uint16_t vlan; + uint8_t mac_valid; + unsigned char mac[ETHER_ADDR_LEN]; +}; + +struct qed_filter_mcast_params { + enum qed_filter_xcast_params_type type; + uint8_t num; + unsigned char mac[QEDE_MAX_MCAST_FILTERS][ETHER_ADDR_LEN]; +}; + +union qed_filter_type_params { + enum qed_filter_rx_mode_type accept_flags; + struct qed_filter_ucast_params ucast; + struct qed_filter_mcast_params mcast; +}; + +struct qed_filter_params { + enum qed_filter_type type; + union qed_filter_type_params filter; +}; + +struct qed_eth_ops { + const struct qed_common_ops *common; + + int (*fill_dev_info)(struct ecore_dev *edev, + struct qed_dev_eth_info *info); + + int (*vport_start)(struct ecore_dev *edev, + struct qed_start_vport_params *params); + + int (*vport_stop)(struct ecore_dev *edev, uint8_t vport_id); + + int (*vport_update)(struct ecore_dev *edev, + struct qed_update_vport_params *params); + + int (*q_rx_start)(struct ecore_dev *cdev, + uint8_t rss_id, uint8_t rx_queue_id, + uint8_t vport_id, uint16_t sb, + uint8_t sb_index, uint16_t bd_max_bytes, + dma_addr_t bd_chain_phys_addr, + dma_addr_t cqe_pbl_addr, + uint16_t cqe_pbl_size, void OSAL_IOMEM * *pp_prod); + + int (*q_rx_stop)(struct ecore_dev *edev, + struct qed_stop_rxq_params *params); + + int (*q_tx_start)(struct ecore_dev *edev, + uint8_t rss_id, uint16_t tx_queue_id, + uint8_t vport_id, uint16_t sb, + uint8_t sb_index, + dma_addr_t pbl_addr, + uint16_t pbl_size, void OSAL_IOMEM * *pp_doorbell); + + int (*q_tx_stop)(struct ecore_dev *edev, + struct qed_stop_txq_params *params); + + int (*eth_cqe_completion)(struct ecore_dev *edev, + uint8_t rss_id, + struct eth_slow_path_rx_cqe *cqe); + + int 
(*fastpath_stop)(struct ecore_dev *edev); + + void (*get_vport_stats)(struct ecore_dev *edev, + struct ecore_eth_stats *stats); + + int (*filter_config)(struct ecore_dev *edev, + struct qed_filter_params *params); +}; + +/* externs */ + +extern const struct qed_common_ops qed_common_ops_pass; + +const struct qed_eth_ops *qed_get_eth_ops(); + +int qed_configure_filter_rx_mode(struct ecore_dev *edev, + enum qed_filter_rx_mode_type type); + +#endif /* _QEDE_ETH_IF_H */ diff --git a/drivers/net/qede/qede_ethdev.c b/drivers/net/qede/qede_ethdev.c new file mode 100644 index 00000000..bb531be5 --- /dev/null +++ b/drivers/net/qede/qede_ethdev.c @@ -0,0 +1,1344 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include "qede_ethdev.h" +#include <rte_alarm.h> + +/* Globals */ +static const struct qed_eth_ops *qed_ops; +static const char *drivername = "qede pmd"; +static int64_t timer_period = 1; + +static void qede_interrupt_action(struct ecore_hwfn *p_hwfn) +{ + ecore_int_sp_dpc((osal_int_ptr_t)(p_hwfn)); +} + +static void +qede_interrupt_handler(__rte_unused struct rte_intr_handle *handle, void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + qede_interrupt_action(ECORE_LEADING_HWFN(edev)); + if (rte_intr_enable(ð_dev->pci_dev->intr_handle)) + DP_ERR(edev, "rte_intr_enable failed\n"); +} + +static void +qede_alloc_etherdev(struct qede_dev *qdev, struct qed_dev_eth_info *info) +{ + rte_memcpy(&qdev->dev_info, info, sizeof(*info)); + qdev->num_tc = qdev->dev_info.num_tc; + qdev->ops = qed_ops; +} + +static void qede_print_adapter_info(struct qede_dev *qdev) +{ + struct ecore_dev *edev = &qdev->edev; + struct qed_dev_info *info = &qdev->dev_info.common; + static char ver_str[QED_DRV_VER_STR_SIZE]; + + DP_INFO(edev, "*********************************\n"); + DP_INFO(edev, " Chip details : %s%d\n", + ECORE_IS_BB(edev) ? "BB" : "AH", + CHIP_REV_IS_A0(edev) ? 
0 : 1); + + sprintf(ver_str, "%s %s_%d.%d.%d.%d", QEDE_PMD_VER_PREFIX, + edev->ver_str, QEDE_PMD_VERSION_MAJOR, QEDE_PMD_VERSION_MINOR, + QEDE_PMD_VERSION_REVISION, QEDE_PMD_VERSION_PATCH); + strcpy(qdev->drv_ver, ver_str); + DP_INFO(edev, " Driver version : %s\n", ver_str); + + sprintf(ver_str, "%d.%d.%d.%d", info->fw_major, info->fw_minor, + info->fw_rev, info->fw_eng); + DP_INFO(edev, " Firmware version : %s\n", ver_str); + + sprintf(ver_str, "%d.%d.%d.%d", + (info->mfw_rev >> 24) & 0xff, + (info->mfw_rev >> 16) & 0xff, + (info->mfw_rev >> 8) & 0xff, (info->mfw_rev) & 0xff); + DP_INFO(edev, " Management firmware version : %s\n", ver_str); + + DP_INFO(edev, " Firmware file : %s\n", fw_file); + + DP_INFO(edev, "*********************************\n"); +} + +static int +qede_set_ucast_rx_mac(struct qede_dev *qdev, + enum qed_filter_xcast_params_type opcode, + uint8_t mac[ETHER_ADDR_LEN]) +{ + struct ecore_dev *edev = &qdev->edev; + struct qed_filter_params filter_cmd; + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.mac_valid = 1; + rte_memcpy(&filter_cmd.filter.ucast.mac[0], &mac[0], ETHER_ADDR_LEN); + return qdev->ops->filter_config(edev, &filter_cmd); +} + +static void +qede_mac_addr_add(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr, + uint32_t index, __rte_unused uint32_t pool) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + int rc; + + PMD_INIT_FUNC_TRACE(edev); + + if (index >= qdev->dev_info.num_mac_addrs) { + DP_ERR(edev, "Index %u is above MAC filter limit %u\n", + index, qdev->dev_info.num_mac_addrs); + return; + } + + /* Adding macaddr even though promiscuous mode is set */ + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) + DP_INFO(edev, "Port is in promisc mode, yet adding it\n"); + + /* Add MAC filters according to the unicast secondary macs */ + rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD, + mac_addr->addr_bytes); + if (rc) + DP_ERR(edev, "Unable to add macaddr rc=%d\n", rc); +} + +static void +qede_mac_addr_remove(struct rte_eth_dev *eth_dev, uint32_t index) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct ether_addr mac_addr; + int rc; + + PMD_INIT_FUNC_TRACE(edev); + + if (index >= qdev->dev_info.num_mac_addrs) { + DP_ERR(edev, "Index %u is above MAC filter limit %u\n", + index, qdev->dev_info.num_mac_addrs); + return; + } + + /* Use the index maintained by rte */ + ether_addr_copy(ð_dev->data->mac_addrs[index], &mac_addr); + rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL, + mac_addr.addr_bytes); + if (rc) + DP_ERR(edev, "Unable to remove macaddr rc=%d\n", rc); +} + +static void +qede_mac_addr_set(struct rte_eth_dev *eth_dev, struct ether_addr *mac_addr) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + int rc; + + if (IS_VF(edev) && !ecore_vf_check_mac(ECORE_LEADING_HWFN(edev), + mac_addr->addr_bytes)) { + DP_ERR(edev, "Setting MAC address is not allowed\n"); + ether_addr_copy(&qdev->primary_mac, + ð_dev->data->mac_addrs[0]); + return; + } + + /* First remove the primary mac */ + rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_DEL, + qdev->primary_mac.addr_bytes); + + if (rc) { + DP_ERR(edev, "Unable to remove current macaddr" + " Reverting to previous default mac\n"); + ether_addr_copy(&qdev->primary_mac, + ð_dev->data->mac_addrs[0]); + return; + } + 
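/* At this point the previous primary MAC's unicast filter has been removed
 * (QED_FILTER_XCAST_TYPE_DEL). The new address is installed below with
 * QED_FILTER_XCAST_TYPE_ADD and, on success, cached in qdev->primary_mac so
 * that the next address change knows which filter to delete. */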
+ /* Add new MAC */ + rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD, + mac_addr->addr_bytes); + + if (rc) + DP_ERR(edev, "Unable to add new default mac\n"); + else + ether_addr_copy(mac_addr, &qdev->primary_mac); +} + + + + +static void qede_config_accept_any_vlan(struct qede_dev *qdev, bool action) +{ + struct ecore_dev *edev = &qdev->edev; + struct qed_update_vport_params params = { + .vport_id = 0, + .accept_any_vlan = action, + .update_accept_any_vlan_flg = 1, + }; + int rc; + + /* Proceed only if action actually needs to be performed */ + if (qdev->accept_any_vlan == action) + return; + + rc = qdev->ops->vport_update(edev, &params); + if (rc) { + DP_ERR(edev, "Failed to %s accept-any-vlan\n", + action ? "enable" : "disable"); + } else { + DP_INFO(edev, "%s accept-any-vlan\n", + action ? "enabled" : "disabled"); + qdev->accept_any_vlan = action; + } +} + +void qede_config_rx_mode(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + /* TODO: - QED_FILTER_TYPE_UCAST */ + enum qed_filter_rx_mode_type accept_flags = + QED_FILTER_RX_MODE_TYPE_REGULAR; + struct qed_filter_params rx_mode; + int rc; + + /* Configure the struct for the Rx mode */ + memset(&rx_mode, 0, sizeof(struct qed_filter_params)); + rx_mode.type = QED_FILTER_TYPE_RX_MODE; + + rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_REPLACE, + eth_dev->data->mac_addrs[0].addr_bytes); + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) { + accept_flags = QED_FILTER_RX_MODE_TYPE_PROMISC; + } else { + rc = qede_set_ucast_rx_mac(qdev, QED_FILTER_XCAST_TYPE_ADD, + eth_dev->data-> + mac_addrs[0].addr_bytes); + if (rc) { + DP_ERR(edev, "Unable to add filter\n"); + return; + } + } + + /* take care of VLAN mode */ + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) { + qede_config_accept_any_vlan(qdev, true); + } else if (!qdev->non_configured_vlans) { + /* If we don't have non-configured VLANs and promisc + * is not set, then check if we need to disable + * accept_any_vlan mode. + * Because in this case, accept_any_vlan mode is set + * as part of IFF_PROMISC flag handling. 
+ */ + qede_config_accept_any_vlan(qdev, false); + } + rx_mode.filter.accept_flags = accept_flags; + rc = qdev->ops->filter_config(edev, &rx_mode); + if (rc) + DP_ERR(edev, "Filter config failed rc=%d\n", rc); +} + +static int qede_vlan_stripping(struct rte_eth_dev *eth_dev, bool set_stripping) +{ + struct qed_update_vport_params vport_update_params; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + int rc; + + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = 0; + vport_update_params.update_inner_vlan_removal_flg = 1; + vport_update_params.inner_vlan_removal_flg = set_stripping; + rc = qdev->ops->vport_update(edev, &vport_update_params); + if (rc) { + DP_ERR(edev, "Update V-PORT failed %d\n", rc); + return rc; + } + + return 0; +} + +static void qede_vlan_offload_set(struct rte_eth_dev *eth_dev, int mask) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + if (mask & ETH_VLAN_STRIP_MASK) { + if (eth_dev->data->dev_conf.rxmode.hw_vlan_strip) + (void)qede_vlan_stripping(eth_dev, 1); + else + (void)qede_vlan_stripping(eth_dev, 0); + } + + DP_INFO(edev, "vlan offload mask %d vlan-strip %d\n", + mask, eth_dev->data->dev_conf.rxmode.hw_vlan_strip); +} + +static int qede_set_ucast_rx_vlan(struct qede_dev *qdev, + enum qed_filter_xcast_params_type opcode, + uint16_t vid) +{ + struct qed_filter_params filter_cmd; + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + + memset(&filter_cmd, 0, sizeof(filter_cmd)); + filter_cmd.type = QED_FILTER_TYPE_UCAST; + filter_cmd.filter.ucast.type = opcode; + filter_cmd.filter.ucast.vlan_valid = 1; + filter_cmd.filter.ucast.vlan = vid; + + return qdev->ops->filter_config(edev, &filter_cmd); +} + +static int qede_vlan_filter_set(struct rte_eth_dev *eth_dev, + uint16_t vlan_id, int on) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_dev_eth_info *dev_info = &qdev->dev_info; + int rc; + + if (vlan_id != 0 && + qdev->configured_vlans == dev_info->num_vlan_filters) { + DP_NOTICE(edev, false, "Reached max VLAN filter limit" + " enabling accept_any_vlan\n"); + qede_config_accept_any_vlan(qdev, true); + return 0; + } + + if (on) { + rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_ADD, + vlan_id); + if (rc) + DP_ERR(edev, "Failed to add VLAN %u rc %d\n", vlan_id, + rc); + else + if (vlan_id != 0) + qdev->configured_vlans++; + } else { + rc = qede_set_ucast_rx_vlan(qdev, QED_FILTER_XCAST_TYPE_DEL, + vlan_id); + if (rc) + DP_ERR(edev, "Failed to delete VLAN %u rc %d\n", + vlan_id, rc); + else + if (vlan_id != 0) + qdev->configured_vlans--; + } + + DP_INFO(edev, "vlan_id %u on %u rc %d configured_vlans %u\n", + vlan_id, on, rc, qdev->configured_vlans); + + return rc; +} + +static int qede_dev_configure(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct rte_eth_rxmode *rxmode = ð_dev->data->dev_conf.rxmode; + + PMD_INIT_FUNC_TRACE(edev); + + if (eth_dev->data->nb_rx_queues != eth_dev->data->nb_tx_queues) { + DP_NOTICE(edev, false, + "Unequal number of rx/tx queues " + "is not supported RX=%u TX=%u\n", + eth_dev->data->nb_rx_queues, + eth_dev->data->nb_tx_queues); + return -EINVAL; + } + + /* Check requirements for 100G mode */ + if (edev->num_hwfns > 1) { + if (eth_dev->data->nb_rx_queues < 2) { + DP_NOTICE(edev, false, + "100G mode requires minimum two queues\n"); + 
return -EINVAL; + } + + if ((eth_dev->data->nb_rx_queues % 2) != 0) { + DP_NOTICE(edev, false, + "100G mode requires even number of queues\n"); + return -EINVAL; + } + } + + qdev->num_rss = eth_dev->data->nb_rx_queues; + + /* Initial state */ + qdev->state = QEDE_CLOSE; + + /* Sanity checks and throw warnings */ + + if (rxmode->enable_scatter == 1) { + DP_ERR(edev, "RX scatter packets is not supported\n"); + return -EINVAL; + } + + if (rxmode->enable_lro == 1) { + DP_INFO(edev, "LRO is not supported\n"); + return -EINVAL; + } + + if (!rxmode->hw_strip_crc) + DP_INFO(edev, "L2 CRC stripping is always enabled in hw\n"); + + if (!rxmode->hw_ip_checksum) + DP_INFO(edev, "IP/UDP/TCP checksum offload is always enabled " + "in hw\n"); + + + DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", + QEDE_RSS_CNT(qdev), qdev->num_tc); + + DP_INFO(edev, "my_id %u rel_pf_id %u abs_pf_id %u" + " port %u first_on_engine %d\n", + edev->hwfns[0].my_id, + edev->hwfns[0].rel_pf_id, + edev->hwfns[0].abs_pf_id, + edev->hwfns[0].port_id, edev->hwfns[0].first_on_engine); + + return 0; +} + +/* Info about HW descriptor ring limitations */ +static const struct rte_eth_desc_lim qede_rx_desc_lim = { + .nb_max = NUM_RX_BDS_MAX, + .nb_min = 128, + .nb_align = 128 /* lowest common multiple */ +}; + +static const struct rte_eth_desc_lim qede_tx_desc_lim = { + .nb_max = NUM_TX_BDS_MAX, + .nb_min = 256, + .nb_align = 256 +}; + +static void +qede_dev_info_get(struct rte_eth_dev *eth_dev, + struct rte_eth_dev_info *dev_info) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + PMD_INIT_FUNC_TRACE(edev); + + dev_info->min_rx_bufsize = (uint32_t)(ETHER_MIN_MTU + + QEDE_ETH_OVERHEAD); + dev_info->max_rx_pktlen = (uint32_t)ETH_TX_MAX_NON_LSO_PKT_LEN; + dev_info->rx_desc_lim = qede_rx_desc_lim; + dev_info->tx_desc_lim = qede_tx_desc_lim; + dev_info->max_rx_queues = (uint16_t)QEDE_MAX_RSS_CNT(qdev); + dev_info->max_tx_queues = dev_info->max_rx_queues; + dev_info->max_mac_addrs = qdev->dev_info.num_mac_addrs; + if (IS_VF(edev)) + dev_info->max_vfs = 0; + else + dev_info->max_vfs = (uint16_t)NUM_OF_VFS(&qdev->edev); + dev_info->driver_name = qdev->drv_ver; + dev_info->reta_size = ECORE_RSS_IND_TABLE_SIZE; + dev_info->flow_type_rss_offloads = (uint64_t)QEDE_RSS_OFFLOAD_ALL; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .txq_flags = QEDE_TXQ_FLAGS, + }; + + dev_info->rx_offload_capa = (DEV_RX_OFFLOAD_VLAN_STRIP | + DEV_RX_OFFLOAD_IPV4_CKSUM | + DEV_RX_OFFLOAD_UDP_CKSUM | + DEV_RX_OFFLOAD_TCP_CKSUM); + dev_info->tx_offload_capa = (DEV_TX_OFFLOAD_VLAN_INSERT | + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM); + + dev_info->speed_capa = ETH_LINK_SPEED_25G | ETH_LINK_SPEED_40G; +} + +/* return 0 means link status changed, -1 means not changed */ +static int +qede_link_update(struct rte_eth_dev *eth_dev, __rte_unused int wait_to_complete) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + uint16_t link_duplex; + struct qed_link_output link; + struct rte_eth_link *curr = ð_dev->data->dev_link; + + memset(&link, 0, sizeof(struct qed_link_output)); + qdev->ops->common->get_link(edev, &link); + + /* Link Speed */ + curr->link_speed = link.speed; + + /* Link Mode */ + switch (link.duplex) { + case QEDE_DUPLEX_HALF: + link_duplex = ETH_LINK_HALF_DUPLEX; + break; + case QEDE_DUPLEX_FULL: + link_duplex = ETH_LINK_FULL_DUPLEX; + break; + case QEDE_DUPLEX_UNKNOWN: + default: + link_duplex = -1; + } + 
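The 100G checks just above tie in with the CMT handling in qed_update_vport() and qed_start_rxq()/qed_start_txq() earlier in this patch: with two hwfns, queues alternate between the engines, so the global RSS indirection entries have to be reduced to per-engine queue ids, and RSS is dropped when an engine would be left with a single queue. Below is a minimal standalone sketch of that reduction; the helper name, the IND_TABLE_SIZE alias and the main() driver are illustrative only, while the 128-entry table size and the divisor arithmetic follow the patch.

#include <stdint.h>
#include <stdio.h>

#define IND_TABLE_SIZE 128	/* mirrors rss_ind_table[128] in qed_update_vport_rss_params */

/* Reduce a global RSS indirection table to per-engine queue ids, as
 * qed_update_vport() does for CMT devices. Returns 1 if RSS stays enabled,
 * 0 if every engine would own a single queue and RSS must be disabled. */
static int cmt_fix_rss_ind_table(uint16_t *ind_table, int num_hwfns)
{
	int k, max = 0, divisor;

	for (k = 0; k < IND_TABLE_SIZE; k++)
		if (ind_table[k] > max)
			max = ind_table[k];

	if (num_hwfns >= max + 1)
		return 0;	/* at most one queue per engine */

	divisor = (max + num_hwfns - 1) / num_hwfns;
	for (k = 0; k < IND_TABLE_SIZE; k++)
		ind_table[k] %= divisor;

	return 1;
}

int main(void)
{
	/* 4 Rx queues on a 2-hwfn (100G) device: global ids 0..3, max = 3,
	 * divisor = (3 + 2 - 1) / 2 = 2, so entries become per-engine ids 0..1. */
	uint16_t tbl[IND_TABLE_SIZE];
	int k;

	for (k = 0; k < IND_TABLE_SIZE; k++)
		tbl[k] = k % 4;

	printf("rss enabled: %d, entry[3] -> %d\n",
	       cmt_fix_rss_ind_table(tbl, 2), tbl[3]);
	return 0;
}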
curr->link_duplex = link_duplex; + + /* Link Status */ + curr->link_status = (link.link_up) ? ETH_LINK_UP : ETH_LINK_DOWN; + + /* AN */ + curr->link_autoneg = (link.supported_caps & QEDE_SUPPORTED_AUTONEG) ? + ETH_LINK_AUTONEG : ETH_LINK_FIXED; + + DP_INFO(edev, "Link - Speed %u Mode %u AN %u Status %u\n", + curr->link_speed, curr->link_duplex, + curr->link_autoneg, curr->link_status); + + /* return 0 means link status changed, -1 means not changed */ + return ((curr->link_status == link.link_up) ? -1 : 0); +} + +static void +qede_rx_mode_setting(struct rte_eth_dev *eth_dev, + enum qed_filter_rx_mode_type accept_flags) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qed_filter_params rx_mode; + + DP_INFO(edev, "%s mode %u\n", __func__, accept_flags); + + memset(&rx_mode, 0, sizeof(struct qed_filter_params)); + rx_mode.type = QED_FILTER_TYPE_RX_MODE; + rx_mode.filter.accept_flags = accept_flags; + qdev->ops->filter_config(edev, &rx_mode); +} + +static void qede_promiscuous_enable(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + PMD_INIT_FUNC_TRACE(edev); + + enum qed_filter_rx_mode_type type = QED_FILTER_RX_MODE_TYPE_PROMISC; + + if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) + type |= QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + + qede_rx_mode_setting(eth_dev, type); +} + +static void qede_promiscuous_disable(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + PMD_INIT_FUNC_TRACE(edev); + + if (rte_eth_allmulticast_get(eth_dev->data->port_id) == 1) + qede_rx_mode_setting(eth_dev, + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC); + else + qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_REGULAR); +} + +static void qede_poll_sp_sb_cb(void *param) +{ + struct rte_eth_dev *eth_dev = (struct rte_eth_dev *)param; + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + int rc; + + qede_interrupt_action(ECORE_LEADING_HWFN(edev)); + qede_interrupt_action(&edev->hwfns[1]); + + rc = rte_eal_alarm_set(timer_period * US_PER_S, + qede_poll_sp_sb_cb, + (void *)eth_dev); + if (rc != 0) { + DP_ERR(edev, "Unable to start periodic" + " timer rc %d\n", rc); + assert(false && "Unable to start periodic timer"); + } +} + +static void qede_dev_close(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + PMD_INIT_FUNC_TRACE(edev); + + /* dev_stop() shall cleanup fp resources in hw but without releasing + * dma memories and sw structures so that dev_start() can be called + * by the app without reconfiguration. 
However, in dev_close() we + * can release all the resources and device can be brought up newly + */ + if (qdev->state != QEDE_STOP) + qede_dev_stop(eth_dev); + else + DP_INFO(edev, "Device is already stopped\n"); + + qede_free_mem_load(qdev); + + qede_free_fp_arrays(qdev); + + qede_dev_set_link_state(eth_dev, false); + + qdev->ops->common->slowpath_stop(edev); + + qdev->ops->common->remove(edev); + + rte_intr_disable(ð_dev->pci_dev->intr_handle); + + rte_intr_callback_unregister(ð_dev->pci_dev->intr_handle, + qede_interrupt_handler, (void *)eth_dev); + + if (edev->num_hwfns > 1) + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, (void *)eth_dev); + + qdev->state = QEDE_CLOSE; +} + +static void +qede_get_stats(struct rte_eth_dev *eth_dev, struct rte_eth_stats *eth_stats) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct ecore_eth_stats stats; + + qdev->ops->get_vport_stats(edev, &stats); + + /* RX Stats */ + eth_stats->ipackets = stats.rx_ucast_pkts + + stats.rx_mcast_pkts + stats.rx_bcast_pkts; + + eth_stats->ibytes = stats.rx_ucast_bytes + + stats.rx_mcast_bytes + stats.rx_bcast_bytes; + + eth_stats->ierrors = stats.rx_crc_errors + + stats.rx_align_errors + + stats.rx_carrier_errors + + stats.rx_oversize_packets + + stats.rx_jabbers + stats.rx_undersize_packets; + + eth_stats->rx_nombuf = stats.no_buff_discards; + + eth_stats->imissed = stats.mftag_filter_discards + + stats.mac_filter_discards + + stats.no_buff_discards + stats.brb_truncates + stats.brb_discards; + + /* TX stats */ + eth_stats->opackets = stats.tx_ucast_pkts + + stats.tx_mcast_pkts + stats.tx_bcast_pkts; + + eth_stats->obytes = stats.tx_ucast_bytes + + stats.tx_mcast_bytes + stats.tx_bcast_bytes; + + eth_stats->oerrors = stats.tx_err_drop_pkts; + + DP_INFO(edev, + "no_buff_discards=%" PRIu64 "" + " mac_filter_discards=%" PRIu64 "" + " brb_truncates=%" PRIu64 "" + " brb_discards=%" PRIu64 "\n", + stats.no_buff_discards, + stats.mac_filter_discards, + stats.brb_truncates, stats.brb_discards); +} + +int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_link_params link_params; + int rc; + + DP_INFO(edev, "setting link state %d\n", link_up); + memset(&link_params, 0, sizeof(link_params)); + link_params.link_up = link_up; + rc = qdev->ops->common->set_link(edev, &link_params); + if (rc != ECORE_SUCCESS) + DP_ERR(edev, "Unable to set link state %d\n", link_up); + + return rc; +} + +static int qede_dev_set_link_up(struct rte_eth_dev *eth_dev) +{ + return qede_dev_set_link_state(eth_dev, true); +} + +static int qede_dev_set_link_down(struct rte_eth_dev *eth_dev) +{ + return qede_dev_set_link_state(eth_dev, false); +} + +static void qede_reset_stats(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + + ecore_reset_vport_stats(edev); +} + +static void qede_allmulticast_enable(struct rte_eth_dev *eth_dev) +{ + enum qed_filter_rx_mode_type type = + QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC; + + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) + type |= QED_FILTER_RX_MODE_TYPE_PROMISC; + + qede_rx_mode_setting(eth_dev, type); +} + +static void qede_allmulticast_disable(struct rte_eth_dev *eth_dev) +{ + if (rte_eth_promiscuous_get(eth_dev->data->port_id) == 1) + qede_rx_mode_setting(eth_dev, QED_FILTER_RX_MODE_TYPE_PROMISC); + else + qede_rx_mode_setting(eth_dev, 
QED_FILTER_RX_MODE_TYPE_REGULAR); +} + +static int qede_flow_ctrl_set(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_link_output current_link; + struct qed_link_params params; + + memset(¤t_link, 0, sizeof(current_link)); + qdev->ops->common->get_link(edev, ¤t_link); + + memset(¶ms, 0, sizeof(params)); + params.override_flags |= QED_LINK_OVERRIDE_PAUSE_CONFIG; + if (fc_conf->autoneg) { + if (!(current_link.supported_caps & QEDE_SUPPORTED_AUTONEG)) { + DP_ERR(edev, "Autoneg not supported\n"); + return -EINVAL; + } + params.pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + } + + /* Pause is assumed to be supported (SUPPORTED_Pause) */ + if (fc_conf->mode == RTE_FC_FULL) + params.pause_config |= (QED_LINK_PAUSE_TX_ENABLE | + QED_LINK_PAUSE_RX_ENABLE); + if (fc_conf->mode == RTE_FC_TX_PAUSE) + params.pause_config |= QED_LINK_PAUSE_TX_ENABLE; + if (fc_conf->mode == RTE_FC_RX_PAUSE) + params.pause_config |= QED_LINK_PAUSE_RX_ENABLE; + + params.link_up = true; + (void)qdev->ops->common->set_link(edev, ¶ms); + + return 0; +} + +static int qede_flow_ctrl_get(struct rte_eth_dev *eth_dev, + struct rte_eth_fc_conf *fc_conf) +{ + struct qede_dev *qdev = QEDE_INIT_QDEV(eth_dev); + struct ecore_dev *edev = QEDE_INIT_EDEV(qdev); + struct qed_link_output current_link; + + memset(¤t_link, 0, sizeof(current_link)); + qdev->ops->common->get_link(edev, ¤t_link); + + if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + fc_conf->autoneg = true; + + if (current_link.pause_config & (QED_LINK_PAUSE_RX_ENABLE | + QED_LINK_PAUSE_TX_ENABLE)) + fc_conf->mode = RTE_FC_FULL; + else if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE) + fc_conf->mode = RTE_FC_RX_PAUSE; + else if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE) + fc_conf->mode = RTE_FC_TX_PAUSE; + else + fc_conf->mode = RTE_FC_NONE; + + return 0; +} + +static const uint32_t * +qede_dev_supported_ptypes_get(struct rte_eth_dev *eth_dev) +{ + static const uint32_t ptypes[] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_UNKNOWN + }; + + if (eth_dev->rx_pkt_burst == qede_recv_pkts) + return ptypes; + + return NULL; +} + +int qede_rss_hash_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct qed_update_vport_params vport_update_params; + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + uint8_t rss_caps; + uint32_t *key = (uint32_t *)rss_conf->rss_key; + uint64_t hf = rss_conf->rss_hf; + int i; + + if (hf == 0) + DP_ERR(edev, "hash function 0 will disable RSS\n"); + + rss_caps = 0; + rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; + rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; + rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; + rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; + rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; + rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? 
ECORE_RSS_IPV6_TCP : 0; + + /* If the mapping doesn't fit any supported, return */ + if (rss_caps == 0 && hf != 0) + return -EINVAL; + + memset(&vport_update_params, 0, sizeof(vport_update_params)); + + if (key != NULL) + memcpy(qdev->rss_params.rss_key, rss_conf->rss_key, + rss_conf->rss_key_len); + + qdev->rss_params.rss_caps = rss_caps; + memcpy(&vport_update_params.rss_params, &qdev->rss_params, + sizeof(vport_update_params.rss_params)); + vport_update_params.update_rss_flg = 1; + vport_update_params.vport_id = 0; + + return qdev->ops->vport_update(edev, &vport_update_params); +} + +int qede_rss_hash_conf_get(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + uint64_t hf; + + if (rss_conf->rss_key_len < sizeof(qdev->rss_params.rss_key)) + return -EINVAL; + + if (rss_conf->rss_key) + memcpy(rss_conf->rss_key, qdev->rss_params.rss_key, + sizeof(qdev->rss_params.rss_key)); + + hf = 0; + hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4) ? + ETH_RSS_IPV4 : 0; + hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ? + ETH_RSS_IPV6 : 0; + hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6) ? + ETH_RSS_IPV6_EX : 0; + hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV4_TCP) ? + ETH_RSS_NONFRAG_IPV4_TCP : 0; + hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ? + ETH_RSS_NONFRAG_IPV6_TCP : 0; + hf |= (qdev->rss_params.rss_caps & ECORE_RSS_IPV6_TCP) ? + ETH_RSS_IPV6_TCP_EX : 0; + + rss_conf->rss_hf = hf; + + return 0; +} + +int qede_rss_reta_update(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct qed_update_vport_params vport_update_params; + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + uint16_t i, idx, shift; + + if (reta_size > ETH_RSS_RETA_SIZE_128) { + DP_ERR(edev, "reta_size %d is not supported by hardware\n", + reta_size); + return -EINVAL; + } + + memset(&vport_update_params, 0, sizeof(vport_update_params)); + memcpy(&vport_update_params.rss_params, &qdev->rss_params, + sizeof(vport_update_params.rss_params)); + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) { + uint8_t entry = reta_conf[idx].reta[shift]; + qdev->rss_params.rss_ind_table[i] = entry; + } + } + + vport_update_params.update_rss_flg = 1; + vport_update_params.vport_id = 0; + + return qdev->ops->vport_update(edev, &vport_update_params); +} + +int qede_rss_reta_query(struct rte_eth_dev *eth_dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + uint16_t i, idx, shift; + + if (reta_size > ETH_RSS_RETA_SIZE_128) { + struct ecore_dev *edev = &qdev->edev; + DP_ERR(edev, "reta_size %d is not supported\n", + reta_size); + } + + for (i = 0; i < reta_size; i++) { + idx = i / RTE_RETA_GROUP_SIZE; + shift = i % RTE_RETA_GROUP_SIZE; + if (reta_conf[idx].mask & (1ULL << shift)) { + uint8_t entry = qdev->rss_params.rss_ind_table[i]; + reta_conf[idx].reta[shift] = entry; + } + } + + return 0; +} + +int qede_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + uint32_t frame_size; + struct qede_dev *qdev = dev->data->dev_private; + struct rte_eth_dev_info dev_info = {0}; + + qede_dev_info_get(dev, &dev_info); + + /* VLAN_TAG = 4 */ + frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + 4; + + if ((mtu < ETHER_MIN_MTU) || (frame_size > dev_info.max_rx_pktlen)) + return -EINVAL; + + if 
(!dev->data->scattered_rx && + frame_size > dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM) + return -EINVAL; + + if (frame_size > ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.jumbo_frame = 1; + else + dev->data->dev_conf.rxmode.jumbo_frame = 0; + + /* update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = frame_size; + qdev->mtu = mtu; + qede_dev_stop(dev); + qede_dev_start(dev); + + return 0; +} + +static const struct eth_dev_ops qede_eth_dev_ops = { + .dev_configure = qede_dev_configure, + .dev_infos_get = qede_dev_info_get, + .rx_queue_setup = qede_rx_queue_setup, + .rx_queue_release = qede_rx_queue_release, + .tx_queue_setup = qede_tx_queue_setup, + .tx_queue_release = qede_tx_queue_release, + .dev_start = qede_dev_start, + .dev_set_link_up = qede_dev_set_link_up, + .dev_set_link_down = qede_dev_set_link_down, + .link_update = qede_link_update, + .promiscuous_enable = qede_promiscuous_enable, + .promiscuous_disable = qede_promiscuous_disable, + .allmulticast_enable = qede_allmulticast_enable, + .allmulticast_disable = qede_allmulticast_disable, + .dev_stop = qede_dev_stop, + .dev_close = qede_dev_close, + .stats_get = qede_get_stats, + .stats_reset = qede_reset_stats, + .mac_addr_add = qede_mac_addr_add, + .mac_addr_remove = qede_mac_addr_remove, + .mac_addr_set = qede_mac_addr_set, + .vlan_offload_set = qede_vlan_offload_set, + .vlan_filter_set = qede_vlan_filter_set, + .flow_ctrl_set = qede_flow_ctrl_set, + .flow_ctrl_get = qede_flow_ctrl_get, + .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, + .rss_hash_update = qede_rss_hash_update, + .rss_hash_conf_get = qede_rss_hash_conf_get, + .reta_update = qede_rss_reta_update, + .reta_query = qede_rss_reta_query, + .mtu_set = qede_set_mtu, +}; + +static const struct eth_dev_ops qede_eth_vf_dev_ops = { + .dev_configure = qede_dev_configure, + .dev_infos_get = qede_dev_info_get, + .rx_queue_setup = qede_rx_queue_setup, + .rx_queue_release = qede_rx_queue_release, + .tx_queue_setup = qede_tx_queue_setup, + .tx_queue_release = qede_tx_queue_release, + .dev_start = qede_dev_start, + .dev_set_link_up = qede_dev_set_link_up, + .dev_set_link_down = qede_dev_set_link_down, + .link_update = qede_link_update, + .promiscuous_enable = qede_promiscuous_enable, + .promiscuous_disable = qede_promiscuous_disable, + .allmulticast_enable = qede_allmulticast_enable, + .allmulticast_disable = qede_allmulticast_disable, + .dev_stop = qede_dev_stop, + .dev_close = qede_dev_close, + .stats_get = qede_get_stats, + .stats_reset = qede_reset_stats, + .vlan_offload_set = qede_vlan_offload_set, + .vlan_filter_set = qede_vlan_filter_set, + .dev_supported_ptypes_get = qede_dev_supported_ptypes_get, + .rss_hash_update = qede_rss_hash_update, + .rss_hash_conf_get = qede_rss_hash_conf_get, + .reta_update = qede_rss_reta_update, + .reta_query = qede_rss_reta_query, + .mtu_set = qede_set_mtu, +}; + +static void qede_update_pf_params(struct ecore_dev *edev) +{ + struct ecore_pf_params pf_params; + /* 32 rx + 32 tx */ + memset(&pf_params, 0, sizeof(struct ecore_pf_params)); + pf_params.eth_pf_params.num_cons = 64; + qed_ops->common->update_pf_params(edev, &pf_params); +} + +static int qede_common_dev_init(struct rte_eth_dev *eth_dev, bool is_vf) +{ + struct rte_pci_device *pci_dev; + struct rte_pci_addr pci_addr; + struct qede_dev *adapter; + struct ecore_dev *edev; + struct qed_dev_eth_info dev_info; + struct qed_slowpath_params params; + uint32_t qed_ver; + static bool do_once = true; + uint8_t bulletin_change; + uint8_t vf_mac[ETHER_ADDR_LEN]; + 
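+ /* Note: bulletin_change, vf_mac, is_mac_forced and is_mac_exist are
+  * only consumed on the VF path further below, where the MAC address
+  * is read from the PF's bulletin board instead of the ecore HW info.
+  */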
uint8_t is_mac_forced; + bool is_mac_exist; + /* Fix up ecore debug level */ + uint32_t dp_module = ~0 & ~ECORE_MSG_HW; + uint8_t dp_level = ECORE_LEVEL_VERBOSE; + uint32_t max_mac_addrs; + int rc; + + /* Extract key data structures */ + adapter = eth_dev->data->dev_private; + edev = &adapter->edev; + pci_addr = eth_dev->pci_dev->addr; + + PMD_INIT_FUNC_TRACE(edev); + + snprintf(edev->name, NAME_SIZE, PCI_SHORT_PRI_FMT ":dpdk-port-%u", + pci_addr.bus, pci_addr.devid, pci_addr.function, + eth_dev->data->port_id); + + eth_dev->rx_pkt_burst = qede_recv_pkts; + eth_dev->tx_pkt_burst = qede_xmit_pkts; + + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + DP_NOTICE(edev, false, + "Skipping device init from secondary process\n"); + return 0; + } + + pci_dev = eth_dev->pci_dev; + + rte_eth_copy_pci_info(eth_dev, pci_dev); + + qed_ver = qed_get_protocol_version(QED_PROTOCOL_ETH); + + qed_ops = qed_get_eth_ops(); + if (!qed_ops) { + DP_ERR(edev, "Failed to get qed_eth_ops_pass\n"); + return -EINVAL; + } + + DP_INFO(edev, "Starting qede probe\n"); + + rc = qed_ops->common->probe(edev, pci_dev, QED_PROTOCOL_ETH, + dp_module, dp_level, is_vf); + + if (rc != 0) { + DP_ERR(edev, "qede probe failed rc %d\n", rc); + return -ENODEV; + } + + qede_update_pf_params(edev); + + rte_intr_callback_register(&eth_dev->pci_dev->intr_handle, + qede_interrupt_handler, (void *)eth_dev); + + if (rte_intr_enable(&eth_dev->pci_dev->intr_handle)) { + DP_ERR(edev, "rte_intr_enable() failed\n"); + return -ENODEV; + } + + /* Start the Slowpath-process */ + memset(&params, 0, sizeof(struct qed_slowpath_params)); + params.int_mode = ECORE_INT_MODE_MSIX; + params.drv_major = QEDE_MAJOR_VERSION; + params.drv_minor = QEDE_MINOR_VERSION; + params.drv_rev = QEDE_REVISION_VERSION; + params.drv_eng = QEDE_ENGINEERING_VERSION; + strncpy((char *)params.name, "qede LAN", QED_DRV_VER_STR_SIZE); + + /* For CMT mode device do periodic polling for slowpath events. + * This is required since uio device uses only one MSI-x + * interrupt vector but we need one for each engine.
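+ * The EAL alarm is one-shot, so qede_poll_sp_sb_cb() is expected to
+ * re-arm itself; the alarm is cancelled again in qede_dev_close() and
+ * on the error paths below.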
+ */ + if (edev->num_hwfns > 1) { + rc = rte_eal_alarm_set(timer_period * US_PER_S, + qede_poll_sp_sb_cb, + (void *)eth_dev); + if (rc != 0) { + DP_ERR(edev, "Unable to start periodic" + " timer rc %d\n", rc); + return -EINVAL; + } + } + + rc = qed_ops->common->slowpath_start(edev, &params); + if (rc) { + DP_ERR(edev, "Cannot start slowpath rc = %d\n", rc); + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, + (void *)eth_dev); + return -ENODEV; + } + + rc = qed_ops->fill_dev_info(edev, &dev_info); + if (rc) { + DP_ERR(edev, "Cannot get device_info rc %d\n", rc); + qed_ops->common->slowpath_stop(edev); + qed_ops->common->remove(edev); + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, + (void *)eth_dev); + return -ENODEV; + } + + qede_alloc_etherdev(adapter, &dev_info); + + adapter->ops->common->set_id(edev, edev->name, QEDE_DRV_MODULE_VERSION); + + if (!is_vf) + adapter->dev_info.num_mac_addrs = + (uint32_t)RESC_NUM(ECORE_LEADING_HWFN(edev), + ECORE_MAC); + else + ecore_vf_get_num_mac_filters(ECORE_LEADING_HWFN(edev), + &adapter->dev_info.num_mac_addrs); + + /* Allocate memory for storing MAC addr */ + eth_dev->data->mac_addrs = rte_zmalloc(edev->name, + (ETHER_ADDR_LEN * + adapter->dev_info.num_mac_addrs), + RTE_CACHE_LINE_SIZE); + + if (eth_dev->data->mac_addrs == NULL) { + DP_ERR(edev, "Failed to allocate MAC address\n"); + qed_ops->common->slowpath_stop(edev); + qed_ops->common->remove(edev); + rte_eal_alarm_cancel(qede_poll_sp_sb_cb, + (void *)eth_dev); + return -ENOMEM; + } + + if (!is_vf) { + ether_addr_copy((struct ether_addr *)edev->hwfns[0]. + hw_info.hw_mac_addr, + &eth_dev->data->mac_addrs[0]); + ether_addr_copy(&eth_dev->data->mac_addrs[0], + &adapter->primary_mac); + } else { + ecore_vf_read_bulletin(ECORE_LEADING_HWFN(edev), + &bulletin_change); + if (bulletin_change) { + is_mac_exist = + ecore_vf_bulletin_get_forced_mac( + ECORE_LEADING_HWFN(edev), + vf_mac, + &is_mac_forced); + if (is_mac_exist && is_mac_forced) { + DP_INFO(edev, "VF macaddr received from PF\n"); + ether_addr_copy((struct ether_addr *)&vf_mac, + &eth_dev->data->mac_addrs[0]); + ether_addr_copy(&eth_dev->data->mac_addrs[0], + &adapter->primary_mac); + } else { + DP_NOTICE(edev, false, + "No VF macaddr assigned\n"); + } + } + } + + eth_dev->dev_ops = (is_vf) ?
&qede_eth_vf_dev_ops : &qede_eth_dev_ops; + + if (do_once) { + qede_print_adapter_info(adapter); + do_once = false; + } + + DP_NOTICE(edev, false, "MAC address : %02x:%02x:%02x:%02x:%02x:%02x\n", + adapter->primary_mac.addr_bytes[0], + adapter->primary_mac.addr_bytes[1], + adapter->primary_mac.addr_bytes[2], + adapter->primary_mac.addr_bytes[3], + adapter->primary_mac.addr_bytes[4], + adapter->primary_mac.addr_bytes[5]); + + return rc; +} + +static int qedevf_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + return qede_common_dev_init(eth_dev, 1); +} + +static int qede_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + return qede_common_dev_init(eth_dev, 0); +} + +static int qede_dev_common_uninit(struct rte_eth_dev *eth_dev) +{ + /* only uninitialize in the primary process */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + /* safe to close dev here */ + qede_dev_close(eth_dev); + + eth_dev->dev_ops = NULL; + eth_dev->rx_pkt_burst = NULL; + eth_dev->tx_pkt_burst = NULL; + + if (eth_dev->data->mac_addrs) + rte_free(eth_dev->data->mac_addrs); + + eth_dev->data->mac_addrs = NULL; + + return 0; +} + +static int qede_eth_dev_uninit(struct rte_eth_dev *eth_dev) +{ + return qede_dev_common_uninit(eth_dev); +} + +static int qedevf_eth_dev_uninit(struct rte_eth_dev *eth_dev) +{ + return qede_dev_common_uninit(eth_dev); +} + +static struct rte_pci_id pci_id_qedevf_map[] = { +#define QEDEVF_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) + { + QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_VF) + }, + { + QEDEVF_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_IOV) + }, + {.vendor_id = 0,} +}; + +static struct rte_pci_id pci_id_qede_map[] = { +#define QEDE_RTE_PCI_DEVICE(dev) RTE_PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, dev) + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980E) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_NX2_57980S) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_40) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_25) + }, + { + QEDE_RTE_PCI_DEVICE(PCI_DEVICE_ID_57980S_100) + }, + {.vendor_id = 0,} +}; + +static struct eth_driver rte_qedevf_pmd = { + .pci_drv = { + .name = "rte_qedevf_pmd", + .id_table = pci_id_qedevf_map, + .drv_flags = + RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + }, + .eth_dev_init = qedevf_eth_dev_init, + .eth_dev_uninit = qedevf_eth_dev_uninit, + .dev_private_size = sizeof(struct qede_dev), +}; + +static struct eth_driver rte_qede_pmd = { + .pci_drv = { + .name = "rte_qede_pmd", + .id_table = pci_id_qede_map, + .drv_flags = + RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + }, + .eth_dev_init = qede_eth_dev_init, + .eth_dev_uninit = qede_eth_dev_uninit, + .dev_private_size = sizeof(struct qede_dev), +}; + +static int +rte_qedevf_pmd_init(const char *name __rte_unused, + const char *params __rte_unused) +{ + rte_eth_driver_register(&rte_qedevf_pmd); + + return 0; +} + +static int +rte_qede_pmd_init(const char *name __rte_unused, + const char *params __rte_unused) +{ + rte_eth_driver_register(&rte_qede_pmd); + + return 0; +} + +static struct rte_driver rte_qedevf_driver = { + .type = PMD_PDEV, + .init = rte_qede_pmd_init +}; + +static struct rte_driver rte_qede_driver = { + .type = PMD_PDEV, + .init = rte_qedevf_pmd_init +}; + +PMD_REGISTER_DRIVER(rte_qede_driver); +PMD_REGISTER_DRIVER(rte_qedevf_driver); diff --git a/drivers/net/qede/qede_ethdev.h b/drivers/net/qede/qede_ethdev.h new file mode 100644 index 00000000..abb33af1 --- /dev/null +++ b/drivers/net/qede/qede_ethdev.h @@ -0,0 +1,162 @@ +/* + * Copyright (c) 2016 QLogic Corporation. 
+ * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + + +#ifndef _QEDE_ETHDEV_H_ +#define _QEDE_ETHDEV_H_ + +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_dev.h> + +/* ecore includes */ +#include "base/bcm_osal.h" +#include "base/ecore.h" +#include "base/ecore_dev_api.h" +#include "base/ecore_l2_api.h" +#include "base/ecore_vf_api.h" +#include "base/ecore_hsi_common.h" +#include "base/ecore_int_api.h" +#include "base/ecore_chain.h" +#include "base/ecore_status.h" +#include "base/ecore_hsi_eth.h" +#include "base/ecore_dev_api.h" +#include "base/ecore_iov_api.h" + +#include "qede_logs.h" +#include "qede_if.h" +#include "qede_eth_if.h" + +#include "qede_rxtx.h" + +#define qede_stringify1(x...) #x +#define qede_stringify(x...) qede_stringify1(x) + +/* Driver versions */ +#define QEDE_PMD_VER_PREFIX "QEDE PMD" +#define QEDE_PMD_VERSION_MAJOR 1 +#define QEDE_PMD_VERSION_MINOR 1 +#define QEDE_PMD_VERSION_REVISION 0 +#define QEDE_PMD_VERSION_PATCH 1 + +#define QEDE_MAJOR_VERSION 8 +#define QEDE_MINOR_VERSION 7 +#define QEDE_REVISION_VERSION 9 +#define QEDE_ENGINEERING_VERSION 0 + +#define QEDE_DRV_MODULE_VERSION qede_stringify(QEDE_MAJOR_VERSION) "." \ + qede_stringify(QEDE_MINOR_VERSION) "." \ + qede_stringify(QEDE_REVISION_VERSION) "." \ + qede_stringify(QEDE_ENGINEERING_VERSION) + +#define QEDE_RSS_INDIR_INITED (1 << 0) +#define QEDE_RSS_KEY_INITED (1 << 1) +#define QEDE_RSS_CAPS_INITED (1 << 2) + +#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) +#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \ + (edev)->dev_info.num_tc) + +#define QEDE_RSS_CNT(edev) ((edev)->num_rss) +#define QEDE_TSS_CNT(edev) ((edev)->num_rss * (edev)->num_tc) + +#define QEDE_DUPLEX_FULL 1 +#define QEDE_DUPLEX_HALF 2 +#define QEDE_DUPLEX_UNKNOWN 0xff + +#define QEDE_SUPPORTED_AUTONEG (1 << 6) +#define QEDE_SUPPORTED_PAUSE (1 << 13) + +#define QEDE_INIT_QDEV(eth_dev) (eth_dev->data->dev_private) + +#define QEDE_INIT_EDEV(adapter) (&((struct qede_dev *)adapter)->edev) + +#define QEDE_INIT(eth_dev) { \ + struct qede_dev *qdev = eth_dev->data->dev_private; \ + struct ecore_dev *edev = &qdev->edev; \ +} + +/************* QLogic 25G/40G/100G vendor/devices ids *************/ +#define PCI_VENDOR_ID_QLOGIC 0x1077 + +#define CHIP_NUM_57980E 0x1634 +#define CHIP_NUM_57980S 0x1629 +#define CHIP_NUM_VF 0x1630 +#define CHIP_NUM_57980S_40 0x1634 +#define CHIP_NUM_57980S_25 0x1656 +#define CHIP_NUM_57980S_IOV 0x1664 +#define CHIP_NUM_57980S_100 0x1644 + +#define PCI_DEVICE_ID_NX2_57980E CHIP_NUM_57980E +#define PCI_DEVICE_ID_NX2_57980S CHIP_NUM_57980S +#define PCI_DEVICE_ID_NX2_VF CHIP_NUM_VF +#define PCI_DEVICE_ID_57980S_40 CHIP_NUM_57980S_40 +#define PCI_DEVICE_ID_57980S_25 CHIP_NUM_57980S_25 +#define PCI_DEVICE_ID_57980S_IOV CHIP_NUM_57980S_IOV +#define PCI_DEVICE_ID_57980S_100 CHIP_NUM_57980S_100 + +extern char fw_file[]; + +/* Port/function states */ +enum dev_state { + QEDE_START, + QEDE_STOP, + QEDE_CLOSE +}; + +struct qed_int_param { + uint32_t int_mode; + uint8_t num_vectors; + uint8_t min_msix_cnt; +}; + +struct qed_int_params { + struct qed_int_param in; + struct qed_int_param out; + bool fp_initialized; +}; + +/* + * Structure to store private data for each port. 
+ */ +struct qede_dev { + struct ecore_dev edev; + uint8_t protocol; + const struct qed_eth_ops *ops; + struct qed_dev_eth_info dev_info; + struct ecore_sb_info *sb_array; + struct qede_fastpath *fp_array; + uint16_t num_rss; + uint8_t num_tc; + uint16_t mtu; + bool rss_enabled; + struct qed_update_vport_rss_params rss_params; + uint32_t flags; + bool gro_disable; + struct qede_rx_queue **rx_queues; + struct qede_tx_queue **tx_queues; + enum dev_state state; + + /* Vlans */ + osal_list_t vlan_list; + uint16_t configured_vlans; + uint16_t non_configured_vlans; + bool accept_any_vlan; + uint16_t vxlan_dst_port; + + struct ether_addr primary_mac; + bool handle_hw_err; + char drv_ver[QED_DRV_VER_STR_SIZE]; +}; + +int qed_fill_eth_dev_info(struct ecore_dev *edev, + struct qed_dev_eth_info *info); +int qede_dev_set_link_state(struct rte_eth_dev *eth_dev, bool link_up); +void qede_config_rx_mode(struct rte_eth_dev *eth_dev); + +#endif /* _QEDE_ETHDEV_H_ */ diff --git a/drivers/net/qede/qede_if.h b/drivers/net/qede/qede_if.h new file mode 100644 index 00000000..1b05ff86 --- /dev/null +++ b/drivers/net/qede/qede_if.h @@ -0,0 +1,164 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef _QEDE_IF_H +#define _QEDE_IF_H + +#include "qede_ethdev.h" + +/* forward */ +struct ecore_dev; +struct qed_sb_info; +struct qed_pf_params; +enum ecore_int_mode; + +struct qed_dev_info { + uint8_t num_hwfns; + uint8_t hw_mac[ETHER_ADDR_LEN]; + bool is_mf_default; + + /* FW version */ + uint16_t fw_major; + uint16_t fw_minor; + uint16_t fw_rev; + uint16_t fw_eng; + + /* MFW version */ + uint32_t mfw_rev; + + uint32_t flash_size; + uint8_t mf_mode; + bool tx_switching; + /* To be added... */ +}; + +enum qed_sb_type { + QED_SB_TYPE_L2_QUEUE, + QED_SB_TYPE_STORAGE, + QED_SB_TYPE_CNQ, +}; + +enum qed_protocol { + QED_PROTOCOL_ETH, +}; + +struct qed_link_params { + bool link_up; + +#define QED_LINK_OVERRIDE_SPEED_AUTONEG (1 << 0) +#define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS (1 << 1) +#define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED (1 << 2) +#define QED_LINK_OVERRIDE_PAUSE_CONFIG (1 << 3) + uint32_t override_flags; + bool autoneg; + uint32_t adv_speeds; + uint32_t forced_speed; +#define QED_LINK_PAUSE_AUTONEG_ENABLE (1 << 0) +#define QED_LINK_PAUSE_RX_ENABLE (1 << 1) +#define QED_LINK_PAUSE_TX_ENABLE (1 << 2) + uint32_t pause_config; +}; + +struct qed_link_output { + bool link_up; + uint32_t supported_caps; /* In SUPPORTED defs */ + uint32_t advertised_caps; /* In ADVERTISED defs */ + uint32_t lp_caps; /* In ADVERTISED defs */ + uint32_t speed; /* In Mb/s */ + uint8_t duplex; /* In DUPLEX defs */ + uint8_t port; /* In PORT defs */ + bool autoneg; + uint32_t pause_config; +}; + +#define QED_DRV_VER_STR_SIZE 80 +struct qed_slowpath_params { + uint32_t int_mode; + uint8_t drv_major; + uint8_t drv_minor; + uint8_t drv_rev; + uint8_t drv_eng; + uint8_t name[QED_DRV_VER_STR_SIZE]; +}; + +#define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */ + +struct qed_common_cb_ops { + void (*link_update)(void *dev, struct qed_link_output *link); +}; + +struct qed_selftest_ops { +/** + * @brief registers - Perform register tests + * + * @param edev + * + * @return 0 on success, error otherwise. 
+ */ + int (*registers)(struct ecore_dev *edev); +}; + +struct qed_common_ops { + int (*probe)(struct ecore_dev *edev, + struct rte_pci_device *pci_dev, + enum qed_protocol protocol, + uint32_t dp_module, uint8_t dp_level, bool is_vf); + void (*set_id)(struct ecore_dev *edev, + char name[], const char ver_str[]); + enum _ecore_status_t (*chain_alloc)(struct ecore_dev *edev, + enum ecore_chain_use_mode + intended_use, + enum ecore_chain_mode mode, + enum ecore_chain_cnt_type cnt_type, + uint32_t num_elems, + osal_size_t elem_size, + struct ecore_chain *p_chain); + + void (*chain_free)(struct ecore_dev *edev, + struct ecore_chain *p_chain); + + void (*get_link)(struct ecore_dev *edev, + struct qed_link_output *if_link); + int (*set_link)(struct ecore_dev *edev, + struct qed_link_params *params); + + int (*drain)(struct ecore_dev *edev); + + void (*remove)(struct ecore_dev *edev); + + int (*slowpath_stop)(struct ecore_dev *edev); + + void (*update_pf_params)(struct ecore_dev *edev, + struct ecore_pf_params *params); + + int (*slowpath_start)(struct ecore_dev *edev, + struct qed_slowpath_params *params); + + int (*set_fp_int)(struct ecore_dev *edev, uint16_t cnt); + + uint32_t (*sb_init)(struct ecore_dev *edev, + struct ecore_sb_info *sb_info, + void *sb_virt_addr, + dma_addr_t sb_phy_addr, + uint16_t sb_id, enum qed_sb_type type); + + bool (*can_link_change)(struct ecore_dev *edev); + void (*update_msglvl)(struct ecore_dev *edev, + uint32_t dp_module, uint8_t dp_level); +}; + +/** + * @brief qed_get_protocol_version + * + * @param protocol + * + * @return version supported by qed for given protocol driver + */ +uint32_t qed_get_protocol_version(enum qed_protocol protocol); + +#endif /* _QEDE_IF_H */ diff --git a/drivers/net/qede/qede_logs.h b/drivers/net/qede/qede_logs.h new file mode 100644 index 00000000..45c4af09 --- /dev/null +++ b/drivers/net/qede/qede_logs.h @@ -0,0 +1,90 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#ifndef _QEDE_LOGS_H_ +#define _QEDE_LOGS_H_ + +#define DP_ERR(p_dev, fmt, ...) \ + rte_log(RTE_LOG_ERR, RTE_LOGTYPE_PMD, \ + "[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + (p_dev)->name ? (p_dev)->name : "", \ + ##__VA_ARGS__) + +#define DP_NOTICE(p_dev, is_assert, fmt, ...) \ + rte_log(RTE_LOG_NOTICE, RTE_LOGTYPE_PMD,\ + "[QEDE PMD: (%s)]%s:" fmt, \ + (p_dev)->name ? (p_dev)->name : "", \ + __func__, \ + ##__VA_ARGS__) + +#ifdef RTE_LIBRTE_QEDE_DEBUG_INFO + +#define DP_INFO(p_dev, fmt, ...) \ + rte_log(RTE_LOG_INFO, RTE_LOGTYPE_PMD, \ + "[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + (p_dev)->name ? (p_dev)->name : "", \ + ##__VA_ARGS__) +#else +#define DP_INFO(p_dev, fmt, ...) do { } while (0) + +#endif + +#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER +#define DP_VERBOSE(p_dev, module, fmt, ...) \ +do { \ + if ((p_dev)->dp_module & module) \ + rte_log(RTE_LOG_DEBUG, RTE_LOGTYPE_PMD, \ + "[%s:%d(%s)]" fmt, \ + __func__, __LINE__, \ + (p_dev)->name ? (p_dev)->name : "", \ + ##__VA_ARGS__); \ +} while (0) +#else +#define DP_VERBOSE(p_dev, fmt, ...) do { } while (0) +#endif + +#define PMD_INIT_LOG(level, edev, fmt, args...) 
\ + rte_log(RTE_LOG_ ## level, RTE_LOGTYPE_PMD, \ + "[qede_pmd: %s] %s() " fmt "\n", \ + (edev)->name, __func__, ##args) + +#ifdef RTE_LIBRTE_QEDE_DEBUG_INIT +#define PMD_INIT_FUNC_TRACE(edev) PMD_INIT_LOG(DEBUG, edev, " >>") +#else +#define PMD_INIT_FUNC_TRACE(edev) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_QEDE_DEBUG_TX +#define PMD_TX_LOG(level, q, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \ + __func__, q->port_id, q->queue_id, ## args) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_QEDE_DEBUG_RX +#define PMD_RX_LOG(level, q, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): port=%u queue=%u " fmt "\n", \ + __func__, q->port_id, q->queue_id, ## args) +#else +#define PMD_RX_LOG(level, q, fmt, args...) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_QEDE_DEBUG_DRIVER +#define PMD_DRV_LOG_RAW(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt, __func__, ## args) +#else +#define PMD_DRV_LOG_RAW(level, fmt, args...) do { } while (0) +#endif + +#define PMD_DRV_LOG(level, fmt, args...) \ + PMD_DRV_LOG_RAW(level, fmt "\n", ## args) + +#endif /* _QEDE_LOGS_H_ */ diff --git a/drivers/net/qede/qede_main.c b/drivers/net/qede/qede_main.c new file mode 100644 index 00000000..73608c69 --- /dev/null +++ b/drivers/net/qede/qede_main.c @@ -0,0 +1,664 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + +#include <sys/stat.h> +#include <fcntl.h> +#include <unistd.h> +#include <zlib.h> +#include <limits.h> +#include <rte_alarm.h> + +#include "qede_ethdev.h" + +static uint8_t npar_tx_switching = 1; + +/* Alarm timeout. */ +#define QEDE_ALARM_TIMEOUT_US 100000 + +#define CONFIG_QED_BINARY_FW +/* Global variable to hold absolute path of fw file */ +char fw_file[PATH_MAX]; + +const char *QEDE_DEFAULT_FIRMWARE = + "/lib/firmware/qed/qed_init_values_zipped-8.7.7.0.bin"; + +static void +qed_update_pf_params(struct ecore_dev *edev, struct ecore_pf_params *params) +{ + int i; + + for (i = 0; i < edev->num_hwfns; i++) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + p_hwfn->pf_params = *params; + } +} + +static void qed_init_pci(struct ecore_dev *edev, struct rte_pci_device *pci_dev) +{ + edev->regview = pci_dev->mem_resource[0].addr; + edev->doorbells = pci_dev->mem_resource[2].addr; +} + +static int +qed_probe(struct ecore_dev *edev, struct rte_pci_device *pci_dev, + enum qed_protocol protocol, uint32_t dp_module, + uint8_t dp_level, bool is_vf) +{ + struct qede_dev *qdev = (struct qede_dev *)edev; + int rc; + + ecore_init_struct(edev); + qdev->protocol = protocol; + if (is_vf) { + edev->b_is_vf = true; + edev->sriov_info.b_hw_channel = true; + } + ecore_init_dp(edev, dp_module, dp_level, NULL); + qed_init_pci(edev, pci_dev); + rc = ecore_hw_prepare(edev, ECORE_PCI_DEFAULT); + if (rc) { + DP_ERR(edev, "hw prepare failed\n"); + return rc; + } + + return rc; +} + +static int qed_nic_setup(struct ecore_dev *edev) +{ + int rc, i; + + rc = ecore_resc_alloc(edev); + if (rc) + return rc; + + DP_INFO(edev, "Allocated qed resources\n"); + ecore_resc_setup(edev); + + return rc; +} + +static int qed_alloc_stream_mem(struct ecore_dev *edev) +{ + int i; + + for_each_hwfn(edev, i) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + + p_hwfn->stream = OSAL_ZALLOC(p_hwfn->p_dev, GFP_KERNEL, + sizeof(*p_hwfn->stream)); + if (!p_hwfn->stream) + return -ENOMEM; + } + + return 0; +} + +static void qed_free_stream_mem(struct ecore_dev 
*edev) +{ + int i; + + for_each_hwfn(edev, i) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + + if (!p_hwfn->stream) + return; + + OSAL_FREE(p_hwfn->p_dev, p_hwfn->stream); + } +} + +static int qed_load_firmware_data(struct ecore_dev *edev) +{ + int fd; + struct stat st; + const char *fw = RTE_LIBRTE_QEDE_FW; + + if (strcmp(fw, "") == 0) + strcpy(fw_file, QEDE_DEFAULT_FIRMWARE); + else + strcpy(fw_file, fw); + + fd = open(fw_file, O_RDONLY); + if (fd < 0) { + DP_NOTICE(edev, false, "Can't open firmware file\n"); + return -ENOENT; + } + + if (fstat(fd, &st) < 0) { + DP_NOTICE(edev, false, "Can't stat firmware file\n"); + return -1; + } + + edev->firmware = rte_zmalloc("qede_fw", st.st_size, + RTE_CACHE_LINE_SIZE); + if (!edev->firmware) { + DP_NOTICE(edev, false, "Can't allocate memory for firmware\n"); + close(fd); + return -ENOMEM; + } + + if (read(fd, edev->firmware, st.st_size) != st.st_size) { + DP_NOTICE(edev, false, "Can't read firmware data\n"); + close(fd); + return -1; + } + + edev->fw_len = st.st_size; + if (edev->fw_len < 104) { + DP_NOTICE(edev, false, "Invalid fw size: %" PRIu64 "\n", + edev->fw_len); + return -EINVAL; + } + + return 0; +} + +static void qed_handle_bulletin_change(struct ecore_hwfn *hwfn) +{ + uint8_t mac[ETH_ALEN], is_mac_exist, is_mac_forced; + + is_mac_exist = ecore_vf_bulletin_get_forced_mac(hwfn, mac, + &is_mac_forced); + if (is_mac_exist && is_mac_forced) + rte_memcpy(hwfn->hw_info.hw_mac_addr, mac, ETH_ALEN); + + /* Always update link configuration according to bulletin */ + qed_link_update(hwfn); +} + +static void qede_vf_task(void *arg) +{ + struct ecore_hwfn *p_hwfn = arg; + uint8_t change = 0; + + /* Read the bulletin board, and re-schedule the task */ + ecore_vf_read_bulletin(p_hwfn, &change); + if (change) + qed_handle_bulletin_change(p_hwfn); + + rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, p_hwfn); +} + +static void qed_start_iov_task(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + int i; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + if (!IS_PF(edev)) + rte_eal_alarm_set(QEDE_ALARM_TIMEOUT_US, qede_vf_task, + p_hwfn); + } +} + +static void qed_stop_iov_task(struct ecore_dev *edev) +{ + struct ecore_hwfn *p_hwfn; + int i; + + for_each_hwfn(edev, i) { + p_hwfn = &edev->hwfns[i]; + if (!IS_PF(edev)) + rte_eal_alarm_cancel(qede_vf_task, p_hwfn); + } +} +static int qed_slowpath_start(struct ecore_dev *edev, + struct qed_slowpath_params *params) +{ + bool allow_npar_tx_switching; + const uint8_t *data = NULL; + struct ecore_hwfn *hwfn; + struct ecore_mcp_drv_version drv_version; + struct qede_dev *qdev = (struct qede_dev *)edev; + int rc; +#ifdef QED_ENC_SUPPORTED + struct ecore_tunn_start_params tunn_info; +#endif + +#ifdef CONFIG_QED_BINARY_FW + if (IS_PF(edev)) { + rc = qed_load_firmware_data(edev); + if (rc) { + DP_NOTICE(edev, true, + "Failed to find fw file %s\n", fw_file); + goto err; + } + } +#endif + + rc = qed_nic_setup(edev); + if (rc) + goto err; + + /* set int_coalescing_mode */ + edev->int_coalescing_mode = ECORE_COAL_MODE_ENABLE; + + /* Should go with CONFIG_QED_BINARY_FW */ + if (IS_PF(edev)) { + /* Allocate stream for unzipping */ + rc = qed_alloc_stream_mem(edev); + if (rc) { + DP_NOTICE(edev, true, + "Failed to allocate stream memory\n"); + goto err2; + } + } + + qed_start_iov_task(edev); + + /* Start the slowpath */ +#ifdef CONFIG_QED_BINARY_FW + if (IS_PF(edev)) + data = edev->firmware; +#endif + allow_npar_tx_switching = npar_tx_switching ? 
true : false; + +#ifdef QED_ENC_SUPPORTED + memset(&tunn_info, 0, sizeof(tunn_info)); + tunn_info.tunn_mode |= 1 << QED_MODE_VXLAN_TUNN | + 1 << QED_MODE_L2GRE_TUNN | + 1 << QED_MODE_IPGRE_TUNN | + 1 << QED_MODE_L2GENEVE_TUNN | 1 << QED_MODE_IPGENEVE_TUNN; + tunn_info.tunn_clss_vxlan = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.tunn_clss_l2gre = QED_TUNN_CLSS_MAC_VLAN; + tunn_info.tunn_clss_ipgre = QED_TUNN_CLSS_MAC_VLAN; + rc = ecore_hw_init(edev, &tunn_info, true, ECORE_INT_MODE_MSIX, + allow_npar_tx_switching, data); +#else + rc = ecore_hw_init(edev, NULL, true, ECORE_INT_MODE_MSIX, + allow_npar_tx_switching, data); +#endif + if (rc) { + DP_ERR(edev, "ecore_hw_init failed\n"); + goto err2; + } + + DP_INFO(edev, "HW inited and function started\n"); + + if (IS_PF(edev)) { + hwfn = ECORE_LEADING_HWFN(edev); + drv_version.version = (params->drv_major << 24) | + (params->drv_minor << 16) | + (params->drv_rev << 8) | (params->drv_eng); + /* TBD: strlcpy() */ + strncpy((char *)drv_version.name, (const char *)params->name, + MCP_DRV_VER_STR_SIZE - 4); + rc = ecore_mcp_send_drv_version(hwfn, hwfn->p_main_ptt, + &drv_version); + if (rc) { + DP_NOTICE(edev, true, + "Failed sending drv version command\n"); + return rc; + } + } + + ecore_reset_vport_stats(edev); + + return 0; + + ecore_hw_stop(edev); +err2: + ecore_resc_free(edev); +err: +#ifdef CONFIG_QED_BINARY_FW + if (IS_PF(edev)) { + if (edev->firmware) + rte_free(edev->firmware); + edev->firmware = NULL; + } +#endif + qed_stop_iov_task(edev); + + return rc; +} + +static int +qed_fill_dev_info(struct ecore_dev *edev, struct qed_dev_info *dev_info) +{ + struct ecore_ptt *ptt = NULL; + + memset(dev_info, 0, sizeof(struct qed_dev_info)); + dev_info->num_hwfns = edev->num_hwfns; + dev_info->is_mf_default = IS_MF_DEFAULT(&edev->hwfns[0]); + rte_memcpy(&dev_info->hw_mac, &edev->hwfns[0].hw_info.hw_mac_addr, + ETHER_ADDR_LEN); + + if (IS_PF(edev)) { + dev_info->fw_major = FW_MAJOR_VERSION; + dev_info->fw_minor = FW_MINOR_VERSION; + dev_info->fw_rev = FW_REVISION_VERSION; + dev_info->fw_eng = FW_ENGINEERING_VERSION; + dev_info->mf_mode = edev->mf_mode; + dev_info->tx_switching = false; + } else { + ecore_vf_get_fw_version(&edev->hwfns[0], &dev_info->fw_major, + &dev_info->fw_minor, &dev_info->fw_rev, + &dev_info->fw_eng); + } + + if (IS_PF(edev)) { + ptt = ecore_ptt_acquire(ECORE_LEADING_HWFN(edev)); + if (ptt) { + ecore_mcp_get_mfw_ver(edev, ptt, + &dev_info->mfw_rev, NULL); + + ecore_mcp_get_flash_size(ECORE_LEADING_HWFN(edev), ptt, + &dev_info->flash_size); + + /* Workaround to allow PHY-read commands for + * B0 bringup. 
+ */ + if (ECORE_IS_BB_B0(edev)) + dev_info->flash_size = 0xffffffff; + + ecore_ptt_release(ECORE_LEADING_HWFN(edev), ptt); + } + } else { + ecore_mcp_get_mfw_ver(edev, ptt, &dev_info->mfw_rev, NULL); + } + + return 0; +} + +int +qed_fill_eth_dev_info(struct ecore_dev *edev, struct qed_dev_eth_info *info) +{ + struct qede_dev *qdev = (struct qede_dev *)edev; + int i; + + memset(info, 0, sizeof(*info)); + + info->num_tc = 1 /* @@@TBD aelior MULTI_COS */; + + if (IS_PF(edev)) { + info->num_queues = 0; + for_each_hwfn(edev, i) + info->num_queues += + FEAT_NUM(&edev->hwfns[i], ECORE_PF_L2_QUE); + + info->num_vlan_filters = RESC_NUM(&edev->hwfns[0], ECORE_VLAN); + + rte_memcpy(&info->port_mac, &edev->hwfns[0].hw_info.hw_mac_addr, + ETHER_ADDR_LEN); + } else { + ecore_vf_get_num_rxqs(&edev->hwfns[0], &info->num_queues); + + ecore_vf_get_num_vlan_filters(&edev->hwfns[0], + &info->num_vlan_filters); + + ecore_vf_get_port_mac(&edev->hwfns[0], + (uint8_t *)&info->port_mac); + } + + qed_fill_dev_info(edev, &info->common); + + if (IS_VF(edev)) + memset(&info->common.hw_mac, 0, ETHER_ADDR_LEN); + + return 0; +} + +static void +qed_set_id(struct ecore_dev *edev, char name[NAME_SIZE], + const char ver_str[VER_SIZE]) +{ + int i; + + rte_memcpy(edev->name, name, NAME_SIZE); + for_each_hwfn(edev, i) { + snprintf(edev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i); + } + rte_memcpy(edev->ver_str, ver_str, VER_SIZE); + edev->drv_type = DRV_ID_DRV_TYPE_LINUX; +} + +static uint32_t +qed_sb_init(struct ecore_dev *edev, struct ecore_sb_info *sb_info, + void *sb_virt_addr, dma_addr_t sb_phy_addr, + uint16_t sb_id, enum qed_sb_type type) +{ + struct ecore_hwfn *p_hwfn; + int hwfn_index; + uint16_t rel_sb_id; + uint8_t n_hwfns; + uint32_t rc; + + /* RoCE uses single engine and CMT uses two engines. When using both + * we force only a single engine. Storage uses only engine 0 too. 
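+ * For example, on a CMT device (num_hwfns == 2) an L2 queue with
+ * sb_id == 5 maps to hwfn_index = 5 % 2 = 1 and rel_sb_id = 5 / 2 = 2,
+ * i.e. the third status block of the second engine.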
+ */ + if (type == QED_SB_TYPE_L2_QUEUE) + n_hwfns = edev->num_hwfns; + else + n_hwfns = 1; + + hwfn_index = sb_id % n_hwfns; + p_hwfn = &edev->hwfns[hwfn_index]; + rel_sb_id = sb_id / n_hwfns; + + DP_INFO(edev, "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n", + hwfn_index, rel_sb_id, sb_id); + + rc = ecore_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info, + sb_virt_addr, sb_phy_addr, rel_sb_id); + + return rc; +} + +static void qed_fill_link(struct ecore_hwfn *hwfn, + struct qed_link_output *if_link) +{ + struct ecore_mcp_link_params params; + struct ecore_mcp_link_state link; + struct ecore_mcp_link_capabilities link_caps; + uint32_t media_type; + uint8_t change = 0; + + memset(if_link, 0, sizeof(*if_link)); + + /* Prepare source inputs */ + if (IS_PF(hwfn->p_dev)) { + rte_memcpy(&params, ecore_mcp_get_link_params(hwfn), + sizeof(params)); + rte_memcpy(&link, ecore_mcp_get_link_state(hwfn), sizeof(link)); + rte_memcpy(&link_caps, ecore_mcp_get_link_capabilities(hwfn), + sizeof(link_caps)); + } else { + ecore_vf_read_bulletin(hwfn, &change); + ecore_vf_get_link_params(hwfn, &params); + ecore_vf_get_link_state(hwfn, &link); + ecore_vf_get_link_caps(hwfn, &link_caps); + } + + /* Set the link parameters to pass to protocol driver */ + if (link.link_up) + if_link->link_up = true; + + if (link.link_up) + if_link->speed = link.speed; + + if_link->duplex = QEDE_DUPLEX_FULL; + + if (params.speed.autoneg) + if_link->supported_caps |= QEDE_SUPPORTED_AUTONEG; + + if (params.pause.autoneg || params.pause.forced_rx || + params.pause.forced_tx) + if_link->supported_caps |= QEDE_SUPPORTED_PAUSE; + + if (params.pause.autoneg) + if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE; + + if (params.pause.forced_rx) + if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE; + + if (params.pause.forced_tx) + if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE; +} + +static void +qed_get_current_link(struct ecore_dev *edev, struct qed_link_output *if_link) +{ + qed_fill_link(&edev->hwfns[0], if_link); + +#ifdef CONFIG_QED_SRIOV + for_each_hwfn(cdev, i) + qed_inform_vf_link_state(&cdev->hwfns[i]); +#endif +} + +static int qed_set_link(struct ecore_dev *edev, struct qed_link_params *params) +{ + struct ecore_hwfn *hwfn; + struct ecore_ptt *ptt; + struct ecore_mcp_link_params *link_params; + int rc; + + if (IS_VF(edev)) + return 0; + + /* The link should be set only once per PF */ + hwfn = &edev->hwfns[0]; + + ptt = ecore_ptt_acquire(hwfn); + if (!ptt) + return -EBUSY; + + link_params = ecore_mcp_get_link_params(hwfn); + if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG) + link_params->speed.autoneg = params->autoneg; + + if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) { + if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE) + link_params->pause.autoneg = true; + else + link_params->pause.autoneg = false; + if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE) + link_params->pause.forced_rx = true; + else + link_params->pause.forced_rx = false; + if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE) + link_params->pause.forced_tx = true; + else + link_params->pause.forced_tx = false; + } + + rc = ecore_mcp_set_link(hwfn, ptt, params->link_up); + + ecore_ptt_release(hwfn, ptt); + + return rc; +} + +void qed_link_update(struct ecore_hwfn *hwfn) +{ + struct qed_link_output if_link; + + qed_fill_link(hwfn, &if_link); +} + +static int qed_drain(struct ecore_dev *edev) +{ + struct ecore_hwfn *hwfn; + struct ecore_ptt *ptt; + int i, rc; + + if (IS_VF(edev)) + return 0; + + for_each_hwfn(edev, i) { + hwfn =
&edev->hwfns[i]; + ptt = ecore_ptt_acquire(hwfn); + if (!ptt) { + DP_NOTICE(hwfn, true, "Failed to drain NIG; No PTT\n"); + return -EBUSY; + } + rc = ecore_mcp_drain(hwfn, ptt); + if (rc) + return rc; + ecore_ptt_release(hwfn, ptt); + } + + return 0; +} + +static int qed_nic_stop(struct ecore_dev *edev) +{ + int i, rc; + + rc = ecore_hw_stop(edev); + for (i = 0; i < edev->num_hwfns; i++) { + struct ecore_hwfn *p_hwfn = &edev->hwfns[i]; + + if (p_hwfn->b_sp_dpc_enabled) + p_hwfn->b_sp_dpc_enabled = false; + } + return rc; +} + +static int qed_nic_reset(struct ecore_dev *edev) +{ + int rc; + + rc = ecore_hw_reset(edev); + if (rc) + return rc; + + ecore_resc_free(edev); + + return 0; +} + +static int qed_slowpath_stop(struct ecore_dev *edev) +{ +#ifdef CONFIG_QED_SRIOV + int i; +#endif + + if (!edev) + return -ENODEV; + + if (IS_PF(edev)) { + qed_free_stream_mem(edev); + +#ifdef CONFIG_QED_SRIOV + if (IS_QED_ETH_IF(edev)) + qed_sriov_disable(edev, true); +#endif + qed_nic_stop(edev); + } + + qed_nic_reset(edev); + qed_stop_iov_task(edev); + + return 0; +} + +static void qed_remove(struct ecore_dev *edev) +{ + if (!edev) + return; + + ecore_hw_remove(edev); +} + +const struct qed_common_ops qed_common_ops_pass = { + INIT_STRUCT_FIELD(probe, &qed_probe), + INIT_STRUCT_FIELD(update_pf_params, &qed_update_pf_params), + INIT_STRUCT_FIELD(slowpath_start, &qed_slowpath_start), + INIT_STRUCT_FIELD(set_id, &qed_set_id), + INIT_STRUCT_FIELD(chain_alloc, &ecore_chain_alloc), + INIT_STRUCT_FIELD(chain_free, &ecore_chain_free), + INIT_STRUCT_FIELD(sb_init, &qed_sb_init), + INIT_STRUCT_FIELD(get_link, &qed_get_current_link), + INIT_STRUCT_FIELD(set_link, &qed_set_link), + INIT_STRUCT_FIELD(drain, &qed_drain), + INIT_STRUCT_FIELD(slowpath_stop, &qed_slowpath_stop), + INIT_STRUCT_FIELD(remove, &qed_remove), +}; diff --git a/drivers/net/qede/qede_rxtx.c b/drivers/net/qede/qede_rxtx.c new file mode 100644 index 00000000..b5a40fe4 --- /dev/null +++ b/drivers/net/qede/qede_rxtx.c @@ -0,0 +1,1389 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. 
+ */ + +#include "qede_rxtx.h" + +static bool gro_disable = 1; /* mod_param */ + +static inline int qede_alloc_rx_buffer(struct qede_rx_queue *rxq) +{ + struct rte_mbuf *new_mb = NULL; + struct eth_rx_bd *rx_bd; + dma_addr_t mapping; + uint16_t idx = rxq->sw_rx_prod & NUM_RX_BDS(rxq); + + new_mb = rte_mbuf_raw_alloc(rxq->mb_pool); + if (unlikely(!new_mb)) { + PMD_RX_LOG(ERR, rxq, + "Failed to allocate rx buffer " + "sw_rx_prod %u sw_rx_cons %u mp entries %u free %u", + idx, rxq->sw_rx_cons & NUM_RX_BDS(rxq), + rte_mempool_avail_count(rxq->mb_pool), + rte_mempool_in_use_count(rxq->mb_pool)); + return -ENOMEM; + } + rxq->sw_rx_ring[idx].mbuf = new_mb; + rxq->sw_rx_ring[idx].page_offset = 0; + mapping = rte_mbuf_data_dma_addr_default(new_mb); + /* Advance PROD and get BD pointer */ + rx_bd = (struct eth_rx_bd *)ecore_chain_produce(&rxq->rx_bd_ring); + rx_bd->addr.hi = rte_cpu_to_le_32(U64_HI(mapping)); + rx_bd->addr.lo = rte_cpu_to_le_32(U64_LO(mapping)); + rxq->sw_rx_prod++; + return 0; +} + +static void qede_rx_queue_release_mbufs(struct qede_rx_queue *rxq) +{ + uint16_t i; + + if (rxq->sw_rx_ring != NULL) { + for (i = 0; i < rxq->nb_rx_desc; i++) { + if (rxq->sw_rx_ring[i].mbuf != NULL) { + rte_pktmbuf_free(rxq->sw_rx_ring[i].mbuf); + rxq->sw_rx_ring[i].mbuf = NULL; + } + } + } +} + +void qede_rx_queue_release(void *rx_queue) +{ + struct qede_rx_queue *rxq = rx_queue; + + if (rxq != NULL) { + qede_rx_queue_release_mbufs(rxq); + rte_free(rxq->sw_rx_ring); + rxq->sw_rx_ring = NULL; + rte_free(rxq); + rx_queue = NULL; + } +} + +int +qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct rte_eth_dev_data *eth_data = dev->data; + struct qede_rx_queue *rxq; + uint16_t pkt_len = (uint16_t)dev->data->dev_conf.rxmode.max_rx_pkt_len; + size_t size; + uint16_t data_size; + int rc; + int i; + + PMD_INIT_FUNC_TRACE(edev); + + /* Note: Ring size/align is controlled by struct rte_eth_desc_lim */ + if (!rte_is_power_of_2(nb_desc)) { + DP_ERR(edev, "Ring size %u is not power of 2\n", + nb_desc); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed... 
*/ + if (dev->data->rx_queues[queue_idx] != NULL) { + qede_rx_queue_release(dev->data->rx_queues[queue_idx]); + dev->data->rx_queues[queue_idx] = NULL; + } + + /* First allocate the rx queue data structure */ + rxq = rte_zmalloc_socket("qede_rx_queue", sizeof(struct qede_rx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + + if (!rxq) { + DP_ERR(edev, "Unable to allocate memory for rxq on socket %u", + socket_id); + return -ENOMEM; + } + + rxq->qdev = qdev; + rxq->mb_pool = mp; + rxq->nb_rx_desc = nb_desc; + rxq->queue_id = queue_idx; + rxq->port_id = dev->data->port_id; + + /* Sanity check */ + data_size = (uint16_t)rte_pktmbuf_data_room_size(mp) - + RTE_PKTMBUF_HEADROOM; + + if (pkt_len > data_size) { + DP_ERR(edev, "MTU %u should not exceed dataroom %u\n", + pkt_len, data_size); + rte_free(rxq); + return -EINVAL; + } + + qdev->mtu = pkt_len; + rxq->rx_buf_size = pkt_len + QEDE_ETH_OVERHEAD; + + DP_INFO(edev, "MTU = %u ; RX buffer = %u\n", + qdev->mtu, rxq->rx_buf_size); + + if (pkt_len > ETHER_MAX_LEN) { + dev->data->dev_conf.rxmode.jumbo_frame = 1; + DP_NOTICE(edev, false, "jumbo frame enabled\n"); + } else { + dev->data->dev_conf.rxmode.jumbo_frame = 0; + } + + /* Allocate the parallel driver ring for Rx buffers */ + size = sizeof(*rxq->sw_rx_ring) * rxq->nb_rx_desc; + rxq->sw_rx_ring = rte_zmalloc_socket("sw_rx_ring", size, + RTE_CACHE_LINE_SIZE, socket_id); + if (!rxq->sw_rx_ring) { + DP_NOTICE(edev, false, + "Unable to alloc memory for sw_rx_ring on socket %u\n", + socket_id); + rte_free(rxq); + rxq = NULL; + return -ENOMEM; + } + + /* Allocate FW Rx ring */ + rc = qdev->ops->common->chain_alloc(edev, + ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, + ECORE_CHAIN_MODE_NEXT_PTR, + ECORE_CHAIN_CNT_TYPE_U16, + rxq->nb_rx_desc, + sizeof(struct eth_rx_bd), + &rxq->rx_bd_ring); + + if (rc != ECORE_SUCCESS) { + DP_NOTICE(edev, false, + "Unable to alloc memory for rxbd ring on socket %u\n", + socket_id); + rte_free(rxq->sw_rx_ring); + rxq->sw_rx_ring = NULL; + rte_free(rxq); + rxq = NULL; + } + + /* Allocate FW completion ring */ + rc = qdev->ops->common->chain_alloc(edev, + ECORE_CHAIN_USE_TO_CONSUME, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + rxq->nb_rx_desc, + sizeof(union eth_rx_cqe), + &rxq->rx_comp_ring); + + if (rc != ECORE_SUCCESS) { + DP_NOTICE(edev, false, + "Unable to alloc memory for cqe ring on socket %u\n", + socket_id); + /* TBD: Freeing RX BD ring */ + rte_free(rxq->sw_rx_ring); + rxq->sw_rx_ring = NULL; + rte_free(rxq); + } + + /* Allocate buffers for the Rx ring */ + for (i = 0; i < rxq->nb_rx_desc; i++) { + rc = qede_alloc_rx_buffer(rxq); + if (rc) { + DP_NOTICE(edev, false, + "RX buffer allocation failed at idx=%d\n", i); + goto err4; + } + } + + dev->data->rx_queues[queue_idx] = rxq; + if (!qdev->rx_queues) + qdev->rx_queues = (struct qede_rx_queue **)dev->data->rx_queues; + + DP_INFO(edev, "rxq %d num_desc %u rx_buf_size=%u socket %u\n", + queue_idx, nb_desc, qdev->mtu, socket_id); + + return 0; +err4: + qede_rx_queue_release(rxq); + return -ENOMEM; +} + +static void qede_tx_queue_release_mbufs(struct qede_tx_queue *txq) +{ + unsigned int i; + + PMD_TX_LOG(DEBUG, txq, "releasing %u mbufs\n", txq->nb_tx_desc); + + if (txq->sw_tx_ring != NULL) { + for (i = 0; i < txq->nb_tx_desc; i++) { + if (txq->sw_tx_ring[i].mbuf != NULL) { + rte_pktmbuf_free(txq->sw_tx_ring[i].mbuf); + txq->sw_tx_ring[i].mbuf = NULL; + } + } + } +} + +void qede_tx_queue_release(void *tx_queue) +{ + struct qede_tx_queue *txq = tx_queue; + + if (txq != NULL) { + qede_tx_queue_release_mbufs(txq); + if 
(txq->sw_tx_ring) { + rte_free(txq->sw_tx_ring); + txq->sw_tx_ring = NULL; + } + rte_free(txq); + } + tx_queue = NULL; +} + +int +qede_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + struct qede_dev *qdev = dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qede_tx_queue *txq; + int rc; + + PMD_INIT_FUNC_TRACE(edev); + + if (!rte_is_power_of_2(nb_desc)) { + DP_ERR(edev, "Ring size %u is not power of 2\n", + nb_desc); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed... */ + if (dev->data->tx_queues[queue_idx] != NULL) { + qede_tx_queue_release(dev->data->tx_queues[queue_idx]); + dev->data->tx_queues[queue_idx] = NULL; + } + + txq = rte_zmalloc_socket("qede_tx_queue", sizeof(struct qede_tx_queue), + RTE_CACHE_LINE_SIZE, socket_id); + + if (txq == NULL) { + DP_ERR(edev, + "Unable to allocate memory for txq on socket %u", + socket_id); + return -ENOMEM; + } + + txq->nb_tx_desc = nb_desc; + txq->qdev = qdev; + txq->port_id = dev->data->port_id; + + rc = qdev->ops->common->chain_alloc(edev, + ECORE_CHAIN_USE_TO_CONSUME_PRODUCE, + ECORE_CHAIN_MODE_PBL, + ECORE_CHAIN_CNT_TYPE_U16, + txq->nb_tx_desc, + sizeof(union eth_tx_bd_types), + &txq->tx_pbl); + if (rc != ECORE_SUCCESS) { + DP_ERR(edev, + "Unable to allocate memory for txbd ring on socket %u", + socket_id); + qede_tx_queue_release(txq); + return -ENOMEM; + } + + /* Allocate software ring */ + txq->sw_tx_ring = rte_zmalloc_socket("txq->sw_tx_ring", + (sizeof(struct qede_tx_entry) * + txq->nb_tx_desc), + RTE_CACHE_LINE_SIZE, socket_id); + + if (!txq->sw_tx_ring) { + DP_ERR(edev, + "Unable to allocate memory for txbd ring on socket %u", + socket_id); + qede_tx_queue_release(txq); + return -ENOMEM; + } + + txq->queue_id = queue_idx; + + txq->nb_tx_avail = txq->nb_tx_desc; + + txq->tx_free_thresh = + tx_conf->tx_free_thresh ? tx_conf->tx_free_thresh : + (txq->nb_tx_desc - QEDE_DEFAULT_TX_FREE_THRESH); + + dev->data->tx_queues[queue_idx] = txq; + if (!qdev->tx_queues) + qdev->tx_queues = (struct qede_tx_queue **)dev->data->tx_queues; + + txq->txq_counter = 0; + + DP_INFO(edev, + "txq %u num_desc %u tx_free_thresh %u socket %u\n", + queue_idx, nb_desc, txq->tx_free_thresh, socket_id); + + return 0; +} + +/* This function inits fp content and resets the SB, RXQ and TXQ arrays */ +static void qede_init_fp(struct qede_dev *qdev) +{ + struct qede_fastpath *fp; + int rss_id, txq_index, tc; + + memset((void *)qdev->fp_array, 0, (QEDE_RSS_CNT(qdev) * + sizeof(*qdev->fp_array))); + memset((void *)qdev->sb_array, 0, (QEDE_RSS_CNT(qdev) * + sizeof(*qdev->sb_array))); + for_each_rss(rss_id) { + fp = &qdev->fp_array[rss_id]; + + fp->qdev = qdev; + fp->rss_id = rss_id; + + /* Point rxq to generic rte queues that was created + * as part of queue creation. 
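+ * Tx queues are laid out per traffic class in the loop below:
+ * txq_index is tc * QEDE_RSS_CNT(qdev) + rss_id, so with 4 RSS queues
+ * and 2 TCs, fastpath 1 picks up tx_queues[1] for TC0 and
+ * tx_queues[5] for TC1.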
+ */ + fp->rxq = qdev->rx_queues[rss_id]; + fp->sb_info = &qdev->sb_array[rss_id]; + + for (tc = 0; tc < qdev->num_tc; tc++) { + txq_index = tc * QEDE_RSS_CNT(qdev) + rss_id; + fp->txqs[tc] = qdev->tx_queues[txq_index]; + fp->txqs[tc]->queue_id = txq_index; + /* Updating it to main structure */ + snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", + "qdev", rss_id); + } + } + + qdev->gro_disable = gro_disable; +} + +void qede_free_fp_arrays(struct qede_dev *qdev) +{ + /* It asseumes qede_free_mem_load() is called before */ + if (qdev->fp_array != NULL) { + rte_free(qdev->fp_array); + qdev->fp_array = NULL; + } + + if (qdev->sb_array != NULL) { + rte_free(qdev->sb_array); + qdev->sb_array = NULL; + } +} + +int qede_alloc_fp_array(struct qede_dev *qdev) +{ + struct qede_fastpath *fp; + struct ecore_dev *edev = &qdev->edev; + int i; + + qdev->fp_array = rte_calloc("fp", QEDE_RSS_CNT(qdev), + sizeof(*qdev->fp_array), + RTE_CACHE_LINE_SIZE); + + if (!qdev->fp_array) { + DP_ERR(edev, "fp array allocation failed\n"); + return -ENOMEM; + } + + qdev->sb_array = rte_calloc("sb", QEDE_RSS_CNT(qdev), + sizeof(*qdev->sb_array), + RTE_CACHE_LINE_SIZE); + + if (!qdev->sb_array) { + DP_ERR(edev, "sb array allocation failed\n"); + rte_free(qdev->fp_array); + return -ENOMEM; + } + + return 0; +} + +/* This function allocates fast-path status block memory */ +static int +qede_alloc_mem_sb(struct qede_dev *qdev, struct ecore_sb_info *sb_info, + uint16_t sb_id) +{ + struct ecore_dev *edev = &qdev->edev; + struct status_block *sb_virt; + dma_addr_t sb_phys; + int rc; + + sb_virt = OSAL_DMA_ALLOC_COHERENT(edev, &sb_phys, sizeof(*sb_virt)); + + if (!sb_virt) { + DP_ERR(edev, "Status block allocation failed\n"); + return -ENOMEM; + } + + rc = qdev->ops->common->sb_init(edev, sb_info, + sb_virt, sb_phys, sb_id, + QED_SB_TYPE_L2_QUEUE); + if (rc) { + DP_ERR(edev, "Status block initialization failed\n"); + /* TBD: No dma_free_coherent possible */ + return rc; + } + + return 0; +} + +static int qede_alloc_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp) +{ + return qede_alloc_mem_sb(qdev, fp->sb_info, fp->rss_id); +} + +static void qede_shrink_txq(struct qede_dev *qdev, uint16_t num_rss) +{ + /* @@@TBD - this should also re-set the qed interrupts */ +} + +/* This function allocates all qede memory at NIC load. 
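+ * If only some of the fastpaths get their status block memory, the
+ * load is not aborted; qdev->num_rss is trimmed to the number of
+ * queues that were actually allocated (see qede_shrink_txq() above).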
*/ +static int qede_alloc_mem_load(struct qede_dev *qdev) +{ + int rc = 0, rss_id; + struct ecore_dev *edev = &qdev->edev; + + for (rss_id = 0; rss_id < QEDE_RSS_CNT(qdev); rss_id++) { + struct qede_fastpath *fp = &qdev->fp_array[rss_id]; + + rc = qede_alloc_mem_fp(qdev, fp); + if (rc) + break; + } + + if (rss_id != QEDE_RSS_CNT(qdev)) { + /* Failed allocating memory for all the queues */ + if (!rss_id) { + DP_ERR(edev, + "Failed to alloc memory for leading queue\n"); + rc = -ENOMEM; + } else { + DP_NOTICE(edev, false, + "Failed to allocate memory for all of " + "RSS queues\n" + "Desired: %d queues, allocated: %d queues\n", + QEDE_RSS_CNT(qdev), rss_id); + qede_shrink_txq(qdev, rss_id); + } + qdev->num_rss = rss_id; + } + + return 0; +} + +static inline void +qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq) +{ + uint16_t bd_prod = ecore_chain_get_prod_idx(&rxq->rx_bd_ring); + uint16_t cqe_prod = ecore_chain_get_prod_idx(&rxq->rx_comp_ring); + struct eth_rx_prod_data rx_prods = { 0 }; + + /* Update producers */ + rx_prods.bd_prod = rte_cpu_to_le_16(bd_prod); + rx_prods.cqe_prod = rte_cpu_to_le_16(cqe_prod); + + /* Make sure that the BD and SGE data is updated before updating the + * producers since FW might read the BD/SGE right after the producer + * is updated. + */ + rte_wmb(); + + internal_ram_wr(rxq->hw_rxq_prod_addr, sizeof(rx_prods), + (uint32_t *)&rx_prods); + + /* mmiowb is needed to synchronize doorbell writes from more than one + * processor. It guarantees that the write arrives to the device before + * the napi lock is released and another qede_poll is called (possibly + * on another CPU). Without this barrier, the next doorbell can bypass + * this doorbell. This is applicable to IA64/Altix systems. + */ + rte_wmb(); + + PMD_RX_LOG(DEBUG, rxq, "bd_prod %u cqe_prod %u\n", bd_prod, cqe_prod); +} + +static inline uint32_t +qede_rxfh_indir_default(uint32_t index, uint32_t n_rx_rings) +{ + return index % n_rx_rings; +} + +static void qede_prandom_bytes(uint32_t *buff, size_t bytes) +{ + unsigned int i; + + srand((unsigned int)time(NULL)); + + for (i = 0; i < ECORE_RSS_KEY_SIZE; i++) + buff[i] = rand(); +} + +static int +qede_config_rss(struct rte_eth_dev *eth_dev, + struct qed_update_vport_rss_params *rss_params) +{ + struct rte_eth_rss_conf rss_conf; + enum rte_eth_rx_mq_mode mode = eth_dev->data->dev_conf.rxmode.mq_mode; + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + uint8_t rss_caps; + unsigned int i; + uint64_t hf; + uint32_t *key; + + rss_conf = eth_dev->data->dev_conf.rx_adv_conf.rss_conf; + key = (uint32_t *)rss_conf.rss_key; + hf = rss_conf.rss_hf; + PMD_INIT_FUNC_TRACE(edev); + + /* Check if RSS conditions are met. + * Note: Even though its meaningless to enable RSS with one queue, it + * could be used to produce RSS Hash, so skipping that check. 
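+ * The indirection table is simply spread round-robin over the active
+ * Rx queues via qede_rxfh_indir_default() (e.g. 0,1,2,3,0,1,... for
+ * four queues), and the hash key is taken from rss_conf or filled
+ * with pseudo-random bytes when the application does not provide one.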
+ */ + + if (!(mode & ETH_MQ_RX_RSS)) { + DP_INFO(edev, "RSS flag is not set\n"); + return -EINVAL; + } + + DP_INFO(edev, "RSS flag is set\n"); + + if (rss_conf.rss_hf == 0) + DP_NOTICE(edev, false, "RSS hash function = 0, disables RSS\n"); + + if (rss_conf.rss_key != NULL) + memcpy(qdev->rss_params.rss_key, rss_conf.rss_key, + rss_conf.rss_key_len); + + memset(rss_params, 0, sizeof(*rss_params)); + + for (i = 0; i < ECORE_RSS_IND_TABLE_SIZE; i++) + rss_params->rss_ind_table[i] = qede_rxfh_indir_default(i, + QEDE_RSS_CNT(qdev)); + + /* key and protocols */ + if (rss_conf.rss_key == NULL) + qede_prandom_bytes(rss_params->rss_key, + sizeof(rss_params->rss_key)); + else + memcpy(rss_params->rss_key, rss_conf.rss_key, + rss_conf.rss_key_len); + + rss_caps = 0; + rss_caps |= (hf & ETH_RSS_IPV4) ? ECORE_RSS_IPV4 : 0; + rss_caps |= (hf & ETH_RSS_IPV6) ? ECORE_RSS_IPV6 : 0; + rss_caps |= (hf & ETH_RSS_IPV6_EX) ? ECORE_RSS_IPV6 : 0; + rss_caps |= (hf & ETH_RSS_NONFRAG_IPV4_TCP) ? ECORE_RSS_IPV4_TCP : 0; + rss_caps |= (hf & ETH_RSS_NONFRAG_IPV6_TCP) ? ECORE_RSS_IPV6_TCP : 0; + rss_caps |= (hf & ETH_RSS_IPV6_TCP_EX) ? ECORE_RSS_IPV6_TCP : 0; + + rss_params->rss_caps = rss_caps; + + DP_INFO(edev, "RSS check passes\n"); + + return 0; +} + +static int qede_start_queues(struct rte_eth_dev *eth_dev, bool clear_stats) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qed_update_vport_rss_params *rss_params = &qdev->rss_params; + struct qed_dev_info *qed_info = &qdev->dev_info.common; + struct qed_update_vport_params vport_update_params; + struct qed_start_vport_params start = { 0 }; + int vlan_removal_en = 1; + int rc, tc, i; + + if (!qdev->num_rss) { + DP_ERR(edev, + "Cannot update V-VPORT as active as " + "there are no Rx queues\n"); + return -EINVAL; + } + + start.remove_inner_vlan = vlan_removal_en; + start.gro_enable = !qdev->gro_disable; + start.mtu = qdev->mtu; + start.vport_id = 0; + start.drop_ttl0 = true; + start.clear_stats = clear_stats; + + rc = qdev->ops->vport_start(edev, &start); + if (rc) { + DP_ERR(edev, "Start V-PORT failed %d\n", rc); + return rc; + } + + DP_INFO(edev, + "Start vport ramrod passed, vport_id = %d," + " MTU = %d, vlan_removal_en = %d\n", + start.vport_id, qdev->mtu, vlan_removal_en); + + for_each_rss(i) { + struct qede_fastpath *fp = &qdev->fp_array[i]; + dma_addr_t p_phys_table; + uint16_t page_cnt; + + p_phys_table = ecore_chain_get_pbl_phys(&fp->rxq->rx_comp_ring); + page_cnt = ecore_chain_get_page_cnt(&fp->rxq->rx_comp_ring); + + ecore_sb_ack(fp->sb_info, IGU_INT_DISABLE, 0); /* @DPDK */ + + rc = qdev->ops->q_rx_start(edev, i, i, 0, + fp->sb_info->igu_sb_id, + RX_PI, + fp->rxq->rx_buf_size, + fp->rxq->rx_bd_ring.p_phys_addr, + p_phys_table, + page_cnt, + &fp->rxq->hw_rxq_prod_addr); + if (rc) { + DP_ERR(edev, "Start RXQ #%d failed %d\n", i, rc); + return rc; + } + + fp->rxq->hw_cons_ptr = &fp->sb_info->sb_virt->pi_array[RX_PI]; + + qede_update_rx_prod(qdev, fp->rxq); + + for (tc = 0; tc < qdev->num_tc; tc++) { + struct qede_tx_queue *txq = fp->txqs[tc]; + int txq_index = tc * QEDE_RSS_CNT(qdev) + i; + + p_phys_table = ecore_chain_get_pbl_phys(&txq->tx_pbl); + page_cnt = ecore_chain_get_page_cnt(&txq->tx_pbl); + rc = qdev->ops->q_tx_start(edev, i, txq_index, + 0, + fp->sb_info->igu_sb_id, + TX_PI(tc), + p_phys_table, page_cnt, + &txq->doorbell_addr); + if (rc) { + DP_ERR(edev, "Start txq %u failed %d\n", + txq_index, rc); + return rc; + } + + txq->hw_cons_ptr = + &fp->sb_info->sb_virt->pi_array[TX_PI(tc)]; + 
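+ /* Pre-program the static part of the Tx doorbell record: destination
+  * XCM, aggregated command SET, and the XCM Tx BD producer as the
+  * value being updated by each doorbell write to txq->doorbell_addr.
+  */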
SET_FIELD(txq->tx_db.data.params, + ETH_DB_DATA_DEST, DB_DEST_XCM); + SET_FIELD(txq->tx_db.data.params, ETH_DB_DATA_AGG_CMD, + DB_AGG_CMD_SET); + SET_FIELD(txq->tx_db.data.params, + ETH_DB_DATA_AGG_VAL_SEL, + DQ_XCM_ETH_TX_BD_PROD_CMD); + + txq->tx_db.data.agg_flags = DQ_XCM_ETH_DQ_CF_CMD; + } + } + + /* Prepare and send the vport enable */ + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = start.vport_id; + vport_update_params.update_vport_active_flg = 1; + vport_update_params.vport_active_flg = 1; + + /* @DPDK */ + if (qed_info->mf_mode == MF_NPAR && qed_info->tx_switching) { + /* TBD: Check SRIOV enabled for VF */ + vport_update_params.update_tx_switching_flg = 1; + vport_update_params.tx_switching_flg = 1; + } + + if (!qede_config_rss(eth_dev, rss_params)) { + vport_update_params.update_rss_flg = 1; + + qdev->rss_enabled = 1; + DP_INFO(edev, "Updating RSS flag\n"); + } else { + qdev->rss_enabled = 0; + DP_INFO(edev, "Not Updating RSS flag\n"); + } + + rte_memcpy(&vport_update_params.rss_params, rss_params, + sizeof(*rss_params)); + + rc = qdev->ops->vport_update(edev, &vport_update_params); + if (rc) { + DP_ERR(edev, "Update V-PORT failed %d\n", rc); + return rc; + } + + return 0; +} + +#ifdef ENC_SUPPORTED +static bool qede_tunn_exist(uint16_t flag) +{ + return !!((PARSING_AND_ERR_FLAGS_TUNNELEXIST_MASK << + PARSING_AND_ERR_FLAGS_TUNNELEXIST_SHIFT) & flag); +} + +static inline uint8_t qede_check_tunn_csum(uint16_t flag) +{ + uint8_t tcsum = 0; + uint16_t csum_flag = 0; + + if ((PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMWASCALCULATED_SHIFT) & flag) + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELL4CHKSMERROR_SHIFT; + + if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + tcsum = QEDE_TUNN_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_TUNNELIPHDRERROR_SHIFT | + PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return QEDE_CSUM_UNNECESSARY | tcsum; +} +#else +static inline uint8_t qede_tunn_exist(uint16_t flag) +{ + return 0; +} + +static inline uint8_t qede_check_tunn_csum(uint16_t flag) +{ + return 0; +} +#endif + +static inline uint8_t qede_check_notunn_csum(uint16_t flag) +{ + uint8_t csum = 0; + uint16_t csum_flag = 0; + + if ((PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMWASCALCULATED_SHIFT) & flag) { + csum_flag |= PARSING_AND_ERR_FLAGS_L4CHKSMERROR_MASK << + PARSING_AND_ERR_FLAGS_L4CHKSMERROR_SHIFT; + csum = QEDE_CSUM_UNNECESSARY; + } + + csum_flag |= PARSING_AND_ERR_FLAGS_IPHDRERROR_MASK << + PARSING_AND_ERR_FLAGS_IPHDRERROR_SHIFT; + + if (csum_flag & flag) + return QEDE_CSUM_ERROR; + + return csum; +} + +static inline uint8_t qede_check_csum(uint16_t flag) +{ + if (likely(!qede_tunn_exist(flag))) + return qede_check_notunn_csum(flag); + else + return qede_check_tunn_csum(flag); +} + +static inline void qede_rx_bd_ring_consume(struct qede_rx_queue *rxq) +{ + ecore_chain_consume(&rxq->rx_bd_ring); + rxq->sw_rx_cons++; +} + +static inline void +qede_reuse_page(struct qede_dev *qdev, + struct qede_rx_queue *rxq, struct qede_rx_entry *curr_cons) +{ + struct eth_rx_bd *rx_bd_prod = 
ecore_chain_produce(&rxq->rx_bd_ring); + uint16_t idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + struct qede_rx_entry *curr_prod; + dma_addr_t new_mapping; + + curr_prod = &rxq->sw_rx_ring[idx]; + *curr_prod = *curr_cons; + + new_mapping = rte_mbuf_data_dma_addr_default(curr_prod->mbuf) + + curr_prod->page_offset; + + rx_bd_prod->addr.hi = rte_cpu_to_le_32(U64_HI(new_mapping)); + rx_bd_prod->addr.lo = rte_cpu_to_le_32(U64_LO(new_mapping)); + + rxq->sw_rx_prod++; +} + +static inline void +qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, + struct qede_dev *qdev, uint8_t count) +{ + struct qede_rx_entry *curr_cons; + + for (; count > 0; count--) { + curr_cons = &rxq->sw_rx_ring[rxq->sw_rx_cons & NUM_RX_BDS(rxq)]; + qede_reuse_page(qdev, rxq, curr_cons); + qede_rx_bd_ring_consume(rxq); + } +} + +static inline uint32_t qede_rx_cqe_to_pkt_type(uint16_t flags) +{ + uint32_t p_type; + /* TBD - L4 indications needed ? */ + uint16_t protocol = ((PARSING_AND_ERR_FLAGS_L3TYPE_MASK << + PARSING_AND_ERR_FLAGS_L3TYPE_SHIFT) & flags); + + /* protocol = 3 means LLC/SNAP over Ethernet */ + if (unlikely(protocol == 0 || protocol == 3)) + p_type = RTE_PTYPE_UNKNOWN; + else if (protocol == 1) + p_type = RTE_PTYPE_L3_IPV4; + else if (protocol == 2) + p_type = RTE_PTYPE_L3_IPV6; + + return RTE_PTYPE_L2_ETHER | p_type; +} + +uint16_t +qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + struct qede_rx_queue *rxq = p_rxq; + struct qede_dev *qdev = rxq->qdev; + struct ecore_dev *edev = &qdev->edev; + struct qede_fastpath *fp = &qdev->fp_array[rxq->queue_id]; + uint16_t hw_comp_cons, sw_comp_cons, sw_rx_index; + uint16_t rx_pkt = 0; + union eth_rx_cqe *cqe; + struct eth_fast_path_rx_reg_cqe *fp_cqe; + register struct rte_mbuf *rx_mb = NULL; + enum eth_rx_cqe_type cqe_type; + uint16_t len, pad; + uint16_t preload_idx; + uint8_t csum_flag; + uint16_t parse_flag; + enum rss_hash_type htype; + + hw_comp_cons = rte_le_to_cpu_16(*rxq->hw_cons_ptr); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + + rte_rmb(); + + if (hw_comp_cons == sw_comp_cons) + return 0; + + while (sw_comp_cons != hw_comp_cons) { + /* Get the CQE from the completion ring */ + cqe = + (union eth_rx_cqe *)ecore_chain_consume(&rxq->rx_comp_ring); + cqe_type = cqe->fast_path_regular.type; + + if (unlikely(cqe_type == ETH_RX_CQE_TYPE_SLOW_PATH)) { + PMD_RX_LOG(DEBUG, rxq, "Got a slowath CQE\n"); + + qdev->ops->eth_cqe_completion(edev, fp->rss_id, + (struct eth_slow_path_rx_cqe *)cqe); + goto next_cqe; + } + + /* Get the data from the SW ring */ + sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + rx_mb = rxq->sw_rx_ring[sw_rx_index].mbuf; + assert(rx_mb != NULL); + + /* non GRO */ + fp_cqe = &cqe->fast_path_regular; + + len = rte_le_to_cpu_16(fp_cqe->len_on_first_bd); + pad = fp_cqe->placement_offset; + assert((len + pad) <= rx_mb->buf_len); + + PMD_RX_LOG(DEBUG, rxq, + "CQE type = 0x%x, flags = 0x%x, vlan = 0x%x" + " len = %u, parsing_flags = %d\n", + cqe_type, fp_cqe->bitfields, + rte_le_to_cpu_16(fp_cqe->vlan_tag), + len, rte_le_to_cpu_16(fp_cqe->pars_flags.flags)); + + /* If this is an error packet then drop it */ + parse_flag = + rte_le_to_cpu_16(cqe->fast_path_regular.pars_flags.flags); + csum_flag = qede_check_csum(parse_flag); + if (unlikely(csum_flag == QEDE_CSUM_ERROR)) { + PMD_RX_LOG(ERR, rxq, + "CQE in CONS = %u has error, flags = 0x%x " + "dropping incoming packet\n", + sw_comp_cons, parse_flag); + rxq->rx_hw_errors++; + qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num); + goto next_cqe; + } + + if 
(unlikely(qede_alloc_rx_buffer(rxq) != 0)) { + PMD_RX_LOG(ERR, rxq, + "New buffer allocation failed," + "dropping incoming packet\n"); + qede_recycle_rx_bd_ring(rxq, qdev, fp_cqe->bd_num); + rte_eth_devices[rxq->port_id]. + data->rx_mbuf_alloc_failed++; + rxq->rx_alloc_errors++; + break; + } + + qede_rx_bd_ring_consume(rxq); + + /* Prefetch next mbuf while processing current one. */ + preload_idx = rxq->sw_rx_cons & NUM_RX_BDS(rxq); + rte_prefetch0(rxq->sw_rx_ring[preload_idx].mbuf); + + if (fp_cqe->bd_num != 1) + PMD_RX_LOG(DEBUG, rxq, + "Jumbo-over-BD packet not supported\n"); + + /* Update MBUF fields */ + rx_mb->ol_flags = 0; + rx_mb->data_off = pad + RTE_PKTMBUF_HEADROOM; + rx_mb->nb_segs = 1; + rx_mb->data_len = len; + rx_mb->pkt_len = len; + rx_mb->port = rxq->port_id; + rx_mb->packet_type = qede_rx_cqe_to_pkt_type(parse_flag); + + htype = (uint8_t)GET_FIELD(fp_cqe->bitfields, + ETH_FAST_PATH_RX_REG_CQE_RSS_HASH_TYPE); + if (qdev->rss_enabled && htype) { + rx_mb->ol_flags |= PKT_RX_RSS_HASH; + rx_mb->hash.rss = rte_le_to_cpu_32(fp_cqe->rss_hash); + PMD_RX_LOG(DEBUG, rxq, "Hash result 0x%x\n", + rx_mb->hash.rss); + } + + rte_prefetch1(rte_pktmbuf_mtod(rx_mb, void *)); + + if (CQE_HAS_VLAN(parse_flag)) { + rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag); + rx_mb->ol_flags |= PKT_RX_VLAN_PKT; + } + + if (CQE_HAS_OUTER_VLAN(parse_flag)) { + /* FW does not provide indication of Outer VLAN tag, + * which is always stripped, so vlan_tci_outer is set + * to 0. Here vlan_tag represents inner VLAN tag. + */ + rx_mb->vlan_tci = rte_le_to_cpu_16(fp_cqe->vlan_tag); + rx_mb->ol_flags |= PKT_RX_QINQ_PKT; + } + + rx_pkts[rx_pkt] = rx_mb; + rx_pkt++; +next_cqe: + ecore_chain_recycle_consumed(&rxq->rx_comp_ring); + sw_comp_cons = ecore_chain_get_cons_idx(&rxq->rx_comp_ring); + if (rx_pkt == nb_pkts) { + PMD_RX_LOG(DEBUG, rxq, + "Budget reached nb_pkts=%u received=%u\n", + rx_pkt, nb_pkts); + break; + } + } + + qede_update_rx_prod(qdev, rxq); + + PMD_RX_LOG(DEBUG, rxq, "rx_pkts=%u core=%d\n", rx_pkt, rte_lcore_id()); + + return rx_pkt; +} + +static inline int +qede_free_tx_pkt(struct ecore_dev *edev, struct qede_tx_queue *txq) +{ + uint16_t idx = TX_CONS(txq); + struct eth_tx_bd *tx_data_bd; + struct rte_mbuf *mbuf = txq->sw_tx_ring[idx].mbuf; + + if (unlikely(!mbuf)) { + PMD_TX_LOG(ERR, txq, + "null mbuf nb_tx_desc %u nb_tx_avail %u " + "sw_tx_cons %u sw_tx_prod %u\n", + txq->nb_tx_desc, txq->nb_tx_avail, idx, + TX_PROD(txq)); + return -1; + } + + /* Free now */ + rte_pktmbuf_free_seg(mbuf); + txq->sw_tx_ring[idx].mbuf = NULL; + ecore_chain_consume(&txq->tx_pbl); + txq->nb_tx_avail++; + + return 0; +} + +static inline uint16_t +qede_process_tx_compl(struct ecore_dev *edev, struct qede_tx_queue *txq) +{ + uint16_t tx_compl = 0; + uint16_t hw_bd_cons; + int rc; + + hw_bd_cons = rte_le_to_cpu_16(*txq->hw_cons_ptr); + rte_compiler_barrier(); + + while (hw_bd_cons != ecore_chain_get_cons_idx(&txq->tx_pbl)) { + rc = qede_free_tx_pkt(edev, txq); + if (rc) { + DP_NOTICE(edev, false, + "hw_bd_cons = %d, chain_cons=%d\n", + hw_bd_cons, + ecore_chain_get_cons_idx(&txq->tx_pbl)); + break; + } + txq->sw_tx_cons++; /* Making TXD available */ + tx_compl++; + } + + PMD_TX_LOG(DEBUG, txq, "Tx compl %u sw_tx_cons %u avail %u\n", + tx_compl, txq->sw_tx_cons, txq->nb_tx_avail); + return tx_compl; +} + +uint16_t +qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + struct qede_tx_queue *txq = p_txq; + struct qede_dev *qdev = txq->qdev; + struct ecore_dev *edev = &qdev->edev; + struct 
qede_fastpath *fp = &qdev->fp_array[txq->queue_id]; + struct eth_tx_1st_bd *first_bd; + uint16_t nb_tx_pkts; + uint16_t nb_pkt_sent = 0; + uint16_t bd_prod; + uint16_t idx; + uint16_t tx_count; + + if (unlikely(txq->nb_tx_avail < txq->tx_free_thresh)) { + PMD_TX_LOG(DEBUG, txq, "send=%u avail=%u free_thresh=%u\n", + nb_pkts, txq->nb_tx_avail, txq->tx_free_thresh); + (void)qede_process_tx_compl(edev, txq); + } + + nb_tx_pkts = RTE_MIN(nb_pkts, (txq->nb_tx_avail / MAX_NUM_TX_BDS)); + if (unlikely(nb_tx_pkts == 0)) { + PMD_TX_LOG(DEBUG, txq, "Out of BDs nb_pkts=%u avail=%u\n", + nb_pkts, txq->nb_tx_avail); + return 0; + } + + tx_count = nb_tx_pkts; + while (nb_tx_pkts--) { + /* Fill the entry in the SW ring and the BDs in the FW ring */ + idx = TX_PROD(txq); + struct rte_mbuf *mbuf = *tx_pkts++; + txq->sw_tx_ring[idx].mbuf = mbuf; + first_bd = (struct eth_tx_1st_bd *) + ecore_chain_produce(&txq->tx_pbl); + first_bd->data.bd_flags.bitfields = + 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT; + /* Map MBUF linear data for DMA and set in the first BD */ + QEDE_BD_SET_ADDR_LEN(first_bd, rte_mbuf_data_dma_addr(mbuf), + mbuf->data_len); + + /* Descriptor based VLAN insertion */ + if (mbuf->ol_flags & (PKT_TX_VLAN_PKT | PKT_TX_QINQ_PKT)) { + first_bd->data.vlan = rte_cpu_to_le_16(mbuf->vlan_tci); + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_VLAN_INSERTION_SHIFT; + } + + /* Offload the IP checksum in the hardware */ + if (mbuf->ol_flags & PKT_TX_IP_CKSUM) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_IP_CSUM_SHIFT; + } + + /* L4 checksum offload (tcp or udp) */ + if (mbuf->ol_flags & (PKT_TX_TCP_CKSUM | PKT_TX_UDP_CKSUM)) { + first_bd->data.bd_flags.bitfields |= + 1 << ETH_TX_1ST_BD_FLAGS_L4_CSUM_SHIFT; + /* IPv6 + extn. -> later */ + } + first_bd->data.nbds = MAX_NUM_TX_BDS; + txq->sw_tx_prod++; + rte_prefetch0(txq->sw_tx_ring[TX_PROD(txq)].mbuf); + txq->nb_tx_avail--; + bd_prod = + rte_cpu_to_le_16(ecore_chain_get_prod_idx(&txq->tx_pbl)); + nb_pkt_sent++; + } + + /* Write value of prod idx into bd_prod */ + txq->tx_db.data.bd_prod = bd_prod; + rte_wmb(); + rte_compiler_barrier(); + DIRECT_REG_WR(edev, txq->doorbell_addr, txq->tx_db.raw); + rte_wmb(); + + /* Check again for Tx completions */ + (void)qede_process_tx_compl(edev, txq); + + PMD_TX_LOG(DEBUG, txq, "to_send=%u can_send=%u sent=%u core=%d\n", + nb_pkts, tx_count, nb_pkt_sent, rte_lcore_id()); + + return nb_pkt_sent; +} + +int qede_dev_start(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + struct qed_link_output link_output; + int rc; + + DP_INFO(edev, "port %u\n", eth_dev->data->port_id); + + if (qdev->state == QEDE_START) { + DP_INFO(edev, "device already started\n"); + return 0; + } + + if (qdev->state == QEDE_CLOSE) { + rc = qede_alloc_fp_array(qdev); + qede_init_fp(qdev); + rc = qede_alloc_mem_load(qdev); + DP_INFO(edev, "Allocated %d RSS queues on %d TC/s\n", + QEDE_RSS_CNT(qdev), qdev->num_tc); + } else if (qdev->state == QEDE_STOP) { + DP_INFO(edev, "restarting port %u\n", eth_dev->data->port_id); + } else { + DP_INFO(edev, "unknown state port %u\n", + eth_dev->data->port_id); + return -EINVAL; + } + + rc = qede_start_queues(eth_dev, true); + + if (rc) { + DP_ERR(edev, "Failed to start queues\n"); + /* TBD: free */ + return rc; + } + + DP_INFO(edev, "Start VPORT, RXQ and TXQ succeeded\n"); + + qede_dev_set_link_state(eth_dev, true); + + /* Query whether link is already-up */ + memset(&link_output, 0, 
sizeof(link_output)); + qdev->ops->common->get_link(edev, &link_output); + DP_NOTICE(edev, false, "link status: %s\n", + link_output.link_up ? "up" : "down"); + + qdev->state = QEDE_START; + + qede_config_rx_mode(eth_dev); + + DP_INFO(edev, "dev_state is QEDE_START\n"); + + return 0; +} + +static int qede_drain_txq(struct qede_dev *qdev, + struct qede_tx_queue *txq, bool allow_drain) +{ + struct ecore_dev *edev = &qdev->edev; + int rc, cnt = 1000; + + while (txq->sw_tx_cons != txq->sw_tx_prod) { + qede_process_tx_compl(edev, txq); + if (!cnt) { + if (allow_drain) { + DP_NOTICE(edev, false, + "Tx queue[%u] is stuck," + "requesting MCP to drain\n", + txq->queue_id); + rc = qdev->ops->common->drain(edev); + if (rc) + return rc; + return qede_drain_txq(qdev, txq, false); + } + + DP_NOTICE(edev, false, + "Timeout waiting for tx queue[%d]:" + "PROD=%d, CONS=%d\n", + txq->queue_id, txq->sw_tx_prod, + txq->sw_tx_cons); + return -ENODEV; + } + cnt--; + DELAY(1000); + rte_compiler_barrier(); + } + + /* FW finished processing, wait for HW to transmit all tx packets */ + DELAY(2000); + + return 0; +} + +static int qede_stop_queues(struct qede_dev *qdev) +{ + struct qed_update_vport_params vport_update_params; + struct ecore_dev *edev = &qdev->edev; + int rc, tc, i; + + /* Disable the vport */ + memset(&vport_update_params, 0, sizeof(vport_update_params)); + vport_update_params.vport_id = 0; + vport_update_params.update_vport_active_flg = 1; + vport_update_params.vport_active_flg = 0; + vport_update_params.update_rss_flg = 0; + + DP_INFO(edev, "vport_update\n"); + + rc = qdev->ops->vport_update(edev, &vport_update_params); + if (rc) { + DP_ERR(edev, "Failed to update vport\n"); + return rc; + } + + DP_INFO(edev, "Flushing tx queues\n"); + + /* Flush Tx queues. If needed, request drain from MCP */ + for_each_rss(i) { + struct qede_fastpath *fp = &qdev->fp_array[i]; + for (tc = 0; tc < qdev->num_tc; tc++) { + struct qede_tx_queue *txq = fp->txqs[tc]; + rc = qede_drain_txq(qdev, txq, true); + if (rc) + return rc; + } + } + + /* Stop all Queues in reverse order */ + for (i = QEDE_RSS_CNT(qdev) - 1; i >= 0; i--) { + struct qed_stop_rxq_params rx_params; + + /* Stop the Tx Queue(s) */ + for (tc = 0; tc < qdev->num_tc; tc++) { + struct qed_stop_txq_params tx_params; + + tx_params.rss_id = i; + tx_params.tx_queue_id = tc * QEDE_RSS_CNT(qdev) + i; + + DP_INFO(edev, "Stopping tx queues\n"); + rc = qdev->ops->q_tx_stop(edev, &tx_params); + if (rc) { + DP_ERR(edev, "Failed to stop TXQ #%d\n", + tx_params.tx_queue_id); + return rc; + } + } + + /* Stop the Rx Queue */ + memset(&rx_params, 0, sizeof(rx_params)); + rx_params.rss_id = i; + rx_params.rx_queue_id = i; + rx_params.eq_completion_only = 1; + + DP_INFO(edev, "Stopping rx queues\n"); + + rc = qdev->ops->q_rx_stop(edev, &rx_params); + if (rc) { + DP_ERR(edev, "Failed to stop RXQ #%d\n", i); + return rc; + } + } + + DP_INFO(edev, "Stopping vports\n"); + + /* Stop the vport */ + rc = qdev->ops->vport_stop(edev, 0); + if (rc) + DP_ERR(edev, "Failed to stop VPORT\n"); + + return rc; +} + +void qede_reset_fp_rings(struct qede_dev *qdev) +{ + uint16_t rss_id; + uint8_t tc; + + for_each_rss(rss_id) { + DP_INFO(&qdev->edev, "reset fp chain for rss %u\n", rss_id); + struct qede_fastpath *fp = &qdev->fp_array[rss_id]; + ecore_chain_reset(&fp->rxq->rx_bd_ring); + ecore_chain_reset(&fp->rxq->rx_comp_ring); + for (tc = 0; tc < qdev->num_tc; tc++) { + struct qede_tx_queue *txq = fp->txqs[tc]; + ecore_chain_reset(&txq->tx_pbl); + } + } +} + +/* This function frees all memory 
of a single fp */ +static void qede_free_mem_fp(struct qede_dev *qdev, struct qede_fastpath *fp) +{ + uint8_t tc; + + qede_rx_queue_release(fp->rxq); + for (tc = 0; tc < qdev->num_tc; tc++) + qede_tx_queue_release(fp->txqs[tc]); +} + +void qede_free_mem_load(struct qede_dev *qdev) +{ + uint8_t rss_id; + + for_each_rss(rss_id) { + struct qede_fastpath *fp = &qdev->fp_array[rss_id]; + qede_free_mem_fp(qdev, fp); + } + /* qdev->num_rss = 0; */ +} + +/* + * Stop an Ethernet device. The device can be restarted with a call to + * rte_eth_dev_start(). + * Do not change link state and do not release sw structures. + */ +void qede_dev_stop(struct rte_eth_dev *eth_dev) +{ + struct qede_dev *qdev = eth_dev->data->dev_private; + struct ecore_dev *edev = &qdev->edev; + int rc; + + DP_INFO(edev, "port %u\n", eth_dev->data->port_id); + + if (qdev->state != QEDE_START) { + DP_INFO(edev, "device not yet started\n"); + return; + } + + rc = qede_stop_queues(qdev); + + if (rc) + DP_ERR(edev, "Didn't succeed to close queues\n"); + + DP_INFO(edev, "Stopped queues\n"); + + qdev->ops->fastpath_stop(edev); + + qede_reset_fp_rings(qdev); + + qdev->state = QEDE_STOP; + + DP_INFO(edev, "dev_state is QEDE_STOP\n"); +} diff --git a/drivers/net/qede/qede_rxtx.h b/drivers/net/qede/qede_rxtx.h new file mode 100644 index 00000000..34eaf8b6 --- /dev/null +++ b/drivers/net/qede/qede_rxtx.h @@ -0,0 +1,179 @@ +/* + * Copyright (c) 2016 QLogic Corporation. + * All rights reserved. + * www.qlogic.com + * + * See LICENSE.qede_pmd for copyright and licensing details. + */ + + +#ifndef _QEDE_RXTX_H_ +#define _QEDE_RXTX_H_ + +#include "qede_ethdev.h" + +/* Ring Descriptors */ +#define RX_RING_SIZE_POW 16 /* 64K */ +#define RX_RING_SIZE (1ULL << RX_RING_SIZE_POW) +#define NUM_RX_BDS_MAX (RX_RING_SIZE - 1) +#define NUM_RX_BDS_MIN 128 +#define NUM_RX_BDS_DEF NUM_RX_BDS_MAX +#define NUM_RX_BDS(q) (q->nb_rx_desc - 1) + +#define TX_RING_SIZE_POW 16 /* 64K */ +#define TX_RING_SIZE (1ULL << TX_RING_SIZE_POW) +#define NUM_TX_BDS_MAX (TX_RING_SIZE - 1) +#define NUM_TX_BDS_MIN 128 +#define NUM_TX_BDS_DEF NUM_TX_BDS_MAX +#define NUM_TX_BDS(q) (q->nb_tx_desc - 1) + +#define TX_CONS(txq) (txq->sw_tx_cons & NUM_TX_BDS(txq)) +#define TX_PROD(txq) (txq->sw_tx_prod & NUM_TX_BDS(txq)) + +/* Number of TX BDs per packet used currently */ +#define MAX_NUM_TX_BDS 1 + +#define QEDE_DEFAULT_TX_FREE_THRESH 32 + +#define QEDE_CSUM_ERROR (1 << 0) +#define QEDE_CSUM_UNNECESSARY (1 << 1) +#define QEDE_TUNN_CSUM_UNNECESSARY (1 << 2) + +#define QEDE_BD_SET_ADDR_LEN(bd, maddr, len) \ + do { \ + (bd)->addr.hi = rte_cpu_to_le_32(U64_HI(maddr)); \ + (bd)->addr.lo = rte_cpu_to_le_32(U64_LO(maddr)); \ + (bd)->nbytes = rte_cpu_to_le_16(len); \ + } while (0) + +#define CQE_HAS_VLAN(flags) \ + ((flags) & (PARSING_AND_ERR_FLAGS_TAG8021QEXIST_MASK \ + << PARSING_AND_ERR_FLAGS_TAG8021QEXIST_SHIFT)) + +#define CQE_HAS_OUTER_VLAN(flags) \ + ((flags) & (PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_MASK \ + << PARSING_AND_ERR_FLAGS_TUNNEL8021QTAGEXIST_SHIFT)) + +/* Max supported alignment is 256 (8 shift) + * minimal alignment shift 6 is optimal for 57xxx HW performance + */ +#define QEDE_L1_CACHE_SHIFT 6 +#define QEDE_RX_ALIGN_SHIFT (RTE_MAX(6, RTE_MIN(8, QEDE_L1_CACHE_SHIFT))) +#define QEDE_FW_RX_ALIGN_END (1UL << QEDE_RX_ALIGN_SHIFT) + +#define QEDE_ETH_OVERHEAD (ETHER_HDR_LEN + 8 + 8 + QEDE_FW_RX_ALIGN_END) + +/* TBD: Excluding IPV6 */ +#define QEDE_RSS_OFFLOAD_ALL (ETH_RSS_IPV4 | ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP) + +#define QEDE_TXQ_FLAGS 
((uint32_t)ETH_TXQ_FLAGS_NOMULTSEGS) + +#define MAX_NUM_TC 8 + +#define for_each_rss(i) for (i = 0; i < qdev->num_rss; i++) + +/* + * RX BD descriptor ring + */ +struct qede_rx_entry { + struct rte_mbuf *mbuf; + uint32_t page_offset; + /* allows expansion .. */ +}; + +/* + * Structure associated with each RX queue. + */ +struct qede_rx_queue { + struct rte_mempool *mb_pool; + struct ecore_chain rx_bd_ring; + struct ecore_chain rx_comp_ring; + uint16_t *hw_cons_ptr; + void OSAL_IOMEM *hw_rxq_prod_addr; + struct qede_rx_entry *sw_rx_ring; + uint16_t sw_rx_cons; + uint16_t sw_rx_prod; + uint16_t nb_rx_desc; + uint16_t queue_id; + uint16_t port_id; + uint16_t rx_buf_size; + uint64_t rx_hw_errors; + uint64_t rx_alloc_errors; + struct qede_dev *qdev; +}; + +/* + * TX BD descriptor ring + */ +struct qede_tx_entry { + struct rte_mbuf *mbuf; + uint8_t flags; +}; + +union db_prod { + struct eth_db_data data; + uint32_t raw; +}; + +struct qede_tx_queue { + struct ecore_chain tx_pbl; + struct qede_tx_entry *sw_tx_ring; + uint16_t nb_tx_desc; + uint16_t nb_tx_avail; + uint16_t tx_free_thresh; + uint16_t queue_id; + uint16_t *hw_cons_ptr; + uint16_t sw_tx_cons; + uint16_t sw_tx_prod; + void OSAL_IOMEM *doorbell_addr; + volatile union db_prod tx_db; + uint16_t port_id; + uint64_t txq_counter; + struct qede_dev *qdev; +}; + +struct qede_fastpath { + struct qede_dev *qdev; + uint8_t rss_id; + struct ecore_sb_info *sb_info; + struct qede_rx_queue *rxq; + struct qede_tx_queue *txqs[MAX_NUM_TC]; + char name[80]; +}; + +/* + * RX/TX function prototypes + */ +int qede_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp); + +int qede_tx_queue_setup(struct rte_eth_dev *dev, + uint16_t queue_idx, + uint16_t nb_desc, + unsigned int socket_id, + const struct rte_eth_txconf *tx_conf); + +void qede_rx_queue_release(void *rx_queue); + +void qede_tx_queue_release(void *tx_queue); + +int qede_dev_start(struct rte_eth_dev *eth_dev); + +void qede_dev_stop(struct rte_eth_dev *eth_dev); + +void qede_reset_fp_rings(struct qede_dev *qdev); + +void qede_free_fp_arrays(struct qede_dev *qdev); + +void qede_free_mem_load(struct qede_dev *qdev); + +uint16_t qede_xmit_pkts(void *p_txq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts); + +uint16_t qede_recv_pkts(void *p_rxq, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +#endif /* _QEDE_RXTX_H_ */ diff --git a/drivers/net/qede/rte_pmd_qede_version.map b/drivers/net/qede/rte_pmd_qede_version.map new file mode 100644 index 00000000..349c6e1c --- /dev/null +++ b/drivers/net/qede/rte_pmd_qede_version.map @@ -0,0 +1,4 @@ +DPDK_16.04 { + + local: *; +}; diff --git a/drivers/net/szedata2/Makefile b/drivers/net/szedata2/Makefile index 963a8d67..4a7b14c9 100644 --- a/drivers/net/szedata2/Makefile +++ b/drivers/net/szedata2/Makefile @@ -55,9 +55,10 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += rte_eth_szedata2.c SYMLINK-y-include += # this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_mempool DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_ether -DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_malloc DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_SZEDATA2) += lib/librte_kvargs include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/szedata2/rte_eth_szedata2.c b/drivers/net/szedata2/rte_eth_szedata2.c index 78c43b0c..985a8d60 
100644 --- a/drivers/net/szedata2/rte_eth_szedata2.c +++ b/drivers/net/szedata2/rte_eth_szedata2.c @@ -1481,7 +1481,7 @@ rte_szedata2_eth_dev_init(struct rte_eth_dev *dev) return -EINVAL; } snprintf(rsc_filename, PATH_MAX, - SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/resource%u", + "%s/" PCI_PRI_FMT "/resource%u", pci_get_sysfs_path(), pci_addr->domain, pci_addr->bus, pci_addr->devid, pci_addr->function, PCI_RESOURCE_NUMBER); fd = open(rsc_filename, O_RDWR); diff --git a/drivers/net/thunderx/Makefile b/drivers/net/thunderx/Makefile new file mode 100644 index 00000000..8ea6b454 --- /dev/null +++ b/drivers/net/thunderx/Makefile @@ -0,0 +1,70 @@ +# BSD LICENSE +# +# Copyright(c) 2016 Cavium Networks. All rights reserved. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Cavium Networks nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_pmd_thunderx_nicvf.a + +CFLAGS += $(WERROR_FLAGS) + +LDLIBS += -lm + +EXPORT_MAP := rte_pmd_thunderx_nicvf_version.map + +LIBABIVER := 1 + +OBJS_BASE_DRIVER=$(patsubst %.c,%.o,$(notdir $(wildcard $(SRCDIR)/base/*.c))) +$(foreach obj, $(OBJS_BASE_DRIVER), $(eval CFLAGS_$(obj)+=$(CFLAGS_BASE_DRIVER))) + +VPATH += $(SRCDIR)/base + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_rxtx.c +SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_hw.c +SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_mbox.c +SRCS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += nicvf_ethdev.c + +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_nicvf_rxtx.o += -fno-prefetch-loop-arrays +endif +CFLAGS_nicvf_rxtx.o += -Ofast + +# this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_eal lib/librte_ether +DEPDIRS-$(CONFIG_RTE_LIBRTE_THUNDERX_NICVF_PMD) += lib/librte_mempool lib/librte_mbuf + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/thunderx/base/nicvf_hw.c b/drivers/net/thunderx/base/nicvf_hw.c new file mode 100644 index 00000000..001b0edd --- /dev/null +++ b/drivers/net/thunderx/base/nicvf_hw.c @@ -0,0 +1,905 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <unistd.h> +#include <math.h> +#include <errno.h> +#include <stdarg.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <assert.h> + +#include "nicvf_plat.h" + +struct nicvf_reg_info { + uint32_t offset; + const char *name; +}; + +#define NICVF_REG_POLL_ITER_NR (10) +#define NICVF_REG_POLL_DELAY_US (2000) +#define NICVF_REG_INFO(reg) {reg, #reg} + +static const struct nicvf_reg_info nicvf_reg_tbl[] = { + NICVF_REG_INFO(NIC_VF_CFG), + NICVF_REG_INFO(NIC_VF_PF_MAILBOX_0_1), + NICVF_REG_INFO(NIC_VF_INT), + NICVF_REG_INFO(NIC_VF_INT_W1S), + NICVF_REG_INFO(NIC_VF_ENA_W1C), + NICVF_REG_INFO(NIC_VF_ENA_W1S), + NICVF_REG_INFO(NIC_VNIC_RSS_CFG), + NICVF_REG_INFO(NIC_VNIC_RQ_GEN_CFG), +}; + +static const struct nicvf_reg_info nicvf_multi_reg_tbl[] = { + {NIC_VNIC_RSS_KEY_0_4 + 0, "NIC_VNIC_RSS_KEY_0"}, + {NIC_VNIC_RSS_KEY_0_4 + 8, "NIC_VNIC_RSS_KEY_1"}, + {NIC_VNIC_RSS_KEY_0_4 + 16, "NIC_VNIC_RSS_KEY_2"}, + {NIC_VNIC_RSS_KEY_0_4 + 24, "NIC_VNIC_RSS_KEY_3"}, + {NIC_VNIC_RSS_KEY_0_4 + 32, "NIC_VNIC_RSS_KEY_4"}, + {NIC_VNIC_TX_STAT_0_4 + 0, "NIC_VNIC_STAT_TX_OCTS"}, + {NIC_VNIC_TX_STAT_0_4 + 8, "NIC_VNIC_STAT_TX_UCAST"}, + {NIC_VNIC_TX_STAT_0_4 + 16, "NIC_VNIC_STAT_TX_BCAST"}, + {NIC_VNIC_TX_STAT_0_4 + 24, "NIC_VNIC_STAT_TX_MCAST"}, + {NIC_VNIC_TX_STAT_0_4 + 32, "NIC_VNIC_STAT_TX_DROP"}, + {NIC_VNIC_RX_STAT_0_13 + 0, "NIC_VNIC_STAT_RX_OCTS"}, + {NIC_VNIC_RX_STAT_0_13 + 8, "NIC_VNIC_STAT_RX_UCAST"}, + {NIC_VNIC_RX_STAT_0_13 + 16, "NIC_VNIC_STAT_RX_BCAST"}, + {NIC_VNIC_RX_STAT_0_13 + 24, "NIC_VNIC_STAT_RX_MCAST"}, + {NIC_VNIC_RX_STAT_0_13 + 32, "NIC_VNIC_STAT_RX_RED"}, + {NIC_VNIC_RX_STAT_0_13 + 40, "NIC_VNIC_STAT_RX_RED_OCTS"}, + {NIC_VNIC_RX_STAT_0_13 + 48, "NIC_VNIC_STAT_RX_ORUN"}, + {NIC_VNIC_RX_STAT_0_13 + 56, "NIC_VNIC_STAT_RX_ORUN_OCTS"}, + {NIC_VNIC_RX_STAT_0_13 + 64, "NIC_VNIC_STAT_RX_FCS"}, + {NIC_VNIC_RX_STAT_0_13 + 72, "NIC_VNIC_STAT_RX_L2ERR"}, + {NIC_VNIC_RX_STAT_0_13 + 80, "NIC_VNIC_STAT_RX_DRP_BCAST"}, + {NIC_VNIC_RX_STAT_0_13 + 88, "NIC_VNIC_STAT_RX_DRP_MCAST"}, + {NIC_VNIC_RX_STAT_0_13 + 96, "NIC_VNIC_STAT_RX_DRP_L3BCAST"}, + {NIC_VNIC_RX_STAT_0_13 + 104, "NIC_VNIC_STAT_RX_DRP_L3MCAST"}, +}; + +static const struct nicvf_reg_info nicvf_qset_cq_reg_tbl[] = { + NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_CFG2), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_THRESH), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_BASE), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_HEAD), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_TAIL), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_DOOR), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_STATUS2), + NICVF_REG_INFO(NIC_QSET_CQ_0_7_DEBUG), +}; + +static const struct nicvf_reg_info nicvf_qset_rq_reg_tbl[] = { + NICVF_REG_INFO(NIC_QSET_RQ_0_7_CFG), + NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS0), + NICVF_REG_INFO(NIC_QSET_RQ_0_7_STATUS1), +}; + +static const struct nicvf_reg_info nicvf_qset_sq_reg_tbl[] = { + NICVF_REG_INFO(NIC_QSET_SQ_0_7_CFG), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_THRESH), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_BASE), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_HEAD), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_TAIL), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_DOOR), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_DEBUG), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS0), + NICVF_REG_INFO(NIC_QSET_SQ_0_7_STATUS1), +}; + +static const struct nicvf_reg_info nicvf_qset_rbdr_reg_tbl[] = { + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_CFG), + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_THRESH), + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_BASE), + 
NICVF_REG_INFO(NIC_QSET_RBDR_0_1_HEAD), + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_TAIL), + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_DOOR), + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS0), + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_STATUS1), + NICVF_REG_INFO(NIC_QSET_RBDR_0_1_PRFCH_STATUS), +}; + +int +nicvf_base_init(struct nicvf *nic) +{ + nic->hwcap = 0; + if (nic->subsystem_device_id == 0) + return NICVF_ERR_BASE_INIT; + + if (nicvf_hw_version(nic) == NICVF_PASS2) + nic->hwcap |= NICVF_CAP_TUNNEL_PARSING; + + return NICVF_OK; +} + +/* dump on stdout if data is NULL */ +int +nicvf_reg_dump(struct nicvf *nic, uint64_t *data) +{ + uint32_t i, q; + bool dump_stdout; + + dump_stdout = data ? 0 : 1; + + for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_reg_tbl); i++) + if (dump_stdout) + nicvf_log("%24s = 0x%" PRIx64 "\n", + nicvf_reg_tbl[i].name, + nicvf_reg_read(nic, nicvf_reg_tbl[i].offset)); + else + *data++ = nicvf_reg_read(nic, nicvf_reg_tbl[i].offset); + + for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); i++) + if (dump_stdout) + nicvf_log("%24s = 0x%" PRIx64 "\n", + nicvf_multi_reg_tbl[i].name, + nicvf_reg_read(nic, + nicvf_multi_reg_tbl[i].offset)); + else + *data++ = nicvf_reg_read(nic, + nicvf_multi_reg_tbl[i].offset); + + for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) + for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl); i++) + if (dump_stdout) + nicvf_log("%30s(%d) = 0x%" PRIx64 "\n", + nicvf_qset_cq_reg_tbl[i].name, q, + nicvf_queue_reg_read(nic, + nicvf_qset_cq_reg_tbl[i].offset, q)); + else + *data++ = nicvf_queue_reg_read(nic, + nicvf_qset_cq_reg_tbl[i].offset, q); + + for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) + for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl); i++) + if (dump_stdout) + nicvf_log("%30s(%d) = 0x%" PRIx64 "\n", + nicvf_qset_rq_reg_tbl[i].name, q, + nicvf_queue_reg_read(nic, + nicvf_qset_rq_reg_tbl[i].offset, q)); + else + *data++ = nicvf_queue_reg_read(nic, + nicvf_qset_rq_reg_tbl[i].offset, q); + + for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) + for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl); i++) + if (dump_stdout) + nicvf_log("%30s(%d) = 0x%" PRIx64 "\n", + nicvf_qset_sq_reg_tbl[i].name, q, + nicvf_queue_reg_read(nic, + nicvf_qset_sq_reg_tbl[i].offset, q)); + else + *data++ = nicvf_queue_reg_read(nic, + nicvf_qset_sq_reg_tbl[i].offset, q); + + for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) + for (i = 0; i < NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl); i++) + if (dump_stdout) + nicvf_log("%30s(%d) = 0x%" PRIx64 "\n", + nicvf_qset_rbdr_reg_tbl[i].name, q, + nicvf_queue_reg_read(nic, + nicvf_qset_rbdr_reg_tbl[i].offset, q)); + else + *data++ = nicvf_queue_reg_read(nic, + nicvf_qset_rbdr_reg_tbl[i].offset, q); + return 0; +} + +int +nicvf_reg_get_count(void) +{ + int nr_regs; + + nr_regs = NICVF_ARRAY_SIZE(nicvf_reg_tbl); + nr_regs += NICVF_ARRAY_SIZE(nicvf_multi_reg_tbl); + nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_cq_reg_tbl) * + MAX_CMP_QUEUES_PER_QS; + nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rq_reg_tbl) * + MAX_RCV_QUEUES_PER_QS; + nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_sq_reg_tbl) * + MAX_SND_QUEUES_PER_QS; + nr_regs += NICVF_ARRAY_SIZE(nicvf_qset_rbdr_reg_tbl) * + MAX_RCV_BUF_DESC_RINGS_PER_QS; + + return nr_regs; +} + +static int +nicvf_qset_config_internal(struct nicvf *nic, bool enable) +{ + int ret; + struct pf_qs_cfg pf_qs_cfg = {.value = 0}; + + pf_qs_cfg.ena = enable ? 1 : 0; + pf_qs_cfg.vnic = nic->vf_id; + ret = nicvf_mbox_qset_config(nic, &pf_qs_cfg); + return ret ? 
NICVF_ERR_SET_QS : 0; +} + +/* Requests PF to assign and enable Qset */ +int +nicvf_qset_config(struct nicvf *nic) +{ + /* Enable Qset */ + return nicvf_qset_config_internal(nic, true); +} + +int +nicvf_qset_reclaim(struct nicvf *nic) +{ + /* Disable Qset */ + return nicvf_qset_config_internal(nic, false); +} + +static int +cmpfunc(const void *a, const void *b) +{ + return (*(const uint32_t *)a - *(const uint32_t *)b); +} + +static uint32_t +nicvf_roundup_list(uint32_t val, uint32_t list[], uint32_t entries) +{ + uint32_t i; + + qsort(list, entries, sizeof(uint32_t), cmpfunc); + for (i = 0; i < entries; i++) + if (val <= list[i]) + break; + /* Not in the list */ + if (i >= entries) + return 0; + else + return list[i]; +} + +static void +nicvf_handle_qset_err_intr(struct nicvf *nic) +{ + uint16_t qidx; + uint64_t status; + + nicvf_log("%s (VF%d)\n", __func__, nic->vf_id); + nicvf_reg_dump(nic, NULL); + + for (qidx = 0; qidx < MAX_CMP_QUEUES_PER_QS; qidx++) { + status = nicvf_queue_reg_read( + nic, NIC_QSET_CQ_0_7_STATUS, qidx); + if (!(status & NICVF_CQ_ERR_MASK)) + continue; + + if (status & NICVF_CQ_WR_FULL) + nicvf_log("[%d]NICVF_CQ_WR_FULL\n", qidx); + if (status & NICVF_CQ_WR_DISABLE) + nicvf_log("[%d]NICVF_CQ_WR_DISABLE\n", qidx); + if (status & NICVF_CQ_WR_FAULT) + nicvf_log("[%d]NICVF_CQ_WR_FAULT\n", qidx); + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_STATUS, qidx, 0); + } + + for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { + status = nicvf_queue_reg_read( + nic, NIC_QSET_SQ_0_7_STATUS, qidx); + if (!(status & NICVF_SQ_ERR_MASK)) + continue; + + if (status & NICVF_SQ_ERR_STOPPED) + nicvf_log("[%d]NICVF_SQ_ERR_STOPPED\n", qidx); + if (status & NICVF_SQ_ERR_SEND) + nicvf_log("[%d]NICVF_SQ_ERR_SEND\n", qidx); + if (status & NICVF_SQ_ERR_DPE) + nicvf_log("[%d]NICVF_SQ_ERR_DPE\n", qidx); + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_STATUS, qidx, 0); + } + + for (qidx = 0; qidx < MAX_RCV_BUF_DESC_RINGS_PER_QS; qidx++) { + status = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_STATUS0, qidx); + status &= NICVF_RBDR_FIFO_STATE_MASK; + status >>= NICVF_RBDR_FIFO_STATE_SHIFT; + + if (status == RBDR_FIFO_STATE_FAIL) + nicvf_log("[%d]RBDR_FIFO_STATE_FAIL\n", qidx); + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx, 0); + } + + nicvf_disable_all_interrupts(nic); + abort(); +} + +/* + * Handle poll mode driver interested "mbox" and "queue-set error" interrupts. + * This function is not re-entrant. + * The caller should provide proper serialization. 
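 * Only the mailbox and queue-set error bits of NIC_VF_INT are acknowledged
 * here: a pending mailbox interrupt is handed to nicvf_handle_mbx_intr() and
 * its result is returned to the caller, while a queue-set error dumps the VF
 * registers and aborts via nicvf_handle_qset_err_intr().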
+ */ +int +nicvf_reg_poll_interrupts(struct nicvf *nic) +{ + int msg = 0; + uint64_t intr; + + intr = nicvf_reg_read(nic, NIC_VF_INT); + if (intr & NICVF_INTR_MBOX_MASK) { + nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_MBOX_MASK); + msg = nicvf_handle_mbx_intr(nic); + } + if (intr & NICVF_INTR_QS_ERR_MASK) { + nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_QS_ERR_MASK); + nicvf_handle_qset_err_intr(nic); + } + return msg; +} + +static int +nicvf_qset_poll_reg(struct nicvf *nic, uint16_t qidx, uint32_t offset, + uint32_t bit_pos, uint32_t bits, uint64_t val) +{ + uint64_t bit_mask; + uint64_t reg_val; + int timeout = NICVF_REG_POLL_ITER_NR; + + bit_mask = (1ULL << bits) - 1; + bit_mask = (bit_mask << bit_pos); + + while (timeout) { + reg_val = nicvf_queue_reg_read(nic, offset, qidx); + if (((reg_val & bit_mask) >> bit_pos) == val) + return NICVF_OK; + nicvf_delay_us(NICVF_REG_POLL_DELAY_US); + timeout--; + } + return NICVF_ERR_REG_POLL; +} + +int +nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx) +{ + uint64_t status; + int timeout = NICVF_REG_POLL_ITER_NR; + struct nicvf_rbdr *rbdr = nic->rbdr; + + /* Save head and tail pointers for freeing up buffers */ + if (rbdr) { + rbdr->head = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_HEAD, qidx) >> 3; + rbdr->tail = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_TAIL, qidx) >> 3; + rbdr->next_tail = rbdr->tail; + } + + /* Reset RBDR */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, + NICVF_RBDR_RESET); + + /* Disable RBDR */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0); + if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_RBDR_0_1_STATUS0, + 62, 2, 0x00)) + return NICVF_ERR_RBDR_DISABLE; + + while (1) { + status = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_PRFCH_STATUS, qidx); + if ((status & 0xFFFFFFFF) == ((status >> 32) & 0xFFFFFFFF)) + break; + nicvf_delay_us(NICVF_REG_POLL_DELAY_US); + timeout--; + if (!timeout) + return NICVF_ERR_RBDR_PREFETCH; + } + + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, + NICVF_RBDR_RESET); + if (nicvf_qset_poll_reg(nic, qidx, + NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x02)) + return NICVF_ERR_RBDR_RESET1; + + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, 0x00); + if (nicvf_qset_poll_reg(nic, qidx, + NIC_QSET_RBDR_0_1_STATUS0, 62, 2, 0x00)) + return NICVF_ERR_RBDR_RESET2; + + return NICVF_OK; +} + +static int +nicvf_qsize_regbit(uint32_t len, uint32_t len_shift) +{ + int val; + + val = ((uint32_t)log2(len) - len_shift); + assert(val >= NICVF_QSIZE_MIN_VAL); + assert(val <= NICVF_QSIZE_MAX_VAL); + return val; +} + +int +nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx) +{ + int ret; + uint64_t head, tail; + struct nicvf_rbdr *rbdr = nic->rbdr; + struct rbdr_cfg rbdr_cfg = {.value = 0}; + + ret = nicvf_qset_rbdr_reclaim(nic, qidx); + if (ret) + return ret; + + /* Set descriptor base address */ + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_BASE, qidx, rbdr->phys); + + /* Enable RBDR & set queue size */ + rbdr_cfg.ena = 1; + rbdr_cfg.reset = 0; + rbdr_cfg.ldwb = 0; + rbdr_cfg.qsize = nicvf_qsize_regbit(rbdr->qlen_mask + 1, + RBDR_SIZE_SHIFT); + rbdr_cfg.avg_con = 0; + rbdr_cfg.lines = rbdr->buffsz / 128; + + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_CFG, qidx, rbdr_cfg.value); + + /* Verify proper RBDR reset */ + head = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, qidx); + tail = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, qidx); + + if (head | tail) + return NICVF_ERR_RBDR_RESET; + + return NICVF_OK; +} + +uint32_t +nicvf_qsize_rbdr_roundup(uint32_t val) 
+{ + uint32_t list[] = {RBDR_QUEUE_SZ_8K, RBDR_QUEUE_SZ_16K, + RBDR_QUEUE_SZ_32K, RBDR_QUEUE_SZ_64K, + RBDR_QUEUE_SZ_128K, RBDR_QUEUE_SZ_256K, + RBDR_QUEUE_SZ_512K}; + return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list)); +} + +int +nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx, + rbdr_pool_get_handler handler, + void *opaque, uint32_t max_buffs) +{ + struct rbdr_entry_t *desc, *desc0; + struct nicvf_rbdr *rbdr = nic->rbdr; + uint32_t count; + nicvf_phys_addr_t phy; + + assert(rbdr != NULL); + desc = rbdr->desc; + count = 0; + /* Don't fill beyond max numbers of desc */ + while (count < rbdr->qlen_mask) { + if (count >= max_buffs) + break; + desc0 = desc + count; + phy = handler(opaque); + if (phy) { + desc0->full_addr = phy; + count++; + } else { + break; + } + } + nicvf_smp_wmb(); + nicvf_queue_reg_write(nic, NIC_QSET_RBDR_0_1_DOOR, ridx, count); + rbdr->tail = nicvf_queue_reg_read(nic, + NIC_QSET_RBDR_0_1_TAIL, ridx) >> 3; + rbdr->next_tail = rbdr->tail; + nicvf_smp_rmb(); + return 0; +} + +int +nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx) +{ + return nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_STATUS0, qidx); +} + +int +nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx) +{ + uint64_t head, tail; + struct sq_cfg sq_cfg; + + sq_cfg.value = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, qidx); + + /* Disable send queue */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, 0); + + /* Check if SQ is stopped */ + if (sq_cfg.ena && nicvf_qset_poll_reg(nic, qidx, NIC_QSET_SQ_0_7_STATUS, + NICVF_SQ_STATUS_STOPPED_BIT, 1, 0x01)) + return NICVF_ERR_SQ_DISABLE; + + /* Reset send queue */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, NICVF_SQ_RESET); + head = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, qidx) >> 4; + tail = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, qidx) >> 4; + if (head | tail) + return NICVF_ERR_SQ_RESET; + + return 0; +} + +int +nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_txq *txq) +{ + int ret; + struct sq_cfg sq_cfg = {.value = 0}; + + ret = nicvf_qset_sq_reclaim(nic, qidx); + if (ret) + return ret; + + /* Send a mailbox msg to PF to config SQ */ + if (nicvf_mbox_sq_config(nic, qidx)) + return NICVF_ERR_SQ_PF_CFG; + + /* Set queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_BASE, qidx, txq->phys); + + /* Enable send queue & set queue size */ + sq_cfg.ena = 1; + sq_cfg.reset = 0; + sq_cfg.ldwb = 0; + sq_cfg.qsize = nicvf_qsize_regbit(txq->qlen_mask + 1, SND_QSIZE_SHIFT); + sq_cfg.tstmp_bgx_intf = 0; + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_CFG, qidx, sq_cfg.value); + + /* Ring doorbell so that H/W restarts processing SQEs */ + nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, qidx, 0); + + return 0; +} + +uint32_t +nicvf_qsize_sq_roundup(uint32_t val) +{ + uint32_t list[] = {SND_QUEUE_SZ_1K, SND_QUEUE_SZ_2K, + SND_QUEUE_SZ_4K, SND_QUEUE_SZ_8K, + SND_QUEUE_SZ_16K, SND_QUEUE_SZ_32K, + SND_QUEUE_SZ_64K}; + return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list)); +} + +int +nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx) +{ + /* Disable receive queue */ + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, 0); + return nicvf_mbox_rq_sync(nic); +} + +int +nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq) +{ + struct pf_rq_cfg pf_rq_cfg = {.value = 0}; + struct rq_cfg rq_cfg = {.value = 0}; + + if (nicvf_qset_rq_reclaim(nic, qidx)) + return NICVF_ERR_RQ_CLAIM; + + pf_rq_cfg.strip_pre_l2 = 0; + /* First cache line of RBDR data will be 
allocated into L2C */ + pf_rq_cfg.caching = RQ_CACHE_ALLOC_FIRST; + pf_rq_cfg.cq_qs = nic->vf_id; + pf_rq_cfg.cq_idx = qidx; + pf_rq_cfg.rbdr_cont_qs = nic->vf_id; + pf_rq_cfg.rbdr_cont_idx = 0; + pf_rq_cfg.rbdr_strt_qs = nic->vf_id; + pf_rq_cfg.rbdr_strt_idx = 0; + + /* Send a mailbox msg to PF to config RQ */ + if (nicvf_mbox_rq_config(nic, qidx, &pf_rq_cfg)) + return NICVF_ERR_RQ_PF_CFG; + + /* Select Rx backpressure */ + if (nicvf_mbox_rq_bp_config(nic, qidx, rxq->rx_drop_en)) + return NICVF_ERR_RQ_BP_CFG; + + /* Send a mailbox msg to PF to config RQ drop */ + if (nicvf_mbox_rq_drop_config(nic, qidx, rxq->rx_drop_en)) + return NICVF_ERR_RQ_DROP_CFG; + + /* Enable Receive queue */ + rq_cfg.ena = 1; + nicvf_queue_reg_write(nic, NIC_QSET_RQ_0_7_CFG, qidx, rq_cfg.value); + + return 0; +} + +int +nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx) +{ + uint64_t tail, head; + + /* Disable completion queue */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, 0); + if (nicvf_qset_poll_reg(nic, qidx, NIC_QSET_CQ_0_7_CFG, 42, 1, 0)) + return NICVF_ERR_CQ_DISABLE; + + /* Reset completion queue */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, NICVF_CQ_RESET); + tail = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, qidx) >> 9; + head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, qidx) >> 9; + if (head | tail) + return NICVF_ERR_CQ_RESET; + + /* Disable timer threshold (doesn't get reset upon CQ reset) */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); + return 0; +} + +int +nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, struct nicvf_rxq *rxq) +{ + int ret; + struct cq_cfg cq_cfg = {.value = 0}; + + ret = nicvf_qset_cq_reclaim(nic, qidx); + if (ret) + return ret; + + /* Set completion queue base address */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, rxq->phys); + + cq_cfg.ena = 1; + cq_cfg.reset = 0; + /* Writes of CQE will be allocated into L2C */ + cq_cfg.caching = 1; + cq_cfg.qsize = nicvf_qsize_regbit(rxq->qlen_mask + 1, CMP_QSIZE_SHIFT); + cq_cfg.avg_con = 0; + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value); + + /* Set threshold value for interrupt generation */ + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_THRESH, qidx, 0); + nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG2, qidx, 0); + return 0; +} + +uint32_t +nicvf_qsize_cq_roundup(uint32_t val) +{ + uint32_t list[] = {CMP_QUEUE_SZ_1K, CMP_QUEUE_SZ_2K, + CMP_QUEUE_SZ_4K, CMP_QUEUE_SZ_8K, + CMP_QUEUE_SZ_16K, CMP_QUEUE_SZ_32K, + CMP_QUEUE_SZ_64K}; + return nicvf_roundup_list(val, list, NICVF_ARRAY_SIZE(list)); +} + + +void +nicvf_vlan_hw_strip(struct nicvf *nic, bool enable) +{ + uint64_t val; + + val = nicvf_reg_read(nic, NIC_VNIC_RQ_GEN_CFG); + if (enable) + val |= (STRIP_FIRST_VLAN << 25); + else + val &= ~((STRIP_SECOND_VLAN | STRIP_FIRST_VLAN) << 25); + + nicvf_reg_write(nic, NIC_VNIC_RQ_GEN_CFG, val); +} + +void +nicvf_rss_set_key(struct nicvf *nic, uint8_t *key) +{ + int idx; + uint64_t addr, val; + uint64_t *keyptr = (uint64_t *)key; + + addr = NIC_VNIC_RSS_KEY_0_4; + for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) { + val = nicvf_cpu_to_be_64(*keyptr); + nicvf_reg_write(nic, addr, val); + addr += sizeof(uint64_t); + keyptr++; + } +} + +void +nicvf_rss_get_key(struct nicvf *nic, uint8_t *key) +{ + int idx; + uint64_t addr, val; + uint64_t *keyptr = (uint64_t *)key; + + addr = NIC_VNIC_RSS_KEY_0_4; + for (idx = 0; idx < RSS_HASH_KEY_SIZE; idx++) { + val = nicvf_reg_read(nic, addr); + *keyptr = nicvf_be_to_cpu_64(val); + addr += sizeof(uint64_t); + keyptr++; + } 
+} + +void +nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val) +{ + nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, val); +} + +uint64_t +nicvf_rss_get_cfg(struct nicvf *nic) +{ + return nicvf_reg_read(nic, NIC_VNIC_RSS_CFG); +} + +int +nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count) +{ + uint32_t idx; + struct nicvf_rss_reta_info *rss = &nic->rss_info; + + /* result will be stored in nic->rss_info.rss_size */ + if (nicvf_mbox_get_rss_size(nic)) + return NICVF_ERR_RSS_GET_SZ; + + assert(rss->rss_size > 0); + rss->hash_bits = (uint8_t)log2(rss->rss_size); + for (idx = 0; idx < rss->rss_size && idx < max_count; idx++) + rss->ind_tbl[idx] = tbl[idx]; + + if (nicvf_mbox_config_rss(nic)) + return NICVF_ERR_RSS_TBL_UPDATE; + + return NICVF_OK; +} + +int +nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count) +{ + uint32_t idx; + struct nicvf_rss_reta_info *rss = &nic->rss_info; + + /* result will be stored in nic->rss_info.rss_size */ + if (nicvf_mbox_get_rss_size(nic)) + return NICVF_ERR_RSS_GET_SZ; + + assert(rss->rss_size > 0); + rss->hash_bits = (uint8_t)log2(rss->rss_size); + for (idx = 0; idx < rss->rss_size && idx < max_count; idx++) + tbl[idx] = rss->ind_tbl[idx]; + + return NICVF_OK; +} + +int +nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg) +{ + uint32_t idx; + uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE]; + uint8_t default_key[RSS_HASH_KEY_BYTE_SIZE] = { + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD, + 0xFE, 0xED, 0x0B, 0xAD, 0xFE, 0xED, 0x0B, 0xAD + }; + + if (nic->cpi_alg != CPI_ALG_NONE) + return -EINVAL; + + if (cfg == 0) + return -EINVAL; + + /* Update default RSS key and cfg */ + nicvf_rss_set_key(nic, default_key); + nicvf_rss_set_cfg(nic, cfg); + + /* Update default RSS RETA */ + for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++) + default_reta[idx] = idx % qcnt; + + return nicvf_rss_reta_update(nic, default_reta, + NIC_MAX_RSS_IDR_TBL_SIZE); +} + +int +nicvf_rss_term(struct nicvf *nic) +{ + uint32_t idx; + uint8_t disable_rss[NIC_MAX_RSS_IDR_TBL_SIZE]; + + nicvf_rss_set_cfg(nic, 0); + /* Redirect the output to 0th queue */ + for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++) + disable_rss[idx] = 0; + + return nicvf_rss_reta_update(nic, disable_rss, + NIC_MAX_RSS_IDR_TBL_SIZE); +} + +int +nicvf_loopback_config(struct nicvf *nic, bool enable) +{ + if (enable && nic->loopback_supported == 0) + return NICVF_ERR_LOOPBACK_CFG; + + return nicvf_mbox_loopback_config(nic, enable); +} + +void +nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats) +{ + stats->rx_bytes = NICVF_GET_RX_STATS(RX_OCTS); + stats->rx_ucast_frames = NICVF_GET_RX_STATS(RX_UCAST); + stats->rx_bcast_frames = NICVF_GET_RX_STATS(RX_BCAST); + stats->rx_mcast_frames = NICVF_GET_RX_STATS(RX_MCAST); + stats->rx_fcs_errors = NICVF_GET_RX_STATS(RX_FCS); + stats->rx_l2_errors = NICVF_GET_RX_STATS(RX_L2ERR); + stats->rx_drop_red = NICVF_GET_RX_STATS(RX_RED); + stats->rx_drop_red_bytes = NICVF_GET_RX_STATS(RX_RED_OCTS); + stats->rx_drop_overrun = NICVF_GET_RX_STATS(RX_ORUN); + stats->rx_drop_overrun_bytes = NICVF_GET_RX_STATS(RX_ORUN_OCTS); + stats->rx_drop_bcast = NICVF_GET_RX_STATS(RX_DRP_BCAST); + stats->rx_drop_mcast = NICVF_GET_RX_STATS(RX_DRP_MCAST); + stats->rx_drop_l3_bcast = NICVF_GET_RX_STATS(RX_DRP_L3BCAST); + stats->rx_drop_l3_mcast = NICVF_GET_RX_STATS(RX_DRP_L3MCAST); + + 
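	/* Tx counters: read from the NIC_VNIC_TX_STAT_0_4 register window via
	 * the NICVF_GET_TX_STATS() helper, mirroring the Rx block above.
	 */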
stats->tx_bytes_ok = NICVF_GET_TX_STATS(TX_OCTS); + stats->tx_ucast_frames_ok = NICVF_GET_TX_STATS(TX_UCAST); + stats->tx_bcast_frames_ok = NICVF_GET_TX_STATS(TX_BCAST); + stats->tx_mcast_frames_ok = NICVF_GET_TX_STATS(TX_MCAST); + stats->tx_drops = NICVF_GET_TX_STATS(TX_DROP); +} + +void +nicvf_hw_get_rx_qstats(struct nicvf *nic, struct nicvf_hw_rx_qstats *qstats, + uint16_t qidx) +{ + qstats->q_rx_bytes = + nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS0, qidx); + qstats->q_rx_packets = + nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_STATUS1, qidx); +} + +void +nicvf_hw_get_tx_qstats(struct nicvf *nic, struct nicvf_hw_tx_qstats *qstats, + uint16_t qidx) +{ + qstats->q_tx_bytes = + nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS0, qidx); + qstats->q_tx_packets = + nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS1, qidx); +} diff --git a/drivers/net/thunderx/base/nicvf_hw.h b/drivers/net/thunderx/base/nicvf_hw.h new file mode 100644 index 00000000..9db1d30c --- /dev/null +++ b/drivers/net/thunderx/base/nicvf_hw.h @@ -0,0 +1,240 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef _THUNDERX_NICVF_HW_H +#define _THUNDERX_NICVF_HW_H + +#include <stdint.h> + +#include "nicvf_hw_defs.h" + +#define PCI_VENDOR_ID_CAVIUM 0x177D +#define PCI_DEVICE_ID_THUNDERX_PASS1_NICVF 0x0011 +#define PCI_DEVICE_ID_THUNDERX_PASS2_NICVF 0xA034 +#define PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF 0xA11E +#define PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF 0xA134 + +#define NICVF_ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0])) + +#define NICVF_GET_RX_STATS(reg) \ + nicvf_reg_read(nic, NIC_VNIC_RX_STAT_0_13 | (reg << 3)) +#define NICVF_GET_TX_STATS(reg) \ + nicvf_reg_read(nic, NIC_VNIC_TX_STAT_0_4 | (reg << 3)) + +#define NICVF_PASS1 (PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF) +#define NICVF_PASS2 (PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF) + +#define NICVF_CAP_TUNNEL_PARSING (1ULL << 0) + +enum nicvf_tns_mode { + NIC_TNS_BYPASS_MODE, + NIC_TNS_MODE, +}; + +enum nicvf_err_e { + NICVF_OK, + NICVF_ERR_SET_QS = -8191,/* -8191 */ + NICVF_ERR_RESET_QS, /* -8190 */ + NICVF_ERR_REG_POLL, /* -8189 */ + NICVF_ERR_RBDR_RESET, /* -8188 */ + NICVF_ERR_RBDR_DISABLE, /* -8187 */ + NICVF_ERR_RBDR_PREFETCH, /* -8186 */ + NICVF_ERR_RBDR_RESET1, /* -8185 */ + NICVF_ERR_RBDR_RESET2, /* -8184 */ + NICVF_ERR_RQ_CLAIM, /* -8183 */ + NICVF_ERR_RQ_PF_CFG, /* -8182 */ + NICVF_ERR_RQ_BP_CFG, /* -8181 */ + NICVF_ERR_RQ_DROP_CFG, /* -8180 */ + NICVF_ERR_CQ_DISABLE, /* -8179 */ + NICVF_ERR_CQ_RESET, /* -8178 */ + NICVF_ERR_SQ_DISABLE, /* -8177 */ + NICVF_ERR_SQ_RESET, /* -8176 */ + NICVF_ERR_SQ_PF_CFG, /* -8175 */ + NICVF_ERR_LOOPBACK_CFG, /* -8174 */ + NICVF_ERR_BASE_INIT, /* -8173 */ + NICVF_ERR_RSS_TBL_UPDATE,/* -8172 */ + NICVF_ERR_RSS_GET_SZ, /* -8171 */ +}; + +typedef nicvf_phys_addr_t (*rbdr_pool_get_handler)(void *opaque); + +struct nicvf_hw_rx_qstats { + uint64_t q_rx_bytes; + uint64_t q_rx_packets; +}; + +struct nicvf_hw_tx_qstats { + uint64_t q_tx_bytes; + uint64_t q_tx_packets; +}; + +struct nicvf_hw_stats { + uint64_t rx_bytes; + uint64_t rx_ucast_frames; + uint64_t rx_bcast_frames; + uint64_t rx_mcast_frames; + uint64_t rx_fcs_errors; + uint64_t rx_l2_errors; + uint64_t rx_drop_red; + uint64_t rx_drop_red_bytes; + uint64_t rx_drop_overrun; + uint64_t rx_drop_overrun_bytes; + uint64_t rx_drop_bcast; + uint64_t rx_drop_mcast; + uint64_t rx_drop_l3_bcast; + uint64_t rx_drop_l3_mcast; + + uint64_t tx_bytes_ok; + uint64_t tx_ucast_frames_ok; + uint64_t tx_bcast_frames_ok; + uint64_t tx_mcast_frames_ok; + uint64_t tx_drops; +}; + +struct nicvf_rss_reta_info { + uint8_t hash_bits; + uint16_t rss_size; + uint8_t ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; +}; + +/* Common structs used in DPDK and base layer are defined in DPDK layer */ +#include "../nicvf_struct.h" + +NICVF_STATIC_ASSERT(sizeof(struct nicvf_rbdr) <= 128); +NICVF_STATIC_ASSERT(sizeof(struct nicvf_txq) <= 128); +NICVF_STATIC_ASSERT(sizeof(struct nicvf_rxq) <= 128); + +static inline void +nicvf_reg_write(struct nicvf *nic, uint32_t offset, uint64_t val) +{ + nicvf_addr_write(nic->reg_base + offset, val); +} + +static inline uint64_t +nicvf_reg_read(struct nicvf *nic, uint32_t offset) +{ + return nicvf_addr_read(nic->reg_base + offset); +} + +static inline uintptr_t +nicvf_qset_base(struct nicvf *nic, uint32_t qidx) +{ + return nic->reg_base + (qidx << NIC_Q_NUM_SHIFT); +} + +static inline void +nicvf_queue_reg_write(struct nicvf *nic, uint32_t offset, uint32_t qidx, + uint64_t val) +{ + nicvf_addr_write(nicvf_qset_base(nic, qidx) + offset, val); +} + +static inline uint64_t +nicvf_queue_reg_read(struct nicvf *nic, uint32_t offset, uint32_t qidx) +{ + return 
nicvf_addr_read(nicvf_qset_base(nic, qidx) + offset); +} + +static inline void +nicvf_disable_all_interrupts(struct nicvf *nic) +{ + nicvf_reg_write(nic, NIC_VF_ENA_W1C, NICVF_INTR_ALL_MASK); + nicvf_reg_write(nic, NIC_VF_INT, NICVF_INTR_ALL_MASK); +} + +static inline uint32_t +nicvf_hw_version(struct nicvf *nic) +{ + return nic->subsystem_device_id; +} + +static inline uint64_t +nicvf_hw_cap(struct nicvf *nic) +{ + return nic->hwcap; +} + +int nicvf_base_init(struct nicvf *nic); + +int nicvf_reg_get_count(void); +int nicvf_reg_poll_interrupts(struct nicvf *nic); +int nicvf_reg_dump(struct nicvf *nic, uint64_t *data); + +int nicvf_qset_config(struct nicvf *nic); +int nicvf_qset_reclaim(struct nicvf *nic); + +int nicvf_qset_rbdr_config(struct nicvf *nic, uint16_t qidx); +int nicvf_qset_rbdr_reclaim(struct nicvf *nic, uint16_t qidx); +int nicvf_qset_rbdr_precharge(struct nicvf *nic, uint16_t ridx, + rbdr_pool_get_handler handler, void *opaque, + uint32_t max_buffs); +int nicvf_qset_rbdr_active(struct nicvf *nic, uint16_t qidx); + +int nicvf_qset_rq_config(struct nicvf *nic, uint16_t qidx, + struct nicvf_rxq *rxq); +int nicvf_qset_rq_reclaim(struct nicvf *nic, uint16_t qidx); + +int nicvf_qset_cq_config(struct nicvf *nic, uint16_t qidx, + struct nicvf_rxq *rxq); +int nicvf_qset_cq_reclaim(struct nicvf *nic, uint16_t qidx); + +int nicvf_qset_sq_config(struct nicvf *nic, uint16_t qidx, + struct nicvf_txq *txq); +int nicvf_qset_sq_reclaim(struct nicvf *nic, uint16_t qidx); + +uint32_t nicvf_qsize_rbdr_roundup(uint32_t val); +uint32_t nicvf_qsize_cq_roundup(uint32_t val); +uint32_t nicvf_qsize_sq_roundup(uint32_t val); + +void nicvf_vlan_hw_strip(struct nicvf *nic, bool enable); + +int nicvf_rss_config(struct nicvf *nic, uint32_t qcnt, uint64_t cfg); +int nicvf_rss_term(struct nicvf *nic); + +int nicvf_rss_reta_update(struct nicvf *nic, uint8_t *tbl, uint32_t max_count); +int nicvf_rss_reta_query(struct nicvf *nic, uint8_t *tbl, uint32_t max_count); + +void nicvf_rss_set_key(struct nicvf *nic, uint8_t *key); +void nicvf_rss_get_key(struct nicvf *nic, uint8_t *key); + +void nicvf_rss_set_cfg(struct nicvf *nic, uint64_t val); +uint64_t nicvf_rss_get_cfg(struct nicvf *nic); + +int nicvf_loopback_config(struct nicvf *nic, bool enable); + +void nicvf_hw_get_stats(struct nicvf *nic, struct nicvf_hw_stats *stats); +void nicvf_hw_get_rx_qstats(struct nicvf *nic, + struct nicvf_hw_rx_qstats *qstats, uint16_t qidx); +void nicvf_hw_get_tx_qstats(struct nicvf *nic, + struct nicvf_hw_tx_qstats *qstats, uint16_t qidx); + +#endif /* _THUNDERX_NICVF_HW_H */ diff --git a/drivers/net/thunderx/base/nicvf_hw_defs.h b/drivers/net/thunderx/base/nicvf_hw_defs.h new file mode 100644 index 00000000..88ecd175 --- /dev/null +++ b/drivers/net/thunderx/base/nicvf_hw_defs.h @@ -0,0 +1,1219 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
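Editor's note: the inline accessors added above in nicvf_hw.h are the only way the rest of the driver touches VF registers; plain registers go through nicvf_reg_read()/nicvf_reg_write(), while per-queue registers add the queue offset computed by nicvf_qset_base(). As a minimal illustration (not part of the patch; it assumes an initialized struct nicvf and uses NIC_QSET_CQ_0_7_STATUS and NICVF_CQ_CQE_COUNT_MASK from nicvf_hw_defs.h below), a caller could poll the number of pending completion-queue entries like this:

#include "nicvf_plat.h"	/* pulls in nicvf_hw.h and nicvf_hw_defs.h */

/* Illustrative helper: number of CQEs pending on completion queue 'qidx'. */
static inline uint32_t
example_cq_pending(struct nicvf *nic, uint16_t qidx)
{
	uint64_t status;

	/* Per-queue registers live at reg_base + (qidx << NIC_Q_NUM_SHIFT),
	 * which is exactly what nicvf_queue_reg_read() computes. */
	status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, qidx);
	return (uint32_t)(status & NICVF_CQ_CQE_COUNT_MASK);
}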
+ * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _THUNDERX_NICVF_HW_DEFS_H +#define _THUNDERX_NICVF_HW_DEFS_H + +#include <stdint.h> +#include <stdbool.h> + +/* Virtual function register offsets */ + +#define NIC_VF_CFG (0x000020) +#define NIC_VF_PF_MAILBOX_0_1 (0x000130) +#define NIC_VF_INT (0x000200) +#define NIC_VF_INT_W1S (0x000220) +#define NIC_VF_ENA_W1C (0x000240) +#define NIC_VF_ENA_W1S (0x000260) + +#define NIC_VNIC_RSS_CFG (0x0020E0) +#define NIC_VNIC_RSS_KEY_0_4 (0x002200) +#define NIC_VNIC_TX_STAT_0_4 (0x004000) +#define NIC_VNIC_RX_STAT_0_13 (0x004100) +#define NIC_VNIC_RQ_GEN_CFG (0x010010) + +#define NIC_QSET_CQ_0_7_CFG (0x010400) +#define NIC_QSET_CQ_0_7_CFG2 (0x010408) +#define NIC_QSET_CQ_0_7_THRESH (0x010410) +#define NIC_QSET_CQ_0_7_BASE (0x010420) +#define NIC_QSET_CQ_0_7_HEAD (0x010428) +#define NIC_QSET_CQ_0_7_TAIL (0x010430) +#define NIC_QSET_CQ_0_7_DOOR (0x010438) +#define NIC_QSET_CQ_0_7_STATUS (0x010440) +#define NIC_QSET_CQ_0_7_STATUS2 (0x010448) +#define NIC_QSET_CQ_0_7_DEBUG (0x010450) + +#define NIC_QSET_RQ_0_7_CFG (0x010600) +#define NIC_QSET_RQ_0_7_STATUS0 (0x010700) +#define NIC_QSET_RQ_0_7_STATUS1 (0x010708) + +#define NIC_QSET_SQ_0_7_CFG (0x010800) +#define NIC_QSET_SQ_0_7_THRESH (0x010810) +#define NIC_QSET_SQ_0_7_BASE (0x010820) +#define NIC_QSET_SQ_0_7_HEAD (0x010828) +#define NIC_QSET_SQ_0_7_TAIL (0x010830) +#define NIC_QSET_SQ_0_7_DOOR (0x010838) +#define NIC_QSET_SQ_0_7_STATUS (0x010840) +#define NIC_QSET_SQ_0_7_DEBUG (0x010848) +#define NIC_QSET_SQ_0_7_STATUS0 (0x010900) +#define NIC_QSET_SQ_0_7_STATUS1 (0x010908) + +#define NIC_QSET_RBDR_0_1_CFG (0x010C00) +#define NIC_QSET_RBDR_0_1_THRESH (0x010C10) +#define NIC_QSET_RBDR_0_1_BASE (0x010C20) +#define NIC_QSET_RBDR_0_1_HEAD (0x010C28) +#define NIC_QSET_RBDR_0_1_TAIL (0x010C30) +#define NIC_QSET_RBDR_0_1_DOOR (0x010C38) +#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40) +#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48) +#define NIC_QSET_RBDR_0_1_PRFCH_STATUS (0x010C50) + +/* vNIC HW Constants */ + +#define NIC_Q_NUM_SHIFT 18 + +#define MAX_QUEUE_SET 128 +#define MAX_RCV_QUEUES_PER_QS 8 +#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2 +#define MAX_SND_QUEUES_PER_QS 8 +#define MAX_CMP_QUEUES_PER_QS 8 + +#define NICVF_INTR_CQ_SHIFT 0 +#define NICVF_INTR_SQ_SHIFT 8 +#define NICVF_INTR_RBDR_SHIFT 16 +#define NICVF_INTR_PKT_DROP_SHIFT 20 +#define NICVF_INTR_TCP_TIMER_SHIFT 21 +#define NICVF_INTR_MBOX_SHIFT 22 +#define NICVF_INTR_QS_ERR_SHIFT 23 + +#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT) +#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT) +#define 
NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT) +#define NICVF_INTR_PKT_DROP_MASK (1 << NICVF_INTR_PKT_DROP_SHIFT) +#define NICVF_INTR_TCP_TIMER_MASK (1 << NICVF_INTR_TCP_TIMER_SHIFT) +#define NICVF_INTR_MBOX_MASK (1 << NICVF_INTR_MBOX_SHIFT) +#define NICVF_INTR_QS_ERR_MASK (1 << NICVF_INTR_QS_ERR_SHIFT) +#define NICVF_INTR_ALL_MASK (0x7FFFFF) + +#define NICVF_CQ_WR_FULL (1ULL << 26) +#define NICVF_CQ_WR_DISABLE (1ULL << 25) +#define NICVF_CQ_WR_FAULT (1ULL << 24) +#define NICVF_CQ_ERR_MASK (NICVF_CQ_WR_FULL |\ + NICVF_CQ_WR_DISABLE |\ + NICVF_CQ_WR_FAULT) +#define NICVF_CQ_CQE_COUNT_MASK (0xFFFF) + +#define NICVF_SQ_ERR_STOPPED (1ULL << 21) +#define NICVF_SQ_ERR_SEND (1ULL << 20) +#define NICVF_SQ_ERR_DPE (1ULL << 19) +#define NICVF_SQ_ERR_MASK (NICVF_SQ_ERR_STOPPED |\ + NICVF_SQ_ERR_SEND |\ + NICVF_SQ_ERR_DPE) +#define NICVF_SQ_STATUS_STOPPED_BIT (21) + +#define NICVF_RBDR_FIFO_STATE_SHIFT (62) +#define NICVF_RBDR_FIFO_STATE_MASK (3ULL << NICVF_RBDR_FIFO_STATE_SHIFT) +#define NICVF_RBDR_COUNT_MASK (0x7FFFF) + +/* Queue reset */ +#define NICVF_CQ_RESET (1ULL << 41) +#define NICVF_SQ_RESET (1ULL << 17) +#define NICVF_RBDR_RESET (1ULL << 43) + +/* RSS constants */ +#define NIC_MAX_RSS_HASH_BITS (8) +#define NIC_MAX_RSS_IDR_TBL_SIZE (1 << NIC_MAX_RSS_HASH_BITS) +#define RSS_HASH_KEY_SIZE (5) /* 320 bit key */ +#define RSS_HASH_KEY_BYTE_SIZE (40) /* 320 bit key */ + +#define RSS_L2_EXTENDED_HASH_ENA (1 << 0) +#define RSS_IP_ENA (1 << 1) +#define RSS_TCP_ENA (1 << 2) +#define RSS_TCP_SYN_ENA (1 << 3) +#define RSS_UDP_ENA (1 << 4) +#define RSS_L4_EXTENDED_ENA (1 << 5) +#define RSS_L3_BI_DIRECTION_ENA (1 << 7) +#define RSS_L4_BI_DIRECTION_ENA (1 << 8) +#define RSS_TUN_VXLAN_ENA (1 << 9) +#define RSS_TUN_GENEVE_ENA (1 << 10) +#define RSS_TUN_NVGRE_ENA (1 << 11) + +#define RBDR_QUEUE_SZ_8K (8 * 1024) +#define RBDR_QUEUE_SZ_16K (16 * 1024) +#define RBDR_QUEUE_SZ_32K (32 * 1024) +#define RBDR_QUEUE_SZ_64K (64 * 1024) +#define RBDR_QUEUE_SZ_128K (128 * 1024) +#define RBDR_QUEUE_SZ_256K (256 * 1024) +#define RBDR_QUEUE_SZ_512K (512 * 1024) + +#define RBDR_SIZE_SHIFT (13) /* 8k */ + +#define SND_QUEUE_SZ_1K (1 * 1024) +#define SND_QUEUE_SZ_2K (2 * 1024) +#define SND_QUEUE_SZ_4K (4 * 1024) +#define SND_QUEUE_SZ_8K (8 * 1024) +#define SND_QUEUE_SZ_16K (16 * 1024) +#define SND_QUEUE_SZ_32K (32 * 1024) +#define SND_QUEUE_SZ_64K (64 * 1024) + +#define SND_QSIZE_SHIFT (10) /* 1k */ + +#define CMP_QUEUE_SZ_1K (1 * 1024) +#define CMP_QUEUE_SZ_2K (2 * 1024) +#define CMP_QUEUE_SZ_4K (4 * 1024) +#define CMP_QUEUE_SZ_8K (8 * 1024) +#define CMP_QUEUE_SZ_16K (16 * 1024) +#define CMP_QUEUE_SZ_32K (32 * 1024) +#define CMP_QUEUE_SZ_64K (64 * 1024) + +#define CMP_QSIZE_SHIFT (10) /* 1k */ + +#define NICVF_QSIZE_MIN_VAL (0) +#define NICVF_QSIZE_MAX_VAL (6) + +/* Min/Max packet size */ +#define NIC_HW_MIN_FRS (64) +#define NIC_HW_MAX_FRS (9200) /* 9216 max pkt including FCS */ +#define NIC_HW_MAX_SEGS (12) + +/* Descriptor alignments */ +#define NICVF_RBDR_BASE_ALIGN_BYTES (128) /* 7 bits */ +#define NICVF_CQ_BASE_ALIGN_BYTES (512) /* 9 bits */ +#define NICVF_SQ_BASE_ALIGN_BYTES (128) /* 7 bits */ + +#define NICVF_CQE_RBPTR_WORD (6) +#define NICVF_CQE_RX2_RBPTR_WORD (7) + +#define NICVF_STATIC_ASSERT(s) _Static_assert(s, #s) + +typedef uint64_t nicvf_phys_addr_t; + +#ifndef __BYTE_ORDER__ +#error __BYTE_ORDER__ not defined +#endif + +/* vNIC HW Enumerations */ + +enum nic_send_ld_type_e { + NIC_SEND_LD_TYPE_E_LDD, + NIC_SEND_LD_TYPE_E_LDT, + NIC_SEND_LD_TYPE_E_LDWB, + NIC_SEND_LD_TYPE_E_ENUM_LAST, +}; + +enum 
ether_type_algorithm { + ETYPE_ALG_NONE, + ETYPE_ALG_SKIP, + ETYPE_ALG_ENDPARSE, + ETYPE_ALG_VLAN, + ETYPE_ALG_VLAN_STRIP, +}; + +enum layer3_type { + L3TYPE_NONE, + L3TYPE_GRH, + L3TYPE_IPV4 = 0x4, + L3TYPE_IPV4_OPTIONS = 0x5, + L3TYPE_IPV6 = 0x6, + L3TYPE_IPV6_OPTIONS = 0x7, + L3TYPE_ET_STOP = 0xD, + L3TYPE_OTHER = 0xE, +}; + +#define NICVF_L3TYPE_OPTIONS_MASK ((uint8_t)1) +#define NICVF_L3TYPE_IPVX_MASK ((uint8_t)0x06) + +enum layer4_type { + L4TYPE_NONE, + L4TYPE_IPSEC_ESP, + L4TYPE_IPFRAG, + L4TYPE_IPCOMP, + L4TYPE_TCP, + L4TYPE_UDP, + L4TYPE_SCTP, + L4TYPE_GRE, + L4TYPE_ROCE_BTH, + L4TYPE_OTHER = 0xE, +}; + +/* CPI and RSSI configuration */ +enum cpi_algorithm_type { + CPI_ALG_NONE, + CPI_ALG_VLAN, + CPI_ALG_VLAN16, + CPI_ALG_DIFF, +}; + +enum rss_algorithm_type { + RSS_ALG_NONE, + RSS_ALG_PORT, + RSS_ALG_IP, + RSS_ALG_TCP_IP, + RSS_ALG_UDP_IP, + RSS_ALG_SCTP_IP, + RSS_ALG_GRE_IP, + RSS_ALG_ROCE, +}; + +enum rss_hash_cfg { + RSS_HASH_L2ETC, + RSS_HASH_IP, + RSS_HASH_TCP, + RSS_HASH_TCP_SYN_DIS, + RSS_HASH_UDP, + RSS_HASH_L4ETC, + RSS_HASH_ROCE, + RSS_L3_BIDI, + RSS_L4_BIDI, +}; + +/* Completion queue entry types */ +enum cqe_type { + CQE_TYPE_INVALID, + CQE_TYPE_RX = 0x2, + CQE_TYPE_RX_SPLIT = 0x3, + CQE_TYPE_RX_TCP = 0x4, + CQE_TYPE_SEND = 0x8, + CQE_TYPE_SEND_PTP = 0x9, +}; + +enum cqe_rx_tcp_status { + CQE_RX_STATUS_VALID_TCP_CNXT, + CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F, +}; + +enum cqe_send_status { + CQE_SEND_STATUS_GOOD, + CQE_SEND_STATUS_DESC_FAULT = 0x01, + CQE_SEND_STATUS_HDR_CONS_ERR = 0x11, + CQE_SEND_STATUS_SUBDESC_ERR = 0x12, + CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80, + CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81, + CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82, + CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83, + CQE_SEND_STATUS_LOCK_VIOL = 0x84, + CQE_SEND_STATUS_LOCK_UFLOW = 0x85, + CQE_SEND_STATUS_DATA_FAULT = 0x86, + CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87, + CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88, + CQE_SEND_STATUS_MEM_FAULT = 0x89, + CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A, + CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B, +}; + +enum cqe_rx_tcp_end_reason { + CQE_RX_TCP_END_FIN_FLAG_DET, + CQE_RX_TCP_END_INVALID_FLAG, + CQE_RX_TCP_END_TIMEOUT, + CQE_RX_TCP_END_OUT_OF_SEQ, + CQE_RX_TCP_END_PKT_ERR, + CQE_RX_TCP_END_QS_DISABLED = 0x0F, +}; + +/* Packet protocol level error enumeration */ +enum cqe_rx_err_level { + CQE_RX_ERRLVL_RE, + CQE_RX_ERRLVL_L2, + CQE_RX_ERRLVL_L3, + CQE_RX_ERRLVL_L4, +}; + +/* Packet protocol level error type enumeration */ +enum cqe_rx_err_opcode { + CQE_RX_ERR_RE_NONE, + CQE_RX_ERR_RE_PARTIAL, + CQE_RX_ERR_RE_JABBER, + CQE_RX_ERR_RE_FCS = 0x7, + CQE_RX_ERR_RE_TERMINATE = 0x9, + CQE_RX_ERR_RE_RX_CTL = 0xb, + CQE_RX_ERR_PREL2_ERR = 0x1f, + CQE_RX_ERR_L2_FRAGMENT = 0x20, + CQE_RX_ERR_L2_OVERRUN = 0x21, + CQE_RX_ERR_L2_PFCS = 0x22, + CQE_RX_ERR_L2_PUNY = 0x23, + CQE_RX_ERR_L2_MAL = 0x24, + CQE_RX_ERR_L2_OVERSIZE = 0x25, + CQE_RX_ERR_L2_UNDERSIZE = 0x26, + CQE_RX_ERR_L2_LENMISM = 0x27, + CQE_RX_ERR_L2_PCLP = 0x28, + CQE_RX_ERR_IP_NOT = 0x41, + CQE_RX_ERR_IP_CHK = 0x42, + CQE_RX_ERR_IP_MAL = 0x43, + CQE_RX_ERR_IP_MALD = 0x44, + CQE_RX_ERR_IP_HOP = 0x45, + CQE_RX_ERR_L3_ICRC = 0x46, + CQE_RX_ERR_L3_PCLP = 0x47, + CQE_RX_ERR_L4_MAL = 0x61, + CQE_RX_ERR_L4_CHK = 0x62, + CQE_RX_ERR_UDP_LEN = 0x63, + CQE_RX_ERR_L4_PORT = 0x64, + CQE_RX_ERR_TCP_FLAG = 0x65, + CQE_RX_ERR_TCP_OFFSET = 0x66, + CQE_RX_ERR_L4_PCLP = 0x67, + CQE_RX_ERR_RBDR_TRUNC = 0x70, +}; + +enum send_l4_csum_type { + SEND_L4_CSUM_DISABLE, + SEND_L4_CSUM_UDP, + SEND_L4_CSUM_TCP, +}; + +enum send_crc_alg { + SEND_CRCALG_CRC32, + 
SEND_CRCALG_CRC32C, + SEND_CRCALG_ICRC, +}; + +enum send_load_type { + SEND_LD_TYPE_LDD, + SEND_LD_TYPE_LDT, + SEND_LD_TYPE_LDWB, +}; + +enum send_mem_alg_type { + SEND_MEMALG_SET, + SEND_MEMALG_ADD = 0x08, + SEND_MEMALG_SUB = 0x09, + SEND_MEMALG_ADDLEN = 0x0A, + SEND_MEMALG_SUBLEN = 0x0B, +}; + +enum send_mem_dsz_type { + SEND_MEMDSZ_B64, + SEND_MEMDSZ_B32, + SEND_MEMDSZ_B8 = 0x03, +}; + +enum sq_subdesc_type { + SQ_DESC_TYPE_INVALID, + SQ_DESC_TYPE_HEADER, + SQ_DESC_TYPE_CRC, + SQ_DESC_TYPE_IMMEDIATE, + SQ_DESC_TYPE_GATHER, + SQ_DESC_TYPE_MEMORY, +}; + +enum l3_type_t { + L3_NONE, + L3_IPV4 = 0x04, + L3_IPV4_OPT = 0x05, + L3_IPV6 = 0x06, + L3_IPV6_OPT = 0x07, + L3_ET_STOP = 0x0D, + L3_OTHER = 0x0E +}; + +enum l4_type_t { + L4_NONE, + L4_IPSEC_ESP = 0x01, + L4_IPFRAG = 0x02, + L4_IPCOMP = 0x03, + L4_TCP = 0x04, + L4_UDP_PASS1 = 0x05, + L4_GRE = 0x07, + L4_UDP_PASS2 = 0x08, + L4_UDP_GENEVE = 0x09, + L4_UDP_VXLAN = 0x0A, + L4_NVGRE = 0x0C, + L4_OTHER = 0x0E +}; + +enum vlan_strip { + NO_STRIP, + STRIP_FIRST_VLAN, + STRIP_SECOND_VLAN, + STRIP_RESERV, +}; + +enum rbdr_state { + RBDR_FIFO_STATE_INACTIVE, + RBDR_FIFO_STATE_ACTIVE, + RBDR_FIFO_STATE_RESET, + RBDR_FIFO_STATE_FAIL, +}; + +enum rq_cache_allocation { + RQ_CACHE_ALLOC_OFF, + RQ_CACHE_ALLOC_ALL, + RQ_CACHE_ALLOC_FIRST, + RQ_CACHE_ALLOC_TWO, +}; + +enum cq_rx_errlvl_e { + CQ_ERRLVL_MAC, + CQ_ERRLVL_L2, + CQ_ERRLVL_L3, + CQ_ERRLVL_L4, +}; + +enum cq_rx_errop_e { + CQ_RX_ERROP_RE_NONE, + CQ_RX_ERROP_RE_PARTIAL = 0x1, + CQ_RX_ERROP_RE_JABBER = 0x2, + CQ_RX_ERROP_RE_FCS = 0x7, + CQ_RX_ERROP_RE_TERMINATE = 0x9, + CQ_RX_ERROP_RE_RX_CTL = 0xb, + CQ_RX_ERROP_PREL2_ERR = 0x1f, + CQ_RX_ERROP_L2_FRAGMENT = 0x20, + CQ_RX_ERROP_L2_OVERRUN = 0x21, + CQ_RX_ERROP_L2_PFCS = 0x22, + CQ_RX_ERROP_L2_PUNY = 0x23, + CQ_RX_ERROP_L2_MAL = 0x24, + CQ_RX_ERROP_L2_OVERSIZE = 0x25, + CQ_RX_ERROP_L2_UNDERSIZE = 0x26, + CQ_RX_ERROP_L2_LENMISM = 0x27, + CQ_RX_ERROP_L2_PCLP = 0x28, + CQ_RX_ERROP_IP_NOT = 0x41, + CQ_RX_ERROP_IP_CSUM_ERR = 0x42, + CQ_RX_ERROP_IP_MAL = 0x43, + CQ_RX_ERROP_IP_MALD = 0x44, + CQ_RX_ERROP_IP_HOP = 0x45, + CQ_RX_ERROP_L3_ICRC = 0x46, + CQ_RX_ERROP_L3_PCLP = 0x47, + CQ_RX_ERROP_L4_MAL = 0x61, + CQ_RX_ERROP_L4_CHK = 0x62, + CQ_RX_ERROP_UDP_LEN = 0x63, + CQ_RX_ERROP_L4_PORT = 0x64, + CQ_RX_ERROP_TCP_FLAG = 0x65, + CQ_RX_ERROP_TCP_OFFSET = 0x66, + CQ_RX_ERROP_L4_PCLP = 0x67, + CQ_RX_ERROP_RBDR_TRUNC = 0x70, +}; + +enum cq_tx_errop_e { + CQ_TX_ERROP_GOOD, + CQ_TX_ERROP_DESC_FAULT = 0x10, + CQ_TX_ERROP_HDR_CONS_ERR = 0x11, + CQ_TX_ERROP_SUBDC_ERR = 0x12, + CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80, + CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81, + CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82, + CQ_TX_ERROP_LOCK_VIOL = 0x83, + CQ_TX_ERROP_DATA_FAULT = 0x84, + CQ_TX_ERROP_TSTMP_CONFLICT = 0x85, + CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86, + CQ_TX_ERROP_MEM_FAULT = 0x87, + CQ_TX_ERROP_CK_OVERLAP = 0x88, + CQ_TX_ERROP_CK_OFLOW = 0x89, + CQ_TX_ERROP_ENUM_LAST = 0x8a, +}; + +enum rq_sq_stats_reg_offset { + RQ_SQ_STATS_OCTS, + RQ_SQ_STATS_PKTS, +}; + +enum nic_stat_vnic_rx_e { + RX_OCTS, + RX_UCAST, + RX_BCAST, + RX_MCAST, + RX_RED, + RX_RED_OCTS, + RX_ORUN, + RX_ORUN_OCTS, + RX_FCS, + RX_L2ERR, + RX_DRP_BCAST, + RX_DRP_MCAST, + RX_DRP_L3BCAST, + RX_DRP_L3MCAST, +}; + +enum nic_stat_vnic_tx_e { + TX_OCTS, + TX_UCAST, + TX_BCAST, + TX_MCAST, + TX_DROP, +}; + +/* vNIC HW Register structures */ + +typedef union { + uint64_t u64; + struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t cqe_type:4; + uint64_t stdn_fault:1; + uint64_t rsvd0:1; + uint64_t rq_qs:7; + uint64_t rq_idx:3; 
+ uint64_t rsvd1:12; + uint64_t rss_alg:4; + uint64_t rsvd2:4; + uint64_t rb_cnt:4; + uint64_t vlan_found:1; + uint64_t vlan_stripped:1; + uint64_t vlan2_found:1; + uint64_t vlan2_stripped:1; + uint64_t l4_type:4; + uint64_t l3_type:4; + uint64_t l2_present:1; + uint64_t err_level:3; + uint64_t err_opcode:8; +#else + uint64_t err_opcode:8; + uint64_t err_level:3; + uint64_t l2_present:1; + uint64_t l3_type:4; + uint64_t l4_type:4; + uint64_t vlan2_stripped:1; + uint64_t vlan2_found:1; + uint64_t vlan_stripped:1; + uint64_t vlan_found:1; + uint64_t rb_cnt:4; + uint64_t rsvd2:4; + uint64_t rss_alg:4; + uint64_t rsvd1:12; + uint64_t rq_idx:3; + uint64_t rq_qs:7; + uint64_t rsvd0:1; + uint64_t stdn_fault:1; + uint64_t cqe_type:4; +#endif + }; +} cqe_rx_word0_t; + +typedef union { + uint64_t u64; + struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t pkt_len:16; + uint64_t l2_ptr:8; + uint64_t l3_ptr:8; + uint64_t l4_ptr:8; + uint64_t cq_pkt_len:8; + uint64_t align_pad:3; + uint64_t rsvd3:1; + uint64_t chan:12; +#else + uint64_t chan:12; + uint64_t rsvd3:1; + uint64_t align_pad:3; + uint64_t cq_pkt_len:8; + uint64_t l4_ptr:8; + uint64_t l3_ptr:8; + uint64_t l2_ptr:8; + uint64_t pkt_len:16; +#endif + }; +} cqe_rx_word1_t; + +typedef union { + uint64_t u64; + struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t rss_tag:32; + uint64_t vlan_tci:16; + uint64_t vlan_ptr:8; + uint64_t vlan2_ptr:8; +#else + uint64_t vlan2_ptr:8; + uint64_t vlan_ptr:8; + uint64_t vlan_tci:16; + uint64_t rss_tag:32; +#endif + }; +} cqe_rx_word2_t; + +typedef union { + uint64_t u64; + struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint16_t rb3_sz; + uint16_t rb2_sz; + uint16_t rb1_sz; + uint16_t rb0_sz; +#else + uint16_t rb0_sz; + uint16_t rb1_sz; + uint16_t rb2_sz; + uint16_t rb3_sz; +#endif + }; +} cqe_rx_word3_t; + +typedef union { + uint64_t u64; + struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint16_t rb7_sz; + uint16_t rb6_sz; + uint16_t rb5_sz; + uint16_t rb4_sz; +#else + uint16_t rb4_sz; + uint16_t rb5_sz; + uint16_t rb6_sz; + uint16_t rb7_sz; +#endif + }; +} cqe_rx_word4_t; + +typedef union { + uint64_t u64; + struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint16_t rb11_sz; + uint16_t rb10_sz; + uint16_t rb9_sz; + uint16_t rb8_sz; +#else + uint16_t rb8_sz; + uint16_t rb9_sz; + uint16_t rb10_sz; + uint16_t rb11_sz; +#endif + }; +} cqe_rx_word5_t; + +typedef union { + uint64_t u64; + struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t vlan_found:1; + uint64_t vlan_stripped:1; + uint64_t vlan2_found:1; + uint64_t vlan2_stripped:1; + uint64_t rsvd2:3; + uint64_t inner_l2:1; + uint64_t inner_l4type:4; + uint64_t inner_l3type:4; + uint64_t vlan_ptr:8; + uint64_t vlan2_ptr:8; + uint64_t rsvd1:8; + uint64_t rsvd0:8; + uint64_t inner_l3ptr:8; + uint64_t inner_l4ptr:8; +#else + uint64_t inner_l4ptr:8; + uint64_t inner_l3ptr:8; + uint64_t rsvd0:8; + uint64_t rsvd1:8; + uint64_t vlan2_ptr:8; + uint64_t vlan_ptr:8; + uint64_t inner_l3type:4; + uint64_t inner_l4type:4; + uint64_t inner_l2:1; + uint64_t rsvd2:3; + uint64_t vlan2_stripped:1; + uint64_t vlan2_found:1; + uint64_t vlan_stripped:1; + uint64_t vlan_found:1; +#endif + }; +} cqe_rx2_word6_t; + +struct cqe_rx_t { + cqe_rx_word0_t word0; + cqe_rx_word1_t word1; + cqe_rx_word2_t word2; + cqe_rx_word3_t word3; + cqe_rx_word4_t word4; + cqe_rx_word5_t word5; + cqe_rx2_word6_t word6; /* if NIC_PF_RX_CFG[CQE_RX2_ENA] set */ +}; + +struct cqe_rx_tcp_err_t { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t 
cqe_type:4; /* W0 */ + uint64_t rsvd0:60; + + uint64_t rsvd1:4; /* W1 */ + uint64_t partial_first:1; + uint64_t rsvd2:27; + uint64_t rbdr_bytes:8; + uint64_t rsvd3:24; +#else + uint64_t rsvd0:60; + uint64_t cqe_type:4; + + uint64_t rsvd3:24; + uint64_t rbdr_bytes:8; + uint64_t rsvd2:27; + uint64_t partial_first:1; + uint64_t rsvd1:4; +#endif +}; + +struct cqe_rx_tcp_t { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t cqe_type:4; /* W0 */ + uint64_t rsvd0:52; + uint64_t cq_tcp_status:8; + + uint64_t rsvd1:32; /* W1 */ + uint64_t tcp_cntx_bytes:8; + uint64_t rsvd2:8; + uint64_t tcp_err_bytes:16; +#else + uint64_t cq_tcp_status:8; + uint64_t rsvd0:52; + uint64_t cqe_type:4; /* W0 */ + + uint64_t tcp_err_bytes:16; + uint64_t rsvd2:8; + uint64_t tcp_cntx_bytes:8; + uint64_t rsvd1:32; /* W1 */ +#endif +}; + +struct cqe_send_t { +#if defined(__BIG_ENDIAN_BITFIELD) + uint64_t cqe_type:4; /* W0 */ + uint64_t rsvd0:4; + uint64_t sqe_ptr:16; + uint64_t rsvd1:4; + uint64_t rsvd2:10; + uint64_t sq_qs:7; + uint64_t sq_idx:3; + uint64_t rsvd3:8; + uint64_t send_status:8; + + uint64_t ptp_timestamp:64; /* W1 */ +#elif defined(__LITTLE_ENDIAN_BITFIELD) + uint64_t send_status:8; + uint64_t rsvd3:8; + uint64_t sq_idx:3; + uint64_t sq_qs:7; + uint64_t rsvd2:10; + uint64_t rsvd1:4; + uint64_t sqe_ptr:16; + uint64_t rsvd0:4; + uint64_t cqe_type:4; /* W0 */ + + uint64_t ptp_timestamp:64; +#endif +}; + +struct cq_entry_type_t { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t cqe_type:4; + uint64_t __pad:60; +#else + uint64_t __pad:60; + uint64_t cqe_type:4; +#endif +}; + +union cq_entry_t { + uint64_t u[64]; + struct cq_entry_type_t type; + struct cqe_rx_t rx_hdr; + struct cqe_rx_tcp_t rx_tcp_hdr; + struct cqe_rx_tcp_err_t rx_tcp_err_hdr; + struct cqe_send_t cqe_send; +}; + +NICVF_STATIC_ASSERT(sizeof(union cq_entry_t) == 512); + +struct rbdr_entry_t { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + union { + struct { + uint64_t rsvd0:15; + uint64_t buf_addr:42; + uint64_t cache_align:7; + }; + nicvf_phys_addr_t full_addr; + }; +#else + union { + struct { + uint64_t cache_align:7; + uint64_t buf_addr:42; + uint64_t rsvd0:15; + }; + nicvf_phys_addr_t full_addr; + }; +#endif +}; + +NICVF_STATIC_ASSERT(sizeof(struct rbdr_entry_t) == sizeof(uint64_t)); + +/* TCP reassembly context */ +struct rbe_tcp_cnxt_t { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t tcp_pkt_cnt:12; + uint64_t rsvd1:4; + uint64_t align_hdr_bytes:4; + uint64_t align_ptr_bytes:4; + uint64_t ptr_bytes:16; + uint64_t rsvd2:24; + uint64_t cqe_type:4; + uint64_t rsvd0:54; + uint64_t tcp_end_reason:2; + uint64_t tcp_status:4; +#else + uint64_t tcp_status:4; + uint64_t tcp_end_reason:2; + uint64_t rsvd0:54; + uint64_t cqe_type:4; + uint64_t rsvd2:24; + uint64_t ptr_bytes:16; + uint64_t align_ptr_bytes:4; + uint64_t align_hdr_bytes:4; + uint64_t rsvd1:4; + uint64_t tcp_pkt_cnt:12; +#endif +}; + +/* Always Big endian */ +struct rx_hdr_t { + uint64_t opaque:32; + uint64_t rss_flow:8; + uint64_t skip_length:6; + uint64_t disable_rss:1; + uint64_t disable_tcp_reassembly:1; + uint64_t nodrop:1; + uint64_t dest_alg:2; + uint64_t rsvd0:2; + uint64_t dest_rq:11; +}; + +struct sq_crc_subdesc { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t rsvd1:32; + uint64_t crc_ival:32; + uint64_t subdesc_type:4; + uint64_t crc_alg:2; + uint64_t rsvd0:10; + uint64_t crc_insert_pos:16; + uint64_t hdr_start:16; + uint64_t crc_len:16; +#else + uint64_t crc_len:16; + uint64_t hdr_start:16; + uint64_t crc_insert_pos:16; + uint64_t rsvd0:10; + uint64_t 
crc_alg:2; + uint64_t subdesc_type:4; + uint64_t crc_ival:32; + uint64_t rsvd1:32; +#endif +}; + +struct sq_gather_subdesc { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t subdesc_type:4; /* W0 */ + uint64_t ld_type:2; + uint64_t rsvd0:42; + uint64_t size:16; + + uint64_t rsvd1:15; /* W1 */ + uint64_t addr:49; +#else + uint64_t size:16; + uint64_t rsvd0:42; + uint64_t ld_type:2; + uint64_t subdesc_type:4; /* W0 */ + + uint64_t addr:49; + uint64_t rsvd1:15; /* W1 */ +#endif +}; + +/* SQ immediate subdescriptor */ +struct sq_imm_subdesc { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t subdesc_type:4; /* W0 */ + uint64_t rsvd0:46; + uint64_t len:14; + + uint64_t data:64; /* W1 */ +#else + uint64_t len:14; + uint64_t rsvd0:46; + uint64_t subdesc_type:4; /* W0 */ + + uint64_t data:64; /* W1 */ +#endif +}; + +struct sq_mem_subdesc { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t subdesc_type:4; /* W0 */ + uint64_t mem_alg:4; + uint64_t mem_dsz:2; + uint64_t wmem:1; + uint64_t rsvd0:21; + uint64_t offset:32; + + uint64_t rsvd1:15; /* W1 */ + uint64_t addr:49; +#else + uint64_t offset:32; + uint64_t rsvd0:21; + uint64_t wmem:1; + uint64_t mem_dsz:2; + uint64_t mem_alg:4; + uint64_t subdesc_type:4; /* W0 */ + + uint64_t addr:49; + uint64_t rsvd1:15; /* W1 */ +#endif +}; + +struct sq_hdr_subdesc { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t subdesc_type:4; + uint64_t tso:1; + uint64_t post_cqe:1; /* Post CQE on no error also */ + uint64_t dont_send:1; + uint64_t tstmp:1; + uint64_t subdesc_cnt:8; + uint64_t csum_l4:2; + uint64_t csum_l3:1; + uint64_t csum_inner_l4:2; + uint64_t csum_inner_l3:1; + uint64_t rsvd0:2; + uint64_t l4_offset:8; + uint64_t l3_offset:8; + uint64_t rsvd1:4; + uint64_t tot_len:20; /* W0 */ + + uint64_t rsvd2:24; + uint64_t inner_l4_offset:8; + uint64_t inner_l3_offset:8; + uint64_t tso_start:8; + uint64_t rsvd3:2; + uint64_t tso_max_paysize:14; /* W1 */ +#else + uint64_t tot_len:20; + uint64_t rsvd1:4; + uint64_t l3_offset:8; + uint64_t l4_offset:8; + uint64_t rsvd0:2; + uint64_t csum_inner_l3:1; + uint64_t csum_inner_l4:2; + uint64_t csum_l3:1; + uint64_t csum_l4:2; + uint64_t subdesc_cnt:8; + uint64_t tstmp:1; + uint64_t dont_send:1; + uint64_t post_cqe:1; /* Post CQE on no error also */ + uint64_t tso:1; + uint64_t subdesc_type:4; /* W0 */ + + uint64_t tso_max_paysize:14; + uint64_t rsvd3:2; + uint64_t tso_start:8; + uint64_t inner_l3_offset:8; + uint64_t inner_l4_offset:8; + uint64_t rsvd2:24; /* W1 */ +#endif +}; + +/* Each sq entry is 128 bits wide */ +union sq_entry_t { + uint64_t buff[2]; + struct sq_hdr_subdesc hdr; + struct sq_imm_subdesc imm; + struct sq_gather_subdesc gather; + struct sq_crc_subdesc crc; + struct sq_mem_subdesc mem; +}; + +NICVF_STATIC_ASSERT(sizeof(union sq_entry_t) == 16); + +/* Queue config register formats */ +struct rq_cfg { union { struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t reserved_2_63:62; + uint64_t ena:1; + uint64_t reserved_0:1; +#else + uint64_t reserved_0:1; + uint64_t ena:1; + uint64_t reserved_2_63:62; +#endif + }; + uint64_t value; +}; }; + +struct cq_cfg { union { struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t reserved_43_63:21; + uint64_t ena:1; + uint64_t reset:1; + uint64_t caching:1; + uint64_t reserved_35_39:5; + uint64_t qsize:3; + uint64_t reserved_25_31:7; + uint64_t avg_con:9; + uint64_t reserved_0_15:16; +#else + uint64_t reserved_0_15:16; + uint64_t avg_con:9; + uint64_t reserved_25_31:7; + uint64_t qsize:3; + uint64_t reserved_35_39:5; + uint64_t 
caching:1; + uint64_t reset:1; + uint64_t ena:1; + uint64_t reserved_43_63:21; +#endif + }; + uint64_t value; +}; }; + +struct sq_cfg { union { struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t reserved_20_63:44; + uint64_t ena:1; + uint64_t reserved_18_18:1; + uint64_t reset:1; + uint64_t ldwb:1; + uint64_t reserved_11_15:5; + uint64_t qsize:3; + uint64_t reserved_3_7:5; + uint64_t tstmp_bgx_intf:3; +#else + uint64_t tstmp_bgx_intf:3; + uint64_t reserved_3_7:5; + uint64_t qsize:3; + uint64_t reserved_11_15:5; + uint64_t ldwb:1; + uint64_t reset:1; + uint64_t reserved_18_18:1; + uint64_t ena:1; + uint64_t reserved_20_63:44; +#endif + }; + uint64_t value; +}; }; + +struct rbdr_cfg { union { struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t reserved_45_63:19; + uint64_t ena:1; + uint64_t reset:1; + uint64_t ldwb:1; + uint64_t reserved_36_41:6; + uint64_t qsize:4; + uint64_t reserved_25_31:7; + uint64_t avg_con:9; + uint64_t reserved_12_15:4; + uint64_t lines:12; +#else + uint64_t lines:12; + uint64_t reserved_12_15:4; + uint64_t avg_con:9; + uint64_t reserved_25_31:7; + uint64_t qsize:4; + uint64_t reserved_36_41:6; + uint64_t ldwb:1; + uint64_t reset:1; + uint64_t ena: 1; + uint64_t reserved_45_63:19; +#endif + }; + uint64_t value; +}; }; + +struct pf_qs_cfg { union { struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t reserved_32_63:32; + uint64_t ena:1; + uint64_t reserved_27_30:4; + uint64_t sq_ins_ena:1; + uint64_t sq_ins_pos:6; + uint64_t lock_ena:1; + uint64_t lock_viol_cqe_ena:1; + uint64_t send_tstmp_ena:1; + uint64_t be:1; + uint64_t reserved_7_15:9; + uint64_t vnic:7; +#else + uint64_t vnic:7; + uint64_t reserved_7_15:9; + uint64_t be:1; + uint64_t send_tstmp_ena:1; + uint64_t lock_viol_cqe_ena:1; + uint64_t lock_ena:1; + uint64_t sq_ins_pos:6; + uint64_t sq_ins_ena:1; + uint64_t reserved_27_30:4; + uint64_t ena:1; + uint64_t reserved_32_63:32; +#endif + }; + uint64_t value; +}; }; + +struct pf_rq_cfg { union { struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t reserved1:1; + uint64_t reserved0:34; + uint64_t strip_pre_l2:1; + uint64_t caching:2; + uint64_t cq_qs:7; + uint64_t cq_idx:3; + uint64_t rbdr_cont_qs:7; + uint64_t rbdr_cont_idx:1; + uint64_t rbdr_strt_qs:7; + uint64_t rbdr_strt_idx:1; +#else + uint64_t rbdr_strt_idx:1; + uint64_t rbdr_strt_qs:7; + uint64_t rbdr_cont_idx:1; + uint64_t rbdr_cont_qs:7; + uint64_t cq_idx:3; + uint64_t cq_qs:7; + uint64_t caching:2; + uint64_t strip_pre_l2:1; + uint64_t reserved0:34; + uint64_t reserved1:1; +#endif + }; + uint64_t value; +}; }; + +struct pf_rq_drop_cfg { union { struct { +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + uint64_t rbdr_red:1; + uint64_t cq_red:1; + uint64_t reserved3:14; + uint64_t rbdr_pass:8; + uint64_t rbdr_drop:8; + uint64_t reserved2:8; + uint64_t cq_pass:8; + uint64_t cq_drop:8; + uint64_t reserved1:8; +#else + uint64_t reserved1:8; + uint64_t cq_drop:8; + uint64_t cq_pass:8; + uint64_t reserved2:8; + uint64_t rbdr_drop:8; + uint64_t rbdr_pass:8; + uint64_t reserved3:14; + uint64_t cq_red:1; + uint64_t rbdr_red:1; +#endif + }; + uint64_t value; +}; }; + +#endif /* _THUNDERX_NICVF_HW_DEFS_H */ diff --git a/drivers/net/thunderx/base/nicvf_mbox.c b/drivers/net/thunderx/base/nicvf_mbox.c new file mode 100644 index 00000000..9c5cd834 --- /dev/null +++ b/drivers/net/thunderx/base/nicvf_mbox.c @@ -0,0 +1,418 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. 
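Editor's note: the queue configuration registers at the end of nicvf_hw_defs.h all follow one pattern, a union of named bitfields over a raw 64-bit value, so code can fill in fields symbolically and write .value with a single store. A simplified, illustrative sketch of programming a completion queue with these types; the real programming is done by nicvf_qset_cq_config() declared in nicvf_hw.h, and the qsize value here only demonstrates the encoding:

#include "nicvf_plat.h"

/* Illustrative only: build a CQ_CFG image and write it for queue 'qidx'. */
static void
example_program_cq(struct nicvf *nic, uint16_t qidx, uint64_t cq_base_phys)
{
	struct cq_cfg cq_cfg;

	/* Program the ring base first, then the CFG register image. */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_BASE, qidx, cq_base_phys);

	cq_cfg.value = 0;
	cq_cfg.ena = 1;
	/* qsize code n selects (1 << (CMP_QSIZE_SHIFT + n)) entries,
	 * so 3 here would mean an 8K-entry completion queue. */
	cq_cfg.qsize = 3;
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_CFG, qidx, cq_cfg.value);
}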
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <assert.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "nicvf_plat.h"
+
+#define NICVF_MBOX_PF_RESPONSE_DELAY_US (1000)
+
+static const char *mbox_message[NIC_MBOX_MSG_MAX] = {
+	[NIC_MBOX_MSG_INVALID] = "NIC_MBOX_MSG_INVALID",
+	[NIC_MBOX_MSG_READY] = "NIC_MBOX_MSG_READY",
+	[NIC_MBOX_MSG_ACK] = "NIC_MBOX_MSG_ACK",
+	[NIC_MBOX_MSG_NACK] = "NIC_MBOX_MSG_NACK",
+	[NIC_MBOX_MSG_QS_CFG] = "NIC_MBOX_MSG_QS_CFG",
+	[NIC_MBOX_MSG_RQ_CFG] = "NIC_MBOX_MSG_RQ_CFG",
+	[NIC_MBOX_MSG_SQ_CFG] = "NIC_MBOX_MSG_SQ_CFG",
+	[NIC_MBOX_MSG_RQ_DROP_CFG] = "NIC_MBOX_MSG_RQ_DROP_CFG",
+	[NIC_MBOX_MSG_SET_MAC] = "NIC_MBOX_MSG_SET_MAC",
+	[NIC_MBOX_MSG_SET_MAX_FRS] = "NIC_MBOX_MSG_SET_MAX_FRS",
+	[NIC_MBOX_MSG_CPI_CFG] = "NIC_MBOX_MSG_CPI_CFG",
+	[NIC_MBOX_MSG_RSS_SIZE] = "NIC_MBOX_MSG_RSS_SIZE",
+	[NIC_MBOX_MSG_RSS_CFG] = "NIC_MBOX_MSG_RSS_CFG",
+	[NIC_MBOX_MSG_RSS_CFG_CONT] = "NIC_MBOX_MSG_RSS_CFG_CONT",
+	[NIC_MBOX_MSG_RQ_BP_CFG] = "NIC_MBOX_MSG_RQ_BP_CFG",
+	[NIC_MBOX_MSG_RQ_SW_SYNC] = "NIC_MBOX_MSG_RQ_SW_SYNC",
+	[NIC_MBOX_MSG_BGX_LINK_CHANGE] = "NIC_MBOX_MSG_BGX_LINK_CHANGE",
+	[NIC_MBOX_MSG_ALLOC_SQS] = "NIC_MBOX_MSG_ALLOC_SQS",
+	[NIC_MBOX_MSG_LOOPBACK] = "NIC_MBOX_MSG_LOOPBACK",
+	[NIC_MBOX_MSG_RESET_STAT_COUNTER] = "NIC_MBOX_MSG_RESET_STAT_COUNTER",
+	[NIC_MBOX_MSG_CFG_DONE] = "NIC_MBOX_MSG_CFG_DONE",
+	[NIC_MBOX_MSG_SHUTDOWN] = "NIC_MBOX_MSG_SHUTDOWN",
+};
+
+static inline const char * __attribute__((unused))
+nicvf_mbox_msg_str(int msg)
+{
+	assert(msg >= 0 && msg < NIC_MBOX_MSG_MAX);
+	/* undefined messages */
+	if (mbox_message[msg] == NULL)
+		msg = 0;
+	return mbox_message[msg];
+}
+
+static inline void
+nicvf_mbox_send_msg_to_pf_raw(struct nicvf *nic, struct nic_mbx *mbx)
+{
+	uint64_t *mbx_data;
+	uint64_t mbx_addr;
+	int i;
+
+	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
+	mbx_data = (uint64_t *)mbx;
+	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
+		nicvf_reg_write(nic, mbx_addr, *mbx_data);
+		mbx_data++;
+		mbx_addr += sizeof(uint64_t);
+	}
+	nicvf_mbox_log("msg sent %s 
(VF%d)", + nicvf_mbox_msg_str(mbx->msg.msg), nic->vf_id); +} + +static inline void +nicvf_mbox_send_async_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx) +{ + nicvf_mbox_send_msg_to_pf_raw(nic, mbx); + /* Messages without ack are racy!*/ + nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US); +} + +static inline int +nicvf_mbox_send_msg_to_pf(struct nicvf *nic, struct nic_mbx *mbx) +{ + long timeout; + long sleep = 10; + int i, retry = 5; + + for (i = 0; i < retry; i++) { + nic->pf_acked = false; + nic->pf_nacked = false; + nicvf_smp_wmb(); + + nicvf_mbox_send_msg_to_pf_raw(nic, mbx); + /* Give some time to get PF response */ + nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US); + timeout = NIC_MBOX_MSG_TIMEOUT; + while (timeout > 0) { + /* Periodic poll happens from nicvf_interrupt() */ + nicvf_smp_rmb(); + + if (nic->pf_nacked) + return -EINVAL; + if (nic->pf_acked) + return 0; + + nicvf_delay_us(NICVF_MBOX_PF_RESPONSE_DELAY_US); + timeout -= sleep; + } + nicvf_log_error("PF didn't ack to msg 0x%02x %s VF%d (%d/%d)", + mbx->msg.msg, nicvf_mbox_msg_str(mbx->msg.msg), + nic->vf_id, i, retry); + } + return -EBUSY; +} + + +int +nicvf_handle_mbx_intr(struct nicvf *nic) +{ + struct nic_mbx mbx; + uint64_t *mbx_data = (uint64_t *)&mbx; + uint64_t mbx_addr = NIC_VF_PF_MAILBOX_0_1; + size_t i; + + for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) { + *mbx_data = nicvf_reg_read(nic, mbx_addr); + mbx_data++; + mbx_addr += sizeof(uint64_t); + } + + /* Overwrite the message so we won't receive it again */ + nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1, 0x0); + + nicvf_mbox_log("msg received id=0x%hhx %s (VF%d)", mbx.msg.msg, + nicvf_mbox_msg_str(mbx.msg.msg), nic->vf_id); + + switch (mbx.msg.msg) { + case NIC_MBOX_MSG_READY: + nic->vf_id = mbx.nic_cfg.vf_id & 0x7F; + nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F; + nic->node = mbx.nic_cfg.node_id; + nic->sqs_mode = mbx.nic_cfg.sqs_mode; + nic->loopback_supported = mbx.nic_cfg.loopback_supported; + ether_addr_copy((struct ether_addr *)mbx.nic_cfg.mac_addr, + (struct ether_addr *)nic->mac_addr); + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_ACK: + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_NACK: + nic->pf_nacked = true; + break; + case NIC_MBOX_MSG_RSS_SIZE: + nic->rss_info.rss_size = mbx.rss_size.ind_tbl_size; + nic->pf_acked = true; + break; + case NIC_MBOX_MSG_BGX_LINK_CHANGE: + nic->link_up = mbx.link_status.link_up; + nic->duplex = mbx.link_status.duplex; + nic->speed = mbx.link_status.speed; + nic->pf_acked = true; + break; + default: + nicvf_log_error("Invalid message from PF, msg_id=0x%hhx %s", + mbx.msg.msg, nicvf_mbox_msg_str(mbx.msg.msg)); + break; + } + nicvf_smp_wmb(); + + return mbx.msg.msg; +} + +/* + * Checks if VF is able to communicate with PF + * and also gets the VNIC number this VF is associated to. 
+ */ +int +nicvf_mbox_check_pf_ready(struct nicvf *nic) +{ + struct nic_mbx mbx = { .msg = {.msg = NIC_MBOX_MSG_READY} }; + + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_set_mac_addr(struct nicvf *nic, + const uint8_t mac[NICVF_MAC_ADDR_SIZE]) +{ + struct nic_mbx mbx = { .msg = {0} }; + int i; + + mbx.msg.msg = NIC_MBOX_MSG_SET_MAC; + mbx.mac.vf_id = nic->vf_id; + for (i = 0; i < 6; i++) + mbx.mac.mac_addr[i] = mac[i]; + + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.msg.msg = NIC_MBOX_MSG_CPI_CFG; + mbx.cpi_cfg.vf_id = nic->vf_id; + mbx.cpi_cfg.cpi_alg = nic->cpi_alg; + mbx.cpi_cfg.rq_cnt = qcnt; + + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_get_rss_size(struct nicvf *nic) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.msg.msg = NIC_MBOX_MSG_RSS_SIZE; + mbx.rss_size.vf_id = nic->vf_id; + + /* Result will be stored in nic->rss_info.rss_size */ + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_config_rss(struct nicvf *nic) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + struct nicvf_rss_reta_info *rss = &nic->rss_info; + size_t tot_len = rss->rss_size; + size_t cur_len; + size_t cur_idx = 0; + size_t i; + + mbx.rss_cfg.vf_id = nic->vf_id; + mbx.rss_cfg.hash_bits = rss->hash_bits; + mbx.rss_cfg.tbl_len = 0; + mbx.rss_cfg.tbl_offset = 0; + + while (cur_idx < tot_len) { + cur_len = nicvf_min(tot_len - cur_idx, + (size_t)RSS_IND_TBL_LEN_PER_MBX_MSG); + mbx.msg.msg = (cur_idx > 0) ? + NIC_MBOX_MSG_RSS_CFG_CONT : NIC_MBOX_MSG_RSS_CFG; + mbx.rss_cfg.tbl_offset = cur_idx; + mbx.rss_cfg.tbl_len = cur_len; + for (i = 0; i < cur_len; i++) + mbx.rss_cfg.ind_tbl[i] = rss->ind_tbl[cur_idx++]; + + if (nicvf_mbox_send_msg_to_pf(nic, &mbx)) + return NICVF_ERR_RSS_TBL_UPDATE; + } + + return 0; +} + +int +nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx, + struct pf_rq_cfg *pf_rq_cfg) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.msg.msg = NIC_MBOX_MSG_RQ_CFG; + mbx.rq.qs_num = nic->vf_id; + mbx.rq.rq_num = qidx; + mbx.rq.cfg = pf_rq_cfg->value; + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.msg.msg = NIC_MBOX_MSG_SQ_CFG; + mbx.sq.qs_num = nic->vf_id; + mbx.sq.sq_num = qidx; + mbx.sq.sqs_mode = nic->sqs_mode; + mbx.sq.cfg = (nic->vf_id << 3) | qidx; + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + +#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__ + qs_cfg->be = 1; +#endif + /* Send a mailbox msg to PF to config Qset */ + mbx.msg.msg = NIC_MBOX_MSG_QS_CFG; + mbx.qs.num = nic->vf_id; + mbx.qs.cfg = qs_cfg->value; + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + struct pf_rq_drop_cfg *drop_cfg; + + /* Enable CQ drop to reserve sufficient CQEs for all tx packets */ + mbx.msg.msg = NIC_MBOX_MSG_RQ_DROP_CFG; + mbx.rq.qs_num = nic->vf_id; + mbx.rq.rq_num = qidx; + drop_cfg = (struct pf_rq_drop_cfg *)&mbx.rq.cfg; + drop_cfg->value = 0; + if (enable) { + drop_cfg->cq_red = 1; + drop_cfg->cq_drop = 2; + } + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu) +{ + struct nic_mbx mbx = { .msg = { 0 } }; 
+ + mbx.msg.msg = NIC_MBOX_MSG_SET_MAX_FRS; + mbx.frs.max_frs = mtu; + mbx.frs.vf_id = nic->vf_id; + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_rq_sync(struct nicvf *nic) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + /* Make sure all packets in the pipeline are written back into mem */ + mbx.msg.msg = NIC_MBOX_MSG_RQ_SW_SYNC; + mbx.rq.cfg = 0; + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.msg.msg = NIC_MBOX_MSG_RQ_BP_CFG; + mbx.rq.qs_num = nic->vf_id; + mbx.rq.rq_num = qidx; + mbx.rq.cfg = 0; + if (enable) + mbx.rq.cfg = (1ULL << 63) | (1ULL << 62) | (nic->vf_id << 0); + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_loopback_config(struct nicvf *nic, bool enable) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.lbk.msg = NIC_MBOX_MSG_LOOPBACK; + mbx.lbk.vf_id = nic->vf_id; + mbx.lbk.enable = enable; + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +int +nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask, + uint8_t tx_stat_mask, uint16_t rq_stat_mask, + uint16_t sq_stat_mask) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.reset_stat.msg = NIC_MBOX_MSG_RESET_STAT_COUNTER; + mbx.reset_stat.rx_stat_mask = rx_stat_mask; + mbx.reset_stat.tx_stat_mask = tx_stat_mask; + mbx.reset_stat.rq_stat_mask = rq_stat_mask; + mbx.reset_stat.sq_stat_mask = sq_stat_mask; + return nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +void +nicvf_mbox_shutdown(struct nicvf *nic) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.msg.msg = NIC_MBOX_MSG_SHUTDOWN; + nicvf_mbox_send_msg_to_pf(nic, &mbx); +} + +void +nicvf_mbox_cfg_done(struct nicvf *nic) +{ + struct nic_mbx mbx = { .msg = { 0 } }; + + mbx.msg.msg = NIC_MBOX_MSG_CFG_DONE; + nicvf_mbox_send_async_msg_to_pf(nic, &mbx); +} diff --git a/drivers/net/thunderx/base/nicvf_mbox.h b/drivers/net/thunderx/base/nicvf_mbox.h new file mode 100644 index 00000000..7c0c6a97 --- /dev/null +++ b/drivers/net/thunderx/base/nicvf_mbox.h @@ -0,0 +1,232 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
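Editor's note: taken together, the helpers in nicvf_mbox.c give the VF a small RPC surface over the PF mailbox. The sketch below shows a plausible order in which device start-up would drive them; error handling is trimmed, the qset field values are illustrative, and the real sequence is implemented in nicvf_ethdev.c, so this is a hedged outline rather than the driver's actual code:

#include "nicvf_plat.h"

/* Illustrative bring-up order of the mailbox API. */
static int
example_vf_bringup(struct nicvf *nic)
{
	struct pf_qs_cfg qs_cfg;
	int ret;

	ret = nicvf_mbox_check_pf_ready(nic);	/* NIC_MBOX_MSG_READY handshake */
	if (ret)
		return ret;

	qs_cfg.value = 0;
	qs_cfg.ena = 1;
	qs_cfg.vnic = nic->vf_id;
	ret = nicvf_mbox_qset_config(nic, &qs_cfg);	/* NIC_MBOX_MSG_QS_CFG */
	if (ret)
		return ret;

	ret = nicvf_mbox_get_rss_size(nic);	/* fills nic->rss_info.rss_size */
	if (ret)
		return ret;

	nicvf_mbox_cfg_done(nic);	/* async NIC_MBOX_MSG_CFG_DONE, no ack */
	return 0;
}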
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __THUNDERX_NICVF_MBOX__ +#define __THUNDERX_NICVF_MBOX__ + +#include <stdint.h> + +#include "nicvf_plat.h" + +/* PF <--> VF Mailbox communication + * Two 64bit registers are shared between PF and VF for each VF + * Writing into second register means end of message. + */ + +/* PF <--> VF mailbox communication */ +#define NIC_PF_VF_MAILBOX_SIZE 2 +#define NIC_MBOX_MSG_TIMEOUT 2000 /* ms */ + +/* Mailbox message types */ +#define NIC_MBOX_MSG_INVALID 0x00 /* Invalid message */ +#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */ +#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */ +#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */ +#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */ +#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */ +#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */ +#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */ +#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */ +#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */ +#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */ +#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */ +#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */ +#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */ +#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */ +#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */ +#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */ +#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */ +#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */ +#define NIC_MBOX_MSG_RESET_STAT_COUNTER 0x17 /* Reset statistics counters */ +#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */ +#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */ +#define NIC_MBOX_MSG_MAX 0x100 /* Maximum number of messages */ + +/* Get vNIC VF configuration */ +struct nic_cfg_msg { + uint8_t msg; + uint8_t vf_id; + uint8_t node_id; + bool tns_mode:1; + bool sqs_mode:1; + bool loopback_supported:1; + uint8_t mac_addr[NICVF_MAC_ADDR_SIZE]; +}; + +/* Qset configuration */ +struct qs_cfg_msg { + uint8_t msg; + uint8_t num; + uint8_t sqs_count; + uint64_t cfg; +}; + +/* Receive queue configuration */ +struct rq_cfg_msg { + uint8_t msg; + uint8_t qs_num; + uint8_t rq_num; + uint64_t cfg; +}; + +/* Send queue configuration */ +struct sq_cfg_msg { + uint8_t msg; + uint8_t qs_num; + uint8_t sq_num; + bool sqs_mode; + uint64_t cfg; +}; + +/* Set VF's MAC address */ +struct set_mac_msg { + uint8_t msg; + uint8_t vf_id; + uint8_t mac_addr[NICVF_MAC_ADDR_SIZE]; +}; + +/* Set Maximum frame size */ +struct set_frs_msg { + uint8_t msg; + uint8_t vf_id; + uint16_t max_frs; +}; + +/* Set CPI algorithm type */ +struct cpi_cfg_msg { + uint8_t msg; + uint8_t vf_id; + uint8_t rq_cnt; + uint8_t cpi_alg; +}; + +/* Get RSS table size */ +struct rss_sz_msg { + uint8_t msg; + uint8_t vf_id; + uint16_t ind_tbl_size; 
+}; + +/* Set RSS configuration */ +struct rss_cfg_msg { + uint8_t msg; + uint8_t vf_id; + uint8_t hash_bits; + uint8_t tbl_len; + uint8_t tbl_offset; +#define RSS_IND_TBL_LEN_PER_MBX_MSG 8 + uint8_t ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG]; +}; + +/* Physical interface link status */ +struct bgx_link_status { + uint8_t msg; + uint8_t link_up; + uint8_t duplex; + uint32_t speed; +}; + +/* Set interface in loopback mode */ +struct set_loopback { + uint8_t msg; + uint8_t vf_id; + bool enable; +}; + +/* Reset statistics counters */ +struct reset_stat_cfg { + uint8_t msg; + /* Bitmap to select NIC_PF_VNIC(vf_id)_RX_STAT(0..13) */ + uint16_t rx_stat_mask; + /* Bitmap to select NIC_PF_VNIC(vf_id)_TX_STAT(0..4) */ + uint8_t tx_stat_mask; + /* Bitmap to select NIC_PF_QS(0..127)_RQ(0..7)_STAT(0..1) + * bit14, bit15 NIC_PF_QS(vf_id)_RQ7_STAT(0..1) + * bit12, bit13 NIC_PF_QS(vf_id)_RQ6_STAT(0..1) + * .. + * bit2, bit3 NIC_PF_QS(vf_id)_RQ1_STAT(0..1) + * bit0, bit1 NIC_PF_QS(vf_id)_RQ0_STAT(0..1) + */ + uint16_t rq_stat_mask; + /* Bitmap to select NIC_PF_QS(0..127)_SQ(0..7)_STAT(0..1) + * bit14, bit15 NIC_PF_QS(vf_id)_SQ7_STAT(0..1) + * bit12, bit13 NIC_PF_QS(vf_id)_SQ6_STAT(0..1) + * .. + * bit2, bit3 NIC_PF_QS(vf_id)_SQ1_STAT(0..1) + * bit0, bit1 NIC_PF_QS(vf_id)_SQ0_STAT(0..1) + */ + uint16_t sq_stat_mask; +}; + +struct nic_mbx { +/* 128 bit shared memory between PF and each VF */ +union { + struct { uint8_t msg; } msg; + struct nic_cfg_msg nic_cfg; + struct qs_cfg_msg qs; + struct rq_cfg_msg rq; + struct sq_cfg_msg sq; + struct set_mac_msg mac; + struct set_frs_msg frs; + struct cpi_cfg_msg cpi_cfg; + struct rss_sz_msg rss_size; + struct rss_cfg_msg rss_cfg; + struct bgx_link_status link_status; + struct set_loopback lbk; + struct reset_stat_cfg reset_stat; +}; +}; + +NICVF_STATIC_ASSERT(sizeof(struct nic_mbx) <= 16); + +int nicvf_handle_mbx_intr(struct nicvf *nic); +int nicvf_mbox_check_pf_ready(struct nicvf *nic); +int nicvf_mbox_qset_config(struct nicvf *nic, struct pf_qs_cfg *qs_cfg); +int nicvf_mbox_rq_config(struct nicvf *nic, uint16_t qidx, + struct pf_rq_cfg *pf_rq_cfg); +int nicvf_mbox_sq_config(struct nicvf *nic, uint16_t qidx); +int nicvf_mbox_rq_drop_config(struct nicvf *nic, uint16_t qidx, bool enable); +int nicvf_mbox_rq_bp_config(struct nicvf *nic, uint16_t qidx, bool enable); +int nicvf_mbox_set_mac_addr(struct nicvf *nic, + const uint8_t mac[NICVF_MAC_ADDR_SIZE]); +int nicvf_mbox_config_cpi(struct nicvf *nic, uint32_t qcnt); +int nicvf_mbox_get_rss_size(struct nicvf *nic); +int nicvf_mbox_config_rss(struct nicvf *nic); +int nicvf_mbox_update_hw_max_frs(struct nicvf *nic, uint16_t mtu); +int nicvf_mbox_rq_sync(struct nicvf *nic); +int nicvf_mbox_loopback_config(struct nicvf *nic, bool enable); +int nicvf_mbox_reset_stat_counters(struct nicvf *nic, uint16_t rx_stat_mask, + uint8_t tx_stat_mask, uint16_t rq_stat_mask, uint16_t sq_stat_mask); +void nicvf_mbox_shutdown(struct nicvf *nic); +void nicvf_mbox_cfg_done(struct nicvf *nic); + +#endif /* __THUNDERX_NICVF_MBOX__ */ diff --git a/drivers/net/thunderx/base/nicvf_plat.h b/drivers/net/thunderx/base/nicvf_plat.h new file mode 100644 index 00000000..83c1844d --- /dev/null +++ b/drivers/net/thunderx/base/nicvf_plat.h @@ -0,0 +1,132 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. 
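Editor's note: because struct nic_mbx must fit in the 128-bit mailbox (see the static assert above), bulk data such as the RSS indirection table is split across messages: rss_cfg_msg carries at most RSS_IND_TBL_LEN_PER_MBX_MSG entries, with the first chunk sent as NIC_MBOX_MSG_RSS_CFG and the rest as NIC_MBOX_MSG_RSS_CFG_CONT (see nicvf_mbox_config_rss() above). The arithmetic, as a small helper that is not part of the patch: a full 128-entry table (NIC_MAX_RSS_IDR_TBL_SIZE) therefore takes 16 messages.

#include "nicvf_plat.h"

/* Illustrative only: number of mailbox messages needed for a RETA of
 * 'rss_size' entries, at RSS_IND_TBL_LEN_PER_MBX_MSG (8) entries each. */
static inline unsigned int
example_rss_cfg_msg_count(unsigned int rss_size)
{
	return (rss_size + RSS_IND_TBL_LEN_PER_MBX_MSG - 1) /
	       RSS_IND_TBL_LEN_PER_MBX_MSG;
}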
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _THUNDERX_NICVF_H +#define _THUNDERX_NICVF_H + +/* Platform/OS/arch specific abstractions */ + +/* log */ +#include <rte_log.h> +#include "../nicvf_logs.h" + +#define nicvf_log_error(s, ...) PMD_DRV_LOG(ERR, s, ##__VA_ARGS__) + +#define nicvf_log_debug(s, ...) PMD_DRV_LOG(DEBUG, s, ##__VA_ARGS__) + +#define nicvf_mbox_log(s, ...) PMD_MBOX_LOG(DEBUG, s, ##__VA_ARGS__) + +#define nicvf_log(s, ...) 
fprintf(stderr, s, ##__VA_ARGS__) + +/* delay */ +#include <rte_cycles.h> +#define nicvf_delay_us(x) rte_delay_us(x) + +/* barrier */ +#include <rte_atomic.h> +#define nicvf_smp_wmb() rte_smp_wmb() +#define nicvf_smp_rmb() rte_smp_rmb() + +/* utils */ +#include <rte_common.h> +#define nicvf_min(x, y) RTE_MIN(x, y) + +/* byte order */ +#include <rte_byteorder.h> +#define nicvf_cpu_to_be_64(x) rte_cpu_to_be_64(x) +#define nicvf_be_to_cpu_64(x) rte_be_to_cpu_64(x) + +/* Constants */ +#include <rte_ether.h> +#define NICVF_MAC_ADDR_SIZE ETHER_ADDR_LEN + +/* ARM64 specific functions */ +#if defined(RTE_ARCH_ARM64) +#define nicvf_prefetch_store_keep(_ptr) ({\ + asm volatile("prfm pstl1keep, %a0\n" : : "p" (_ptr)); }) + +static inline void __attribute__((always_inline)) +nicvf_addr_write(uintptr_t addr, uint64_t val) +{ + asm volatile( + "str %x[val], [%x[addr]]" + : + : [val] "r" (val), [addr] "r" (addr)); +} + +static inline uint64_t __attribute__((always_inline)) +nicvf_addr_read(uintptr_t addr) +{ + uint64_t val; + + asm volatile( + "ldr %x[val], [%x[addr]]" + : [val] "=r" (val) + : [addr] "r" (addr)); + return val; +} + +#define NICVF_LOAD_PAIR(reg1, reg2, addr) ({ \ + asm volatile( \ + "ldp %x[x1], %x[x0], [%x[p1]]" \ + : [x1]"=r"(reg1), [x0]"=r"(reg2)\ + : [p1]"r"(addr) \ + ); }) + +#else /* non optimized functions for building on non arm64 arch */ + +#define nicvf_prefetch_store_keep(_ptr) do {} while (0) + +static inline void __attribute__((always_inline)) +nicvf_addr_write(uintptr_t addr, uint64_t val) +{ + *(volatile uint64_t *)addr = val; +} + +static inline uint64_t __attribute__((always_inline)) +nicvf_addr_read(uintptr_t addr) +{ + return *(volatile uint64_t *)addr; +} + +#define NICVF_LOAD_PAIR(reg1, reg2, addr) \ +do { \ + reg1 = nicvf_addr_read((uintptr_t)addr); \ + reg2 = nicvf_addr_read((uintptr_t)addr + 8); \ +} while (0) + +#endif + +#include "nicvf_hw.h" +#include "nicvf_mbox.h" + +#endif /* _THUNDERX_NICVF_H */ diff --git a/drivers/net/thunderx/nicvf_ethdev.c b/drivers/net/thunderx/nicvf_ethdev.c new file mode 100644 index 00000000..48ed3812 --- /dev/null +++ b/drivers/net/thunderx/nicvf_ethdev.c @@ -0,0 +1,1791 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
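Editor's note: nicvf_plat.h keeps the OS and architecture specifics behind small wrappers, so the base code compiles unchanged on arm64 (inline str/ldr/ldp) and on other architectures (plain volatile loads and stores). An illustrative use of NICVF_LOAD_PAIR, assuming the 16-byte sq_entry_t layout from nicvf_hw_defs.h; this helper is not part of the patch:

#include "nicvf_plat.h"

/* Illustrative: fetch both 64-bit words of a 16-byte queue descriptor.
 * On arm64 this compiles to a single ldp; elsewhere it falls back to
 * two volatile 64-bit loads. */
static inline void
example_read_sq_entry(const union sq_entry_t *entry,
		      uint64_t *w0, uint64_t *w1)
{
	NICVF_LOAD_PAIR(*w0, *w1, entry->buff);
}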
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <assert.h> +#include <stdio.h> +#include <stdbool.h> +#include <errno.h> +#include <stdint.h> +#include <string.h> +#include <unistd.h> +#include <stdarg.h> +#include <inttypes.h> +#include <netinet/in.h> +#include <sys/queue.h> +#include <sys/timerfd.h> + +#include <rte_alarm.h> +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_debug.h> +#include <rte_dev.h> +#include <rte_eal.h> +#include <rte_ether.h> +#include <rte_ethdev.h> +#include <rte_interrupts.h> +#include <rte_log.h> +#include <rte_memory.h> +#include <rte_memzone.h> +#include <rte_malloc.h> +#include <rte_random.h> +#include <rte_pci.h> +#include <rte_tailq.h> + +#include "base/nicvf_plat.h" + +#include "nicvf_ethdev.h" +#include "nicvf_rxtx.h" +#include "nicvf_logs.h" + +static void nicvf_dev_stop(struct rte_eth_dev *dev); + +static inline int +nicvf_atomic_write_link_status(struct rte_eth_dev *dev, + struct rte_eth_link *link) +{ + struct rte_eth_link *dst = &dev->data->dev_link; + struct rte_eth_link *src = link; + + if (rte_atomic64_cmpset((uint64_t *)dst, *(uint64_t *)dst, + *(uint64_t *)src) == 0) + return -1; + + return 0; +} + +static inline void +nicvf_set_eth_link_status(struct nicvf *nic, struct rte_eth_link *link) +{ + link->link_status = nic->link_up; + link->link_duplex = ETH_LINK_AUTONEG; + if (nic->duplex == NICVF_HALF_DUPLEX) + link->link_duplex = ETH_LINK_HALF_DUPLEX; + else if (nic->duplex == NICVF_FULL_DUPLEX) + link->link_duplex = ETH_LINK_FULL_DUPLEX; + link->link_speed = nic->speed; + link->link_autoneg = ETH_LINK_SPEED_AUTONEG; +} + +static void +nicvf_interrupt(void *arg) +{ + struct nicvf *nic = arg; + + if (nicvf_reg_poll_interrupts(nic) == NIC_MBOX_MSG_BGX_LINK_CHANGE) { + if (nic->eth_dev->data->dev_conf.intr_conf.lsc) + nicvf_set_eth_link_status(nic, + &nic->eth_dev->data->dev_link); + _rte_eth_dev_callback_process(nic->eth_dev, + RTE_ETH_EVENT_INTR_LSC); + } + + rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, + nicvf_interrupt, nic); +} + +static int +nicvf_periodic_alarm_start(struct nicvf *nic) +{ + return rte_eal_alarm_set(NICVF_INTR_POLL_INTERVAL_MS * 1000, + nicvf_interrupt, nic); +} + +static int +nicvf_periodic_alarm_stop(struct nicvf *nic) +{ + return rte_eal_alarm_cancel(nicvf_interrupt, nic); +} + +/* + * Return 0 means link status changed, -1 means not changed + */ +static int +nicvf_dev_link_update(struct rte_eth_dev *dev, + int wait_to_complete __rte_unused) +{ + struct rte_eth_link link; + struct nicvf *nic = nicvf_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + + memset(&link, 0, sizeof(link)); + nicvf_set_eth_link_status(nic, &link); + return nicvf_atomic_write_link_status(dev, &link); +} + +static int +nicvf_dev_set_mtu(struct rte_eth_dev *dev, uint16_t mtu) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + uint32_t buffsz, frame_size = mtu + ETHER_HDR_LEN + ETHER_CRC_LEN; + + PMD_INIT_FUNC_TRACE(); + + if (frame_size > 
NIC_HW_MAX_FRS) + return -EINVAL; + + if (frame_size < NIC_HW_MIN_FRS) + return -EINVAL; + + buffsz = dev->data->min_rx_buf_size - RTE_PKTMBUF_HEADROOM; + + /* + * Refuse mtu that requires the support of scattered packets + * when this feature has not been enabled before. + */ + if (!dev->data->scattered_rx && + (frame_size + 2 * VLAN_TAG_SIZE > buffsz)) + return -EINVAL; + + /* check <seg size> * <max_seg> >= max_frame */ + if (dev->data->scattered_rx && + (frame_size + 2 * VLAN_TAG_SIZE > buffsz * NIC_HW_MAX_SEGS)) + return -EINVAL; + + if (frame_size > ETHER_MAX_LEN) + dev->data->dev_conf.rxmode.jumbo_frame = 1; + else + dev->data->dev_conf.rxmode.jumbo_frame = 0; + + if (nicvf_mbox_update_hw_max_frs(nic, frame_size)) + return -EINVAL; + + /* Update max frame size */ + dev->data->dev_conf.rxmode.max_rx_pkt_len = (uint32_t)frame_size; + nic->mtu = mtu; + return 0; +} + +static int +nicvf_dev_get_reg_length(struct rte_eth_dev *dev __rte_unused) +{ + return nicvf_reg_get_count(); +} + +static int +nicvf_dev_get_regs(struct rte_eth_dev *dev, struct rte_dev_reg_info *regs) +{ + uint64_t *data = regs->data; + struct nicvf *nic = nicvf_pmd_priv(dev); + + if (data == NULL) + return -EINVAL; + + /* Support only full register dump */ + if ((regs->length == 0) || + (regs->length == (uint32_t)nicvf_reg_get_count())) { + regs->version = nic->vendor_id << 16 | nic->device_id; + nicvf_reg_dump(nic, data); + return 0; + } + return -ENOTSUP; +} + +static void +nicvf_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) +{ + uint16_t qidx; + struct nicvf_hw_rx_qstats rx_qstats; + struct nicvf_hw_tx_qstats tx_qstats; + struct nicvf_hw_stats port_stats; + struct nicvf *nic = nicvf_pmd_priv(dev); + + /* Reading per RX ring stats */ + for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) { + if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nicvf_hw_get_rx_qstats(nic, &rx_qstats, qidx); + stats->q_ibytes[qidx] = rx_qstats.q_rx_bytes; + stats->q_ipackets[qidx] = rx_qstats.q_rx_packets; + } + + /* Reading per TX ring stats */ + for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) { + if (qidx == RTE_ETHDEV_QUEUE_STAT_CNTRS) + break; + + nicvf_hw_get_tx_qstats(nic, &tx_qstats, qidx); + stats->q_obytes[qidx] = tx_qstats.q_tx_bytes; + stats->q_opackets[qidx] = tx_qstats.q_tx_packets; + } + + nicvf_hw_get_stats(nic, &port_stats); + stats->ibytes = port_stats.rx_bytes; + stats->ipackets = port_stats.rx_ucast_frames; + stats->ipackets += port_stats.rx_bcast_frames; + stats->ipackets += port_stats.rx_mcast_frames; + stats->ierrors = port_stats.rx_l2_errors; + stats->imissed = port_stats.rx_drop_red; + stats->imissed += port_stats.rx_drop_overrun; + stats->imissed += port_stats.rx_drop_bcast; + stats->imissed += port_stats.rx_drop_mcast; + stats->imissed += port_stats.rx_drop_l3_bcast; + stats->imissed += port_stats.rx_drop_l3_mcast; + + stats->obytes = port_stats.tx_bytes_ok; + stats->opackets = port_stats.tx_ucast_frames_ok; + stats->opackets += port_stats.tx_bcast_frames_ok; + stats->opackets += port_stats.tx_mcast_frames_ok; + stats->oerrors = port_stats.tx_drops; +} + +static const uint32_t * +nicvf_dev_supported_ptypes_get(struct rte_eth_dev *dev) +{ + size_t copied; + static uint32_t ptypes[32]; + struct nicvf *nic = nicvf_pmd_priv(dev); + static const uint32_t ptypes_pass1[] = { + RTE_PTYPE_L3_IPV4, + RTE_PTYPE_L3_IPV4_EXT, + RTE_PTYPE_L3_IPV6, + RTE_PTYPE_L3_IPV6_EXT, + RTE_PTYPE_L4_TCP, + RTE_PTYPE_L4_UDP, + RTE_PTYPE_L4_FRAG, + }; + static const uint32_t ptypes_pass2[] = { + 
RTE_PTYPE_TUNNEL_GRE, + RTE_PTYPE_TUNNEL_GENEVE, + RTE_PTYPE_TUNNEL_VXLAN, + RTE_PTYPE_TUNNEL_NVGRE, + }; + static const uint32_t ptypes_end = RTE_PTYPE_UNKNOWN; + + copied = sizeof(ptypes_pass1); + memcpy(ptypes, ptypes_pass1, copied); + if (nicvf_hw_version(nic) == NICVF_PASS2) { + memcpy((char *)ptypes + copied, ptypes_pass2, + sizeof(ptypes_pass2)); + copied += sizeof(ptypes_pass2); + } + + memcpy((char *)ptypes + copied, &ptypes_end, sizeof(ptypes_end)); + if (dev->rx_pkt_burst == nicvf_recv_pkts || + dev->rx_pkt_burst == nicvf_recv_pkts_multiseg) + return ptypes; + + return NULL; +} + +static void +nicvf_dev_stats_reset(struct rte_eth_dev *dev) +{ + int i; + uint16_t rxqs = 0, txqs = 0; + struct nicvf *nic = nicvf_pmd_priv(dev); + + for (i = 0; i < dev->data->nb_rx_queues; i++) + rxqs |= (0x3 << (i * 2)); + for (i = 0; i < dev->data->nb_tx_queues; i++) + txqs |= (0x3 << (i * 2)); + + nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, rxqs, txqs); +} + +/* Promiscuous mode enabled by default in LMAC to VF 1:1 map configuration */ +static void +nicvf_dev_promisc_enable(struct rte_eth_dev *dev __rte_unused) +{ +} + +static inline uint64_t +nicvf_rss_ethdev_to_nic(struct nicvf *nic, uint64_t ethdev_rss) +{ + uint64_t nic_rss = 0; + + if (ethdev_rss & ETH_RSS_IPV4) + nic_rss |= RSS_IP_ENA; + + if (ethdev_rss & ETH_RSS_IPV6) + nic_rss |= RSS_IP_ENA; + + if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_UDP) + nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA); + + if (ethdev_rss & ETH_RSS_NONFRAG_IPV4_TCP) + nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA); + + if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_UDP) + nic_rss |= (RSS_IP_ENA | RSS_UDP_ENA); + + if (ethdev_rss & ETH_RSS_NONFRAG_IPV6_TCP) + nic_rss |= (RSS_IP_ENA | RSS_TCP_ENA); + + if (ethdev_rss & ETH_RSS_PORT) + nic_rss |= RSS_L2_EXTENDED_HASH_ENA; + + if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) { + if (ethdev_rss & ETH_RSS_VXLAN) + nic_rss |= RSS_TUN_VXLAN_ENA; + + if (ethdev_rss & ETH_RSS_GENEVE) + nic_rss |= RSS_TUN_GENEVE_ENA; + + if (ethdev_rss & ETH_RSS_NVGRE) + nic_rss |= RSS_TUN_NVGRE_ENA; + } + + return nic_rss; +} + +static inline uint64_t +nicvf_rss_nic_to_ethdev(struct nicvf *nic, uint64_t nic_rss) +{ + uint64_t ethdev_rss = 0; + + if (nic_rss & RSS_IP_ENA) + ethdev_rss |= (ETH_RSS_IPV4 | ETH_RSS_IPV6); + + if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_TCP_ENA)) + ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_TCP | + ETH_RSS_NONFRAG_IPV6_TCP); + + if ((nic_rss & RSS_IP_ENA) && (nic_rss & RSS_UDP_ENA)) + ethdev_rss |= (ETH_RSS_NONFRAG_IPV4_UDP | + ETH_RSS_NONFRAG_IPV6_UDP); + + if (nic_rss & RSS_L2_EXTENDED_HASH_ENA) + ethdev_rss |= ETH_RSS_PORT; + + if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) { + if (nic_rss & RSS_TUN_VXLAN_ENA) + ethdev_rss |= ETH_RSS_VXLAN; + + if (nic_rss & RSS_TUN_GENEVE_ENA) + ethdev_rss |= ETH_RSS_GENEVE; + + if (nic_rss & RSS_TUN_NVGRE_ENA) + ethdev_rss |= ETH_RSS_NVGRE; + } + return ethdev_rss; +} + +static int +nicvf_dev_reta_query(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; + int ret, i, j; + + if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) { + RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE); + return -EINVAL; + } + + ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); + if (ret) + return ret; + + /* Copy RETA table */ + for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / 
RTE_RETA_GROUP_SIZE); i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + reta_conf[i].reta[j] = tbl[j]; + } + + return 0; +} + +static int +nicvf_dev_reta_update(struct rte_eth_dev *dev, + struct rte_eth_rss_reta_entry64 *reta_conf, + uint16_t reta_size) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + uint8_t tbl[NIC_MAX_RSS_IDR_TBL_SIZE]; + int ret, i, j; + + if (reta_size != NIC_MAX_RSS_IDR_TBL_SIZE) { + RTE_LOG(ERR, PMD, "The size of hash lookup table configured " + "(%d) doesn't match the number hardware can supported " + "(%d)", reta_size, NIC_MAX_RSS_IDR_TBL_SIZE); + return -EINVAL; + } + + ret = nicvf_rss_reta_query(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); + if (ret) + return ret; + + /* Copy RETA table */ + for (i = 0; i < (NIC_MAX_RSS_IDR_TBL_SIZE / RTE_RETA_GROUP_SIZE); i++) { + for (j = 0; j < RTE_RETA_GROUP_SIZE; j++) + if ((reta_conf[i].mask >> j) & 0x01) + tbl[j] = reta_conf[i].reta[j]; + } + + return nicvf_rss_reta_update(nic, tbl, NIC_MAX_RSS_IDR_TBL_SIZE); +} + +static int +nicvf_dev_rss_hash_conf_get(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + + if (rss_conf->rss_key) + nicvf_rss_get_key(nic, rss_conf->rss_key); + + rss_conf->rss_key_len = RSS_HASH_KEY_BYTE_SIZE; + rss_conf->rss_hf = nicvf_rss_nic_to_ethdev(nic, nicvf_rss_get_cfg(nic)); + return 0; +} + +static int +nicvf_dev_rss_hash_update(struct rte_eth_dev *dev, + struct rte_eth_rss_conf *rss_conf) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + uint64_t nic_rss; + + if (rss_conf->rss_key && + rss_conf->rss_key_len != RSS_HASH_KEY_BYTE_SIZE) { + RTE_LOG(ERR, PMD, "Hash key size mismatch %d", + rss_conf->rss_key_len); + return -EINVAL; + } + + if (rss_conf->rss_key) + nicvf_rss_set_key(nic, rss_conf->rss_key); + + nic_rss = nicvf_rss_ethdev_to_nic(nic, rss_conf->rss_hf); + nicvf_rss_set_cfg(nic, nic_rss); + return 0; +} + +static int +nicvf_qset_cq_alloc(struct nicvf *nic, struct nicvf_rxq *rxq, uint16_t qidx, + uint32_t desc_cnt) +{ + const struct rte_memzone *rz; + uint32_t ring_size = desc_cnt * sizeof(union cq_entry_t); + + rz = rte_eth_dma_zone_reserve(nic->eth_dev, "cq_ring", qidx, ring_size, + NICVF_CQ_BASE_ALIGN_BYTES, nic->node); + if (rz == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate mem for cq hw ring"); + return -ENOMEM; + } + + memset(rz->addr, 0, ring_size); + + rxq->phys = rz->phys_addr; + rxq->desc = rz->addr; + rxq->qlen_mask = desc_cnt - 1; + + return 0; +} + +static int +nicvf_qset_sq_alloc(struct nicvf *nic, struct nicvf_txq *sq, uint16_t qidx, + uint32_t desc_cnt) +{ + const struct rte_memzone *rz; + uint32_t ring_size = desc_cnt * sizeof(union sq_entry_t); + + rz = rte_eth_dma_zone_reserve(nic->eth_dev, "sq", qidx, ring_size, + NICVF_SQ_BASE_ALIGN_BYTES, nic->node); + if (rz == NULL) { + PMD_INIT_LOG(ERR, "Failed allocate mem for sq hw ring"); + return -ENOMEM; + } + + memset(rz->addr, 0, ring_size); + + sq->phys = rz->phys_addr; + sq->desc = rz->addr; + sq->qlen_mask = desc_cnt - 1; + + return 0; +} + +static int +nicvf_qset_rbdr_alloc(struct nicvf *nic, uint32_t desc_cnt, uint32_t buffsz) +{ + struct nicvf_rbdr *rbdr; + const struct rte_memzone *rz; + uint32_t ring_size; + + assert(nic->rbdr == NULL); + rbdr = rte_zmalloc_socket("rbdr", sizeof(struct nicvf_rbdr), + RTE_CACHE_LINE_SIZE, nic->node); + if (rbdr == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr"); + return -ENOMEM; + } + + ring_size = sizeof(struct rbdr_entry_t) * desc_cnt; + rz = 
rte_eth_dma_zone_reserve(nic->eth_dev, "rbdr", 0, ring_size, + NICVF_RBDR_BASE_ALIGN_BYTES, nic->node); + if (rz == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate mem for rbdr desc ring"); + return -ENOMEM; + } + + memset(rz->addr, 0, ring_size); + + rbdr->phys = rz->phys_addr; + rbdr->tail = 0; + rbdr->next_tail = 0; + rbdr->desc = rz->addr; + rbdr->buffsz = buffsz; + rbdr->qlen_mask = desc_cnt - 1; + rbdr->rbdr_status = + nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_STATUS0; + rbdr->rbdr_door = + nicvf_qset_base(nic, 0) + NIC_QSET_RBDR_0_1_DOOR; + + nic->rbdr = rbdr; + return 0; +} + +static void +nicvf_rbdr_release_mbuf(struct nicvf *nic, nicvf_phys_addr_t phy) +{ + uint16_t qidx; + void *obj; + struct nicvf_rxq *rxq; + + for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { + rxq = nic->eth_dev->data->rx_queues[qidx]; + if (rxq->precharge_cnt) { + obj = (void *)nicvf_mbuff_phy2virt(phy, + rxq->mbuf_phys_off); + rte_mempool_put(rxq->pool, obj); + rxq->precharge_cnt--; + break; + } + } +} + +static inline void +nicvf_rbdr_release_mbufs(struct nicvf *nic) +{ + uint32_t qlen_mask, head; + struct rbdr_entry_t *entry; + struct nicvf_rbdr *rbdr = nic->rbdr; + + qlen_mask = rbdr->qlen_mask; + head = rbdr->head; + while (head != rbdr->tail) { + entry = rbdr->desc + head; + nicvf_rbdr_release_mbuf(nic, entry->full_addr); + head++; + head = head & qlen_mask; + } +} + +static inline void +nicvf_tx_queue_release_mbufs(struct nicvf_txq *txq) +{ + uint32_t head; + + head = txq->head; + while (head != txq->tail) { + if (txq->txbuffs[head]) { + rte_pktmbuf_free_seg(txq->txbuffs[head]); + txq->txbuffs[head] = NULL; + } + head++; + head = head & txq->qlen_mask; + } +} + +static void +nicvf_tx_queue_reset(struct nicvf_txq *txq) +{ + uint32_t txq_desc_cnt = txq->qlen_mask + 1; + + memset(txq->desc, 0, sizeof(union sq_entry_t) * txq_desc_cnt); + memset(txq->txbuffs, 0, sizeof(struct rte_mbuf *) * txq_desc_cnt); + txq->tail = 0; + txq->head = 0; + txq->xmit_bufs = 0; +} + +static inline int +nicvf_start_tx_queue(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct nicvf_txq *txq; + int ret; + + if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + txq = dev->data->tx_queues[qidx]; + txq->pool = NULL; + ret = nicvf_qset_sq_config(nicvf_pmd_priv(dev), qidx, txq); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure sq %d %d", qidx, ret); + goto config_sq_error; + } + + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; + return ret; + +config_sq_error: + nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx); + return ret; +} + +static inline int +nicvf_stop_tx_queue(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct nicvf_txq *txq; + int ret; + + if (dev->data->tx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) + return 0; + + ret = nicvf_qset_sq_reclaim(nicvf_pmd_priv(dev), qidx); + if (ret) + PMD_INIT_LOG(ERR, "Failed to reclaim sq %d %d", qidx, ret); + + txq = dev->data->tx_queues[qidx]; + nicvf_tx_queue_release_mbufs(txq); + nicvf_tx_queue_reset(txq); + + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + return ret; +} + +static inline int +nicvf_configure_cpi(struct rte_eth_dev *dev) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + uint16_t qidx, qcnt; + int ret; + + /* Count started rx queues */ + for (qidx = qcnt = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) + if (dev->data->rx_queue_state[qidx] == + RTE_ETH_QUEUE_STATE_STARTED) + qcnt++; + + nic->cpi_alg = CPI_ALG_NONE; + ret = nicvf_mbox_config_cpi(nic, qcnt); + if (ret) + 
PMD_INIT_LOG(ERR, "Failed to configure CPI %d", ret); + + return ret; +} + +static inline int +nicvf_configure_rss(struct rte_eth_dev *dev) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + uint64_t rsshf; + int ret = -EINVAL; + + rsshf = nicvf_rss_ethdev_to_nic(nic, + dev->data->dev_conf.rx_adv_conf.rss_conf.rss_hf); + PMD_DRV_LOG(INFO, "mode=%d rx_queues=%d loopback=%d rsshf=0x%" PRIx64, + dev->data->dev_conf.rxmode.mq_mode, + nic->eth_dev->data->nb_rx_queues, + nic->eth_dev->data->dev_conf.lpbk_mode, rsshf); + + if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_NONE) + ret = nicvf_rss_term(nic); + else if (dev->data->dev_conf.rxmode.mq_mode == ETH_MQ_RX_RSS) + ret = nicvf_rss_config(nic, + nic->eth_dev->data->nb_rx_queues, rsshf); + if (ret) + PMD_INIT_LOG(ERR, "Failed to configure RSS %d", ret); + + return ret; +} + +static int +nicvf_configure_rss_reta(struct rte_eth_dev *dev) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + unsigned int idx, qmap_size; + uint8_t qmap[RTE_MAX_QUEUES_PER_PORT]; + uint8_t default_reta[NIC_MAX_RSS_IDR_TBL_SIZE]; + + if (nic->cpi_alg != CPI_ALG_NONE) + return -EINVAL; + + /* Prepare queue map */ + for (idx = 0, qmap_size = 0; idx < dev->data->nb_rx_queues; idx++) { + if (dev->data->rx_queue_state[idx] == + RTE_ETH_QUEUE_STATE_STARTED) + qmap[qmap_size++] = idx; + } + + /* Update default RSS RETA */ + for (idx = 0; idx < NIC_MAX_RSS_IDR_TBL_SIZE; idx++) + default_reta[idx] = qmap[idx % qmap_size]; + + return nicvf_rss_reta_update(nic, default_reta, + NIC_MAX_RSS_IDR_TBL_SIZE); +} + +static void +nicvf_dev_tx_queue_release(void *sq) +{ + struct nicvf_txq *txq; + + PMD_INIT_FUNC_TRACE(); + + txq = (struct nicvf_txq *)sq; + if (txq) { + if (txq->txbuffs != NULL) { + nicvf_tx_queue_release_mbufs(txq); + rte_free(txq->txbuffs); + txq->txbuffs = NULL; + } + rte_free(txq); + } +} + +static void +nicvf_set_tx_function(struct rte_eth_dev *dev) +{ + struct nicvf_txq *txq; + size_t i; + bool multiseg = false; + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txq = dev->data->tx_queues[i]; + if ((txq->txq_flags & ETH_TXQ_FLAGS_NOMULTSEGS) == 0) { + multiseg = true; + break; + } + } + + /* Use a simple Tx queue (no offloads, no multi segs) if possible */ + if (multiseg) { + PMD_DRV_LOG(DEBUG, "Using multi-segment tx callback"); + dev->tx_pkt_burst = nicvf_xmit_pkts_multiseg; + } else { + PMD_DRV_LOG(DEBUG, "Using single-segment tx callback"); + dev->tx_pkt_burst = nicvf_xmit_pkts; + } + + if (txq->pool_free == nicvf_single_pool_free_xmited_buffers) + PMD_DRV_LOG(DEBUG, "Using single-mempool tx free method"); + else + PMD_DRV_LOG(DEBUG, "Using multi-mempool tx free method"); +} + +static void +nicvf_set_rx_function(struct rte_eth_dev *dev) +{ + if (dev->data->scattered_rx) { + PMD_DRV_LOG(DEBUG, "Using multi-segment rx callback"); + dev->rx_pkt_burst = nicvf_recv_pkts_multiseg; + } else { + PMD_DRV_LOG(DEBUG, "Using single-segment rx callback"); + dev->rx_pkt_burst = nicvf_recv_pkts; + } +} + +static int +nicvf_dev_tx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_txconf *tx_conf) +{ + uint16_t tx_free_thresh; + uint8_t is_single_pool; + struct nicvf_txq *txq; + struct nicvf *nic = nicvf_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + + /* Socket id check */ + if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) + PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", + socket_id, nic->node); + + /* Tx deferred start is not supported */ + if (tx_conf->tx_deferred_start) { + 
PMD_INIT_LOG(ERR, "Tx deferred start not supported"); + return -EINVAL; + } + + /* Roundup nb_desc to available qsize and validate max number of desc */ + nb_desc = nicvf_qsize_sq_roundup(nb_desc); + if (nb_desc == 0) { + PMD_INIT_LOG(ERR, "Value of nb_desc beyond available sq qsize"); + return -EINVAL; + } + + /* Validate tx_free_thresh */ + tx_free_thresh = (uint16_t)((tx_conf->tx_free_thresh) ? + tx_conf->tx_free_thresh : + NICVF_DEFAULT_TX_FREE_THRESH); + + if (tx_free_thresh > (nb_desc) || + tx_free_thresh > NICVF_MAX_TX_FREE_THRESH) { + PMD_INIT_LOG(ERR, + "tx_free_thresh must be less than the number of TX " + "descriptors. (tx_free_thresh=%u port=%d " + "queue=%d)", (unsigned int)tx_free_thresh, + (int)dev->data->port_id, (int)qidx); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed. */ + if (dev->data->tx_queues[qidx] != NULL) { + PMD_TX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", + qidx); + nicvf_dev_tx_queue_release(dev->data->tx_queues[qidx]); + dev->data->tx_queues[qidx] = NULL; + } + + /* Allocating tx queue data structure */ + txq = rte_zmalloc_socket("ethdev TX queue", sizeof(struct nicvf_txq), + RTE_CACHE_LINE_SIZE, nic->node); + if (txq == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate txq=%d", qidx); + return -ENOMEM; + } + + txq->nic = nic; + txq->queue_id = qidx; + txq->tx_free_thresh = tx_free_thresh; + txq->txq_flags = tx_conf->txq_flags; + txq->sq_head = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_HEAD; + txq->sq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_SQ_0_7_DOOR; + is_single_pool = (txq->txq_flags & ETH_TXQ_FLAGS_NOREFCOUNT && + txq->txq_flags & ETH_TXQ_FLAGS_NOMULTMEMP); + + /* Choose optimum free threshold value for multipool case */ + if (!is_single_pool) { + txq->tx_free_thresh = (uint16_t) + (tx_conf->tx_free_thresh == NICVF_DEFAULT_TX_FREE_THRESH ? 
+ NICVF_TX_FREE_MPOOL_THRESH : + tx_conf->tx_free_thresh); + txq->pool_free = nicvf_multi_pool_free_xmited_buffers; + } else { + txq->pool_free = nicvf_single_pool_free_xmited_buffers; + } + + /* Allocate software ring */ + txq->txbuffs = rte_zmalloc_socket("txq->txbuffs", + nb_desc * sizeof(struct rte_mbuf *), + RTE_CACHE_LINE_SIZE, nic->node); + + if (txq->txbuffs == NULL) { + nicvf_dev_tx_queue_release(txq); + return -ENOMEM; + } + + if (nicvf_qset_sq_alloc(nic, txq, qidx, nb_desc)) { + PMD_INIT_LOG(ERR, "Failed to allocate mem for sq %d", qidx); + nicvf_dev_tx_queue_release(txq); + return -ENOMEM; + } + + nicvf_tx_queue_reset(txq); + + PMD_TX_LOG(DEBUG, "[%d] txq=%p nb_desc=%d desc=%p phys=0x%" PRIx64, + qidx, txq, nb_desc, txq->desc, txq->phys); + + dev->data->tx_queues[qidx] = txq; + dev->data->tx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +static inline void +nicvf_rx_queue_release_mbufs(struct nicvf_rxq *rxq) +{ + uint32_t rxq_cnt; + uint32_t nb_pkts, released_pkts = 0; + uint32_t refill_cnt = 0; + struct rte_eth_dev *dev = rxq->nic->eth_dev; + struct rte_mbuf *rx_pkts[NICVF_MAX_RX_FREE_THRESH]; + + if (dev->rx_pkt_burst == NULL) + return; + + while ((rxq_cnt = nicvf_dev_rx_queue_count(dev, rxq->queue_id))) { + nb_pkts = dev->rx_pkt_burst(rxq, rx_pkts, + NICVF_MAX_RX_FREE_THRESH); + PMD_DRV_LOG(INFO, "nb_pkts=%d rxq_cnt=%d", nb_pkts, rxq_cnt); + while (nb_pkts) { + rte_pktmbuf_free_seg(rx_pkts[--nb_pkts]); + released_pkts++; + } + } + + refill_cnt += nicvf_dev_rbdr_refill(dev, rxq->queue_id); + PMD_DRV_LOG(INFO, "free_cnt=%d refill_cnt=%d", + released_pkts, refill_cnt); +} + +static void +nicvf_rx_queue_reset(struct nicvf_rxq *rxq) +{ + rxq->head = 0; + rxq->available_space = 0; + rxq->recv_buffers = 0; +} + +static inline int +nicvf_start_rx_queue(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + struct nicvf_rxq *rxq; + int ret; + + if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STARTED) + return 0; + + /* Update rbdr pointer to all rxq */ + rxq = dev->data->rx_queues[qidx]; + rxq->shared_rbdr = nic->rbdr; + + ret = nicvf_qset_rq_config(nic, qidx, rxq); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure rq %d %d", qidx, ret); + goto config_rq_error; + } + ret = nicvf_qset_cq_config(nic, qidx, rxq); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure cq %d %d", qidx, ret); + goto config_cq_error; + } + + dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STARTED; + return 0; + +config_cq_error: + nicvf_qset_cq_reclaim(nic, qidx); +config_rq_error: + nicvf_qset_rq_reclaim(nic, qidx); + return ret; +} + +static inline int +nicvf_stop_rx_queue(struct rte_eth_dev *dev, uint16_t qidx) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + struct nicvf_rxq *rxq; + int ret, other_error; + + if (dev->data->rx_queue_state[qidx] == RTE_ETH_QUEUE_STATE_STOPPED) + return 0; + + ret = nicvf_qset_rq_reclaim(nic, qidx); + if (ret) + PMD_INIT_LOG(ERR, "Failed to reclaim rq %d %d", qidx, ret); + + other_error = ret; + rxq = dev->data->rx_queues[qidx]; + nicvf_rx_queue_release_mbufs(rxq); + nicvf_rx_queue_reset(rxq); + + ret = nicvf_qset_cq_reclaim(nic, qidx); + if (ret) + PMD_INIT_LOG(ERR, "Failed to reclaim cq %d %d", qidx, ret); + + other_error |= ret; + dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + return other_error; +} + +static void +nicvf_dev_rx_queue_release(void *rx_queue) +{ + struct nicvf_rxq *rxq = rx_queue; + + PMD_INIT_FUNC_TRACE(); + + if (rxq) + rte_free(rxq); +} + +static int 
+nicvf_dev_rx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) +{ + int ret; + + ret = nicvf_start_rx_queue(dev, qidx); + if (ret) + return ret; + + ret = nicvf_configure_cpi(dev); + if (ret) + return ret; + + return nicvf_configure_rss_reta(dev); +} + +static int +nicvf_dev_rx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) +{ + int ret; + + ret = nicvf_stop_rx_queue(dev, qidx); + ret |= nicvf_configure_cpi(dev); + ret |= nicvf_configure_rss_reta(dev); + return ret; +} + +static int +nicvf_dev_tx_queue_start(struct rte_eth_dev *dev, uint16_t qidx) +{ + return nicvf_start_tx_queue(dev, qidx); +} + +static int +nicvf_dev_tx_queue_stop(struct rte_eth_dev *dev, uint16_t qidx) +{ + return nicvf_stop_tx_queue(dev, qidx); +} + +static int +nicvf_dev_rx_queue_setup(struct rte_eth_dev *dev, uint16_t qidx, + uint16_t nb_desc, unsigned int socket_id, + const struct rte_eth_rxconf *rx_conf, + struct rte_mempool *mp) +{ + uint16_t rx_free_thresh; + struct nicvf_rxq *rxq; + struct nicvf *nic = nicvf_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + + /* Socket id check */ + if (socket_id != (unsigned int)SOCKET_ID_ANY && socket_id != nic->node) + PMD_DRV_LOG(WARNING, "socket_id expected %d, configured %d", + socket_id, nic->node); + + /* Mempool memory should be contiguous */ + if (mp->nb_mem_chunks != 1) { + PMD_INIT_LOG(ERR, "Non contiguous mempool, check huge page sz"); + return -EINVAL; + } + + /* Rx deferred start is not supported */ + if (rx_conf->rx_deferred_start) { + PMD_INIT_LOG(ERR, "Rx deferred start not supported"); + return -EINVAL; + } + + /* Roundup nb_desc to available qsize and validate max number of desc */ + nb_desc = nicvf_qsize_cq_roundup(nb_desc); + if (nb_desc == 0) { + PMD_INIT_LOG(ERR, "Value nb_desc beyond available hw cq qsize"); + return -EINVAL; + } + + /* Check rx_free_thresh upper bound */ + rx_free_thresh = (uint16_t)((rx_conf->rx_free_thresh) ? 
+ rx_conf->rx_free_thresh : + NICVF_DEFAULT_RX_FREE_THRESH); + if (rx_free_thresh > NICVF_MAX_RX_FREE_THRESH || + rx_free_thresh >= nb_desc * .75) { + PMD_INIT_LOG(ERR, "rx_free_thresh greater than expected %d", + rx_free_thresh); + return -EINVAL; + } + + /* Free memory prior to re-allocation if needed */ + if (dev->data->rx_queues[qidx] != NULL) { + PMD_RX_LOG(DEBUG, "Freeing memory prior to re-allocation %d", + qidx); + nicvf_dev_rx_queue_release(dev->data->rx_queues[qidx]); + dev->data->rx_queues[qidx] = NULL; + } + + /* Allocate rxq memory */ + rxq = rte_zmalloc_socket("ethdev rx queue", sizeof(struct nicvf_rxq), + RTE_CACHE_LINE_SIZE, nic->node); + if (rxq == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate rxq=%d", qidx); + return -ENOMEM; + } + + rxq->nic = nic; + rxq->pool = mp; + rxq->queue_id = qidx; + rxq->port_id = dev->data->port_id; + rxq->rx_free_thresh = rx_free_thresh; + rxq->rx_drop_en = rx_conf->rx_drop_en; + rxq->cq_status = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_STATUS; + rxq->cq_door = nicvf_qset_base(nic, qidx) + NIC_QSET_CQ_0_7_DOOR; + rxq->precharge_cnt = 0; + rxq->rbptr_offset = NICVF_CQE_RBPTR_WORD; + + /* Alloc completion queue */ + if (nicvf_qset_cq_alloc(nic, rxq, rxq->queue_id, nb_desc)) { + PMD_INIT_LOG(ERR, "failed to allocate cq %u", rxq->queue_id); + nicvf_dev_rx_queue_release(rxq); + return -ENOMEM; + } + + nicvf_rx_queue_reset(rxq); + + PMD_RX_LOG(DEBUG, "[%d] rxq=%p pool=%s nb_desc=(%d/%d) phy=%" PRIx64, + qidx, rxq, mp->name, nb_desc, + rte_mempool_avail_count(mp), rxq->phys); + + dev->data->rx_queues[qidx] = rxq; + dev->data->rx_queue_state[qidx] = RTE_ETH_QUEUE_STATE_STOPPED; + return 0; +} + +static void +nicvf_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + + dev_info->min_rx_bufsize = ETHER_MIN_MTU; + dev_info->max_rx_pktlen = NIC_HW_MAX_FRS; + dev_info->max_rx_queues = (uint16_t)MAX_RCV_QUEUES_PER_QS; + dev_info->max_tx_queues = (uint16_t)MAX_SND_QUEUES_PER_QS; + dev_info->max_mac_addrs = 1; + dev_info->max_vfs = dev->pci_dev->max_vfs; + + dev_info->rx_offload_capa = DEV_RX_OFFLOAD_VLAN_STRIP; + dev_info->tx_offload_capa = + DEV_TX_OFFLOAD_IPV4_CKSUM | + DEV_TX_OFFLOAD_UDP_CKSUM | + DEV_TX_OFFLOAD_TCP_CKSUM | + DEV_TX_OFFLOAD_TCP_TSO | + DEV_TX_OFFLOAD_OUTER_IPV4_CKSUM; + + dev_info->reta_size = nic->rss_info.rss_size; + dev_info->hash_key_size = RSS_HASH_KEY_BYTE_SIZE; + dev_info->flow_type_rss_offloads = NICVF_RSS_OFFLOAD_PASS1; + if (nicvf_hw_cap(nic) & NICVF_CAP_TUNNEL_PARSING) + dev_info->flow_type_rss_offloads |= NICVF_RSS_OFFLOAD_TUNNEL; + + dev_info->default_rxconf = (struct rte_eth_rxconf) { + .rx_free_thresh = NICVF_DEFAULT_RX_FREE_THRESH, + .rx_drop_en = 0, + }; + + dev_info->default_txconf = (struct rte_eth_txconf) { + .tx_free_thresh = NICVF_DEFAULT_TX_FREE_THRESH, + .txq_flags = + ETH_TXQ_FLAGS_NOMULTSEGS | + ETH_TXQ_FLAGS_NOREFCOUNT | + ETH_TXQ_FLAGS_NOMULTMEMP | + ETH_TXQ_FLAGS_NOVLANOFFL | + ETH_TXQ_FLAGS_NOXSUMSCTP, + }; +} + +static nicvf_phys_addr_t +rbdr_rte_mempool_get(void *opaque) +{ + uint16_t qidx; + uintptr_t mbuf; + struct nicvf_rxq *rxq; + struct nicvf *nic = nicvf_pmd_priv((struct rte_eth_dev *)opaque); + + for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { + rxq = nic->eth_dev->data->rx_queues[qidx]; + /* Maintain equal buffer count across all pools */ + if (rxq->precharge_cnt >= rxq->qlen_mask) + continue; + rxq->precharge_cnt++; + mbuf = (uintptr_t)rte_pktmbuf_alloc(rxq->pool); + if (mbuf) + 
return nicvf_mbuff_virt2phy(mbuf, rxq->mbuf_phys_off); + } + return 0; +} + +static int +nicvf_dev_start(struct rte_eth_dev *dev) +{ + int ret; + uint16_t qidx; + uint32_t buffsz = 0, rbdrsz = 0; + uint32_t total_rxq_desc, nb_rbdr_desc, exp_buffs; + uint64_t mbuf_phys_off = 0; + struct nicvf_rxq *rxq; + struct rte_pktmbuf_pool_private *mbp_priv; + struct rte_mbuf *mbuf; + struct nicvf *nic = nicvf_pmd_priv(dev); + struct rte_eth_rxmode *rx_conf = &dev->data->dev_conf.rxmode; + uint16_t mtu; + + PMD_INIT_FUNC_TRACE(); + + /* Userspace process exited without proper shutdown in last run */ + if (nicvf_qset_rbdr_active(nic, 0)) + nicvf_dev_stop(dev); + + /* + * Thunderx nicvf PMD can support more than one pool per port only when + * 1) Data payload size is same across all the pools in given port + * AND + * 2) All mbuffs in the pools are from the same hugepage + * AND + * 3) Mbuff metadata size is same across all the pools in given port + * + * This is to support existing application that uses multiple pool/port. + * But, the purpose of using multipool for QoS will not be addressed. + * + */ + + /* Validate RBDR buff size */ + for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { + rxq = dev->data->rx_queues[qidx]; + mbp_priv = rte_mempool_get_priv(rxq->pool); + buffsz = mbp_priv->mbuf_data_room_size - RTE_PKTMBUF_HEADROOM; + if (buffsz % 128) { + PMD_INIT_LOG(ERR, "rxbuf size must be multiply of 128"); + return -EINVAL; + } + if (rbdrsz == 0) + rbdrsz = buffsz; + if (rbdrsz != buffsz) { + PMD_INIT_LOG(ERR, "buffsz not same, qid=%d (%d/%d)", + qidx, rbdrsz, buffsz); + return -EINVAL; + } + } + + /* Validate mempool attributes */ + for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { + rxq = dev->data->rx_queues[qidx]; + rxq->mbuf_phys_off = nicvf_mempool_phy_offset(rxq->pool); + mbuf = rte_pktmbuf_alloc(rxq->pool); + if (mbuf == NULL) { + PMD_INIT_LOG(ERR, "Failed allocate mbuf qid=%d pool=%s", + qidx, rxq->pool->name); + return -ENOMEM; + } + rxq->mbuf_phys_off -= nicvf_mbuff_meta_length(mbuf); + rxq->mbuf_phys_off -= RTE_PKTMBUF_HEADROOM; + rte_pktmbuf_free(mbuf); + + if (mbuf_phys_off == 0) + mbuf_phys_off = rxq->mbuf_phys_off; + if (mbuf_phys_off != rxq->mbuf_phys_off) { + PMD_INIT_LOG(ERR, "pool params not same,%s %" PRIx64, + rxq->pool->name, mbuf_phys_off); + return -EINVAL; + } + } + + /* Check the level of buffers in the pool */ + total_rxq_desc = 0; + for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { + rxq = dev->data->rx_queues[qidx]; + /* Count total numbers of rxq descs */ + total_rxq_desc += rxq->qlen_mask + 1; + exp_buffs = RTE_MEMPOOL_CACHE_MAX_SIZE + rxq->rx_free_thresh; + exp_buffs *= nic->eth_dev->data->nb_rx_queues; + if (rte_mempool_avail_count(rxq->pool) < exp_buffs) { + PMD_INIT_LOG(ERR, "Buff shortage in pool=%s (%d/%d)", + rxq->pool->name, + rte_mempool_avail_count(rxq->pool), + exp_buffs); + return -ENOENT; + } + } + + /* Check RBDR desc overflow */ + ret = nicvf_qsize_rbdr_roundup(total_rxq_desc); + if (ret == 0) { + PMD_INIT_LOG(ERR, "Reached RBDR desc limit, reduce nr desc"); + return -ENOMEM; + } + + /* Enable qset */ + ret = nicvf_qset_config(nic); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to enable qset %d", ret); + return ret; + } + + /* Allocate RBDR and RBDR ring desc */ + nb_rbdr_desc = nicvf_qsize_rbdr_roundup(total_rxq_desc); + ret = nicvf_qset_rbdr_alloc(nic, nb_rbdr_desc, rbdrsz); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for rbdr alloc"); + goto qset_reclaim; + } + + /* Enable and configure RBDR 
registers */ + ret = nicvf_qset_rbdr_config(nic, 0); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure rbdr %d", ret); + goto qset_rbdr_free; + } + + /* Fill rte_mempool buffers in RBDR pool and precharge it */ + ret = nicvf_qset_rbdr_precharge(nic, 0, rbdr_rte_mempool_get, + dev, total_rxq_desc); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to fill rbdr %d", ret); + goto qset_rbdr_reclaim; + } + + PMD_DRV_LOG(INFO, "Filled %d out of %d entries in RBDR", + nic->rbdr->tail, nb_rbdr_desc); + + /* Configure RX queues */ + for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) { + ret = nicvf_start_rx_queue(dev, qidx); + if (ret) + goto start_rxq_error; + } + + /* Configure VLAN Strip */ + nicvf_vlan_hw_strip(nic, dev->data->dev_conf.rxmode.hw_vlan_strip); + + /* Configure TX queues */ + for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) { + ret = nicvf_start_tx_queue(dev, qidx); + if (ret) + goto start_txq_error; + } + + /* Configure CPI algorithm */ + ret = nicvf_configure_cpi(dev); + if (ret) + goto start_txq_error; + + /* Configure RSS */ + ret = nicvf_configure_rss(dev); + if (ret) + goto qset_rss_error; + + /* Configure loopback */ + ret = nicvf_loopback_config(nic, dev->data->dev_conf.lpbk_mode); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to configure loopback %d", ret); + goto qset_rss_error; + } + + /* Reset all statistics counters attached to this port */ + ret = nicvf_mbox_reset_stat_counters(nic, 0x3FFF, 0x1F, 0xFFFF, 0xFFFF); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to reset stat counters %d", ret); + goto qset_rss_error; + } + + /* Setup scatter mode if needed by jumbo */ + if (dev->data->dev_conf.rxmode.max_rx_pkt_len + + 2 * VLAN_TAG_SIZE > buffsz) + dev->data->scattered_rx = 1; + if (rx_conf->enable_scatter) + dev->data->scattered_rx = 1; + + /* Setup MTU based on max_rx_pkt_len or default */ + mtu = dev->data->dev_conf.rxmode.jumbo_frame ? 
+ dev->data->dev_conf.rxmode.max_rx_pkt_len + - ETHER_HDR_LEN - ETHER_CRC_LEN + : ETHER_MTU; + + if (nicvf_dev_set_mtu(dev, mtu)) { + PMD_INIT_LOG(ERR, "Failed to set default mtu size"); + return -EBUSY; + } + + /* Configure callbacks based on scatter mode */ + nicvf_set_tx_function(dev); + nicvf_set_rx_function(dev); + + /* Done; Let PF make the BGX's RX and TX switches to ON position */ + nicvf_mbox_cfg_done(nic); + return 0; + +qset_rss_error: + nicvf_rss_term(nic); +start_txq_error: + for (qidx = 0; qidx < nic->eth_dev->data->nb_tx_queues; qidx++) + nicvf_stop_tx_queue(dev, qidx); +start_rxq_error: + for (qidx = 0; qidx < nic->eth_dev->data->nb_rx_queues; qidx++) + nicvf_stop_rx_queue(dev, qidx); +qset_rbdr_reclaim: + nicvf_qset_rbdr_reclaim(nic, 0); + nicvf_rbdr_release_mbufs(nic); +qset_rbdr_free: + if (nic->rbdr) { + rte_free(nic->rbdr); + nic->rbdr = NULL; + } +qset_reclaim: + nicvf_qset_reclaim(nic); + return ret; +} + +static void +nicvf_dev_stop(struct rte_eth_dev *dev) +{ + int ret; + uint16_t qidx; + struct nicvf *nic = nicvf_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + + /* Let PF make the BGX's RX and TX switches to OFF position */ + nicvf_mbox_shutdown(nic); + + /* Disable loopback */ + ret = nicvf_loopback_config(nic, 0); + if (ret) + PMD_INIT_LOG(ERR, "Failed to disable loopback %d", ret); + + /* Disable VLAN Strip */ + nicvf_vlan_hw_strip(nic, 0); + + /* Reclaim sq */ + for (qidx = 0; qidx < dev->data->nb_tx_queues; qidx++) + nicvf_stop_tx_queue(dev, qidx); + + /* Reclaim rq */ + for (qidx = 0; qidx < dev->data->nb_rx_queues; qidx++) + nicvf_stop_rx_queue(dev, qidx); + + /* Reclaim RBDR */ + ret = nicvf_qset_rbdr_reclaim(nic, 0); + if (ret) + PMD_INIT_LOG(ERR, "Failed to reclaim RBDR %d", ret); + + /* Move all charged buffers in RBDR back to pool */ + if (nic->rbdr != NULL) + nicvf_rbdr_release_mbufs(nic); + + /* Reclaim CPI configuration */ + if (!nic->sqs_mode) { + ret = nicvf_mbox_config_cpi(nic, 0); + if (ret) + PMD_INIT_LOG(ERR, "Failed to reclaim CPI config"); + } + + /* Disable qset */ + ret = nicvf_qset_config(nic); + if (ret) + PMD_INIT_LOG(ERR, "Failed to disable qset %d", ret); + + /* Disable all interrupts */ + nicvf_disable_all_interrupts(nic); + + /* Free RBDR SW structure */ + if (nic->rbdr) { + rte_free(nic->rbdr); + nic->rbdr = NULL; + } +} + +static void +nicvf_dev_close(struct rte_eth_dev *dev) +{ + struct nicvf *nic = nicvf_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + + nicvf_dev_stop(dev); + nicvf_periodic_alarm_stop(nic); +} + +static int +nicvf_dev_configure(struct rte_eth_dev *dev) +{ + struct rte_eth_conf *conf = &dev->data->dev_conf; + struct rte_eth_rxmode *rxmode = &conf->rxmode; + struct rte_eth_txmode *txmode = &conf->txmode; + struct nicvf *nic = nicvf_pmd_priv(dev); + + PMD_INIT_FUNC_TRACE(); + + if (!rte_eal_has_hugepages()) { + PMD_INIT_LOG(INFO, "Huge page is not configured"); + return -EINVAL; + } + + if (txmode->mq_mode) { + PMD_INIT_LOG(INFO, "Tx mq_mode DCB or VMDq not supported"); + return -EINVAL; + } + + if (rxmode->mq_mode != ETH_MQ_RX_NONE && + rxmode->mq_mode != ETH_MQ_RX_RSS) { + PMD_INIT_LOG(INFO, "Unsupported rx qmode %d", rxmode->mq_mode); + return -EINVAL; + } + + if (!rxmode->hw_strip_crc) { + PMD_INIT_LOG(NOTICE, "Can't disable hw crc strip"); + rxmode->hw_strip_crc = 1; + } + + if (rxmode->hw_ip_checksum) { + PMD_INIT_LOG(NOTICE, "Rxcksum not supported"); + rxmode->hw_ip_checksum = 0; + } + + if (rxmode->split_hdr_size) { + PMD_INIT_LOG(INFO, "Rxmode does not support split header"); + return -EINVAL; + } + + if 
(rxmode->hw_vlan_filter) { + PMD_INIT_LOG(INFO, "VLAN filter not supported"); + return -EINVAL; + } + + if (rxmode->hw_vlan_extend) { + PMD_INIT_LOG(INFO, "VLAN extended not supported"); + return -EINVAL; + } + + if (rxmode->enable_lro) { + PMD_INIT_LOG(INFO, "LRO not supported"); + return -EINVAL; + } + + if (conf->link_speeds & ETH_LINK_SPEED_FIXED) { + PMD_INIT_LOG(INFO, "Setting link speed/duplex not supported"); + return -EINVAL; + } + + if (conf->dcb_capability_en) { + PMD_INIT_LOG(INFO, "DCB enable not supported"); + return -EINVAL; + } + + if (conf->fdir_conf.mode != RTE_FDIR_MODE_NONE) { + PMD_INIT_LOG(INFO, "Flow director not supported"); + return -EINVAL; + } + + PMD_INIT_LOG(DEBUG, "Configured ethdev port%d hwcap=0x%" PRIx64, + dev->data->port_id, nicvf_hw_cap(nic)); + + return 0; +} + +/* Initialize and register driver with DPDK Application */ +static const struct eth_dev_ops nicvf_eth_dev_ops = { + .dev_configure = nicvf_dev_configure, + .dev_start = nicvf_dev_start, + .dev_stop = nicvf_dev_stop, + .link_update = nicvf_dev_link_update, + .dev_close = nicvf_dev_close, + .stats_get = nicvf_dev_stats_get, + .stats_reset = nicvf_dev_stats_reset, + .promiscuous_enable = nicvf_dev_promisc_enable, + .dev_infos_get = nicvf_dev_info_get, + .dev_supported_ptypes_get = nicvf_dev_supported_ptypes_get, + .mtu_set = nicvf_dev_set_mtu, + .reta_update = nicvf_dev_reta_update, + .reta_query = nicvf_dev_reta_query, + .rss_hash_update = nicvf_dev_rss_hash_update, + .rss_hash_conf_get = nicvf_dev_rss_hash_conf_get, + .rx_queue_start = nicvf_dev_rx_queue_start, + .rx_queue_stop = nicvf_dev_rx_queue_stop, + .tx_queue_start = nicvf_dev_tx_queue_start, + .tx_queue_stop = nicvf_dev_tx_queue_stop, + .rx_queue_setup = nicvf_dev_rx_queue_setup, + .rx_queue_release = nicvf_dev_rx_queue_release, + .rx_queue_count = nicvf_dev_rx_queue_count, + .tx_queue_setup = nicvf_dev_tx_queue_setup, + .tx_queue_release = nicvf_dev_tx_queue_release, + .get_reg_length = nicvf_dev_get_reg_length, + .get_reg = nicvf_dev_get_regs, +}; + +static int +nicvf_eth_dev_init(struct rte_eth_dev *eth_dev) +{ + int ret; + struct rte_pci_device *pci_dev; + struct nicvf *nic = nicvf_pmd_priv(eth_dev); + + PMD_INIT_FUNC_TRACE(); + + eth_dev->dev_ops = &nicvf_eth_dev_ops; + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) { + /* Setup callbacks for secondary process */ + nicvf_set_tx_function(eth_dev); + nicvf_set_rx_function(eth_dev); + return 0; + } + + pci_dev = eth_dev->pci_dev; + rte_eth_copy_pci_info(eth_dev, pci_dev); + + nic->device_id = pci_dev->id.device_id; + nic->vendor_id = pci_dev->id.vendor_id; + nic->subsystem_device_id = pci_dev->id.subsystem_device_id; + nic->subsystem_vendor_id = pci_dev->id.subsystem_vendor_id; + nic->eth_dev = eth_dev; + + PMD_INIT_LOG(DEBUG, "nicvf: device (%x:%x) %u:%u:%u:%u", + pci_dev->id.vendor_id, pci_dev->id.device_id, + pci_dev->addr.domain, pci_dev->addr.bus, + pci_dev->addr.devid, pci_dev->addr.function); + + nic->reg_base = (uintptr_t)pci_dev->mem_resource[0].addr; + if (!nic->reg_base) { + PMD_INIT_LOG(ERR, "Failed to map BAR0"); + ret = -ENODEV; + goto fail; + } + + nicvf_disable_all_interrupts(nic); + + ret = nicvf_periodic_alarm_start(nic); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to start period alarm"); + goto fail; + } + + ret = nicvf_mbox_check_pf_ready(nic); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get ready message from PF"); + goto alarm_fail; + } else { + PMD_INIT_LOG(INFO, + "node=%d vf=%d mode=%s sqs=%s 
loopback_supported=%s", + nic->node, nic->vf_id, + nic->tns_mode == NIC_TNS_MODE ? "tns" : "tns-bypass", + nic->sqs_mode ? "true" : "false", + nic->loopback_supported ? "true" : "false" + ); + } + + if (nic->sqs_mode) { + PMD_INIT_LOG(INFO, "Unsupported SQS VF detected, Detaching..."); + /* Detach port by returning Positive error number */ + ret = ENOTSUP; + goto alarm_fail; + } + + eth_dev->data->mac_addrs = rte_zmalloc("mac_addr", ETHER_ADDR_LEN, 0); + if (eth_dev->data->mac_addrs == NULL) { + PMD_INIT_LOG(ERR, "Failed to allocate memory for mac addr"); + ret = -ENOMEM; + goto alarm_fail; + } + if (is_zero_ether_addr((struct ether_addr *)nic->mac_addr)) + eth_random_addr(&nic->mac_addr[0]); + + ether_addr_copy((struct ether_addr *)nic->mac_addr, + &eth_dev->data->mac_addrs[0]); + + ret = nicvf_mbox_set_mac_addr(nic, nic->mac_addr); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to set mac addr"); + goto malloc_fail; + } + + ret = nicvf_base_init(nic); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to execute nicvf_base_init"); + goto malloc_fail; + } + + ret = nicvf_mbox_get_rss_size(nic); + if (ret) { + PMD_INIT_LOG(ERR, "Failed to get rss table size"); + goto malloc_fail; + } + + PMD_INIT_LOG(INFO, "Port %d (%x:%x) mac=%02x:%02x:%02x:%02x:%02x:%02x", + eth_dev->data->port_id, nic->vendor_id, nic->device_id, + nic->mac_addr[0], nic->mac_addr[1], nic->mac_addr[2], + nic->mac_addr[3], nic->mac_addr[4], nic->mac_addr[5]); + + return 0; + +malloc_fail: + rte_free(eth_dev->data->mac_addrs); +alarm_fail: + nicvf_periodic_alarm_stop(nic); +fail: + return ret; +} + +static const struct rte_pci_id pci_id_nicvf_map[] = { + { + .class_id = RTE_CLASS_ANY_ID, + .vendor_id = PCI_VENDOR_ID_CAVIUM, + .device_id = PCI_DEVICE_ID_THUNDERX_PASS1_NICVF, + .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, + .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS1_NICVF, + }, + { + .class_id = RTE_CLASS_ANY_ID, + .vendor_id = PCI_VENDOR_ID_CAVIUM, + .device_id = PCI_DEVICE_ID_THUNDERX_PASS2_NICVF, + .subsystem_vendor_id = PCI_VENDOR_ID_CAVIUM, + .subsystem_device_id = PCI_SUB_DEVICE_ID_THUNDERX_PASS2_NICVF, + }, + { + .vendor_id = 0, + }, +}; + +static struct eth_driver rte_nicvf_pmd = { + .pci_drv = { + .name = "rte_nicvf_pmd", + .id_table = pci_id_nicvf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC, + }, + .eth_dev_init = nicvf_eth_dev_init, + .dev_private_size = sizeof(struct nicvf), +}; + +static int +rte_nicvf_pmd_init(const char *name __rte_unused, const char *para __rte_unused) +{ + PMD_INIT_FUNC_TRACE(); + PMD_INIT_LOG(INFO, "librte_pmd_thunderx nicvf version %s", + THUNDERX_NICVF_PMD_VERSION); + + rte_eth_driver_register(&rte_nicvf_pmd); + return 0; +} + +static struct rte_driver rte_nicvf_driver = { + .name = "nicvf_driver", + .type = PMD_PDEV, + .init = rte_nicvf_pmd_init, +}; + +PMD_REGISTER_DRIVER(rte_nicvf_driver); diff --git a/drivers/net/thunderx/nicvf_ethdev.h b/drivers/net/thunderx/nicvf_ethdev.h new file mode 100644 index 00000000..59fa19cf --- /dev/null +++ b/drivers/net/thunderx/nicvf_ethdev.h @@ -0,0 +1,106 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __THUNDERX_NICVF_ETHDEV_H__ +#define __THUNDERX_NICVF_ETHDEV_H__ + +#include <rte_ethdev.h> + +#define THUNDERX_NICVF_PMD_VERSION "1.0" + +#define NICVF_INTR_POLL_INTERVAL_MS 50 +#define NICVF_HALF_DUPLEX 0x00 +#define NICVF_FULL_DUPLEX 0x01 +#define NICVF_UNKNOWN_DUPLEX 0xff + +#define NICVF_RSS_OFFLOAD_PASS1 ( \ + ETH_RSS_PORT | \ + ETH_RSS_IPV4 | \ + ETH_RSS_NONFRAG_IPV4_TCP | \ + ETH_RSS_NONFRAG_IPV4_UDP | \ + ETH_RSS_IPV6 | \ + ETH_RSS_NONFRAG_IPV6_TCP | \ + ETH_RSS_NONFRAG_IPV6_UDP) + +#define NICVF_RSS_OFFLOAD_TUNNEL ( \ + ETH_RSS_VXLAN | \ + ETH_RSS_GENEVE | \ + ETH_RSS_NVGRE) + +#define NICVF_DEFAULT_RX_FREE_THRESH 224 +#define NICVF_DEFAULT_TX_FREE_THRESH 224 +#define NICVF_TX_FREE_MPOOL_THRESH 16 +#define NICVF_MAX_RX_FREE_THRESH 1024 +#define NICVF_MAX_TX_FREE_THRESH 1024 + +#define VLAN_TAG_SIZE 4 /* 802.3ac tag */ + +static inline struct nicvf * +nicvf_pmd_priv(struct rte_eth_dev *eth_dev) +{ + return eth_dev->data->dev_private; +} + +static inline uint64_t +nicvf_mempool_phy_offset(struct rte_mempool *mp) +{ + struct rte_mempool_memhdr *hdr; + + hdr = STAILQ_FIRST(&mp->mem_list); + assert(hdr != NULL); + return (uint64_t)((uintptr_t)hdr->addr - hdr->phys_addr); +} + +static inline uint16_t +nicvf_mbuff_meta_length(struct rte_mbuf *mbuf) +{ + return (uint16_t)((uintptr_t)mbuf->buf_addr - (uintptr_t)mbuf); +} + +/* + * Simple phy2virt functions assuming mbufs are in a single huge page + * V = P + offset + * P = V - offset + */ +static inline uintptr_t +nicvf_mbuff_phy2virt(phys_addr_t phy, uint64_t mbuf_phys_off) +{ + return (uintptr_t)(phy + mbuf_phys_off); +} + +static inline uintptr_t +nicvf_mbuff_virt2phy(uintptr_t virt, uint64_t mbuf_phys_off) +{ + return (phys_addr_t)(virt - mbuf_phys_off); +} + +#endif /* __THUNDERX_NICVF_ETHDEV_H__ */ diff --git a/drivers/net/thunderx/nicvf_logs.h b/drivers/net/thunderx/nicvf_logs.h new file mode 100644 index 00000000..0667d468 --- /dev/null +++ b/drivers/net/thunderx/nicvf_logs.h @@ -0,0 +1,83 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __THUNDERX_NICVF_LOGS__ +#define __THUNDERX_NICVF_LOGS__ + +#include <assert.h> + +#define PMD_INIT_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) + +#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_INIT +#define PMD_INIT_FUNC_TRACE() PMD_INIT_LOG(DEBUG, ">>") +#else +#define PMD_INIT_FUNC_TRACE() do { } while (0) +#endif + +#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_RX +#define PMD_RX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define NICVF_RX_ASSERT(x) assert(x) +#else +#define PMD_RX_LOG(level, fmt, args...) do { } while (0) +#define NICVF_RX_ASSERT(x) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_TX +#define PMD_TX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define NICVF_TX_ASSERT(x) assert(x) +#else +#define PMD_TX_LOG(level, fmt, args...) do { } while (0) +#define NICVF_TX_ASSERT(x) do { } while (0) +#endif + +#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_DRIVER +#define PMD_DRV_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define PMD_DRV_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>") +#else +#define PMD_DRV_LOG(level, fmt, args...) do { } while (0) +#define PMD_DRV_FUNC_TRACE() do { } while (0) +#endif + +#ifdef RTE_LIBRTE_THUNDERX_NICVF_DEBUG_MBOX +#define PMD_MBOX_LOG(level, fmt, args...) \ + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) +#define PMD_MBOX_FUNC_TRACE() PMD_DRV_LOG(DEBUG, ">>") +#else +#define PMD_MBOX_LOG(level, fmt, args...) do { } while (0) +#define PMD_MBOX_FUNC_TRACE() do { } while (0) +#endif + +#endif /* __THUNDERX_NICVF_LOGS__ */ diff --git a/drivers/net/thunderx/nicvf_rxtx.c b/drivers/net/thunderx/nicvf_rxtx.c new file mode 100644 index 00000000..eb51a72c --- /dev/null +++ b/drivers/net/thunderx/nicvf_rxtx.c @@ -0,0 +1,599 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <unistd.h> +#include <stdint.h> +#include <stdio.h> +#include <stdlib.h> + +#include <rte_atomic.h> +#include <rte_branch_prediction.h> +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_errno.h> +#include <rte_ethdev.h> +#include <rte_ether.h> +#include <rte_log.h> +#include <rte_mbuf.h> +#include <rte_prefetch.h> + +#include "base/nicvf_plat.h" + +#include "nicvf_ethdev.h" +#include "nicvf_rxtx.h" +#include "nicvf_logs.h" + +static inline void __hot +fill_sq_desc_header(union sq_entry_t *entry, struct rte_mbuf *pkt) +{ + /* Local variable sqe to avoid read from sq desc memory*/ + union sq_entry_t sqe; + uint64_t ol_flags; + + /* Fill SQ header descriptor */ + sqe.buff[0] = 0; + sqe.hdr.subdesc_type = SQ_DESC_TYPE_HEADER; + /* Number of sub-descriptors following this one */ + sqe.hdr.subdesc_cnt = pkt->nb_segs; + sqe.hdr.tot_len = pkt->pkt_len; + + ol_flags = pkt->ol_flags & NICVF_TX_OFFLOAD_MASK; + if (unlikely(ol_flags)) { + /* L4 cksum */ + if (ol_flags & PKT_TX_TCP_CKSUM) + sqe.hdr.csum_l4 = SEND_L4_CSUM_TCP; + else if (ol_flags & PKT_TX_UDP_CKSUM) + sqe.hdr.csum_l4 = SEND_L4_CSUM_UDP; + else + sqe.hdr.csum_l4 = SEND_L4_CSUM_DISABLE; + sqe.hdr.l4_offset = pkt->l3_len + pkt->l2_len; + + /* L3 cksum */ + if (ol_flags & PKT_TX_IP_CKSUM) { + sqe.hdr.csum_l3 = 1; + sqe.hdr.l3_offset = pkt->l2_len; + } + } + + entry->buff[0] = sqe.buff[0]; +} + +void __hot +nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq) +{ + int j = 0; + uint32_t curr_head; + uint32_t head = sq->head; + struct rte_mbuf **txbuffs = sq->txbuffs; + void *obj_p[NICVF_MAX_TX_FREE_THRESH] __rte_cache_aligned; + + curr_head = nicvf_addr_read(sq->sq_head) >> 4; + while (head != curr_head) { + if (txbuffs[head]) + obj_p[j++] = txbuffs[head]; + + head = (head + 1) & sq->qlen_mask; + } + + rte_mempool_put_bulk(sq->pool, obj_p, j); + sq->head = curr_head; + sq->xmit_bufs -= j; + NICVF_TX_ASSERT(sq->xmit_bufs >= 0); +} + +void __hot +nicvf_multi_pool_free_xmited_buffers(struct 
nicvf_txq *sq) +{ + uint32_t n = 0; + uint32_t curr_head; + uint32_t head = sq->head; + struct rte_mbuf **txbuffs = sq->txbuffs; + + curr_head = nicvf_addr_read(sq->sq_head) >> 4; + while (head != curr_head) { + if (txbuffs[head]) { + rte_pktmbuf_free_seg(txbuffs[head]); + n++; + } + + head = (head + 1) & sq->qlen_mask; + } + + sq->head = curr_head; + sq->xmit_bufs -= n; + NICVF_TX_ASSERT(sq->xmit_bufs >= 0); +} + +static inline uint32_t __hot +nicvf_free_tx_desc(struct nicvf_txq *sq) +{ + return ((sq->head - sq->tail - 1) & sq->qlen_mask); +} + +/* Send Header + Packet */ +#define TX_DESC_PER_PKT 2 + +static inline uint32_t __hot +nicvf_free_xmitted_buffers(struct nicvf_txq *sq, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + uint32_t free_desc = nicvf_free_tx_desc(sq); + + if (free_desc < nb_pkts * TX_DESC_PER_PKT || + sq->xmit_bufs > sq->tx_free_thresh) { + if (unlikely(sq->pool == NULL)) + sq->pool = tx_pkts[0]->pool; + + sq->pool_free(sq); + /* Freed now, let see the number of free descs again */ + free_desc = nicvf_free_tx_desc(sq); + } + return free_desc; +} + +uint16_t __hot +nicvf_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) +{ + int i; + uint32_t free_desc; + uint32_t tail; + struct nicvf_txq *sq = tx_queue; + union sq_entry_t *desc_ptr = sq->desc; + struct rte_mbuf **txbuffs = sq->txbuffs; + struct rte_mbuf *pkt; + uint32_t qlen_mask = sq->qlen_mask; + + tail = sq->tail; + free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts); + + for (i = 0; i < nb_pkts && (int)free_desc >= TX_DESC_PER_PKT; i++) { + pkt = tx_pkts[i]; + + txbuffs[tail] = NULL; + fill_sq_desc_header(desc_ptr + tail, pkt); + tail = (tail + 1) & qlen_mask; + + txbuffs[tail] = pkt; + fill_sq_desc_gather(desc_ptr + tail, pkt); + tail = (tail + 1) & qlen_mask; + free_desc -= TX_DESC_PER_PKT; + } + + sq->tail = tail; + sq->xmit_bufs += i; + rte_wmb(); + + /* Inform HW to xmit the packets */ + nicvf_addr_write(sq->sq_door, i * TX_DESC_PER_PKT); + return i; +} + +uint16_t __hot +nicvf_xmit_pkts_multiseg(void *tx_queue, struct rte_mbuf **tx_pkts, + uint16_t nb_pkts) +{ + int i, k; + uint32_t used_desc, next_used_desc, used_bufs, free_desc, tail; + struct nicvf_txq *sq = tx_queue; + union sq_entry_t *desc_ptr = sq->desc; + struct rte_mbuf **txbuffs = sq->txbuffs; + struct rte_mbuf *pkt, *seg; + uint32_t qlen_mask = sq->qlen_mask; + uint16_t nb_segs; + + tail = sq->tail; + used_desc = 0; + used_bufs = 0; + + free_desc = nicvf_free_xmitted_buffers(sq, tx_pkts, nb_pkts); + + for (i = 0; i < nb_pkts; i++) { + pkt = tx_pkts[i]; + + nb_segs = pkt->nb_segs; + + next_used_desc = used_desc + nb_segs + 1; + if (next_used_desc > free_desc) + break; + used_desc = next_used_desc; + used_bufs += nb_segs; + + txbuffs[tail] = NULL; + fill_sq_desc_header(desc_ptr + tail, pkt); + tail = (tail + 1) & qlen_mask; + + txbuffs[tail] = pkt; + fill_sq_desc_gather(desc_ptr + tail, pkt); + tail = (tail + 1) & qlen_mask; + + seg = pkt->next; + for (k = 1; k < nb_segs; k++) { + txbuffs[tail] = seg; + fill_sq_desc_gather(desc_ptr + tail, seg); + tail = (tail + 1) & qlen_mask; + seg = seg->next; + } + } + + sq->tail = tail; + sq->xmit_bufs += used_bufs; + rte_wmb(); + + /* Inform HW to xmit the packets */ + nicvf_addr_write(sq->sq_door, used_desc); + return nb_pkts; +} + +static const uint32_t ptype_table[16][16] __rte_cache_aligned = { + [L3_NONE][L4_NONE] = RTE_PTYPE_UNKNOWN, + [L3_NONE][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN, + [L3_NONE][L4_IPFRAG] = RTE_PTYPE_L4_FRAG, + [L3_NONE][L4_IPCOMP] = RTE_PTYPE_UNKNOWN, + 
[L3_NONE][L4_TCP] = RTE_PTYPE_L4_TCP, + [L3_NONE][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP, + [L3_NONE][L4_GRE] = RTE_PTYPE_TUNNEL_GRE, + [L3_NONE][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP, + [L3_NONE][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE, + [L3_NONE][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, + [L3_NONE][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE, + + [L3_IPV4][L4_NONE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN, + [L3_IPV4][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L3_IPV4, + [L3_IPV4][L4_IPFRAG] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_FRAG, + [L3_IPV4][L4_IPCOMP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_UNKNOWN, + [L3_IPV4][L4_TCP] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_TCP, + [L3_IPV4][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [L3_IPV4][L4_GRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GRE, + [L3_IPV4][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_L4_UDP, + [L3_IPV4][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_GENEVE, + [L3_IPV4][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_VXLAN, + [L3_IPV4][L4_NVGRE] = RTE_PTYPE_L3_IPV4 | RTE_PTYPE_TUNNEL_NVGRE, + + [L3_IPV4_OPT][L4_NONE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN, + [L3_IPV4_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_L3_IPV4, + [L3_IPV4_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_FRAG, + [L3_IPV4_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_UNKNOWN, + [L3_IPV4_OPT][L4_TCP] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_TCP, + [L3_IPV4_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [L3_IPV4_OPT][L4_GRE] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_TUNNEL_GRE, + [L3_IPV4_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV4_EXT | RTE_PTYPE_L4_UDP, + [L3_IPV4_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_TUNNEL_GENEVE, + [L3_IPV4_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_TUNNEL_VXLAN, + [L3_IPV4_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV4_EXT | + RTE_PTYPE_TUNNEL_NVGRE, + + [L3_IPV6][L4_NONE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN, + [L3_IPV6][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L3_IPV4, + [L3_IPV6][L4_IPFRAG] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_FRAG, + [L3_IPV6][L4_IPCOMP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_UNKNOWN, + [L3_IPV6][L4_TCP] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_TCP, + [L3_IPV6][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [L3_IPV6][L4_GRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GRE, + [L3_IPV6][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_L4_UDP, + [L3_IPV6][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_GENEVE, + [L3_IPV6][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_VXLAN, + [L3_IPV6][L4_NVGRE] = RTE_PTYPE_L3_IPV6 | RTE_PTYPE_TUNNEL_NVGRE, + + [L3_IPV6_OPT][L4_NONE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN, + [L3_IPV6_OPT][L4_IPSEC_ESP] = RTE_PTYPE_L3_IPV6_EXT | + RTE_PTYPE_L3_IPV4, + [L3_IPV6_OPT][L4_IPFRAG] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_FRAG, + [L3_IPV6_OPT][L4_IPCOMP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_UNKNOWN, + [L3_IPV6_OPT][L4_TCP] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_TCP, + [L3_IPV6_OPT][L4_UDP_PASS1] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [L3_IPV6_OPT][L4_GRE] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_TUNNEL_GRE, + [L3_IPV6_OPT][L4_UDP_PASS2] = RTE_PTYPE_L3_IPV6_EXT | RTE_PTYPE_L4_UDP, + [L3_IPV6_OPT][L4_UDP_GENEVE] = RTE_PTYPE_L3_IPV6_EXT | + RTE_PTYPE_TUNNEL_GENEVE, + [L3_IPV6_OPT][L4_UDP_VXLAN] = RTE_PTYPE_L3_IPV6_EXT | + RTE_PTYPE_TUNNEL_VXLAN, + [L3_IPV6_OPT][L4_NVGRE] = RTE_PTYPE_L3_IPV6_EXT | + RTE_PTYPE_TUNNEL_NVGRE, + + [L3_ET_STOP][L4_NONE] = RTE_PTYPE_UNKNOWN, + [L3_ET_STOP][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN, + 
[L3_ET_STOP][L4_IPFRAG] = RTE_PTYPE_L4_FRAG, + [L3_ET_STOP][L4_IPCOMP] = RTE_PTYPE_UNKNOWN, + [L3_ET_STOP][L4_TCP] = RTE_PTYPE_L4_TCP, + [L3_ET_STOP][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP, + [L3_ET_STOP][L4_GRE] = RTE_PTYPE_TUNNEL_GRE, + [L3_ET_STOP][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP, + [L3_ET_STOP][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE, + [L3_ET_STOP][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, + [L3_ET_STOP][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE, + + [L3_OTHER][L4_NONE] = RTE_PTYPE_UNKNOWN, + [L3_OTHER][L4_IPSEC_ESP] = RTE_PTYPE_UNKNOWN, + [L3_OTHER][L4_IPFRAG] = RTE_PTYPE_L4_FRAG, + [L3_OTHER][L4_IPCOMP] = RTE_PTYPE_UNKNOWN, + [L3_OTHER][L4_TCP] = RTE_PTYPE_L4_TCP, + [L3_OTHER][L4_UDP_PASS1] = RTE_PTYPE_L4_UDP, + [L3_OTHER][L4_GRE] = RTE_PTYPE_TUNNEL_GRE, + [L3_OTHER][L4_UDP_PASS2] = RTE_PTYPE_L4_UDP, + [L3_OTHER][L4_UDP_GENEVE] = RTE_PTYPE_TUNNEL_GENEVE, + [L3_OTHER][L4_UDP_VXLAN] = RTE_PTYPE_TUNNEL_VXLAN, + [L3_OTHER][L4_NVGRE] = RTE_PTYPE_TUNNEL_NVGRE, +}; + +static inline uint32_t __hot +nicvf_rx_classify_pkt(cqe_rx_word0_t cqe_rx_w0) +{ + return ptype_table[cqe_rx_w0.l3_type][cqe_rx_w0.l4_type]; +} + +static inline int __hot +nicvf_fill_rbdr(struct nicvf_rxq *rxq, int to_fill) +{ + int i; + uint32_t ltail, next_tail; + struct nicvf_rbdr *rbdr = rxq->shared_rbdr; + uint64_t mbuf_phys_off = rxq->mbuf_phys_off; + struct rbdr_entry_t *desc = rbdr->desc; + uint32_t qlen_mask = rbdr->qlen_mask; + uintptr_t door = rbdr->rbdr_door; + void *obj_p[NICVF_MAX_RX_FREE_THRESH] __rte_cache_aligned; + + if (unlikely(rte_mempool_get_bulk(rxq->pool, obj_p, to_fill) < 0)) { + rxq->nic->eth_dev->data->rx_mbuf_alloc_failed += to_fill; + return 0; + } + + NICVF_RX_ASSERT((unsigned int)to_fill <= (qlen_mask - + (nicvf_addr_read(rbdr->rbdr_status) & NICVF_RBDR_COUNT_MASK))); + + next_tail = __atomic_fetch_add(&rbdr->next_tail, to_fill, + __ATOMIC_ACQUIRE); + ltail = next_tail; + for (i = 0; i < to_fill; i++) { + struct rbdr_entry_t *entry = desc + (ltail & qlen_mask); + + entry->full_addr = nicvf_mbuff_virt2phy((uintptr_t)obj_p[i], + mbuf_phys_off); + ltail++; + } + + while (__atomic_load_n(&rbdr->tail, __ATOMIC_RELAXED) != next_tail) + rte_pause(); + + __atomic_store_n(&rbdr->tail, ltail, __ATOMIC_RELEASE); + nicvf_addr_write(door, to_fill); + return to_fill; +} + +static inline int32_t __hot +nicvf_rx_pkts_to_process(struct nicvf_rxq *rxq, uint16_t nb_pkts, + int32_t available_space) +{ + if (unlikely(available_space < nb_pkts)) + rxq->available_space = nicvf_addr_read(rxq->cq_status) + & NICVF_CQ_CQE_COUNT_MASK; + + return RTE_MIN(nb_pkts, available_space); +} + +static inline void __hot +nicvf_rx_offload(cqe_rx_word0_t cqe_rx_w0, cqe_rx_word2_t cqe_rx_w2, + struct rte_mbuf *pkt) +{ + if (likely(cqe_rx_w0.rss_alg)) { + pkt->hash.rss = cqe_rx_w2.rss_tag; + pkt->ol_flags |= PKT_RX_RSS_HASH; + } +} + +uint16_t __hot +nicvf_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) +{ + uint32_t i, to_process; + struct cqe_rx_t *cqe_rx; + struct rte_mbuf *pkt; + cqe_rx_word0_t cqe_rx_w0; + cqe_rx_word1_t cqe_rx_w1; + cqe_rx_word2_t cqe_rx_w2; + cqe_rx_word3_t cqe_rx_w3; + struct nicvf_rxq *rxq = rx_queue; + union cq_entry_t *desc = rxq->desc; + const uint64_t cqe_mask = rxq->qlen_mask; + uint64_t rb0_ptr, mbuf_phys_off = rxq->mbuf_phys_off; + uint32_t cqe_head = rxq->head & cqe_mask; + int32_t available_space = rxq->available_space; + uint8_t port_id = rxq->port_id; + const uint8_t rbptr_offset = rxq->rbptr_offset; + + to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space); + + for (i = 0; 
i < to_process; i++) { + rte_prefetch_non_temporal(&desc[cqe_head + 2]); + cqe_rx = (struct cqe_rx_t *)&desc[cqe_head]; + NICVF_RX_ASSERT(((struct cq_entry_type_t *)cqe_rx)->cqe_type + == CQE_TYPE_RX); + + NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx); + NICVF_LOAD_PAIR(cqe_rx_w2.u64, cqe_rx_w3.u64, &cqe_rx->word2); + rb0_ptr = *((uint64_t *)cqe_rx + rbptr_offset); + pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt + (rb0_ptr - cqe_rx_w1.align_pad, mbuf_phys_off); + + pkt->ol_flags = 0; + pkt->port = port_id; + pkt->data_len = cqe_rx_w3.rb0_sz; + pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad; + pkt->nb_segs = 1; + pkt->pkt_len = cqe_rx_w3.rb0_sz; + pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0); + + nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt); + rte_mbuf_refcnt_set(pkt, 1); + rx_pkts[i] = pkt; + cqe_head = (cqe_head + 1) & cqe_mask; + nicvf_prefetch_store_keep(pkt); + } + + if (likely(to_process)) { + rxq->available_space -= to_process; + rxq->head = cqe_head; + nicvf_addr_write(rxq->cq_door, to_process); + rxq->recv_buffers += to_process; + if (rxq->recv_buffers > rxq->rx_free_thresh) { + rxq->recv_buffers -= nicvf_fill_rbdr(rxq, + rxq->rx_free_thresh); + NICVF_RX_ASSERT(rxq->recv_buffers >= 0); + } + } + + return to_process; +} + +static inline uint16_t __hot +nicvf_process_cq_mseg_entry(struct cqe_rx_t *cqe_rx, + uint64_t mbuf_phys_off, uint8_t port_id, + struct rte_mbuf **rx_pkt, uint8_t rbptr_offset) +{ + struct rte_mbuf *pkt, *seg, *prev; + cqe_rx_word0_t cqe_rx_w0; + cqe_rx_word1_t cqe_rx_w1; + cqe_rx_word2_t cqe_rx_w2; + uint16_t *rb_sz, nb_segs, seg_idx; + uint64_t *rb_ptr; + + NICVF_LOAD_PAIR(cqe_rx_w0.u64, cqe_rx_w1.u64, cqe_rx); + NICVF_RX_ASSERT(cqe_rx_w0.cqe_type == CQE_TYPE_RX); + cqe_rx_w2 = cqe_rx->word2; + rb_sz = &cqe_rx->word3.rb0_sz; + rb_ptr = (uint64_t *)cqe_rx + rbptr_offset; + nb_segs = cqe_rx_w0.rb_cnt; + pkt = (struct rte_mbuf *)nicvf_mbuff_phy2virt + (rb_ptr[0] - cqe_rx_w1.align_pad, mbuf_phys_off); + + pkt->ol_flags = 0; + pkt->port = port_id; + pkt->data_off = RTE_PKTMBUF_HEADROOM + cqe_rx_w1.align_pad; + pkt->nb_segs = nb_segs; + pkt->pkt_len = cqe_rx_w1.pkt_len; + pkt->data_len = rb_sz[nicvf_frag_num(0)]; + rte_mbuf_refcnt_set(pkt, 1); + pkt->packet_type = nicvf_rx_classify_pkt(cqe_rx_w0); + nicvf_rx_offload(cqe_rx_w0, cqe_rx_w2, pkt); + + *rx_pkt = pkt; + prev = pkt; + for (seg_idx = 1; seg_idx < nb_segs; seg_idx++) { + seg = (struct rte_mbuf *)nicvf_mbuff_phy2virt + (rb_ptr[seg_idx], mbuf_phys_off); + + prev->next = seg; + seg->data_len = rb_sz[nicvf_frag_num(seg_idx)]; + seg->port = port_id; + seg->data_off = RTE_PKTMBUF_HEADROOM; + rte_mbuf_refcnt_set(seg, 1); + + prev = seg; + } + prev->next = NULL; + return nb_segs; +} + +uint16_t __hot +nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts) +{ + union cq_entry_t *cq_entry; + struct cqe_rx_t *cqe_rx; + struct nicvf_rxq *rxq = rx_queue; + union cq_entry_t *desc = rxq->desc; + const uint64_t cqe_mask = rxq->qlen_mask; + uint64_t mbuf_phys_off = rxq->mbuf_phys_off; + uint32_t i, to_process, cqe_head, buffers_consumed = 0; + int32_t available_space = rxq->available_space; + uint16_t nb_segs; + const uint8_t port_id = rxq->port_id; + const uint8_t rbptr_offset = rxq->rbptr_offset; + + cqe_head = rxq->head & cqe_mask; + to_process = nicvf_rx_pkts_to_process(rxq, nb_pkts, available_space); + + for (i = 0; i < to_process; i++) { + rte_prefetch_non_temporal(&desc[cqe_head + 2]); + cq_entry = &desc[cqe_head]; + cqe_rx = (struct cqe_rx_t *)cq_entry; + 
nb_segs = nicvf_process_cq_mseg_entry(cqe_rx, mbuf_phys_off, + port_id, rx_pkts + i, rbptr_offset); + buffers_consumed += nb_segs; + cqe_head = (cqe_head + 1) & cqe_mask; + nicvf_prefetch_store_keep(rx_pkts[i]); + } + + if (likely(to_process)) { + rxq->available_space -= to_process; + rxq->head = cqe_head; + nicvf_addr_write(rxq->cq_door, to_process); + rxq->recv_buffers += buffers_consumed; + if (rxq->recv_buffers > rxq->rx_free_thresh) { + rxq->recv_buffers -= + nicvf_fill_rbdr(rxq, rxq->rx_free_thresh); + NICVF_RX_ASSERT(rxq->recv_buffers >= 0); + } + } + + return to_process; +} + +uint32_t +nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + struct nicvf_rxq *rxq; + + rxq = dev->data->rx_queues[queue_idx]; + return nicvf_addr_read(rxq->cq_status) & NICVF_CQ_CQE_COUNT_MASK; +} + +uint32_t +nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx) +{ + struct nicvf_rxq *rxq; + uint32_t to_process; + uint32_t rx_free; + + rxq = dev->data->rx_queues[queue_idx]; + to_process = rxq->recv_buffers; + while (rxq->recv_buffers > 0) { + rx_free = RTE_MIN(rxq->recv_buffers, NICVF_MAX_RX_FREE_THRESH); + rxq->recv_buffers -= nicvf_fill_rbdr(rxq, rx_free); + } + + assert(rxq->recv_buffers == 0); + return to_process; +} diff --git a/drivers/net/thunderx/nicvf_rxtx.h b/drivers/net/thunderx/nicvf_rxtx.h new file mode 100644 index 00000000..9dad8a5a --- /dev/null +++ b/drivers/net/thunderx/nicvf_rxtx.h @@ -0,0 +1,101 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __THUNDERX_NICVF_RXTX_H__ +#define __THUNDERX_NICVF_RXTX_H__ + +#include <rte_byteorder.h> +#include <rte_ethdev.h> + +#define NICVF_TX_OFFLOAD_MASK (PKT_TX_IP_CKSUM | PKT_TX_L4_MASK) + +#ifndef __hot +#define __hot __attribute__((hot)) +#endif + +#if RTE_BYTE_ORDER == RTE_BIG_ENDIAN +static inline uint16_t __attribute__((const)) +nicvf_frag_num(uint16_t i) +{ + return (i & ~3) + 3 - (i & 3); +} + +static inline void __hot +fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt) +{ + /* Local variable sqe to avoid read from sq desc memory*/ + union sq_entry_t sqe; + + /* Fill the SQ gather entry */ + sqe.buff[0] = 0; sqe.buff[1] = 0; + sqe.gather.subdesc_type = SQ_DESC_TYPE_GATHER; + sqe.gather.ld_type = NIC_SEND_LD_TYPE_E_LDT; + sqe.gather.size = pkt->data_len; + sqe.gather.addr = rte_mbuf_data_dma_addr(pkt); + + entry->buff[0] = sqe.buff[0]; + entry->buff[1] = sqe.buff[1]; +} + +#else + +static inline uint16_t __attribute__((const)) +nicvf_frag_num(uint16_t i) +{ + return i; +} + +static inline void __hot +fill_sq_desc_gather(union sq_entry_t *entry, struct rte_mbuf *pkt) +{ + entry->buff[0] = (uint64_t)SQ_DESC_TYPE_GATHER << 60 | + (uint64_t)NIC_SEND_LD_TYPE_E_LDT << 58 | + pkt->data_len; + entry->buff[1] = rte_mbuf_data_dma_addr(pkt); +} +#endif + +uint32_t nicvf_dev_rx_queue_count(struct rte_eth_dev *dev, uint16_t queue_idx); +uint32_t nicvf_dev_rbdr_refill(struct rte_eth_dev *dev, uint16_t queue_idx); + +uint16_t nicvf_recv_pkts(void *rxq, struct rte_mbuf **rx_pkts, uint16_t pkts); +uint16_t nicvf_recv_pkts_multiseg(void *rx_queue, struct rte_mbuf **rx_pkts, + uint16_t nb_pkts); + +uint16_t nicvf_xmit_pkts(void *txq, struct rte_mbuf **tx_pkts, uint16_t pkts); +uint16_t nicvf_xmit_pkts_multiseg(void *txq, struct rte_mbuf **tx_pkts, + uint16_t pkts); + +void nicvf_single_pool_free_xmited_buffers(struct nicvf_txq *sq); +void nicvf_multi_pool_free_xmited_buffers(struct nicvf_txq *sq); + +#endif /* __THUNDERX_NICVF_RXTX_H__ */ diff --git a/drivers/net/thunderx/nicvf_struct.h b/drivers/net/thunderx/nicvf_struct.h new file mode 100644 index 00000000..c52545d9 --- /dev/null +++ b/drivers/net/thunderx/nicvf_struct.h @@ -0,0 +1,124 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium networks Ltd. 2016. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _THUNDERX_NICVF_STRUCT_H +#define _THUNDERX_NICVF_STRUCT_H + +#include <stdint.h> + +#include <rte_spinlock.h> +#include <rte_mempool.h> +#include <rte_mbuf.h> +#include <rte_interrupts.h> +#include <rte_ethdev.h> +#include <rte_memory.h> + +struct nicvf_rbdr { + uint64_t rbdr_status; + uint64_t rbdr_door; + struct rbdr_entry_t *desc; + nicvf_phys_addr_t phys; + uint32_t buffsz; + uint32_t tail; + uint32_t next_tail; + uint32_t head; + uint32_t qlen_mask; +} __rte_cache_aligned; + +struct nicvf_txq { + union sq_entry_t *desc; + nicvf_phys_addr_t phys; + struct rte_mbuf **txbuffs; + uint64_t sq_head; + uint64_t sq_door; + struct rte_mempool *pool; + struct nicvf *nic; + void (*pool_free)(struct nicvf_txq *sq); + uint32_t head; + uint32_t tail; + int32_t xmit_bufs; + uint32_t qlen_mask; + uint32_t txq_flags; + uint16_t queue_id; + uint16_t tx_free_thresh; +} __rte_cache_aligned; + +struct nicvf_rxq { + uint64_t mbuf_phys_off; + uint64_t cq_status; + uint64_t cq_door; + nicvf_phys_addr_t phys; + union cq_entry_t *desc; + struct nicvf_rbdr *shared_rbdr; + struct nicvf *nic; + struct rte_mempool *pool; + uint32_t head; + uint32_t qlen_mask; + int32_t available_space; + int32_t recv_buffers; + uint16_t rx_free_thresh; + uint16_t queue_id; + uint16_t precharge_cnt; + uint8_t rx_drop_en; + uint8_t port_id; + uint8_t rbptr_offset; +} __rte_cache_aligned; + +struct nicvf { + uint8_t vf_id; + uint8_t node; + uintptr_t reg_base; + bool tns_mode; + bool sqs_mode; + bool loopback_supported; + bool pf_acked:1; + bool pf_nacked:1; + uint64_t hwcap; + uint8_t link_up; + uint8_t duplex; + uint32_t speed; + uint32_t msg_enable; + uint16_t device_id; + uint16_t vendor_id; + uint16_t subsystem_device_id; + uint16_t subsystem_vendor_id; + struct nicvf_rbdr *rbdr; + struct nicvf_rss_reta_info rss_info; + struct rte_eth_dev *eth_dev; + struct rte_intr_handle intr_handle; + uint8_t cpi_alg; + uint16_t mtu; + bool vlan_filter_en; + uint8_t mac_addr[ETHER_ADDR_LEN]; +} __rte_cache_aligned; + +#endif /* _THUNDERX_NICVF_STRUCT_H */ diff --git a/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map b/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map new file mode 100644 index 00000000..1901bcb3 --- /dev/null +++ b/drivers/net/thunderx/rte_pmd_thunderx_nicvf_version.map @@ -0,0 +1,4 @@ +DPDK_16.07 { + + local: *; +}; diff --git a/drivers/net/vhost/Makefile b/drivers/net/vhost/Makefile index f49a69b3..050c5aa5 100644 --- a/drivers/net/vhost/Makefile +++ b/drivers/net/vhost/Makefile @@ -36,6 +36,8 @@ include $(RTE_SDK)/mk/rte.vars.mk # LIB = librte_pmd_vhost.a +LDLIBS += -lpthread + CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) @@ -54,7 +56,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += rte_eth_vhost.c SYMLINK-y-include += rte_eth_vhost.h # this lib depends upon: +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_eal DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mbuf +DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_mempool DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) 
+= lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_kvargs DEPDIRS-$(CONFIG_RTE_LIBRTE_PMD_VHOST) += lib/librte_vhost diff --git a/drivers/net/vhost/rte_eth_vhost.c b/drivers/net/vhost/rte_eth_vhost.c index 310cbefc..3b509465 100644 --- a/drivers/net/vhost/rte_eth_vhost.c +++ b/drivers/net/vhost/rte_eth_vhost.c @@ -50,12 +50,14 @@ #define ETH_VHOST_IFACE_ARG "iface" #define ETH_VHOST_QUEUES_ARG "queues" +#define ETH_VHOST_CLIENT_ARG "client" static const char *drivername = "VHOST PMD"; static const char *valid_arguments[] = { ETH_VHOST_IFACE_ARG, ETH_VHOST_QUEUES_ARG, + ETH_VHOST_CLIENT_ARG, NULL }; @@ -71,9 +73,9 @@ static struct ether_addr base_eth_addr = { }; struct vhost_queue { + int vid; rte_atomic32_t allow_queuing; rte_atomic32_t while_queuing; - struct virtio_net *device; struct pmd_internal *internal; struct rte_mempool *mb_pool; uint8_t port; @@ -89,6 +91,7 @@ struct pmd_internal { char *dev_name; char *iface_name; uint16_t max_queues; + uint64_t flags; volatile uint16_t once; }; @@ -139,7 +142,7 @@ eth_vhost_rx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) goto out; /* Dequeue packets from guest TX queue */ - nb_rx = rte_vhost_dequeue_burst(r->device, + nb_rx = rte_vhost_dequeue_burst(r->vid, r->virtqueue_id, r->mb_pool, bufs, nb_bufs); r->rx_pkts += nb_rx; @@ -170,7 +173,7 @@ eth_vhost_tx(void *q, struct rte_mbuf **bufs, uint16_t nb_bufs) goto out; /* Enqueue packets to guest RX queue */ - nb_tx = rte_vhost_enqueue_burst(r->device, + nb_tx = rte_vhost_enqueue_burst(r->vid, r->virtqueue_id, bufs, nb_bufs); r->tx_pkts += nb_tx; @@ -222,25 +225,22 @@ find_internal_resource(char *ifname) } static int -new_device(struct virtio_net *dev) +new_device(int vid) { struct rte_eth_dev *eth_dev; struct internal_list *list; struct pmd_internal *internal; struct vhost_queue *vq; unsigned i; + char ifname[PATH_MAX]; #ifdef RTE_LIBRTE_VHOST_NUMA - int newnode, ret; + int newnode; #endif - if (dev == NULL) { - RTE_LOG(INFO, PMD, "Invalid argument\n"); - return -1; - } - - list = find_internal_resource(dev->ifname); + rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); + list = find_internal_resource(ifname); if (list == NULL) { - RTE_LOG(INFO, PMD, "Invalid device name\n"); + RTE_LOG(INFO, PMD, "Invalid device name: %s\n", ifname); return -1; } @@ -248,21 +248,16 @@ new_device(struct virtio_net *dev) internal = eth_dev->data->dev_private; #ifdef RTE_LIBRTE_VHOST_NUMA - ret = get_mempolicy(&newnode, NULL, 0, dev, - MPOL_F_NODE | MPOL_F_ADDR); - if (ret < 0) { - RTE_LOG(ERR, PMD, "Unknown numa node\n"); - return -1; - } - - eth_dev->data->numa_node = newnode; + newnode = rte_vhost_get_numa_node(vid); + if (newnode >= 0) + eth_dev->data->numa_node = newnode; #endif for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { vq = eth_dev->data->rx_queues[i]; if (vq == NULL) continue; - vq->device = dev; + vq->vid = vid; vq->internal = internal; vq->port = eth_dev->data->port_id; } @@ -270,16 +265,14 @@ new_device(struct virtio_net *dev) vq = eth_dev->data->tx_queues[i]; if (vq == NULL) continue; - vq->device = dev; + vq->vid = vid; vq->internal = internal; vq->port = eth_dev->data->port_id; } - for (i = 0; i < dev->virt_qp_nb * VIRTIO_QNUM; i++) - rte_vhost_enable_guest_notification(dev, i, 0); + for (i = 0; i < rte_vhost_get_queue_num(vid) * VIRTIO_QNUM; i++) + rte_vhost_enable_guest_notification(vid, i, 0); - dev->flags |= VIRTIO_DEV_RUNNING; - dev->priv = eth_dev; eth_dev->data->dev_link.link_status = ETH_LINK_UP; for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { @@ -303,22 
+296,21 @@ new_device(struct virtio_net *dev) } static void -destroy_device(volatile struct virtio_net *dev) +destroy_device(int vid) { struct rte_eth_dev *eth_dev; struct vhost_queue *vq; + struct internal_list *list; + char ifname[PATH_MAX]; unsigned i; - if (dev == NULL) { - RTE_LOG(INFO, PMD, "Invalid argument\n"); - return; - } - - eth_dev = (struct rte_eth_dev *)dev->priv; - if (eth_dev == NULL) { - RTE_LOG(INFO, PMD, "Failed to find a ethdev\n"); + rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); + list = find_internal_resource(ifname); + if (list == NULL) { + RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname); return; } + eth_dev = list->eth_dev; /* Wait until rx/tx_pkt_burst stops accessing vhost device */ for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { @@ -340,20 +332,17 @@ destroy_device(volatile struct virtio_net *dev) eth_dev->data->dev_link.link_status = ETH_LINK_DOWN; - dev->priv = NULL; - dev->flags &= ~VIRTIO_DEV_RUNNING; - for (i = 0; i < eth_dev->data->nb_rx_queues; i++) { vq = eth_dev->data->rx_queues[i]; if (vq == NULL) continue; - vq->device = NULL; + vq->vid = -1; } for (i = 0; i < eth_dev->data->nb_tx_queues; i++) { vq = eth_dev->data->tx_queues[i]; if (vq == NULL) continue; - vq->device = NULL; + vq->vid = -1; } RTE_LOG(INFO, PMD, "Connection closed\n"); @@ -362,20 +351,17 @@ destroy_device(volatile struct virtio_net *dev) } static int -vring_state_changed(struct virtio_net *dev, uint16_t vring, int enable) +vring_state_changed(int vid, uint16_t vring, int enable) { struct rte_vhost_vring_state *state; struct rte_eth_dev *eth_dev; struct internal_list *list; + char ifname[PATH_MAX]; - if (dev == NULL) { - RTE_LOG(ERR, PMD, "Invalid argument\n"); - return -1; - } - - list = find_internal_resource(dev->ifname); + rte_vhost_get_ifname(vid, ifname, sizeof(ifname)); + list = find_internal_resource(ifname); if (list == NULL) { - RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", dev->ifname); + RTE_LOG(ERR, PMD, "Invalid interface name: %s\n", ifname); return -1; } @@ -484,7 +470,8 @@ eth_dev_start(struct rte_eth_dev *dev) int ret = 0; if (rte_atomic16_cmpset(&internal->once, 0, 1)) { - ret = rte_vhost_driver_register(internal->iface_name); + ret = rte_vhost_driver_register(internal->iface_name, + internal->flags); if (ret) return ret; } @@ -607,7 +594,7 @@ eth_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->ipackets = rx_total; stats->opackets = tx_total; - stats->imissed = tx_missed_total; + stats->oerrors = tx_missed_total; stats->ibytes = rx_total_bytes; stats->obytes = tx_total_bytes; } @@ -689,7 +676,7 @@ static const struct eth_dev_ops ops = { static int eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues, - const unsigned numa_node) + const unsigned numa_node, uint64_t flags) { struct rte_eth_dev_data *data = NULL; struct pmd_internal *internal = NULL; @@ -746,6 +733,7 @@ eth_dev_vhost_create(const char *name, char *iface_name, int16_t queues, internal->iface_name = strdup(iface_name); if (internal->iface_name == NULL) goto error; + internal->flags = flags; list->eth_dev = eth_dev; pthread_mutex_lock(&internal_list_lock); @@ -810,18 +798,15 @@ open_iface(const char *key __rte_unused, const char *value, void *extra_args) } static inline int -open_queues(const char *key __rte_unused, const char *value, void *extra_args) +open_int(const char *key __rte_unused, const char *value, void *extra_args) { - uint16_t *q = extra_args; + uint16_t *n = extra_args; if (value == NULL || extra_args == NULL) return -EINVAL; - 
*q = (uint16_t)strtoul(value, NULL, 0); - if (*q == USHRT_MAX && errno == ERANGE) - return -1; - - if (*q > RTE_MAX_QUEUES_PER_PORT) + *n = (uint16_t)strtoul(value, NULL, 0); + if (*n == USHRT_MAX && errno == ERANGE) return -1; return 0; @@ -834,6 +819,8 @@ rte_pmd_vhost_devinit(const char *name, const char *params) int ret = 0; char *iface_name; uint16_t queues; + uint64_t flags = 0; + int client_mode = 0; RTE_LOG(INFO, PMD, "Initializing pmd_vhost for %s\n", name); @@ -853,14 +840,24 @@ rte_pmd_vhost_devinit(const char *name, const char *params) if (rte_kvargs_count(kvlist, ETH_VHOST_QUEUES_ARG) == 1) { ret = rte_kvargs_process(kvlist, ETH_VHOST_QUEUES_ARG, - &open_queues, &queues); - if (ret < 0) + &open_int, &queues); + if (ret < 0 || queues > RTE_MAX_QUEUES_PER_PORT) goto out_free; } else queues = 1; - eth_dev_vhost_create(name, iface_name, queues, rte_socket_id()); + if (rte_kvargs_count(kvlist, ETH_VHOST_CLIENT_ARG) == 1) { + ret = rte_kvargs_process(kvlist, ETH_VHOST_CLIENT_ARG, + &open_int, &client_mode); + if (ret < 0) + goto out_free; + + if (client_mode) + flags |= RTE_VHOST_USER_CLIENT; + } + + eth_dev_vhost_create(name, iface_name, queues, rte_socket_id(), flags); out_free: rte_kvargs_free(kvlist); diff --git a/drivers/net/virtio/Makefile b/drivers/net/virtio/Makefile index ef84f604..3020b688 100644 --- a/drivers/net/virtio/Makefile +++ b/drivers/net/virtio/Makefile @@ -55,9 +55,16 @@ ifeq ($(findstring RTE_MACHINE_CPUFLAG_SSSE3,$(CFLAGS)),RTE_MACHINE_CPUFLAG_SSSE SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_rxtx_simple.c endif +ifeq ($(CONFIG_RTE_VIRTIO_USER),y) +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/vhost_user.c +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user/virtio_user_dev.c +SRCS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += virtio_user_ethdev.c +endif + # this lib depends upon: DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_eal lib/librte_ether DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_mempool lib/librte_mbuf DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_net +DEPDIRS-$(CONFIG_RTE_LIBRTE_VIRTIO_PMD) += lib/librte_kvargs include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/net/virtio/virtio_ethdev.c b/drivers/net/virtio/virtio_ethdev.c index 63a368ac..480daa37 100644 --- a/drivers/net/virtio/virtio_ethdev.c +++ b/drivers/net/virtio/virtio_ethdev.c @@ -1,7 +1,7 @@ /*- * BSD LICENSE * - * Copyright(c) 2010-2015 Intel Corporation. All rights reserved. + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -59,8 +59,6 @@ #include "virtqueue.h" #include "virtio_rxtx.h" - -static int eth_virtio_dev_init(struct rte_eth_dev *eth_dev); static int eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev); static int virtio_dev_configure(struct rte_eth_dev *dev); static int virtio_dev_start(struct rte_eth_dev *dev); @@ -80,7 +78,10 @@ static void virtio_get_hwaddr(struct virtio_hw *hw); static void virtio_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats); static int virtio_dev_xstats_get(struct rte_eth_dev *dev, - struct rte_eth_xstats *xstats, unsigned n); + struct rte_eth_xstat *xstats, unsigned n); +static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + unsigned limit); static void virtio_dev_stats_reset(struct rte_eth_dev *dev); static void virtio_dev_free_mbufs(struct rte_eth_dev *dev); static int virtio_vlan_filter_set(struct rte_eth_dev *dev, @@ -115,40 +116,61 @@ struct rte_virtio_xstats_name_off { }; /* [rt]x_qX_ is prepended to the name string here */ -static const struct rte_virtio_xstats_name_off rte_virtio_q_stat_strings[] = { - {"good_packets", offsetof(struct virtqueue, packets)}, - {"good_bytes", offsetof(struct virtqueue, bytes)}, - {"errors", offsetof(struct virtqueue, errors)}, - {"multicast_packets", offsetof(struct virtqueue, multicast)}, - {"broadcast_packets", offsetof(struct virtqueue, broadcast)}, - {"undersize_packets", offsetof(struct virtqueue, size_bins[0])}, - {"size_64_packets", offsetof(struct virtqueue, size_bins[1])}, - {"size_65_127_packets", offsetof(struct virtqueue, size_bins[2])}, - {"size_128_255_packets", offsetof(struct virtqueue, size_bins[3])}, - {"size_256_511_packets", offsetof(struct virtqueue, size_bins[4])}, - {"size_512_1023_packets", offsetof(struct virtqueue, size_bins[5])}, - {"size_1024_1517_packets", offsetof(struct virtqueue, size_bins[6])}, - {"size_1518_max_packets", offsetof(struct virtqueue, size_bins[7])}, +static const struct rte_virtio_xstats_name_off rte_virtio_rxq_stat_strings[] = { + {"good_packets", offsetof(struct virtnet_rx, stats.packets)}, + {"good_bytes", offsetof(struct virtnet_rx, stats.bytes)}, + {"errors", offsetof(struct virtnet_rx, stats.errors)}, + {"multicast_packets", offsetof(struct virtnet_rx, stats.multicast)}, + {"broadcast_packets", offsetof(struct virtnet_rx, stats.broadcast)}, + {"undersize_packets", offsetof(struct virtnet_rx, stats.size_bins[0])}, + {"size_64_packets", offsetof(struct virtnet_rx, stats.size_bins[1])}, + {"size_65_127_packets", offsetof(struct virtnet_rx, stats.size_bins[2])}, + {"size_128_255_packets", offsetof(struct virtnet_rx, stats.size_bins[3])}, + {"size_256_511_packets", offsetof(struct virtnet_rx, stats.size_bins[4])}, + {"size_512_1023_packets", offsetof(struct virtnet_rx, stats.size_bins[5])}, + {"size_1024_1517_packets", offsetof(struct virtnet_rx, stats.size_bins[6])}, + {"size_1518_max_packets", offsetof(struct virtnet_rx, stats.size_bins[7])}, +}; + +/* [rt]x_qX_ is prepended to the name string here */ +static const struct rte_virtio_xstats_name_off rte_virtio_txq_stat_strings[] = { + {"good_packets", offsetof(struct virtnet_tx, stats.packets)}, + {"good_bytes", offsetof(struct virtnet_tx, stats.bytes)}, + {"errors", offsetof(struct virtnet_tx, stats.errors)}, + {"multicast_packets", offsetof(struct virtnet_tx, stats.multicast)}, + {"broadcast_packets", offsetof(struct virtnet_tx, stats.broadcast)}, + {"undersize_packets", offsetof(struct 
virtnet_tx, stats.size_bins[0])}, + {"size_64_packets", offsetof(struct virtnet_tx, stats.size_bins[1])}, + {"size_65_127_packets", offsetof(struct virtnet_tx, stats.size_bins[2])}, + {"size_128_255_packets", offsetof(struct virtnet_tx, stats.size_bins[3])}, + {"size_256_511_packets", offsetof(struct virtnet_tx, stats.size_bins[4])}, + {"size_512_1023_packets", offsetof(struct virtnet_tx, stats.size_bins[5])}, + {"size_1024_1517_packets", offsetof(struct virtnet_tx, stats.size_bins[6])}, + {"size_1518_max_packets", offsetof(struct virtnet_tx, stats.size_bins[7])}, }; -#define VIRTIO_NB_Q_XSTATS (sizeof(rte_virtio_q_stat_strings) / \ - sizeof(rte_virtio_q_stat_strings[0])) +#define VIRTIO_NB_RXQ_XSTATS (sizeof(rte_virtio_rxq_stat_strings) / \ + sizeof(rte_virtio_rxq_stat_strings[0])) +#define VIRTIO_NB_TXQ_XSTATS (sizeof(rte_virtio_txq_stat_strings) / \ + sizeof(rte_virtio_txq_stat_strings[0])) static int -virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, +virtio_send_command(struct virtnet_ctl *cvq, struct virtio_pmd_ctrl *ctrl, int *dlen, int pkt_num) { uint32_t head, i; int k, sum = 0; virtio_net_ctrl_ack status = ~0; struct virtio_pmd_ctrl result; + struct virtqueue *vq; ctrl->status = status; - if (!(vq && vq->hw->cvq)) { + if (!cvq && !cvq->vq) { PMD_INIT_LOG(ERR, "Control queue is not supported."); return -1; } + vq = cvq->vq; head = vq->vq_desc_head_idx; PMD_INIT_LOG(DEBUG, "vq->vq_desc_head_idx = %d, status = %d, " @@ -158,7 +180,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, if ((vq->vq_free_cnt < ((uint32_t)pkt_num + 2)) || (pkt_num < 1)) return -1; - memcpy(vq->virtio_net_hdr_mz->addr, ctrl, + memcpy(cvq->virtio_net_hdr_mz->addr, ctrl, sizeof(struct virtio_pmd_ctrl)); /* @@ -168,14 +190,14 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, * One RX packet for ACK. 
*/ vq->vq_ring.desc[head].flags = VRING_DESC_F_NEXT; - vq->vq_ring.desc[head].addr = vq->virtio_net_hdr_mz->phys_addr; + vq->vq_ring.desc[head].addr = cvq->virtio_net_hdr_mem; vq->vq_ring.desc[head].len = sizeof(struct virtio_net_ctrl_hdr); vq->vq_free_cnt--; i = vq->vq_ring.desc[head].next; for (k = 0; k < pkt_num; k++) { vq->vq_ring.desc[i].flags = VRING_DESC_F_NEXT; - vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr + vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem + sizeof(struct virtio_net_ctrl_hdr) + sizeof(ctrl->status) + sizeof(uint8_t)*sum; vq->vq_ring.desc[i].len = dlen[k]; @@ -185,7 +207,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, } vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; - vq->vq_ring.desc[i].addr = vq->virtio_net_hdr_mz->phys_addr + vq->vq_ring.desc[i].addr = cvq->virtio_net_hdr_mem + sizeof(struct virtio_net_ctrl_hdr); vq->vq_ring.desc[i].len = sizeof(ctrl->status); vq->vq_free_cnt--; @@ -200,12 +222,12 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, virtqueue_notify(vq); rte_rmb(); - while (vq->vq_used_cons_idx == vq->vq_ring.used->idx) { + while (VIRTQUEUE_NUSED(vq) == 0) { rte_rmb(); usleep(100); } - while (vq->vq_used_cons_idx != vq->vq_ring.used->idx) { + while (VIRTQUEUE_NUSED(vq)) { uint32_t idx, desc_idx, used_idx; struct vring_used_elem *uep; @@ -230,7 +252,7 @@ virtio_send_command(struct virtqueue *vq, struct virtio_pmd_ctrl *ctrl, PMD_INIT_LOG(DEBUG, "vq->vq_free_cnt=%d\nvq->vq_desc_head_idx=%d", vq->vq_free_cnt, vq->vq_desc_head_idx); - memcpy(&result, vq->virtio_net_hdr_mz->addr, + memcpy(&result, cvq->virtio_net_hdr_mz->addr, sizeof(struct virtio_pmd_ctrl)); return result.status; @@ -261,12 +283,14 @@ virtio_set_multiple_queues(struct rte_eth_dev *dev, uint16_t nb_queues) } void -virtio_dev_queue_release(struct virtqueue *vq) { +virtio_dev_queue_release(struct virtqueue *vq) +{ struct virtio_hw *hw; if (vq) { hw = vq->hw; - hw->vtpci_ops->del_queue(hw, vq); + if (vq->configured) + hw->vtpci_ops->del_queue(hw, vq); rte_free(vq->sw_ring); rte_free(vq); @@ -279,13 +303,21 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx, uint16_t nb_desc, unsigned int socket_id, - struct virtqueue **pvq) + void **pvq) { char vq_name[VIRTQUEUE_MAX_NAME_SZ]; - const struct rte_memzone *mz; + char vq_hdr_name[VIRTQUEUE_MAX_NAME_SZ]; + const struct rte_memzone *mz = NULL, *hdr_mz = NULL; unsigned int vq_size, size; struct virtio_hw *hw = dev->data->dev_private; - struct virtqueue *vq = NULL; + struct virtnet_rx *rxvq = NULL; + struct virtnet_tx *txvq = NULL; + struct virtnet_ctl *cvq = NULL; + struct virtqueue *vq; + const char *queue_names[] = {"rvq", "txq", "cvq"}; + size_t sz_vq, sz_q = 0, sz_hdr_mz = 0; + void *sw_ring = NULL; + int ret; PMD_INIT_LOG(DEBUG, "setting up queue: %u", vtpci_queue_idx); @@ -305,39 +337,33 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, return -EINVAL; } + snprintf(vq_name, sizeof(vq_name), "port%d_%s%d", + dev->data->port_id, queue_names[queue_type], queue_idx); + + sz_vq = RTE_ALIGN_CEIL(sizeof(*vq) + + vq_size * sizeof(struct vq_desc_extra), + RTE_CACHE_LINE_SIZE); if (queue_type == VTNET_RQ) { - snprintf(vq_name, sizeof(vq_name), "port%d_rvq%d", - dev->data->port_id, queue_idx); - vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + - vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE); - vq->sw_ring = rte_zmalloc_socket("rxq->sw_ring", - (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) * - sizeof(vq->sw_ring[0]), 
RTE_CACHE_LINE_SIZE, socket_id); + sz_q = sz_vq + sizeof(*rxvq); } else if (queue_type == VTNET_TQ) { - snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d", - dev->data->port_id, queue_idx); - vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + - vq_size * sizeof(struct vq_desc_extra), RTE_CACHE_LINE_SIZE); + sz_q = sz_vq + sizeof(*txvq); + /* + * For each xmit packet, allocate a virtio_net_hdr + * and indirect ring elements + */ + sz_hdr_mz = vq_size * sizeof(struct virtio_tx_region); } else if (queue_type == VTNET_CQ) { - snprintf(vq_name, sizeof(vq_name), "port%d_cvq", - dev->data->port_id); - vq = rte_zmalloc(vq_name, sizeof(struct virtqueue) + - vq_size * sizeof(struct vq_desc_extra), - RTE_CACHE_LINE_SIZE); + sz_q = sz_vq + sizeof(*cvq); + /* Allocate a page for control vq command, data and status */ + sz_hdr_mz = PAGE_SIZE; } + + vq = rte_zmalloc_socket(vq_name, sz_q, RTE_CACHE_LINE_SIZE, socket_id); if (vq == NULL) { - PMD_INIT_LOG(ERR, "Can not allocate virtqueue"); + PMD_INIT_LOG(ERR, "can not allocate vq"); return -ENOMEM; } - if (queue_type == VTNET_RQ && vq->sw_ring == NULL) { - PMD_INIT_LOG(ERR, "Can not allocate RX soft ring"); - rte_free(vq); - return -ENOMEM; - } - vq->hw = hw; - vq->port_id = dev->data->port_id; - vq->queue_id = queue_idx; vq->vq_queue_index = vtpci_queue_idx; vq->vq_nentries = vq_size; @@ -350,64 +376,103 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, */ size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN); vq->vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN); - PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", size, vq->vq_ring_size); + PMD_INIT_LOG(DEBUG, "vring_size: %d, rounded_vring_size: %d", + size, vq->vq_ring_size); - mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, - socket_id, 0, VIRTIO_PCI_VRING_ALIGN); + mz = rte_memzone_reserve_aligned(vq_name, vq->vq_ring_size, socket_id, + 0, VIRTIO_PCI_VRING_ALIGN); if (mz == NULL) { if (rte_errno == EEXIST) mz = rte_memzone_lookup(vq_name); if (mz == NULL) { - rte_free(vq); - return -ENOMEM; + ret = -ENOMEM; + goto fail_q_alloc; } } - /* - * Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, - * and only accepts 32 bit page frame number. - * Check if the allocated physical memory exceeds 16TB. 
- */ - if ((mz->phys_addr + vq->vq_ring_size - 1) >> (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { - PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); - rte_free(vq); - return -ENOMEM; - } - memset(mz->addr, 0, sizeof(mz->len)); - vq->mz = mz; + vq->vq_ring_mem = mz->phys_addr; vq->vq_ring_virt_mem = mz->addr; - PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%"PRIx64, (uint64_t)mz->phys_addr); - PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%"PRIx64, (uint64_t)(uintptr_t)mz->addr); - vq->virtio_net_hdr_mz = NULL; - vq->virtio_net_hdr_mem = 0; - - if (queue_type == VTNET_TQ) { - const struct rte_memzone *hdr_mz; - struct virtio_tx_region *txr; - unsigned int i; - - /* - * For each xmit packet, allocate a virtio_net_hdr - * and indirect ring elements - */ - snprintf(vq_name, sizeof(vq_name), "port%d_tvq%d_hdrzone", - dev->data->port_id, queue_idx); - hdr_mz = rte_memzone_reserve_aligned(vq_name, - vq_size * sizeof(*txr), + PMD_INIT_LOG(DEBUG, "vq->vq_ring_mem: 0x%" PRIx64, + (uint64_t)mz->phys_addr); + PMD_INIT_LOG(DEBUG, "vq->vq_ring_virt_mem: 0x%" PRIx64, + (uint64_t)(uintptr_t)mz->addr); + + if (sz_hdr_mz) { + snprintf(vq_hdr_name, sizeof(vq_hdr_name), "port%d_%s%d_hdr", + dev->data->port_id, queue_names[queue_type], + queue_idx); + hdr_mz = rte_memzone_reserve_aligned(vq_hdr_name, sz_hdr_mz, socket_id, 0, RTE_CACHE_LINE_SIZE); if (hdr_mz == NULL) { if (rte_errno == EEXIST) - hdr_mz = rte_memzone_lookup(vq_name); + hdr_mz = rte_memzone_lookup(vq_hdr_name); if (hdr_mz == NULL) { - rte_free(vq); - return -ENOMEM; + ret = -ENOMEM; + goto fail_q_alloc; } } - vq->virtio_net_hdr_mz = hdr_mz; - vq->virtio_net_hdr_mem = hdr_mz->phys_addr; + } + + if (queue_type == VTNET_RQ) { + size_t sz_sw = (RTE_PMD_VIRTIO_RX_MAX_BURST + vq_size) * + sizeof(vq->sw_ring[0]); + + sw_ring = rte_zmalloc_socket("sw_ring", sz_sw, + RTE_CACHE_LINE_SIZE, socket_id); + if (!sw_ring) { + PMD_INIT_LOG(ERR, "can not allocate RX soft ring"); + ret = -ENOMEM; + goto fail_q_alloc; + } + + vq->sw_ring = sw_ring; + rxvq = (struct virtnet_rx *)RTE_PTR_ADD(vq, sz_vq); + rxvq->vq = vq; + rxvq->port_id = dev->data->port_id; + rxvq->queue_id = queue_idx; + rxvq->mz = mz; + *pvq = rxvq; + } else if (queue_type == VTNET_TQ) { + txvq = (struct virtnet_tx *)RTE_PTR_ADD(vq, sz_vq); + txvq->vq = vq; + txvq->port_id = dev->data->port_id; + txvq->queue_id = queue_idx; + txvq->mz = mz; + txvq->virtio_net_hdr_mz = hdr_mz; + txvq->virtio_net_hdr_mem = hdr_mz->phys_addr; + + *pvq = txvq; + } else if (queue_type == VTNET_CQ) { + cvq = (struct virtnet_ctl *)RTE_PTR_ADD(vq, sz_vq); + cvq->vq = vq; + cvq->mz = mz; + cvq->virtio_net_hdr_mz = hdr_mz; + cvq->virtio_net_hdr_mem = hdr_mz->phys_addr; + memset(cvq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE); + *pvq = cvq; + } + + /* For virtio-user case (that is when dev->pci_dev is NULL), we use + * virtual address. And we need properly set _offset_, please see + * MBUF_DATA_DMA_ADDR in virtqueue.h for more information. 
+ */ + if (dev->pci_dev) + vq->offset = offsetof(struct rte_mbuf, buf_physaddr); + else { + vq->vq_ring_mem = (uintptr_t)mz->addr; + vq->offset = offsetof(struct rte_mbuf, buf_addr); + if (queue_type == VTNET_TQ) + txvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr; + else if (queue_type == VTNET_CQ) + cvq->virtio_net_hdr_mem = (uintptr_t)hdr_mz->addr; + } + + if (queue_type == VTNET_TQ) { + struct virtio_tx_region *txr; + unsigned int i; txr = hdr_mz->addr; memset(txr, 0, vq_size * sizeof(*txr)); @@ -417,57 +482,50 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, vring_desc_init(start_dp, RTE_DIM(txr[i].tx_indir)); /* first indirect descriptor is always the tx header */ - start_dp->addr = vq->virtio_net_hdr_mem + start_dp->addr = txvq->virtio_net_hdr_mem + i * sizeof(*txr) + offsetof(struct virtio_tx_region, tx_hdr); - start_dp->len = vq->hw->vtnet_hdr_size; + start_dp->len = hw->vtnet_hdr_size; start_dp->flags = VRING_DESC_F_NEXT; } - - } else if (queue_type == VTNET_CQ) { - /* Allocate a page for control vq command, data and status */ - snprintf(vq_name, sizeof(vq_name), "port%d_cvq_hdrzone", - dev->data->port_id); - vq->virtio_net_hdr_mz = rte_memzone_reserve_aligned(vq_name, - PAGE_SIZE, socket_id, 0, RTE_CACHE_LINE_SIZE); - if (vq->virtio_net_hdr_mz == NULL) { - if (rte_errno == EEXIST) - vq->virtio_net_hdr_mz = - rte_memzone_lookup(vq_name); - if (vq->virtio_net_hdr_mz == NULL) { - rte_free(vq); - return -ENOMEM; - } - } - vq->virtio_net_hdr_mem = - vq->virtio_net_hdr_mz->phys_addr; - memset(vq->virtio_net_hdr_mz->addr, 0, PAGE_SIZE); } - hw->vtpci_ops->setup_queue(hw, vq); + if (hw->vtpci_ops->setup_queue(hw, vq) < 0) { + PMD_INIT_LOG(ERR, "setup_queue failed"); + virtio_dev_queue_release(vq); + return -EINVAL; + } - *pvq = vq; + vq->configured = 1; return 0; + +fail_q_alloc: + rte_free(sw_ring); + rte_memzone_free(hdr_mz); + rte_memzone_free(mz); + rte_free(vq); + + return ret; } static int virtio_dev_cq_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx, uint32_t socket_id) { - struct virtqueue *vq; + struct virtnet_ctl *cvq; int ret; struct virtio_hw *hw = dev->data->dev_private; PMD_INIT_FUNC_TRACE(); ret = virtio_dev_queue_setup(dev, VTNET_CQ, VTNET_SQ_CQ_QUEUE_IDX, - vtpci_queue_idx, 0, socket_id, &vq); + vtpci_queue_idx, 0, socket_id, (void **)&cvq); if (ret < 0) { PMD_INIT_LOG(ERR, "control vq initialization failed"); return ret; } - hw->cvq = vq; + hw->cvq = cvq; return 0; } @@ -491,7 +549,6 @@ static void virtio_dev_close(struct rte_eth_dev *dev) { struct virtio_hw *hw = dev->data->dev_private; - struct rte_pci_device *pci_dev = dev->pci_dev; PMD_INIT_LOG(DEBUG, "virtio_dev_close"); @@ -499,7 +556,7 @@ virtio_dev_close(struct rte_eth_dev *dev) virtio_dev_stop(dev); /* reset the NIC */ - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) vtpci_irq_config(hw, VIRTIO_MSI_NO_VECTOR); vtpci_reset(hw); virtio_dev_free_mbufs(dev); @@ -614,6 +671,7 @@ static const struct eth_dev_ops virtio_eth_dev_ops = { .dev_infos_get = virtio_dev_info_get, .stats_get = virtio_dev_stats_get, .xstats_get = virtio_dev_xstats_get, + .xstats_get_names = virtio_dev_xstats_get_names, .stats_reset = virtio_dev_stats_reset, .xstats_reset = virtio_dev_stats_reset, .link_update = virtio_dev_link_update, @@ -675,83 +733,121 @@ virtio_update_stats(struct rte_eth_dev *dev, struct rte_eth_stats *stats) unsigned i; for (i = 0; i < dev->data->nb_tx_queues; i++) { - const struct virtqueue *txvq = dev->data->tx_queues[i]; + const struct 
virtnet_tx *txvq = dev->data->tx_queues[i]; if (txvq == NULL) continue; - stats->opackets += txvq->packets; - stats->obytes += txvq->bytes; - stats->oerrors += txvq->errors; + stats->opackets += txvq->stats.packets; + stats->obytes += txvq->stats.bytes; + stats->oerrors += txvq->stats.errors; if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { - stats->q_opackets[i] = txvq->packets; - stats->q_obytes[i] = txvq->bytes; + stats->q_opackets[i] = txvq->stats.packets; + stats->q_obytes[i] = txvq->stats.bytes; } } for (i = 0; i < dev->data->nb_rx_queues; i++) { - const struct virtqueue *rxvq = dev->data->rx_queues[i]; + const struct virtnet_rx *rxvq = dev->data->rx_queues[i]; if (rxvq == NULL) continue; - stats->ipackets += rxvq->packets; - stats->ibytes += rxvq->bytes; - stats->ierrors += rxvq->errors; + stats->ipackets += rxvq->stats.packets; + stats->ibytes += rxvq->stats.bytes; + stats->ierrors += rxvq->stats.errors; if (i < RTE_ETHDEV_QUEUE_STAT_CNTRS) { - stats->q_ipackets[i] = rxvq->packets; - stats->q_ibytes[i] = rxvq->bytes; + stats->q_ipackets[i] = rxvq->stats.packets; + stats->q_ibytes[i] = rxvq->stats.bytes; } } stats->rx_nombuf = dev->data->rx_mbuf_alloc_failed; } +static int virtio_dev_xstats_get_names(struct rte_eth_dev *dev, + struct rte_eth_xstat_name *xstats_names, + __rte_unused unsigned limit) +{ + unsigned i; + unsigned count = 0; + unsigned t; + + unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS + + dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS; + + if (xstats_names != NULL) { + /* Note: limit checked in rte_eth_xstats_names() */ + + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct virtqueue *rxvq = dev->data->rx_queues[i]; + if (rxvq == NULL) + continue; + for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "rx_q%u_%s", i, + rte_virtio_rxq_stat_strings[t].name); + count++; + } + } + + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct virtqueue *txvq = dev->data->tx_queues[i]; + if (txvq == NULL) + continue; + for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) { + snprintf(xstats_names[count].name, + sizeof(xstats_names[count].name), + "tx_q%u_%s", i, + rte_virtio_txq_stat_strings[t].name); + count++; + } + } + return count; + } + return nstats; +} + static int -virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstats *xstats, +virtio_dev_xstats_get(struct rte_eth_dev *dev, struct rte_eth_xstat *xstats, unsigned n) { unsigned i; unsigned count = 0; - unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_Q_XSTATS + - dev->data->nb_rx_queues * VIRTIO_NB_Q_XSTATS; + unsigned nstats = dev->data->nb_tx_queues * VIRTIO_NB_TXQ_XSTATS + + dev->data->nb_rx_queues * VIRTIO_NB_RXQ_XSTATS; if (n < nstats) return nstats; for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct virtqueue *rxvq = dev->data->rx_queues[i]; + struct virtnet_rx *rxvq = dev->data->rx_queues[i]; if (rxvq == NULL) continue; unsigned t; - for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) { - snprintf(xstats[count].name, sizeof(xstats[count].name), - "rx_q%u_%s", i, - rte_virtio_q_stat_strings[t].name); + for (t = 0; t < VIRTIO_NB_RXQ_XSTATS; t++) { xstats[count].value = *(uint64_t *)(((char *)rxvq) + - rte_virtio_q_stat_strings[t].offset); + rte_virtio_rxq_stat_strings[t].offset); count++; } } for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct virtqueue *txvq = dev->data->tx_queues[i]; + struct virtnet_tx *txvq = dev->data->tx_queues[i]; if (txvq == NULL) continue; unsigned t; - for (t = 0; t < VIRTIO_NB_Q_XSTATS; t++) { - 
snprintf(xstats[count].name, sizeof(xstats[count].name), - "tx_q%u_%s", i, - rte_virtio_q_stat_strings[t].name); + for (t = 0; t < VIRTIO_NB_TXQ_XSTATS; t++) { xstats[count].value = *(uint64_t *)(((char *)txvq) + - rte_virtio_q_stat_strings[t].offset); + rte_virtio_txq_stat_strings[t].offset); count++; } } @@ -771,29 +867,31 @@ virtio_dev_stats_reset(struct rte_eth_dev *dev) unsigned int i; for (i = 0; i < dev->data->nb_tx_queues; i++) { - struct virtqueue *txvq = dev->data->tx_queues[i]; + struct virtnet_tx *txvq = dev->data->tx_queues[i]; if (txvq == NULL) continue; - txvq->packets = 0; - txvq->bytes = 0; - txvq->errors = 0; - txvq->multicast = 0; - txvq->broadcast = 0; - memset(txvq->size_bins, 0, sizeof(txvq->size_bins[0]) * 8); + txvq->stats.packets = 0; + txvq->stats.bytes = 0; + txvq->stats.errors = 0; + txvq->stats.multicast = 0; + txvq->stats.broadcast = 0; + memset(txvq->stats.size_bins, 0, + sizeof(txvq->stats.size_bins[0]) * 8); } for (i = 0; i < dev->data->nb_rx_queues; i++) { - struct virtqueue *rxvq = dev->data->rx_queues[i]; + struct virtnet_rx *rxvq = dev->data->rx_queues[i]; if (rxvq == NULL) continue; - rxvq->packets = 0; - rxvq->bytes = 0; - rxvq->errors = 0; - rxvq->multicast = 0; - rxvq->broadcast = 0; - memset(rxvq->size_bins, 0, sizeof(rxvq->size_bins[0]) * 8); + rxvq->stats.packets = 0; + rxvq->stats.bytes = 0; + rxvq->stats.errors = 0; + rxvq->stats.multicast = 0; + rxvq->stats.broadcast = 0; + memset(rxvq->stats.size_bins, 0, + sizeof(rxvq->stats.size_bins[0]) * 8); } } @@ -827,7 +925,7 @@ virtio_mac_table_set(struct virtio_hw *hw, int err, len[2]; if (!vtpci_with_feature(hw, VIRTIO_NET_F_CTRL_MAC_ADDR)) { - PMD_DRV_LOG(INFO, "host does not support mac table\n"); + PMD_DRV_LOG(INFO, "host does not support mac table"); return; } @@ -1027,16 +1125,17 @@ rx_func_get(struct rte_eth_dev *eth_dev) * This function is based on probe() function in virtio_pci.c * It returns 0 on success. 
*/ -static int +int eth_virtio_dev_init(struct rte_eth_dev *eth_dev) { struct virtio_hw *hw = eth_dev->data->dev_private; struct virtio_net_config *config; struct virtio_net_config local_config; struct rte_pci_device *pci_dev; + uint32_t dev_flags = RTE_ETH_DEV_DETACHABLE; int ret; - RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr)); + RTE_BUILD_BUG_ON(RTE_PKTMBUF_HEADROOM < sizeof(struct virtio_net_hdr_mrg_rxbuf)); eth_dev->dev_ops = &virtio_eth_dev_ops; eth_dev->tx_pkt_burst = &virtio_xmit_pkts; @@ -1057,9 +1156,11 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) pci_dev = eth_dev->pci_dev; - ret = vtpci_init(pci_dev, hw); - if (ret) - return ret; + if (pci_dev) { + ret = vtpci_init(pci_dev, hw, &dev_flags); + if (ret) + return ret; + } /* Reset the device although not necessary at startup */ vtpci_reset(hw); @@ -1074,9 +1175,10 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) /* If host does not support status then disable LSC */ if (!vtpci_with_feature(hw, VIRTIO_NET_F_STATUS)) - pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC; + dev_flags &= ~RTE_ETH_DEV_INTR_LSC; rte_eth_copy_pci_info(eth_dev, pci_dev); + eth_dev->data->dev_flags = dev_flags; rx_func_get(eth_dev); @@ -1150,12 +1252,13 @@ eth_virtio_dev_init(struct rte_eth_dev *eth_dev) PMD_INIT_LOG(DEBUG, "hw->max_rx_queues=%d hw->max_tx_queues=%d", hw->max_rx_queues, hw->max_tx_queues); - PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", + if (pci_dev) + PMD_INIT_LOG(DEBUG, "port %d vendorID=0x%x deviceID=0x%x", eth_dev->data->port_id, pci_dev->id.vendor_id, pci_dev->id.device_id); /* Setup interrupt callback */ - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) rte_intr_callback_register(&pci_dev->intr_handle, virtio_interrupt_handler, eth_dev); @@ -1184,13 +1287,14 @@ eth_virtio_dev_uninit(struct rte_eth_dev *eth_dev) eth_dev->tx_pkt_burst = NULL; eth_dev->rx_pkt_burst = NULL; - virtio_dev_queue_release(hw->cvq); + if (hw->cvq) + virtio_dev_queue_release(hw->cvq->vq); rte_free(eth_dev->data->mac_addrs); eth_dev->data->mac_addrs = NULL; /* reset interrupt callback */ - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + if (eth_dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) rte_intr_callback_unregister(&pci_dev->intr_handle, virtio_interrupt_handler, eth_dev); @@ -1240,7 +1344,6 @@ virtio_dev_configure(struct rte_eth_dev *dev) { const struct rte_eth_rxmode *rxmode = &dev->data->dev_conf.rxmode; struct virtio_hw *hw = dev->data->dev_private; - struct rte_pci_device *pci_dev = dev->pci_dev; PMD_INIT_LOG(DEBUG, "configure"); @@ -1258,7 +1361,7 @@ virtio_dev_configure(struct rte_eth_dev *dev) return -ENOTSUP; } - if (pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC) + if (dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC) if (vtpci_irq_config(hw, 0) == VIRTIO_MSI_NO_VECTOR) { PMD_DRV_LOG(ERR, "failed to set config vector"); return -EBUSY; @@ -1273,11 +1376,12 @@ virtio_dev_start(struct rte_eth_dev *dev) { uint16_t nb_queues, i; struct virtio_hw *hw = dev->data->dev_private; - struct rte_pci_device *pci_dev = dev->pci_dev; + struct virtnet_rx *rxvq; + struct virtnet_tx *txvq __rte_unused; /* check if lsc interrupt feature is enabled */ if (dev->data->dev_conf.intr_conf.lsc) { - if (!(pci_dev->driver->drv_flags & RTE_PCI_DRV_INTR_LSC)) { + if (!(dev->data->dev_flags & RTE_ETH_DEV_INTR_LSC)) { PMD_DRV_LOG(ERR, "link status not supported by host"); return -ENOTSUP; } @@ -1313,16 +1417,22 @@ virtio_dev_start(struct rte_eth_dev *dev) 
PMD_INIT_LOG(DEBUG, "nb_queues=%d", nb_queues); - for (i = 0; i < nb_queues; i++) - virtqueue_notify(dev->data->rx_queues[i]); + for (i = 0; i < nb_queues; i++) { + rxvq = dev->data->rx_queues[i]; + virtqueue_notify(rxvq->vq); + } PMD_INIT_LOG(DEBUG, "Notified backend at initialization"); - for (i = 0; i < dev->data->nb_rx_queues; i++) - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + for (i = 0; i < dev->data->nb_rx_queues; i++) { + rxvq = dev->data->rx_queues[i]; + VIRTQUEUE_DUMP(rxvq->vq); + } - for (i = 0; i < dev->data->nb_tx_queues; i++) - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + for (i = 0; i < dev->data->nb_tx_queues; i++) { + txvq = dev->data->tx_queues[i]; + VIRTQUEUE_DUMP(txvq->vq); + } return 0; } @@ -1333,14 +1443,14 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev) int i, mbuf_num = 0; for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct virtnet_rx *rxvq = dev->data->rx_queues[i]; + PMD_INIT_LOG(DEBUG, "Before freeing rxq[%d] used and unused buf", i); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + VIRTQUEUE_DUMP(rxvq->vq); - PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", - i, dev->data->rx_queues[i]); - while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused( - dev->data->rx_queues[i])) != NULL) { + PMD_INIT_LOG(DEBUG, "rx_queues[%d]=%p", i, rxvq); + while ((buf = virtqueue_detatch_unused(rxvq->vq)) != NULL) { rte_pktmbuf_free(buf); mbuf_num++; } @@ -1348,27 +1458,27 @@ static void virtio_dev_free_mbufs(struct rte_eth_dev *dev) PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num); PMD_INIT_LOG(DEBUG, "After freeing rxq[%d] used and unused buf", i); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); + VIRTQUEUE_DUMP(rxvq->vq); } for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct virtnet_tx *txvq = dev->data->tx_queues[i]; + PMD_INIT_LOG(DEBUG, "Before freeing txq[%d] used and unused bufs", i); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + VIRTQUEUE_DUMP(txvq->vq); mbuf_num = 0; - while ((buf = (struct rte_mbuf *)virtqueue_detatch_unused( - dev->data->tx_queues[i])) != NULL) { + while ((buf = virtqueue_detatch_unused(txvq->vq)) != NULL) { rte_pktmbuf_free(buf); - mbuf_num++; } PMD_INIT_LOG(DEBUG, "free %d mbufs", mbuf_num); PMD_INIT_LOG(DEBUG, "After freeing txq[%d] used and unused buf", i); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + VIRTQUEUE_DUMP(txvq->vq); } } @@ -1431,7 +1541,10 @@ virtio_dev_info_get(struct rte_eth_dev *dev, struct rte_eth_dev_info *dev_info) { struct virtio_hw *hw = dev->data->dev_private; - dev_info->driver_name = dev->driver->pci_drv.name; + if (dev->pci_dev) + dev_info->driver_name = dev->driver->pci_drv.name; + else + dev_info->driver_name = "virtio-user PMD"; dev_info->max_rx_queues = (uint16_t)hw->max_rx_queues; dev_info->max_tx_queues = (uint16_t)hw->max_tx_queues; dev_info->min_rx_bufsize = VIRTIO_MIN_RX_BUFSIZE; diff --git a/drivers/net/virtio/virtio_ethdev.h b/drivers/net/virtio/virtio_ethdev.h index 66423a07..2ecec6eb 100644 --- a/drivers/net/virtio/virtio_ethdev.h +++ b/drivers/net/virtio/virtio_ethdev.h @@ -81,7 +81,7 @@ int virtio_dev_queue_setup(struct rte_eth_dev *dev, uint16_t vtpci_queue_idx, uint16_t nb_desc, unsigned int socket_id, - struct virtqueue **pvq); + void **pvq); void virtio_dev_queue_release(struct virtqueue *vq); @@ -113,6 +113,8 @@ uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts); +int 
eth_virtio_dev_init(struct rte_eth_dev *eth_dev); + /* * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us * frames larger than 1514 bytes. We do not yet support software LRO diff --git a/drivers/net/virtio/virtio_logs.h b/drivers/net/virtio/virtio_logs.h index d6c33f7b..90a79eaa 100644 --- a/drivers/net/virtio/virtio_logs.h +++ b/drivers/net/virtio/virtio_logs.h @@ -47,14 +47,14 @@ #ifdef RTE_LIBRTE_VIRTIO_DEBUG_RX #define PMD_RX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s() rx: " fmt , __func__, ## args) + RTE_LOG(level, PMD, "%s() rx: " fmt "\n", __func__, ## args) #else #define PMD_RX_LOG(level, fmt, args...) do { } while(0) #endif #ifdef RTE_LIBRTE_VIRTIO_DEBUG_TX #define PMD_TX_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s() tx: " fmt , __func__, ## args) + RTE_LOG(level, PMD, "%s() tx: " fmt "\n", __func__, ## args) #else #define PMD_TX_LOG(level, fmt, args...) do { } while(0) #endif @@ -62,7 +62,7 @@ #ifdef RTE_LIBRTE_VIRTIO_DEBUG_DRIVER #define PMD_DRV_LOG(level, fmt, args...) \ - RTE_LOG(level, PMD, "%s(): " fmt , __func__, ## args) + RTE_LOG(level, PMD, "%s(): " fmt "\n", __func__, ## args) #else #define PMD_DRV_LOG(level, fmt, args...) do { } while(0) #endif diff --git a/drivers/net/virtio/virtio_pci.c b/drivers/net/virtio/virtio_pci.c index c007959f..f1a7ca7e 100644 --- a/drivers/net/virtio/virtio_pci.c +++ b/drivers/net/virtio/virtio_pci.c @@ -55,20 +55,103 @@ */ #define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20) +static inline int +check_vq_phys_addr_ok(struct virtqueue *vq) +{ + /* Virtio PCI device VIRTIO_PCI_QUEUE_PF register is 32bit, + * and only accepts 32 bit page frame number. + * Check if the allocated physical memory exceeds 16TB. + */ + if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >> + (VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) { + PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!"); + return 0; + } + + return 1; +} + +/* + * Since we are in legacy mode: + * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf + * + * "Note that this is possible because while the virtio header is PCI (i.e. + * little) endian, the device-specific region is encoded in the native endian of + * the guest (where such distinction is applicable)." + * + * For powerpc which supports both, qemu supposes that cpu is big endian and + * enforces this for the virtio-net stuff. 
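[Editorial sketch, not part of the patch] On the bound enforced by check_vq_phys_addr_ok() above: the legacy VIRTIO_PCI_QUEUE_PFN register takes a 32-bit page frame number, and with the usual 4 KiB page shift of 12 the device can only be pointed at ring memory below 2^(32+12) bytes, i.e. 16 TiB. A standalone sketch of the same test, with the shift written out as an assumption:

#include <stdint.h>

#define QUEUE_ADDR_SHIFT 12	/* assumed 4 KiB pages, as in legacy virtio */

/* Return 1 if the ring [mem, mem + size) ends below 1ULL << (32 + 12) = 16 TiB. */
static int
ring_addr_ok(uint64_t ring_mem, uint64_t ring_size)
{
	return ((ring_mem + ring_size - 1) >> (QUEUE_ADDR_SHIFT + 32)) == 0;
}

int
main(void)
{
	return !ring_addr_ok(0x100000, 16384);	/* trivially below the limit */
}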
+ */ static void legacy_read_dev_config(struct virtio_hw *hw, size_t offset, void *dst, int length) { +#ifdef RTE_ARCH_PPC_64 + int size; + + while (length > 0) { + if (length >= 4) { + size = 4; + rte_eal_pci_ioport_read(&hw->io, dst, size, + VIRTIO_PCI_CONFIG(hw) + offset); + *(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst); + } else if (length >= 2) { + size = 2; + rte_eal_pci_ioport_read(&hw->io, dst, size, + VIRTIO_PCI_CONFIG(hw) + offset); + *(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst); + } else { + size = 1; + rte_eal_pci_ioport_read(&hw->io, dst, size, + VIRTIO_PCI_CONFIG(hw) + offset); + } + + dst = (char *)dst + size; + offset += size; + length -= size; + } +#else rte_eal_pci_ioport_read(&hw->io, dst, length, VIRTIO_PCI_CONFIG(hw) + offset); +#endif } static void legacy_write_dev_config(struct virtio_hw *hw, size_t offset, const void *src, int length) { +#ifdef RTE_ARCH_PPC_64 + union { + uint32_t u32; + uint16_t u16; + } tmp; + int size; + + while (length > 0) { + if (length >= 4) { + size = 4; + tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src); + rte_eal_pci_ioport_write(&hw->io, &tmp.u32, size, + VIRTIO_PCI_CONFIG(hw) + offset); + } else if (length >= 2) { + size = 2; + tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src); + rte_eal_pci_ioport_write(&hw->io, &tmp.u16, size, + VIRTIO_PCI_CONFIG(hw) + offset); + } else { + size = 1; + rte_eal_pci_ioport_write(&hw->io, src, size, + VIRTIO_PCI_CONFIG(hw) + offset); + } + + src = (const char *)src + size; + offset += size; + length -= size; + } +#else rte_eal_pci_ioport_write(&hw->io, src, length, VIRTIO_PCI_CONFIG(hw) + offset); +#endif } static uint64_t @@ -143,15 +226,20 @@ legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id) return dst; } -static void +static int legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) { uint32_t src; + if (!check_vq_phys_addr_ok(vq)) + return -1; + rte_eal_pci_ioport_write(&hw->io, &vq->vq_queue_index, 2, VIRTIO_PCI_QUEUE_SEL); - src = vq->mz->phys_addr >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; + src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; rte_eal_pci_ioport_write(&hw->io, &src, 4, VIRTIO_PCI_QUEUE_PFN); + + return 0; } static void @@ -179,7 +267,7 @@ legacy_virtio_has_msix(const struct rte_pci_addr *loc) char dirname[PATH_MAX]; snprintf(dirname, sizeof(dirname), - SYSFS_PCI_DEVICES "/" PCI_PRI_FMT "/msi_irqs", + "%s/" PCI_PRI_FMT "/msi_irqs", pci_get_sysfs_path(), loc->domain, loc->bus, loc->devid, loc->function); d = opendir(dirname); @@ -199,15 +287,15 @@ legacy_virtio_has_msix(const struct rte_pci_addr *loc __rte_unused) static int legacy_virtio_resource_init(struct rte_pci_device *pci_dev, - struct virtio_hw *hw) + struct virtio_hw *hw, uint32_t *dev_flags) { if (rte_eal_pci_ioport_map(pci_dev, 0, &hw->io) < 0) return -1; if (pci_dev->intr_handle.type != RTE_INTR_HANDLE_UNKNOWN) - pci_dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC; + *dev_flags |= RTE_ETH_DEV_INTR_LSC; else - pci_dev->driver->drv_flags &= ~RTE_PCI_DRV_INTR_LSC; + *dev_flags &= ~RTE_ETH_DEV_INTR_LSC; return 0; } @@ -367,13 +455,16 @@ modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id) return io_read16(&hw->common_cfg->queue_size); } -static void +static int modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) { uint64_t desc_addr, avail_addr, used_addr; uint16_t notify_off; - desc_addr = vq->mz->phys_addr; + if (!check_vq_phys_addr_ok(vq)) + return -1; + + desc_addr = vq->vq_ring_mem; avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); used_addr = 
RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, ring[vq->vq_nentries]), @@ -400,6 +491,8 @@ modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr); PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)", vq->notify_addr, notify_off); + + return 0; } static void @@ -626,11 +719,13 @@ next: * Return -1: * if there is error mapping with VFIO/UIO. * if port map error when driver type is KDRV_NONE. + * if whitelisted but driver type is KDRV_UNKNOWN. * Return 1 if kernel driver is managing the device. * Return 0 on success. */ int -vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw) +vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw, + uint32_t *dev_flags) { hw->dev = dev; @@ -643,14 +738,15 @@ vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw) PMD_INIT_LOG(INFO, "modern virtio pci detected."); hw->vtpci_ops = &modern_ops; hw->modern = 1; - dev->driver->drv_flags |= RTE_PCI_DRV_INTR_LSC; + *dev_flags |= RTE_ETH_DEV_INTR_LSC; return 0; } PMD_INIT_LOG(INFO, "trying with legacy virtio pci."); - if (legacy_virtio_resource_init(dev, hw) < 0) { + if (legacy_virtio_resource_init(dev, hw, dev_flags) < 0) { if (dev->kdrv == RTE_KDRV_UNKNOWN && - dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI) { + (!dev->devargs || + dev->devargs->type != RTE_DEVTYPE_WHITELISTED_PCI)) { PMD_INIT_LOG(INFO, "skip kernel managed virtio device."); return 1; diff --git a/drivers/net/virtio/virtio_pci.h b/drivers/net/virtio/virtio_pci.h index b69785ea..dd7693fe 100644 --- a/drivers/net/virtio/virtio_pci.h +++ b/drivers/net/virtio/virtio_pci.h @@ -40,6 +40,7 @@ #include <rte_ethdev.h> struct virtqueue; +struct virtnet_ctl; /* VirtIO PCI vendor/device ID. */ #define VIRTIO_PCI_VENDORID 0x1AF4 @@ -234,7 +235,7 @@ struct virtio_pci_ops { uint16_t (*set_config_irq)(struct virtio_hw *hw, uint16_t vec); uint16_t (*get_queue_num)(struct virtio_hw *hw, uint16_t queue_id); - void (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq); + int (*setup_queue)(struct virtio_hw *hw, struct virtqueue *vq); void (*del_queue)(struct virtio_hw *hw, struct virtqueue *vq); void (*notify_queue)(struct virtio_hw *hw, struct virtqueue *vq); }; @@ -242,7 +243,7 @@ struct virtio_pci_ops { struct virtio_net_config; struct virtio_hw { - struct virtqueue *cvq; + struct virtnet_ctl *cvq; struct rte_pci_ioport io; uint64_t guest_features; uint32_t max_tx_queues; @@ -260,6 +261,7 @@ struct virtio_hw { struct virtio_pci_common_cfg *common_cfg; struct virtio_net_config *dev_cfg; const struct virtio_pci_ops *vtpci_ops; + void *virtio_user_dev; }; /* @@ -293,7 +295,8 @@ vtpci_with_feature(struct virtio_hw *hw, uint64_t bit) /* * Function declaration from virtio_pci.c */ -int vtpci_init(struct rte_pci_device *, struct virtio_hw *); +int vtpci_init(struct rte_pci_device *, struct virtio_hw *, + uint32_t *dev_flags); void vtpci_reset(struct virtio_hw *); void vtpci_reinit_complete(struct virtio_hw *); diff --git a/drivers/net/virtio/virtio_ring.h b/drivers/net/virtio/virtio_ring.h index 447760a8..fcecc161 100644 --- a/drivers/net/virtio/virtio_ring.h +++ b/drivers/net/virtio/virtio_ring.h @@ -79,7 +79,7 @@ struct vring_used_elem { struct vring_used { uint16_t flags; - uint16_t idx; + volatile uint16_t idx; struct vring_used_elem ring[0]; }; diff --git a/drivers/net/virtio/virtio_rxtx.c b/drivers/net/virtio/virtio_rxtx.c index ef21d8e3..a27208e3 100644 --- a/drivers/net/virtio/virtio_rxtx.c +++ b/drivers/net/virtio/virtio_rxtx.c @@ -193,8 +193,7 @@ 
virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie) start_dp = vq->vq_ring.desc; start_dp[idx].addr = - (uint64_t)(cookie->buf_physaddr + RTE_PKTMBUF_HEADROOM - - hw->vtnet_hdr_size); + MBUF_DATA_DMA_ADDR(cookie, vq->offset) - hw->vtnet_hdr_size; start_dp[idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + hw->vtnet_hdr_size; start_dp[idx].flags = VRING_DESC_F_WRITE; @@ -209,23 +208,24 @@ virtqueue_enqueue_recv_refill(struct virtqueue *vq, struct rte_mbuf *cookie) } static inline void -virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie, +virtqueue_enqueue_xmit(struct virtnet_tx *txvq, struct rte_mbuf *cookie, uint16_t needed, int use_indirect, int can_push) { struct vq_desc_extra *dxp; + struct virtqueue *vq = txvq->vq; struct vring_desc *start_dp; uint16_t seg_num = cookie->nb_segs; uint16_t head_idx, idx; - uint16_t head_size = txvq->hw->vtnet_hdr_size; + uint16_t head_size = vq->hw->vtnet_hdr_size; unsigned long offs; - head_idx = txvq->vq_desc_head_idx; + head_idx = vq->vq_desc_head_idx; idx = head_idx; - dxp = &txvq->vq_descx[idx]; + dxp = &vq->vq_descx[idx]; dxp->cookie = (void *)cookie; dxp->ndescs = needed; - start_dp = txvq->vq_ring.desc; + start_dp = vq->vq_ring.desc; if (can_push) { /* put on zero'd transmit header (no offloads) */ @@ -259,46 +259,32 @@ virtqueue_enqueue_xmit(struct virtqueue *txvq, struct rte_mbuf *cookie, + offsetof(struct virtio_tx_region, tx_hdr); start_dp[idx].addr = txvq->virtio_net_hdr_mem + offs; - start_dp[idx].len = txvq->hw->vtnet_hdr_size; + start_dp[idx].len = vq->hw->vtnet_hdr_size; start_dp[idx].flags = VRING_DESC_F_NEXT; idx = start_dp[idx].next; } do { - start_dp[idx].addr = rte_mbuf_data_dma_addr(cookie); + start_dp[idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset); start_dp[idx].len = cookie->data_len; start_dp[idx].flags = cookie->next ? 
VRING_DESC_F_NEXT : 0; idx = start_dp[idx].next; } while ((cookie = cookie->next) != NULL); - start_dp[idx].flags &= ~VRING_DESC_F_NEXT; - if (use_indirect) - idx = txvq->vq_ring.desc[head_idx].next; - - txvq->vq_desc_head_idx = idx; - if (txvq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) - txvq->vq_desc_tail_idx = idx; - txvq->vq_free_cnt = (uint16_t)(txvq->vq_free_cnt - needed); - vq_update_avail_ring(txvq, head_idx); -} + idx = vq->vq_ring.desc[head_idx].next; -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - - return m; + vq->vq_desc_head_idx = idx; + if (vq->vq_desc_head_idx == VQ_RING_DESC_CHAIN_END) + vq->vq_desc_tail_idx = idx; + vq->vq_free_cnt = (uint16_t)(vq->vq_free_cnt - needed); + vq_update_avail_ring(vq, head_idx); } static void -virtio_dev_vring_start(struct virtqueue *vq, int queue_type) +virtio_dev_vring_start(struct virtqueue *vq) { - struct rte_mbuf *m; - int i, nbufs, error, size = vq->vq_nentries; + int size = vq->vq_nentries; struct vring *vr = &vq->vq_ring; uint8_t *ring_mem = vq->vq_ring_virt_mem; @@ -322,30 +308,70 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type) * Disable device(host) interrupting guest */ virtqueue_disable_intr(vq); +} + +void +virtio_dev_cq_start(struct rte_eth_dev *dev) +{ + struct virtio_hw *hw = dev->data->dev_private; + + if (hw->cvq && hw->cvq->vq) { + virtio_dev_vring_start(hw->cvq->vq); + VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq->vq); + } +} - /* Only rx virtqueue needs mbufs to be allocated at initialization */ - if (queue_type == VTNET_RQ) { - if (vq->mpool == NULL) +void +virtio_dev_rxtx_start(struct rte_eth_dev *dev) +{ + /* + * Start receive and transmit vrings + * - Setup vring structure for all queues + * - Initialize descriptor for the rx vring + * - Allocate blank mbufs for the each rx descriptor + * + */ + uint16_t i; + uint16_t desc_idx; + + PMD_INIT_FUNC_TRACE(); + + /* Start rx vring. 
*/ + for (i = 0; i < dev->data->nb_rx_queues; i++) { + struct virtnet_rx *rxvq = dev->data->rx_queues[i]; + struct virtqueue *vq = rxvq->vq; + int error, nbufs; + struct rte_mbuf *m; + + virtio_dev_vring_start(vq); + if (rxvq->mpool == NULL) { rte_exit(EXIT_FAILURE, - "Cannot allocate initial mbufs for rx virtqueue"); + "Cannot allocate mbufs for rx virtqueue"); + } /* Allocate blank mbufs for the each rx descriptor */ nbufs = 0; error = ENOSPC; #ifdef RTE_MACHINE_CPUFLAG_SSSE3 - if (use_simple_rxtx) - for (i = 0; i < vq->vq_nentries; i++) { - vq->vq_ring.avail->ring[i] = i; - vq->vq_ring.desc[i].flags = VRING_DESC_F_WRITE; + if (use_simple_rxtx) { + for (desc_idx = 0; desc_idx < vq->vq_nentries; + desc_idx++) { + vq->vq_ring.avail->ring[desc_idx] = desc_idx; + vq->vq_ring.desc[desc_idx].flags = + VRING_DESC_F_WRITE; } + } #endif - memset(&vq->fake_mbuf, 0, sizeof(vq->fake_mbuf)); - for (i = 0; i < RTE_PMD_VIRTIO_RX_MAX_BURST; i++) - vq->sw_ring[vq->vq_nentries + i] = &vq->fake_mbuf; + memset(&rxvq->fake_mbuf, 0, sizeof(rxvq->fake_mbuf)); + for (desc_idx = 0; desc_idx < RTE_PMD_VIRTIO_RX_MAX_BURST; + desc_idx++) { + vq->sw_ring[vq->vq_nentries + desc_idx] = + &rxvq->fake_mbuf; + } while (!virtqueue_full(vq)) { - m = rte_rxmbuf_alloc(vq->mpool); + m = rte_mbuf_raw_alloc(rxvq->mpool); if (m == NULL) break; @@ -368,64 +394,40 @@ virtio_dev_vring_start(struct virtqueue *vq, int queue_type) vq_update_avail_idx(vq); PMD_INIT_LOG(DEBUG, "Allocated %d bufs", nbufs); - } else if (queue_type == VTNET_TQ) { + + VIRTQUEUE_DUMP(vq); + } + + /* Start tx vring. */ + for (i = 0; i < dev->data->nb_tx_queues; i++) { + struct virtnet_tx *txvq = dev->data->tx_queues[i]; + struct virtqueue *vq = txvq->vq; + + virtio_dev_vring_start(vq); #ifdef RTE_MACHINE_CPUFLAG_SSSE3 if (use_simple_rxtx) { - int mid_idx = vq->vq_nentries >> 1; - for (i = 0; i < mid_idx; i++) { - vq->vq_ring.avail->ring[i] = i + mid_idx; - vq->vq_ring.desc[i + mid_idx].next = i; - vq->vq_ring.desc[i + mid_idx].addr = - vq->virtio_net_hdr_mem + + uint16_t mid_idx = vq->vq_nentries >> 1; + + for (desc_idx = 0; desc_idx < mid_idx; desc_idx++) { + vq->vq_ring.avail->ring[desc_idx] = + desc_idx + mid_idx; + vq->vq_ring.desc[desc_idx + mid_idx].next = + desc_idx; + vq->vq_ring.desc[desc_idx + mid_idx].addr = + txvq->virtio_net_hdr_mem + offsetof(struct virtio_tx_region, tx_hdr); - vq->vq_ring.desc[i + mid_idx].len = + vq->vq_ring.desc[desc_idx + mid_idx].len = vq->hw->vtnet_hdr_size; - vq->vq_ring.desc[i + mid_idx].flags = + vq->vq_ring.desc[desc_idx + mid_idx].flags = VRING_DESC_F_NEXT; - vq->vq_ring.desc[i].flags = 0; + vq->vq_ring.desc[desc_idx].flags = 0; } - for (i = mid_idx; i < vq->vq_nentries; i++) - vq->vq_ring.avail->ring[i] = i; + for (desc_idx = mid_idx; desc_idx < vq->vq_nentries; + desc_idx++) + vq->vq_ring.avail->ring[desc_idx] = desc_idx; } #endif - } -} - -void -virtio_dev_cq_start(struct rte_eth_dev *dev) -{ - struct virtio_hw *hw = dev->data->dev_private; - - if (hw->cvq) { - virtio_dev_vring_start(hw->cvq, VTNET_CQ); - VIRTQUEUE_DUMP((struct virtqueue *)hw->cvq); - } -} - -void -virtio_dev_rxtx_start(struct rte_eth_dev *dev) -{ - /* - * Start receive and transmit vrings - * - Setup vring structure for all queues - * - Initialize descriptor for the rx vring - * - Allocate blank mbufs for the each rx descriptor - * - */ - int i; - - PMD_INIT_FUNC_TRACE(); - - /* Start rx vring. 
*/ - for (i = 0; i < dev->data->nb_rx_queues; i++) { - virtio_dev_vring_start(dev->data->rx_queues[i], VTNET_RQ); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->rx_queues[i]); - } - - /* Start tx vring. */ - for (i = 0; i < dev->data->nb_tx_queues; i++) { - virtio_dev_vring_start(dev->data->tx_queues[i], VTNET_TQ); - VIRTQUEUE_DUMP((struct virtqueue *)dev->data->tx_queues[i]); + VIRTQUEUE_DUMP(vq); } } @@ -438,24 +440,24 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, struct rte_mempool *mp) { uint16_t vtpci_queue_idx = 2 * queue_idx + VTNET_SQ_RQ_QUEUE_IDX; - struct virtqueue *vq; + struct virtnet_rx *rxvq; int ret; PMD_INIT_FUNC_TRACE(); ret = virtio_dev_queue_setup(dev, VTNET_RQ, queue_idx, vtpci_queue_idx, - nb_desc, socket_id, &vq); + nb_desc, socket_id, (void **)&rxvq); if (ret < 0) { PMD_INIT_LOG(ERR, "rvq initialization failed"); return ret; } /* Create mempool for rx mbuf allocation */ - vq->mpool = mp; + rxvq->mpool = mp; - dev->data->rx_queues[queue_idx] = vq; + dev->data->rx_queues[queue_idx] = rxvq; #ifdef RTE_MACHINE_CPUFLAG_SSSE3 - virtio_rxq_vec_setup(vq); + virtio_rxq_vec_setup(rxvq); #endif return 0; @@ -464,7 +466,16 @@ virtio_dev_rx_queue_setup(struct rte_eth_dev *dev, void virtio_dev_rx_queue_release(void *rxq) { - virtio_dev_queue_release(rxq); + struct virtnet_rx *rxvq = rxq; + struct virtqueue *vq = rxvq->vq; + /* rxvq is freed when vq is freed, and as mz should be freed after the + * del_queue, so we reserve the mz pointer first. + */ + const struct rte_memzone *mz = rxvq->mz; + + /* no need to free rxq as vq and rxq are allocated together */ + virtio_dev_queue_release(vq); + rte_memzone_free(mz); } /* @@ -486,6 +497,7 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, #ifdef RTE_MACHINE_CPUFLAG_SSSE3 struct virtio_hw *hw = dev->data->dev_private; #endif + struct virtnet_tx *txvq; struct virtqueue *vq; uint16_t tx_free_thresh; int ret; @@ -510,11 +522,12 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, #endif ret = virtio_dev_queue_setup(dev, VTNET_TQ, queue_idx, vtpci_queue_idx, - nb_desc, socket_id, &vq); + nb_desc, socket_id, (void **)&txvq); if (ret < 0) { - PMD_INIT_LOG(ERR, "rvq initialization failed"); + PMD_INIT_LOG(ERR, "tvq initialization failed"); return ret; } + vq = txvq->vq; tx_free_thresh = tx_conf->tx_free_thresh; if (tx_free_thresh == 0) @@ -532,14 +545,24 @@ virtio_dev_tx_queue_setup(struct rte_eth_dev *dev, vq->vq_free_thresh = tx_free_thresh; - dev->data->tx_queues[queue_idx] = vq; + dev->data->tx_queues[queue_idx] = txvq; return 0; } void virtio_dev_tx_queue_release(void *txq) { - virtio_dev_queue_release(txq); + struct virtnet_tx *txvq = txq; + struct virtqueue *vq = txvq->vq; + /* txvq is freed when vq is freed, and as mz should be freed after the + * del_queue, so we reserve the mz pointer first. 
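[Editorial sketch, not part of the patch] The rx/tx queue release hooks above all rely on the same ordering: the virtnet_rx/virtnet_tx wrapper lives inside the allocation that virtio_dev_queue_release() frees, so any memzone pointers it holds must be copied aside first and released afterwards. A generic sketch of that ordering with plain malloc/free and hypothetical types:

#include <stdlib.h>

/* Hypothetical stand-ins: "zone" plays the role of rte_memzone, "queue" the
 * role of the rx/tx wrapper that is freed together with its virtqueue. */
struct zone { void *addr; };
struct queue { struct zone *ring_mz; struct zone *hdr_mz; };

static void
queue_teardown(struct queue *q)
{
	/* Save the pointers first: q is invalid once it has been freed. */
	struct zone *ring_mz = q->ring_mz;
	struct zone *hdr_mz = q->hdr_mz;

	free(q);	/* plays the role of virtio_dev_queue_release(vq) */
	free(ring_mz);	/* plays the role of rte_memzone_free(mz) */
	free(hdr_mz);	/* plays the role of rte_memzone_free(hdr_mz) */
}

int
main(void)
{
	struct queue *q = malloc(sizeof(*q));

	q->ring_mz = malloc(sizeof(struct zone));
	q->hdr_mz = malloc(sizeof(struct zone));
	queue_teardown(q);
	return 0;
}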
+ */ + const struct rte_memzone *hdr_mz = txvq->virtio_net_hdr_mz; + const struct rte_memzone *mz = txvq->mz; + + virtio_dev_queue_release(vq); + rte_memzone_free(mz); + rte_memzone_free(hdr_mz); } static void @@ -558,34 +581,34 @@ virtio_discard_rxbuf(struct virtqueue *vq, struct rte_mbuf *m) } static void -virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf) +virtio_update_packet_stats(struct virtnet_stats *stats, struct rte_mbuf *mbuf) { uint32_t s = mbuf->pkt_len; struct ether_addr *ea; if (s == 64) { - vq->size_bins[1]++; + stats->size_bins[1]++; } else if (s > 64 && s < 1024) { uint32_t bin; /* count zeros, and offset into correct bin */ bin = (sizeof(s) * 8) - __builtin_clz(s) - 5; - vq->size_bins[bin]++; + stats->size_bins[bin]++; } else { if (s < 64) - vq->size_bins[0]++; + stats->size_bins[0]++; else if (s < 1519) - vq->size_bins[6]++; + stats->size_bins[6]++; else if (s >= 1519) - vq->size_bins[7]++; + stats->size_bins[7]++; } ea = rte_pktmbuf_mtod(mbuf, struct ether_addr *); if (is_multicast_ether_addr(ea)) { if (is_broadcast_ether_addr(ea)) - vq->broadcast++; + stats->broadcast++; else - vq->multicast++; + stats->multicast++; } } @@ -594,7 +617,8 @@ virtio_update_packet_stats(struct virtqueue *vq, struct rte_mbuf *mbuf) uint16_t virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; struct virtio_hw *hw; struct rte_mbuf *rxm, *new_mbuf; uint16_t nb_used, num, nb_rx; @@ -604,19 +628,19 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) uint32_t i, nb_enqueued; uint32_t hdr_size; - nb_used = VIRTQUEUE_NUSED(rxvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); num = (uint16_t)(likely(nb_used <= nb_pkts) ? nb_used : nb_pkts); num = (uint16_t)(likely(num <= VIRTIO_MBUF_BURST_SZ) ? 
num : VIRTIO_MBUF_BURST_SZ); if (likely(num > DESC_PER_CACHELINE)) - num = num - ((rxvq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); + num = num - ((vq->vq_used_cons_idx + num) % DESC_PER_CACHELINE); - num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, num); + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, num); PMD_RX_LOG(DEBUG, "used:%d dequeue:%d", nb_used, num); - hw = rxvq->hw; + hw = vq->hw; nb_rx = 0; nb_enqueued = 0; hdr_size = hw->vtnet_hdr_size; @@ -629,8 +653,8 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) if (unlikely(len[i] < hdr_size + ETHER_HDR_LEN)) { PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; continue; } @@ -651,23 +675,23 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rx_pkts[nb_rx++] = rxm; - rxvq->bytes += rx_pkts[nb_rx - 1]->pkt_len; - virtio_update_packet_stats(rxvq, rxm); + rxvq->stats.bytes += rx_pkts[nb_rx - 1]->pkt_len; + virtio_update_packet_stats(&rxvq->stats, rxm); } - rxvq->packets += nb_rx; + rxvq->stats.packets += nb_rx; /* Allocate new mbuf for the used descriptor */ error = ENOSPC; - while (likely(!virtqueue_full(rxvq))) { - new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); + while (likely(!virtqueue_full(vq))) { + new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool); if (unlikely(new_mbuf == NULL)) { struct rte_eth_dev *dev = &rte_eth_devices[rxvq->port_id]; dev->data->rx_mbuf_alloc_failed++; break; } - error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf); + error = virtqueue_enqueue_recv_refill(vq, new_mbuf); if (unlikely(error)) { rte_pktmbuf_free(new_mbuf); break; @@ -676,11 +700,11 @@ virtio_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) } if (likely(nb_enqueued)) { - vq_update_avail_idx(rxvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(rxvq))) { - virtqueue_notify(rxvq); - PMD_RX_LOG(DEBUG, "Notified\n"); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); + PMD_RX_LOG(DEBUG, "Notified"); } } @@ -692,7 +716,8 @@ virtio_recv_mergeable_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; struct virtio_hw *hw; struct rte_mbuf *rxm, *new_mbuf; uint16_t nb_used, num, nb_rx; @@ -706,13 +731,13 @@ virtio_recv_mergeable_pkts(void *rx_queue, uint32_t seg_res; uint32_t hdr_size; - nb_used = VIRTQUEUE_NUSED(rxvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); - PMD_RX_LOG(DEBUG, "used:%d\n", nb_used); + PMD_RX_LOG(DEBUG, "used:%d", nb_used); - hw = rxvq->hw; + hw = vq->hw; nb_rx = 0; i = 0; nb_enqueued = 0; @@ -727,22 +752,22 @@ virtio_recv_mergeable_pkts(void *rx_queue, if (nb_rx == nb_pkts) break; - num = virtqueue_dequeue_burst_rx(rxvq, rcv_pkts, len, 1); + num = virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, 1); if (num != 1) continue; i++; - PMD_RX_LOG(DEBUG, "dequeue:%d\n", num); - PMD_RX_LOG(DEBUG, "packet len:%d\n", len[0]); + PMD_RX_LOG(DEBUG, "dequeue:%d", num); + PMD_RX_LOG(DEBUG, "packet len:%d", len[0]); rxm = rcv_pkts[0]; if (unlikely(len[0] < hdr_size + ETHER_HDR_LEN)) { - PMD_RX_LOG(ERR, "Packet drop\n"); + PMD_RX_LOG(ERR, "Packet drop"); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; continue; } @@ -773,18 +798,18 @@ virtio_recv_mergeable_pkts(void *rx_queue, */ uint16_t rcv_cnt = RTE_MIN(seg_res, 
RTE_DIM(rcv_pkts)); - if (likely(VIRTQUEUE_NUSED(rxvq) >= rcv_cnt)) { + if (likely(VIRTQUEUE_NUSED(vq) >= rcv_cnt)) { uint32_t rx_num = - virtqueue_dequeue_burst_rx(rxvq, + virtqueue_dequeue_burst_rx(vq, rcv_pkts, len, rcv_cnt); i += rx_num; rcv_cnt = rx_num; } else { PMD_RX_LOG(ERR, - "No enough segments for packet.\n"); + "No enough segments for packet."); nb_enqueued++; - virtio_discard_rxbuf(rxvq, rxm); - rxvq->errors++; + virtio_discard_rxbuf(vq, rxm); + rxvq->stats.errors++; break; } @@ -814,24 +839,24 @@ virtio_recv_mergeable_pkts(void *rx_queue, VIRTIO_DUMP_PACKET(rx_pkts[nb_rx], rx_pkts[nb_rx]->data_len); - rxvq->bytes += rx_pkts[nb_rx]->pkt_len; - virtio_update_packet_stats(rxvq, rx_pkts[nb_rx]); + rxvq->stats.bytes += rx_pkts[nb_rx]->pkt_len; + virtio_update_packet_stats(&rxvq->stats, rx_pkts[nb_rx]); nb_rx++; } - rxvq->packets += nb_rx; + rxvq->stats.packets += nb_rx; /* Allocate new mbuf for the used descriptor */ error = ENOSPC; - while (likely(!virtqueue_full(rxvq))) { - new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); + while (likely(!virtqueue_full(vq))) { + new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool); if (unlikely(new_mbuf == NULL)) { struct rte_eth_dev *dev = &rte_eth_devices[rxvq->port_id]; dev->data->rx_mbuf_alloc_failed++; break; } - error = virtqueue_enqueue_recv_refill(rxvq, new_mbuf); + error = virtqueue_enqueue_recv_refill(vq, new_mbuf); if (unlikely(error)) { rte_pktmbuf_free(new_mbuf); break; @@ -840,10 +865,10 @@ virtio_recv_mergeable_pkts(void *rx_queue, } if (likely(nb_enqueued)) { - vq_update_avail_idx(rxvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(rxvq))) { - virtqueue_notify(rxvq); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); PMD_RX_LOG(DEBUG, "Notified"); } } @@ -854,8 +879,9 @@ virtio_recv_mergeable_pkts(void *rx_queue, uint16_t virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct virtqueue *txvq = tx_queue; - struct virtio_hw *hw = txvq->hw; + struct virtnet_tx *txvq = tx_queue; + struct virtqueue *vq = txvq->vq; + struct virtio_hw *hw = vq->hw; uint16_t hdr_size = hw->vtnet_hdr_size; uint16_t nb_used, nb_tx; int error; @@ -864,11 +890,11 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) return nb_pkts; PMD_TX_LOG(DEBUG, "%d packets to xmit", nb_pkts); - nb_used = VIRTQUEUE_NUSED(txvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); - if (likely(nb_used > txvq->vq_nentries - txvq->vq_free_thresh)) - virtio_xmit_cleanup(txvq, nb_used); + if (likely(nb_used > vq->vq_nentries - vq->vq_free_thresh)) + virtio_xmit_cleanup(vq, nb_used); for (nb_tx = 0; nb_tx < nb_pkts; nb_tx++) { struct rte_mbuf *txm = tx_pkts[nb_tx]; @@ -886,6 +912,7 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* optimize ring usage */ if (vtpci_with_feature(hw, VIRTIO_F_ANY_LAYOUT) && rte_mbuf_refcnt_read(txm) == 1 && + RTE_MBUF_DIRECT(txm) && txm->nb_segs == 1 && rte_pktmbuf_headroom(txm) >= hdr_size && rte_is_aligned(rte_pktmbuf_mtod(txm, char *), @@ -901,16 +928,16 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) * default => number of segments + 1 */ slots = use_indirect ? 
1 : (txm->nb_segs + !can_push); - need = slots - txvq->vq_free_cnt; + need = slots - vq->vq_free_cnt; /* Positive value indicates it need free vring descriptors */ if (unlikely(need > 0)) { - nb_used = VIRTQUEUE_NUSED(txvq); + nb_used = VIRTQUEUE_NUSED(vq); virtio_rmb(); need = RTE_MIN(need, (int)nb_used); - virtio_xmit_cleanup(txvq, need); - need = slots - txvq->vq_free_cnt; + virtio_xmit_cleanup(vq, need); + need = slots - vq->vq_free_cnt; if (unlikely(need > 0)) { PMD_TX_LOG(ERR, "No free tx descriptors to transmit"); @@ -921,17 +948,17 @@ virtio_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) /* Enqueue Packet buffers */ virtqueue_enqueue_xmit(txvq, txm, slots, use_indirect, can_push); - txvq->bytes += txm->pkt_len; - virtio_update_packet_stats(txvq, txm); + txvq->stats.bytes += txm->pkt_len; + virtio_update_packet_stats(&txvq->stats, txm); } - txvq->packets += nb_tx; + txvq->stats.packets += nb_tx; if (likely(nb_tx)) { - vq_update_avail_idx(txvq); + vq_update_avail_idx(vq); - if (unlikely(virtqueue_kick_prepare(txvq))) { - virtqueue_notify(txvq); + if (unlikely(virtqueue_kick_prepare(vq))) { + virtqueue_notify(vq); PMD_TX_LOG(DEBUG, "Notified backend after xmit"); } } diff --git a/drivers/net/virtio/virtio_rxtx.h b/drivers/net/virtio/virtio_rxtx.h index a76c3e52..058b56a1 100644 --- a/drivers/net/virtio/virtio_rxtx.h +++ b/drivers/net/virtio/virtio_rxtx.h @@ -31,11 +31,65 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ +#ifndef _VIRTIO_RXTX_H_ +#define _VIRTIO_RXTX_H_ + #define RTE_PMD_VIRTIO_RX_MAX_BURST 64 +struct virtnet_stats { + uint64_t packets; + uint64_t bytes; + uint64_t errors; + uint64_t multicast; + uint64_t broadcast; + /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */ + uint64_t size_bins[8]; +}; + +struct virtnet_rx { + struct virtqueue *vq; + /* dummy mbuf, for wraparound when processing RX ring. */ + struct rte_mbuf fake_mbuf; + uint64_t mbuf_initializer; /**< value to init mbufs. */ + struct rte_mempool *mpool; /**< mempool for mbuf allocation */ + + uint16_t queue_id; /**< DPDK queue index. */ + uint8_t port_id; /**< Device port identifier. */ + + /* Statistics */ + struct virtnet_stats stats; + + const struct rte_memzone *mz; /**< mem zone to populate RX ring. */ +}; + +struct virtnet_tx { + struct virtqueue *vq; + /**< memzone to populate hdr. */ + const struct rte_memzone *virtio_net_hdr_mz; + phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */ + + uint16_t queue_id; /**< DPDK queue index. */ + uint8_t port_id; /**< Device port identifier. */ + + /* Statistics */ + struct virtnet_stats stats; + + const struct rte_memzone *mz; /**< mem zone to populate TX ring. */ +}; + +struct virtnet_ctl { + struct virtqueue *vq; + /**< memzone to populate hdr. */ + const struct rte_memzone *virtio_net_hdr_mz; + phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */ + uint8_t port_id; /**< Device port identifier. */ + const struct rte_memzone *mz; /**< mem zone to populate RX ring. 
*/ +}; + #ifdef RTE_MACHINE_CPUFLAG_SSSE3 -int virtio_rxq_vec_setup(struct virtqueue *rxq); +int virtio_rxq_vec_setup(struct virtnet_rx *rxvq); int virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, struct rte_mbuf *m); #endif +#endif /* _VIRTIO_RXTX_H_ */ diff --git a/drivers/net/virtio/virtio_rxtx_simple.c b/drivers/net/virtio/virtio_rxtx_simple.c index 8f5293dd..242ad90d 100644 --- a/drivers/net/virtio/virtio_rxtx_simple.c +++ b/drivers/net/virtio/virtio_rxtx_simple.c @@ -80,8 +80,8 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, vq->sw_ring[desc_idx] = cookie; start_dp = vq->vq_ring.desc; - start_dp[desc_idx].addr = (uint64_t)((uintptr_t)cookie->buf_physaddr + - RTE_PKTMBUF_HEADROOM - vq->hw->vtnet_hdr_size); + start_dp[desc_idx].addr = MBUF_DATA_DMA_ADDR(cookie, vq->offset) - + vq->hw->vtnet_hdr_size; start_dp[desc_idx].len = cookie->buf_len - RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size; @@ -92,17 +92,18 @@ virtqueue_enqueue_recv_refill_simple(struct virtqueue *vq, } static inline void -virtio_rxq_rearm_vec(struct virtqueue *rxvq) +virtio_rxq_rearm_vec(struct virtnet_rx *rxvq) { int i; uint16_t desc_idx; struct rte_mbuf **sw_ring; struct vring_desc *start_dp; int ret; + struct virtqueue *vq = rxvq->vq; - desc_idx = rxvq->vq_avail_idx & (rxvq->vq_nentries - 1); - sw_ring = &rxvq->sw_ring[desc_idx]; - start_dp = &rxvq->vq_ring.desc[desc_idx]; + desc_idx = vq->vq_avail_idx & (vq->vq_nentries - 1); + sw_ring = &vq->sw_ring[desc_idx]; + start_dp = &vq->vq_ring.desc[desc_idx]; ret = rte_mempool_get_bulk(rxvq->mpool, (void **)sw_ring, RTE_VIRTIO_VPMD_RX_REARM_THRESH); @@ -119,15 +120,15 @@ virtio_rxq_rearm_vec(struct virtqueue *rxvq) *(uint64_t *)p = rxvq->mbuf_initializer; start_dp[i].addr = - (uint64_t)((uintptr_t)sw_ring[i]->buf_physaddr + - RTE_PKTMBUF_HEADROOM - rxvq->hw->vtnet_hdr_size); + MBUF_DATA_DMA_ADDR(sw_ring[i], vq->offset) - + vq->hw->vtnet_hdr_size; start_dp[i].len = sw_ring[i]->buf_len - - RTE_PKTMBUF_HEADROOM + rxvq->hw->vtnet_hdr_size; + RTE_PKTMBUF_HEADROOM + vq->hw->vtnet_hdr_size; } - rxvq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH; - rxvq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH; - vq_update_avail_idx(rxvq); + vq->vq_avail_idx += RTE_VIRTIO_VPMD_RX_REARM_THRESH; + vq->vq_free_cnt -= RTE_VIRTIO_VPMD_RX_REARM_THRESH; + vq_update_avail_idx(vq); } /* virtio vPMD receive routine, only accept(nb_pkts >= RTE_VIRTIO_DESC_PER_LOOP) @@ -143,7 +144,8 @@ uint16_t virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { - struct virtqueue *rxvq = rx_queue; + struct virtnet_rx *rxvq = rx_queue; + struct virtqueue *vq = rxvq->vq; uint16_t nb_used; uint16_t desc_idx; struct vring_used_elem *rused; @@ -175,15 +177,14 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, len_adjust = _mm_set_epi16( 0, 0, 0, - (uint16_t)-rxvq->hw->vtnet_hdr_size, - 0, (uint16_t)-rxvq->hw->vtnet_hdr_size, + (uint16_t)-vq->hw->vtnet_hdr_size, + 0, (uint16_t)-vq->hw->vtnet_hdr_size, 0, 0); if (unlikely(nb_pkts < RTE_VIRTIO_DESC_PER_LOOP)) return 0; - nb_used = *(volatile uint16_t *)&rxvq->vq_ring.used->idx - - rxvq->vq_used_cons_idx; + nb_used = VIRTQUEUE_NUSED(vq); rte_compiler_barrier(); @@ -193,17 +194,17 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, nb_pkts = RTE_ALIGN_FLOOR(nb_pkts, RTE_VIRTIO_DESC_PER_LOOP); nb_used = RTE_MIN(nb_used, nb_pkts); - desc_idx = (uint16_t)(rxvq->vq_used_cons_idx & (rxvq->vq_nentries - 1)); - rused = &rxvq->vq_ring.used->ring[desc_idx]; - sw_ring = &rxvq->sw_ring[desc_idx]; - 
sw_ring_end = &rxvq->sw_ring[rxvq->vq_nentries]; + desc_idx = (uint16_t)(vq->vq_used_cons_idx & (vq->vq_nentries - 1)); + rused = &vq->vq_ring.used->ring[desc_idx]; + sw_ring = &vq->sw_ring[desc_idx]; + sw_ring_end = &vq->sw_ring[vq->vq_nentries]; _mm_prefetch((const void *)rused, _MM_HINT_T0); - if (rxvq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { + if (vq->vq_free_cnt >= RTE_VIRTIO_VPMD_RX_REARM_THRESH) { virtio_rxq_rearm_vec(rxvq); - if (unlikely(virtqueue_kick_prepare(rxvq))) - virtqueue_notify(rxvq); + if (unlikely(virtqueue_kick_prepare(vq))) + virtqueue_notify(vq); } for (nb_pkts_received = 0; @@ -286,9 +287,9 @@ virtio_recv_pkts_vec(void *rx_queue, struct rte_mbuf **rx_pkts, } } - rxvq->vq_used_cons_idx += nb_pkts_received; - rxvq->vq_free_cnt += nb_pkts_received; - rxvq->packets += nb_pkts_received; + vq->vq_used_cons_idx += nb_pkts_received; + vq->vq_free_cnt += nb_pkts_received; + rxvq->stats.packets += nb_pkts_received; return nb_pkts_received; } @@ -342,31 +343,32 @@ uint16_t virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, uint16_t nb_pkts) { - struct virtqueue *txvq = tx_queue; + struct virtnet_tx *txvq = tx_queue; + struct virtqueue *vq = txvq->vq; uint16_t nb_used; uint16_t desc_idx; struct vring_desc *start_dp; uint16_t nb_tail, nb_commit; int i; - uint16_t desc_idx_max = (txvq->vq_nentries >> 1) - 1; + uint16_t desc_idx_max = (vq->vq_nentries >> 1) - 1; - nb_used = VIRTQUEUE_NUSED(txvq); + nb_used = VIRTQUEUE_NUSED(vq); rte_compiler_barrier(); if (nb_used >= VIRTIO_TX_FREE_THRESH) - virtio_xmit_cleanup(tx_queue); + virtio_xmit_cleanup(vq); - nb_commit = nb_pkts = RTE_MIN((txvq->vq_free_cnt >> 1), nb_pkts); - desc_idx = (uint16_t) (txvq->vq_avail_idx & desc_idx_max); - start_dp = txvq->vq_ring.desc; + nb_commit = nb_pkts = RTE_MIN((vq->vq_free_cnt >> 1), nb_pkts); + desc_idx = (uint16_t)(vq->vq_avail_idx & desc_idx_max); + start_dp = vq->vq_ring.desc; nb_tail = (uint16_t) (desc_idx_max + 1 - desc_idx); if (nb_commit >= nb_tail) { for (i = 0; i < nb_tail; i++) - txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; + vq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; for (i = 0; i < nb_tail; i++) { start_dp[desc_idx].addr = - rte_mbuf_data_dma_addr(*tx_pkts); + MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset); start_dp[desc_idx].len = (*tx_pkts)->pkt_len; tx_pkts++; desc_idx++; @@ -375,9 +377,10 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, desc_idx = 0; } for (i = 0; i < nb_commit; i++) - txvq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; + vq->vq_descx[desc_idx + i].cookie = tx_pkts[i]; for (i = 0; i < nb_commit; i++) { - start_dp[desc_idx].addr = rte_mbuf_data_dma_addr(*tx_pkts); + start_dp[desc_idx].addr = + MBUF_DATA_DMA_ADDR(*tx_pkts, vq->offset); start_dp[desc_idx].len = (*tx_pkts)->pkt_len; tx_pkts++; desc_idx++; @@ -385,21 +388,21 @@ virtio_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts, rte_compiler_barrier(); - txvq->vq_free_cnt -= (uint16_t)(nb_pkts << 1); - txvq->vq_avail_idx += nb_pkts; - txvq->vq_ring.avail->idx = txvq->vq_avail_idx; - txvq->packets += nb_pkts; + vq->vq_free_cnt -= (uint16_t)(nb_pkts << 1); + vq->vq_avail_idx += nb_pkts; + vq->vq_ring.avail->idx = vq->vq_avail_idx; + txvq->stats.packets += nb_pkts; if (likely(nb_pkts)) { - if (unlikely(virtqueue_kick_prepare(txvq))) - virtqueue_notify(txvq); + if (unlikely(virtqueue_kick_prepare(vq))) + virtqueue_notify(vq); } return nb_pkts; } int __attribute__((cold)) -virtio_rxq_vec_setup(struct virtqueue *rxq) +virtio_rxq_vec_setup(struct virtnet_rx *rxq) { 
uintptr_t p; struct rte_mbuf mb_def = { .buf_addr = 0 }; /* zeroed mbuf */ diff --git a/drivers/net/virtio/virtio_user/vhost.h b/drivers/net/virtio/virtio_user/vhost.h new file mode 100644 index 00000000..7adb55f5 --- /dev/null +++ b/drivers/net/virtio/virtio_user/vhost.h @@ -0,0 +1,146 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VHOST_NET_USER_H +#define _VHOST_NET_USER_H + +#include <stdint.h> +#include <linux/types.h> +#include <linux/ioctl.h> + +#include "../virtio_pci.h" +#include "../virtio_logs.h" +#include "../virtqueue.h" + +#define VHOST_MEMORY_MAX_NREGIONS 8 + +struct vhost_vring_state { + unsigned int index; + unsigned int num; +}; + +struct vhost_vring_file { + unsigned int index; + int fd; +}; + +struct vhost_vring_addr { + unsigned int index; + /* Option flags. */ + unsigned int flags; + /* Flag values: */ + /* Whether log address is valid. If set enables logging. */ +#define VHOST_VRING_F_LOG 0 + + /* Start of array of descriptors (virtually contiguous) */ + uint64_t desc_user_addr; + /* Used structure address. Must be 32 bit aligned */ + uint64_t used_user_addr; + /* Available structure address. Must be 16 bit aligned */ + uint64_t avail_user_addr; + /* Logging support. */ + /* Log writes to used structure, at offset calculated from specified + * address. Address must be 32 bit aligned. 
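[Editorial sketch, not part of the patch] The desc/avail/used user addresses carried by this structure follow the split-ring layout already visible in modern_setup_queue() earlier in this series: 16-byte descriptors first, the avail ring (2-byte flags, 2-byte idx, then one 2-byte entry per descriptor) immediately after, and the used ring on the next 4096-byte boundary. A small sketch of that arithmetic for a purely illustrative ring:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN_CEIL(v, a)	(((v) + (a) - 1) & ~((uint64_t)(a) - 1))

int
main(void)
{
	uint64_t desc_addr = 0x100000;	/* illustrative base address */
	unsigned int num = 256;		/* illustrative queue size */

	/* 16 bytes per descriptor; avail header is 4 bytes plus 2 bytes/entry. */
	uint64_t avail_addr = desc_addr + (uint64_t)num * 16;
	uint64_t used_addr = ALIGN_CEIL(avail_addr + 4 + 2 * (uint64_t)num, 4096);

	printf("desc  %#" PRIx64 "\navail %#" PRIx64 "\nused  %#" PRIx64 "\n",
	       desc_addr, avail_addr, used_addr);
	return 0;
}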
+ */ + uint64_t log_guest_addr; +}; + +enum vhost_user_request { + VHOST_USER_NONE = 0, + VHOST_USER_GET_FEATURES = 1, + VHOST_USER_SET_FEATURES = 2, + VHOST_USER_SET_OWNER = 3, + VHOST_USER_RESET_OWNER = 4, + VHOST_USER_SET_MEM_TABLE = 5, + VHOST_USER_SET_LOG_BASE = 6, + VHOST_USER_SET_LOG_FD = 7, + VHOST_USER_SET_VRING_NUM = 8, + VHOST_USER_SET_VRING_ADDR = 9, + VHOST_USER_SET_VRING_BASE = 10, + VHOST_USER_GET_VRING_BASE = 11, + VHOST_USER_SET_VRING_KICK = 12, + VHOST_USER_SET_VRING_CALL = 13, + VHOST_USER_SET_VRING_ERR = 14, + VHOST_USER_GET_PROTOCOL_FEATURES = 15, + VHOST_USER_SET_PROTOCOL_FEATURES = 16, + VHOST_USER_GET_QUEUE_NUM = 17, + VHOST_USER_SET_VRING_ENABLE = 18, + VHOST_USER_MAX +}; + +struct vhost_memory_region { + uint64_t guest_phys_addr; + uint64_t memory_size; /* bytes */ + uint64_t userspace_addr; + uint64_t mmap_offset; +}; + +struct vhost_memory { + uint32_t nregions; + uint32_t padding; + struct vhost_memory_region regions[VHOST_MEMORY_MAX_NREGIONS]; +}; + +struct vhost_user_msg { + enum vhost_user_request request; + +#define VHOST_USER_VERSION_MASK 0x3 +#define VHOST_USER_REPLY_MASK (0x1 << 2) + uint32_t flags; + uint32_t size; /* the following payload size */ + union { +#define VHOST_USER_VRING_IDX_MASK 0xff +#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8) + uint64_t u64; + struct vhost_vring_state state; + struct vhost_vring_addr addr; + struct vhost_memory memory; + } payload; + int fds[VHOST_MEMORY_MAX_NREGIONS]; +} __attribute((packed)); + +#define VHOST_USER_HDR_SIZE offsetof(struct vhost_user_msg, payload.u64) +#define VHOST_USER_PAYLOAD_SIZE \ + (sizeof(struct vhost_user_msg) - VHOST_USER_HDR_SIZE) + +/* The version of the protocol we support */ +#define VHOST_USER_VERSION 0x1 + +#define VHOST_USER_F_PROTOCOL_FEATURES 30 +#define VHOST_USER_MQ (1ULL << VHOST_USER_F_PROTOCOL_FEATURES) + +int vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg); +int vhost_user_setup(const char *path); +int vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable); + +#endif diff --git a/drivers/net/virtio/virtio_user/vhost_user.c b/drivers/net/virtio/virtio_user/vhost_user.c new file mode 100644 index 00000000..a2b0687f --- /dev/null +++ b/drivers/net/virtio/virtio_user/vhost_user.c @@ -0,0 +1,426 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <sys/socket.h> +#include <sys/types.h> +#include <sys/stat.h> +#include <unistd.h> +#include <fcntl.h> +#include <sys/un.h> +#include <string.h> +#include <errno.h> + +#include "vhost.h" + +static int +vhost_user_write(int fd, void *buf, int len, int *fds, int fd_num) +{ + int r; + struct msghdr msgh; + struct iovec iov; + size_t fd_size = fd_num * sizeof(int); + char control[CMSG_SPACE(fd_size)]; + struct cmsghdr *cmsg; + + memset(&msgh, 0, sizeof(msgh)); + memset(control, 0, sizeof(control)); + + iov.iov_base = (uint8_t *)buf; + iov.iov_len = len; + + msgh.msg_iov = &iov; + msgh.msg_iovlen = 1; + msgh.msg_control = control; + msgh.msg_controllen = sizeof(control); + + cmsg = CMSG_FIRSTHDR(&msgh); + cmsg->cmsg_len = CMSG_LEN(fd_size); + cmsg->cmsg_level = SOL_SOCKET; + cmsg->cmsg_type = SCM_RIGHTS; + memcpy(CMSG_DATA(cmsg), fds, fd_size); + + do { + r = sendmsg(fd, &msgh, 0); + } while (r < 0 && errno == EINTR); + + return r; +} + +static int +vhost_user_read(int fd, struct vhost_user_msg *msg) +{ + uint32_t valid_flags = VHOST_USER_REPLY_MASK | VHOST_USER_VERSION; + int ret, sz_hdr = VHOST_USER_HDR_SIZE, sz_payload; + + ret = recv(fd, (void *)msg, sz_hdr, 0); + if (ret < sz_hdr) { + PMD_DRV_LOG(ERR, "Failed to recv msg hdr: %d instead of %d.", + ret, sz_hdr); + goto fail; + } + + /* validate msg flags */ + if (msg->flags != (valid_flags)) { + PMD_DRV_LOG(ERR, "Failed to recv msg: flags %x instead of %x.", + msg->flags, valid_flags); + goto fail; + } + + sz_payload = msg->size; + if (sz_payload) { + ret = recv(fd, (void *)((char *)msg + sz_hdr), sz_payload, 0); + if (ret < sz_payload) { + PMD_DRV_LOG(ERR, + "Failed to recv msg payload: %d instead of %d.", + ret, msg->size); + goto fail; + } + } + + return 0; + +fail: + return -1; +} + +struct hugepage_file_info { + uint64_t addr; /**< virtual addr */ + size_t size; /**< the file size */ + char path[PATH_MAX]; /**< path to backing file */ +}; + +/* Two possible options: + * 1. Match HUGEPAGE_INFO_FMT to find the file storing struct hugepage_file + * array. This is simple but cannot be used in secondary process because + * secondary process will close and munmap that file. + * 2. Match HUGEFILE_FMT to find hugepage files directly. + * + * We choose option 2. 
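[Editorial note, not part of the patch] For context on the parser that follows: each /proc/self/maps entry has the form "start-end perms offset dev inode pathname", and DPDK hugepage backing files end in "...map_<idx>" (for example /dev/hugepages/rtemap_0 with the default "rte" prefix; the path here is purely illustrative). A minimal sketch of the trailing-path match applied to one such name:

#include <stdio.h>
#include <string.h>

int
main(void)
{
	/* Illustrative hugepage backing file path; only the suffix matters. */
	const char *path = "/dev/hugepages/rtemap_0";
	const char *underline = strrchr(path, '_');
	int idx;

	if (underline != NULL &&
	    (size_t)(underline - path) >= strlen("map") &&
	    sscanf(underline - strlen("map"), "map_%d", &idx) == 1)
		printf("hugepage file index %d\n", idx);
	return 0;
}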
+ */ +static int +get_hugepage_file_info(struct hugepage_file_info huges[], int max) +{ + int idx; + FILE *f; + char buf[BUFSIZ], *tmp, *tail; + char *str_underline, *str_start; + int huge_index; + uint64_t v_start, v_end; + + f = fopen("/proc/self/maps", "r"); + if (!f) { + PMD_DRV_LOG(ERR, "cannot open /proc/self/maps"); + return -1; + } + + idx = 0; + while (fgets(buf, sizeof(buf), f) != NULL) { + if (sscanf(buf, "%" PRIx64 "-%" PRIx64, &v_start, &v_end) < 2) { + PMD_DRV_LOG(ERR, "Failed to parse address"); + goto error; + } + + tmp = strchr(buf, ' ') + 1; /** skip address */ + tmp = strchr(tmp, ' ') + 1; /** skip perm */ + tmp = strchr(tmp, ' ') + 1; /** skip offset */ + tmp = strchr(tmp, ' ') + 1; /** skip dev */ + tmp = strchr(tmp, ' ') + 1; /** skip inode */ + while (*tmp == ' ') /** skip spaces */ + tmp++; + tail = strrchr(tmp, '\n'); /** remove newline if exists */ + if (tail) + *tail = '\0'; + + /* Match HUGEFILE_FMT, aka "%s/%smap_%d", + * which is defined in eal_filesystem.h + */ + str_underline = strrchr(tmp, '_'); + if (!str_underline) + continue; + + str_start = str_underline - strlen("map"); + if (str_start < tmp) + continue; + + if (sscanf(str_start, "map_%d", &huge_index) != 1) + continue; + + if (idx >= max) { + PMD_DRV_LOG(ERR, "Exceed maximum of %d", max); + goto error; + } + huges[idx].addr = v_start; + huges[idx].size = v_end - v_start; + strcpy(huges[idx].path, tmp); + idx++; + } + + fclose(f); + return idx; + +error: + fclose(f); + return -1; +} + +static int +prepare_vhost_memory_user(struct vhost_user_msg *msg, int fds[]) +{ + int i, num; + struct hugepage_file_info huges[VHOST_MEMORY_MAX_NREGIONS]; + struct vhost_memory_region *mr; + + num = get_hugepage_file_info(huges, VHOST_MEMORY_MAX_NREGIONS); + if (num < 0) { + PMD_INIT_LOG(ERR, "Failed to prepare memory for vhost-user"); + return -1; + } + + for (i = 0; i < num; ++i) { + mr = &msg->payload.memory.regions[i]; + mr->guest_phys_addr = huges[i].addr; /* use vaddr! 
*/ + mr->userspace_addr = huges[i].addr; + mr->memory_size = huges[i].size; + mr->mmap_offset = 0; + fds[i] = open(huges[i].path, O_RDWR); + } + + msg->payload.memory.nregions = num; + msg->payload.memory.padding = 0; + + return 0; +} + +static struct vhost_user_msg m; + +static const char * const vhost_msg_strings[] = { + [VHOST_USER_SET_OWNER] = "VHOST_USER_SET_OWNER", + [VHOST_USER_RESET_OWNER] = "VHOST_USER_RESET_OWNER", + [VHOST_USER_SET_FEATURES] = "VHOST_USER_SET_FEATURES", + [VHOST_USER_GET_FEATURES] = "VHOST_USER_GET_FEATURES", + [VHOST_USER_SET_VRING_CALL] = "VHOST_USER_SET_VRING_CALL", + [VHOST_USER_SET_VRING_NUM] = "VHOST_USER_SET_VRING_NUM", + [VHOST_USER_SET_VRING_BASE] = "VHOST_USER_SET_VRING_BASE", + [VHOST_USER_GET_VRING_BASE] = "VHOST_USER_GET_VRING_BASE", + [VHOST_USER_SET_VRING_ADDR] = "VHOST_USER_SET_VRING_ADDR", + [VHOST_USER_SET_VRING_KICK] = "VHOST_USER_SET_VRING_KICK", + [VHOST_USER_SET_MEM_TABLE] = "VHOST_USER_SET_MEM_TABLE", + [VHOST_USER_SET_VRING_ENABLE] = "VHOST_USER_SET_VRING_ENABLE", + NULL, +}; + +int +vhost_user_sock(int vhostfd, enum vhost_user_request req, void *arg) +{ + struct vhost_user_msg msg; + struct vhost_vring_file *file = 0; + int need_reply = 0; + int fds[VHOST_MEMORY_MAX_NREGIONS]; + int fd_num = 0; + int i, len; + + RTE_SET_USED(m); + RTE_SET_USED(vhost_msg_strings); + + PMD_DRV_LOG(INFO, "%s", vhost_msg_strings[req]); + + msg.request = req; + msg.flags = VHOST_USER_VERSION; + msg.size = 0; + + switch (req) { + case VHOST_USER_GET_FEATURES: + need_reply = 1; + break; + + case VHOST_USER_SET_FEATURES: + case VHOST_USER_SET_LOG_BASE: + msg.payload.u64 = *((__u64 *)arg); + msg.size = sizeof(m.payload.u64); + break; + + case VHOST_USER_SET_OWNER: + case VHOST_USER_RESET_OWNER: + break; + + case VHOST_USER_SET_MEM_TABLE: + if (prepare_vhost_memory_user(&msg, fds) < 0) + return -1; + fd_num = msg.payload.memory.nregions; + msg.size = sizeof(m.payload.memory.nregions); + msg.size += sizeof(m.payload.memory.padding); + msg.size += fd_num * sizeof(struct vhost_memory_region); + break; + + case VHOST_USER_SET_LOG_FD: + fds[fd_num++] = *((int *)arg); + break; + + case VHOST_USER_SET_VRING_NUM: + case VHOST_USER_SET_VRING_BASE: + case VHOST_USER_SET_VRING_ENABLE: + memcpy(&msg.payload.state, arg, sizeof(msg.payload.state)); + msg.size = sizeof(m.payload.state); + break; + + case VHOST_USER_GET_VRING_BASE: + memcpy(&msg.payload.state, arg, sizeof(msg.payload.state)); + msg.size = sizeof(m.payload.state); + need_reply = 1; + break; + + case VHOST_USER_SET_VRING_ADDR: + memcpy(&msg.payload.addr, arg, sizeof(msg.payload.addr)); + msg.size = sizeof(m.payload.addr); + break; + + case VHOST_USER_SET_VRING_KICK: + case VHOST_USER_SET_VRING_CALL: + case VHOST_USER_SET_VRING_ERR: + file = arg; + msg.payload.u64 = file->index & VHOST_USER_VRING_IDX_MASK; + msg.size = sizeof(m.payload.u64); + if (file->fd > 0) + fds[fd_num++] = file->fd; + else + msg.payload.u64 |= VHOST_USER_VRING_NOFD_MASK; + break; + + default: + PMD_DRV_LOG(ERR, "trying to send unhandled msg type"); + return -1; + } + + len = VHOST_USER_HDR_SIZE + msg.size; + if (vhost_user_write(vhostfd, &msg, len, fds, fd_num) < 0) { + PMD_DRV_LOG(ERR, "%s failed: %s", + vhost_msg_strings[req], strerror(errno)); + return -1; + } + + if (req == VHOST_USER_SET_MEM_TABLE) + for (i = 0; i < fd_num; ++i) + close(fds[i]); + + if (need_reply) { + if (vhost_user_read(vhostfd, &msg) < 0) { + PMD_DRV_LOG(ERR, "Received msg failed: %s", + strerror(errno)); + return -1; + } + + if (req != msg.request) { + 
PMD_DRV_LOG(ERR, "Received unexpected msg type"); + return -1; + } + + switch (req) { + case VHOST_USER_GET_FEATURES: + if (msg.size != sizeof(m.payload.u64)) { + PMD_DRV_LOG(ERR, "Received bad msg size"); + return -1; + } + *((__u64 *)arg) = msg.payload.u64; + break; + case VHOST_USER_GET_VRING_BASE: + if (msg.size != sizeof(m.payload.state)) { + PMD_DRV_LOG(ERR, "Received bad msg size"); + return -1; + } + memcpy(arg, &msg.payload.state, + sizeof(struct vhost_vring_state)); + break; + default: + PMD_DRV_LOG(ERR, "Received unexpected msg type"); + return -1; + } + } + + return 0; +} + +/** + * Set up environment to talk with a vhost user backend. + * @param path + * - The path to vhost user unix socket file. + * + * @return + * - (-1) if fail to set up; + * - (>=0) if successful, and it is the fd to vhostfd. + */ +int +vhost_user_setup(const char *path) +{ + int fd; + int flag; + struct sockaddr_un un; + + fd = socket(AF_UNIX, SOCK_STREAM, 0); + if (fd < 0) { + PMD_DRV_LOG(ERR, "socket() error, %s", strerror(errno)); + return -1; + } + + flag = fcntl(fd, F_GETFD); + fcntl(fd, F_SETFD, flag | FD_CLOEXEC); + + memset(&un, 0, sizeof(un)); + un.sun_family = AF_UNIX; + snprintf(un.sun_path, sizeof(un.sun_path), "%s", path); + if (connect(fd, (struct sockaddr *)&un, sizeof(un)) < 0) { + PMD_DRV_LOG(ERR, "connect error, %s", strerror(errno)); + close(fd); + return -1; + } + + return fd; +} + +int +vhost_user_enable_queue_pair(int vhostfd, uint16_t pair_idx, int enable) +{ + int i; + + for (i = 0; i < 2; ++i) { + struct vhost_vring_state state = { + .index = pair_idx * 2 + i, + .num = enable, + }; + + if (vhost_user_sock(vhostfd, + VHOST_USER_SET_VRING_ENABLE, &state)) + return -1; + } + + return 0; +} diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.c b/drivers/net/virtio/virtio_user/virtio_user_dev.c new file mode 100644 index 00000000..3d12a320 --- /dev/null +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.c @@ -0,0 +1,333 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdint.h> +#include <stdio.h> +#include <fcntl.h> +#include <string.h> +#include <errno.h> +#include <sys/mman.h> +#include <unistd.h> +#include <sys/eventfd.h> + +#include "vhost.h" +#include "virtio_user_dev.h" +#include "../virtio_ethdev.h" + +static int +virtio_user_kick_queue(struct virtio_user_dev *dev, uint32_t queue_sel) +{ + int callfd, kickfd; + struct vhost_vring_file file; + struct vhost_vring_state state; + struct vring *vring = &dev->vrings[queue_sel]; + struct vhost_vring_addr addr = { + .index = queue_sel, + .desc_user_addr = (uint64_t)(uintptr_t)vring->desc, + .avail_user_addr = (uint64_t)(uintptr_t)vring->avail, + .used_user_addr = (uint64_t)(uintptr_t)vring->used, + .log_guest_addr = 0, + .flags = 0, /* disable log */ + }; + + /* May use invalid flag, but some backend leverages kickfd and callfd as + * criteria to judge if dev is alive. so finally we use real event_fd. + */ + callfd = eventfd(0, O_CLOEXEC | O_NONBLOCK); + if (callfd < 0) { + PMD_DRV_LOG(ERR, "callfd error, %s\n", strerror(errno)); + return -1; + } + kickfd = eventfd(0, O_CLOEXEC | O_NONBLOCK); + if (kickfd < 0) { + close(callfd); + PMD_DRV_LOG(ERR, "kickfd error, %s\n", strerror(errno)); + return -1; + } + + /* Of all per virtqueue MSGs, make sure VHOST_SET_VRING_CALL come + * firstly because vhost depends on this msg to allocate virtqueue + * pair. + */ + file.index = queue_sel; + file.fd = callfd; + vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_CALL, &file); + dev->callfds[queue_sel] = callfd; + + state.index = queue_sel; + state.num = vring->num; + vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_NUM, &state); + + state.num = 0; /* no reservation */ + vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_BASE, &state); + + vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_ADDR, &addr); + + /* Of all per virtqueue MSGs, make sure VHOST_USER_SET_VRING_KICK comes + * lastly because vhost depends on this msg to judge if + * virtio is ready. + */ + file.fd = kickfd; + vhost_user_sock(dev->vhostfd, VHOST_USER_SET_VRING_KICK, &file); + dev->kickfds[queue_sel] = kickfd; + + return 0; +} + +int +virtio_user_start_device(struct virtio_user_dev *dev) +{ + uint64_t features; + uint32_t i, queue_sel; + int ret; + + /* construct memory region inside each implementation */ + ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_MEM_TABLE, NULL); + if (ret < 0) + goto error; + + for (i = 0; i < dev->max_queue_pairs; ++i) { + queue_sel = 2 * i + VTNET_SQ_RQ_QUEUE_IDX; + if (virtio_user_kick_queue(dev, queue_sel) < 0) { + PMD_DRV_LOG(INFO, "kick rx vq fails: %u", i); + goto error; + } + } + for (i = 0; i < dev->max_queue_pairs; ++i) { + queue_sel = 2 * i + VTNET_SQ_TQ_QUEUE_IDX; + if (virtio_user_kick_queue(dev, queue_sel) < 0) { + PMD_DRV_LOG(INFO, "kick tx vq fails: %u", i); + goto error; + } + } + + /* After setup all virtqueues, we need to set_features so that these + * features can be set into each virtqueue in vhost side. 
And before + * that, make sure VHOST_USER_F_PROTOCOL_FEATURES is added if mq is + * enabled, and VIRTIO_NET_F_MAC is stripped. + */ + features = dev->features; + if (dev->max_queue_pairs > 1) + features |= VHOST_USER_MQ; + features &= ~(1ull << VIRTIO_NET_F_MAC); + ret = vhost_user_sock(dev->vhostfd, VHOST_USER_SET_FEATURES, &features); + if (ret < 0) + goto error; + PMD_DRV_LOG(INFO, "set features: %" PRIx64, features); + + return 0; +error: + /* TODO: free resource here or caller to check */ + return -1; +} + +int virtio_user_stop_device(struct virtio_user_dev *dev) +{ + return vhost_user_sock(dev->vhostfd, VHOST_USER_RESET_OWNER, NULL); +} + +static inline void +parse_mac(struct virtio_user_dev *dev, const char *mac) +{ + int i, r; + uint32_t tmp[ETHER_ADDR_LEN]; + + if (!mac) + return; + + r = sscanf(mac, "%x:%x:%x:%x:%x:%x", &tmp[0], + &tmp[1], &tmp[2], &tmp[3], &tmp[4], &tmp[5]); + if (r == ETHER_ADDR_LEN) { + for (i = 0; i < ETHER_ADDR_LEN; ++i) + dev->mac_addr[i] = (uint8_t)tmp[i]; + dev->mac_specified = 1; + } else { + /* ignore the wrong mac, use random mac */ + PMD_DRV_LOG(ERR, "wrong format of mac: %s", mac); + } +} + +int +virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + int cq, int queue_size, const char *mac) +{ + strncpy(dev->path, path, PATH_MAX); + dev->max_queue_pairs = queues; + dev->queue_pairs = 1; /* mq disabled by default */ + dev->queue_size = queue_size; + dev->mac_specified = 0; + parse_mac(dev, mac); + dev->vhostfd = -1; + + dev->vhostfd = vhost_user_setup(dev->path); + if (dev->vhostfd < 0) { + PMD_INIT_LOG(ERR, "backend set up fails"); + return -1; + } + if (vhost_user_sock(dev->vhostfd, VHOST_USER_SET_OWNER, NULL) < 0) { + PMD_INIT_LOG(ERR, "set_owner fails: %s", strerror(errno)); + return -1; + } + + if (vhost_user_sock(dev->vhostfd, VHOST_USER_GET_FEATURES, + &dev->features) < 0) { + PMD_INIT_LOG(ERR, "get_features failed: %s", strerror(errno)); + return -1; + } + if (dev->mac_specified) + dev->features |= (1ull << VIRTIO_NET_F_MAC); + + if (!cq) { + dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VQ); + /* Also disable features depends on VIRTIO_NET_F_CTRL_VQ */ + dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_RX); + dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_VLAN); + dev->features &= ~(1ull << VIRTIO_NET_F_GUEST_ANNOUNCE); + dev->features &= ~(1ull << VIRTIO_NET_F_MQ); + dev->features &= ~(1ull << VIRTIO_NET_F_CTRL_MAC_ADDR); + } else { + /* vhost user backend does not need to know ctrl-q, so + * actually we need add this bit into features. However, + * DPDK vhost-user does send features with this bit, so we + * check it instead of OR it for now. 
+ */ + if (!(dev->features & (1ull << VIRTIO_NET_F_CTRL_VQ))) + PMD_INIT_LOG(INFO, "vhost does not support ctrl-q"); + } + + if (dev->max_queue_pairs > 1) { + if (!(dev->features & VHOST_USER_MQ)) { + PMD_INIT_LOG(ERR, "MQ not supported by the backend"); + return -1; + } + } + + return 0; +} + +void +virtio_user_dev_uninit(struct virtio_user_dev *dev) +{ + uint32_t i; + + for (i = 0; i < dev->max_queue_pairs * 2; ++i) { + close(dev->callfds[i]); + close(dev->kickfds[i]); + } + + close(dev->vhostfd); +} + +static uint8_t +virtio_user_handle_mq(struct virtio_user_dev *dev, uint16_t q_pairs) +{ + uint16_t i; + uint8_t ret = 0; + + if (q_pairs > dev->max_queue_pairs) { + PMD_INIT_LOG(ERR, "multi-q config %u, but only %u supported", + q_pairs, dev->max_queue_pairs); + return -1; + } + + for (i = 0; i < q_pairs; ++i) + ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 1); + for (i = q_pairs; i < dev->max_queue_pairs; ++i) + ret |= vhost_user_enable_queue_pair(dev->vhostfd, i, 0); + + dev->queue_pairs = q_pairs; + + return ret; +} + +static uint32_t +virtio_user_handle_ctrl_msg(struct virtio_user_dev *dev, struct vring *vring, + uint16_t idx_hdr) +{ + struct virtio_net_ctrl_hdr *hdr; + virtio_net_ctrl_ack status = ~0; + uint16_t i, idx_data, idx_status; + uint32_t n_descs = 0; + + /* locate desc for header, data, and status */ + idx_data = vring->desc[idx_hdr].next; + n_descs++; + + i = idx_data; + while (vring->desc[i].flags == VRING_DESC_F_NEXT) { + i = vring->desc[i].next; + n_descs++; + } + + /* locate desc for status */ + idx_status = i; + n_descs++; + + hdr = (void *)(uintptr_t)vring->desc[idx_hdr].addr; + if (hdr->class == VIRTIO_NET_CTRL_MQ && + hdr->cmd == VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET) { + uint16_t queues; + + queues = *(uint16_t *)(uintptr_t)vring->desc[idx_data].addr; + status = virtio_user_handle_mq(dev, queues); + } + + /* Update status */ + *(virtio_net_ctrl_ack *)(uintptr_t)vring->desc[idx_status].addr = status; + + return n_descs; +} + +void +virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx) +{ + uint16_t avail_idx, desc_idx; + struct vring_used_elem *uep; + uint32_t n_descs; + struct vring *vring = &dev->vrings[queue_idx]; + + /* Consume avail ring, using used ring idx as first one */ + while (vring->used->idx != vring->avail->idx) { + avail_idx = (vring->used->idx) & (vring->num - 1); + desc_idx = vring->avail->ring[avail_idx]; + + n_descs = virtio_user_handle_ctrl_msg(dev, vring, desc_idx); + + /* Update used ring */ + uep = &vring->used->ring[avail_idx]; + uep->id = avail_idx; + uep->len = n_descs; + + vring->used->idx++; + } +} diff --git a/drivers/net/virtio/virtio_user/virtio_user_dev.h b/drivers/net/virtio/virtio_user/virtio_user_dev.h new file mode 100644 index 00000000..33690b5c --- /dev/null +++ b/drivers/net/virtio/virtio_user/virtio_user_dev.h @@ -0,0 +1,62 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _VIRTIO_USER_DEV_H +#define _VIRTIO_USER_DEV_H + +#include <limits.h> +#include "../virtio_pci.h" +#include "../virtio_ring.h" + +struct virtio_user_dev { + int vhostfd; + int callfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1]; + int kickfds[VIRTIO_MAX_VIRTQUEUES * 2 + 1]; + int mac_specified; + uint32_t max_queue_pairs; + uint32_t queue_pairs; + uint32_t queue_size; + uint64_t features; + uint8_t status; + uint8_t mac_addr[ETHER_ADDR_LEN]; + char path[PATH_MAX]; + struct vring vrings[VIRTIO_MAX_VIRTQUEUES * 2 + 1]; +}; + +int virtio_user_start_device(struct virtio_user_dev *dev); +int virtio_user_stop_device(struct virtio_user_dev *dev); +int virtio_user_dev_init(struct virtio_user_dev *dev, char *path, int queues, + int cq, int queue_size, const char *mac); +void virtio_user_dev_uninit(struct virtio_user_dev *dev); +void virtio_user_handle_cq(struct virtio_user_dev *dev, uint16_t queue_idx); +#endif diff --git a/drivers/net/virtio/virtio_user_ethdev.c b/drivers/net/virtio/virtio_user_ethdev.c new file mode 100644 index 00000000..5ab24711 --- /dev/null +++ b/drivers/net/virtio/virtio_user_ethdev.c @@ -0,0 +1,440 @@ +/*- + * BSD LICENSE + * + * Copyright(c) 2010-2016 Intel Corporation. All rights reserved. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdint.h> +#include <sys/types.h> +#include <unistd.h> + +#include <rte_malloc.h> +#include <rte_kvargs.h> + +#include "virtio_ethdev.h" +#include "virtio_logs.h" +#include "virtio_pci.h" +#include "virtqueue.h" +#include "virtio_rxtx.h" +#include "virtio_user/virtio_user_dev.h" + +#define virtio_user_get_dev(hw) \ + ((struct virtio_user_dev *)(hw)->virtio_user_dev) + +static void +virtio_user_read_dev_config(struct virtio_hw *hw, size_t offset, + void *dst, int length) +{ + int i; + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + if (offset == offsetof(struct virtio_net_config, mac) && + length == ETHER_ADDR_LEN) { + for (i = 0; i < ETHER_ADDR_LEN; ++i) + ((uint8_t *)dst)[i] = dev->mac_addr[i]; + return; + } + + if (offset == offsetof(struct virtio_net_config, status)) + *(uint16_t *)dst = dev->status; + + if (offset == offsetof(struct virtio_net_config, max_virtqueue_pairs)) + *(uint16_t *)dst = dev->max_queue_pairs; +} + +static void +virtio_user_write_dev_config(struct virtio_hw *hw, size_t offset, + const void *src, int length) +{ + int i; + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + if ((offset == offsetof(struct virtio_net_config, mac)) && + (length == ETHER_ADDR_LEN)) + for (i = 0; i < ETHER_ADDR_LEN; ++i) + dev->mac_addr[i] = ((const uint8_t *)src)[i]; + else + PMD_DRV_LOG(ERR, "not supported offset=%zu, len=%d\n", + offset, length); +} + +static void +virtio_user_set_status(struct virtio_hw *hw, uint8_t status) +{ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + if (status & VIRTIO_CONFIG_STATUS_DRIVER_OK) + virtio_user_start_device(dev); + dev->status = status; +} + +static void +virtio_user_reset(struct virtio_hw *hw) +{ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + virtio_user_stop_device(dev); +} + +static uint8_t +virtio_user_get_status(struct virtio_hw *hw) +{ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + return dev->status; +} + +static uint64_t +virtio_user_get_features(struct virtio_hw *hw) +{ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + return dev->features; +} + +static void +virtio_user_set_features(struct virtio_hw *hw, uint64_t features) +{ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + dev->features = features; +} + +static uint8_t +virtio_user_get_isr(struct virtio_hw *hw __rte_unused) +{ + /* When config interrupt happens, driver calls this function to query + * what kinds of change happen. Interrupt mode not supported for now. + */ + return 0; +} + +static uint16_t +virtio_user_set_config_irq(struct virtio_hw *hw __rte_unused, + uint16_t vec __rte_unused) +{ + return VIRTIO_MSI_NO_VECTOR; +} + +/* This function is to get the queue size, aka, number of descs, of a specified + * queue. Different with the VHOST_USER_GET_QUEUE_NUM, which is used to get the + * max supported queues. 
+ */ +static uint16_t +virtio_user_get_queue_num(struct virtio_hw *hw, uint16_t queue_id __rte_unused) +{ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + /* Currently, each queue has same queue size */ + return dev->queue_size; +} + +static int +virtio_user_setup_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + uint16_t queue_idx = vq->vq_queue_index; + uint64_t desc_addr, avail_addr, used_addr; + + desc_addr = (uintptr_t)vq->vq_ring_virt_mem; + avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc); + used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail, + ring[vq->vq_nentries]), + VIRTIO_PCI_VRING_ALIGN); + + dev->vrings[queue_idx].num = vq->vq_nentries; + dev->vrings[queue_idx].desc = (void *)(uintptr_t)desc_addr; + dev->vrings[queue_idx].avail = (void *)(uintptr_t)avail_addr; + dev->vrings[queue_idx].used = (void *)(uintptr_t)used_addr; + + return 0; +} + +static void +virtio_user_del_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + /* For legacy devices, write 0 to VIRTIO_PCI_QUEUE_PFN port, QEMU + * correspondingly stops the ioeventfds, and reset the status of + * the device. + * For modern devices, set queue desc, avail, used in PCI bar to 0, + * not see any more behavior in QEMU. + * + * Here we just care about what information to deliver to vhost-user + * or vhost-kernel. So we just close ioeventfd for now. + */ + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + close(dev->callfds[vq->vq_queue_index]); + close(dev->kickfds[vq->vq_queue_index]); +} + +static void +virtio_user_notify_queue(struct virtio_hw *hw, struct virtqueue *vq) +{ + uint64_t buf = 1; + struct virtio_user_dev *dev = virtio_user_get_dev(hw); + + if (hw->cvq && (hw->cvq->vq == vq)) { + virtio_user_handle_cq(dev, vq->vq_queue_index); + return; + } + + if (write(dev->kickfds[vq->vq_queue_index], &buf, sizeof(buf)) < 0) + PMD_DRV_LOG(ERR, "failed to kick backend: %s\n", + strerror(errno)); +} + +static const struct virtio_pci_ops virtio_user_ops = { + .read_dev_cfg = virtio_user_read_dev_config, + .write_dev_cfg = virtio_user_write_dev_config, + .reset = virtio_user_reset, + .get_status = virtio_user_get_status, + .set_status = virtio_user_set_status, + .get_features = virtio_user_get_features, + .set_features = virtio_user_set_features, + .get_isr = virtio_user_get_isr, + .set_config_irq = virtio_user_set_config_irq, + .get_queue_num = virtio_user_get_queue_num, + .setup_queue = virtio_user_setup_queue, + .del_queue = virtio_user_del_queue, + .notify_queue = virtio_user_notify_queue, +}; + +static const char *valid_args[] = { +#define VIRTIO_USER_ARG_QUEUES_NUM "queues" + VIRTIO_USER_ARG_QUEUES_NUM, +#define VIRTIO_USER_ARG_CQ_NUM "cq" + VIRTIO_USER_ARG_CQ_NUM, +#define VIRTIO_USER_ARG_MAC "mac" + VIRTIO_USER_ARG_MAC, +#define VIRTIO_USER_ARG_PATH "path" + VIRTIO_USER_ARG_PATH, +#define VIRTIO_USER_ARG_QUEUE_SIZE "queue_size" + VIRTIO_USER_ARG_QUEUE_SIZE, + NULL +}; + +#define VIRTIO_USER_DEF_CQ_EN 0 +#define VIRTIO_USER_DEF_Q_NUM 1 +#define VIRTIO_USER_DEF_Q_SZ 256 + +static int +get_string_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(char **)extra_args = strdup(value); + + return 0; +} + +static int +get_integer_arg(const char *key __rte_unused, + const char *value, void *extra_args) +{ + if (!value || !extra_args) + return -EINVAL; + + *(uint64_t *)extra_args = strtoull(value, NULL, 0); + + return 0; +} + 
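As a side note on the two devargs callbacks just above: get_string_arg() and get_integer_arg() follow the standard rte_kvargs handler signature and are driven by rte_kvargs_parse()/rte_kvargs_count()/rte_kvargs_process(), exactly as virtio_user_pmd_devinit() does further down. A minimal standalone sketch of that flow is shown here for reference only; the devargs string is made up for illustration, and the key names simply mirror the valid_args[] table introduced above.

/* Illustrative sketch, not part of this patch: exercise rte_kvargs the
 * same way virtio_user_pmd_devinit() does. The devargs string below is
 * invented; "path" and "queues" mirror the valid_args[] table above.
 */
#include <stdio.h>
#include <rte_kvargs.h>

static int
print_kv(const char *key, const char *value, void *opaque)
{
	(void)opaque;
	printf("%s = %s\n", key, value);
	return 0;
}

int
main(void)
{
	static const char *keys[] = { "path", "queues", NULL };
	struct rte_kvargs *kvlist;

	/* split "key=value,key=value" into a key/value list */
	kvlist = rte_kvargs_parse("path=/tmp/vhost-user.sock,queues=2", keys);
	if (kvlist == NULL)
		return 1;

	/* invoke the handler once per matching key, as the PMD does */
	if (rte_kvargs_count(kvlist, "queues") == 1)
		rte_kvargs_process(kvlist, "queues", print_kv, NULL);
	rte_kvargs_process(kvlist, "path", print_kv, NULL);

	rte_kvargs_free(kvlist);
	return 0;
}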
+static struct rte_eth_dev * +virtio_user_eth_dev_alloc(const char *name) +{ + struct rte_eth_dev *eth_dev; + struct rte_eth_dev_data *data; + struct virtio_hw *hw; + struct virtio_user_dev *dev; + + eth_dev = rte_eth_dev_allocate(name, RTE_ETH_DEV_VIRTUAL); + if (!eth_dev) { + PMD_INIT_LOG(ERR, "cannot alloc rte_eth_dev"); + return NULL; + } + + data = eth_dev->data; + + hw = rte_zmalloc(NULL, sizeof(*hw), 0); + if (!hw) { + PMD_INIT_LOG(ERR, "malloc virtio_hw failed"); + rte_eth_dev_release_port(eth_dev); + return NULL; + } + + dev = rte_zmalloc(NULL, sizeof(*dev), 0); + if (!dev) { + PMD_INIT_LOG(ERR, "malloc virtio_user_dev failed"); + rte_eth_dev_release_port(eth_dev); + rte_free(hw); + return NULL; + } + + hw->vtpci_ops = &virtio_user_ops; + hw->use_msix = 0; + hw->modern = 0; + hw->virtio_user_dev = dev; + data->dev_private = hw; + data->numa_node = SOCKET_ID_ANY; + data->kdrv = RTE_KDRV_NONE; + data->dev_flags = RTE_ETH_DEV_DETACHABLE; + eth_dev->pci_dev = NULL; + eth_dev->driver = NULL; + return eth_dev; +} + +/* Dev initialization routine. Invoked once for each virtio vdev at + * EAL init time, see rte_eal_dev_init(). + * Returns 0 on success. + */ +static int +virtio_user_pmd_devinit(const char *name, const char *params) +{ + struct rte_kvargs *kvlist; + struct rte_eth_dev *eth_dev; + struct virtio_hw *hw; + uint64_t queues = VIRTIO_USER_DEF_Q_NUM; + uint64_t cq = VIRTIO_USER_DEF_CQ_EN; + uint64_t queue_size = VIRTIO_USER_DEF_Q_SZ; + char *path = NULL; + char *mac_addr = NULL; + int ret = -1; + + if (!params || params[0] == '\0') { + PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio-user", + VIRTIO_USER_ARG_QUEUE_SIZE); + goto end; + } + + kvlist = rte_kvargs_parse(params, valid_args); + if (!kvlist) { + PMD_INIT_LOG(ERR, "error when parsing param"); + goto end; + } + + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_PATH) == 1) + rte_kvargs_process(kvlist, VIRTIO_USER_ARG_PATH, + &get_string_arg, &path); + else { + PMD_INIT_LOG(ERR, "arg %s is mandatory for virtio-user\n", + VIRTIO_USER_ARG_QUEUE_SIZE); + goto end; + } + + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_MAC) == 1) + rte_kvargs_process(kvlist, VIRTIO_USER_ARG_MAC, + &get_string_arg, &mac_addr); + + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE) == 1) + rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUE_SIZE, + &get_integer_arg, &queue_size); + + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_QUEUES_NUM) == 1) + rte_kvargs_process(kvlist, VIRTIO_USER_ARG_QUEUES_NUM, + &get_integer_arg, &queues); + + if (rte_kvargs_count(kvlist, VIRTIO_USER_ARG_CQ_NUM) == 1) + rte_kvargs_process(kvlist, VIRTIO_USER_ARG_CQ_NUM, + &get_integer_arg, &cq); + else if (queues > 1) + cq = 1; + + if (queues > 1 && cq == 0) { + PMD_INIT_LOG(ERR, "multi-q requires ctrl-q"); + goto end; + } + + eth_dev = virtio_user_eth_dev_alloc(name); + if (!eth_dev) { + PMD_INIT_LOG(ERR, "virtio-user fails to alloc device"); + goto end; + } + + hw = eth_dev->data->dev_private; + if (virtio_user_dev_init(hw->virtio_user_dev, path, queues, cq, + queue_size, mac_addr) < 0) + goto end; + + /* previously called by rte_eal_pci_probe() for physical dev */ + if (eth_virtio_dev_init(eth_dev) < 0) { + PMD_INIT_LOG(ERR, "eth_virtio_dev_init fails"); + goto end; + } + ret = 0; + +end: + if (path) + free(path); + if (mac_addr) + free(mac_addr); + return ret; +} + +/** Called by rte_eth_dev_detach() */ +static int +virtio_user_pmd_devuninit(const char *name) +{ + struct rte_eth_dev *eth_dev; + struct virtio_hw *hw; + struct virtio_user_dev *dev; + + if (!name) + return 
-EINVAL; + + PMD_DRV_LOG(INFO, "Un-Initializing %s\n", name); + eth_dev = rte_eth_dev_allocated(name); + if (!eth_dev) + return -ENODEV; + + /* make sure the device is stopped, queues freed */ + rte_eth_dev_close(eth_dev->data->port_id); + + hw = eth_dev->data->dev_private; + dev = hw->virtio_user_dev; + virtio_user_dev_uninit(dev); + + rte_free(eth_dev->data->dev_private); + rte_free(eth_dev->data); + rte_eth_dev_release_port(eth_dev); + + return 0; +} + +static struct rte_driver virtio_user_driver = { + .name = "virtio-user", + .type = PMD_VDEV, + .init = virtio_user_pmd_devinit, + .uninit = virtio_user_pmd_devuninit, +}; + +PMD_REGISTER_DRIVER(virtio_user_driver); diff --git a/drivers/net/virtio/virtqueue.h b/drivers/net/virtio/virtqueue.h index 4e9239e0..455aaafe 100644 --- a/drivers/net/virtio/virtqueue.h +++ b/drivers/net/virtio/virtqueue.h @@ -66,6 +66,14 @@ struct rte_mbuf; #define VIRTQUEUE_MAX_NAME_SZ 32 +#ifdef RTE_VIRTIO_USER +#define MBUF_DATA_DMA_ADDR(mb, offset) \ + ((uint64_t)((uintptr_t)(*(void **)((uintptr_t)mb + offset)) \ + + (mb)->data_off)) +#else /* RTE_VIRTIO_USER */ +#define MBUF_DATA_DMA_ADDR(mb, offset) rte_mbuf_data_dma_addr(mb) +#endif /* RTE_VIRTIO_USER */ + #define VTNET_SQ_RQ_QUEUE_IDX 0 #define VTNET_SQ_TQ_QUEUE_IDX 1 #define VTNET_SQ_CQ_QUEUE_IDX 2 @@ -153,23 +161,30 @@ struct virtio_pmd_ctrl { uint8_t data[VIRTIO_MAX_CTRL_DATA]; }; +struct vq_desc_extra { + void *cookie; + uint16_t ndescs; +}; + struct virtqueue { - struct virtio_hw *hw; /**< virtio_hw structure pointer. */ - const struct rte_memzone *mz; /**< mem zone to populate RX ring. */ - const struct rte_memzone *virtio_net_hdr_mz; /**< memzone to populate hdr. */ - struct rte_mempool *mpool; /**< mempool for mbuf allocation */ - uint16_t queue_id; /**< DPDK queue index. */ - uint8_t port_id; /**< Device port identifier. */ - uint16_t vq_queue_index; /**< PCI queue index */ - - void *vq_ring_virt_mem; /**< linear address of vring*/ + struct virtio_hw *hw; /**< virtio_hw structure pointer. */ + struct vring vq_ring; /**< vring keeping desc, used and avail */ + /** + * Last consumed descriptor in the used table, + * trails vq_ring.used->idx. + */ + uint16_t vq_used_cons_idx; + uint16_t vq_nentries; /**< vring desc numbers */ + uint16_t vq_free_cnt; /**< num of desc available */ + uint16_t vq_avail_idx; /**< sync until needed */ + uint16_t vq_free_thresh; /**< free threshold */ + + void *vq_ring_virt_mem; /**< linear address of vring*/ unsigned int vq_ring_size; - phys_addr_t vq_ring_mem; /**< physical address of vring */ - struct vring vq_ring; /**< vring keeping desc, used and avail */ - uint16_t vq_free_cnt; /**< num of desc available */ - uint16_t vq_nentries; /**< vring desc numbers */ - uint16_t vq_free_thresh; /**< free threshold */ + phys_addr_t vq_ring_mem; /**< physical address of vring */ + /**< use virtual address for virtio-user. */ + /** * Head of the free chain in the descriptor table. If * there are no free descriptors, this will be set to @@ -177,34 +192,12 @@ struct virtqueue { */ uint16_t vq_desc_head_idx; uint16_t vq_desc_tail_idx; - /** - * Last consumed descriptor in the used table, - * trails vq_ring.used->idx. - */ - uint16_t vq_used_cons_idx; - uint16_t vq_avail_idx; - uint64_t mbuf_initializer; /**< value to init mbufs. */ - phys_addr_t virtio_net_hdr_mem; /**< hdr for each xmit packet */ - - struct rte_mbuf **sw_ring; /**< RX software ring. */ - /* dummy mbuf, for wraparound when processing RX ring. 
*/ - struct rte_mbuf fake_mbuf; - - /* Statistics */ - uint64_t packets; - uint64_t bytes; - uint64_t errors; - uint64_t multicast; - uint64_t broadcast; - /* Size bins in array as RFC 2819, undersized [0], 64 [1], etc */ - uint64_t size_bins[8]; - - uint16_t *notify_addr; - - struct vq_desc_extra { - void *cookie; - uint16_t ndescs; - } vq_descx[0]; + uint16_t vq_queue_index; /**< PCI queue index */ + uint16_t offset; /**< relative offset to obtain addr in mbuf */ + uint16_t *notify_addr; + int configured; + struct rte_mbuf **sw_ring; /**< RX software ring. */ + struct vq_desc_extra vq_descx[0]; }; /* If multiqueue is provided by host, then we suppport it. */ @@ -302,7 +295,8 @@ vq_update_avail_ring(struct virtqueue *vq, uint16_t desc_idx) * descriptor. */ avail_idx = (uint16_t)(vq->vq_avail_idx & (vq->vq_nentries - 1)); - vq->vq_ring.avail->ring[avail_idx] = desc_idx; + if (unlikely(vq->vq_ring.avail->ring[avail_idx] != desc_idx)) + vq->vq_ring.avail->ring[avail_idx] = desc_idx; vq->vq_avail_idx++; } diff --git a/drivers/net/vmxnet3/Makefile b/drivers/net/vmxnet3/Makefile index 4cf3b33b..23ff1da2 100644 --- a/drivers/net/vmxnet3/Makefile +++ b/drivers/net/vmxnet3/Makefile @@ -39,13 +39,13 @@ LIB = librte_pmd_vmxnet3_uio.a CFLAGS += -O3 CFLAGS += $(WERROR_FLAGS) -ifeq ($(CC), icc) +ifeq ($(CONFIG_RTE_TOOLCHAIN_ICC),y) # # CFLAGS for icc # CFLAGS_BASE_DRIVER = -wd174 -wd593 -wd869 -wd981 -wd2259 -else ifeq ($(CC), clang) +else ifeq ($(CONFIG_RTE_TOOLCHAIN_CLANG),y) # # CFLAGS for clang # diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.c b/drivers/net/vmxnet3/vmxnet3_ethdev.c index bd7a2bb7..29b469cc 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.c +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.c @@ -694,7 +694,6 @@ vmxnet3_dev_stats_get(struct rte_eth_dev *dev, struct rte_eth_stats *stats) stats->q_errors[i] = rxStats->pktsRxError; stats->ierrors += rxStats->pktsRxError; - stats->imcasts += rxStats->mcastPktsRxOK; stats->rx_nombuf += rxStats->pktsRxOutOfBuf; } } diff --git a/drivers/net/vmxnet3/vmxnet3_ethdev.h b/drivers/net/vmxnet3/vmxnet3_ethdev.h index 4f9d0bd2..1be833ab 100644 --- a/drivers/net/vmxnet3/vmxnet3_ethdev.h +++ b/drivers/net/vmxnet3/vmxnet3_ethdev.h @@ -34,14 +34,6 @@ #ifndef _VMXNET3_ETHDEV_H_ #define _VMXNET3_ETHDEV_H_ -#ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER -#define VMXNET3_ASSERT(x) do { \ - if (!(x)) rte_panic("VMXNET3: %s\n", #x); \ -} while(0) -#else -#define VMXNET3_ASSERT(x) do { (void)(x); } while (0) -#endif - #define VMXNET3_MAX_MAC_ADDRS 1 /* UPT feature to negotiate */ diff --git a/drivers/net/vmxnet3/vmxnet3_rxtx.c b/drivers/net/vmxnet3/vmxnet3_rxtx.c index 4ac0456c..9deeb3ff 100644 --- a/drivers/net/vmxnet3/vmxnet3_rxtx.c +++ b/drivers/net/vmxnet3/vmxnet3_rxtx.c @@ -86,16 +86,6 @@ static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *); static void vmxnet3_txq_dump(struct vmxnet3_tx_queue *); #endif -static struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - return m; -} - #ifdef RTE_LIBRTE_VMXNET3_DEBUG_DRIVER_NOT_USED static void vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq) @@ -106,7 +96,7 @@ vmxnet3_rxq_dump(struct vmxnet3_rx_queue *rxq) return; PMD_RX_LOG(DEBUG, - "RXQ: cmd0 base : 0x%p cmd1 base : 0x%p comp ring base : 0x%p.", + "RXQ: cmd0 base : %p cmd1 base : %p comp ring base : %p.", rxq->cmd_ring[0].base, rxq->cmd_ring[1].base, rxq->comp_ring.base); PMD_RX_LOG(DEBUG, "RXQ: cmd0 basePA : 0x%lx cmd1 basePA : 0x%lx comp ring basePA : 0x%lx.", @@ 
-136,7 +126,7 @@ vmxnet3_txq_dump(struct vmxnet3_tx_queue *txq) if (txq == NULL) return; - PMD_TX_LOG(DEBUG, "TXQ: cmd base : 0x%p comp ring base : 0x%p data ring base : 0x%p.", + PMD_TX_LOG(DEBUG, "TXQ: cmd base : %p comp ring base : %p data ring base : %p.", txq->cmd_ring.base, txq->comp_ring.base, txq->data_ring.base); PMD_TX_LOG(DEBUG, "TXQ: cmd basePA : 0x%lx comp ring basePA : 0x%lx data ring basePA : 0x%lx.", (unsigned long)txq->cmd_ring.basePA, @@ -296,7 +286,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq) struct rte_mbuf *mbuf; /* Release cmd_ring descriptor and free mbuf */ - VMXNET3_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1); + RTE_ASSERT(txq->cmd_ring.base[eop_idx].txd.eop == 1); mbuf = txq->cmd_ring.buf_info[eop_idx].m; if (mbuf == NULL) @@ -307,7 +297,7 @@ vmxnet3_unmap_pkt(uint16_t eop_idx, vmxnet3_tx_queue_t *txq) while (txq->cmd_ring.next2comp != eop_idx) { /* no out-of-order completion */ - VMXNET3_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0); + RTE_ASSERT(txq->cmd_ring.base[txq->cmd_ring.next2comp].txd.cq == 0); vmxnet3_cmd_ring_adv_next2comp(&txq->cmd_ring); completed++; } @@ -454,7 +444,7 @@ vmxnet3_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts, if (tso) { uint16_t mss = txm->tso_segsz; - VMXNET3_ASSERT(mss > 0); + RTE_ASSERT(mss > 0); gdesc->txd.hlen = txm->l2_len + txm->l3_len + txm->l4_len; gdesc->txd.om = VMXNET3_OM_TSO; @@ -544,7 +534,7 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) rxd = (struct Vmxnet3_RxDesc *)(ring->base + ring->next2fill); /* Allocate blank mbuf for the current Rx Descriptor */ - mbuf = rte_rxmbuf_alloc(rxq->mp); + mbuf = rte_mbuf_raw_alloc(rxq->mp); if (unlikely(mbuf == NULL)) { PMD_RX_LOG(ERR, "Error allocating mbuf"); rxq->stats.rx_buf_alloc_failure++; @@ -587,12 +577,6 @@ vmxnet3_post_rx_bufs(vmxnet3_rx_queue_t *rxq, uint8_t ring_id) static void vmxnet3_rx_offload(const Vmxnet3_RxCompDesc *rcd, struct rte_mbuf *rxm) { - /* Check for hardware stripped VLAN tag */ - if (rcd->ts) { - rxm->ol_flags |= PKT_RX_VLAN_PKT; - rxm->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci); - } - /* Check for RSS */ if (rcd->rssType != VMXNET3_RCD_RSS_TYPE_NONE) { rxm->ol_flags |= PKT_RX_RSS_HASH; @@ -658,12 +642,13 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) idx = rcd->rxdIdx; ring_idx = (uint8_t)((rcd->rqID == rxq->qid1) ? 0 : 1); rxd = (Vmxnet3_RxDesc *)rxq->cmd_ring[ring_idx].base + idx; + RTE_SET_USED(rxd); /* used only for assert when enabled */ rbi = rxq->cmd_ring[ring_idx].buf_info + idx; PMD_RX_LOG(DEBUG, "rxd idx: %d ring idx: %d.", idx, ring_idx); - VMXNET3_ASSERT(rcd->len <= rxd->len); - VMXNET3_ASSERT(rbi->m); + RTE_ASSERT(rcd->len <= rxd->len); + RTE_ASSERT(rbi->m); /* Get the packet buffer pointer from buf_info */ rxm = rbi->m; @@ -710,10 +695,10 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) * the last mbuf of the current packet. */ if (rcd->sop) { - VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD); + RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_HEAD); if (unlikely(rcd->len == 0)) { - VMXNET3_ASSERT(rcd->eop); + RTE_ASSERT(rcd->eop); PMD_RX_LOG(DEBUG, "Rx buf was skipped. 
rxring[%d][%d])", @@ -727,7 +712,7 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) } else { struct rte_mbuf *start = rxq->start_seg; - VMXNET3_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY); + RTE_ASSERT(rxd->btype == VMXNET3_RXD_BTYPE_BODY); start->pkt_len += rxm->data_len; start->nb_segs++; @@ -737,7 +722,15 @@ vmxnet3_recv_pkts(void *rx_queue, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) rxq->last_seg = rxm; if (rcd->eop) { - rx_pkts[nb_rx++] = rxq->start_seg; + struct rte_mbuf *start = rxq->start_seg; + + /* Check for hardware stripped VLAN tag */ + if (rcd->ts) { + start->ol_flags |= (PKT_RX_VLAN_PKT | PKT_RX_VLAN_STRIPPED); + start->vlan_tci = rte_le_to_cpu_16((uint16_t)rcd->tci); + } + + rx_pkts[nb_rx++] = start; rxq->start_seg = NULL; } diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.c b/drivers/net/xenvirt/rte_eth_xenvirt.c index b9638d96..3e45808f 100644 --- a/drivers/net/xenvirt/rte_eth_xenvirt.c +++ b/drivers/net/xenvirt/rte_eth_xenvirt.c @@ -39,6 +39,9 @@ #include <sys/mman.h> #include <errno.h> #include <sys/user.h> +#ifndef PAGE_SIZE +#define PAGE_SIZE sysconf(_SC_PAGE_SIZE) +#endif #include <linux/binfmts.h> #include <xen/xen-compat.h> #if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200 @@ -79,18 +82,6 @@ static struct rte_eth_link pmd_link = { static void eth_xenvirt_free_queues(struct rte_eth_dev *dev); -static inline struct rte_mbuf * -rte_rxmbuf_alloc(struct rte_mempool *mp) -{ - struct rte_mbuf *m; - - m = __rte_mbuf_raw_alloc(mp); - __rte_mbuf_sanity_check_raw(m, 0); - - return m; -} - - static uint16_t eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) { @@ -122,7 +113,7 @@ eth_xenvirt_rx(void *q, struct rte_mbuf **rx_pkts, uint16_t nb_pkts) } /* allocate new mbuf for the used descriptor */ while (likely(!virtqueue_full(rxvq))) { - new_mbuf = rte_rxmbuf_alloc(rxvq->mpool); + new_mbuf = rte_mbuf_raw_alloc(rxvq->mpool); if (unlikely(new_mbuf == NULL)) { break; } @@ -293,7 +284,7 @@ eth_dev_start(struct rte_eth_dev *dev) dev->data->dev_link.link_status = ETH_LINK_UP; while (!virtqueue_full(rxvq)) { - m = rte_rxmbuf_alloc(rxvq->mpool); + m = rte_mbuf_raw_alloc(rxvq->mpool); if (m == NULL) break; /* Enqueue allocated buffers. 
*/ diff --git a/drivers/net/xenvirt/rte_eth_xenvirt.h b/drivers/net/xenvirt/rte_eth_xenvirt.h index fc15a636..4995a9b4 100644 --- a/drivers/net/xenvirt/rte_eth_xenvirt.h +++ b/drivers/net/xenvirt/rte_eth_xenvirt.h @@ -51,7 +51,7 @@ struct rte_mempool * rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size, unsigned cache_size, unsigned private_data_size, rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags); diff --git a/drivers/net/xenvirt/rte_mempool_gntalloc.c b/drivers/net/xenvirt/rte_mempool_gntalloc.c index 7bfbfda3..73e82f80 100644 --- a/drivers/net/xenvirt/rte_mempool_gntalloc.c +++ b/drivers/net/xenvirt/rte_mempool_gntalloc.c @@ -78,7 +78,7 @@ static struct _mempool_gntalloc_info _create_mempool(const char *name, unsigned elt_num, unsigned elt_size, unsigned cache_size, unsigned private_data_size, rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags) { struct _mempool_gntalloc_info mgi; @@ -202,7 +202,7 @@ _create_mempool(const char *name, unsigned elt_num, unsigned elt_size, obj_init, obj_init_arg, socket_id, flags, va, pa_arr, rpg_num, pg_shift); - RTE_VERIFY(elt_num == mp->size); + RTE_ASSERT(elt_num == mp->size); } mgi.mp = mp; mgi.pg_num = rpg_num; @@ -253,7 +253,7 @@ struct rte_mempool * rte_mempool_gntalloc_create(const char *name, unsigned elt_num, unsigned elt_size, unsigned cache_size, unsigned private_data_size, rte_mempool_ctor_t *mp_init, void *mp_init_arg, - rte_mempool_obj_ctor_t *obj_init, void *obj_init_arg, + rte_mempool_obj_cb_t *obj_init, void *obj_init_arg, int socket_id, unsigned flags) { int rv; diff --git a/drivers/net/xenvirt/rte_xen_lib.c b/drivers/net/xenvirt/rte_xen_lib.c index de63cd30..6c9a1d49 100644 --- a/drivers/net/xenvirt/rte_xen_lib.c +++ b/drivers/net/xenvirt/rte_xen_lib.c @@ -423,6 +423,7 @@ grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *g { char key_str[PATH_MAX] = {0}; char val_str[PATH_MAX] = {0}; + void *mempool_obj_va; if (grant_node_create(pg_num, gref_arr, pa_arr, val_str, sizeof(val_str))) { return -1; @@ -437,7 +438,14 @@ grant_gntalloc_mbuf_pool(struct rte_mempool *mpool, uint32_t pg_num, uint32_t *g if (snprintf(key_str, sizeof(key_str), DPDK_XENSTORE_PATH"%d"MEMPOOL_VA_XENSTORE_STR, mempool_idx) == -1) return -1; - if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR, (uintptr_t)mpool->elt_va_start) == -1) + if (mpool->nb_mem_chunks != 1) { + RTE_LOG(ERR, PMD, + "mempool with more than 1 chunk is not supported\n"); + return -1; + } + mempool_obj_va = STAILQ_FIRST(&mpool->mem_list)->addr; + if (snprintf(val_str, sizeof(val_str), "%"PRIxPTR, + (uintptr_t)mempool_obj_va) == -1) return -1; if (xenstore_write(key_str, val_str) == -1) return -1; |
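A closing note on the xenvirt hunk above: with the reworked mempool, the object area's virtual address is no longer exposed as elt_va_start and has to be read from the mempool's memory-chunk list, which is presumably why the patch rejects pools with more than one chunk. A minimal sketch of that lookup, factored into a hypothetical helper (not part of the patch) and limited to single-chunk pools just as grant_gntalloc_mbuf_pool() now is:

/* Hypothetical helper, not part of this patch: return the virtual address
 * of a mempool's object area, mirroring what grant_gntalloc_mbuf_pool()
 * does after this change. Only single-chunk pools are handled.
 */
#include <stddef.h>
#include <sys/queue.h>
#include <rte_mempool.h>

static void *
mempool_single_chunk_va(const struct rte_mempool *mp)
{
	if (mp->nb_mem_chunks != 1)
		return NULL;	/* multi-chunk layout not supported here */

	/* first (and only) memory chunk backing the pool's objects */
	return STAILQ_FIRST(&mp->mem_list)->addr;
}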