author    | Luca Boccassi <luca.boccassi@gmail.com> | 2017-11-08 14:15:11 +0000
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2017-11-08 14:45:54 +0000
commit    | 055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree      | 8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /drivers/mempool
parent    | f239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/mempool')
-rw-r--r-- | drivers/mempool/Makefile | 7
-rw-r--r-- | drivers/mempool/dpaa/Makefile | 59
-rw-r--r-- | drivers/mempool/dpaa/dpaa_mempool.c | 284
-rw-r--r-- | drivers/mempool/dpaa/dpaa_mempool.h | 76
-rw-r--r-- | drivers/mempool/dpaa/rte_mempool_dpaa_version.map | 8
-rw-r--r-- | drivers/mempool/dpaa2/Makefile | 1
-rw-r--r-- | drivers/mempool/dpaa2/dpaa2_hw_mempool.c | 38
-rw-r--r-- | drivers/mempool/dpaa2/dpaa2_hw_mempool.h | 2
-rw-r--r-- | drivers/mempool/octeontx/Makefile | 68
-rw-r--r-- | drivers/mempool/octeontx/octeontx_fpavf.c | 830
-rw-r--r-- | drivers/mempool/octeontx/octeontx_fpavf.h | 131
-rw-r--r-- | drivers/mempool/octeontx/octeontx_mbox.c | 242
-rw-r--r-- | drivers/mempool/octeontx/octeontx_mbox.h | 64
-rw-r--r-- | drivers/mempool/octeontx/octeontx_pool_logs.h | 68
-rw-r--r-- | drivers/mempool/octeontx/octeontx_ssovf.c | 299
-rw-r--r-- | drivers/mempool/octeontx/rte_mempool_octeontx.c | 253
-rw-r--r-- | drivers/mempool/octeontx/rte_mempool_octeontx_version.map | 9
-rw-r--r-- | drivers/mempool/ring/Makefile | 1
-rw-r--r-- | drivers/mempool/stack/Makefile | 1
19 files changed, 2422 insertions, 19 deletions
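
[Editor's note] Both new drivers below plug into the mempool framework the same way: each fills a struct rte_mempool_ops table (see dpaa_mpool_ops at the end of dpaa_mempool.c) and registers it with MEMPOOL_REGISTER_OPS(). As a minimal, hypothetical sketch of how an application would then select such hardware-backed ops by name — the pool name and sizes here are illustrative only, not taken from this commit:

#include <rte_lcore.h>
#include <rte_mempool.h>

static struct rte_mempool *
make_dpaa_pool(void)
{
	struct rte_mempool *mp;

	/* Illustrative sizes: 8192 objects of 2048 bytes, 256-deep cache */
	mp = rte_mempool_create_empty("pkt_pool", 8192, 2048,
				      256, 0, rte_socket_id(), 0);
	if (mp == NULL)
		return NULL;

	/* Bind the ops the driver registered under the name "dpaa" */
	if (rte_mempool_set_ops_byname(mp, "dpaa", NULL) != 0) {
		rte_mempool_free(mp);
		return NULL;
	}

	/* Populate backing memory; the driver's .alloc hook
	 * (dpaa_mbuf_create_pool) runs during population */
	if (rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}
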
diff --git a/drivers/mempool/Makefile b/drivers/mempool/Makefile index efd55f23..f656c566 100644 --- a/drivers/mempool/Makefile +++ b/drivers/mempool/Makefile @@ -30,13 +30,10 @@ include $(RTE_SDK)/mk/rte.vars.mk -core-libs := librte_eal librte_mempool librte_ring - +DIRS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa DIRS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2 -DEPDIRS-dpaa2 = $(core-libs) DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_RING) += ring -DEPDIRS-ring = $(core-libs) DIRS-$(CONFIG_RTE_DRIVER_MEMPOOL_STACK) += stack -DEPDIRS-stack = $(core-libs) +DIRS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx include $(RTE_SDK)/mk/rte.subdir.mk diff --git a/drivers/mempool/dpaa/Makefile b/drivers/mempool/dpaa/Makefile new file mode 100644 index 00000000..c49b0ee3 --- /dev/null +++ b/drivers/mempool/dpaa/Makefile @@ -0,0 +1,59 @@ +# BSD LICENSE +# +# Copyright 2016 NXP. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of NXP nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_mempool_dpaa.a + +CFLAGS := -I$(SRCDIR) $(CFLAGS) +CFLAGS += -O3 $(WERROR_FLAGS) +CFLAGS += -D _GNU_SOURCE +CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa +CFLAGS += -I$(RTE_SDK)/drivers/bus/dpaa/include/ +CFLAGS += -I$(RTE_SDK)/drivers/mempool/dpaa +CFLAGS += -I$(RTE_SDK)/lib/librte_mempool + +# versioning export map +EXPORT_MAP := rte_mempool_dpaa_version.map + +# Lbrary version +LIBABIVER := 1 + +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_DPAA_MEMPOOL) += dpaa_mempool.c + +LDLIBS += -lrte_bus_dpaa +LDLIBS += -lrte_eal -lrte_mempool -lrte_ring + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/mempool/dpaa/dpaa_mempool.c b/drivers/mempool/dpaa/dpaa_mempool.c new file mode 100644 index 00000000..f5ee80f2 --- /dev/null +++ b/drivers/mempool/dpaa/dpaa_mempool.c @@ -0,0 +1,284 @@ +/*- + * BSD LICENSE + * + * Copyright 2017 NXP. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of NXP nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/* System headers */ +#include <stdio.h> +#include <inttypes.h> +#include <unistd.h> +#include <limits.h> +#include <sched.h> +#include <signal.h> +#include <pthread.h> +#include <sys/types.h> +#include <sys/syscall.h> + +#include <rte_byteorder.h> +#include <rte_common.h> +#include <rte_log.h> +#include <rte_debug.h> +#include <rte_memory.h> +#include <rte_tailq.h> +#include <rte_eal.h> +#include <rte_malloc.h> +#include <rte_ring.h> + +#include <dpaa_mempool.h> + +struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS]; + +static int +dpaa_mbuf_create_pool(struct rte_mempool *mp) +{ + struct bman_pool *bp; + struct bm_buffer bufs[8]; + struct dpaa_bp_info *bp_info; + uint8_t bpid; + int num_bufs = 0, ret = 0; + struct bman_pool_params params = { + .flags = BMAN_POOL_FLAG_DYNAMIC_BPID + }; + + MEMPOOL_INIT_FUNC_TRACE(); + + bp = bman_new_pool(¶ms); + if (!bp) { + DPAA_MEMPOOL_ERR("bman_new_pool() failed"); + return -ENODEV; + } + bpid = bman_get_params(bp)->bpid; + + /* Drain the pool of anything already in it. */ + do { + /* Acquire is all-or-nothing, so we drain in 8s, + * then in 1s for the remainder. 
+ */ + if (ret != 1) + ret = bman_acquire(bp, bufs, 8, 0); + if (ret < 8) + ret = bman_acquire(bp, bufs, 1, 0); + if (ret > 0) + num_bufs += ret; + } while (ret > 0); + if (num_bufs) + DPAA_MEMPOOL_WARN("drained %u bufs from BPID %d", + num_bufs, bpid); + + rte_dpaa_bpid_info[bpid].mp = mp; + rte_dpaa_bpid_info[bpid].bpid = bpid; + rte_dpaa_bpid_info[bpid].size = mp->elt_size; + rte_dpaa_bpid_info[bpid].bp = bp; + rte_dpaa_bpid_info[bpid].meta_data_size = + sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp); + rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index; + + bp_info = rte_malloc(NULL, + sizeof(struct dpaa_bp_info), + RTE_CACHE_LINE_SIZE); + if (!bp_info) { + DPAA_MEMPOOL_WARN("Memory allocation failed for bp_info"); + bman_free_pool(bp); + return -ENOMEM; + } + + rte_memcpy(bp_info, (void *)&rte_dpaa_bpid_info[bpid], + sizeof(struct dpaa_bp_info)); + mp->pool_data = (void *)bp_info; + + DPAA_MEMPOOL_INFO("BMAN pool created for bpid =%d", bpid); + return 0; +} + +static void +dpaa_mbuf_free_pool(struct rte_mempool *mp) +{ + struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); + + MEMPOOL_INIT_FUNC_TRACE(); + + if (bp_info) { + bman_free_pool(bp_info->bp); + DPAA_MEMPOOL_INFO("BMAN pool freed for bpid =%d", + bp_info->bpid); + rte_free(mp->pool_data); + mp->pool_data = NULL; + } +} + +static void +dpaa_buf_free(struct dpaa_bp_info *bp_info, uint64_t addr) +{ + struct bm_buffer buf; + int ret; + + DPAA_MEMPOOL_DEBUG("Free 0x%lx to bpid: %d", addr, bp_info->bpid); + + bm_buffer_set64(&buf, addr); +retry: + ret = bman_release(bp_info->bp, &buf, 1, 0); + if (ret) { + DPAA_MEMPOOL_DEBUG("BMAN busy. Retrying..."); + cpu_spin(CPU_SPIN_BACKOFF_CYCLES); + goto retry; + } +} + +static int +dpaa_mbuf_free_bulk(struct rte_mempool *pool, + void *const *obj_table, + unsigned int n) +{ + struct dpaa_bp_info *bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool); + int ret; + unsigned int i = 0; + + DPAA_MEMPOOL_DPDEBUG("Request to free %d buffers in bpid = %d", + n, bp_info->bpid); + + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d", + ret); + return 0; + } + + while (i < n) { + dpaa_buf_free(bp_info, + (uint64_t)rte_mempool_virt2iova(obj_table[i]) + + bp_info->meta_data_size); + i = i + 1; + } + + DPAA_MEMPOOL_DPDEBUG("freed %d buffers in bpid =%d", + n, bp_info->bpid); + + return 0; +} + +static int +dpaa_mbuf_alloc_bulk(struct rte_mempool *pool, + void **obj_table, + unsigned int count) +{ + struct rte_mbuf **m = (struct rte_mbuf **)obj_table; + struct bm_buffer bufs[DPAA_MBUF_MAX_ACQ_REL]; + struct dpaa_bp_info *bp_info; + void *bufaddr; + int i, ret; + unsigned int n = 0; + + bp_info = DPAA_MEMPOOL_TO_POOL_INFO(pool); + + DPAA_MEMPOOL_DPDEBUG("Request to alloc %d buffers in bpid = %d", + count, bp_info->bpid); + + if (unlikely(count >= (RTE_MEMPOOL_CACHE_MAX_SIZE * 2))) { + DPAA_MEMPOOL_ERR("Unable to allocate requested (%u) buffers", + count); + return -1; + } + + ret = rte_dpaa_portal_init((void *)0); + if (ret) { + DPAA_MEMPOOL_ERR("rte_dpaa_portal_init failed with ret: %d", + ret); + return -1; + } + + while (n < count) { + /* Acquire is all-or-nothing, so we drain in 7s, + * then the remainder. 
+ */ + if ((count - n) > DPAA_MBUF_MAX_ACQ_REL) { + ret = bman_acquire(bp_info->bp, bufs, + DPAA_MBUF_MAX_ACQ_REL, 0); + } else { + ret = bman_acquire(bp_info->bp, bufs, count - n, 0); + } + /* In case of less than requested number of buffers available + * in pool, qbman_swp_acquire returns 0 + */ + if (ret <= 0) { + DPAA_MEMPOOL_DPDEBUG("Buffer acquire failed (%d)", + ret); + /* The API expect the exact number of requested + * buffers. Releasing all buffers allocated + */ + dpaa_mbuf_free_bulk(pool, obj_table, n); + return -ENOBUFS; + } + /* assigning mbuf from the acquired objects */ + for (i = 0; (i < ret) && bufs[i].addr; i++) { + /* TODO-errata - objerved that bufs may be null + * i.e. first buffer is valid, remaining 6 buffers + * may be null. + */ + bufaddr = (void *)rte_dpaa_mem_ptov(bufs[i].addr); + m[n] = (struct rte_mbuf *)((char *)bufaddr + - bp_info->meta_data_size); + DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN", + (void *)bufaddr, (void *)m[n]); + n++; + } + } + + DPAA_MEMPOOL_DPDEBUG("Allocated %d buffers from bpid=%d", + n, bp_info->bpid); + return 0; +} + +static unsigned int +dpaa_mbuf_get_count(const struct rte_mempool *mp) +{ + struct dpaa_bp_info *bp_info; + + MEMPOOL_INIT_FUNC_TRACE(); + + if (!mp || !mp->pool_data) { + DPAA_MEMPOOL_ERR("Invalid mempool provided\n"); + return 0; + } + + bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp); + + return bman_query_free_buffers(bp_info->bp); +} + +struct rte_mempool_ops dpaa_mpool_ops = { + .name = "dpaa", + .alloc = dpaa_mbuf_create_pool, + .free = dpaa_mbuf_free_pool, + .enqueue = dpaa_mbuf_free_bulk, + .dequeue = dpaa_mbuf_alloc_bulk, + .get_count = dpaa_mbuf_get_count, +}; + +MEMPOOL_REGISTER_OPS(dpaa_mpool_ops); diff --git a/drivers/mempool/dpaa/dpaa_mempool.h b/drivers/mempool/dpaa/dpaa_mempool.h new file mode 100644 index 00000000..67958594 --- /dev/null +++ b/drivers/mempool/dpaa/dpaa_mempool.h @@ -0,0 +1,76 @@ +/*- + * BSD LICENSE + * + * Copyright 2017 NXP. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of NXP nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ +#ifndef __DPAA_MEMPOOL_H__ +#define __DPAA_MEMPOOL_H__ + +/* System headers */ +#include <stdio.h> +#include <stdbool.h> +#include <inttypes.h> +#include <unistd.h> + +#include <rte_mempool.h> + +#include <rte_dpaa_bus.h> +#include <rte_dpaa_logs.h> + +#include <fsl_usd.h> +#include <fsl_bman.h> + +#define CPU_SPIN_BACKOFF_CYCLES 512 + +/* total number of bpools on SoC */ +#define DPAA_MAX_BPOOLS 256 + +/* Maximum release/acquire from BMAN */ +#define DPAA_MBUF_MAX_ACQ_REL 8 + +struct dpaa_bp_info { + struct rte_mempool *mp; + struct bman_pool *bp; + uint32_t bpid; + uint32_t size; + uint32_t meta_data_size; + int32_t dpaa_ops_index; +}; + +#define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \ + ((struct dpaa_bp_info *)__mp->pool_data) + +#define DPAA_MEMPOOL_TO_BPID(__mp) \ + (((struct dpaa_bp_info *)__mp->pool_data)->bpid) + +extern struct dpaa_bp_info rte_dpaa_bpid_info[DPAA_MAX_BPOOLS]; + +#define DPAA_BPID_TO_POOL_INFO(__bpid) (&rte_dpaa_bpid_info[__bpid]) + +#endif diff --git a/drivers/mempool/dpaa/rte_mempool_dpaa_version.map b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map new file mode 100644 index 00000000..cc635c73 --- /dev/null +++ b/drivers/mempool/dpaa/rte_mempool_dpaa_version.map @@ -0,0 +1,8 @@ +DPDK_17.11 { + global: + + rte_dpaa_bpid_info; + rte_dpaa_pool_table; + + local: *; +}; diff --git a/drivers/mempool/dpaa2/Makefile b/drivers/mempool/dpaa2/Makefile index 1a174968..dd19e100 100644 --- a/drivers/mempool/dpaa2/Makefile +++ b/drivers/mempool/dpaa2/Makefile @@ -58,5 +58,6 @@ LIBABIVER := 1 SRCS-$(CONFIG_RTE_LIBRTE_DPAA2_MEMPOOL) += dpaa2_hw_mempool.c LDLIBS += -lrte_bus_fslmc +LDLIBS += -lrte_eal -lrte_mempool -lrte_ring include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c index 6df203fc..8bcbaa89 100644 --- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.c +++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.c @@ -65,7 +65,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) struct dpaa2_bp_info *bp_info; struct dpbp_attr dpbp_attr; uint32_t bpid; - int ret, p_ret; + int ret; avail_dpbp = dpaa2_alloc_dpbp_dev(); @@ -78,7 +78,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) ret = dpaa2_affine_qbman_swp(); if (ret) { RTE_LOG(ERR, PMD, "Failure in affining portal\n"); - return ret; + goto err1; } } @@ -86,7 +86,7 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) if (ret != 0) { PMD_INIT_LOG(ERR, "Resource enable failure with" " err code: %d\n", ret); - return ret; + goto err1; } ret = dpbp_get_attributes(&avail_dpbp->dpbp, CMD_PRI_LOW, @@ -94,10 +94,16 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) if (ret != 0) { PMD_INIT_LOG(ERR, "Resource read failure with" " err code: %d\n", ret); - p_ret = ret; - ret = dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, - avail_dpbp->token); - return p_ret; + goto err2; + } + + bp_info = rte_malloc(NULL, + sizeof(struct dpaa2_bp_info), + RTE_CACHE_LINE_SIZE); + if (!bp_info) { + PMD_INIT_LOG(ERR, "No heap memory available for bp_info"); + ret = -ENOMEM; + goto err2; } /* Allocate the bp_list which will be added into global_bp_list */ @@ -105,7 +111,8 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) RTE_CACHE_LINE_SIZE); if (!bp_list) { PMD_INIT_LOG(ERR, "No heap memory available"); - return -ENOMEM; + ret = -ENOMEM; + goto err3; } /* Set parameters of buffer pool list */ @@ -127,9 +134,6 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) rte_dpaa2_bpid_info[bpid].bp_list = bp_list; rte_dpaa2_bpid_info[bpid].bpid = bpid; - bp_info = rte_malloc(NULL, - sizeof(struct 
dpaa2_bp_info), - RTE_CACHE_LINE_SIZE); rte_memcpy(bp_info, (void *)&rte_dpaa2_bpid_info[bpid], sizeof(struct dpaa2_bp_info)); mp->pool_data = (void *)bp_info; @@ -138,6 +142,14 @@ rte_hw_mbuf_create_pool(struct rte_mempool *mp) h_bp_list = bp_list; return 0; +err3: + rte_free(bp_info); +err2: + dpbp_disable(&avail_dpbp->dpbp, CMD_PRI_LOW, avail_dpbp->token); +err1: + dpaa2_free_dpbp_dev(avail_dpbp); + + return ret; } static void @@ -213,7 +225,7 @@ rte_dpaa2_mbuf_release(struct rte_mempool *pool __rte_unused, /* convert mbuf to buffers for the remainder */ for (i = 0; i < n ; i++) { #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA - bufs[i] = (uint64_t)rte_mempool_virt2phy(pool, obj_table[i]) + bufs[i] = (uint64_t)rte_mempool_virt2iova(obj_table[i]) + meta_data_size; #else bufs[i] = (uint64_t)obj_table[i] + meta_data_size; @@ -232,7 +244,7 @@ aligned: for (i = 0; i < DPAA2_MBUF_MAX_ACQ_REL; i++) { #ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA bufs[i] = (uint64_t) - rte_mempool_virt2phy(pool, obj_table[n + i]) + rte_mempool_virt2iova(obj_table[n + i]) + meta_data_size; #else bufs[i] = (uint64_t)obj_table[n + i] + meta_data_size; diff --git a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h index 56b71bed..0971929e 100644 --- a/drivers/mempool/dpaa2/dpaa2_hw_mempool.h +++ b/drivers/mempool/dpaa2/dpaa2_hw_mempool.h @@ -39,7 +39,7 @@ struct buf_pool_cfg { void *addr; /**< The address from where DPAA2 will carve out the buffers */ - phys_addr_t phys_addr; + rte_iova_t phys_addr; /**< Physical address of the memory provided in addr */ uint32_t num; /**< Number of buffers */ diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile new file mode 100644 index 00000000..a2e2863c --- /dev/null +++ b/drivers/mempool/octeontx/Makefile @@ -0,0 +1,68 @@ +# BSD LICENSE +# +# Copyright(c) 2017 Cavium Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in +# the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Cavium Networks nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+# + +include $(RTE_SDK)/mk/rte.vars.mk + +# +# library name +# +LIB = librte_mempool_octeontx.a + +CFLAGS += $(WERROR_FLAGS) +EXPORT_MAP := rte_mempool_octeontx_version.map + +LIBABIVER := 1 + +# +# all source are stored in SRCS-y +# +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_ssovf.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_mbox.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c +SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c + +ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y) +CFLAGS_rte_mempool_octeontx.o += -fno-prefetch-loop-arrays + +ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1) +CFLAGS_rte_mempool_octeontx.o += -Ofast +else +CFLAGS_rte_mempool_octeontx.o += -O3 -ffast-math +endif + +else +CFLAGS_rte_mempool_octeontx.o += -Ofast +endif + +LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf +LDLIBS += -lrte_bus_pci + +include $(RTE_SDK)/mk/rte.lib.mk diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c new file mode 100644 index 00000000..3bc50f35 --- /dev/null +++ b/drivers/mempool/octeontx/octeontx_fpavf.c @@ -0,0 +1,830 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium Inc. 2017. All Right reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <stdlib.h> +#include <string.h> +#include <stdbool.h> +#include <stdio.h> +#include <unistd.h> +#include <fcntl.h> +#include <errno.h> +#include <sys/mman.h> + +#include <rte_atomic.h> +#include <rte_eal.h> +#include <rte_bus_pci.h> +#include <rte_errno.h> +#include <rte_memory.h> +#include <rte_malloc.h> +#include <rte_spinlock.h> +#include <rte_mbuf.h> + +#include "octeontx_mbox.h" +#include "octeontx_fpavf.h" + +/* FPA Mbox Message */ +#define IDENTIFY 0x0 + +#define FPA_CONFIGSET 0x1 +#define FPA_CONFIGGET 0x2 +#define FPA_START_COUNT 0x3 +#define FPA_STOP_COUNT 0x4 +#define FPA_ATTACHAURA 0x5 +#define FPA_DETACHAURA 0x6 +#define FPA_SETAURALVL 0x7 +#define FPA_GETAURALVL 0x8 + +#define FPA_COPROC 0x1 + +/* fpa mbox struct */ +struct octeontx_mbox_fpa_cfg { + int aid; + uint64_t pool_cfg; + uint64_t pool_stack_base; + uint64_t pool_stack_end; + uint64_t aura_cfg; +}; + +struct __attribute__((__packed__)) gen_req { + uint32_t value; +}; + +struct __attribute__((__packed__)) idn_req { + uint8_t domain_id; +}; + +struct __attribute__((__packed__)) gen_resp { + uint16_t domain_id; + uint16_t vfid; +}; + +struct __attribute__((__packed__)) dcfg_resp { + uint8_t sso_count; + uint8_t ssow_count; + uint8_t fpa_count; + uint8_t pko_count; + uint8_t tim_count; + uint8_t net_port_count; + uint8_t virt_port_count; +}; + +#define FPA_MAX_POOL 32 +#define FPA_PF_PAGE_SZ 4096 + +#define FPA_LN_SIZE 128 +#define FPA_ROUND_UP(x, size) \ + ((((unsigned long)(x)) + size-1) & (~(size-1))) +#define FPA_OBJSZ_2_CACHE_LINE(sz) (((sz) + RTE_CACHE_LINE_MASK) >> 7) +#define FPA_CACHE_LINE_2_OBJSZ(sz) ((sz) << 7) + +#define POOL_ENA (0x1 << 0) +#define POOL_DIS (0x0 << 0) +#define POOL_SET_NAT_ALIGN (0x1 << 1) +#define POOL_DIS_NAT_ALIGN (0x0 << 1) +#define POOL_STYPE(x) (((x) & 0x1) << 2) +#define POOL_LTYPE(x) (((x) & 0x3) << 3) +#define POOL_BUF_OFFSET(x) (((x) & 0x7fffULL) << 16) +#define POOL_BUF_SIZE(x) (((x) & 0x7ffULL) << 32) + +struct fpavf_res { + void *pool_stack_base; + void *bar0; + uint64_t stack_ln_ptr; + uint16_t domain_id; + uint16_t vf_id; /* gpool_id */ + uint16_t sz128; /* Block size in cache lines */ + bool is_inuse; +}; + +struct octeontx_fpadev { + rte_spinlock_t lock; + uint8_t total_gpool_cnt; + struct fpavf_res pool[FPA_VF_MAX]; +}; + +static struct octeontx_fpadev fpadev; + +/* lock is taken by caller */ +static int +octeontx_fpa_gpool_alloc(unsigned int object_size) +{ + struct fpavf_res *res = NULL; + uint16_t gpool; + unsigned int sz128; + + sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size); + + for (gpool = 0; gpool < FPA_VF_MAX; gpool++) { + + /* Skip VF that is not mapped Or _inuse */ + if ((fpadev.pool[gpool].bar0 == NULL) || + (fpadev.pool[gpool].is_inuse == true)) + continue; + + res = &fpadev.pool[gpool]; + + RTE_ASSERT(res->domain_id != (uint16_t)~0); + RTE_ASSERT(res->vf_id != (uint16_t)~0); + RTE_ASSERT(res->stack_ln_ptr != 0); + + if (res->sz128 == 0) { + res->sz128 = sz128; + + fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128); + return gpool; + } + } + + return -ENOSPC; +} + +/* lock is taken by caller */ +static __rte_always_inline uintptr_t +octeontx_fpa_gpool2handle(uint16_t gpool) +{ + struct fpavf_res *res = NULL; + + RTE_ASSERT(gpool < FPA_VF_MAX); + + res = &fpadev.pool[gpool]; + return (uintptr_t)res->bar0 | gpool; +} + +static __rte_always_inline bool +octeontx_fpa_handle_valid(uintptr_t handle) +{ + struct fpavf_res *res = NULL; + uint8_t gpool; + int i; + bool ret = false; + + if (unlikely(!handle)) + return ret; + + /* get the gpool */ + gpool = 
octeontx_fpa_bufpool_gpool(handle); + + /* get the bar address */ + handle &= ~(uint64_t)FPA_GPOOL_MASK; + for (i = 0; i < FPA_VF_MAX; i++) { + if ((uintptr_t)fpadev.pool[i].bar0 != handle) + continue; + + /* validate gpool */ + if (gpool != i) + return false; + + res = &fpadev.pool[i]; + + if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 || + res->stack_ln_ptr == 0) + ret = false; + else + ret = true; + break; + } + + return ret; +} + +static int +octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size, + signed short buf_offset, unsigned int max_buf_count) +{ + void *memptr = NULL; + rte_iova_t phys_addr; + unsigned int memsz; + struct fpavf_res *fpa = NULL; + uint64_t reg; + struct octeontx_mbox_hdr hdr; + struct dcfg_resp resp; + struct octeontx_mbox_fpa_cfg cfg; + int ret = -1; + + fpa = &fpadev.pool[gpool]; + memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) * + FPA_LN_SIZE; + + /* Round-up to page size */ + memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1); + memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE); + if (memptr == NULL) { + ret = -ENOMEM; + goto err; + } + + /* Configure stack */ + fpa->pool_stack_base = memptr; + phys_addr = rte_malloc_virt2iova(memptr); + + buf_size /= FPA_LN_SIZE; + + /* POOL setup */ + hdr.coproc = FPA_COPROC; + hdr.msg = FPA_CONFIGSET; + hdr.vfid = fpa->vf_id; + hdr.res_code = 0; + + buf_offset /= FPA_LN_SIZE; + reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) | + POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN | + POOL_ENA; + + cfg.aid = 0; + cfg.pool_cfg = reg; + cfg.pool_stack_base = phys_addr; + cfg.pool_stack_end = phys_addr + memsz; + cfg.aura_cfg = (1 << 9); + + ret = octeontx_ssovf_mbox_send(&hdr, &cfg, + sizeof(struct octeontx_mbox_fpa_cfg), + &resp, sizeof(resp)); + if (ret < 0) { + ret = -EACCES; + goto err; + } + + fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n", + fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg, + cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg); + + /* Now pool is in_use */ + fpa->is_inuse = true; + +err: + if (ret < 0) + rte_free(memptr); + + return ret; +} + +static int +octeontx_fpapf_pool_destroy(unsigned int gpool_index) +{ + struct octeontx_mbox_hdr hdr; + struct dcfg_resp resp; + struct octeontx_mbox_fpa_cfg cfg; + struct fpavf_res *fpa = NULL; + int ret = -1; + + fpa = &fpadev.pool[gpool_index]; + + hdr.coproc = FPA_COPROC; + hdr.msg = FPA_CONFIGSET; + hdr.vfid = fpa->vf_id; + hdr.res_code = 0; + + /* reset and free the pool */ + cfg.aid = 0; + cfg.pool_cfg = 0; + cfg.pool_stack_base = 0; + cfg.pool_stack_end = 0; + cfg.aura_cfg = 0; + + ret = octeontx_ssovf_mbox_send(&hdr, &cfg, + sizeof(struct octeontx_mbox_fpa_cfg), + &resp, sizeof(resp)); + if (ret < 0) { + ret = -EACCES; + goto err; + } + + ret = 0; +err: + /* anycase free pool stack memory */ + rte_free(fpa->pool_stack_base); + fpa->pool_stack_base = NULL; + return ret; +} + +static int +octeontx_fpapf_aura_attach(unsigned int gpool_index) +{ + struct octeontx_mbox_hdr hdr; + struct dcfg_resp resp; + struct octeontx_mbox_fpa_cfg cfg; + int ret = 0; + + if (gpool_index >= FPA_MAX_POOL) { + ret = -EINVAL; + goto err; + } + hdr.coproc = FPA_COPROC; + hdr.msg = FPA_ATTACHAURA; + hdr.vfid = gpool_index; + hdr.res_code = 0; + memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg)); + cfg.aid = gpool_index; /* gpool is guara */ + + ret = octeontx_ssovf_mbox_send(&hdr, &cfg, + sizeof(struct 
octeontx_mbox_fpa_cfg), + &resp, sizeof(resp)); + if (ret < 0) { + fpavf_log_err("Could not attach fpa "); + fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n", + gpool_index, gpool_index, ret, hdr.res_code); + ret = -EACCES; + goto err; + } +err: + return ret; +} + +static int +octeontx_fpapf_aura_detach(unsigned int gpool_index) +{ + struct octeontx_mbox_fpa_cfg cfg = {0}; + struct octeontx_mbox_hdr hdr = {0}; + int ret = 0; + + if (gpool_index >= FPA_MAX_POOL) { + ret = -EINVAL; + goto err; + } + + cfg.aid = gpool_index; /* gpool is gaura */ + hdr.coproc = FPA_COPROC; + hdr.msg = FPA_DETACHAURA; + hdr.vfid = gpool_index; + ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0); + if (ret < 0) { + fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n", + gpool_index, ret, hdr.res_code); + ret = -EINVAL; + } + +err: + return ret; +} + +static int +octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz, + void *memva, uint16_t gpool) +{ + uint64_t va_end; + + if (unlikely(!handle)) + return -ENODEV; + + va_end = (uintptr_t)memva + memsz; + va_end &= ~RTE_CACHE_LINE_MASK; + + /* VHPOOL setup */ + fpavf_write64((uintptr_t)memva, + (void *)((uintptr_t)handle + + FPA_VF_VHPOOL_START_ADDR(gpool))); + fpavf_write64(va_end, + (void *)((uintptr_t)handle + + FPA_VF_VHPOOL_END_ADDR(gpool))); + return 0; +} + +static int +octeontx_fpapf_start_count(uint16_t gpool_index) +{ + int ret = 0; + struct octeontx_mbox_hdr hdr = {0}; + + if (gpool_index >= FPA_MAX_POOL) { + ret = -EINVAL; + goto err; + } + + hdr.coproc = FPA_COPROC; + hdr.msg = FPA_START_COUNT; + hdr.vfid = gpool_index; + ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0); + if (ret < 0) { + fpavf_log_err("Could not start buffer counting for "); + fpavf_log_err("FPA pool %d. Err=%d. 
FuncErr=%d\n", + gpool_index, ret, hdr.res_code); + ret = -EINVAL; + goto err; + } + +err: + return ret; +} + +static __rte_always_inline int +octeontx_fpavf_free(unsigned int gpool) +{ + int ret = 0; + + if (gpool >= FPA_MAX_POOL) { + ret = -EINVAL; + goto err; + } + + /* Pool is free */ + fpadev.pool[gpool].is_inuse = false; + +err: + return ret; +} + +static __rte_always_inline int +octeontx_gpool_free(uint16_t gpool) +{ + if (fpadev.pool[gpool].sz128 != 0) { + fpadev.pool[gpool].sz128 = 0; + return 0; + } + return -EINVAL; +} + +/* + * Return buffer size for a given pool + */ +int +octeontx_fpa_bufpool_block_size(uintptr_t handle) +{ + struct fpavf_res *res = NULL; + uint8_t gpool; + + if (unlikely(!octeontx_fpa_handle_valid(handle))) + return -EINVAL; + + /* get the gpool */ + gpool = octeontx_fpa_bufpool_gpool(handle); + res = &fpadev.pool[gpool]; + return FPA_CACHE_LINE_2_OBJSZ(res->sz128); +} + +int +octeontx_fpa_bufpool_free_count(uintptr_t handle) +{ + uint64_t cnt, limit, avail; + uint8_t gpool; + uintptr_t pool_bar; + + if (unlikely(!octeontx_fpa_handle_valid(handle))) + return -EINVAL; + + /* get the gpool */ + gpool = octeontx_fpa_bufpool_gpool(handle); + + /* Get pool bar address from handle */ + pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK; + + cnt = fpavf_read64((void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT(gpool))); + limit = fpavf_read64((void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT_LIMIT(gpool))); + + avail = fpavf_read64((void *)((uintptr_t)pool_bar + + FPA_VF_VHPOOL_AVAILABLE(gpool))); + + return RTE_MIN(avail, (limit - cnt)); +} + +uintptr_t +octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count, + unsigned int buf_offset, char **va_start, + int node_id) +{ + unsigned int gpool; + void *memva; + unsigned long memsz; + uintptr_t gpool_handle; + uintptr_t pool_bar; + int res; + + RTE_SET_USED(node_id); + RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET); + + if (unlikely(*va_start == NULL)) + goto error_end; + + object_size = RTE_CACHE_LINE_ROUNDUP(object_size); + if (object_size > FPA_MAX_OBJ_SIZE) { + errno = EINVAL; + goto error_end; + } + + rte_spinlock_lock(&fpadev.lock); + res = octeontx_fpa_gpool_alloc(object_size); + + /* Bail if failed */ + if (unlikely(res < 0)) { + errno = res; + goto error_unlock; + } + + /* get fpavf */ + gpool = res; + + /* get pool handle */ + gpool_handle = octeontx_fpa_gpool2handle(gpool); + if (!octeontx_fpa_handle_valid(gpool_handle)) { + errno = ENOSPC; + goto error_gpool_free; + } + + /* Get pool bar address from handle */ + pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK; + + res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset, + object_count); + if (res < 0) { + errno = res; + goto error_gpool_free; + } + + /* populate AURA fields */ + res = octeontx_fpapf_aura_attach(gpool); + if (res < 0) { + errno = res; + goto error_pool_destroy; + } + + /* vf pool setup */ + memsz = object_size * object_count; + memva = *va_start; + res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool); + if (res < 0) { + errno = res; + goto error_gaura_detach; + } + + /* Release lock */ + rte_spinlock_unlock(&fpadev.lock); + + /* populate AURA registers */ + fpavf_write64(object_count, (void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT(gpool))); + fpavf_write64(object_count, (void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT_LIMIT(gpool))); + fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT_THRESHOLD(gpool))); + + 
octeontx_fpapf_start_count(gpool); + + return gpool_handle; + +error_gaura_detach: + (void) octeontx_fpapf_aura_detach(gpool); +error_pool_destroy: + octeontx_fpavf_free(gpool); + octeontx_fpapf_pool_destroy(gpool); +error_gpool_free: + octeontx_gpool_free(gpool); +error_unlock: + rte_spinlock_unlock(&fpadev.lock); +error_end: + return (uintptr_t)NULL; +} + +/* + * Destroy a buffer pool. + */ +int +octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id) +{ + void **node, **curr, *head = NULL; + uint64_t sz; + uint64_t cnt, avail; + uint8_t gpool; + uintptr_t pool_bar; + int ret; + + RTE_SET_USED(node_id); + + /* Wait for all outstanding writes to be committed */ + rte_smp_wmb(); + + if (unlikely(!octeontx_fpa_handle_valid(handle))) + return -EINVAL; + + /* get the pool */ + gpool = octeontx_fpa_bufpool_gpool(handle); + + /* Get pool bar address from handle */ + pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK; + + /* Check for no outstanding buffers */ + cnt = fpavf_read64((void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT(gpool))); + if (cnt) { + fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt); + return -EBUSY; + } + + rte_spinlock_lock(&fpadev.lock); + + avail = fpavf_read64((void *)((uintptr_t)pool_bar + + FPA_VF_VHPOOL_AVAILABLE(gpool))); + + /* Prepare to empty the entire POOL */ + fpavf_write64(avail, (void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT_LIMIT(gpool))); + fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT_THRESHOLD(gpool))); + + /* Empty the pool */ + /* Invalidate the POOL */ + octeontx_gpool_free(gpool); + + /* Process all buffers in the pool */ + while (avail--) { + + /* Yank a buffer from the pool */ + node = (void *)(uintptr_t) + fpavf_read64((void *) + (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool))); + + if (node == NULL) { + fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n", + gpool, avail); + break; + } + + /* Imsert it into an ordered linked list */ + for (curr = &head; curr[0] != NULL; curr = curr[0]) { + if ((uintptr_t)node <= (uintptr_t)curr[0]) + break; + } + node[0] = curr[0]; + curr[0] = node; + } + + /* Verify the linked list to be a perfect series */ + sz = octeontx_fpa_bufpool_block_size(handle) << 7; + for (curr = head; curr != NULL && curr[0] != NULL; + curr = curr[0]) { + if (curr == curr[0] || + ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) { + fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n", + gpool, curr, curr[0]); + } + } + + /* Disable pool operation */ + fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar + + FPA_VF_VHPOOL_START_ADDR(gpool))); + fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar + + FPA_VF_VHPOOL_END_ADDR(gpool))); + + (void)octeontx_fpapf_pool_destroy(gpool); + + /* Deactivate the AURA */ + fpavf_write64(0, (void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT_LIMIT(gpool))); + fpavf_write64(0, (void *)((uintptr_t)pool_bar + + FPA_VF_VHAURA_CNT_THRESHOLD(gpool))); + + ret = octeontx_fpapf_aura_detach(gpool); + if (ret) { + fpavf_log_err("Failed to dettach gaura %u. 
error code=%d\n", + gpool, ret); + } + + /* Free VF */ + (void)octeontx_fpavf_free(gpool); + + rte_spinlock_unlock(&fpadev.lock); + return 0; +} + +static void +octeontx_fpavf_setup(void) +{ + uint8_t i; + static bool init_once; + + if (!init_once) { + rte_spinlock_init(&fpadev.lock); + fpadev.total_gpool_cnt = 0; + + for (i = 0; i < FPA_VF_MAX; i++) { + + fpadev.pool[i].domain_id = ~0; + fpadev.pool[i].stack_ln_ptr = 0; + fpadev.pool[i].sz128 = 0; + fpadev.pool[i].bar0 = NULL; + fpadev.pool[i].pool_stack_base = NULL; + fpadev.pool[i].is_inuse = false; + } + init_once = 1; + } +} + +static int +octeontx_fpavf_identify(void *bar0) +{ + uint64_t val; + uint16_t domain_id; + uint16_t vf_id; + uint64_t stack_ln_ptr; + + val = fpavf_read64((void *)((uintptr_t)bar0 + + FPA_VF_VHAURA_CNT_THRESHOLD(0))); + + domain_id = (val >> 8) & 0xffff; + vf_id = (val >> 24) & 0xffff; + + stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 + + FPA_VF_VHPOOL_THRESHOLD(0))); + if (vf_id >= FPA_VF_MAX) { + fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id); + return -1; + } + + if (fpadev.pool[vf_id].is_inuse) { + fpavf_log_err("vf_id %d is_inuse\n", vf_id); + return -1; + } + + fpadev.pool[vf_id].domain_id = domain_id; + fpadev.pool[vf_id].vf_id = vf_id; + fpadev.pool[vf_id].bar0 = bar0; + fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr; + + /* SUCCESS */ + return vf_id; +} + +/* FPAVF pcie device aka mempool probe */ +static int +fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev) +{ + uint8_t *idreg; + int res; + struct fpavf_res *fpa = NULL; + + RTE_SET_USED(pci_drv); + RTE_SET_USED(fpa); + + /* For secondary processes, the primary has done all the work */ + if (rte_eal_process_type() != RTE_PROC_PRIMARY) + return 0; + + if (pci_dev->mem_resource[0].addr == NULL) { + fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr); + return -ENODEV; + } + idreg = pci_dev->mem_resource[0].addr; + + octeontx_fpavf_setup(); + + res = octeontx_fpavf_identify(idreg); + if (res < 0) + return -1; + + fpa = &fpadev.pool[res]; + fpadev.total_gpool_cnt++; + rte_wmb(); + + fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x", + fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id, + fpa->vf_id, (unsigned int)fpa->stack_ln_ptr); + + return 0; +} + +static const struct rte_pci_id pci_fpavf_map[] = { + { + RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM, + PCI_DEVICE_ID_OCTEONTX_FPA_VF) + }, + { + .vendor_id = 0, + }, +}; + +static struct rte_pci_driver pci_fpavf = { + .id_table = pci_fpavf_map, + .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA, + .probe = fpavf_probe, +}; + +RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf); diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h new file mode 100644 index 00000000..1d09f007 --- /dev/null +++ b/drivers/mempool/octeontx/octeontx_fpavf.h @@ -0,0 +1,131 @@ +/* + * BSD LICENSE + * + * Copyright (C) 2017 Cavium Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name of Cavium networks nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __OCTEONTX_FPAVF_H__ +#define __OCTEONTX_FPAVF_H__ + +#include <rte_io.h> +#include "octeontx_pool_logs.h" + +/* fpa pool Vendor ID and Device ID */ +#define PCI_VENDOR_ID_CAVIUM 0x177D +#define PCI_DEVICE_ID_OCTEONTX_FPA_VF 0xA053 + +#define FPA_VF_MAX 32 +#define FPA_GPOOL_MASK (FPA_VF_MAX-1) + +/* FPA VF register offsets */ +#define FPA_VF_INT(x) (0x200ULL | ((x) << 22)) +#define FPA_VF_INT_W1S(x) (0x210ULL | ((x) << 22)) +#define FPA_VF_INT_ENA_W1S(x) (0x220ULL | ((x) << 22)) +#define FPA_VF_INT_ENA_W1C(x) (0x230ULL | ((x) << 22)) + +#define FPA_VF_VHPOOL_AVAILABLE(vhpool) (0x04150 | ((vhpool)&0x0)) +#define FPA_VF_VHPOOL_THRESHOLD(vhpool) (0x04160 | ((vhpool)&0x0)) +#define FPA_VF_VHPOOL_START_ADDR(vhpool) (0x04200 | ((vhpool)&0x0)) +#define FPA_VF_VHPOOL_END_ADDR(vhpool) (0x04210 | ((vhpool)&0x0)) + +#define FPA_VF_VHAURA_CNT(vaura) (0x20120 | ((vaura)&0xf)<<18) +#define FPA_VF_VHAURA_CNT_ADD(vaura) (0x20128 | ((vaura)&0xf)<<18) +#define FPA_VF_VHAURA_CNT_LIMIT(vaura) (0x20130 | ((vaura)&0xf)<<18) +#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura) (0x20140 | ((vaura)&0xf)<<18) +#define FPA_VF_VHAURA_OP_ALLOC(vaura) (0x30000 | ((vaura)&0xf)<<18) +#define FPA_VF_VHAURA_OP_FREE(vaura) (0x38000 | ((vaura)&0xf)<<18) + +#define FPA_VF_FREE_ADDRS_S(x, y, z) \ + ((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14)) + +/* FPA VF register offsets from VF_BAR4, size 2 MByte */ +#define FPA_VF_MSIX_VEC_ADDR 0x00000 +#define FPA_VF_MSIX_VEC_CTL 0x00008 +#define FPA_VF_MSIX_PBA 0xF0000 + +#define FPA_VF0_APERTURE_SHIFT 22 +#define FPA_AURA_SET_SIZE 16 + +#define FPA_MAX_OBJ_SIZE (128 * 1024) +#define OCTEONTX_FPAVF_BUF_OFFSET 128 + +/* + * In Cavium OcteonTX SoC, all accesses to the device registers are + * implicitly strongly ordered. So, the relaxed version of IO operation is + * safe to use with out any IO memory barriers. 
+ */ +#define fpavf_read64 rte_read64_relaxed +#define fpavf_write64 rte_write64_relaxed + +/* ARM64 specific functions */ +#if defined(RTE_ARCH_ARM64) +#define fpavf_load_pair(val0, val1, addr) ({ \ + asm volatile( \ + "ldp %x[x0], %x[x1], [%x[p1]]" \ + :[x0]"=r"(val0), [x1]"=r"(val1) \ + :[p1]"r"(addr) \ + ); }) + +#define fpavf_store_pair(val0, val1, addr) ({ \ + asm volatile( \ + "stp %x[x0], %x[x1], [%x[p1]]" \ + ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \ + ); }) +#else /* Un optimized functions for building on non arm64 arch */ + +#define fpavf_load_pair(val0, val1, addr) \ +do { \ + val0 = rte_read64(addr); \ + val1 = rte_read64(((uint8_t *)addr) + 8); \ +} while (0) + +#define fpavf_store_pair(val0, val1, addr) \ +do { \ + rte_write64(val0, addr); \ + rte_write64(val1, (((uint8_t *)addr) + 8)); \ +} while (0) +#endif + +uintptr_t +octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count, + unsigned int buf_offset, char **va_start, + int node); +int +octeontx_fpa_bufpool_destroy(uintptr_t handle, int node); +int +octeontx_fpa_bufpool_block_size(uintptr_t handle); +int +octeontx_fpa_bufpool_free_count(uintptr_t handle); + +static __rte_always_inline uint8_t +octeontx_fpa_bufpool_gpool(uintptr_t handle) +{ + return (uint8_t)handle & FPA_GPOOL_MASK; +} +#endif /* __OCTEONTX_FPAVF_H__ */ diff --git a/drivers/mempool/octeontx/octeontx_mbox.c b/drivers/mempool/octeontx/octeontx_mbox.c new file mode 100644 index 00000000..9525da1a --- /dev/null +++ b/drivers/mempool/octeontx/octeontx_mbox.c @@ -0,0 +1,242 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium, Inc. 2017. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium, Inc nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <string.h> + +#include <rte_atomic.h> +#include <rte_common.h> +#include <rte_cycles.h> +#include <rte_io.h> +#include <rte_spinlock.h> + +#include "octeontx_mbox.h" +#include "octeontx_pool_logs.h" + +/* Mbox operation timeout in seconds */ +#define MBOX_WAIT_TIME_SEC 3 +#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */) + +/* Mbox channel state */ +enum { + MBOX_CHAN_STATE_REQ = 1, + MBOX_CHAN_STATE_RES = 0, +}; + +/* Response messages */ +enum { + MBOX_RET_SUCCESS, + MBOX_RET_INVALID, + MBOX_RET_INTERNAL_ERR, +}; + +struct mbox { + int init_once; + uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */ + uint8_t *reg; /* Store to this register triggers PF mbox interrupt */ + uint16_t tag_own; /* Last tag which was written to own channel */ + rte_spinlock_t lock; +}; + +static struct mbox octeontx_mbox; + +/* + * Structure used for mbox synchronization + * This structure sits at the begin of Mbox RAM and used as main + * synchronization point for channel communication + */ +struct mbox_ram_hdr { + union { + uint64_t u64; + struct { + uint8_t chan_state : 1; + uint8_t coproc : 7; + uint8_t msg; + uint8_t vfid; + uint8_t res_code; + uint16_t tag; + uint16_t len; + }; + }; +}; + +static inline void +mbox_msgcpy(uint8_t *d, const uint8_t *s, uint16_t size) +{ + uint16_t i; + + for (i = 0; i < size; i++) + d[i] = s[i]; +} + +static inline void +mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr, + const void *txmsg, uint16_t txsize) +{ + struct mbox_ram_hdr old_hdr; + struct mbox_ram_hdr new_hdr = { {0} }; + uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base; + uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr); + + /* + * Initialize the channel with the tag left by last send. + * On success full mbox send complete, PF increments the tag by one. 
+ * The sender can validate integrity of PF message with this scheme + */ + old_hdr.u64 = rte_read64(ram_mbox_hdr); + m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */ + + /* Copy msg body */ + if (txmsg) + mbox_msgcpy(ram_mbox_msg, txmsg, txsize); + + /* Prepare new hdr */ + new_hdr.chan_state = MBOX_CHAN_STATE_REQ; + new_hdr.coproc = hdr->coproc; + new_hdr.msg = hdr->msg; + new_hdr.vfid = hdr->vfid; + new_hdr.tag = m->tag_own; + new_hdr.len = txsize; + + /* Write the msg header */ + rte_write64(new_hdr.u64, ram_mbox_hdr); + rte_io_wmb(); + /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */ + rte_write64(0, m->reg); +} + +static inline int +mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr, + void *rxmsg, uint16_t rxsize) +{ + int res = 0, wait; + uint16_t len; + struct mbox_ram_hdr rx_hdr; + uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base; + uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr); + + /* Wait for response */ + wait = MBOX_WAIT_TIME_SEC * 1000 * 10; + while (wait > 0) { + rte_delay_us(100); + rx_hdr.u64 = rte_read64(ram_mbox_hdr); + if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES) + break; + --wait; + } + + hdr->res_code = rx_hdr.res_code; + m->tag_own++; + + /* Timeout */ + if (wait <= 0) { + res = -ETIMEDOUT; + goto error; + } + + /* Tag mismatch */ + if (m->tag_own != rx_hdr.tag) { + res = -EINVAL; + goto error; + } + + /* PF nacked the msg */ + if (rx_hdr.res_code != MBOX_RET_SUCCESS) { + res = -EBADMSG; + goto error; + } + + len = RTE_MIN(rx_hdr.len, rxsize); + if (rxmsg) + mbox_msgcpy(rxmsg, ram_mbox_msg, len); + + return len; + +error: + mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)", + m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res, + hdr->res_code); + return res; +} + +static inline int +mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg, + uint16_t txsize, void *rxmsg, uint16_t rxsize) +{ + int res = -EINVAL; + + if (m->init_once == 0 || hdr == NULL || + txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) { + mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d", + m->init_once, hdr, txsize, rxsize); + return res; + } + + rte_spinlock_lock(&m->lock); + + mbox_send_request(m, hdr, txmsg, txsize); + res = mbox_wait_response(m, hdr, rxmsg, rxsize); + + rte_spinlock_unlock(&m->lock); + return res; +} + +static inline int +mbox_setup(struct mbox *m) +{ + if (unlikely(m->init_once == 0)) { + rte_spinlock_init(&m->lock); + m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4); + m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0); + m->reg += SSO_VHGRP_PF_MBOX(1); + + if (m->ram_mbox_base == NULL || m->reg == NULL) { + mbox_log_err("Invalid ram_mbox_base=%p or reg=%p", + m->ram_mbox_base, m->reg); + return -EINVAL; + } + m->init_once = 1; + } + return 0; +} + +int +octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata, + uint16_t txlen, void *rxdata, uint16_t rxlen) +{ + struct mbox *m = &octeontx_mbox; + + RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8); + if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m)) + return -EINVAL; + + return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen); +} diff --git a/drivers/mempool/octeontx/octeontx_mbox.h b/drivers/mempool/octeontx/octeontx_mbox.h new file mode 100644 index 00000000..49f38257 --- /dev/null +++ b/drivers/mempool/octeontx/octeontx_mbox.h @@ -0,0 +1,64 @@ +/* + * BSD LICENSE + * + * Copyright (C) Cavium, Inc. 2017. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name of Cavium, Inc nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __OCTEONTX_MBOX_H__ +#define __OCTEONTX_MBOX_H__ + +#include <rte_common.h> + +#define SSOW_BAR4_LEN (64 * 1024) +#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3)) + +struct octeontx_ssovf_info { + uint16_t domain; /* Domain id */ + uint8_t total_ssovfs; /* Total sso groups available in domain */ + uint8_t total_ssowvfs;/* Total sso hws available in domain */ +}; + +enum octeontx_ssovf_type { + OCTEONTX_SSO_GROUP, /* SSO group vf */ + OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */ +}; + +struct octeontx_mbox_hdr { + uint16_t vfid; /* VF index or pf resource index local to the domain */ + uint8_t coproc; /* Coprocessor id */ + uint8_t msg; /* Message id */ + uint8_t res_code; /* Functional layer response code */ +}; + +int octeontx_ssovf_info(struct octeontx_ssovf_info *info); +void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar); +int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, + void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen); + +#endif /* __OCTEONTX_MBOX_H__ */ diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h new file mode 100644 index 00000000..58ccb0f0 --- /dev/null +++ b/drivers/mempool/octeontx/octeontx_pool_logs.h @@ -0,0 +1,68 @@ +/* + * BSD LICENSE + * + * Copyright (C) 2017 Cavium Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ *   * Neither the name of Cavium networks nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_POOL_LOGS_H__
+#define __OCTEONTX_POOL_LOGS_H__
+
+#include <rte_debug.h>
+
+#ifdef RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG
+#define fpavf_log_info(fmt, args...) \
+	RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
+		__func__, __LINE__, ## args)
+#define fpavf_log_dbg(fmt, args...) \
+	RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
+		__func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, args...) \
+	RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
+		__func__, __LINE__, ## args)
+#define mbox_log_dbg(fmt, args...) \
+	RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
+		__func__, __LINE__, ## args)
+#else
+#define fpavf_log_info(fmt, args...)
+#define fpavf_log_dbg(fmt, args...)
+#define mbox_log_info(fmt, args...)
+#define mbox_log_dbg(fmt, args...)
+#endif
+
+#define fpavf_func_trace fpavf_log_dbg
+#define fpavf_log_err(fmt, args...) \
+	RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
+		__func__, __LINE__, ## args)
+#define mbox_func_trace mbox_log_dbg
+#define mbox_log_err(fmt, args...) \
+	RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
+		__func__, __LINE__, ## args)
+
+#endif /* __OCTEONTX_POOL_LOGS_H__ */
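The *_log_info and *_log_dbg macros compile to nothing unless CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG is enabled in the DPDK configuration, while the *_log_err variants are always built in. A small sketch of their intended use; check_handle() is an invented helper, not part of this patch:

#include "octeontx_pool_logs.h"

/* Invented example helper showing macro usage; not in the driver. */
static int
check_handle(void *handle)
{
	fpavf_func_trace("handle=%p", handle); /* debug builds only */
	if (handle == NULL) {
		fpavf_log_err("invalid pool handle"); /* always compiled */
		return -1;
	}
	fpavf_log_dbg("handle %p looks sane", handle);
	return 0;
}

diff --git a/drivers/mempool/octeontx/octeontx_ssovf.c b/drivers/mempool/octeontx/octeontx_ssovf.c
new file mode 100644
index 00000000..012c887d
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_ssovf.c
@@ -0,0 +1,299 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Cavium, Inc nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT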
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include "octeontx_mbox.h"
+#include "octeontx_pool_logs.h"
+
+#define PCI_VENDOR_ID_CAVIUM			0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF	0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF		0xA04D
+
+#define SSO_MAX_VHGRP		(64)
+#define SSO_MAX_VHWS		(32)
+
+#define SSO_VHGRP_AQ_THR	(0x1E0ULL)
+
+struct ssovf_res {
+	uint16_t domain;
+	uint16_t vfid;
+	void *bar0;
+	void *bar2;
+};
+
+struct ssowvf_res {
+	uint16_t domain;
+	uint16_t vfid;
+	void *bar0;
+	void *bar2;
+	void *bar4;
+};
+
+struct ssowvf_identify {
+	uint16_t domain;
+	uint16_t vfid;
+};
+
+struct ssodev {
+	uint8_t total_ssovfs;
+	uint8_t total_ssowvfs;
+	struct ssovf_res grp[SSO_MAX_VHGRP];
+	struct ssowvf_res hws[SSO_MAX_VHWS];
+};
+
+static struct ssodev sdev;
+
+/* Interface functions */
+int
+octeontx_ssovf_info(struct octeontx_ssovf_info *info)
+{
+	uint8_t i;
+	uint16_t domain;
+
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
+		return -EINVAL;
+
+	if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
+		return -ENODEV;
+
+	domain = sdev.grp[0].domain;
+	for (i = 0; i < sdev.total_ssovfs; i++) {
+		/* Check that vfids are contiguous and belong to the same domain */
+		if (sdev.grp[i].vfid != i ||
+		    sdev.grp[i].bar0 == NULL ||
+		    sdev.grp[i].domain != domain) {
+			mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+				     i, sdev.grp[i].vfid,
+				     domain, sdev.grp[i].domain,
+				     sdev.grp[i].bar0);
+			return -EINVAL;
+		}
+	}
+
+	for (i = 0; i < sdev.total_ssowvfs; i++) {
+		/* Check that vfids are contiguous and belong to the same domain */
+		if (sdev.hws[i].vfid != i ||
+		    sdev.hws[i].bar0 == NULL ||
+		    sdev.hws[i].domain != domain) {
+			mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+				     i, sdev.hws[i].vfid,
+				     domain, sdev.hws[i].domain,
+				     sdev.hws[i].bar0);
+			return -EINVAL;
+		}
+	}
+
+	info->domain = domain;
+	info->total_ssovfs = sdev.total_ssovfs;
+	info->total_ssowvfs = sdev.total_ssowvfs;
+	return 0;
+}
+
+void *
+octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
+{
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+	    type > OCTEONTX_SSO_HWS)
+		return NULL;
+
+	if (type == OCTEONTX_SSO_GROUP) {
+		if (id >= sdev.total_ssovfs)
+			return NULL;
+	} else {
+		if (id >= sdev.total_ssowvfs)
+			return NULL;
+	}
+
+	if (type == OCTEONTX_SSO_GROUP) {
+		switch (bar) {
+		case 0:
+			return sdev.grp[id].bar0;
+		case 2:
+			return sdev.grp[id].bar2;
+		default:
+			return NULL;
+		}
+	} else {
+		switch (bar) {
+		case 0:
+			return sdev.hws[id].bar0;
+		case 2:
+			return sdev.hws[id].bar2;
+		case 4:
+			return sdev.hws[id].bar4;
+		default:
+			return NULL;
+		}
+	}
+}
+
+/* SSOWVF PCIe device, aka event port, probe */
+
+static int
+ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+	uint16_t vfid;
+	struct ssowvf_res *res;
+	struct ssowvf_identify *id;
+
+	RTE_SET_USED(pci_drv);
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	if (pci_dev->mem_resource[0].addr == NULL ||
+	    pci_dev->mem_resource[2].addr == NULL ||
+	    pci_dev->mem_resource[4].addr == NULL) {
+		mbox_log_err("Empty bars %p %p %p",
+			     pci_dev->mem_resource[0].addr,
+			     pci_dev->mem_resource[2].addr,
+			     pci_dev->mem_resource[4].addr);
+		return -ENODEV;
+	}
+
+	if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+		mbox_log_err("Bar4 len mismatch %d != %d",
+			     SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+		return -EINVAL;
+	}
+
+	id = pci_dev->mem_resource[4].addr;
+	vfid = id->vfid;
+	if (vfid >= SSO_MAX_VHWS) {
+		mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+		return -EINVAL;
+	}
+
+	res = &sdev.hws[vfid];
+	res->vfid = vfid;
+	res->bar0 = pci_dev->mem_resource[0].addr;
+	res->bar2 = pci_dev->mem_resource[2].addr;
+	res->bar4 = pci_dev->mem_resource[4].addr;
+	res->domain = id->domain;
+
+	sdev.total_ssowvfs++;
+	rte_wmb();
+	mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+		     res->vfid, sdev.total_ssowvfs);
+	return 0;
+}
+
+static const struct rte_pci_id pci_ssowvf_map[] = {
+	{
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+				PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
+	},
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver pci_ssowvf = {
+	.id_table = pci_ssowvf_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = ssowvf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
+
+/* SSOVF PCIe device, aka event queue, probe */
+
+static int
+ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+	uint64_t val;
+	uint16_t vfid;
+	uint8_t *idreg;
+	struct ssovf_res *res;
+
+	RTE_SET_USED(pci_drv);
+
+	/* For secondary processes, the primary has done all the work */
+	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+		return 0;
+
+	if (pci_dev->mem_resource[0].addr == NULL ||
+	    pci_dev->mem_resource[2].addr == NULL) {
+		mbox_log_err("Empty bars %p %p",
+			     pci_dev->mem_resource[0].addr,
+			     pci_dev->mem_resource[2].addr);
+		return -ENODEV;
+	}
+	idreg = pci_dev->mem_resource[0].addr;
+	idreg += SSO_VHGRP_AQ_THR;
+	val = rte_read64(idreg);
+
+	/* Restore the default aq_thr value; the read above fetched the identity word */
+	rte_write64((1ULL << 33) - 1, idreg);
+	vfid = (val >> 16) & 0xffff;
+	if (vfid >= SSO_MAX_VHGRP) {
+		mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+		return -EINVAL;
+	}
+
+	res = &sdev.grp[vfid];
+	res->vfid = vfid;
+	res->bar0 = pci_dev->mem_resource[0].addr;
+	res->bar2 = pci_dev->mem_resource[2].addr;
+	res->domain = val & 0xffff;
+
+	sdev.total_ssovfs++;
+	rte_wmb();
+	mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+		     res->vfid, sdev.total_ssovfs);
+	return 0;
+}
+
+static const struct rte_pci_id pci_ssovf_map[] = {
+	{
+		RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+				PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
+	},
+	{
+		.vendor_id = 0,
+	},
+};
+
+static struct rte_pci_driver pci_ssovf = {
+	.id_table = pci_ssovf_map,
+	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+	.probe = ssovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
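ssovf_probe() treats the 64-bit word read from SSO_VHGRP_AQ_THR as an identity record, taking the domain from bits 15:0 and the vfid from bits 31:16, as the two mask-and-shift expressions show. A standalone sketch of that decode; the struct and function names are invented here for illustration:

#include <stdint.h>

/* Invented types mirroring the decode ssovf_probe performs. */
struct ssovf_identity {
	uint16_t domain;
	uint16_t vfid;
};

static struct ssovf_identity
decode_identity(uint64_t aq_thr_val)
{
	struct ssovf_identity id;

	id.domain = aq_thr_val & 0xffff;       /* bits 15:0  */
	id.vfid = (aq_thr_val >> 16) & 0xffff; /* bits 31:16 */
	return id;
}

diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
new file mode 100644
index 00000000..e89355cd
--- /dev/null
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -0,0 +1,253 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Cavium Inc. All rights reserved.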
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above copyright
+ *     notice, this list of conditions and the following disclaimer in
+ *     the documentation and/or other materials provided with the
+ *     distribution.
+ *   * Neither the name of Cavium Inc nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_fpavf.h"
+
+/*
+ * Per-pool descriptor.
+ * Links a mempool with the corresponding memzone
+ * that provides the memory backing the pool's elements.
+ */
+struct octeontx_pool_info {
+	const struct rte_mempool *mp;
+	uintptr_t mz_addr;
+
+	SLIST_ENTRY(octeontx_pool_info) link;
+};
+
+SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
+
+/* List of the allocated pools */
+static struct octeontx_pool_list octeontx_pool_head =
+	SLIST_HEAD_INITIALIZER(octeontx_pool_head);
+/* Spinlock to protect pool list */
+static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int
+octeontx_fpavf_alloc(struct rte_mempool *mp)
+{
+	uintptr_t pool;
+	struct octeontx_pool_info *pool_info;
+	uint32_t memseg_count = mp->size;
+	uint32_t object_size;
+	uintptr_t va_start;
+	int rc = 0;
+
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+		if (pool_info->mp == mp)
+			break;
+	}
+	if (pool_info == NULL) {
+		rte_spinlock_unlock(&pool_list_lock);
+		return -ENXIO;
+	}
+
+	/* virtual hugepage mapped addr */
+	va_start = pool_info->mz_addr;
+	rte_spinlock_unlock(&pool_list_lock);
+
+	object_size = mp->elt_size + mp->header_size + mp->trailer_size;
+
+	pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
+					   OCTEONTX_FPAVF_BUF_OFFSET,
+					   (char **)&va_start,
+					   mp->socket_id);
+	rc = octeontx_fpa_bufpool_block_size(pool);
+	if (rc < 0)
+		goto _end;
+
+	if ((uint32_t)rc != object_size)
+		fpavf_log_err("buffer size mismatch: %d instead of %u\n",
+			      rc, object_size);
+
+	fpavf_log_info("Pool created %p: obj_sz %u, cnt %u\n",
+		       (void *)pool, object_size, memseg_count);
+
+	/* assign pool handle to mempool */
+	mp->pool_id = (uint64_t)pool;
+
+	return 0;
+
+_end:
+	return rc;
+}
+
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+	struct octeontx_pool_info *pool_info;
+	uintptr_t pool;
+
+	pool = (uintptr_t)mp->pool_id;
+
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+		if (pool_info->mp == mp)
+			break;
+	}
+
+	if (pool_info == NULL) {
+		rte_spinlock_unlock(&pool_list_lock);
+		rte_panic("%s: trying to free pool with no valid metadata",
+			  __func__);
+	}
+
+	SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
+	rte_spinlock_unlock(&pool_list_lock);
+
+	rte_free(pool_info);
+	octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
+static __rte_always_inline void *
+octeontx_fpa_bufpool_alloc(uintptr_t handle)
+{
+	return (void *)(uintptr_t)fpavf_read64((void *)(handle +
+						FPA_VF_VHAURA_OP_ALLOC(0)));
+}
+
+static __rte_always_inline void
+octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
+{
+	uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
+						 0 /* DWB */, 1 /* FABS */);
+
+	fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
+}
+
+static int
+octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
+			unsigned int n)
+{
+	uintptr_t pool;
+	unsigned int index;
+
+	pool = (uintptr_t)mp->pool_id;
+	/* Mask off the gpool id bits to get the pool BAR address from the handle */
+	pool &= ~(uint64_t)FPA_GPOOL_MASK;
+	for (index = 0; index < n; index++, obj_table++)
+		octeontx_fpa_bufpool_free(pool, *obj_table);
+
+	return 0;
+}
+
+static int
+octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
+			unsigned int n)
+{
+	unsigned int index;
+	uintptr_t pool;
+	void *obj;
+
+	pool = (uintptr_t)mp->pool_id;
+	/* Get pool bar address from handle */
+	pool &= ~(uint64_t)FPA_GPOOL_MASK;
+	for (index = 0; index < n; index++, obj_table++) {
+		obj = octeontx_fpa_bufpool_alloc(pool);
+		if (obj == NULL) {
+			/*
+			 * Failed to allocate the requested number of objects
+			 * from the pool. The pool implementation must satisfy
+			 * the entire request or fail it, so roll back and
+			 * free the buffers already taken back to the pool.
+			 */
+			for (; index > 0; index--) {
+				obj_table--;
+				octeontx_fpa_bufpool_free(pool, *obj_table);
+			}
+			return -ENOMEM;
+		}
+		*obj_table = obj;
+	}
+
+	return 0;
+}
+
+static unsigned int
+octeontx_fpavf_get_count(const struct rte_mempool *mp)
+{
+	uintptr_t pool;
+
+	pool = (uintptr_t)mp->pool_id;
+
+	return octeontx_fpa_bufpool_free_count(pool);
+}
+
+static int
+octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
+				unsigned int *flags)
+{
+	RTE_SET_USED(mp);
+	*flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
+			MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
+	return 0;
+}
+
+static int
+octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
+				    char *vaddr, rte_iova_t paddr, size_t len)
+{
+	struct octeontx_pool_info *pool_info;
+
+	RTE_SET_USED(paddr);
+	RTE_SET_USED(len);
+
+	pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
+	if (pool_info == NULL)
+		return -ENOMEM;
+
+	pool_info->mp = mp;
+	pool_info->mz_addr = (uintptr_t)vaddr;
+	rte_spinlock_lock(&pool_list_lock);
+	SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
+	rte_spinlock_unlock(&pool_list_lock);
+	return 0;
+}
+
+static struct rte_mempool_ops octeontx_fpavf_ops = {
+	.name = "octeontx_fpavf",
+	.alloc = octeontx_fpavf_alloc,
+	.free = octeontx_fpavf_free,
+	.enqueue = octeontx_fpavf_enqueue,
+	.dequeue = octeontx_fpavf_dequeue,
+	.get_count = octeontx_fpavf_get_count,
+	.get_capabilities = octeontx_fpavf_get_capabilities,
+	.register_memory_area = octeontx_fpavf_register_memory_area,
+};
+
+MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
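With octeontx_fpavf_ops registered, an application binds a pool to this backend by name through the standard mempool API. A minimal sketch under the 17.11 API; the pool name and sizes are arbitrary:

#include <rte_mempool.h>

static struct rte_mempool *
create_fpavf_pool(unsigned int n, unsigned int elt_size, int socket_id)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("fpavf_pool", n, elt_size,
				      0, 0, socket_id, 0);
	if (mp == NULL)
		return NULL;

	/* Bind the pool to the ops registered by MEMPOOL_REGISTER_OPS. */
	if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) < 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}

Note that register_memory_area() records the memzone base address that octeontx_fpavf_alloc() later hands to octeontx_fpa_bufpool_create(), so the pool must be populated through the mempool API for the handle setup to work.

diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
new file mode 100644
index 00000000..fe8cdeca
--- /dev/null
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -0,0 +1,9 @@
+DPDK_17.11 {
+	global:
+
+	octeontx_ssovf_info;
+	octeontx_ssovf_bar;
+	octeontx_ssovf_mbox_send;
+
+	local: *;
+};
diff --git a/drivers/mempool/ring/Makefile b/drivers/mempool/ring/Makefile
index b339d907..a7889b96 100644
--- a/drivers/mempool/ring/Makefile
+++ b/drivers/mempool/ring/Makefile
@@ -37,6 +37,7 @@ LIB = librte_mempool_ring.a
 
 CFLAGS += -O3
 CFLAGS += $(WERROR_FLAGS)
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
 
 EXPORT_MAP := rte_mempool_ring_version.map
diff --git a/drivers/mempool/stack/Makefile b/drivers/mempool/stack/Makefile
index 7577b23c..f8d6c574 100644
--- a/drivers/mempool/stack/Makefile
+++ b/drivers/mempool/stack/Makefile
@@ -40,6 +40,7 @@ CFLAGS += $(WERROR_FLAGS)
 
 # Headers
 CFLAGS += -I$(RTE_SDK)/lib/librte_mempool
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring
 
 EXPORT_MAP := rte_mempool_stack_version.map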