Diffstat (limited to 'drivers/mempool/octeontx')
-rw-r--r--  drivers/mempool/octeontx/Makefile                           |  68
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.c                   | 830
-rw-r--r--  drivers/mempool/octeontx/octeontx_fpavf.h                   | 131
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.c                    | 242
-rw-r--r--  drivers/mempool/octeontx/octeontx_mbox.h                    |  64
-rw-r--r--  drivers/mempool/octeontx/octeontx_pool_logs.h               |  68
-rw-r--r--  drivers/mempool/octeontx/octeontx_ssovf.c                   | 299
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx.c             | 253
-rw-r--r--  drivers/mempool/octeontx/rte_mempool_octeontx_version.map  |    9
9 files changed, 1964 insertions, 0 deletions
diff --git a/drivers/mempool/octeontx/Makefile b/drivers/mempool/octeontx/Makefile
new file mode 100644
index 00000000..a2e2863c
--- /dev/null
+++ b/drivers/mempool/octeontx/Makefile
@@ -0,0 +1,68 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Cavium Inc. All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Cavium Networks nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+#
+# library name
+#
+LIB = librte_mempool_octeontx.a
+
+CFLAGS += $(WERROR_FLAGS)
+EXPORT_MAP := rte_mempool_octeontx_version.map
+
+LIBABIVER := 1
+
+#
+# all source are stored in SRCS-y
+#
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_ssovf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_mbox.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += octeontx_fpavf.c
+SRCS-$(CONFIG_RTE_LIBRTE_OCTEONTX_MEMPOOL) += rte_mempool_octeontx.c
+
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_rte_mempool_octeontx.o += -fno-prefetch-loop-arrays
+
+ifeq ($(shell test $(GCC_VERSION) -ge 46 && echo 1), 1)
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+else
+CFLAGS_rte_mempool_octeontx.o += -O3 -ffast-math
+endif
+
+else
+CFLAGS_rte_mempool_octeontx.o += -Ofast
+endif
+
+LDLIBS += -lrte_eal -lrte_mempool -lrte_ring -lrte_mbuf
+LDLIBS += -lrte_bus_pci
+
+include $(RTE_SDK)/mk/rte.lib.mk
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.c b/drivers/mempool/octeontx/octeontx_fpavf.c
new file mode 100644
index 00000000..3bc50f35
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_fpavf.c
@@ -0,0 +1,830 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium Inc. 2017. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdbool.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <sys/mman.h>
+
+#include <rte_atomic.h>
+#include <rte_eal.h>
+#include <rte_bus_pci.h>
+#include <rte_errno.h>
+#include <rte_memory.h>
+#include <rte_malloc.h>
+#include <rte_spinlock.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_mbox.h"
+#include "octeontx_fpavf.h"
+
+/* FPA Mbox Message */
+#define IDENTIFY 0x0
+
+#define FPA_CONFIGSET 0x1
+#define FPA_CONFIGGET 0x2
+#define FPA_START_COUNT 0x3
+#define FPA_STOP_COUNT 0x4
+#define FPA_ATTACHAURA 0x5
+#define FPA_DETACHAURA 0x6
+#define FPA_SETAURALVL 0x7
+#define FPA_GETAURALVL 0x8
+
+#define FPA_COPROC 0x1
+
+/* fpa mbox struct */
+struct octeontx_mbox_fpa_cfg {
+ int aid;
+ uint64_t pool_cfg;
+ uint64_t pool_stack_base;
+ uint64_t pool_stack_end;
+ uint64_t aura_cfg;
+};
+
+struct __attribute__((__packed__)) gen_req {
+ uint32_t value;
+};
+
+struct __attribute__((__packed__)) idn_req {
+ uint8_t domain_id;
+};
+
+struct __attribute__((__packed__)) gen_resp {
+ uint16_t domain_id;
+ uint16_t vfid;
+};
+
+struct __attribute__((__packed__)) dcfg_resp {
+ uint8_t sso_count;
+ uint8_t ssow_count;
+ uint8_t fpa_count;
+ uint8_t pko_count;
+ uint8_t tim_count;
+ uint8_t net_port_count;
+ uint8_t virt_port_count;
+};
+
+#define FPA_MAX_POOL 32
+#define FPA_PF_PAGE_SZ 4096
+
+#define FPA_LN_SIZE 128
+#define FPA_ROUND_UP(x, size) \
+	((((unsigned long)(x)) + (size) - 1) & (~((size) - 1)))
+#define FPA_OBJSZ_2_CACHE_LINE(sz) (((sz) + RTE_CACHE_LINE_MASK) >> 7)
+#define FPA_CACHE_LINE_2_OBJSZ(sz) ((sz) << 7)
+
+#define POOL_ENA (0x1 << 0)
+#define POOL_DIS (0x0 << 0)
+#define POOL_SET_NAT_ALIGN (0x1 << 1)
+#define POOL_DIS_NAT_ALIGN (0x0 << 1)
+#define POOL_STYPE(x) (((x) & 0x1) << 2)
+#define POOL_LTYPE(x) (((x) & 0x3) << 3)
+#define POOL_BUF_OFFSET(x) (((x) & 0x7fffULL) << 16)
+#define POOL_BUF_SIZE(x) (((x) & 0x7ffULL) << 32)
+
+struct fpavf_res {
+ void *pool_stack_base;
+ void *bar0;
+ uint64_t stack_ln_ptr;
+ uint16_t domain_id;
+ uint16_t vf_id; /* gpool_id */
+ uint16_t sz128; /* Block size in cache lines */
+ bool is_inuse;
+};
+
+struct octeontx_fpadev {
+ rte_spinlock_t lock;
+ uint8_t total_gpool_cnt;
+ struct fpavf_res pool[FPA_VF_MAX];
+};
+
+static struct octeontx_fpadev fpadev;
+
+/* lock is taken by caller */
+static int
+octeontx_fpa_gpool_alloc(unsigned int object_size)
+{
+ struct fpavf_res *res = NULL;
+ uint16_t gpool;
+ unsigned int sz128;
+
+ sz128 = FPA_OBJSZ_2_CACHE_LINE(object_size);
+
+ for (gpool = 0; gpool < FPA_VF_MAX; gpool++) {
+
+		/* Skip a VF that is not mapped or is already in use */
+ if ((fpadev.pool[gpool].bar0 == NULL) ||
+ (fpadev.pool[gpool].is_inuse == true))
+ continue;
+
+ res = &fpadev.pool[gpool];
+
+ RTE_ASSERT(res->domain_id != (uint16_t)~0);
+ RTE_ASSERT(res->vf_id != (uint16_t)~0);
+ RTE_ASSERT(res->stack_ln_ptr != 0);
+
+ if (res->sz128 == 0) {
+ res->sz128 = sz128;
+
+ fpavf_log_dbg("gpool %d blk_sz %d\n", gpool, sz128);
+ return gpool;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+/* lock is taken by caller */
+static __rte_always_inline uintptr_t
+octeontx_fpa_gpool2handle(uint16_t gpool)
+{
+ struct fpavf_res *res = NULL;
+
+ RTE_ASSERT(gpool < FPA_VF_MAX);
+
+ res = &fpadev.pool[gpool];
+ return (uintptr_t)res->bar0 | gpool;
+}
+
+static __rte_always_inline bool
+octeontx_fpa_handle_valid(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+ int i;
+ bool ret = false;
+
+ if (unlikely(!handle))
+ return ret;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* get the bar address */
+ handle &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (i = 0; i < FPA_VF_MAX; i++) {
+ if ((uintptr_t)fpadev.pool[i].bar0 != handle)
+ continue;
+
+ /* validate gpool */
+ if (gpool != i)
+ return false;
+
+ res = &fpadev.pool[i];
+
+ if (res->sz128 == 0 || res->domain_id == (uint16_t)~0 ||
+ res->stack_ln_ptr == 0)
+ ret = false;
+ else
+ ret = true;
+ break;
+ }
+
+ return ret;
+}
+
+static int
+octeontx_fpapf_pool_setup(unsigned int gpool, unsigned int buf_size,
+ signed short buf_offset, unsigned int max_buf_count)
+{
+ void *memptr = NULL;
+ rte_iova_t phys_addr;
+ unsigned int memsz;
+ struct fpavf_res *fpa = NULL;
+ uint64_t reg;
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = -1;
+
+ fpa = &fpadev.pool[gpool];
+ memsz = FPA_ROUND_UP(max_buf_count / fpa->stack_ln_ptr, FPA_LN_SIZE) *
+ FPA_LN_SIZE;
+
+ /* Round-up to page size */
+ memsz = (memsz + FPA_PF_PAGE_SZ - 1) & ~(uintptr_t)(FPA_PF_PAGE_SZ-1);
+ memptr = rte_malloc(NULL, memsz, RTE_CACHE_LINE_SIZE);
+ if (memptr == NULL) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ /* Configure stack */
+ fpa->pool_stack_base = memptr;
+ phys_addr = rte_malloc_virt2iova(memptr);
+
+ buf_size /= FPA_LN_SIZE;
+
+ /* POOL setup */
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ buf_offset /= FPA_LN_SIZE;
+ reg = POOL_BUF_SIZE(buf_size) | POOL_BUF_OFFSET(buf_offset) |
+ POOL_LTYPE(0x2) | POOL_STYPE(0) | POOL_SET_NAT_ALIGN |
+ POOL_ENA;
+
+ cfg.aid = 0;
+ cfg.pool_cfg = reg;
+ cfg.pool_stack_base = phys_addr;
+ cfg.pool_stack_end = phys_addr + memsz;
+ cfg.aura_cfg = (1 << 9);
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ fpavf_log_dbg(" vfid %d gpool %d aid %d pool_cfg 0x%x pool_stack_base %" PRIx64 " pool_stack_end %" PRIx64" aura_cfg %" PRIx64 "\n",
+ fpa->vf_id, gpool, cfg.aid, (unsigned int)cfg.pool_cfg,
+ cfg.pool_stack_base, cfg.pool_stack_end, cfg.aura_cfg);
+
+ /* Now pool is in_use */
+ fpa->is_inuse = true;
+
+err:
+ if (ret < 0)
+ rte_free(memptr);
+
+ return ret;
+}
+
+static int
+octeontx_fpapf_pool_destroy(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ struct fpavf_res *fpa = NULL;
+ int ret = -1;
+
+ fpa = &fpadev.pool[gpool_index];
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_CONFIGSET;
+ hdr.vfid = fpa->vf_id;
+ hdr.res_code = 0;
+
+ /* reset and free the pool */
+ cfg.aid = 0;
+ cfg.pool_cfg = 0;
+ cfg.pool_stack_base = 0;
+ cfg.pool_stack_end = 0;
+ cfg.aura_cfg = 0;
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ ret = -EACCES;
+ goto err;
+ }
+
+ ret = 0;
+err:
+	/* free the pool stack memory in any case */
+ rte_free(fpa->pool_stack_base);
+ fpa->pool_stack_base = NULL;
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_attach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_hdr hdr;
+ struct dcfg_resp resp;
+ struct octeontx_mbox_fpa_cfg cfg;
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_ATTACHAURA;
+ hdr.vfid = gpool_index;
+ hdr.res_code = 0;
+ memset(&cfg, 0x0, sizeof(struct octeontx_mbox_fpa_cfg));
+	cfg.aid = gpool_index; /* gpool is gaura */
+
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg,
+ sizeof(struct octeontx_mbox_fpa_cfg),
+ &resp, sizeof(resp));
+ if (ret < 0) {
+ fpavf_log_err("Could not attach fpa ");
+ fpavf_log_err("aura %d to pool %d. Err=%d. FuncErr=%d\n",
+ gpool_index, gpool_index, ret, hdr.res_code);
+ ret = -EACCES;
+ goto err;
+ }
+err:
+ return ret;
+}
+
+static int
+octeontx_fpapf_aura_detach(unsigned int gpool_index)
+{
+ struct octeontx_mbox_fpa_cfg cfg = {0};
+ struct octeontx_mbox_hdr hdr = {0};
+ int ret = 0;
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ cfg.aid = gpool_index; /* gpool is gaura */
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_DETACHAURA;
+ hdr.vfid = gpool_index;
+ ret = octeontx_ssovf_mbox_send(&hdr, &cfg, sizeof(cfg), NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Couldn't detach FPA aura %d Err=%d FuncErr=%d\n",
+ gpool_index, ret, hdr.res_code);
+ ret = -EINVAL;
+ }
+
+err:
+ return ret;
+}
+
+static int
+octeontx_fpavf_pool_setup(uintptr_t handle, unsigned long memsz,
+ void *memva, uint16_t gpool)
+{
+ uint64_t va_end;
+
+ if (unlikely(!handle))
+ return -ENODEV;
+
+ va_end = (uintptr_t)memva + memsz;
+ va_end &= ~RTE_CACHE_LINE_MASK;
+
+ /* VHPOOL setup */
+ fpavf_write64((uintptr_t)memva,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(va_end,
+ (void *)((uintptr_t)handle +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+ return 0;
+}
+
+static int
+octeontx_fpapf_start_count(uint16_t gpool_index)
+{
+ int ret = 0;
+ struct octeontx_mbox_hdr hdr = {0};
+
+ if (gpool_index >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ hdr.coproc = FPA_COPROC;
+ hdr.msg = FPA_START_COUNT;
+ hdr.vfid = gpool_index;
+ ret = octeontx_ssovf_mbox_send(&hdr, NULL, 0, NULL, 0);
+ if (ret < 0) {
+ fpavf_log_err("Could not start buffer counting for ");
+ fpavf_log_err("FPA pool %d. Err=%d. FuncErr=%d\n",
+ gpool_index, ret, hdr.res_code);
+ ret = -EINVAL;
+ goto err;
+ }
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_fpavf_free(unsigned int gpool)
+{
+ int ret = 0;
+
+ if (gpool >= FPA_MAX_POOL) {
+ ret = -EINVAL;
+ goto err;
+ }
+
+ /* Pool is free */
+ fpadev.pool[gpool].is_inuse = false;
+
+err:
+ return ret;
+}
+
+static __rte_always_inline int
+octeontx_gpool_free(uint16_t gpool)
+{
+ if (fpadev.pool[gpool].sz128 != 0) {
+ fpadev.pool[gpool].sz128 = 0;
+ return 0;
+ }
+ return -EINVAL;
+}
+
+/*
+ * Return buffer size for a given pool
+ */
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle)
+{
+ struct fpavf_res *res = NULL;
+ uint8_t gpool;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+ res = &fpadev.pool[gpool];
+ return FPA_CACHE_LINE_2_OBJSZ(res->sz128);
+}
+
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle)
+{
+ uint64_t cnt, limit, avail;
+ uint8_t gpool;
+ uintptr_t pool_bar;
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the gpool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gpool)));
+ limit = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ return RTE_MIN(avail, (limit - cnt));
+}
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, char **va_start,
+ int node_id)
+{
+ unsigned int gpool;
+ void *memva;
+ unsigned long memsz;
+ uintptr_t gpool_handle;
+ uintptr_t pool_bar;
+ int res;
+
+ RTE_SET_USED(node_id);
+ RTE_BUILD_BUG_ON(sizeof(struct rte_mbuf) > OCTEONTX_FPAVF_BUF_OFFSET);
+
+ if (unlikely(*va_start == NULL))
+ goto error_end;
+
+ object_size = RTE_CACHE_LINE_ROUNDUP(object_size);
+ if (object_size > FPA_MAX_OBJ_SIZE) {
+ errno = EINVAL;
+ goto error_end;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+ res = octeontx_fpa_gpool_alloc(object_size);
+
+ /* Bail if failed */
+ if (unlikely(res < 0)) {
+ errno = res;
+ goto error_unlock;
+ }
+
+ /* get fpavf */
+ gpool = res;
+
+ /* get pool handle */
+ gpool_handle = octeontx_fpa_gpool2handle(gpool);
+ if (!octeontx_fpa_handle_valid(gpool_handle)) {
+ errno = ENOSPC;
+ goto error_gpool_free;
+ }
+
+ /* Get pool bar address from handle */
+ pool_bar = gpool_handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ res = octeontx_fpapf_pool_setup(gpool, object_size, buf_offset,
+ object_count);
+ if (res < 0) {
+ errno = res;
+ goto error_gpool_free;
+ }
+
+ /* populate AURA fields */
+ res = octeontx_fpapf_aura_attach(gpool);
+ if (res < 0) {
+ errno = res;
+ goto error_pool_destroy;
+ }
+
+ /* vf pool setup */
+ memsz = object_size * object_count;
+ memva = *va_start;
+ res = octeontx_fpavf_pool_setup(pool_bar, memsz, memva, gpool);
+ if (res < 0) {
+ errno = res;
+ goto error_gaura_detach;
+ }
+
+ /* Release lock */
+ rte_spinlock_unlock(&fpadev.lock);
+
+ /* populate AURA registers */
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gpool)));
+ fpavf_write64(object_count, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ fpavf_write64(object_count + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+ octeontx_fpapf_start_count(gpool);
+
+ return gpool_handle;
+
+error_gaura_detach:
+ (void) octeontx_fpapf_aura_detach(gpool);
+error_pool_destroy:
+ octeontx_fpavf_free(gpool);
+ octeontx_fpapf_pool_destroy(gpool);
+error_gpool_free:
+ octeontx_gpool_free(gpool);
+error_unlock:
+ rte_spinlock_unlock(&fpadev.lock);
+error_end:
+ return (uintptr_t)NULL;
+}
+
+/*
+ * Destroy a buffer pool.
+ */
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node_id)
+{
+ void **node, **curr, *head = NULL;
+ uint64_t sz;
+ uint64_t cnt, avail;
+ uint8_t gpool;
+ uintptr_t pool_bar;
+ int ret;
+
+ RTE_SET_USED(node_id);
+
+ /* Wait for all outstanding writes to be committed */
+ rte_smp_wmb();
+
+ if (unlikely(!octeontx_fpa_handle_valid(handle)))
+ return -EINVAL;
+
+ /* get the pool */
+ gpool = octeontx_fpa_bufpool_gpool(handle);
+
+ /* Get pool bar address from handle */
+ pool_bar = handle & ~(uint64_t)FPA_GPOOL_MASK;
+
+ /* Check for no outstanding buffers */
+ cnt = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT(gpool)));
+ if (cnt) {
+ fpavf_log_dbg("buffer exist in pool cnt %" PRId64 "\n", cnt);
+ return -EBUSY;
+ }
+
+ rte_spinlock_lock(&fpadev.lock);
+
+ avail = fpavf_read64((void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_AVAILABLE(gpool)));
+
+ /* Prepare to empty the entire POOL */
+ fpavf_write64(avail, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ fpavf_write64(avail + 1, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+ /* Empty the pool */
+ /* Invalidate the POOL */
+ octeontx_gpool_free(gpool);
+
+ /* Process all buffers in the pool */
+ while (avail--) {
+
+ /* Yank a buffer from the pool */
+ node = (void *)(uintptr_t)
+ fpavf_read64((void *)
+ (pool_bar + FPA_VF_VHAURA_OP_ALLOC(gpool)));
+
+ if (node == NULL) {
+ fpavf_log_err("GAURA[%u] missing %" PRIx64 " buf\n",
+ gpool, avail);
+ break;
+ }
+
+		/* Insert it into an ordered linked list */
+ for (curr = &head; curr[0] != NULL; curr = curr[0]) {
+ if ((uintptr_t)node <= (uintptr_t)curr[0])
+ break;
+ }
+ node[0] = curr[0];
+ curr[0] = node;
+ }
+
+	/* Verify that the linked list forms a perfect series */
+ sz = octeontx_fpa_bufpool_block_size(handle) << 7;
+ for (curr = head; curr != NULL && curr[0] != NULL;
+ curr = curr[0]) {
+ if (curr == curr[0] ||
+ ((uintptr_t)curr != ((uintptr_t)curr[0] - sz))) {
+ fpavf_log_err("POOL# %u buf sequence err (%p vs. %p)\n",
+ gpool, curr, curr[0]);
+ }
+ }
+
+ /* Disable pool operation */
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_START_ADDR(gpool)));
+ fpavf_write64(~0ul, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHPOOL_END_ADDR(gpool)));
+
+ (void)octeontx_fpapf_pool_destroy(gpool);
+
+ /* Deactivate the AURA */
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_LIMIT(gpool)));
+ fpavf_write64(0, (void *)((uintptr_t)pool_bar +
+ FPA_VF_VHAURA_CNT_THRESHOLD(gpool)));
+
+ ret = octeontx_fpapf_aura_detach(gpool);
+ if (ret) {
+ fpavf_log_err("Failed to dettach gaura %u. error code=%d\n",
+ gpool, ret);
+ }
+
+ /* Free VF */
+ (void)octeontx_fpavf_free(gpool);
+
+ rte_spinlock_unlock(&fpadev.lock);
+ return 0;
+}
+
+static void
+octeontx_fpavf_setup(void)
+{
+ uint8_t i;
+ static bool init_once;
+
+ if (!init_once) {
+ rte_spinlock_init(&fpadev.lock);
+ fpadev.total_gpool_cnt = 0;
+
+ for (i = 0; i < FPA_VF_MAX; i++) {
+
+ fpadev.pool[i].domain_id = ~0;
+ fpadev.pool[i].stack_ln_ptr = 0;
+ fpadev.pool[i].sz128 = 0;
+ fpadev.pool[i].bar0 = NULL;
+ fpadev.pool[i].pool_stack_base = NULL;
+ fpadev.pool[i].is_inuse = false;
+ }
+ init_once = 1;
+ }
+}
+
+static int
+octeontx_fpavf_identify(void *bar0)
+{
+ uint64_t val;
+ uint16_t domain_id;
+ uint16_t vf_id;
+ uint64_t stack_ln_ptr;
+
+ val = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHAURA_CNT_THRESHOLD(0)));
+
+ domain_id = (val >> 8) & 0xffff;
+ vf_id = (val >> 24) & 0xffff;
+
+ stack_ln_ptr = fpavf_read64((void *)((uintptr_t)bar0 +
+ FPA_VF_VHPOOL_THRESHOLD(0)));
+ if (vf_id >= FPA_VF_MAX) {
+ fpavf_log_err("vf_id(%d) greater than max vf (32)\n", vf_id);
+ return -1;
+ }
+
+ if (fpadev.pool[vf_id].is_inuse) {
+ fpavf_log_err("vf_id %d is_inuse\n", vf_id);
+ return -1;
+ }
+
+ fpadev.pool[vf_id].domain_id = domain_id;
+ fpadev.pool[vf_id].vf_id = vf_id;
+ fpadev.pool[vf_id].bar0 = bar0;
+ fpadev.pool[vf_id].stack_ln_ptr = stack_ln_ptr;
+
+ /* SUCCESS */
+ return vf_id;
+}
+
+/* FPAVF pcie device aka mempool probe */
+static int
+fpavf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint8_t *idreg;
+ int res;
+ struct fpavf_res *fpa = NULL;
+
+ RTE_SET_USED(pci_drv);
+ RTE_SET_USED(fpa);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL) {
+ fpavf_log_err("Empty bars %p ", pci_dev->mem_resource[0].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+
+ octeontx_fpavf_setup();
+
+ res = octeontx_fpavf_identify(idreg);
+ if (res < 0)
+ return -1;
+
+ fpa = &fpadev.pool[res];
+ fpadev.total_gpool_cnt++;
+ rte_wmb();
+
+ fpavf_log_dbg("total_fpavfs %d bar0 %p domain %d vf %d stk_ln_ptr 0x%x",
+ fpadev.total_gpool_cnt, fpa->bar0, fpa->domain_id,
+ fpa->vf_id, (unsigned int)fpa->stack_ln_ptr);
+
+ return 0;
+}
+
+static const struct rte_pci_id pci_fpavf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_FPA_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_fpavf = {
+ .id_table = pci_fpavf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_IOVA_AS_VA,
+ .probe = fpavf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_fpavf, pci_fpavf);
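The pool handle used throughout this file is a tagged pointer: octeontx_fpa_gpool2handle() ORs the gpool index into the low bits of the VF BAR0 address, which is safe because the BAR mapping is at least page aligned and FPA_GPOOL_MASK is only 31. A minimal sketch of the round trip, for illustration only (the helper names below are not part of the driver):

#include <stdint.h>

#define FPA_VF_MAX	32
#define FPA_GPOOL_MASK	(FPA_VF_MAX - 1)

/* Encode: the BAR0 mapping is page aligned, so its low five bits are
 * zero and the gpool index can be OR-ed in losslessly. */
static uintptr_t gpool2handle(void *bar0, uint16_t gpool)
{
	return (uintptr_t)bar0 | gpool;
}

/* Decode the two fields back out of the handle. */
static uint16_t handle2gpool(uintptr_t handle)
{
	return handle & FPA_GPOOL_MASK;
}

static void *handle2bar(uintptr_t handle)
{
	return (void *)(handle & ~(uintptr_t)FPA_GPOOL_MASK);
}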
diff --git a/drivers/mempool/octeontx/octeontx_fpavf.h b/drivers/mempool/octeontx/octeontx_fpavf.h
new file mode 100644
index 00000000..1d09f007
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_fpavf.h
@@ -0,0 +1,131 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Cavium Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_FPAVF_H__
+#define __OCTEONTX_FPAVF_H__
+
+#include <rte_io.h>
+#include "octeontx_pool_logs.h"
+
+/* fpa pool Vendor ID and Device ID */
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_FPA_VF 0xA053
+
+#define FPA_VF_MAX 32
+#define FPA_GPOOL_MASK (FPA_VF_MAX-1)
+
+/* FPA VF register offsets */
+#define FPA_VF_INT(x) (0x200ULL | ((x) << 22))
+#define FPA_VF_INT_W1S(x) (0x210ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1S(x) (0x220ULL | ((x) << 22))
+#define FPA_VF_INT_ENA_W1C(x) (0x230ULL | ((x) << 22))
+
+#define FPA_VF_VHPOOL_AVAILABLE(vhpool) (0x04150 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_THRESHOLD(vhpool) (0x04160 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_START_ADDR(vhpool) (0x04200 | ((vhpool)&0x0))
+#define FPA_VF_VHPOOL_END_ADDR(vhpool) (0x04210 | ((vhpool)&0x0))
+
+#define FPA_VF_VHAURA_CNT(vaura) (0x20120 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_ADD(vaura) (0x20128 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_LIMIT(vaura) (0x20130 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_CNT_THRESHOLD(vaura) (0x20140 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_ALLOC(vaura) (0x30000 | ((vaura)&0xf)<<18)
+#define FPA_VF_VHAURA_OP_FREE(vaura) (0x38000 | ((vaura)&0xf)<<18)
+
+#define FPA_VF_FREE_ADDRS_S(x, y, z) \
+ ((x) | (((y) & 0x1ff) << 3) | ((((z) & 1)) << 14))
+
+/* FPA VF register offsets from VF_BAR4, size 2 MByte */
+#define FPA_VF_MSIX_VEC_ADDR 0x00000
+#define FPA_VF_MSIX_VEC_CTL 0x00008
+#define FPA_VF_MSIX_PBA 0xF0000
+
+#define FPA_VF0_APERTURE_SHIFT 22
+#define FPA_AURA_SET_SIZE 16
+
+#define FPA_MAX_OBJ_SIZE (128 * 1024)
+#define OCTEONTX_FPAVF_BUF_OFFSET 128
+
+/*
+ * In the Cavium OcteonTX SoC, all accesses to the device registers are
+ * implicitly strongly ordered, so the relaxed version of the IO operations
+ * is safe to use without any IO memory barriers.
+ */
+#define fpavf_read64 rte_read64_relaxed
+#define fpavf_write64 rte_write64_relaxed
+
+/* ARM64 specific functions */
+#if defined(RTE_ARCH_ARM64)
+#define fpavf_load_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "ldp %x[x0], %x[x1], [%x[p1]]" \
+ :[x0]"=r"(val0), [x1]"=r"(val1) \
+ :[p1]"r"(addr) \
+ ); })
+
+#define fpavf_store_pair(val0, val1, addr) ({ \
+ asm volatile( \
+ "stp %x[x0], %x[x1], [%x[p1]]" \
+ ::[x0]"r"(val0), [x1]"r"(val1), [p1]"r"(addr) \
+ ); })
+#else /* Unoptimized fallbacks for building on non-arm64 architectures */
+
+#define fpavf_load_pair(val0, val1, addr) \
+do { \
+ val0 = rte_read64(addr); \
+ val1 = rte_read64(((uint8_t *)addr) + 8); \
+} while (0)
+
+#define fpavf_store_pair(val0, val1, addr) \
+do { \
+ rte_write64(val0, addr); \
+ rte_write64(val1, (((uint8_t *)addr) + 8)); \
+} while (0)
+#endif
+
+uintptr_t
+octeontx_fpa_bufpool_create(unsigned int object_size, unsigned int object_count,
+ unsigned int buf_offset, char **va_start,
+ int node);
+int
+octeontx_fpa_bufpool_destroy(uintptr_t handle, int node);
+int
+octeontx_fpa_bufpool_block_size(uintptr_t handle);
+int
+octeontx_fpa_bufpool_free_count(uintptr_t handle);
+
+static __rte_always_inline uint8_t
+octeontx_fpa_bufpool_gpool(uintptr_t handle)
+{
+ return (uint8_t)handle & FPA_GPOOL_MASK;
+}
+#endif /* __OCTEONTX_FPAVF_H__ */
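The four exported functions above form the whole pool lifecycle. A hedged usage sketch, assuming va already points to a cache-line aligned mapping of at least object_size * object_count bytes (error handling trimmed; the demo function name is illustrative):

#include <stdio.h>
#include "octeontx_fpavf.h"

static int fpavf_pool_demo(char *va, unsigned int obj_sz, unsigned int n)
{
	uintptr_t handle;

	/* Carve a hardware pool over the caller-provided region. */
	handle = octeontx_fpa_bufpool_create(obj_sz, n,
					     OCTEONTX_FPAVF_BUF_OFFSET,
					     &va, 0 /* node id, unused */);
	if (!handle)
		return -1; /* create() signals failure via errno */

	/* The driver rounds the block size up to cache lines. */
	printf("blk_sz=%d free=%d\n",
	       octeontx_fpa_bufpool_block_size(handle),
	       octeontx_fpa_bufpool_free_count(handle));

	return octeontx_fpa_bufpool_destroy(handle, 0);
}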
diff --git a/drivers/mempool/octeontx/octeontx_mbox.c b/drivers/mempool/octeontx/octeontx_mbox.c
new file mode 100644
index 00000000..9525da1a
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_mbox.c
@@ -0,0 +1,242 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string.h>
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_cycles.h>
+#include <rte_io.h>
+#include <rte_spinlock.h>
+
+#include "octeontx_mbox.h"
+#include "octeontx_pool_logs.h"
+
+/* Mbox operation timeout in seconds */
+#define MBOX_WAIT_TIME_SEC 3
+#define MAX_RAM_MBOX_LEN ((SSOW_BAR4_LEN >> 1) - 8 /* Mbox header */)
+
+/* Mbox channel state */
+enum {
+ MBOX_CHAN_STATE_REQ = 1,
+ MBOX_CHAN_STATE_RES = 0,
+};
+
+/* Response messages */
+enum {
+ MBOX_RET_SUCCESS,
+ MBOX_RET_INVALID,
+ MBOX_RET_INTERNAL_ERR,
+};
+
+struct mbox {
+ int init_once;
+ uint8_t *ram_mbox_base; /* Base address of mbox message stored in ram */
+ uint8_t *reg; /* Store to this register triggers PF mbox interrupt */
+ uint16_t tag_own; /* Last tag which was written to own channel */
+ rte_spinlock_t lock;
+};
+
+static struct mbox octeontx_mbox;
+
+/*
+ * Structure used for mbox synchronization
+ * This structure sits at the beginning of the mbox RAM and is used as the
+ * main synchronization point for channel communication.
+ */
+struct mbox_ram_hdr {
+ union {
+ uint64_t u64;
+ struct {
+ uint8_t chan_state : 1;
+ uint8_t coproc : 7;
+ uint8_t msg;
+ uint8_t vfid;
+ uint8_t res_code;
+ uint16_t tag;
+ uint16_t len;
+ };
+ };
+};
+
+static inline void
+mbox_msgcpy(uint8_t *d, const uint8_t *s, uint16_t size)
+{
+ uint16_t i;
+
+ for (i = 0; i < size; i++)
+ d[i] = s[i];
+}
+
+static inline void
+mbox_send_request(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ const void *txmsg, uint16_t txsize)
+{
+ struct mbox_ram_hdr old_hdr;
+ struct mbox_ram_hdr new_hdr = { {0} };
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /*
+	 * Initialize the channel with the tag left by the last send.
+	 * On successful completion of an mbox send, the PF increments the
+	 * tag by one; the sender can validate the integrity of the PF
+	 * message with this scheme.
+ */
+ old_hdr.u64 = rte_read64(ram_mbox_hdr);
+ m->tag_own = (old_hdr.tag + 2) & (~0x1ul); /* next even number */
+
+ /* Copy msg body */
+ if (txmsg)
+ mbox_msgcpy(ram_mbox_msg, txmsg, txsize);
+
+ /* Prepare new hdr */
+ new_hdr.chan_state = MBOX_CHAN_STATE_REQ;
+ new_hdr.coproc = hdr->coproc;
+ new_hdr.msg = hdr->msg;
+ new_hdr.vfid = hdr->vfid;
+ new_hdr.tag = m->tag_own;
+ new_hdr.len = txsize;
+
+ /* Write the msg header */
+ rte_write64(new_hdr.u64, ram_mbox_hdr);
+ rte_io_wmb();
+ /* Notify PF about the new msg - write to MBOX reg generates PF IRQ */
+ rte_write64(0, m->reg);
+}
+
+static inline int
+mbox_wait_response(struct mbox *m, struct octeontx_mbox_hdr *hdr,
+ void *rxmsg, uint16_t rxsize)
+{
+ int res = 0, wait;
+ uint16_t len;
+ struct mbox_ram_hdr rx_hdr;
+ uint64_t *ram_mbox_hdr = (uint64_t *)m->ram_mbox_base;
+ uint8_t *ram_mbox_msg = m->ram_mbox_base + sizeof(struct mbox_ram_hdr);
+
+ /* Wait for response */
+ wait = MBOX_WAIT_TIME_SEC * 1000 * 10;
+ while (wait > 0) {
+ rte_delay_us(100);
+ rx_hdr.u64 = rte_read64(ram_mbox_hdr);
+ if (rx_hdr.chan_state == MBOX_CHAN_STATE_RES)
+ break;
+ --wait;
+ }
+
+ hdr->res_code = rx_hdr.res_code;
+ m->tag_own++;
+
+ /* Timeout */
+ if (wait <= 0) {
+ res = -ETIMEDOUT;
+ goto error;
+ }
+
+ /* Tag mismatch */
+ if (m->tag_own != rx_hdr.tag) {
+ res = -EINVAL;
+ goto error;
+ }
+
+ /* PF nacked the msg */
+ if (rx_hdr.res_code != MBOX_RET_SUCCESS) {
+ res = -EBADMSG;
+ goto error;
+ }
+
+ len = RTE_MIN(rx_hdr.len, rxsize);
+ if (rxmsg)
+ mbox_msgcpy(rxmsg, ram_mbox_msg, len);
+
+ return len;
+
+error:
+ mbox_log_err("Failed to send mbox(%d/%d) coproc=%d msg=%d ret=(%d,%d)",
+ m->tag_own, rx_hdr.tag, hdr->coproc, hdr->msg, res,
+ hdr->res_code);
+ return res;
+}
+
+static inline int
+mbox_send(struct mbox *m, struct octeontx_mbox_hdr *hdr, const void *txmsg,
+ uint16_t txsize, void *rxmsg, uint16_t rxsize)
+{
+ int res = -EINVAL;
+
+ if (m->init_once == 0 || hdr == NULL ||
+ txsize > MAX_RAM_MBOX_LEN || rxsize > MAX_RAM_MBOX_LEN) {
+ mbox_log_err("Invalid init_once=%d hdr=%p txsz=%d rxsz=%d",
+ m->init_once, hdr, txsize, rxsize);
+ return res;
+ }
+
+ rte_spinlock_lock(&m->lock);
+
+ mbox_send_request(m, hdr, txmsg, txsize);
+ res = mbox_wait_response(m, hdr, rxmsg, rxsize);
+
+ rte_spinlock_unlock(&m->lock);
+ return res;
+}
+
+static inline int
+mbox_setup(struct mbox *m)
+{
+ if (unlikely(m->init_once == 0)) {
+ rte_spinlock_init(&m->lock);
+ m->ram_mbox_base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, 0, 4);
+ m->reg = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
+ m->reg += SSO_VHGRP_PF_MBOX(1);
+
+ if (m->ram_mbox_base == NULL || m->reg == NULL) {
+ mbox_log_err("Invalid ram_mbox_base=%p or reg=%p",
+ m->ram_mbox_base, m->reg);
+ return -EINVAL;
+ }
+ m->init_once = 1;
+ }
+ return 0;
+}
+
+int
+octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr, void *txdata,
+ uint16_t txlen, void *rxdata, uint16_t rxlen)
+{
+ struct mbox *m = &octeontx_mbox;
+
+ RTE_BUILD_BUG_ON(sizeof(struct mbox_ram_hdr) != 8);
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || mbox_setup(m))
+ return -EINVAL;
+
+ return mbox_send(m, hdr, txdata, txlen, rxdata, rxlen);
+}
diff --git a/drivers/mempool/octeontx/octeontx_mbox.h b/drivers/mempool/octeontx/octeontx_mbox.h
new file mode 100644
index 00000000..49f38257
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_mbox.h
@@ -0,0 +1,64 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_MBOX_H__
+#define __OCTEONTX_MBOX_H__
+
+#include <rte_common.h>
+
+#define SSOW_BAR4_LEN (64 * 1024)
+#define SSO_VHGRP_PF_MBOX(x) (0x200ULL | ((x) << 3))
+
+struct octeontx_ssovf_info {
+ uint16_t domain; /* Domain id */
+ uint8_t total_ssovfs; /* Total sso groups available in domain */
+ uint8_t total_ssowvfs;/* Total sso hws available in domain */
+};
+
+enum octeontx_ssovf_type {
+ OCTEONTX_SSO_GROUP, /* SSO group vf */
+ OCTEONTX_SSO_HWS, /* SSO hardware workslot vf */
+};
+
+struct octeontx_mbox_hdr {
+ uint16_t vfid; /* VF index or pf resource index local to the domain */
+ uint8_t coproc; /* Coprocessor id */
+ uint8_t msg; /* Message id */
+ uint8_t res_code; /* Functional layer response code */
+};
+
+int octeontx_ssovf_info(struct octeontx_ssovf_info *info);
+void *octeontx_ssovf_bar(enum octeontx_ssovf_type, uint8_t id, uint8_t bar);
+int octeontx_ssovf_mbox_send(struct octeontx_mbox_hdr *hdr,
+ void *txdata, uint16_t txlen, void *rxdata, uint16_t rxlen);
+
+#endif /* __OCTEONTX_MBOX_H__ */
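A caller fills octeontx_mbox_hdr, passes optional request and response buffers, and gets back the response length or a negative errno; hdr.res_code carries the PF's functional status. A minimal sketch, borrowing the FPA_COPROC/FPA_CONFIGGET message ids defined in octeontx_fpavf.c (the demo function itself is hypothetical):

#include <stdint.h>
#include "octeontx_mbox.h"

#define FPA_COPROC	0x1	/* from octeontx_fpavf.c */
#define FPA_CONFIGGET	0x2

static int fpa_configget_demo(uint16_t vfid, void *resp, uint16_t resp_len)
{
	struct octeontx_mbox_hdr hdr = {
		.vfid	= vfid,		 /* resource index inside the domain */
		.coproc	= FPA_COPROC,	 /* target co-processor */
		.msg	= FPA_CONFIGGET, /* message id within that coproc */
	};

	/* No request payload; the PF writes up to resp_len bytes back. */
	return octeontx_ssovf_mbox_send(&hdr, NULL, 0, resp, resp_len);
}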
diff --git a/drivers/mempool/octeontx/octeontx_pool_logs.h b/drivers/mempool/octeontx/octeontx_pool_logs.h
new file mode 100644
index 00000000..58ccb0f0
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_pool_logs.h
@@ -0,0 +1,68 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Cavium Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium networks nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __OCTEONTX_POOL_LOGS_H__
+#define __OCTEONTX_POOL_LOGS_H__
+
+#include <rte_debug.h>
+
+#ifdef RTE_LIBRTE_OCTEONTX_MEMPOOL_DEBUG
+#define fpavf_log_info(fmt, args...) \
+ RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#define fpavf_log_dbg(fmt, args...) \
+ RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+
+#define mbox_log_info(fmt, args...) \
+ RTE_LOG(INFO, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#define mbox_log_dbg(fmt, args...) \
+ RTE_LOG(DEBUG, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#else
+#define fpavf_log_info(fmt, args...)
+#define fpavf_log_dbg(fmt, args...)
+#define mbox_log_info(fmt, args...)
+#define mbox_log_dbg(fmt, args...)
+#endif
+
+#define fpavf_func_trace fpavf_log_dbg
+#define fpavf_log_err(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+#define mbox_func_trace mbox_log_dbg
+#define mbox_log_err(fmt, args...) \
+ RTE_LOG(ERR, PMD, "%s() line %u: " fmt "\n", \
+ __func__, __LINE__, ## args)
+
+#endif /* __OCTEONTX_POOL_LOGS_H__*/
diff --git a/drivers/mempool/octeontx/octeontx_ssovf.c b/drivers/mempool/octeontx/octeontx_ssovf.c
new file mode 100644
index 00000000..012c887d
--- /dev/null
+++ b/drivers/mempool/octeontx/octeontx_ssovf.c
@@ -0,0 +1,299 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) Cavium, Inc. 2017.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Cavium, Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <rte_atomic.h>
+#include <rte_common.h>
+#include <rte_eal.h>
+#include <rte_io.h>
+#include <rte_pci.h>
+#include <rte_bus_pci.h>
+
+#include "octeontx_mbox.h"
+#include "octeontx_pool_logs.h"
+
+#define PCI_VENDOR_ID_CAVIUM 0x177D
+#define PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF 0xA04B
+#define PCI_DEVICE_ID_OCTEONTX_SSOWS_VF 0xA04D
+
+#define SSO_MAX_VHGRP (64)
+#define SSO_MAX_VHWS (32)
+
+#define SSO_VHGRP_AQ_THR (0x1E0ULL)
+
+struct ssovf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+};
+
+struct ssowvf_res {
+ uint16_t domain;
+ uint16_t vfid;
+ void *bar0;
+ void *bar2;
+ void *bar4;
+};
+
+struct ssowvf_identify {
+ uint16_t domain;
+ uint16_t vfid;
+};
+
+struct ssodev {
+ uint8_t total_ssovfs;
+ uint8_t total_ssowvfs;
+ struct ssovf_res grp[SSO_MAX_VHGRP];
+ struct ssowvf_res hws[SSO_MAX_VHWS];
+};
+
+static struct ssodev sdev;
+
+/* Interface functions */
+int
+octeontx_ssovf_info(struct octeontx_ssovf_info *info)
+{
+ uint8_t i;
+ uint16_t domain;
+
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY || info == NULL)
+ return -EINVAL;
+
+ if (sdev.total_ssovfs == 0 || sdev.total_ssowvfs == 0)
+ return -ENODEV;
+
+ domain = sdev.grp[0].domain;
+ for (i = 0; i < sdev.total_ssovfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.grp[i].vfid != i ||
+ sdev.grp[i].bar0 == NULL ||
+ sdev.grp[i].domain != domain) {
+ mbox_log_err("GRP error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.grp[i].vfid,
+ domain, sdev.grp[i].domain,
+ sdev.grp[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ for (i = 0; i < sdev.total_ssowvfs; i++) {
+ /* Check vfid's are contiguous and belong to same domain */
+ if (sdev.hws[i].vfid != i ||
+ sdev.hws[i].bar0 == NULL ||
+ sdev.hws[i].domain != domain) {
+ mbox_log_err("HWS error, vfid=%d/%d domain=%d/%d %p",
+ i, sdev.hws[i].vfid,
+ domain, sdev.hws[i].domain,
+ sdev.hws[i].bar0);
+ return -EINVAL;
+ }
+ }
+
+ info->domain = domain;
+ info->total_ssovfs = sdev.total_ssovfs;
+ info->total_ssowvfs = sdev.total_ssowvfs;
+ return 0;
+}
+
+void *
+octeontx_ssovf_bar(enum octeontx_ssovf_type type, uint8_t id, uint8_t bar)
+{
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY ||
+ type > OCTEONTX_SSO_HWS)
+ return NULL;
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ if (id >= sdev.total_ssovfs)
+ return NULL;
+ } else {
+ if (id >= sdev.total_ssowvfs)
+ return NULL;
+ }
+
+ if (type == OCTEONTX_SSO_GROUP) {
+ switch (bar) {
+ case 0:
+ return sdev.grp[id].bar0;
+ case 2:
+ return sdev.grp[id].bar2;
+ default:
+ return NULL;
+ }
+ } else {
+ switch (bar) {
+ case 0:
+ return sdev.hws[id].bar0;
+ case 2:
+ return sdev.hws[id].bar2;
+ case 4:
+ return sdev.hws[id].bar4;
+ default:
+ return NULL;
+ }
+ }
+}
+
+/* SSOWVF pcie device aka event port probe */
+
+static int
+ssowvf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint16_t vfid;
+ struct ssowvf_res *res;
+ struct ssowvf_identify *id;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL ||
+ pci_dev->mem_resource[4].addr == NULL) {
+ mbox_log_err("Empty bars %p %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr,
+ pci_dev->mem_resource[4].addr);
+ return -ENODEV;
+ }
+
+ if (pci_dev->mem_resource[4].len != SSOW_BAR4_LEN) {
+ mbox_log_err("Bar4 len mismatch %d != %d",
+ SSOW_BAR4_LEN, (int)pci_dev->mem_resource[4].len);
+ return -EINVAL;
+ }
+
+ id = pci_dev->mem_resource[4].addr;
+ vfid = id->vfid;
+ if (vfid >= SSO_MAX_VHWS) {
+ mbox_log_err("Invalid vfid(%d/%d)", vfid, SSO_MAX_VHWS);
+ return -EINVAL;
+ }
+
+ res = &sdev.hws[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->bar4 = pci_dev->mem_resource[4].addr;
+ res->domain = id->domain;
+
+ sdev.total_ssowvfs++;
+ rte_wmb();
+ mbox_log_dbg("Domain=%d hws=%d total_ssowvfs=%d", res->domain,
+ res->vfid, sdev.total_ssowvfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssowvf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOWS_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssowvf = {
+ .id_table = pci_ssowvf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssowvf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssowvf, pci_ssowvf);
+
+/* SSOVF pcie device aka event queue probe */
+
+static int
+ssovf_probe(struct rte_pci_driver *pci_drv, struct rte_pci_device *pci_dev)
+{
+ uint64_t val;
+ uint16_t vfid;
+ uint8_t *idreg;
+ struct ssovf_res *res;
+
+ RTE_SET_USED(pci_drv);
+
+ /* For secondary processes, the primary has done all the work */
+ if (rte_eal_process_type() != RTE_PROC_PRIMARY)
+ return 0;
+
+ if (pci_dev->mem_resource[0].addr == NULL ||
+ pci_dev->mem_resource[2].addr == NULL) {
+ mbox_log_err("Empty bars %p %p",
+ pci_dev->mem_resource[0].addr,
+ pci_dev->mem_resource[2].addr);
+ return -ENODEV;
+ }
+ idreg = pci_dev->mem_resource[0].addr;
+ idreg += SSO_VHGRP_AQ_THR;
+ val = rte_read64(idreg);
+
+ /* Write back the default value of aq_thr */
+ rte_write64((1ULL << 33) - 1, idreg);
+ vfid = (val >> 16) & 0xffff;
+ if (vfid >= SSO_MAX_VHGRP) {
+ mbox_log_err("Invalid vfid (%d/%d)", vfid, SSO_MAX_VHGRP);
+ return -EINVAL;
+ }
+
+ res = &sdev.grp[vfid];
+ res->vfid = vfid;
+ res->bar0 = pci_dev->mem_resource[0].addr;
+ res->bar2 = pci_dev->mem_resource[2].addr;
+ res->domain = val & 0xffff;
+
+ sdev.total_ssovfs++;
+ rte_wmb();
+ mbox_log_dbg("Domain=%d group=%d total_ssovfs=%d", res->domain,
+ res->vfid, sdev.total_ssovfs);
+ return 0;
+}
+
+static const struct rte_pci_id pci_ssovf_map[] = {
+ {
+ RTE_PCI_DEVICE(PCI_VENDOR_ID_CAVIUM,
+ PCI_DEVICE_ID_OCTEONTX_SSOGRP_VF)
+ },
+ {
+ .vendor_id = 0,
+ },
+};
+
+static struct rte_pci_driver pci_ssovf = {
+ .id_table = pci_ssovf_map,
+ .drv_flags = RTE_PCI_DRV_NEED_MAPPING,
+ .probe = ssovf_probe,
+};
+
+RTE_PMD_REGISTER_PCI(octeontx_ssovf, pci_ssovf);
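Layers above (the mbox code in this directory and the event device driver) discover the probed resources through octeontx_ssovf_info() and octeontx_ssovf_bar(). A short sketch of that query path, assuming both VF types were probed in the primary process (helper name is illustrative):

#include "octeontx_mbox.h"
#include "octeontx_pool_logs.h"

static void *sso_group0_bar0(void)
{
	struct octeontx_ssovf_info info;

	/* Fails unless all vfids are contiguous and in one domain. */
	if (octeontx_ssovf_info(&info) < 0)
		return NULL;

	mbox_log_dbg("domain=%d groups=%d workslots=%d",
		     info.domain, info.total_ssovfs, info.total_ssowvfs);

	/* BAR0 of SSO group 0; NULL if the id or bar is out of range. */
	return octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, 0, 0);
}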
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx.c b/drivers/mempool/octeontx/rte_mempool_octeontx.c
new file mode 100644
index 00000000..e89355cd
--- /dev/null
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx.c
@@ -0,0 +1,253 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright (C) 2017 Cavium Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *   * Neither the name of Cavium Inc nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include <stdio.h>
+#include <rte_mempool.h>
+#include <rte_malloc.h>
+#include <rte_mbuf.h>
+
+#include "octeontx_fpavf.h"
+
+/*
+ * Per-pool descriptor.
+ * Links a mempool with the corresponding memzone that provides
+ * the memory backing the pool's elements.
+ */
+struct octeontx_pool_info {
+ const struct rte_mempool *mp;
+ uintptr_t mz_addr;
+
+ SLIST_ENTRY(octeontx_pool_info) link;
+};
+
+SLIST_HEAD(octeontx_pool_list, octeontx_pool_info);
+
+/* List of the allocated pools */
+static struct octeontx_pool_list octeontx_pool_head =
+ SLIST_HEAD_INITIALIZER(octeontx_pool_head);
+/* Spinlock to protect pool list */
+static rte_spinlock_t pool_list_lock = RTE_SPINLOCK_INITIALIZER;
+
+static int
+octeontx_fpavf_alloc(struct rte_mempool *mp)
+{
+ uintptr_t pool;
+ struct octeontx_pool_info *pool_info;
+ uint32_t memseg_count = mp->size;
+ uint32_t object_size;
+ uintptr_t va_start;
+ int rc = 0;
+
+ rte_spinlock_lock(&pool_list_lock);
+ SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+ if (pool_info->mp == mp)
+ break;
+ }
+ if (pool_info == NULL) {
+ rte_spinlock_unlock(&pool_list_lock);
+ return -ENXIO;
+ }
+
+ /* virtual hugepage mapped addr */
+ va_start = pool_info->mz_addr;
+ rte_spinlock_unlock(&pool_list_lock);
+
+ object_size = mp->elt_size + mp->header_size + mp->trailer_size;
+
+ pool = octeontx_fpa_bufpool_create(object_size, memseg_count,
+ OCTEONTX_FPAVF_BUF_OFFSET,
+ (char **)&va_start,
+ mp->socket_id);
+ rc = octeontx_fpa_bufpool_block_size(pool);
+ if (rc < 0)
+ goto _end;
+
+ if ((uint32_t)rc != object_size)
+ fpavf_log_err("buffer size mismatch: %d instead of %u\n",
+ rc, object_size);
+
+ fpavf_log_info("Pool created %p with .. ", (void *)pool);
+ fpavf_log_info("obj_sz %d, cnt %d\n", object_size, memseg_count);
+
+ /* assign pool handle to mempool */
+ mp->pool_id = (uint64_t)pool;
+
+ return 0;
+
+_end:
+ return rc;
+}
+
+static void
+octeontx_fpavf_free(struct rte_mempool *mp)
+{
+ struct octeontx_pool_info *pool_info;
+ uintptr_t pool;
+
+ pool = (uintptr_t)mp->pool_id;
+
+ rte_spinlock_lock(&pool_list_lock);
+ SLIST_FOREACH(pool_info, &octeontx_pool_head, link) {
+ if (pool_info->mp == mp)
+ break;
+ }
+
+ if (pool_info == NULL) {
+ rte_spinlock_unlock(&pool_list_lock);
+ rte_panic("%s: trying to free pool with no valid metadata",
+ __func__);
+ }
+
+ SLIST_REMOVE(&octeontx_pool_head, pool_info, octeontx_pool_info, link);
+ rte_spinlock_unlock(&pool_list_lock);
+
+ rte_free(pool_info);
+ octeontx_fpa_bufpool_destroy(pool, mp->socket_id);
+}
+
+static __rte_always_inline void *
+octeontx_fpa_bufpool_alloc(uintptr_t handle)
+{
+ return (void *)(uintptr_t)fpavf_read64((void *)(handle +
+ FPA_VF_VHAURA_OP_ALLOC(0)));
+}
+
+static __rte_always_inline void
+octeontx_fpa_bufpool_free(uintptr_t handle, void *buf)
+{
+ uint64_t free_addr = FPA_VF_FREE_ADDRS_S(FPA_VF_VHAURA_OP_FREE(0),
+ 0 /* DWB */, 1 /* FABS */);
+
+ fpavf_write64((uintptr_t)buf, (void *)(uintptr_t)(handle + free_addr));
+}
+
+static int
+octeontx_fpavf_enqueue(struct rte_mempool *mp, void * const *obj_table,
+ unsigned int n)
+{
+ uintptr_t pool;
+ unsigned int index;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++)
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+
+ return 0;
+}
+
+static int
+octeontx_fpavf_dequeue(struct rte_mempool *mp, void **obj_table,
+ unsigned int n)
+{
+ unsigned int index;
+ uintptr_t pool;
+ void *obj;
+
+ pool = (uintptr_t)mp->pool_id;
+ /* Get pool bar address from handle */
+ pool &= ~(uint64_t)FPA_GPOOL_MASK;
+ for (index = 0; index < n; index++, obj_table++) {
+ obj = octeontx_fpa_bufpool_alloc(pool);
+ if (obj == NULL) {
+ /*
+			 * Failed to allocate the requested number of objects
+			 * from the pool. The current pool implementation
+			 * requires either completing the entire request or
+			 * failing it, so return the already-allocated buffers
+			 * to the pool and report an error.
+ */
+ for (; index > 0; index--) {
+ obj_table--;
+ octeontx_fpa_bufpool_free(pool, *obj_table);
+ }
+ return -ENOMEM;
+ }
+ *obj_table = obj;
+ }
+
+ return 0;
+}
+
+static unsigned int
+octeontx_fpavf_get_count(const struct rte_mempool *mp)
+{
+ uintptr_t pool;
+
+ pool = (uintptr_t)mp->pool_id;
+
+ return octeontx_fpa_bufpool_free_count(pool);
+}
+
+static int
+octeontx_fpavf_get_capabilities(const struct rte_mempool *mp,
+ unsigned int *flags)
+{
+ RTE_SET_USED(mp);
+ *flags |= (MEMPOOL_F_CAPA_PHYS_CONTIG |
+ MEMPOOL_F_CAPA_BLK_ALIGNED_OBJECTS);
+ return 0;
+}
+
+static int
+octeontx_fpavf_register_memory_area(const struct rte_mempool *mp,
+ char *vaddr, rte_iova_t paddr, size_t len)
+{
+ struct octeontx_pool_info *pool_info;
+
+ RTE_SET_USED(paddr);
+ RTE_SET_USED(len);
+
+ pool_info = rte_malloc("octeontx_pool_info", sizeof(*pool_info), 0);
+ if (pool_info == NULL)
+ return -ENOMEM;
+
+ pool_info->mp = mp;
+ pool_info->mz_addr = (uintptr_t)vaddr;
+ rte_spinlock_lock(&pool_list_lock);
+ SLIST_INSERT_HEAD(&octeontx_pool_head, pool_info, link);
+ rte_spinlock_unlock(&pool_list_lock);
+ return 0;
+}
+
+static struct rte_mempool_ops octeontx_fpavf_ops = {
+ .name = "octeontx_fpavf",
+ .alloc = octeontx_fpavf_alloc,
+ .free = octeontx_fpavf_free,
+ .enqueue = octeontx_fpavf_enqueue,
+ .dequeue = octeontx_fpavf_dequeue,
+ .get_count = octeontx_fpavf_get_count,
+ .get_capabilities = octeontx_fpavf_get_capabilities,
+ .register_memory_area = octeontx_fpavf_register_memory_area,
+};
+
+MEMPOOL_REGISTER_OPS(octeontx_fpavf_ops);
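An application binds this handler by name after creating an empty mempool; populating the pool then triggers register_memory_area() and alloc() above. A hedged sketch using the generic rte_mempool API of this release (the pool name and sizes are illustrative):

#include <rte_mempool.h>

static struct rte_mempool *
octeontx_pool_demo(unsigned int n, unsigned int elt_sz)
{
	struct rte_mempool *mp;

	mp = rte_mempool_create_empty("octeontx_pool", n, elt_sz,
				      0 /* cache */, 0 /* priv size */,
				      SOCKET_ID_ANY, 0 /* flags */);
	if (mp == NULL)
		return NULL;

	/* Bind the hardware-backed ops registered above. */
	if (rte_mempool_set_ops_byname(mp, "octeontx_fpavf", NULL) != 0 ||
	    rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return NULL;
	}
	return mp;
}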
diff --git a/drivers/mempool/octeontx/rte_mempool_octeontx_version.map b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
new file mode 100644
index 00000000..fe8cdeca
--- /dev/null
+++ b/drivers/mempool/octeontx/rte_mempool_octeontx_version.map
@@ -0,0 +1,9 @@
+DPDK_17.11 {
+ global:
+
+ octeontx_ssovf_info;
+ octeontx_ssovf_bar;
+ octeontx_ssovf_mbox_send;
+
+ local: *;
+};