aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/net/softnic
diff options
context:
space:
mode:
authorLuca Boccassi <luca.boccassi@gmail.com>2018-11-01 11:59:50 +0000
committerLuca Boccassi <luca.boccassi@gmail.com>2018-11-01 12:00:19 +0000
commit8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)
tree208e3bc33c220854d89d010e3abf720a2e62e546 /drivers/net/softnic
parentb63264c8342e6a1b6971c79550d2af2024b6a4de (diff)
New upstream version 18.11-rc1upstream/18.11-rc1
Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/softnic')
-rw-r--r--drivers/net/softnic/Makefile4
-rw-r--r--drivers/net/softnic/conn.c1
-rw-r--r--drivers/net/softnic/hash_func.h359
-rw-r--r--drivers/net/softnic/hash_func_arm64.h261
-rw-r--r--drivers/net/softnic/meson.build8
-rw-r--r--drivers/net/softnic/rte_eth_softnic.c42
-rw-r--r--drivers/net/softnic/rte_eth_softnic_action.c67
-rw-r--r--drivers/net/softnic/rte_eth_softnic_cli.c883
-rw-r--r--drivers/net/softnic/rte_eth_softnic_cryptodev.c125
-rw-r--r--drivers/net/softnic/rte_eth_softnic_flow.c2287
-rw-r--r--drivers/net/softnic/rte_eth_softnic_internals.h219
-rw-r--r--drivers/net/softnic/rte_eth_softnic_meter.c728
-rw-r--r--drivers/net/softnic/rte_eth_softnic_pipeline.c161
-rw-r--r--drivers/net/softnic/rte_eth_softnic_thread.c118
14 files changed, 4553 insertions, 710 deletions
diff --git a/drivers/net/softnic/Makefile b/drivers/net/softnic/Makefile
index ea9b65f4..484e76cd 100644
--- a/drivers/net/softnic/Makefile
+++ b/drivers/net/softnic/Makefile
@@ -14,6 +14,7 @@ CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lrte_pipeline -lrte_port -lrte_table
LDLIBS += -lrte_eal -lrte_mbuf -lrte_mempool -lrte_ring
LDLIBS += -lrte_ethdev -lrte_net -lrte_kvargs -lrte_sched
+LDLIBS += -lrte_cryptodev
LDLIBS += -lrte_bus_vdev
EXPORT_MAP := rte_pmd_softnic_version.map
@@ -33,6 +34,9 @@ SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_action.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_pipeline.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_thread.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cli.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_flow.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_meter.c
+SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += rte_eth_softnic_cryptodev.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += parser.c
SRCS-$(CONFIG_RTE_LIBRTE_PMD_SOFTNIC) += conn.c
diff --git a/drivers/net/softnic/conn.c b/drivers/net/softnic/conn.c
index 990cf40f..8b665808 100644
--- a/drivers/net/softnic/conn.c
+++ b/drivers/net/softnic/conn.c
@@ -8,7 +8,6 @@
#include <unistd.h>
#include <sys/types.h>
-#define __USE_GNU
#include <sys/socket.h>
#include <sys/epoll.h>
diff --git a/drivers/net/softnic/hash_func.h b/drivers/net/softnic/hash_func.h
deleted file mode 100644
index 198d2b20..00000000
--- a/drivers/net/softnic/hash_func.h
+++ /dev/null
@@ -1,359 +0,0 @@
-/* SPDX-License-Identifier: BSD-3-Clause
- * Copyright(c) 2010-2018 Intel Corporation
- */
-
-#ifndef __INCLUDE_HASH_FUNC_H__
-#define __INCLUDE_HASH_FUNC_H__
-
-#include <rte_common.h>
-
-static inline uint64_t
-hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = seed ^ (k[0] & m[0]);
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
-
- xor0 ^= k[2] & m[2];
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
-
- xor0 ^= xor1;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
-
- xor0 ^= xor1;
-
- xor0 ^= k[4] & m[4];
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
-
- xor0 ^= xor1;
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
-
- xor0 ^= xor1;
- xor2 ^= k[6] & m[6];
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-static inline uint64_t
-hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t xor0, xor1, xor2, xor3;
-
- xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
- xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
- xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
-
- xor0 ^= xor1;
- xor2 ^= xor3;
-
- xor0 ^= xor2;
-
- return (xor0 >> 32) ^ xor0;
-}
-
-#if defined(RTE_ARCH_X86_64)
-
-#include <x86intrin.h>
-
-static inline uint64_t
-hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t crc0;
-
- crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, crc0, crc1;
-
- k0 = k[0] & m[0];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc0 = _mm_crc32_u64(crc0, k2);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = k2 >> 32;
-
- crc0 = _mm_crc32_u64(crc0, crc1);
- crc1 = _mm_crc32_u64(crc2, crc3);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc0 = _mm_crc32_u64(crc0, crc1);
- crc1 = _mm_crc32_u64(crc2, crc3);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, k5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
- crc5 = k5 >> 32;
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
-
- crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
-
- crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
- crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
-
- crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
- crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-#define hash_default_key8 hash_crc_key8
-#define hash_default_key16 hash_crc_key16
-#define hash_default_key24 hash_crc_key24
-#define hash_default_key32 hash_crc_key32
-#define hash_default_key40 hash_crc_key40
-#define hash_default_key48 hash_crc_key48
-#define hash_default_key56 hash_crc_key56
-#define hash_default_key64 hash_crc_key64
-
-#elif defined(RTE_ARCH_ARM64)
-#include "hash_func_arm64.h"
-#else
-
-#define hash_default_key8 hash_xor_key8
-#define hash_default_key16 hash_xor_key16
-#define hash_default_key24 hash_xor_key24
-#define hash_default_key32 hash_xor_key32
-#define hash_default_key40 hash_xor_key40
-#define hash_default_key48 hash_xor_key48
-#define hash_default_key56 hash_xor_key56
-#define hash_default_key64 hash_xor_key64
-
-#endif
-
-#endif
diff --git a/drivers/net/softnic/hash_func_arm64.h b/drivers/net/softnic/hash_func_arm64.h
deleted file mode 100644
index ae6c0f41..00000000
--- a/drivers/net/softnic/hash_func_arm64.h
+++ /dev/null
@@ -1,261 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2017 Linaro Limited. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-#ifndef __HASH_FUNC_ARM64_H__
-#define __HASH_FUNC_ARM64_H__
-
-#define _CRC32CX(crc, val) \
- __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
-
-static inline uint64_t
-hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key;
- uint64_t *m = mask;
- uint32_t crc0;
-
- crc0 = seed;
- _CRC32CX(crc0, k[0] & m[0]);
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0;
- uint64_t *m = mask;
- uint32_t crc0, crc1;
-
- k0 = k[0] & m[0];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- _CRC32CX(crc0, k2);
-
- crc0 ^= crc1;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
-
- _CRC32CX(crc0, crc1);
- _CRC32CX(crc2, crc3);
-
- crc0 ^= crc2;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- _CRC32CX(crc0, crc1);
- _CRC32CX(crc2, crc3);
-
- crc0 ^= crc2;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, k5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- crc4 = k5;
- _CRC32CX(crc4, k[6] & m[6]);
- crc5 = k5 >> 32;
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-static inline uint64_t
-hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
- uint64_t seed)
-{
- uint64_t *k = key, k0, k2, k5;
- uint64_t *m = mask;
- uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
-
- k0 = k[0] & m[0];
- k2 = k[2] & m[2];
- k5 = k[5] & m[5];
-
- crc0 = k0;
- _CRC32CX(crc0, seed);
- crc1 = k0 >> 32;
- _CRC32CX(crc1, k[1] & m[1]);
-
- crc2 = k2;
- _CRC32CX(crc2, k[3] & m[3]);
- crc3 = k2 >> 32;
- _CRC32CX(crc3, k[4] & m[4]);
-
- crc4 = k5;
- _CRC32CX(crc4, k[6] & m[6]);
- crc5 = k5 >> 32;
- _CRC32CX(crc5, k[7] & m[7]);
-
- _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
- _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
-
- crc0 ^= crc3;
-
- return crc0;
-}
-
-#define hash_default_key8 hash_crc_key8
-#define hash_default_key16 hash_crc_key16
-#define hash_default_key24 hash_crc_key24
-#define hash_default_key32 hash_crc_key32
-#define hash_default_key40 hash_crc_key40
-#define hash_default_key48 hash_crc_key48
-#define hash_default_key56 hash_crc_key56
-#define hash_default_key64 hash_crc_key64
-
-#endif
diff --git a/drivers/net/softnic/meson.build b/drivers/net/softnic/meson.build
index ff982274..da249c06 100644
--- a/drivers/net/softnic/meson.build
+++ b/drivers/net/softnic/meson.build
@@ -1,6 +1,9 @@
# SPDX-License-Identifier: BSD-3-Clause
# Copyright(c) 2018 Intel Corporation
+if host_machine.system() != 'linux'
+ build = false
+endif
allow_experimental_apis = true
install_headers('rte_eth_softnic.h')
sources = files('rte_eth_softnic_tm.c',
@@ -13,6 +16,9 @@ sources = files('rte_eth_softnic_tm.c',
'rte_eth_softnic_pipeline.c',
'rte_eth_softnic_thread.c',
'rte_eth_softnic_cli.c',
+ 'rte_eth_softnic_flow.c',
+ 'rte_eth_softnic_meter.c',
+ 'rte_eth_softnic_cryptodev.c',
'parser.c',
'conn.c')
-deps += ['pipeline', 'port', 'table', 'sched']
+deps += ['pipeline', 'port', 'table', 'sched', 'cryptodev']
diff --git a/drivers/net/softnic/rte_eth_softnic.c b/drivers/net/softnic/rte_eth_softnic.c
index 30fb3952..743a7c58 100644
--- a/drivers/net/softnic/rte_eth_softnic.c
+++ b/drivers/net/softnic/rte_eth_softnic.c
@@ -14,6 +14,7 @@
#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_tm_driver.h>
+#include <rte_mtr_driver.h>
#include "rte_eth_softnic.h"
#include "rte_eth_softnic_internals.h"
@@ -27,7 +28,7 @@
#define PMD_PARAM_TM_QSIZE2 "tm_qsize2"
#define PMD_PARAM_TM_QSIZE3 "tm_qsize3"
-static const char *pmd_valid_args[] = {
+static const char * const pmd_valid_args[] = {
PMD_PARAM_FIRMWARE,
PMD_PARAM_CONN_PORT,
PMD_PARAM_CPU_ID,
@@ -46,7 +47,7 @@ static const char welcome[] =
static const char prompt[] = "softnic> ";
-struct softnic_conn_params conn_params_default = {
+static const struct softnic_conn_params conn_params_default = {
.welcome = welcome,
.prompt = prompt,
.addr = "0.0.0.0",
@@ -73,7 +74,6 @@ static const struct rte_eth_dev_info pmd_dev_info = {
.nb_min = 0,
.nb_align = 1,
},
- .rx_offload_capa = DEV_RX_OFFLOAD_CRC_STRIP,
};
static int pmd_softnic_logtype;
@@ -190,6 +190,7 @@ pmd_dev_stop(struct rte_eth_dev *dev)
softnic_mempool_free(p);
tm_hierarchy_free(p);
+ softnic_mtr_free(p);
}
static void
@@ -206,6 +207,21 @@ pmd_link_update(struct rte_eth_dev *dev __rte_unused,
}
static int
+pmd_filter_ctrl(struct rte_eth_dev *dev __rte_unused,
+ enum rte_filter_type filter_type,
+ enum rte_filter_op filter_op,
+ void *arg)
+{
+ if (filter_type == RTE_ETH_FILTER_GENERIC &&
+ filter_op == RTE_ETH_FILTER_GET) {
+ *(const void **)arg = &pmd_flow_ops;
+ return 0;
+ }
+
+ return -ENOTSUP;
+}
+
+static int
pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
{
*(const struct rte_tm_ops **)arg = &pmd_tm_ops;
@@ -213,6 +229,14 @@ pmd_tm_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
return 0;
}
+static int
+pmd_mtr_ops_get(struct rte_eth_dev *dev __rte_unused, void *arg)
+{
+ *(const struct rte_mtr_ops **)arg = &pmd_mtr_ops;
+
+ return 0;
+}
+
static const struct eth_dev_ops pmd_ops = {
.dev_configure = pmd_dev_configure,
.dev_start = pmd_dev_start,
@@ -222,7 +246,9 @@ static const struct eth_dev_ops pmd_ops = {
.dev_infos_get = pmd_dev_infos_get,
.rx_queue_setup = pmd_rx_queue_setup,
.tx_queue_setup = pmd_tx_queue_setup,
+ .filter_ctrl = pmd_filter_ctrl,
.tm_ops_get = pmd_tm_ops_get,
+ .mtr_ops_get = pmd_mtr_ops_get,
};
static uint16_t
@@ -265,12 +291,14 @@ pmd_init(struct pmd_params *params)
/* Resources */
tm_hierarchy_init(p);
+ softnic_mtr_init(p);
softnic_mempool_init(p);
softnic_swq_init(p);
softnic_link_init(p);
softnic_tmgr_init(p);
softnic_tap_init(p);
+ softnic_cryptodev_init(p);
softnic_port_in_action_profile_init(p);
softnic_table_action_profile_init(p);
softnic_pipeline_init(p);
@@ -319,6 +347,7 @@ pmd_free(struct pmd_internals *p)
softnic_mempool_free(p);
tm_hierarchy_free(p);
+ softnic_mtr_free(p);
rte_free(p);
}
@@ -528,7 +557,6 @@ static int
pmd_remove(struct rte_vdev_device *vdev)
{
struct rte_eth_dev *dev = NULL;
- struct pmd_internals *p;
if (!vdev)
return -EINVAL;
@@ -539,12 +567,12 @@ pmd_remove(struct rte_vdev_device *vdev)
dev = rte_eth_dev_allocated(rte_vdev_device_name(vdev));
if (dev == NULL)
return -ENODEV;
- p = dev->data->dev_private;
/* Free device data structures*/
- rte_free(dev->data);
+ pmd_free(dev->data->dev_private);
+ dev->data->dev_private = NULL; /* already freed */
+ dev->data->mac_addrs = NULL; /* statically allocated */
rte_eth_dev_release_port(dev);
- pmd_free(p);
return 0;
}
diff --git a/drivers/net/softnic/rte_eth_softnic_action.c b/drivers/net/softnic/rte_eth_softnic_action.c
index c25f4dd9..92c744dc 100644
--- a/drivers/net/softnic/rte_eth_softnic_action.c
+++ b/drivers/net/softnic/rte_eth_softnic_action.c
@@ -7,8 +7,8 @@
#include <string.h>
#include <rte_string_fns.h>
+#include <rte_table_hash_func.h>
-#include "hash_func.h"
#include "rte_eth_softnic_internals.h"
/**
@@ -72,35 +72,35 @@ softnic_port_in_action_profile_create(struct pmd_internals *p,
params->lb.f_hash == NULL) {
switch (params->lb.key_size) {
case 8:
- params->lb.f_hash = hash_default_key8;
+ params->lb.f_hash = rte_table_hash_crc_key8;
break;
case 16:
- params->lb.f_hash = hash_default_key16;
+ params->lb.f_hash = rte_table_hash_crc_key16;
break;
case 24:
- params->lb.f_hash = hash_default_key24;
+ params->lb.f_hash = rte_table_hash_crc_key24;
break;
case 32:
- params->lb.f_hash = hash_default_key32;
+ params->lb.f_hash = rte_table_hash_crc_key32;
break;
case 40:
- params->lb.f_hash = hash_default_key40;
+ params->lb.f_hash = rte_table_hash_crc_key40;
break;
case 48:
- params->lb.f_hash = hash_default_key48;
+ params->lb.f_hash = rte_table_hash_crc_key48;
break;
case 56:
- params->lb.f_hash = hash_default_key56;
+ params->lb.f_hash = rte_table_hash_crc_key56;
break;
case 64:
- params->lb.f_hash = hash_default_key64;
+ params->lb.f_hash = rte_table_hash_crc_key64;
break;
default:
@@ -223,35 +223,35 @@ softnic_table_action_profile_create(struct pmd_internals *p,
params->lb.f_hash == NULL) {
switch (params->lb.key_size) {
case 8:
- params->lb.f_hash = hash_default_key8;
+ params->lb.f_hash = rte_table_hash_crc_key8;
break;
case 16:
- params->lb.f_hash = hash_default_key16;
+ params->lb.f_hash = rte_table_hash_crc_key16;
break;
case 24:
- params->lb.f_hash = hash_default_key24;
+ params->lb.f_hash = rte_table_hash_crc_key24;
break;
case 32:
- params->lb.f_hash = hash_default_key32;
+ params->lb.f_hash = rte_table_hash_crc_key32;
break;
case 40:
- params->lb.f_hash = hash_default_key40;
+ params->lb.f_hash = rte_table_hash_crc_key40;
break;
case 48:
- params->lb.f_hash = hash_default_key48;
+ params->lb.f_hash = rte_table_hash_crc_key48;
break;
case 56:
- params->lb.f_hash = hash_default_key56;
+ params->lb.f_hash = rte_table_hash_crc_key56;
break;
case 64:
- params->lb.f_hash = hash_default_key64;
+ params->lb.f_hash = rte_table_hash_crc_key64;
break;
default:
@@ -364,6 +364,39 @@ softnic_table_action_profile_create(struct pmd_internals *p,
}
}
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_TAG,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_DECAP,
+ NULL);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
+ if (params->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ status = rte_table_action_profile_action_register(ap,
+ RTE_TABLE_ACTION_SYM_CRYPTO,
+ &params->sym_crypto);
+
+ if (status) {
+ rte_table_action_profile_free(ap);
+ return NULL;
+ }
+ }
+
status = rte_table_action_profile_freeze(ap);
if (status) {
rte_table_action_profile_free(ap);
diff --git a/drivers/net/softnic/rte_eth_softnic_cli.c b/drivers/net/softnic/rte_eth_softnic_cli.c
index 0c7448cc..c6640d65 100644
--- a/drivers/net/softnic/rte_eth_softnic_cli.c
+++ b/drivers/net/softnic/rte_eth_softnic_cli.c
@@ -9,6 +9,8 @@
#include <rte_common.h>
#include <rte_cycles.h>
+#include <rte_string_fns.h>
+#include <rte_cryptodev.h>
#include "rte_eth_softnic_internals.h"
#include "parser.h"
@@ -1089,6 +1091,67 @@ cmd_tap(struct pmd_internals *softnic,
}
/**
+ * cryptodev <tap_name> dev <device_name> | dev_id <device_id>
+ * queue <n_queues> <queue_size>
+ **/
+
+static void
+cmd_cryptodev(struct pmd_internals *softnic,
+ char **tokens,
+ uint32_t n_tokens,
+ char *out,
+ size_t out_size)
+{
+ struct softnic_cryptodev_params params;
+ char *name;
+
+ memset(&params, 0, sizeof(params));
+ if (n_tokens != 7) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
+ return;
+ }
+
+ name = tokens[1];
+
+ if (strcmp(tokens[2], "dev") == 0)
+ params.dev_name = tokens[3];
+ else if (strcmp(tokens[2], "dev_id") == 0) {
+ if (softnic_parser_read_uint32(&params.dev_id, tokens[3]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "dev_id");
+ return;
+ }
+ } else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "cryptodev");
+ return;
+ }
+
+ if (strcmp(tokens[4], "queue")) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "4");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&params.n_queues, tokens[5]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "q");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&params.queue_size, tokens[6]) < 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "queue_size");
+ return;
+ }
+
+ if (softnic_cryptodev_create(softnic, name, &params) == NULL) {
+ snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
+ return;
+ }
+}
+
+/**
* port in action profile <profile_name>
* [filter match | mismatch offset <key_offset> mask <key_mask> key <key_value> port <port_id>]
* [balance offset <key_offset> mask <key_mask> port <port_id0> ... <port_id15>]
@@ -1272,13 +1335,17 @@ cmd_port_in_action_profile(struct pmd_internals *softnic,
* tc <n_tc>
* stats none | pkts | bytes | both]
* [tm spp <n_subports_per_port> pps <n_pipes_per_subport>]
- * [encap ether | vlan | qinq | mpls | pppoe]
+ * [encap ether | vlan | qinq | mpls | pppoe |
+ * vxlan offset <ether_offset> ipv4 | ipv6 vlan on | off]
* [nat src | dst
* proto udp | tcp]
* [ttl drop | fwd
* stats none | pkts]
* [stats pkts | bytes | both]
* [time]
+ * [tag]
+ * [decap]
+ *
*/
static void
cmd_table_action_profile(struct pmd_internals *softnic,
@@ -1478,6 +1545,8 @@ cmd_table_action_profile(struct pmd_internals *softnic,
if (t0 < n_tokens &&
(strcmp(tokens[t0], "encap") == 0)) {
+ uint32_t n_extra_tokens = 0;
+
if (n_tokens < t0 + 2) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
"action profile encap");
@@ -1494,13 +1563,61 @@ cmd_table_action_profile(struct pmd_internals *softnic,
p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_MPLS;
} else if (strcmp(tokens[t0 + 1], "pppoe") == 0) {
p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE;
+ } else if (strcmp(tokens[t0 + 1], "vxlan") == 0) {
+ if (n_tokens < t0 + 2 + 5) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "action profile encap vxlan");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2], "offset") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "vxlan: offset");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.encap.vxlan.data_offset,
+ tokens[t0 + 2 + 1]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: ether_offset");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 2], "ipv4") == 0)
+ p.encap.vxlan.ip_version = 1;
+ else if (strcmp(tokens[t0 + 2 + 2], "ipv6") == 0)
+ p.encap.vxlan.ip_version = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: ipv4 or ipv6");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 3], "vlan") != 0) {
+ snprintf(out, out_size, MSG_ARG_NOT_FOUND,
+ "vxlan: vlan");
+ return;
+ }
+
+ if (strcmp(tokens[t0 + 2 + 4], "on") == 0)
+ p.encap.vxlan.vlan = 1;
+ else if (strcmp(tokens[t0 + 2 + 4], "off") == 0)
+ p.encap.vxlan.vlan = 0;
+ else {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "vxlan: on or off");
+ return;
+ }
+
+ p.encap.encap_mask = 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN;
+ n_extra_tokens = 5;
+
} else {
snprintf(out, out_size, MSG_ARG_MISMATCH, "encap");
return;
}
-
p.action_mask |= 1LLU << RTE_TABLE_ACTION_ENCAP;
- t0 += 2;
+ t0 += 2 + n_extra_tokens;
} /* encap */
if (t0 < n_tokens &&
@@ -1610,6 +1727,18 @@ cmd_table_action_profile(struct pmd_internals *softnic,
t0 += 1;
} /* time */
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "tag") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_TAG;
+ t0 += 1;
+ } /* tag */
+
+ if (t0 < n_tokens &&
+ (strcmp(tokens[t0], "decap") == 0)) {
+ p.action_mask |= 1LLU << RTE_TABLE_ACTION_DECAP;
+ t0 += 1;
+ } /* decap */
+
if (t0 < n_tokens) {
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
return;
@@ -1682,6 +1811,7 @@ cmd_pipeline(struct pmd_internals *softnic,
* | tmgr <tmgr_name>
* | tap <tap_name> mempool <mempool_name> mtu <mtu>
* | source mempool <mempool_name> file <file_name> bpp <n_bytes_per_pkt>
+ * | cryptodev <cryptodev_name> rxq <queue_id>
* [action <port_in_action_profile_name>]
* [disabled]
*/
@@ -1697,6 +1827,8 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
uint32_t t0;
int enabled, status;
+ memset(&p, 0, sizeof(p));
+
if (n_tokens < 7) {
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
return;
@@ -1735,7 +1867,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_RXQ;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
if (strcmp(tokens[t0 + 2], "rxq") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND, "rxq");
@@ -1758,7 +1890,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_SWQ;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
t0 += 2;
} else if (strcmp(tokens[t0], "tmgr") == 0) {
@@ -1770,7 +1902,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_TMGR;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
t0 += 2;
} else if (strcmp(tokens[t0], "tap") == 0) {
@@ -1782,7 +1914,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_TAP;
- p.dev_name = tokens[t0 + 1];
+ strcpy(p.dev_name, tokens[t0 + 1]);
if (strcmp(tokens[t0 + 2], "mempool") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND,
@@ -1814,8 +1946,6 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
p.type = PORT_IN_SOURCE;
- p.dev_name = NULL;
-
if (strcmp(tokens[t0 + 1], "mempool") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND,
"mempool");
@@ -1846,12 +1976,32 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
}
t0 += 7;
+ } else if (strcmp(tokens[t0], "cryptodev") == 0) {
+ if (n_tokens < t0 + 3) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port in cryptodev");
+ return;
+ }
+
+ p.type = PORT_IN_CRYPTODEV;
+
+ strlcpy(p.dev_name, tokens[t0 + 1], sizeof(p.dev_name));
+ if (softnic_parser_read_uint16(&p.rxq.queue_id,
+ tokens[t0 + 3]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "rxq");
+ return;
+ }
+
+ p.cryptodev.arg_callback = NULL;
+ p.cryptodev.f_callback = NULL;
+
+ t0 += 4;
} else {
snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
return;
}
- p.action_profile_name = NULL;
if (n_tokens > t0 &&
(strcmp(tokens[t0], "action") == 0)) {
if (n_tokens < t0 + 2) {
@@ -1859,7 +2009,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
return;
}
- p.action_profile_name = tokens[t0 + 1];
+ strcpy(p.action_profile_name, tokens[t0 + 1]);
t0 += 2;
}
@@ -1895,6 +2045,7 @@ cmd_pipeline_port_in(struct pmd_internals *softnic,
* | tmgr <tmgr_name>
* | tap <tap_name>
* | sink [file <file_name> pkts <max_n_pkts>]
+ * | cryptodev <cryptodev_name> txq <txq_id> offset <crypto_op_offset>
*/
static void
cmd_pipeline_port_out(struct pmd_internals *softnic,
@@ -1945,7 +2096,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_TXQ;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
if (strcmp(tokens[8], "txq") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND, "txq");
@@ -1966,7 +2117,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_SWQ;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
} else if (strcmp(tokens[6], "tmgr") == 0) {
if (n_tokens != 8) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
@@ -1976,7 +2127,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_TMGR;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
} else if (strcmp(tokens[6], "tap") == 0) {
if (n_tokens != 8) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
@@ -1986,7 +2137,7 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_TAP;
- p.dev_name = tokens[7];
+ strcpy(p.dev_name, tokens[7]);
} else if (strcmp(tokens[6], "sink") == 0) {
if ((n_tokens != 7) && (n_tokens != 11)) {
snprintf(out, out_size, MSG_ARG_MISMATCH,
@@ -1996,8 +2147,6 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
p.type = PORT_OUT_SINK;
- p.dev_name = NULL;
-
if (n_tokens == 7) {
p.sink.file_name = NULL;
p.sink.max_n_pkts = 0;
@@ -2021,6 +2170,40 @@ cmd_pipeline_port_out(struct pmd_internals *softnic,
return;
}
}
+ } else if (strcmp(tokens[6], "cryptodev") == 0) {
+ if (n_tokens != 12) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ p.type = PORT_OUT_CRYPTODEV;
+
+ strlcpy(p.dev_name, tokens[7], sizeof(p.dev_name));
+
+ if (strcmp(tokens[8], "txq")) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ if (softnic_parser_read_uint16(&p.cryptodev.queue_id, tokens[9])
+ != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
+
+ if (strcmp(tokens[10], "offset")) {
+ snprintf(out, out_size, MSG_ARG_MISMATCH,
+ "pipeline port out cryptodev");
+ return;
+ }
+
+ if (softnic_parser_read_uint32(&p.cryptodev.op_offset,
+ tokens[11]) != 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID, "queue_id");
+ return;
+ }
} else {
snprintf(out, out_size, MSG_ARG_INVALID, tokens[0]);
return;
@@ -2064,12 +2247,13 @@ cmd_pipeline_table(struct pmd_internals *softnic,
char *out,
size_t out_size)
{
- uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
struct softnic_table_params p;
char *pipeline_name;
uint32_t t0;
int status;
+ memset(&p, 0, sizeof(p));
+
if (n_tokens < 5) {
snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
return;
@@ -2203,12 +2387,11 @@ cmd_pipeline_table(struct pmd_internals *softnic,
}
if ((softnic_parse_hex_string(tokens[t0 + 5],
- key_mask, &key_mask_size) != 0) ||
+ p.match.hash.key_mask, &key_mask_size) != 0) ||
key_mask_size != p.match.hash.key_size) {
snprintf(out, out_size, MSG_ARG_INVALID, "key_mask");
return;
}
- p.match.hash.key_mask = key_mask;
if (strcmp(tokens[t0 + 6], "offset") != 0) {
snprintf(out, out_size, MSG_ARG_NOT_FOUND, "offset");
@@ -2295,7 +2478,6 @@ cmd_pipeline_table(struct pmd_internals *softnic,
return;
}
- p.action_profile_name = NULL;
if (n_tokens > t0 &&
(strcmp(tokens[t0], "action") == 0)) {
if (n_tokens < t0 + 2) {
@@ -2303,7 +2485,7 @@ cmd_pipeline_table(struct pmd_internals *softnic,
return;
}
- p.action_profile_name = tokens[t0 + 1];
+ strcpy(p.action_profile_name, tokens[t0 + 1]);
t0 += 2;
}
@@ -3176,10 +3358,30 @@ parse_match(char **tokens,
* [label2 <label> <tc> <ttl>
* [label3 <label> <tc> <ttl>]]]
* | pppoe <da> <sa> <session_id>]
+ * | vxlan ether <da> <sa>
+ * [vlan <pcp> <dei> <vid>]
+ * ipv4 <sa> <da> <dscp> <ttl>
+ * | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit>
+ * udp <sp> <dp>
+ * vxlan <vni>]
* [nat ipv4 | ipv6 <addr> <port>]
* [ttl dec | keep]
* [stats]
* [time]
+ * [tag <tag>]
+ * [decap <n>]
+ * [sym_crypto
+ * encrypt | decrypt
+ * type
+ * | cipher
+ * cipher_algo <algo> cipher_key <key> cipher_iv <iv>
+ * | cipher_auth
+ * cipher_algo <algo> cipher_key <key> cipher_iv <iv>
+ * auth_algo <algo> auth_key <key> digest_size <size>
+ * | aead
+ * aead_algo <algo> aead_key <key> aead_iv <iv> aead_aad <aad>
+ * digest_size <size>
+ * data_offset <data_offset>]
*
* where:
* <pa> ::= g | y | r | drop
@@ -3575,6 +3777,122 @@ parse_table_action_encap(char **tokens,
return 1 + 4;
}
+ /* vxlan */
+ if (n_tokens && (strcmp(tokens[0], "vxlan") == 0)) {
+ uint32_t n = 0;
+
+ n_tokens--;
+ tokens++;
+ n++;
+
+ /* ether <da> <sa> */
+ if ((n_tokens < 3) ||
+ strcmp(tokens[0], "ether") ||
+ softnic_parse_mac_addr(tokens[1], &a->encap.vxlan.ether.da) ||
+ softnic_parse_mac_addr(tokens[2], &a->encap.vxlan.ether.sa))
+ return 0;
+
+ n_tokens -= 3;
+ tokens += 3;
+ n += 3;
+
+ /* [vlan <pcp> <dei> <vid>] */
+ if (strcmp(tokens[0], "vlan") == 0) {
+ uint32_t pcp, dei, vid;
+
+ if ((n_tokens < 4) ||
+ softnic_parser_read_uint32(&pcp, tokens[1]) ||
+ (pcp > 7) ||
+ softnic_parser_read_uint32(&dei, tokens[2]) ||
+ (dei > 1) ||
+ softnic_parser_read_uint32(&vid, tokens[3]) ||
+ (vid > 0xFFF))
+ return 0;
+
+ a->encap.vxlan.vlan.pcp = pcp;
+ a->encap.vxlan.vlan.dei = dei;
+ a->encap.vxlan.vlan.vid = vid;
+
+ n_tokens -= 4;
+ tokens += 4;
+ n += 4;
+ }
+
+ /* ipv4 <sa> <da> <dscp> <ttl>
+ | ipv6 <sa> <da> <flow_label> <dscp> <hop_limit> */
+ if (strcmp(tokens[0], "ipv4") == 0) {
+ struct in_addr sa, da;
+ uint8_t dscp, ttl;
+
+ if ((n_tokens < 5) ||
+ softnic_parse_ipv4_addr(tokens[1], &sa) ||
+ softnic_parse_ipv4_addr(tokens[2], &da) ||
+ softnic_parser_read_uint8(&dscp, tokens[3]) ||
+ (dscp > 64) ||
+ softnic_parser_read_uint8(&ttl, tokens[4]))
+ return 0;
+
+ a->encap.vxlan.ipv4.sa = rte_be_to_cpu_32(sa.s_addr);
+ a->encap.vxlan.ipv4.da = rte_be_to_cpu_32(da.s_addr);
+ a->encap.vxlan.ipv4.dscp = dscp;
+ a->encap.vxlan.ipv4.ttl = ttl;
+
+ n_tokens -= 5;
+ tokens += 5;
+ n += 5;
+ } else if (strcmp(tokens[0], "ipv6") == 0) {
+ struct in6_addr sa, da;
+ uint32_t flow_label;
+ uint8_t dscp, hop_limit;
+
+ if ((n_tokens < 6) ||
+ softnic_parse_ipv6_addr(tokens[1], &sa) ||
+ softnic_parse_ipv6_addr(tokens[2], &da) ||
+ softnic_parser_read_uint32(&flow_label, tokens[3]) ||
+ softnic_parser_read_uint8(&dscp, tokens[4]) ||
+ (dscp > 64) ||
+ softnic_parser_read_uint8(&hop_limit, tokens[5]))
+ return 0;
+
+ memcpy(a->encap.vxlan.ipv6.sa, sa.s6_addr, 16);
+ memcpy(a->encap.vxlan.ipv6.da, da.s6_addr, 16);
+ a->encap.vxlan.ipv6.flow_label = flow_label;
+ a->encap.vxlan.ipv6.dscp = dscp;
+ a->encap.vxlan.ipv6.hop_limit = hop_limit;
+
+ n_tokens -= 6;
+ tokens += 6;
+ n += 6;
+ } else
+ return 0;
+
+ /* udp <sp> <dp> */
+ if ((n_tokens < 3) ||
+ strcmp(tokens[0], "udp") ||
+ softnic_parser_read_uint16(&a->encap.vxlan.udp.sp, tokens[1]) ||
+ softnic_parser_read_uint16(&a->encap.vxlan.udp.dp, tokens[2]))
+ return 0;
+
+ n_tokens -= 3;
+ tokens += 3;
+ n += 3;
+
+ /* vxlan <vni> */
+ if ((n_tokens < 2) ||
+ strcmp(tokens[0], "vxlan") ||
+ softnic_parser_read_uint32(&a->encap.vxlan.vxlan.vni, tokens[1]) ||
+ (a->encap.vxlan.vxlan.vni > 0xFFFFFF))
+ return 0;
+
+ n_tokens -= 2;
+ tokens += 2;
+ n += 2;
+
+ a->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
+ a->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ return 1 + n;
+ }
+
return 0;
}
@@ -3669,6 +3987,400 @@ parse_table_action_time(char **tokens,
return 1;
}
+static void
+parse_free_sym_crypto_param_data(struct rte_table_action_sym_crypto_params *p)
+{
+ struct rte_crypto_sym_xform *xform[2] = {NULL};
+ uint32_t i;
+
+ xform[0] = p->xform;
+ if (xform[0])
+ xform[1] = xform[0]->next;
+
+ for (i = 0; i < 2; i++) {
+ if (xform[i] == NULL)
+ continue;
+
+ switch (xform[i]->type) {
+ case RTE_CRYPTO_SYM_XFORM_CIPHER:
+ if (xform[i]->cipher.key.data)
+ free(xform[i]->cipher.key.data);
+ if (p->cipher_auth.cipher_iv.val)
+ free(p->cipher_auth.cipher_iv.val);
+ if (p->cipher_auth.cipher_iv_update.val)
+ free(p->cipher_auth.cipher_iv_update.val);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AUTH:
+ if (xform[i]->auth.key.data)
+ free(xform[i]->cipher.key.data);
+ if (p->cipher_auth.auth_iv.val)
+ free(p->cipher_auth.cipher_iv.val);
+ if (p->cipher_auth.auth_iv_update.val)
+ free(p->cipher_auth.cipher_iv_update.val);
+ break;
+ case RTE_CRYPTO_SYM_XFORM_AEAD:
+ if (xform[i]->aead.key.data)
+ free(xform[i]->cipher.key.data);
+ if (p->aead.iv.val)
+ free(p->aead.iv.val);
+ if (p->aead.aad.val)
+ free(p->aead.aad.val);
+ break;
+ default:
+ continue;
+ }
+ }
+
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_cipher;
+ int status;
+ size_t len;
+
+ if (n_tokens < 7 || strcmp(tokens[1], "cipher_algo") ||
+ strcmp(tokens[3], "cipher_key") ||
+ strcmp(tokens[5], "cipher_iv"))
+ return NULL;
+
+ xform_cipher = calloc(1, sizeof(*xform_cipher));
+ if (xform_cipher == NULL)
+ return NULL;
+
+ xform_cipher->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
+ xform_cipher->cipher.op = encrypt ? RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ /* cipher_algo */
+ status = rte_cryptodev_get_cipher_algo_enum(
+ &xform_cipher->cipher.algo, tokens[2]);
+ if (status < 0)
+ goto error_exit;
+
+ /* cipher_key */
+ len = strlen(tokens[4]);
+ xform_cipher->cipher.key.data = calloc(1, len / 2 + 1);
+ if (xform_cipher->cipher.key.data == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[4],
+ xform_cipher->cipher.key.data,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher->cipher.key.length = (uint16_t)len;
+
+ /* cipher_iv */
+ len = strlen(tokens[6]);
+
+ p->cipher_auth.cipher_iv.val = calloc(1, len / 2 + 1);
+ if (p->cipher_auth.cipher_iv.val == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[6],
+ p->cipher_auth.cipher_iv.val,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher->cipher.iv.length = (uint16_t)len;
+ xform_cipher->cipher.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET;
+ p->cipher_auth.cipher_iv.length = (uint32_t)len;
+ *used_n_tokens = 7;
+
+ return xform_cipher;
+
+error_exit:
+ if (xform_cipher->cipher.key.data)
+ free(xform_cipher->cipher.key.data);
+
+ if (p->cipher_auth.cipher_iv.val) {
+ free(p->cipher_auth.cipher_iv.val);
+ p->cipher_auth.cipher_iv.val = NULL;
+ }
+
+ free(xform_cipher);
+
+ return NULL;
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_cipher_auth(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_cipher;
+ struct rte_crypto_sym_xform *xform_auth;
+ int status;
+ size_t len;
+
+ if (n_tokens < 13 ||
+ strcmp(tokens[7], "auth_algo") ||
+ strcmp(tokens[9], "auth_key") ||
+ strcmp(tokens[11], "digest_size"))
+ return NULL;
+
+ xform_auth = calloc(1, sizeof(*xform_auth));
+ if (xform_auth == NULL)
+ return NULL;
+
+ xform_auth->type = RTE_CRYPTO_SYM_XFORM_AUTH;
+ xform_auth->auth.op = encrypt ? RTE_CRYPTO_AUTH_OP_GENERATE :
+ RTE_CRYPTO_AUTH_OP_VERIFY;
+
+ /* auth_algo */
+ status = rte_cryptodev_get_auth_algo_enum(&xform_auth->auth.algo,
+ tokens[8]);
+ if (status < 0)
+ goto error_exit;
+
+ /* auth_key */
+ len = strlen(tokens[10]);
+ xform_auth->auth.key.data = calloc(1, len / 2 + 1);
+ if (xform_auth->auth.key.data == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[10],
+ xform_auth->auth.key.data, (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_auth->auth.key.length = (uint16_t)len;
+
+ if (strcmp(tokens[11], "digest_size"))
+ goto error_exit;
+
+ status = softnic_parser_read_uint16(&xform_auth->auth.digest_length,
+ tokens[12]);
+ if (status < 0)
+ goto error_exit;
+
+ xform_cipher = parse_table_action_cipher(p, tokens, 7, encrypt,
+ used_n_tokens);
+ if (xform_cipher == NULL)
+ goto error_exit;
+
+ *used_n_tokens += 6;
+
+ if (encrypt) {
+ xform_cipher->next = xform_auth;
+ return xform_cipher;
+ } else {
+ xform_auth->next = xform_cipher;
+ return xform_auth;
+ }
+
+error_exit:
+ if (xform_auth->auth.key.data)
+ free(xform_auth->auth.key.data);
+ if (p->cipher_auth.auth_iv.val) {
+ free(p->cipher_auth.auth_iv.val);
+ p->cipher_auth.auth_iv.val = 0;
+ }
+
+ free(xform_auth);
+
+ return NULL;
+}
+
+static struct rte_crypto_sym_xform *
+parse_table_action_aead(struct rte_table_action_sym_crypto_params *p,
+ char **tokens, uint32_t n_tokens, uint32_t encrypt,
+ uint32_t *used_n_tokens)
+{
+ struct rte_crypto_sym_xform *xform_aead;
+ int status;
+ size_t len;
+
+ if (n_tokens < 11 || strcmp(tokens[1], "aead_algo") ||
+ strcmp(tokens[3], "aead_key") ||
+ strcmp(tokens[5], "aead_iv") ||
+ strcmp(tokens[7], "aead_aad") ||
+ strcmp(tokens[9], "digest_size"))
+ return NULL;
+
+ xform_aead = calloc(1, sizeof(*xform_aead));
+ if (xform_aead == NULL)
+ return NULL;
+
+ xform_aead->type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ xform_aead->aead.op = encrypt ? RTE_CRYPTO_AEAD_OP_ENCRYPT :
+ RTE_CRYPTO_AEAD_OP_DECRYPT;
+
+ /* aead_algo */
+ status = rte_cryptodev_get_aead_algo_enum(&xform_aead->aead.algo,
+ tokens[2]);
+ if (status < 0)
+ goto error_exit;
+
+ /* aead_key */
+ len = strlen(tokens[4]);
+ xform_aead->aead.key.data = calloc(1, len / 2 + 1);
+ if (xform_aead->aead.key.data == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[4], xform_aead->aead.key.data,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.key.length = (uint16_t)len;
+
+ /* aead_iv */
+ len = strlen(tokens[6]);
+ p->aead.iv.val = calloc(1, len / 2 + 1);
+ if (p->aead.iv.val == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[6], p->aead.iv.val,
+ (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.iv.length = (uint16_t)len;
+ xform_aead->aead.iv.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET;
+ p->aead.iv.length = (uint32_t)len;
+
+ /* aead_aad */
+ len = strlen(tokens[8]);
+ p->aead.aad.val = calloc(1, len / 2 + 1);
+ if (p->aead.aad.val == NULL)
+ goto error_exit;
+
+ status = softnic_parse_hex_string(tokens[8], p->aead.aad.val, (uint32_t *)&len);
+ if (status < 0)
+ goto error_exit;
+
+ xform_aead->aead.aad_length = (uint16_t)len;
+ p->aead.aad.length = (uint32_t)len;
+
+ /* digest_size */
+ status = softnic_parser_read_uint16(&xform_aead->aead.digest_length,
+ tokens[10]);
+ if (status < 0)
+ goto error_exit;
+
+ *used_n_tokens = 11;
+
+ return xform_aead;
+
+error_exit:
+ if (xform_aead->aead.key.data)
+ free(xform_aead->aead.key.data);
+ if (p->aead.iv.val) {
+ free(p->aead.iv.val);
+ p->aead.iv.val = NULL;
+ }
+ if (p->aead.aad.val) {
+ free(p->aead.aad.val);
+ p->aead.aad.val = NULL;
+ }
+
+ free(xform_aead);
+
+ return NULL;
+}
+
+
/*
 * Parse the "sym_crypto encrypt|decrypt type cipher|cipher_auth|aead ...
 * data_offset <offset>" action clause into a->sym_crypto and build the
 * crypto xform chain via the per-type helpers.
 *
 * Returns the number of tokens consumed, or 0 on failure (any xform data
 * already allocated is released via parse_free_sym_crypto_param_data()).
 */
static uint32_t
parse_table_action_sym_crypto(char **tokens,
	uint32_t n_tokens,
	struct softnic_table_rule_action *a)
{
	struct rte_table_action_sym_crypto_params *p = &a->sym_crypto;
	struct rte_crypto_sym_xform *xform = NULL;
	uint32_t used_n_tokens;
	uint32_t encrypt;
	int status;

	if ((n_tokens < 12) ||
		strcmp(tokens[0], "sym_crypto") ||
		strcmp(tokens[2], "type"))
		return 0;

	memset(p, 0, sizeof(*p));

	/* Anything other than "encrypt" selects the decrypt direction */
	if (strcmp(tokens[1], "encrypt") == 0)
		encrypt = 1;
	else
		encrypt = 0;

	/* data_offset's value is always the last token of the clause */
	status = softnic_parser_read_uint32(&p->data_offset, tokens[n_tokens - 1]);
	if (status < 0)
		return 0;

	/* Dispatch on transform type; each helper reports the number of
	 * tokens it consumed (relative to the shifted token array) through
	 * used_n_tokens.  xform stays NULL for an unknown type.
	 */
	if (strcmp(tokens[3], "cipher") == 0) {
		tokens += 3;
		n_tokens -= 3;

		xform = parse_table_action_cipher(p, tokens, n_tokens, encrypt,
				&used_n_tokens);
	} else if (strcmp(tokens[3], "cipher_auth") == 0) {
		tokens += 3;
		n_tokens -= 3;

		xform = parse_table_action_cipher_auth(p, tokens, n_tokens,
				encrypt, &used_n_tokens);
	} else if (strcmp(tokens[3], "aead") == 0) {
		tokens += 3;
		n_tokens -= 3;

		xform = parse_table_action_aead(p, tokens, n_tokens, encrypt,
				&used_n_tokens);
	}

	if (xform == NULL)
		return 0;

	p->xform = xform;

	/* The "data_offset" keyword must immediately follow the transform
	 * description.
	 */
	if (strcmp(tokens[used_n_tokens], "data_offset")) {
		parse_free_sym_crypto_param_data(p);
		return 0;
	}

	a->action_mask |= 1 << RTE_TABLE_ACTION_SYM_CRYPTO;

	/* +3 for the tokens skipped above, +2 for "data_offset <offset>" */
	return used_n_tokens + 5;
}
+
+static uint32_t
+parse_table_action_tag(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "tag"))
+ return 0;
+
+ if (softnic_parser_read_uint32(&a->tag.tag, tokens[1]))
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
+ return 2;
+}
+
+static uint32_t
+parse_table_action_decap(char **tokens,
+ uint32_t n_tokens,
+ struct softnic_table_rule_action *a)
+{
+ if (n_tokens < 2 ||
+ strcmp(tokens[0], "decap"))
+ return 0;
+
+ if (softnic_parser_read_uint16(&a->decap.n, tokens[1]))
+ return 0;
+
+ a->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
+ return 2;
+}
+
static uint32_t
parse_table_action(char **tokens,
uint32_t n_tokens,
@@ -3813,6 +4525,47 @@ parse_table_action(char **tokens,
n_tokens -= n;
}
+ if (n_tokens && (strcmp(tokens[0], "tag") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_tag(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action tag");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "decap") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_decap(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action decap");
+ return 0;
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
+ if (n_tokens && (strcmp(tokens[0], "sym_crypto") == 0)) {
+ uint32_t n;
+
+ n = parse_table_action_sym_crypto(tokens, n_tokens, a);
+ if (n == 0) {
+ snprintf(out, out_size, MSG_ARG_INVALID,
+ "action sym_crypto");
+ }
+
+ tokens += n;
+ n_tokens -= n;
+ }
+
if (n_tokens0 - n_tokens == 1) {
snprintf(out, out_size, MSG_ARG_INVALID, "action");
return 0;
@@ -4797,6 +5550,81 @@ cmd_softnic_thread_pipeline_disable(struct pmd_internals *softnic,
}
}
/**
 * CLI command:
 *   flowapi map
 *      group <group_id>
 *      ingress | egress
 *      pipeline <pipeline_name>
 *      table <table_id>
 *
 * Binds an rte_flow group/direction pair to a pipeline table via
 * flow_attr_map_set().  On any error a message is written to out[].
 */
static void
cmd_softnic_flowapi_map(struct pmd_internals *softnic,
	char **tokens,
	uint32_t n_tokens,
	char *out,
	size_t out_size)
{
	char *pipeline_name;
	uint32_t group_id, table_id;
	int ingress, status;

	/* Fixed-arity command: "flowapi map group <id> <dir> pipeline
	 * <name> table <id>" is exactly 9 tokens.
	 */
	if (n_tokens != 9) {
		snprintf(out, out_size, MSG_ARG_MISMATCH, tokens[0]);
		return;
	}

	if (strcmp(tokens[1], "map") != 0) {
		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "map");
		return;
	}

	if (strcmp(tokens[2], "group") != 0) {
		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "group");
		return;
	}

	if (softnic_parser_read_uint32(&group_id, tokens[3]) != 0) {
		snprintf(out, out_size, MSG_ARG_INVALID, "group_id");
		return;
	}

	if (strcmp(tokens[4], "ingress") == 0) {
		ingress = 1;
	} else if (strcmp(tokens[4], "egress") == 0) {
		ingress = 0;
	} else {
		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "ingress | egress");
		return;
	}

	if (strcmp(tokens[5], "pipeline") != 0) {
		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "pipeline");
		return;
	}

	pipeline_name = tokens[6];

	if (strcmp(tokens[7], "table") != 0) {
		snprintf(out, out_size, MSG_ARG_NOT_FOUND, "table");
		return;
	}

	if (softnic_parser_read_uint32(&table_id, tokens[8]) != 0) {
		snprintf(out, out_size, MSG_ARG_INVALID, "table_id");
		return;
	}

	/* flow_attr_map_set() validates group/pipeline/table existence */
	status = flow_attr_map_set(softnic,
			group_id,
			ingress,
			pipeline_name,
			table_id);
	if (status) {
		snprintf(out, out_size, MSG_CMD_FAIL, tokens[0]);
		return;
	}
}
+
void
softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
{
@@ -4877,6 +5705,11 @@ softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
return;
}
+ if (strcmp(tokens[0], "cryptodev") == 0) {
+ cmd_cryptodev(softnic, tokens, n_tokens, out, out_size);
+ return;
+ }
+
if (strcmp(tokens[0], "port") == 0) {
cmd_port_in_action_profile(softnic, tokens, n_tokens, out, out_size);
return;
@@ -5089,6 +5922,12 @@ softnic_cli_process(char *in, char *out, size_t out_size, void *arg)
}
}
+ if (strcmp(tokens[0], "flowapi") == 0) {
+ cmd_softnic_flowapi_map(softnic, tokens, n_tokens, out,
+ out_size);
+ return;
+ }
+
snprintf(out, out_size, MSG_CMD_UNKNOWN, tokens[0]);
}
diff --git a/drivers/net/softnic/rte_eth_softnic_cryptodev.c b/drivers/net/softnic/rte_eth_softnic_cryptodev.c
new file mode 100644
index 00000000..1480f6dd
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_cryptodev.c
@@ -0,0 +1,125 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
+#include <rte_string_fns.h>
+
+#include "rte_eth_softnic_internals.h"
+
/*
 * Initialize the softnic instance's list of attached crypto devices.
 * Always succeeds; returns 0 for symmetry with the other *_init() helpers.
 */
int
softnic_cryptodev_init(struct pmd_internals *p)
{
	TAILQ_INIT(&p->cryptodev_list);

	return 0;
}
+
+void
+softnic_cryptodev_free(struct pmd_internals *p)
+{
+ for ( ; ; ) {
+ struct softnic_cryptodev *cryptodev;
+
+ cryptodev = TAILQ_FIRST(&p->cryptodev_list);
+ if (cryptodev == NULL)
+ break;
+
+ TAILQ_REMOVE(&p->cryptodev_list, cryptodev, node);
+ free(cryptodev);
+ }
+}
+
+struct softnic_cryptodev *
+softnic_cryptodev_find(struct pmd_internals *p,
+ const char *name)
+{
+ struct softnic_cryptodev *cryptodev;
+
+ if (name == NULL)
+ return NULL;
+
+ TAILQ_FOREACH(cryptodev, &p->cryptodev_list, node)
+ if (strcmp(cryptodev->name, name) == 0)
+ return cryptodev;
+
+ return NULL;
+}
+
/*
 * Attach an existing cryptodev to this softnic instance under the given
 * softnic-local name: resolve the device (by PMD name or numeric ID),
 * configure it with the requested number of queue pairs, set up each queue
 * pair, start it, and record it on p->cryptodev_list.
 *
 * Returns the new list element, or NULL on any validation/configuration
 * failure.
 */
struct softnic_cryptodev *
softnic_cryptodev_create(struct pmd_internals *p,
	const char *name,
	struct softnic_cryptodev_params *params)
{
	struct rte_cryptodev_info dev_info;
	struct rte_cryptodev_config dev_conf;
	struct rte_cryptodev_qp_conf queue_conf;
	struct softnic_cryptodev *cryptodev;
	uint32_t dev_id, i;
	uint32_t socket_id;
	int status;

	/* Check input params: name must be unique within this instance */
	if ((name == NULL) ||
		softnic_cryptodev_find(p, name) ||
		(params->n_queues == 0) ||
		(params->queue_size == 0))
		return NULL;

	/* Resolve the underlying device by PMD name when given, otherwise
	 * by numeric device ID.
	 */
	if (params->dev_name) {
		status = rte_cryptodev_get_dev_id(params->dev_name);
		if (status == -1)
			return NULL;

		dev_id = (uint32_t)status;
	} else {
		if (rte_cryptodev_pmd_is_valid_dev(params->dev_id) == 0)
			return NULL;

		dev_id = params->dev_id;
	}

	socket_id = rte_cryptodev_socket_id(dev_id);
	rte_cryptodev_info_get(dev_id, &dev_info);

	/* Device must support the requested queue count; HW-accelerated
	 * devices are rejected here.
	 */
	if (dev_info.max_nb_queue_pairs < params->n_queues)
		return NULL;
	if (dev_info.feature_flags & RTE_CRYPTODEV_FF_HW_ACCELERATED)
		return NULL;

	dev_conf.socket_id = socket_id;
	dev_conf.nb_queue_pairs = params->n_queues;

	status = rte_cryptodev_configure(dev_id, &dev_conf);
	if (status < 0)
		return NULL;

	/* NOTE(review): on queue-pair setup or start failure below, the
	 * device is left configured but not running - confirm whether an
	 * explicit cleanup/stop is expected here.
	 */
	queue_conf.nb_descriptors = params->queue_size;
	for (i = 0; i < params->n_queues; i++) {
		status = rte_cryptodev_queue_pair_setup(dev_id, i,
				&queue_conf, socket_id, NULL);
		if (status < 0)
			return NULL;
	}

	if (rte_cryptodev_start(dev_id) < 0)
		return NULL;

	/* Node allocation failure: undo the start before bailing out */
	cryptodev = calloc(1, sizeof(struct softnic_cryptodev));
	if (cryptodev == NULL) {
		rte_cryptodev_stop(dev_id);
		return NULL;
	}

	strlcpy(cryptodev->name, name, sizeof(cryptodev->name));
	cryptodev->dev_id = dev_id;
	cryptodev->n_queues = params->n_queues;

	TAILQ_INSERT_TAIL(&p->cryptodev_list, cryptodev, node);

	return cryptodev;
}
diff --git a/drivers/net/softnic/rte_eth_softnic_flow.c b/drivers/net/softnic/rte_eth_softnic_flow.c
new file mode 100644
index 00000000..285af462
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_flow.c
@@ -0,0 +1,2287 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_common.h>
+#include <rte_byteorder.h>
+#include <rte_malloc.h>
+#include <rte_string_fns.h>
+#include <rte_flow.h>
+#include <rte_flow_driver.h>
+#include <rte_tailq.h>
+
+#include "rte_eth_softnic_internals.h"
+#include "rte_eth_softnic.h"
+
+#define rte_htons rte_cpu_to_be_16
+#define rte_htonl rte_cpu_to_be_32
+
+#define rte_ntohs rte_be_to_cpu_16
+#define rte_ntohl rte_be_to_cpu_32
+
+static struct rte_flow *
+softnic_flow_find(struct softnic_table *table,
+ struct softnic_table_rule_match *rule_match)
+{
+ struct rte_flow *flow;
+
+ TAILQ_FOREACH(flow, &table->flows, node)
+ if (memcmp(&flow->match, rule_match, sizeof(*rule_match)) == 0)
+ return flow;
+
+ return NULL;
+}
+
+int
+flow_attr_map_set(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress,
+ const char *pipeline_name,
+ uint32_t table_id)
+{
+ struct pipeline *pipeline;
+ struct flow_attr_map *map;
+
+ if (group_id >= SOFTNIC_FLOW_MAX_GROUPS ||
+ pipeline_name == NULL)
+ return -1;
+
+ pipeline = softnic_pipeline_find(softnic, pipeline_name);
+ if (pipeline == NULL ||
+ table_id >= pipeline->n_tables)
+ return -1;
+
+ map = (ingress) ? &softnic->flow.ingress_map[group_id] :
+ &softnic->flow.egress_map[group_id];
+ strcpy(map->pipeline_name, pipeline_name);
+ map->table_id = table_id;
+ map->valid = 1;
+
+ return 0;
+}
+
+struct flow_attr_map *
+flow_attr_map_get(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress)
+{
+ if (group_id >= SOFTNIC_FLOW_MAX_GROUPS)
+ return NULL;
+
+ return (ingress) ? &softnic->flow.ingress_map[group_id] :
+ &softnic->flow.egress_map[group_id];
+}
+
/*
 * Translate rte_flow attributes into the (pipeline, table) pair configured
 * via flow_attr_map_set().  Exactly one of attr->ingress/attr->egress must
 * be set, and the group must have a valid mapping.
 *
 * On success, fills *pipeline_name and/or *table_id (each optional) and
 * returns 0.  On failure, returns a negative value after populating *error
 * via rte_flow_error_set().
 */
static int
flow_pipeline_table_get(struct pmd_internals *softnic,
	const struct rte_flow_attr *attr,
	const char **pipeline_name,
	uint32_t *table_id,
	struct rte_flow_error *error)
{
	struct flow_attr_map *map;

	if (attr == NULL)
		return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR,
				NULL,
				"Null attr");

	if (!attr->ingress && !attr->egress)
		return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr,
				"Ingress/egress not specified");

	if (attr->ingress && attr->egress)
		return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_INGRESS,
				attr,
				"Setting both ingress and egress is not allowed");

	map = flow_attr_map_get(softnic,
			attr->group,
			attr->ingress);
	if (map == NULL ||
			map->valid == 0)
		return rte_flow_error_set(error,
				EINVAL,
				RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
				attr,
				"Invalid group ID");

	/* Both output parameters are optional */
	if (pipeline_name)
		*pipeline_name = map->pipeline_name;

	if (table_id)
		*table_id = map->table_id;

	return 0;
}
+
/*
 * Scratch buffer big enough to hold the spec or mask of any supported flow
 * item, viewable either as raw bytes (for building table match keys) or as
 * the item-specific struct.
 */
union flow_item {
	uint8_t raw[TABLE_RULE_MATCH_SIZE_MAX];
	struct rte_flow_item_eth eth;
	struct rte_flow_item_vlan vlan;
	struct rte_flow_item_ipv4 ipv4;
	struct rte_flow_item_ipv6 ipv6;
	struct rte_flow_item_icmp icmp;
	struct rte_flow_item_udp udp;
	struct rte_flow_item_tcp tcp;
	struct rte_flow_item_sctp sctp;
	struct rte_flow_item_vxlan vxlan;
	struct rte_flow_item_e_tag e_tag;
	struct rte_flow_item_nvgre nvgre;
	struct rte_flow_item_mpls mpls;
	struct rte_flow_item_gre gre;
	struct rte_flow_item_gtp gtp;
	struct rte_flow_item_esp esp;
	struct rte_flow_item_geneve geneve;
	struct rte_flow_item_vxlan_gpe vxlan_gpe;
	struct rte_flow_item_arp_eth_ipv4 arp_eth_ipv4;
	struct rte_flow_item_ipv6_ext ipv6_ext;
	struct rte_flow_item_icmp6 icmp6;
	struct rte_flow_item_icmp6_nd_ns icmp6_nd_ns;
	struct rte_flow_item_icmp6_nd_na icmp6_nd_na;
	struct rte_flow_item_icmp6_nd_opt icmp6_nd_opt;
	struct rte_flow_item_icmp6_nd_opt_sla_eth icmp6_nd_opt_sla_eth;
	struct rte_flow_item_icmp6_nd_opt_tla_eth icmp6_nd_opt_tla_eth;
};

/* All-zero default mask used for RAW items (no protocol-defined mask) */
static const union flow_item flow_item_raw_mask;
+
/*
 * If @type is a flow item type supported by this driver, store its default
 * mask and the size of its spec/mask struct into *mask / *size and return
 * 1 (TRUE); otherwise return 0 (FALSE) and leave the outputs untouched.
 * RAW items get an all-zero mask (flow_item_raw_mask); GTP/GTPC/GTPU share
 * the same item layout.
 */
static int
flow_item_is_proto(enum rte_flow_item_type type,
	const void **mask,
	size_t *size)
{
	switch (type) {
	case RTE_FLOW_ITEM_TYPE_RAW:
		*mask = &flow_item_raw_mask;
		*size = sizeof(flow_item_raw_mask);
		return 1; /* TRUE */

	case RTE_FLOW_ITEM_TYPE_ETH:
		*mask = &rte_flow_item_eth_mask;
		*size = sizeof(struct rte_flow_item_eth);
		return 1; /* TRUE */

	case RTE_FLOW_ITEM_TYPE_VLAN:
		*mask = &rte_flow_item_vlan_mask;
		*size = sizeof(struct rte_flow_item_vlan);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV4:
		*mask = &rte_flow_item_ipv4_mask;
		*size = sizeof(struct rte_flow_item_ipv4);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV6:
		*mask = &rte_flow_item_ipv6_mask;
		*size = sizeof(struct rte_flow_item_ipv6);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP:
		*mask = &rte_flow_item_icmp_mask;
		*size = sizeof(struct rte_flow_item_icmp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_UDP:
		*mask = &rte_flow_item_udp_mask;
		*size = sizeof(struct rte_flow_item_udp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_TCP:
		*mask = &rte_flow_item_tcp_mask;
		*size = sizeof(struct rte_flow_item_tcp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_SCTP:
		*mask = &rte_flow_item_sctp_mask;
		*size = sizeof(struct rte_flow_item_sctp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_VXLAN:
		*mask = &rte_flow_item_vxlan_mask;
		*size = sizeof(struct rte_flow_item_vxlan);
		return 1;

	case RTE_FLOW_ITEM_TYPE_E_TAG:
		*mask = &rte_flow_item_e_tag_mask;
		*size = sizeof(struct rte_flow_item_e_tag);
		return 1;

	case RTE_FLOW_ITEM_TYPE_NVGRE:
		*mask = &rte_flow_item_nvgre_mask;
		*size = sizeof(struct rte_flow_item_nvgre);
		return 1;

	case RTE_FLOW_ITEM_TYPE_MPLS:
		*mask = &rte_flow_item_mpls_mask;
		*size = sizeof(struct rte_flow_item_mpls);
		return 1;

	case RTE_FLOW_ITEM_TYPE_GRE:
		*mask = &rte_flow_item_gre_mask;
		*size = sizeof(struct rte_flow_item_gre);
		return 1;

	case RTE_FLOW_ITEM_TYPE_GTP:
	case RTE_FLOW_ITEM_TYPE_GTPC:
	case RTE_FLOW_ITEM_TYPE_GTPU:
		*mask = &rte_flow_item_gtp_mask;
		*size = sizeof(struct rte_flow_item_gtp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ESP:
		*mask = &rte_flow_item_esp_mask;
		*size = sizeof(struct rte_flow_item_esp);
		return 1;

	case RTE_FLOW_ITEM_TYPE_GENEVE:
		*mask = &rte_flow_item_geneve_mask;
		*size = sizeof(struct rte_flow_item_geneve);
		return 1;

	case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
		*mask = &rte_flow_item_vxlan_gpe_mask;
		*size = sizeof(struct rte_flow_item_vxlan_gpe);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ARP_ETH_IPV4:
		*mask = &rte_flow_item_arp_eth_ipv4_mask;
		*size = sizeof(struct rte_flow_item_arp_eth_ipv4);
		return 1;

	case RTE_FLOW_ITEM_TYPE_IPV6_EXT:
		*mask = &rte_flow_item_ipv6_ext_mask;
		*size = sizeof(struct rte_flow_item_ipv6_ext);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6:
		*mask = &rte_flow_item_icmp6_mask;
		*size = sizeof(struct rte_flow_item_icmp6);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NS:
		*mask = &rte_flow_item_icmp6_nd_ns_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_ns);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_NA:
		*mask = &rte_flow_item_icmp6_nd_na_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_na);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT:
		*mask = &rte_flow_item_icmp6_nd_opt_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_SLA_ETH:
		*mask = &rte_flow_item_icmp6_nd_opt_sla_eth_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth);
		return 1;

	case RTE_FLOW_ITEM_TYPE_ICMP6_ND_OPT_TLA_ETH:
		*mask = &rte_flow_item_icmp6_nd_opt_tla_eth_mask;
		*size = sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth);
		return 1;

	default: return 0; /* FALSE */
	}
}
+
/*
 * Validate and normalize a RAW flow item: only relative, non-search,
 * non-negative-offset patterns that fit in TABLE_RULE_MATCH_SIZE_MAX are
 * accepted.  The pattern and its mask are copied into zero-filled
 * fixed-size buffers at the requested offset.
 *
 * Outputs: *item_size = offset + length of the pattern, *item_disabled =
 * whether the item contributes to matching.  Returns 0 on success or a
 * negative value after setting *error.
 */
static int
flow_item_raw_preprocess(const struct rte_flow_item *item,
	union flow_item *item_spec,
	union flow_item *item_mask,
	size_t *item_size,
	int *item_disabled,
	struct rte_flow_error *error)
{
	const struct rte_flow_item_raw *item_raw_spec = item->spec;
	const struct rte_flow_item_raw *item_raw_mask = item->mask;
	const uint8_t *pattern;
	const uint8_t *pattern_mask;
	uint8_t *spec = (uint8_t *)item_spec;
	uint8_t *mask = (uint8_t *)item_mask;
	size_t pattern_length, pattern_offset, i;
	int disabled;

	if (!item->spec)
		return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Null specification");

	if (item->last)
		return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Range not allowed (last must be NULL)");

	if (item_raw_spec->relative == 0)
		return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Absolute offset not supported");

	if (item_raw_spec->search)
		return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Search not supported");

	if (item_raw_spec->offset < 0)
		return rte_flow_error_set(error,
				ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Negative offset not supported");

	if (item_raw_spec->length == 0)
		return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Zero pattern length");

	if (item_raw_spec->offset + item_raw_spec->length >
		TABLE_RULE_MATCH_SIZE_MAX)
		return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Item too big");

	if (!item_raw_spec->pattern && item_raw_mask && item_raw_mask->pattern)
		return rte_flow_error_set(error,
				ENOTSUP,
				RTE_FLOW_ERROR_TYPE_ITEM,
				item,
				"RAW: Non-NULL pattern mask not allowed with NULL pattern");

	pattern = item_raw_spec->pattern;
	pattern_mask = (item_raw_mask) ? item_raw_mask->pattern : NULL;
	pattern_length = (size_t)item_raw_spec->length;
	pattern_offset = (size_t)item_raw_spec->offset;

	/* NOTE(review): with a non-NULL mask, this loop scans the SPEC
	 * pattern bytes (not the mask bytes) and flags the item disabled on
	 * the first non-zero byte.  That looks inverted - one would expect
	 * "disabled unless some mask byte is non-zero".  Verify against the
	 * flow_item_proto_preprocess() convention before changing.
	 */
	disabled = 0;
	if (pattern_mask == NULL)
		disabled = 1;
	else
		for (i = 0; i < pattern_length; i++)
			if ((pattern)[i])
				disabled = 1;

	/* Zero-fill both buffers, then place pattern/mask at the offset */
	memset(spec, 0, TABLE_RULE_MATCH_SIZE_MAX);
	if (pattern)
		memcpy(&spec[pattern_offset], pattern, pattern_length);

	memset(mask, 0, TABLE_RULE_MATCH_SIZE_MAX);
	if (pattern_mask)
		memcpy(&mask[pattern_offset], pattern_mask, pattern_length);

	*item_size = pattern_offset + pattern_length;
	*item_disabled = disabled;

	return 0;
}
+
+/*
+ * Preprocess a protocol flow item into canonical (spec, mask, size) form:
+ *  - reject unsupported item types;
+ *  - delegate RAW items to flow_item_raw_preprocess();
+ *  - substitute the rte_flow default mask when item->mask is NULL;
+ *  - apply the mask over the spec so disabled bits are zeroed;
+ *  - reject ranges (item->last differing from spec under the mask).
+ * *item_disabled* is set when the effective mask is all zeroes, i.e. the
+ * item matches everything.
+ * Returns 0 on success or a negative value set through *error*.
+ */
+static int
+flow_item_proto_preprocess(const struct rte_flow_item *item,
+	union flow_item *item_spec,
+	union flow_item *item_mask,
+	size_t *item_size,
+	int *item_disabled,
+	struct rte_flow_error *error)
+{
+	const void *mask_default;
+	uint8_t *spec = (uint8_t *)item_spec;
+	uint8_t *mask = (uint8_t *)item_mask;
+	size_t size, i;
+
+	if (!flow_item_is_proto(item->type, &mask_default, &size))
+		return rte_flow_error_set(error,
+			ENOTSUP,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"Item type not supported");
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_RAW)
+		return flow_item_raw_preprocess(item,
+			item_spec,
+			item_mask,
+			item_size,
+			item_disabled,
+			error);
+
+	/* spec */
+	if (!item->spec) {
+		/* If spec is NULL, then last and mask also have to be NULL. */
+		if (item->last || item->mask)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"Invalid item (NULL spec with non-NULL last or mask)");
+
+		/* NULL spec == wildcard item: all-zero spec/mask, disabled. */
+		memset(item_spec, 0, size);
+		memset(item_mask, 0, size);
+		*item_size = size;
+		*item_disabled = 1; /* TRUE */
+		return 0;
+	}
+
+	memcpy(spec, item->spec, size);
+	*item_size = size;
+
+	/* mask */
+	if (item->mask)
+		memcpy(mask, item->mask, size);
+	else
+		memcpy(mask, mask_default, size);
+
+	/* disabled: TRUE iff every mask byte is zero. */
+	for (i = 0; i < size; i++)
+		if (mask[i])
+			break;
+	*item_disabled = (i == size) ? 1 : 0;
+
+	/* Apply mask over spec. */
+	for (i = 0; i < size; i++)
+		spec[i] &= mask[i];
+
+	/* last */
+	if (item->last) {
+		/* C99 VLA sized by the item's protocol struct (bounded by
+		 * sizeof(union flow_item), so stack use is small).
+		 */
+		uint8_t last[size];
+
+		/* init last */
+		memcpy(last, item->last, size);
+		for (i = 0; i < size; i++)
+			last[i] &= mask[i];
+
+		/* check for range: last must equal spec under the mask,
+		 * since this driver does not support range matching.
+		 */
+		for (i = 0; i < size; i++)
+			if (last[i] != spec[i])
+				return rte_flow_error_set(error,
+					ENOTSUP,
+					RTE_FLOW_ERROR_TYPE_ITEM,
+					item,
+					"Range not supported");
+	}
+
+	return 0;
+}
+
+/***
+ * Skip disabled protocol items and VOID items
+ * until any of the mutually exclusive conditions
+ * from the list below takes place:
+ * (A) A protocol present in the proto_mask
+ *     is met (either ENABLED or DISABLED);
+ * (B) A protocol NOT present in the proto_mask is met in ENABLED state;
+ * (C) The END item is met.
+ *
+ * *item* is advanced in place to the stopping item. When *length* is
+ * non-NULL it receives the total byte size of the skipped (disabled,
+ * non-VOID) items, which callers use as a running offset into the match
+ * buffer. Returns 0 on success or the negative status propagated from
+ * flow_item_proto_preprocess().
+ */
+static int
+flow_item_skip_disabled_protos(const struct rte_flow_item **item,
+	uint64_t proto_mask,
+	size_t *length,
+	struct rte_flow_error *error)
+{
+	size_t len = 0;
+
+	for ( ; (*item)->type != RTE_FLOW_ITEM_TYPE_END; (*item)++) {
+		union flow_item spec, mask;
+		size_t size;
+		int disabled = 0, status;
+
+		/* VOID items contribute no bytes and never stop the scan. */
+		if ((*item)->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+
+		status = flow_item_proto_preprocess(*item,
+			&spec,
+			&mask,
+			&size,
+			&disabled,
+			error);
+		if (status)
+			return status;
+
+		/* Stop on condition (A) or (B). */
+		if ((proto_mask & (1LLU << (*item)->type)) ||
+			!disabled)
+			break;
+
+		len += size;
+	}
+
+	if (length)
+		*length = len;
+
+	return 0;
+}
+
+#define FLOW_ITEM_PROTO_IP \
+ ((1LLU << RTE_FLOW_ITEM_TYPE_IPV4) | \
+ (1LLU << RTE_FLOW_ITEM_TYPE_IPV6))
+
+/* Advance *item* past any run of VOID items (END is never VOID, so the
+ * loop always terminates on a well-formed item list).
+ */
+static void
+flow_item_skip_void(const struct rte_flow_item **item)
+{
+	for ( ; ; (*item)++)
+		if ((*item)->type != RTE_FLOW_ITEM_TYPE_VOID)
+			return;
+}
+
+#define IP_PROTOCOL_TCP 0x06
+#define IP_PROTOCOL_UDP 0x11
+#define IP_PROTOCOL_SCTP 0x84
+
+/*
+ * Convert a 64-bit bitmask into a prefix depth (number of leading one
+ * bits). The mask must be a contiguous prefix of ones followed only by
+ * zeroes (e.g. 0xFFFF000000000000); any other pattern returns -1.
+ * On success returns 0 and stores the depth (0..64) in *depth* when
+ * *depth* is non-NULL.
+ */
+static int
+mask_to_depth(uint64_t mask,
+	uint32_t *depth)
+{
+	uint64_t n;
+
+	if (mask == UINT64_MAX) {
+		if (depth)
+			*depth = 64;
+
+		return 0;
+	}
+
+	/* Invert: a valid prefix mask becomes a contiguous run of low ones.
+	 * For such a value, mask & (mask + 1) == 0; anything else means the
+	 * original mask had a hole and is rejected.
+	 */
+	mask = ~mask;
+
+	if (mask & (mask + 1))
+		return -1;
+
+	/* depth = 64 - number of trailing zero bits in the original mask. */
+	n = __builtin_popcountll(mask);
+	if (depth)
+		*depth = (uint32_t)(64 - n);
+
+	return 0;
+}
+
+/*
+ * Convert an IPv4 (host-order) address mask into a prefix depth (0..32).
+ * The 32-bit mask is widened to 64 bits with the high half forced to all
+ * ones so mask_to_depth() can validate it, then 32 is subtracted back.
+ * Returns 0 on success, -1 if the mask is not a contiguous prefix.
+ */
+static int
+ipv4_mask_to_depth(uint32_t mask,
+	uint32_t *depth)
+{
+	uint32_t d;
+	int status;
+
+	status = mask_to_depth(mask | (UINT64_MAX << 32), &d);
+	if (status)
+		return status;
+
+	d -= 32;
+	if (depth)
+		*depth = d;
+
+	return 0;
+}
+
+/*
+ * Convert a 128-bit IPv6 address mask (network byte order) into a prefix
+ * depth (0..128). Each 64-bit half must itself be a contiguous prefix,
+ * and the low half may only be non-zero when the high half is all ones.
+ * Returns 0 on success, -1 on a non-prefix mask.
+ *
+ * NOTE(review): the uint8_t* -> uint64_t* cast assumes *mask* is 8-byte
+ * aligned (true when it points into union flow_item, the only caller
+ * visible here) — confirm no other caller passes an unaligned buffer.
+ */
+static int
+ipv6_mask_to_depth(uint8_t *mask,
+	uint32_t *depth)
+{
+	uint64_t *m = (uint64_t *)mask;
+	uint64_t m0 = rte_be_to_cpu_64(m[0]);
+	uint64_t m1 = rte_be_to_cpu_64(m[1]);
+	uint32_t d0, d1;
+	int status;
+
+	status = mask_to_depth(m0, &d0);
+	if (status)
+		return status;
+
+	status = mask_to_depth(m1, &d1);
+	if (status)
+		return status;
+
+	/* Low half may only contribute when the high half is saturated. */
+	if (d0 < 64 && d1)
+		return -1;
+
+	if (depth)
+		*depth = d0 + d1;
+
+	return 0;
+}
+
+/*
+ * Convert a (port, port_mask) pair into an inclusive port range
+ * [*port0*, *port1*]. The mask must be a contiguous prefix (validated by
+ * widening to 64 bits with the high 48 bits forced to ones); the range is
+ * then the masked port with the don't-care low bits at 0 and at 1.
+ * Returns 0 on success, -1 on a non-prefix mask.
+ */
+static int
+port_mask_to_range(uint16_t port,
+	uint16_t port_mask,
+	uint16_t *port0,
+	uint16_t *port1)
+{
+	int status;
+	uint16_t p0, p1;
+
+	status = mask_to_depth(port_mask | (UINT64_MAX << 16), NULL);
+	if (status)
+		return -1;
+
+	p0 = port & port_mask;
+	p1 = p0 | ~port_mask;
+
+	if (port0)
+		*port0 = p0;
+
+	if (port1)
+		*port1 = p1;
+
+	return 0;
+}
+
+/*
+ * Translate an rte_flow item list into an ACL table rule match.
+ * The item list must follow the fixed layout:
+ *   [VOID/disabled]* IPV4|IPV6 [VOID]* TCP|UDP|SCTP [VOID/disabled]* END
+ * The IP source/destination masks must be contiguous prefixes, the IP
+ * protocol mask must be exact (0xFF), and the L4 port masks must be
+ * prefix masks convertible to port ranges. The L4 item type must agree
+ * with the IP next-protocol field.
+ * Returns 0 on success or a negative value set through *error*.
+ */
+static int
+flow_rule_match_acl_get(struct pmd_internals *softnic __rte_unused,
+	struct pipeline *pipeline __rte_unused,
+	struct softnic_table *table __rte_unused,
+	const struct rte_flow_attr *attr,
+	const struct rte_flow_item *item,
+	struct softnic_table_rule_match *rule_match,
+	struct rte_flow_error *error)
+{
+	union flow_item spec, mask;
+	size_t size, length = 0;
+	int disabled = 0, status;
+	uint8_t ip_proto, ip_proto_mask;
+
+	/* ACL priority comes straight from the flow attributes. */
+	memset(rule_match, 0, sizeof(*rule_match));
+	rule_match->match_type = TABLE_ACL;
+	rule_match->match.acl.priority = attr->priority;
+
+	/* VOID or disabled protos only, if any. */
+	status = flow_item_skip_disabled_protos(&item,
+		FLOW_ITEM_PROTO_IP, &length, error);
+	if (status)
+		return status;
+
+	/* IP only. */
+	status = flow_item_proto_preprocess(item, &spec, &mask,
+		&size, &disabled, error);
+	if (status)
+		return status;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_IPV4:
+	{
+		uint32_t sa_depth, da_depth;
+
+		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.src_addr),
+			&sa_depth);
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal IPv4 header source address mask");
+
+		status = ipv4_mask_to_depth(rte_ntohl(mask.ipv4.hdr.dst_addr),
+			&da_depth);
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal IPv4 header destination address mask");
+
+		ip_proto = spec.ipv4.hdr.next_proto_id;
+		ip_proto_mask = mask.ipv4.hdr.next_proto_id;
+
+		/* ip_version == 1 denotes IPv4 in the softnic ACL match. */
+		rule_match->match.acl.ip_version = 1;
+		rule_match->match.acl.ipv4.sa =
+			rte_ntohl(spec.ipv4.hdr.src_addr);
+		rule_match->match.acl.ipv4.da =
+			rte_ntohl(spec.ipv4.hdr.dst_addr);
+		rule_match->match.acl.sa_depth = sa_depth;
+		rule_match->match.acl.da_depth = da_depth;
+		rule_match->match.acl.proto = ip_proto;
+		rule_match->match.acl.proto_mask = ip_proto_mask;
+		break;
+	} /* RTE_FLOW_ITEM_TYPE_IPV4 */
+
+	case RTE_FLOW_ITEM_TYPE_IPV6:
+	{
+		uint32_t sa_depth, da_depth;
+
+		status = ipv6_mask_to_depth(mask.ipv6.hdr.src_addr, &sa_depth);
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal IPv6 header source address mask");
+
+		status = ipv6_mask_to_depth(mask.ipv6.hdr.dst_addr, &da_depth);
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal IPv6 header destination address mask");
+
+		ip_proto = spec.ipv6.hdr.proto;
+		ip_proto_mask = mask.ipv6.hdr.proto;
+
+		/* ip_version == 0 denotes IPv6 in the softnic ACL match. */
+		rule_match->match.acl.ip_version = 0;
+		memcpy(rule_match->match.acl.ipv6.sa,
+			spec.ipv6.hdr.src_addr,
+			sizeof(spec.ipv6.hdr.src_addr));
+		memcpy(rule_match->match.acl.ipv6.da,
+			spec.ipv6.hdr.dst_addr,
+			sizeof(spec.ipv6.hdr.dst_addr));
+		rule_match->match.acl.sa_depth = sa_depth;
+		rule_match->match.acl.da_depth = da_depth;
+		rule_match->match.acl.proto = ip_proto;
+		rule_match->match.acl.proto_mask = ip_proto_mask;
+		break;
+	} /* RTE_FLOW_ITEM_TYPE_IPV6 */
+
+	default:
+		return rte_flow_error_set(error,
+			ENOTSUP,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"ACL: IP protocol required");
+	} /* switch */
+
+	/* Exact-match protocol is required so the L4 type can be checked. */
+	if (ip_proto_mask != UINT8_MAX)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"ACL: Illegal IP protocol mask");
+
+	item++;
+
+	/* VOID only, if any. */
+	flow_item_skip_void(&item);
+
+	/* TCP/UDP/SCTP only. */
+	status = flow_item_proto_preprocess(item, &spec, &mask,
+		&size, &disabled, error);
+	if (status)
+		return status;
+
+	switch (item->type) {
+	case RTE_FLOW_ITEM_TYPE_TCP:
+	{
+		uint16_t sp0, sp1, dp0, dp1;
+
+		if (ip_proto != IP_PROTOCOL_TCP)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Item type is TCP, but IP protocol is not");
+
+		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.src_port),
+			rte_ntohs(mask.tcp.hdr.src_port),
+			&sp0,
+			&sp1);
+
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal TCP source port mask");
+
+		status = port_mask_to_range(rte_ntohs(spec.tcp.hdr.dst_port),
+			rte_ntohs(mask.tcp.hdr.dst_port),
+			&dp0,
+			&dp1);
+
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal TCP destination port mask");
+
+		rule_match->match.acl.sp0 = sp0;
+		rule_match->match.acl.sp1 = sp1;
+		rule_match->match.acl.dp0 = dp0;
+		rule_match->match.acl.dp1 = dp1;
+
+		break;
+	} /* RTE_FLOW_ITEM_TYPE_TCP */
+
+	case RTE_FLOW_ITEM_TYPE_UDP:
+	{
+		uint16_t sp0, sp1, dp0, dp1;
+
+		if (ip_proto != IP_PROTOCOL_UDP)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Item type is UDP, but IP protocol is not");
+
+		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.src_port),
+			rte_ntohs(mask.udp.hdr.src_port),
+			&sp0,
+			&sp1);
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal UDP source port mask");
+
+		status = port_mask_to_range(rte_ntohs(spec.udp.hdr.dst_port),
+			rte_ntohs(mask.udp.hdr.dst_port),
+			&dp0,
+			&dp1);
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal UDP destination port mask");
+
+		rule_match->match.acl.sp0 = sp0;
+		rule_match->match.acl.sp1 = sp1;
+		rule_match->match.acl.dp0 = dp0;
+		rule_match->match.acl.dp1 = dp1;
+
+		break;
+	} /* RTE_FLOW_ITEM_TYPE_UDP */
+
+	case RTE_FLOW_ITEM_TYPE_SCTP:
+	{
+		uint16_t sp0, sp1, dp0, dp1;
+
+		if (ip_proto != IP_PROTOCOL_SCTP)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Item type is SCTP, but IP protocol is not");
+
+		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.src_port),
+			rte_ntohs(mask.sctp.hdr.src_port),
+			&sp0,
+			&sp1);
+
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal SCTP source port mask");
+
+		status = port_mask_to_range(rte_ntohs(spec.sctp.hdr.dst_port),
+			rte_ntohs(mask.sctp.hdr.dst_port),
+			&dp0,
+			&dp1);
+		if (status)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"ACL: Illegal SCTP destination port mask");
+
+		rule_match->match.acl.sp0 = sp0;
+		rule_match->match.acl.sp1 = sp1;
+		rule_match->match.acl.dp0 = dp0;
+		rule_match->match.acl.dp1 = dp1;
+
+		break;
+	} /* RTE_FLOW_ITEM_TYPE_SCTP */
+
+	default:
+		return rte_flow_error_set(error,
+			ENOTSUP,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"ACL: TCP/UDP/SCTP required");
+	} /* switch */
+
+	item++;
+
+	/* VOID or disabled protos only, if any. */
+	status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
+	if (status)
+		return status;
+
+	/* END only. */
+	if (item->type != RTE_FLOW_ITEM_TYPE_END)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"ACL: Expecting END item");
+
+	return 0;
+}
+
+/***
+ * Both *tmask* and *fmask* are byte arrays of size *tsize* and *fsize*
+ * respectively.
+ * They are located within a larger buffer at offsets *toffset* and *foffset*
+ * respectively. Both *tmask* and *fmask* represent bitmasks for the larger
+ * buffer.
+ * Question: are the two masks equivalent?
+ *
+ * Notes:
+ * 1. Offset basically indicates that the first offset bytes in the buffer
+ *    are "don't care", so offset is equivalent to pre-pending an "all-zeros"
+ *    array of *offset* bytes to the *mask*.
+ * 2. Each *mask* might contain a number of zero bytes at the beginning or
+ *    at the end.
+ * 3. Bytes in the larger buffer after the end of the *mask* are also considered
+ *    "don't care", so they are equivalent to appending an "all-zeros" array of
+ *    bytes to the *mask*.
+ *
+ * Example:
+ * Buffer = [xx xx xx xx xx xx xx xx], buffer size = 8 bytes
+ * tmask = [00 22 00 33 00], toffset = 2, tsize = 5
+ *    => buffer mask = [00 00 00 22 00 33 00 00]
+ * fmask = [22 00 33], foffset = 3, fsize = 3 =>
+ *    => buffer mask = [00 00 00 22 00 33 00 00]
+ * Therefore, the tmask and fmask from this example are equivalent.
+ *
+ * Returns 1 (TRUE) when equivalent, 0 (FALSE) otherwise. On TRUE, when
+ * non-NULL, *toffset_plus* / *foffset_plus* receive the position of the
+ * first non-zero byte within each mask array.
+ *
+ * NOTE(review): the initial scans below assume each mask contains at
+ * least one non-zero byte; an all-zero mask would read past the array
+ * end — confirm callers never pass an all-zero mask.
+ */
+static int
+hash_key_mask_is_same(uint8_t *tmask,
+	size_t toffset,
+	size_t tsize,
+	uint8_t *fmask,
+	size_t foffset,
+	size_t fsize,
+	size_t *toffset_plus,
+	size_t *foffset_plus)
+{
+	size_t tpos; /* Position of first non-zero byte in the tmask buffer. */
+	size_t fpos; /* Position of first non-zero byte in the fmask buffer. */
+
+	/* Compute tpos and fpos. */
+	for (tpos = 0; tmask[tpos] == 0; tpos++)
+		;
+	for (fpos = 0; fmask[fpos] == 0; fpos++)
+		;
+
+	/* The first significant byte must land at the same absolute
+	 * position in the larger buffer.
+	 */
+	if (toffset + tpos != foffset + fpos)
+		return 0; /* FALSE */
+
+	tsize -= tpos;
+	fsize -= fpos;
+
+	/* Compare the overlapping tails; the longer tail must be all-zero
+	 * beyond the shorter one (trailing don't-care bytes).
+	 */
+	if (tsize < fsize) {
+		size_t i;
+
+		for (i = 0; i < tsize; i++)
+			if (tmask[tpos + i] != fmask[fpos + i])
+				return 0; /* FALSE */
+
+		for ( ; i < fsize; i++)
+			if (fmask[fpos + i])
+				return 0; /* FALSE */
+	} else {
+		size_t i;
+
+		for (i = 0; i < fsize; i++)
+			if (tmask[tpos + i] != fmask[fpos + i])
+				return 0; /* FALSE */
+
+		for ( ; i < tsize; i++)
+			if (tmask[tpos + i])
+				return 0; /* FALSE */
+	}
+
+	if (toffset_plus)
+		*toffset_plus = tpos;
+
+	if (foffset_plus)
+		*foffset_plus = fpos;
+
+	return 1; /* TRUE */
+}
+
+/*
+ * Translate an rte_flow item list into a HASH table rule match: the
+ * enabled items are concatenated into a flat key/key-mask pair, the
+ * flow key mask is checked for equivalence against the table's
+ * configured key mask, and the aligned key bytes are copied into
+ * *rule_match*.
+ * Returns 0 on success or a negative value set through *error*.
+ */
+static int
+flow_rule_match_hash_get(struct pmd_internals *softnic __rte_unused,
+	struct pipeline *pipeline __rte_unused,
+	struct softnic_table *table,
+	const struct rte_flow_attr *attr __rte_unused,
+	const struct rte_flow_item *item,
+	struct softnic_table_rule_match *rule_match,
+	struct rte_flow_error *error)
+{
+	struct softnic_table_rule_match_hash key, key_mask;
+	struct softnic_table_hash_params *params = &table->params.match.hash;
+	size_t offset = 0, length = 0, tpos, fpos;
+	int status;
+
+	memset(&key, 0, sizeof(key));
+	memset(&key_mask, 0, sizeof(key_mask));
+
+	/* VOID or disabled protos only, if any. Their accumulated size
+	 * becomes the key's starting offset within the packet.
+	 */
+	status = flow_item_skip_disabled_protos(&item, 0, &offset, error);
+	if (status)
+		return status;
+
+	if (item->type == RTE_FLOW_ITEM_TYPE_END)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			item,
+			"HASH: END detected too early");
+
+	/* VOID or any protocols (enabled or disabled): concatenate each
+	 * item's spec/mask bytes into the flat key buffers.
+	 */
+	for ( ; item->type != RTE_FLOW_ITEM_TYPE_END; item++) {
+		union flow_item spec, mask;
+		size_t size;
+		int disabled, status;
+
+		if (item->type == RTE_FLOW_ITEM_TYPE_VOID)
+			continue;
+
+		status = flow_item_proto_preprocess(item,
+			&spec,
+			&mask,
+			&size,
+			&disabled,
+			error);
+		if (status)
+			return status;
+
+		if (length + size > sizeof(key)) {
+			/* A disabled overflow item just terminates the key;
+			 * an enabled one cannot be represented.
+			 */
+			if (disabled)
+				break;
+
+			return rte_flow_error_set(error,
+				ENOTSUP,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"HASH: Item too big");
+		}
+
+		memcpy(&key.key[length], &spec, size);
+		memcpy(&key_mask.key[length], &mask, size);
+		length += size;
+	}
+
+	if (item->type != RTE_FLOW_ITEM_TYPE_END) {
+		/* VOID or disabled protos only, if any. */
+		status = flow_item_skip_disabled_protos(&item, 0, NULL, error);
+		if (status)
+			return status;
+
+		/* END only. */
+		if (item->type != RTE_FLOW_ITEM_TYPE_END)
+			return rte_flow_error_set(error,
+				EINVAL,
+				RTE_FLOW_ERROR_TYPE_ITEM,
+				item,
+				"HASH: Expecting END item");
+	}
+
+	/* Compare flow key mask against table key mask. The table's
+	 * key_offset is absolute within the mbuf, so the flow offset is
+	 * shifted by the mbuf struct size plus headroom to match.
+	 */
+	offset += sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM;
+
+	if (!hash_key_mask_is_same(params->key_mask,
+		params->key_offset,
+		params->key_size,
+		key_mask.key,
+		offset,
+		length,
+		&tpos,
+		&fpos))
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"HASH: Item list is not observing the match format");
+
+	/* Rule match: copy the key bytes aligned to the table key layout. */
+	memset(rule_match, 0, sizeof(*rule_match));
+	rule_match->match_type = TABLE_HASH;
+	memcpy(&rule_match->match.hash.key[tpos],
+		&key.key[fpos],
+		RTE_MIN(sizeof(rule_match->match.hash.key) - tpos,
+			length - fpos));
+
+	return 0;
+}
+
+/*
+ * Dispatch rte_flow item translation to the handler matching the target
+ * table's match type (ACL or HASH). Each case returns directly, so no
+ * fall-through occurs.
+ * Returns 0 on success or a negative value set through *error*.
+ */
+static int
+flow_rule_match_get(struct pmd_internals *softnic,
+	struct pipeline *pipeline,
+	struct softnic_table *table,
+	const struct rte_flow_attr *attr,
+	const struct rte_flow_item *item,
+	struct softnic_table_rule_match *rule_match,
+	struct rte_flow_error *error)
+{
+	switch (table->params.match_type) {
+	case TABLE_ACL:
+		return flow_rule_match_acl_get(softnic,
+			pipeline,
+			table,
+			attr,
+			item,
+			rule_match,
+			error);
+
+	case TABLE_HASH:
+		return flow_rule_match_hash_get(softnic,
+			pipeline,
+			table,
+			attr,
+			item,
+			rule_match,
+			error);
+
+	default:
+		return rte_flow_error_set(error,
+			ENOTSUP,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Unsupported pipeline table match type");
+	}
+}
+
+static int
+flow_rule_action_get(struct pmd_internals *softnic,
+ struct pipeline *pipeline,
+ struct softnic_table *table,
+ const struct rte_flow_attr *attr,
+ const struct rte_flow_action *action,
+ struct softnic_table_rule_action *rule_action,
+ struct rte_flow_error *error)
+{
+ struct softnic_table_action_profile *profile;
+ struct softnic_table_action_profile_params *params;
+ int n_jump_queue_rss_drop = 0;
+ int n_count = 0;
+ int n_mark = 0;
+ int n_vxlan_decap = 0;
+
+ profile = softnic_table_action_profile_find(softnic,
+ table->params.action_profile_name);
+ if (profile == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ action,
+ "JUMP: Table action profile");
+
+ params = &profile->params;
+
+ for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++) {
+ if (action->type == RTE_FLOW_ACTION_TYPE_VOID)
+ continue;
+
+ switch (action->type) {
+ case RTE_FLOW_ACTION_TYPE_JUMP:
+ {
+ const struct rte_flow_action_jump *conf = action->conf;
+ struct flow_attr_map *map;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "JUMP: Null configuration");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is"
+ " allowed per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "JUMP action not enabled for this table");
+
+ n_jump_queue_rss_drop = 1;
+
+ map = flow_attr_map_get(softnic,
+ conf->group,
+ attr->ingress);
+ if (map == NULL || map->valid == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "JUMP: Invalid group mapping");
+
+ if (strcmp(pipeline->name, map->pipeline_name) != 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "JUMP: Jump to table in different pipeline");
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_TABLE;
+ rule_action->fwd.id = map->table_id;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_JUMP */
+
+ case RTE_FLOW_ACTION_TYPE_QUEUE:
+ {
+ char name[NAME_SIZE];
+ struct rte_eth_dev *dev;
+ const struct rte_flow_action_queue *conf = action->conf;
+ uint32_t port_id;
+ int status;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "QUEUE: Null configuration");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is allowed"
+ " per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "QUEUE action not enabled for this table");
+
+ n_jump_queue_rss_drop = 1;
+
+ dev = ETHDEV(softnic);
+ if (dev == NULL ||
+ conf->index >= dev->data->nb_rx_queues)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "QUEUE: Invalid RX queue ID");
+
+ sprintf(name, "RXQ%u", (uint32_t)conf->index);
+
+ status = softnic_pipeline_port_out_find(softnic,
+ pipeline->name,
+ name,
+ &port_id);
+ if (status)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "QUEUE: RX queue not accessible from this pipeline");
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT;
+ rule_action->fwd.id = port_id;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /*RTE_FLOW_ACTION_TYPE_QUEUE */
+
+ case RTE_FLOW_ACTION_TYPE_RSS:
+ {
+ const struct rte_flow_action_rss *conf = action->conf;
+ uint32_t i;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "RSS: Null configuration");
+
+ if (!rte_is_power_of_2(conf->queue_num))
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf,
+ "RSS: Number of queues must be a power of 2");
+
+ if (conf->queue_num > RTE_DIM(rule_action->lb.out))
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf,
+ "RSS: Number of queues too big");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is allowed per flow");
+
+ if (((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0) ||
+ ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_LB)) == 0))
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "RSS action not supported by this table");
+
+ if (params->lb.out_offset !=
+ pipeline->params.offset_port_id)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "RSS action not supported by this pipeline");
+
+ n_jump_queue_rss_drop = 1;
+
+ /* RTE_TABLE_ACTION_LB */
+ for (i = 0; i < conf->queue_num; i++) {
+ char name[NAME_SIZE];
+ struct rte_eth_dev *dev;
+ uint32_t port_id;
+ int status;
+
+ dev = ETHDEV(softnic);
+ if (dev == NULL ||
+ conf->queue[i] >=
+ dev->data->nb_rx_queues)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "RSS: Invalid RX queue ID");
+
+ sprintf(name, "RXQ%u",
+ (uint32_t)conf->queue[i]);
+
+ status = softnic_pipeline_port_out_find(softnic,
+ pipeline->name,
+ name,
+ &port_id);
+ if (status)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "RSS: RX queue not accessible from this pipeline");
+
+ rule_action->lb.out[i] = port_id;
+ }
+
+ for ( ; i < RTE_DIM(rule_action->lb.out); i++)
+ rule_action->lb.out[i] =
+ rule_action->lb.out[i % conf->queue_num];
+
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_LB;
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_PORT_META;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_RSS */
+
+ case RTE_FLOW_ACTION_TYPE_DROP:
+ {
+ const void *conf = action->conf;
+
+ if (conf != NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "DROP: No configuration required");
+
+ if (n_jump_queue_rss_drop)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one termination action is allowed per flow");
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_FWD)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "DROP action not supported by this table");
+
+ n_jump_queue_rss_drop = 1;
+
+ /* RTE_TABLE_ACTION_FWD */
+ rule_action->fwd.action = RTE_PIPELINE_ACTION_DROP;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_FWD;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_DROP */
+
+ case RTE_FLOW_ACTION_TYPE_COUNT:
+ {
+ const struct rte_flow_action_count *conf = action->conf;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "COUNT: Null configuration");
+
+ if (conf->shared)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ conf,
+ "COUNT: Shared counters not supported");
+
+ if (n_count)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one COUNT action per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_STATS)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "COUNT action not supported by this table");
+
+ n_count = 1;
+
+ /* RTE_TABLE_ACTION_STATS */
+ rule_action->stats.n_packets = 0;
+ rule_action->stats.n_bytes = 0;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_STATS;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_COUNT */
+
+ case RTE_FLOW_ACTION_TYPE_MARK:
+ {
+ const struct rte_flow_action_mark *conf = action->conf;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "MARK: Null configuration");
+
+ if (n_mark)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one MARK action per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_TAG)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "MARK action not supported by this table");
+
+ n_mark = 1;
+
+ /* RTE_TABLE_ACTION_TAG */
+ rule_action->tag.tag = conf->id;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_TAG;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_MARK */
+
+ case RTE_FLOW_ACTION_TYPE_VXLAN_DECAP:
+ {
+ const struct rte_flow_action_mark *conf = action->conf;
+
+ if (conf)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "VXLAN DECAP: Non-null configuration");
+
+ if (n_vxlan_decap)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Only one VXLAN DECAP action per flow");
+
+ if ((params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_DECAP)) == 0)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "VXLAN DECAP action not supported by this table");
+
+ n_vxlan_decap = 1;
+
+ /* RTE_TABLE_ACTION_DECAP */
+ rule_action->decap.n = 50; /* Ether/IPv4/UDP/VXLAN */
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_DECAP;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_VXLAN_DECAP */
+
+ case RTE_FLOW_ACTION_TYPE_METER:
+ {
+ const struct rte_flow_action_meter *conf = action->conf;
+ struct softnic_mtr_meter_profile *mp;
+ struct softnic_mtr *m;
+ uint32_t table_id = table - pipeline->table;
+ uint32_t meter_profile_id;
+ int status;
+
+ if ((params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "METER: Table action not supported");
+
+ if (params->mtr.n_tc != 1)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "METER: Multiple TCs not supported");
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "METER: Null configuration");
+
+ m = softnic_mtr_find(softnic, conf->mtr_id);
+
+ if (m == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "METER: Invalid meter ID");
+
+ if (m->flow)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+ NULL,
+ "METER: Meter already attached to a flow");
+
+ meter_profile_id = m->params.meter_profile_id;
+ mp = softnic_mtr_meter_profile_find(softnic, meter_profile_id);
+
+ /* Add meter profile to pipeline table */
+ if (!softnic_pipeline_table_meter_profile_find(table,
+ meter_profile_id)) {
+ struct rte_table_action_meter_profile profile;
+
+ memset(&profile, 0, sizeof(profile));
+ profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
+ profile.trtcm.cir = mp->params.trtcm_rfc2698.cir;
+ profile.trtcm.pir = mp->params.trtcm_rfc2698.pir;
+ profile.trtcm.cbs = mp->params.trtcm_rfc2698.cbs;
+ profile.trtcm.pbs = mp->params.trtcm_rfc2698.pbs;
+
+ status = softnic_pipeline_table_mtr_profile_add(softnic,
+ pipeline->name,
+ table_id,
+ meter_profile_id,
+ &profile);
+ if (status) {
+ rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "METER: Table meter profile add failed");
+ return -1;
+ }
+ }
+
+ /* RTE_TABLE_ACTION_METER */
+ rule_action->mtr.mtr[0].meter_profile_id = meter_profile_id;
+ rule_action->mtr.mtr[0].policer[e_RTE_METER_GREEN] =
+ (enum rte_table_action_policer)m->params.action[RTE_MTR_GREEN];
+ rule_action->mtr.mtr[0].policer[e_RTE_METER_YELLOW] =
+ (enum rte_table_action_policer)m->params.action[RTE_MTR_YELLOW];
+ rule_action->mtr.mtr[0].policer[e_RTE_METER_RED] =
+ (enum rte_table_action_policer)m->params.action[RTE_MTR_RED];
+ rule_action->mtr.tc_mask = 1;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_MTR;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_METER */
+
+ case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
+ {
+ const struct rte_flow_action_vxlan_encap *conf =
+ action->conf;
+ const struct rte_flow_item *item;
+ union flow_item spec, mask;
+ int disabled = 0, status;
+ size_t size;
+
+ if (conf == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "VXLAN ENCAP: Null configuration");
+
+ item = conf->definition;
+ if (item == NULL)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "VXLAN ENCAP: Null configuration definition");
+
+ if (!(params->action_mask &
+ (1LLU << RTE_TABLE_ACTION_ENCAP)))
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+ NULL,
+ "VXLAN ENCAP: Encap action not enabled for this table");
+
+ /* Check for Ether. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_ETH) {
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: first encap item should be ether");
+ }
+ ether_addr_copy(&spec.eth.dst,
+ &rule_action->encap.vxlan.ether.da);
+ ether_addr_copy(&spec.eth.src,
+ &rule_action->encap.vxlan.ether.sa);
+
+ item++;
+
+ /* Check for VLAN. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type == RTE_FLOW_ITEM_TYPE_VLAN) {
+ if (!params->encap.vxlan.vlan)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: vlan encap not supported by table");
+
+ uint16_t tci = rte_ntohs(spec.vlan.tci);
+ rule_action->encap.vxlan.vlan.pcp =
+ tci >> 13;
+ rule_action->encap.vxlan.vlan.dei =
+ (tci >> 12) & 0x1;
+ rule_action->encap.vxlan.vlan.vid =
+ tci & 0xfff;
+
+ item++;
+
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec,
+ &mask, &size, &disabled, error);
+ if (status)
+ return status;
+ } else {
+ if (params->encap.vxlan.vlan)
+ return rte_flow_error_set(error,
+ ENOTSUP,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: expecting vlan encap item");
+ }
+
+ /* Check for IPV4/IPV6. */
+ switch (item->type) {
+ case RTE_FLOW_ITEM_TYPE_IPV4:
+ {
+ rule_action->encap.vxlan.ipv4.sa =
+ rte_ntohl(spec.ipv4.hdr.src_addr);
+ rule_action->encap.vxlan.ipv4.da =
+ rte_ntohl(spec.ipv4.hdr.dst_addr);
+ rule_action->encap.vxlan.ipv4.dscp =
+ spec.ipv4.hdr.type_of_service >> 2;
+ rule_action->encap.vxlan.ipv4.ttl =
+ spec.ipv4.hdr.time_to_live;
+ break;
+ }
+ case RTE_FLOW_ITEM_TYPE_IPV6:
+ {
+ uint32_t vtc_flow;
+
+ memcpy(&rule_action->encap.vxlan.ipv6.sa,
+ &spec.ipv6.hdr.src_addr,
+ sizeof(spec.ipv6.hdr.src_addr));
+ memcpy(&rule_action->encap.vxlan.ipv6.da,
+ &spec.ipv6.hdr.dst_addr,
+ sizeof(spec.ipv6.hdr.dst_addr));
+ vtc_flow = rte_ntohl(spec.ipv6.hdr.vtc_flow);
+ rule_action->encap.vxlan.ipv6.flow_label =
+ vtc_flow & 0xfffff;
+ rule_action->encap.vxlan.ipv6.dscp =
+ (vtc_flow >> 22) & 0x3f;
+ rule_action->encap.vxlan.ipv6.hop_limit =
+ spec.ipv6.hdr.hop_limits;
+ break;
+ }
+ default:
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: encap item after ether should be ipv4/ipv6");
+ }
+
+ item++;
+
+ /* Check for UDP. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_UDP) {
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: encap item after ipv4/ipv6 should be udp");
+ }
+ rule_action->encap.vxlan.udp.sp =
+ rte_ntohs(spec.udp.hdr.src_port);
+ rule_action->encap.vxlan.udp.dp =
+ rte_ntohs(spec.udp.hdr.dst_port);
+
+ item++;
+
+ /* Check for VXLAN. */
+ flow_item_skip_void(&item);
+ status = flow_item_proto_preprocess(item, &spec, &mask,
+ &size, &disabled, error);
+ if (status)
+ return status;
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_VXLAN) {
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: encap item after udp should be vxlan");
+ }
+ rule_action->encap.vxlan.vxlan.vni =
+ (spec.vxlan.vni[0] << 16U |
+ spec.vxlan.vni[1] << 8U
+ | spec.vxlan.vni[2]);
+
+ item++;
+
+ /* Check for END. */
+ flow_item_skip_void(&item);
+
+ if (item->type != RTE_FLOW_ITEM_TYPE_END)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ITEM,
+ item,
+ "VXLAN ENCAP: expecting END item");
+
+ rule_action->encap.type = RTE_TABLE_ACTION_ENCAP_VXLAN;
+ rule_action->action_mask |= 1 << RTE_TABLE_ACTION_ENCAP;
+ break;
+ } /* RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP */
+
+ default:
+ return -ENOTSUP;
+ }
+ }
+
+ if (n_jump_queue_rss_drop == 0)
+ return rte_flow_error_set(error,
+ EINVAL,
+ RTE_FLOW_ERROR_TYPE_ACTION,
+ action,
+ "Flow does not have any terminating action");
+
+ return 0;
+}
+
+/*
+ * rte_flow validate callback: run the full create-time checks (attr/table
+ * lookup, match parsing, action parsing) without installing any rule.
+ * Returns 0 on success or a negative errno set via rte_flow_error_set().
+ */
+static int
+pmd_flow_validate(struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_flow_item item[],
+	const struct rte_flow_action action[],
+	struct rte_flow_error *error)
+{
+	struct softnic_table_rule_match rule_match;
+	struct softnic_table_rule_action rule_action;
+
+	struct pmd_internals *softnic = dev->data->dev_private;
+	struct pipeline *pipeline;
+	struct softnic_table *table;
+	const char *pipeline_name = NULL;
+	uint32_t table_id = 0;
+	int status;
+
+	/* Check input parameters. */
+	if (attr == NULL)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR,
+			NULL, "Null attr");
+
+	if (item == NULL)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL,
+			"Null item");
+
+	if (action == NULL)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			NULL,
+			"Null action");
+
+	/* Identify the pipeline table to add this flow to. */
+	status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
+					&table_id, error);
+	if (status)
+		return status;
+
+	pipeline = softnic_pipeline_find(softnic, pipeline_name);
+	if (pipeline == NULL)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Invalid pipeline name");
+
+	if (table_id >= pipeline->n_tables)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Invalid pipeline table ID");
+
+	table = &pipeline->table[table_id];
+
+	/* Rule match: parse the item list; errors are reported via *error. */
+	memset(&rule_match, 0, sizeof(rule_match));
+	status = flow_rule_match_get(softnic,
+			pipeline,
+			table,
+			attr,
+			item,
+			&rule_match,
+			error);
+	if (status)
+		return status;
+
+	/* Rule action: parse the action list; result is discarded (dry run). */
+	memset(&rule_action, 0, sizeof(rule_action));
+	status = flow_rule_action_get(softnic,
+			pipeline,
+			table,
+			attr,
+			action,
+			&rule_action,
+			error);
+	if (status)
+		return status;
+
+	return 0;
+}
+
+/*
+ * Scan an action list for the first METER action and return the matching
+ * softnic MTR object, or NULL if there is no METER action, its conf is
+ * NULL, or no MTR object with that id exists.
+ */
+static struct softnic_mtr *
+flow_action_meter_get(struct pmd_internals *softnic,
+	const struct rte_flow_action *action)
+{
+	for ( ; action->type != RTE_FLOW_ACTION_TYPE_END; action++)
+		if (action->type == RTE_FLOW_ACTION_TYPE_METER) {
+			const struct rte_flow_action_meter *conf = action->conf;
+
+			if (conf == NULL)
+				return NULL;
+
+			return softnic_mtr_find(softnic, conf->mtr_id);
+		}
+
+	return NULL;
+}
+
+/*
+ * Detach the given flow from whichever MTR object currently references it.
+ * At most one meter owns a flow, so the scan stops at the first match.
+ */
+static void
+flow_meter_owner_reset(struct pmd_internals *softnic,
+	struct rte_flow *flow)
+{
+	struct softnic_mtr_list *ml = &softnic->mtr.mtrs;
+	struct softnic_mtr *m;
+
+	TAILQ_FOREACH(m, ml, node)
+		if (m->flow == flow) {
+			m->flow = NULL;
+			break;
+		}
+}
+
+/*
+ * Make *mtr* the owner of *flow*: clear any previous meter that pointed at
+ * this flow, then record the new association on the meter object.
+ */
+static void
+flow_meter_owner_set(struct pmd_internals *softnic,
+	struct rte_flow *flow,
+	struct softnic_mtr *mtr)
+{
+	/* Reset current flow meter */
+	flow_meter_owner_reset(softnic, flow);
+
+	/* Set new flow meter */
+	mtr->flow = flow;
+}
+
+/*
+ * Return 1 when the table's action profile has the MTR (metering) table
+ * action enabled, 0 otherwise.
+ *
+ * Fix: the original dereferenced the profile returned by
+ * softnic_table_action_profile_find() without checking for NULL, which
+ * crashes if the profile cannot be found by name.
+ */
+static int
+is_meter_action_enable(struct pmd_internals *softnic,
+	struct softnic_table *table)
+{
+	struct softnic_table_action_profile *profile =
+		softnic_table_action_profile_find(softnic,
+			table->params.action_profile_name);
+	struct softnic_table_action_profile_params *params;
+
+	/* Guard against a missing action profile to avoid a NULL deref. */
+	if (profile == NULL)
+		return 0;
+
+	params = &profile->params;
+
+	return (params->action_mask & (1LLU << RTE_TABLE_ACTION_MTR)) ? 1 : 0;
+}
+
+/*
+ * rte_flow create callback: validate inputs, translate the flow spec into a
+ * softnic table rule, install the rule in the target pipeline table, and
+ * track the flow on the table's flow list.
+ *
+ * Returns the new (or updated) flow handle, or NULL with *error filled in.
+ * Note: per rte_flow driver convention, failure is reported through *error,
+ * not a return code, since the return type is a pointer.
+ */
+static struct rte_flow *
+pmd_flow_create(struct rte_eth_dev *dev,
+	const struct rte_flow_attr *attr,
+	const struct rte_flow_item item[],
+	const struct rte_flow_action action[],
+	struct rte_flow_error *error)
+{
+	struct softnic_table_rule_match rule_match;
+	struct softnic_table_rule_action rule_action;
+	void *rule_data;
+
+	struct pmd_internals *softnic = dev->data->dev_private;
+	struct pipeline *pipeline;
+	struct softnic_table *table;
+	struct rte_flow *flow;
+	struct softnic_mtr *mtr;
+	const char *pipeline_name = NULL;
+	uint32_t table_id = 0;
+	int new_flow, status;
+
+	/* Check input parameters. */
+	if (attr == NULL) {
+		rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ATTR,
+			NULL,
+			"Null attr");
+		return NULL;
+	}
+
+	if (item == NULL) {
+		rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ITEM,
+			NULL,
+			"Null item");
+		return NULL;
+	}
+
+	if (action == NULL) {
+		rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_ACTION,
+			NULL,
+			"Null action");
+		return NULL;
+	}
+
+	/* Identify the pipeline table to add this flow to. */
+	status = flow_pipeline_table_get(softnic, attr, &pipeline_name,
+					&table_id, error);
+	if (status)
+		return NULL;
+
+	pipeline = softnic_pipeline_find(softnic, pipeline_name);
+	if (pipeline == NULL) {
+		rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Invalid pipeline name");
+		return NULL;
+	}
+
+	if (table_id >= pipeline->n_tables) {
+		rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Invalid pipeline table ID");
+		return NULL;
+	}
+
+	table = &pipeline->table[table_id];
+
+	/* Rule match: translate the item list into a table rule match. */
+	memset(&rule_match, 0, sizeof(rule_match));
+	status = flow_rule_match_get(softnic,
+			pipeline,
+			table,
+			attr,
+			item,
+			&rule_match,
+			error);
+	if (status)
+		return NULL;
+
+	/* Rule action: translate the action list into a table rule action. */
+	memset(&rule_action, 0, sizeof(rule_action));
+	status = flow_rule_action_get(softnic,
+			pipeline,
+			table,
+			attr,
+			action,
+			&rule_action,
+			error);
+	if (status)
+		return NULL;
+
+	/* Flow find/allocate: a flow with an identical match is updated in
+	 * place; otherwise a new flow object is allocated.
+	 */
+	new_flow = 0;
+	flow = softnic_flow_find(table, &rule_match);
+	if (flow == NULL) {
+		new_flow = 1;
+		flow = calloc(1, sizeof(struct rte_flow));
+		if (flow == NULL) {
+			rte_flow_error_set(error,
+				ENOMEM,
+				RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Not enough memory for new flow");
+			return NULL;
+		}
+	}
+
+	/* Rule add. */
+	status = softnic_pipeline_table_rule_add(softnic,
+		pipeline_name,
+		table_id,
+		&rule_match,
+		&rule_action,
+		&rule_data);
+	if (status) {
+		/* Only free a flow we allocated above, never a pre-existing one. */
+		if (new_flow)
+			free(flow);
+
+		rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Pipeline table rule add failed");
+		return NULL;
+	}
+
+	/* Flow fill in. */
+	memcpy(&flow->match, &rule_match, sizeof(rule_match));
+	memcpy(&flow->action, &rule_action, sizeof(rule_action));
+	flow->data = rule_data;
+	flow->pipeline = pipeline;
+	flow->table_id = table_id;
+
+	/* If the flow carries a METER action, record this flow as the meter's
+	 * owner so meter updates can locate and re-program the rule.
+	 */
+	mtr = flow_action_meter_get(softnic, action);
+	if (mtr)
+		flow_meter_owner_set(softnic, flow, mtr);
+
+	/* Flow add to list. */
+	if (new_flow)
+		TAILQ_INSERT_TAIL(&table->flows, flow, node);
+
+	return flow;
+}
+
+/*
+ * rte_flow destroy callback: delete the underlying pipeline table rule,
+ * detach any meter that owns this flow, then unlink and free the flow.
+ */
+static int
+pmd_flow_destroy(struct rte_eth_dev *dev,
+	struct rte_flow *flow,
+	struct rte_flow_error *error)
+{
+	struct pmd_internals *softnic = dev->data->dev_private;
+	struct softnic_table *table;
+	int status;
+
+	/* Check input parameters. */
+	if (flow == NULL)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE,
+			NULL,
+			"Null flow");
+
+	table = &flow->pipeline->table[flow->table_id];
+
+	/* Rule delete. */
+	status = softnic_pipeline_table_rule_delete(softnic,
+		flow->pipeline->name,
+		flow->table_id,
+		&flow->match);
+	if (status)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Pipeline table rule delete failed");
+
+	/* Update dependencies: clear the meter -> flow back-pointer, but only
+	 * when the table supports metering (otherwise no meter can own it).
+	 */
+	if (is_meter_action_enable(softnic, table))
+		flow_meter_owner_reset(softnic, flow);
+
+	/* Flow delete. */
+	TAILQ_REMOVE(&table->flows, flow, node);
+	free(flow);
+
+	return 0;
+}
+
+/*
+ * rte_flow flush callback: remove every flow from every table of every
+ * pipeline. Rule deletion failures are recorded but do not stop the walk;
+ * all flow objects are still unlinked and freed, and a single error is
+ * reported at the end if any rule could not be deleted.
+ */
+static int
+pmd_flow_flush(struct rte_eth_dev *dev,
+	struct rte_flow_error *error)
+{
+	struct pmd_internals *softnic = dev->data->dev_private;
+	struct pipeline *pipeline;
+	int fail_to_del_rule = 0;
+	uint32_t i;
+
+	TAILQ_FOREACH(pipeline, &softnic->pipeline_list, node) {
+		/* Remove all the flows added to the tables. */
+		for (i = 0; i < pipeline->n_tables; i++) {
+			struct softnic_table *table = &pipeline->table[i];
+			struct rte_flow *flow;
+			void *temp;
+			int status;
+
+			/* _SAFE variant: the loop frees the current node. */
+			TAILQ_FOREACH_SAFE(flow, &table->flows, node, temp) {
+				/* Rule delete. */
+				status = softnic_pipeline_table_rule_delete
+						(softnic,
+						pipeline->name,
+						i,
+						&flow->match);
+				if (status)
+					fail_to_del_rule = 1;
+				/* Update dependencies */
+				if (is_meter_action_enable(softnic, table))
+					flow_meter_owner_reset(softnic, flow);
+
+				/* Flow delete. */
+				TAILQ_REMOVE(&table->flows, flow, node);
+				free(flow);
+			}
+		}
+	}
+
+	if (fail_to_del_rule)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Some of the rules could not be deleted");
+
+	return 0;
+}
+
+/*
+ * rte_flow query callback (COUNT): read the table-action stats counters for
+ * the flow's rule and fill in the caller's rte_flow_query_count structure.
+ * *data* must point to a struct rte_flow_query_count; its *reset* field is
+ * forwarded so counters can optionally be cleared on read.
+ */
+static int
+pmd_flow_query(struct rte_eth_dev *dev __rte_unused,
+	struct rte_flow *flow,
+	const struct rte_flow_action *action __rte_unused,
+	void *data,
+	struct rte_flow_error *error)
+{
+	struct rte_table_action_stats_counters stats;
+	struct softnic_table *table;
+	struct rte_flow_query_count *flow_stats = data;
+	int status;
+
+	/* Check input parameters. */
+	if (flow == NULL)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_HANDLE,
+			NULL,
+			"Null flow");
+
+	if (data == NULL)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Null data");
+
+	table = &flow->pipeline->table[flow->table_id];
+
+	/* Rule stats read. */
+	status = rte_table_action_stats_read(table->a,
+		flow->data,
+		&stats,
+		flow_stats->reset);
+	if (status)
+		return rte_flow_error_set(error,
+			EINVAL,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Pipeline table rule stats read failed");
+
+	/* Fill in flow stats: the *_set flags mirror which counters the
+	 * table action profile actually enabled.
+	 */
+	flow_stats->hits_set =
+		(table->ap->params.stats.n_packets_enabled) ? 1 : 0;
+	flow_stats->bytes_set =
+		(table->ap->params.stats.n_bytes_enabled) ? 1 : 0;
+	flow_stats->hits = stats.n_packets;
+	flow_stats->bytes = stats.n_bytes;
+
+	return 0;
+}
+
+/* rte_flow driver ops exported by the softnic PMD; isolate is unsupported. */
+const struct rte_flow_ops pmd_flow_ops = {
+	.validate = pmd_flow_validate,
+	.create = pmd_flow_create,
+	.destroy = pmd_flow_destroy,
+	.flush = pmd_flow_flush,
+	.query = pmd_flow_query,
+	.isolate = NULL,
+};
diff --git a/drivers/net/softnic/rte_eth_softnic_internals.h b/drivers/net/softnic/rte_eth_softnic_internals.h
index a25eb874..e12b8ae4 100644
--- a/drivers/net/softnic/rte_eth_softnic_internals.h
+++ b/drivers/net/softnic/rte_eth_softnic_internals.h
@@ -18,8 +18,11 @@
#include <rte_table_action.h>
#include <rte_pipeline.h>
+#include <rte_ethdev_core.h>
#include <rte_ethdev_driver.h>
#include <rte_tm_driver.h>
+#include <rte_flow_driver.h>
+#include <rte_mtr_driver.h>
#include "rte_eth_softnic.h"
#include "conn.h"
@@ -44,6 +47,57 @@ struct pmd_params {
};
/**
+ * Ethdev Flow API
+ */
+struct rte_flow;
+
+TAILQ_HEAD(flow_list, rte_flow);
+
+struct flow_attr_map {
+ char pipeline_name[NAME_SIZE];
+ uint32_t table_id;
+ int valid;
+};
+
+#ifndef SOFTNIC_FLOW_MAX_GROUPS
+#define SOFTNIC_FLOW_MAX_GROUPS 64
+#endif
+
+struct flow_internals {
+ struct flow_attr_map ingress_map[SOFTNIC_FLOW_MAX_GROUPS];
+ struct flow_attr_map egress_map[SOFTNIC_FLOW_MAX_GROUPS];
+};
+
+/**
+ * Meter
+ */
+
+/* MTR meter profile */
+struct softnic_mtr_meter_profile {
+ TAILQ_ENTRY(softnic_mtr_meter_profile) node;
+ uint32_t meter_profile_id;
+ struct rte_mtr_meter_profile params;
+ uint32_t n_users;
+};
+
+TAILQ_HEAD(softnic_mtr_meter_profile_list, softnic_mtr_meter_profile);
+
+/* MTR meter object */
+struct softnic_mtr {
+ TAILQ_ENTRY(softnic_mtr) node;
+ uint32_t mtr_id;
+ struct rte_mtr_params params;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(softnic_mtr_list, softnic_mtr);
+
+struct mtr_internals {
+ struct softnic_mtr_meter_profile_list meter_profiles;
+ struct softnic_mtr_list mtrs;
+};
+
+/**
* MEMPOOL
*/
struct softnic_mempool_params {
@@ -225,6 +279,25 @@ struct softnic_tap {
TAILQ_HEAD(softnic_tap_list, softnic_tap);
/**
+ * Cryptodev
+ */
+struct softnic_cryptodev_params {
+ const char *dev_name;
+ uint32_t dev_id; /**< Valid only when *dev_name* is NULL. */
+ uint32_t n_queues;
+ uint32_t queue_size;
+};
+
+struct softnic_cryptodev {
+ TAILQ_ENTRY(softnic_cryptodev) node;
+ char name[NAME_SIZE];
+ uint16_t dev_id;
+ uint32_t n_queues;
+};
+
+TAILQ_HEAD(softnic_cryptodev_list, softnic_cryptodev);
+
+/**
* Input port action
*/
struct softnic_port_in_action_profile_params {
@@ -255,6 +328,7 @@ struct softnic_table_action_profile_params {
struct rte_table_action_nat_config nat;
struct rte_table_action_ttl_config ttl;
struct rte_table_action_stats_config stats;
+ struct rte_table_action_sym_crypto_config sym_crypto;
};
struct softnic_table_action_profile {
@@ -266,6 +340,15 @@ struct softnic_table_action_profile {
TAILQ_HEAD(softnic_table_action_profile_list, softnic_table_action_profile);
+struct softnic_table_meter_profile {
+ TAILQ_ENTRY(softnic_table_meter_profile) node;
+ uint32_t meter_profile_id;
+ struct rte_table_action_meter_profile profile;
+};
+
+TAILQ_HEAD(softnic_table_meter_profile_list,
+ softnic_table_meter_profile);
+
/**
* Pipeline
*/
@@ -280,12 +363,13 @@ enum softnic_port_in_type {
PORT_IN_TMGR,
PORT_IN_TAP,
PORT_IN_SOURCE,
+ PORT_IN_CRYPTODEV,
};
struct softnic_port_in_params {
/* Read */
enum softnic_port_in_type type;
- const char *dev_name;
+ char dev_name[NAME_SIZE];
union {
struct {
uint16_t queue_id;
@@ -301,11 +385,17 @@ struct softnic_port_in_params {
const char *file_name;
uint32_t n_bytes_per_pkt;
} source;
+
+ struct {
+ uint16_t queue_id;
+ void *f_callback;
+ void *arg_callback;
+ } cryptodev;
};
uint32_t burst_size;
/* Action */
- const char *action_profile_name;
+ char action_profile_name[NAME_SIZE];
};
enum softnic_port_out_type {
@@ -314,11 +404,12 @@ enum softnic_port_out_type {
PORT_OUT_TMGR,
PORT_OUT_TAP,
PORT_OUT_SINK,
+ PORT_OUT_CRYPTODEV,
};
struct softnic_port_out_params {
enum softnic_port_out_type type;
- const char *dev_name;
+ char dev_name[NAME_SIZE];
union {
struct {
uint16_t queue_id;
@@ -328,6 +419,11 @@ struct softnic_port_out_params {
const char *file_name;
uint32_t max_n_pkts;
} sink;
+
+ struct {
+ uint16_t queue_id;
+ uint32_t op_offset;
+ } cryptodev;
};
uint32_t burst_size;
int retry;
@@ -353,11 +449,15 @@ struct softnic_table_array_params {
uint32_t key_offset;
};
+#ifndef TABLE_RULE_MATCH_SIZE_MAX
+#define TABLE_RULE_MATCH_SIZE_MAX 256
+#endif
+
struct softnic_table_hash_params {
uint32_t n_keys;
uint32_t key_offset;
uint32_t key_size;
- uint8_t *key_mask;
+ uint8_t key_mask[TABLE_RULE_MATCH_SIZE_MAX];
uint32_t n_buckets;
int extendable_bucket;
};
@@ -379,7 +479,7 @@ struct softnic_table_params {
} match;
/* Action */
- const char *action_profile_name;
+ char action_profile_name[NAME_SIZE];
};
struct softnic_port_in {
@@ -388,10 +488,17 @@ struct softnic_port_in {
struct rte_port_in_action *a;
};
+struct softnic_port_out {
+ struct softnic_port_out_params params;
+};
+
struct softnic_table {
struct softnic_table_params params;
struct softnic_table_action_profile *ap;
struct rte_table_action *a;
+ struct flow_list flows;
+ struct rte_table_action_dscp_table dscp_table;
+ struct softnic_table_meter_profile_list meter_profiles;
};
struct pipeline {
@@ -399,7 +506,9 @@ struct pipeline {
char name[NAME_SIZE];
struct rte_pipeline *p;
+ struct pipeline_params params;
struct softnic_port_in port_in[RTE_PIPELINE_PORT_IN_MAX];
+ struct softnic_port_out port_out[RTE_PIPELINE_PORT_OUT_MAX];
struct softnic_table table[RTE_PIPELINE_TABLE_MAX];
uint32_t n_ports_in;
uint32_t n_ports_out;
@@ -489,12 +598,16 @@ struct pmd_internals {
struct tm_internals tm; /**< Traffic Management */
} soft;
+ struct flow_internals flow;
+ struct mtr_internals mtr;
+
struct softnic_conn *conn;
struct softnic_mempool_list mempool_list;
struct softnic_swq_list swq_list;
struct softnic_link_list link_list;
struct softnic_tmgr_port_list tmgr_port_list;
struct softnic_tap_list tap_list;
+ struct softnic_cryptodev_list cryptodev_list;
struct softnic_port_in_action_profile_list port_in_action_profile_list;
struct softnic_table_action_profile_list table_action_profile_list;
struct pipeline_list pipeline_list;
@@ -502,6 +615,58 @@ struct pmd_internals {
struct softnic_thread_data thread_data[RTE_MAX_LCORE];
};
+static inline struct rte_eth_dev *
+ETHDEV(struct pmd_internals *softnic)
+{
+ uint16_t port_id;
+ int status;
+
+ if (softnic == NULL)
+ return NULL;
+
+ status = rte_eth_dev_get_port_by_name(softnic->params.name, &port_id);
+ if (status)
+ return NULL;
+
+ return &rte_eth_devices[port_id];
+}
+
+/**
+ * Ethdev Flow API
+ */
+int
+flow_attr_map_set(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress,
+ const char *pipeline_name,
+ uint32_t table_id);
+
+struct flow_attr_map *
+flow_attr_map_get(struct pmd_internals *softnic,
+ uint32_t group_id,
+ int ingress);
+
+extern const struct rte_flow_ops pmd_flow_ops;
+
+/**
+ * Meter
+ */
+int
+softnic_mtr_init(struct pmd_internals *p);
+
+void
+softnic_mtr_free(struct pmd_internals *p);
+
+struct softnic_mtr *
+softnic_mtr_find(struct pmd_internals *p,
+ uint32_t mtr_id);
+
+struct softnic_mtr_meter_profile *
+softnic_mtr_meter_profile_find(struct pmd_internals *p,
+ uint32_t meter_profile_id);
+
+extern const struct rte_mtr_ops pmd_mtr_ops;
+
/**
* MEMPOOL
*/
@@ -610,6 +775,24 @@ softnic_tap_create(struct pmd_internals *p,
const char *name);
/**
+ * Sym Crypto
+ */
+int
+softnic_cryptodev_init(struct pmd_internals *p);
+
+void
+softnic_cryptodev_free(struct pmd_internals *p);
+
+struct softnic_cryptodev *
+softnic_cryptodev_find(struct pmd_internals *p,
+ const char *name);
+
+struct softnic_cryptodev *
+softnic_cryptodev_create(struct pmd_internals *p,
+ const char *name,
+ struct softnic_cryptodev_params *params);
+
+/**
* Input port action
*/
int
@@ -683,10 +866,20 @@ softnic_pipeline_port_out_create(struct pmd_internals *p,
struct softnic_port_out_params *params);
int
+softnic_pipeline_port_out_find(struct pmd_internals *softnic,
+ const char *pipeline_name,
+ const char *name,
+ uint32_t *port_id);
+
+int
softnic_pipeline_table_create(struct pmd_internals *p,
const char *pipeline_name,
struct softnic_table_params *params);
+struct softnic_table_meter_profile *
+softnic_pipeline_table_meter_profile_find(struct softnic_table *table,
+ uint32_t meter_profile_id);
+
struct softnic_table_rule_match_acl {
int ip_version;
@@ -718,10 +911,6 @@ struct softnic_table_rule_match_array {
uint32_t pos;
};
-#ifndef TABLE_RULE_MATCH_SIZE_MAX
-#define TABLE_RULE_MATCH_SIZE_MAX 256
-#endif
-
struct softnic_table_rule_match_hash {
uint8_t key[TABLE_RULE_MATCH_SIZE_MAX];
};
@@ -760,6 +949,18 @@ struct softnic_table_rule_action {
struct rte_table_action_ttl_params ttl;
struct rte_table_action_stats_params stats;
struct rte_table_action_time_params time;
+ struct rte_table_action_tag_params tag;
+ struct rte_table_action_decap_params decap;
+ struct rte_table_action_sym_crypto_params sym_crypto;
+};
+
+struct rte_flow {
+ TAILQ_ENTRY(rte_flow) node;
+ struct softnic_table_rule_match match;
+ struct softnic_table_rule_action action;
+ void *data;
+ struct pipeline *pipeline;
+ uint32_t table_id;
};
int
diff --git a/drivers/net/softnic/rte_eth_softnic_meter.c b/drivers/net/softnic/rte_eth_softnic_meter.c
new file mode 100644
index 00000000..73ecf3b1
--- /dev/null
+++ b/drivers/net/softnic/rte_eth_softnic_meter.c
@@ -0,0 +1,728 @@
+/* SPDX-License-Identifier: BSD-3-Clause
+ * Copyright(c) 2018 Intel Corporation
+ */
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <rte_mtr.h>
+#include <rte_mtr_driver.h>
+
+#include "rte_eth_softnic_internals.h"
+
+/*
+ * Initialize the per-device metering state: the meter-profile list and the
+ * MTR object list. Always succeeds (returns 0).
+ */
+int
+softnic_mtr_init(struct pmd_internals *p)
+{
+	/* Initialize meter profiles list */
+	TAILQ_INIT(&p->mtr.meter_profiles);
+
+	/* Initialize MTR objects list */
+	TAILQ_INIT(&p->mtr.mtrs);
+
+	return 0;
+}
+
+/*
+ * Tear down the per-device metering state: free every MTR object, then
+ * every meter profile. MTR objects are freed first since they reference
+ * profiles via n_users.
+ */
+void
+softnic_mtr_free(struct pmd_internals *p)
+{
+	/* Remove MTR objects */
+	for ( ; ; ) {
+		struct softnic_mtr *m;
+
+		m = TAILQ_FIRST(&p->mtr.mtrs);
+		if (m == NULL)
+			break;
+
+		TAILQ_REMOVE(&p->mtr.mtrs, m, node);
+		free(m);
+	}
+
+	/* Remove meter profiles */
+	for ( ; ; ) {
+		struct softnic_mtr_meter_profile *mp;
+
+		mp = TAILQ_FIRST(&p->mtr.meter_profiles);
+		if (mp == NULL)
+			break;
+
+		TAILQ_REMOVE(&p->mtr.meter_profiles, mp, node);
+		free(mp);
+	}
+}
+
+/*
+ * Linear lookup of a meter profile by id on the device's profile list.
+ * Returns NULL when no profile with that id exists.
+ */
+struct softnic_mtr_meter_profile *
+softnic_mtr_meter_profile_find(struct pmd_internals *p,
+	uint32_t meter_profile_id)
+{
+	struct softnic_mtr_meter_profile_list *mpl = &p->mtr.meter_profiles;
+	struct softnic_mtr_meter_profile *mp;
+
+	TAILQ_FOREACH(mp, mpl, node)
+		if (meter_profile_id == mp->meter_profile_id)
+			return mp;
+
+	return NULL;
+}
+
+/*
+ * Validate the inputs of pmd_mtr_meter_profile_add(): the id must be usable,
+ * no profile with that id may already exist, the profile pointer must be
+ * non-NULL, and only the TRTCM RFC 2698 algorithm is supported.
+ * Returns 0 on success or a negative errno via rte_mtr_error_set().
+ *
+ * Fix: corrected the typo "Meter prfile" -> "Meter profile" in the
+ * user-visible error message.
+ */
+static int
+meter_profile_check(struct rte_eth_dev *dev,
+	uint32_t meter_profile_id,
+	struct rte_mtr_meter_profile *profile,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct softnic_mtr_meter_profile *mp;
+
+	/* Meter profile ID must be valid (UINT32_MAX is reserved). */
+	if (meter_profile_id == UINT32_MAX)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+			NULL,
+			"Meter profile id not valid");
+
+	/* Meter profile must not exist. */
+	mp = softnic_mtr_meter_profile_find(p, meter_profile_id);
+	if (mp)
+		return -rte_mtr_error_set(error,
+			EEXIST,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+			NULL,
+			"Meter profile already exists");
+
+	/* Profile must not be NULL. */
+	if (profile == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE,
+			NULL,
+			"profile null");
+
+	/* Traffic metering algorithm : TRTCM_RFC2698 */
+	if (profile->alg != RTE_MTR_TRTCM_RFC2698)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE,
+			NULL,
+			"Metering alg not supported");
+
+	return 0;
+}
+
+/* MTR meter profile add: validate, allocate, copy the caller's params and
+ * append to the device's profile list. n_users starts at 0 (calloc).
+ */
+static int
+pmd_mtr_meter_profile_add(struct rte_eth_dev *dev,
+	uint32_t meter_profile_id,
+	struct rte_mtr_meter_profile *profile,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct softnic_mtr_meter_profile_list *mpl = &p->mtr.meter_profiles;
+	struct softnic_mtr_meter_profile *mp;
+	int status;
+
+	/* Check input params */
+	status = meter_profile_check(dev, meter_profile_id, profile, error);
+	if (status)
+		return status;
+
+	/* Memory allocation */
+	mp = calloc(1, sizeof(struct softnic_mtr_meter_profile));
+	if (mp == NULL)
+		return -rte_mtr_error_set(error,
+			ENOMEM,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Memory alloc failed");
+
+	/* Fill in */
+	mp->meter_profile_id = meter_profile_id;
+	memcpy(&mp->params, profile, sizeof(mp->params));
+
+	/* Add to list */
+	TAILQ_INSERT_TAIL(mpl, mp, node);
+
+	return 0;
+}
+
+/* MTR meter profile delete: refuse when the profile is unknown or still
+ * referenced by MTR objects (n_users > 0); otherwise unlink and free it.
+ */
+static int
+pmd_mtr_meter_profile_delete(struct rte_eth_dev *dev,
+	uint32_t meter_profile_id,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct softnic_mtr_meter_profile *mp;
+
+	/* Meter profile must exist */
+	mp = softnic_mtr_meter_profile_find(p, meter_profile_id);
+	if (mp == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+			NULL,
+			"Meter profile id invalid");
+
+	/* Check unused */
+	if (mp->n_users)
+		return -rte_mtr_error_set(error,
+			EBUSY,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+			NULL,
+			"Meter profile in use");
+
+	/* Remove from list */
+	TAILQ_REMOVE(&p->mtr.meter_profiles, mp, node);
+	free(mp);
+
+	return 0;
+}
+
+/*
+ * Linear lookup of an MTR object by id on the device's MTR list.
+ * Returns NULL when no object with that id exists.
+ */
+struct softnic_mtr *
+softnic_mtr_find(struct pmd_internals *p, uint32_t mtr_id)
+{
+	struct softnic_mtr_list *ml = &p->mtr.mtrs;
+	struct softnic_mtr *m;
+
+	TAILQ_FOREACH(m, ml, node)
+		if (m->mtr_id == mtr_id)
+			return m;
+
+	return NULL;
+}
+
+
+/*
+ * Validate the inputs of pmd_mtr_create(): the id must be unused, params
+ * non-NULL, and neither previous-meter-color input nor shared MTR objects
+ * are supported by this PMD.
+ * Returns 0 on success or a negative errno via rte_mtr_error_set().
+ */
+static int
+mtr_check(struct pmd_internals *p,
+	uint32_t mtr_id,
+	struct rte_mtr_params *params,
+	int shared,
+	struct rte_mtr_error *error)
+{
+	/* MTR id valid */
+	if (softnic_mtr_find(p, mtr_id))
+		return -rte_mtr_error_set(error,
+			EEXIST,
+			RTE_MTR_ERROR_TYPE_MTR_ID,
+			NULL,
+			"MTR object already exists");
+
+	/* MTR params must not be NULL */
+	if (params == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+			NULL,
+			"MTR object params null");
+
+	/* Previous meter color not supported */
+	if (params->use_prev_mtr_color)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+			NULL,
+			"Previous meter color not supported");
+
+	/* Shared MTR object not supported */
+	if (shared)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_SHARED,
+			NULL,
+			"Shared MTR object not supported");
+
+	return 0;
+}
+
+/* MTR object create: validate, resolve the referenced meter profile,
+ * allocate the MTR object, append it to the device list and bump the
+ * profile's user count.
+ */
+static int
+pmd_mtr_create(struct rte_eth_dev *dev,
+	uint32_t mtr_id,
+	struct rte_mtr_params *params,
+	int shared,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct softnic_mtr_list *ml = &p->mtr.mtrs;
+	struct softnic_mtr_meter_profile *mp;
+	struct softnic_mtr *m;
+	int status;
+
+	/* Check parameters */
+	status = mtr_check(p, mtr_id, params, shared, error);
+	if (status)
+		return status;
+
+	/* Meter profile must exist */
+	mp = softnic_mtr_meter_profile_find(p, params->meter_profile_id);
+	if (mp == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+			NULL,
+			"Meter profile id not valid");
+
+	/* Memory allocation */
+	m = calloc(1, sizeof(struct softnic_mtr));
+	if (m == NULL)
+		return -rte_mtr_error_set(error,
+			ENOMEM,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Memory alloc failed");
+
+	/* Fill in */
+	m->mtr_id = mtr_id;
+	memcpy(&m->params, params, sizeof(m->params));
+
+	/* Add to list */
+	TAILQ_INSERT_TAIL(ml, m, node);
+
+	/* Update dependencies: the profile now has one more user. */
+	mp->n_users++;
+
+	return 0;
+}
+
+/* MTR object destroy: refuse when the object is unknown or still owned by a
+ * flow; otherwise drop the profile user count, unlink and free the object.
+ */
+static int
+pmd_mtr_destroy(struct rte_eth_dev *dev,
+	uint32_t mtr_id,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct softnic_mtr_list *ml = &p->mtr.mtrs;
+	struct softnic_mtr_meter_profile *mp;
+	struct softnic_mtr *m;
+
+	/* MTR object must exist */
+	m = softnic_mtr_find(p, mtr_id);
+	if (m == NULL)
+		/* NOTE(review): EEXIST for a missing object looks odd
+		 * (ENOENT would be more natural), but is kept as-is.
+		 */
+		return -rte_mtr_error_set(error,
+			EEXIST,
+			RTE_MTR_ERROR_TYPE_MTR_ID,
+			NULL,
+			"MTR object id not valid");
+
+	/* MTR object must not have any owner */
+	if (m->flow != NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"MTR object is being used");
+
+	/* Get meter profile */
+	mp = softnic_mtr_meter_profile_find(p, m->params.meter_profile_id);
+	if (mp == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+			NULL,
+			"MTR object meter profile invalid");
+
+	/* Update dependencies */
+	mp->n_users--;
+
+	/* Remove from list */
+	TAILQ_REMOVE(ml, m, node);
+	free(m);
+
+	return 0;
+}
+
+/* MTR object meter profile update: switch an MTR object to a different
+ * meter profile. When a flow owns the meter, the owning table is also
+ * re-programmed: the new profile is registered with the table if needed,
+ * and the flow's rule is re-added with the updated meter action.
+ */
+static int
+pmd_mtr_meter_profile_update(struct rte_eth_dev *dev,
+	uint32_t mtr_id,
+	uint32_t meter_profile_id,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct softnic_mtr_meter_profile *mp_new, *mp_old;
+	struct softnic_mtr *m;
+	int status;
+
+	/* MTR object id must be valid */
+	m = softnic_mtr_find(p, mtr_id);
+	if (m == NULL)
+		return -rte_mtr_error_set(error,
+			EEXIST,
+			RTE_MTR_ERROR_TYPE_MTR_ID,
+			NULL,
+			"MTR object id not valid");
+
+	/* Meter profile id must be valid */
+	mp_new = softnic_mtr_meter_profile_find(p, meter_profile_id);
+	if (mp_new == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_METER_PROFILE_ID,
+			NULL,
+			"Meter profile not valid");
+
+	/* MTR object already set to meter profile id: nothing to do. */
+	if (m->params.meter_profile_id == meter_profile_id)
+		return 0;
+
+	/* MTR object owner table update */
+	if (m->flow) {
+		uint32_t table_id = m->flow->table_id;
+		struct softnic_table *table = &m->flow->pipeline->table[table_id];
+		struct softnic_table_rule_action action;
+
+		/* Register the profile with the table on first use there. */
+		if (!softnic_pipeline_table_meter_profile_find(table,
+			meter_profile_id)) {
+			struct rte_table_action_meter_profile profile;
+
+			memset(&profile, 0, sizeof(profile));
+
+			/* Translate the RFC 2698 TRTCM params to the
+			 * table-action meter profile format.
+			 */
+			profile.alg = RTE_TABLE_ACTION_METER_TRTCM;
+			profile.trtcm.cir = mp_new->params.trtcm_rfc2698.cir;
+			profile.trtcm.pir = mp_new->params.trtcm_rfc2698.pir;
+			profile.trtcm.cbs = mp_new->params.trtcm_rfc2698.cbs;
+			profile.trtcm.pbs = mp_new->params.trtcm_rfc2698.pbs;
+
+			/* Add meter profile to pipeline table */
+			status = softnic_pipeline_table_mtr_profile_add(p,
+					m->flow->pipeline->name,
+					table_id,
+					meter_profile_id,
+					&profile);
+			if (status)
+				return -rte_mtr_error_set(error,
+					EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"Table meter profile add failed");
+		}
+
+		/* Set meter action: copy the flow's action, switch profile. */
+		memcpy(&action, &m->flow->action, sizeof(action));
+
+		action.mtr.mtr[0].meter_profile_id = meter_profile_id;
+
+		/* Re-add rule: same match, updated action replaces the rule. */
+		status = softnic_pipeline_table_rule_add(p,
+			m->flow->pipeline->name,
+			table_id,
+			&m->flow->match,
+			&action,
+			&m->flow->data);
+		if (status)
+			return -rte_mtr_error_set(error,
+				EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Pipeline table rule add failed");
+
+		/* Flow: update meter action */
+		memcpy(&m->flow->action, &action, sizeof(m->flow->action));
+	}
+
+	mp_old = softnic_mtr_meter_profile_find(p, m->params.meter_profile_id);
+
+	/* Meter: Set meter profile */
+	m->params.meter_profile_id = meter_profile_id;
+
+	/* Update dependencies: move one user from old profile to new. */
+	mp_old->n_users--;
+	mp_new->n_users++;
+
+	return 0;
+}
+
+/* MTR object meter DSCP table update.
+ *
+ * Replace the DSCP-to-color table of the pipeline table that owns MTR
+ * object @mtr_id. When the object is not owned by a flow there is nothing
+ * to update and success is returned.
+ *
+ * Fix: validate @dscp_table before dereferencing it — the generic rte_mtr
+ * layer forwards the application pointer unchecked.
+ *
+ * Returns 0 on success, negative errno (recorded in @error) on failure.
+ */
+static int
+pmd_mtr_meter_dscp_table_update(struct rte_eth_dev *dev,
+	uint32_t mtr_id,
+	enum rte_mtr_color *dscp_table,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct rte_table_action_dscp_table dt;
+	struct pipeline *pipeline;
+	struct softnic_table *table;
+	struct softnic_mtr *m;
+	uint32_t table_id, i;
+	int status;
+
+	/* MTR object id must be valid */
+	m = softnic_mtr_find(p, mtr_id);
+	if (m == NULL)
+		return -rte_mtr_error_set(error,
+			EEXIST,
+			RTE_MTR_ERROR_TYPE_MTR_ID,
+			NULL,
+			"MTR object id not valid");
+
+	/* DSCP table pointer must be valid */
+	if (dscp_table == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_MTR_PARAMS,
+			NULL,
+			"DSCP table not valid");
+
+	/* MTR object owner valid? Nothing to update otherwise. */
+	if (m->flow == NULL)
+		return 0;
+
+	pipeline = m->flow->pipeline;
+	table_id = m->flow->table_id;
+	table = &pipeline->table[table_id];
+
+	/* Start from the table's current DSCP table, recolor every entry */
+	memcpy(&dt, &table->dscp_table, sizeof(dt));
+	for (i = 0; i < RTE_DIM(dt.entry); i++)
+		dt.entry[i].color = (enum rte_meter_color)dscp_table[i];
+
+	/* Update table (UINT64_MAX: update all DSCP entries) */
+	status = softnic_pipeline_table_dscp_table_update(p,
+		pipeline->name,
+		table_id,
+		UINT64_MAX,
+		&dt);
+	if (status)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Table action dscp table update failed");
+
+	return 0;
+}
+
+/* MTR object policer actions update.
+ *
+ * Update the per-color policer actions of MTR object @mtr_id. @action_mask
+ * selects which colors (bit i = color i) are updated from @actions. When the
+ * object is owned by a flow, the flow rule is re-added with the new policer
+ * actions and the meter stats are reset before the object state is updated.
+ *
+ * Fix: drop the stray leading space from the "Invalid action value" error
+ * message to match the formatting of every other error string in this file.
+ *
+ * Returns 0 on success, negative errno (recorded in @error) on failure.
+ */
+static int
+pmd_mtr_policer_actions_update(struct rte_eth_dev *dev,
+	uint32_t mtr_id,
+	uint32_t action_mask,
+	enum rte_mtr_policer_action *actions,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct softnic_mtr *m;
+	uint32_t i;
+	int status;
+
+	/* MTR object id must be valid */
+	m = softnic_mtr_find(p, mtr_id);
+	if (m == NULL)
+		return -rte_mtr_error_set(error,
+			EEXIST,
+			RTE_MTR_ERROR_TYPE_MTR_ID,
+			NULL,
+			"MTR object id not valid");
+
+	/* Valid policer actions */
+	if (actions == NULL)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Invalid actions");
+
+	/* Every selected action must be one this PMD can map */
+	for (i = 0; i < RTE_MTR_COLORS; i++) {
+		if (action_mask & (1 << i)) {
+			if (actions[i] != MTR_POLICER_ACTION_COLOR_GREEN &&
+				actions[i] != MTR_POLICER_ACTION_COLOR_YELLOW &&
+				actions[i] != MTR_POLICER_ACTION_COLOR_RED &&
+				actions[i] != MTR_POLICER_ACTION_DROP) {
+				return -rte_mtr_error_set(error,
+					EINVAL,
+					RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+					NULL,
+					"Invalid action value");
+			}
+		}
+	}
+
+	/* MTR object owner valid? Update the owner table rule first. */
+	if (m->flow) {
+		struct pipeline *pipeline = m->flow->pipeline;
+		struct softnic_table *table = &pipeline->table[m->flow->table_id];
+		struct softnic_table_rule_action action;
+
+		memcpy(&action, &m->flow->action, sizeof(action));
+
+		/* Set action for each selected color */
+		for (i = 0; i < RTE_MTR_COLORS; i++)
+			if (action_mask & (1 << i))
+				action.mtr.mtr[0].policer[i] =
+					(enum rte_table_action_policer)actions[i];
+
+		/* Re-add the rule (same match overwrites in place) */
+		status = softnic_pipeline_table_rule_add(p,
+			pipeline->name,
+			m->flow->table_id,
+			&m->flow->match,
+			&action,
+			&m->flow->data);
+		if (status)
+			return -rte_mtr_error_set(error,
+				EINVAL,
+				RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+				NULL,
+				"Pipeline table rule re-add failed");
+
+		/* Flow: Update meter action */
+		memcpy(&m->flow->action, &action, sizeof(m->flow->action));
+
+		/* Reset the meter stats (read with clear = 1; best effort,
+		 * the return value is deliberately ignored).
+		 */
+		rte_table_action_meter_read(table->a, m->flow->data,
+			1, NULL, 1);
+	}
+
+	/* Meter: Update policer actions */
+	for (i = 0; i < RTE_MTR_COLORS; i++)
+		if (action_mask & (1 << i))
+			m->params.action[i] = actions[i];
+
+	return 0;
+}
+
+#define MTR_STATS_PKTS_DEFAULT (RTE_MTR_STATS_N_PKTS_GREEN | \
+	RTE_MTR_STATS_N_PKTS_YELLOW | \
+	RTE_MTR_STATS_N_PKTS_RED | \
+	RTE_MTR_STATS_N_PKTS_DROPPED)
+
+#define MTR_STATS_BYTES_DEFAULT (RTE_MTR_STATS_N_BYTES_GREEN | \
+	RTE_MTR_STATS_N_BYTES_YELLOW | \
+	RTE_MTR_STATS_N_BYTES_RED | \
+	RTE_MTR_STATS_N_BYTES_DROPPED)
+
+/* MTR object stats conversion.
+ *
+ * Convert table action meter counters @in into the generic rte_mtr stats
+ * format @out, using the MTR object's per-color policer actions to decide
+ * which output bucket each input color accumulates into. *out_mask reports
+ * which stats fields were produced.
+ *
+ * Fix: zero the pointed-to struct, not the pointer — the original
+ * memset(&out, 0, sizeof(out)) cleared only pointer-size bytes of the local
+ * pointer variable, leaving *out uninitialized while the loops below
+ * accumulate into it with +=.
+ */
+static void
+mtr_stats_convert(struct softnic_mtr *m,
+	struct rte_table_action_mtr_counters_tc *in,
+	struct rte_mtr_stats *out,
+	uint64_t *out_mask)
+{
+	memset(out, 0, sizeof(*out));
+	*out_mask = 0;
+
+	if (in->n_packets_valid) {
+		uint32_t i;
+
+		for (i = 0; i < RTE_MTR_COLORS; i++) {
+			if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_GREEN)
+				out->n_pkts[RTE_MTR_GREEN] += in->n_packets[i];
+
+			if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_YELLOW)
+				out->n_pkts[RTE_MTR_YELLOW] += in->n_packets[i];
+
+			if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_RED)
+				out->n_pkts[RTE_MTR_RED] += in->n_packets[i];
+
+			if (m->params.action[i] == MTR_POLICER_ACTION_DROP)
+				out->n_pkts_dropped += in->n_packets[i];
+		}
+
+		*out_mask |= MTR_STATS_PKTS_DEFAULT;
+	}
+
+	if (in->n_bytes_valid) {
+		uint32_t i;
+
+		for (i = 0; i < RTE_MTR_COLORS; i++) {
+			if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_GREEN)
+				out->n_bytes[RTE_MTR_GREEN] += in->n_bytes[i];
+
+			if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_YELLOW)
+				out->n_bytes[RTE_MTR_YELLOW] += in->n_bytes[i];
+
+			if (m->params.action[i] == MTR_POLICER_ACTION_COLOR_RED)
+				out->n_bytes[RTE_MTR_RED] += in->n_bytes[i];
+
+			if (m->params.action[i] == MTR_POLICER_ACTION_DROP)
+				out->n_bytes_dropped += in->n_bytes[i];
+		}
+
+		*out_mask |= MTR_STATS_BYTES_DEFAULT;
+	}
+}
+
+/* MTR object stats read.
+ *
+ * Read (and optionally @clear) the meter stats of MTR object @mtr_id.
+ * When the object is not owned by a flow, zeroed stats and the default
+ * stats mask are reported. Both @stats and @stats_mask may be NULL when
+ * the caller only wants the clear side effect.
+ *
+ * Fix: zero-initialize the local stats struct before handing it to
+ * mtr_stats_convert(), so this function never exposes stack garbage even
+ * if the converter fails to clear its output.
+ *
+ * Returns 0 on success, negative errno (recorded in @error) on failure.
+ */
+static int
+pmd_mtr_stats_read(struct rte_eth_dev *dev,
+	uint32_t mtr_id,
+	struct rte_mtr_stats *stats,
+	uint64_t *stats_mask,
+	int clear,
+	struct rte_mtr_error *error)
+{
+	struct pmd_internals *p = dev->data->dev_private;
+	struct rte_table_action_mtr_counters counters;
+	struct pipeline *pipeline;
+	struct softnic_table *table;
+	struct softnic_mtr *m;
+	int status;
+
+	/* MTR object id must be valid */
+	m = softnic_mtr_find(p, mtr_id);
+	if (m == NULL)
+		return -rte_mtr_error_set(error,
+			EEXIST,
+			RTE_MTR_ERROR_TYPE_MTR_ID,
+			NULL,
+			"MTR object id not valid");
+
+	/* MTR meter object owner valid? If not, report all-zero stats. */
+	if (m->flow == NULL) {
+		if (stats != NULL)
+			memset(stats, 0, sizeof(*stats));
+
+		if (stats_mask)
+			*stats_mask = MTR_STATS_PKTS_DEFAULT |
+				MTR_STATS_BYTES_DEFAULT;
+
+		return 0;
+	}
+
+	pipeline = m->flow->pipeline;
+	table = &pipeline->table[m->flow->table_id];
+
+	/* Meter stats read (single TC). */
+	status = rte_table_action_meter_read(table->a,
+		m->flow->data,
+		1,
+		&counters,
+		clear);
+	if (status)
+		return -rte_mtr_error_set(error,
+			EINVAL,
+			RTE_MTR_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"Meter stats read failed");
+
+	/* Stats format conversion. */
+	if (stats || stats_mask) {
+		struct rte_mtr_stats s;
+		uint64_t s_mask = 0;
+
+		/* Defensive: never pass uninitialized stack memory to the
+		 * converter's accumulating (+=) loops.
+		 */
+		memset(&s, 0, sizeof(s));
+
+		mtr_stats_convert(m,
+			&counters.stats[0],
+			&s,
+			&s_mask);
+
+		if (stats)
+			memcpy(stats, &s, sizeof(*stats));
+
+		if (stats_mask)
+			*stats_mask = s_mask;
+	}
+
+	return 0;
+}
+
+/* rte_mtr operations exposed by the softnic PMD. Entries left NULL are
+ * operations this driver does not implement (capabilities query, meter
+ * enable/disable, stats mask update); the generic rte_mtr layer reports
+ * them as unsupported to the application.
+ */
+const struct rte_mtr_ops pmd_mtr_ops = {
+	.capabilities_get = NULL,
+
+	.meter_profile_add = pmd_mtr_meter_profile_add,
+	.meter_profile_delete = pmd_mtr_meter_profile_delete,
+
+	.create = pmd_mtr_create,
+	.destroy = pmd_mtr_destroy,
+	.meter_enable = NULL,
+	.meter_disable = NULL,
+
+	.meter_profile_update = pmd_mtr_meter_profile_update,
+	.meter_dscp_table_update = pmd_mtr_meter_dscp_table_update,
+	.policer_actions_update = pmd_mtr_policer_actions_update,
+	.stats_update = NULL,
+
+	.stats_read = pmd_mtr_stats_read,
+};
diff --git a/drivers/net/softnic/rte_eth_softnic_pipeline.c b/drivers/net/softnic/rte_eth_softnic_pipeline.c
index 45136a4a..5e180f8f 100644
--- a/drivers/net/softnic/rte_eth_softnic_pipeline.c
+++ b/drivers/net/softnic/rte_eth_softnic_pipeline.c
@@ -15,18 +15,18 @@
#include <rte_port_source_sink.h>
#include <rte_port_fd.h>
#include <rte_port_sched.h>
+#include <rte_port_sym_crypto.h>
#include <rte_table_acl.h>
#include <rte_table_array.h>
#include <rte_table_hash.h>
+#include <rte_table_hash_func.h>
#include <rte_table_lpm.h>
#include <rte_table_lpm_ipv6.h>
#include <rte_table_stub.h>
#include "rte_eth_softnic_internals.h"
-#include "hash_func.h"
-
#ifndef PIPELINE_MSGQ_SIZE
#define PIPELINE_MSGQ_SIZE 64
#endif
@@ -43,17 +43,52 @@ softnic_pipeline_init(struct pmd_internals *p)
return 0;
}
+/* Drain and release all per-table resources: the flow list and the meter
+ * profile list attached to @table.
+ */
+static void
+softnic_pipeline_table_free(struct softnic_table *table)
+{
+	struct rte_flow *flow;
+	struct softnic_table_meter_profile *profile;
+
+	/* Pop-and-free every flow owned by this table */
+	while ((flow = TAILQ_FIRST(&table->flows)) != NULL) {
+		TAILQ_REMOVE(&table->flows, flow, node);
+		free(flow);
+	}
+
+	/* Pop-and-free every meter profile registered with this table */
+	while ((profile = TAILQ_FIRST(&table->meter_profiles)) != NULL) {
+		TAILQ_REMOVE(&table->meter_profiles, profile, node);
+		free(profile);
+	}
+}
+
void
softnic_pipeline_free(struct pmd_internals *p)
{
for ( ; ; ) {
struct pipeline *pipeline;
+ uint32_t table_id;
pipeline = TAILQ_FIRST(&p->pipeline_list);
if (pipeline == NULL)
break;
TAILQ_REMOVE(&p->pipeline_list, pipeline, node);
+
+ for (table_id = 0; table_id < pipeline->n_tables; table_id++) {
+ struct softnic_table *table =
+ &pipeline->table[table_id];
+
+ softnic_pipeline_table_free(table);
+ }
+
rte_ring_free(pipeline->msgq_req);
rte_ring_free(pipeline->msgq_rsp);
rte_pipeline_free(pipeline->p);
@@ -160,6 +195,7 @@ softnic_pipeline_create(struct pmd_internals *softnic,
/* Node fill in */
strlcpy(pipeline->name, name, sizeof(pipeline->name));
pipeline->p = p;
+ memcpy(&pipeline->params, params, sizeof(*params));
pipeline->n_ports_in = 0;
pipeline->n_ports_out = 0;
pipeline->n_tables = 0;
@@ -189,6 +225,7 @@ softnic_pipeline_port_in_create(struct pmd_internals *softnic,
struct rte_port_sched_reader_params sched;
struct rte_port_fd_reader_params fd;
struct rte_port_source_params source;
+ struct rte_port_sym_crypto_reader_params cryptodev;
} pp;
struct pipeline *pipeline;
@@ -213,7 +250,7 @@ softnic_pipeline_port_in_create(struct pmd_internals *softnic,
return -1;
ap = NULL;
- if (params->action_profile_name) {
+ if (strlen(params->action_profile_name)) {
ap = softnic_port_in_action_profile_find(softnic,
params->action_profile_name);
if (ap == NULL)
@@ -306,6 +343,23 @@ softnic_pipeline_port_in_create(struct pmd_internals *softnic,
break;
}
+ case PORT_IN_CRYPTODEV:
+ {
+ struct softnic_cryptodev *cryptodev;
+
+ cryptodev = softnic_cryptodev_find(softnic, params->dev_name);
+ if (cryptodev == NULL)
+ return -1;
+
+ pp.cryptodev.cryptodev_id = cryptodev->dev_id;
+ pp.cryptodev.queue_id = params->cryptodev.queue_id;
+ pp.cryptodev.f_callback = params->cryptodev.f_callback;
+ pp.cryptodev.arg_callback = params->cryptodev.arg_callback;
+ p.ops = &rte_port_sym_crypto_reader_ops;
+ p.arg_create = &pp.cryptodev;
+ break;
+ }
+
default:
return -1;
}
@@ -392,15 +446,18 @@ softnic_pipeline_port_out_create(struct pmd_internals *softnic,
struct rte_port_sched_writer_params sched;
struct rte_port_fd_writer_params fd;
struct rte_port_sink_params sink;
+ struct rte_port_sym_crypto_writer_params cryptodev;
} pp;
union {
struct rte_port_ethdev_writer_nodrop_params ethdev;
struct rte_port_ring_writer_nodrop_params ring;
struct rte_port_fd_writer_nodrop_params fd;
+ struct rte_port_sym_crypto_writer_nodrop_params cryptodev;
} pp_nodrop;
struct pipeline *pipeline;
+ struct softnic_port_out *port_out;
uint32_t port_id;
int status;
@@ -526,6 +583,40 @@ softnic_pipeline_port_out_create(struct pmd_internals *softnic,
break;
}
+ case PORT_OUT_CRYPTODEV:
+ {
+ struct softnic_cryptodev *cryptodev;
+
+ cryptodev = softnic_cryptodev_find(softnic, params->dev_name);
+ if (cryptodev == NULL)
+ return -1;
+
+ if (params->cryptodev.queue_id >= cryptodev->n_queues)
+ return -1;
+
+ pp.cryptodev.cryptodev_id = cryptodev->dev_id;
+ pp.cryptodev.queue_id = params->cryptodev.queue_id;
+ pp.cryptodev.tx_burst_sz = params->burst_size;
+ pp.cryptodev.crypto_op_offset = params->cryptodev.op_offset;
+
+ pp_nodrop.cryptodev.cryptodev_id = cryptodev->dev_id;
+ pp_nodrop.cryptodev.queue_id = params->cryptodev.queue_id;
+ pp_nodrop.cryptodev.tx_burst_sz = params->burst_size;
+ pp_nodrop.cryptodev.n_retries = params->retry;
+ pp_nodrop.cryptodev.crypto_op_offset =
+ params->cryptodev.op_offset;
+
+ if (params->retry == 0) {
+ p.ops = &rte_port_sym_crypto_writer_ops;
+ p.arg_create = &pp.cryptodev;
+ } else {
+ p.ops = &rte_port_sym_crypto_writer_nodrop_ops;
+ p.arg_create = &pp_nodrop.cryptodev;
+ }
+
+ break;
+ }
+
default:
return -1;
}
@@ -542,6 +633,8 @@ softnic_pipeline_port_out_create(struct pmd_internals *softnic,
return -1;
/* Pipeline */
+ port_out = &pipeline->port_out[pipeline->n_ports_out];
+ memcpy(&port_out->params, params, sizeof(*params));
pipeline->n_ports_out++;
return 0;
@@ -730,7 +823,7 @@ softnic_pipeline_table_create(struct pmd_internals *softnic,
return -1;
ap = NULL;
- if (params->action_profile_name) {
+ if (strlen(params->action_profile_name)) {
ap = softnic_table_action_profile_find(softnic,
params->action_profile_name);
if (ap == NULL)
@@ -797,28 +890,28 @@ softnic_pipeline_table_create(struct pmd_internals *softnic,
switch (params->match.hash.key_size) {
case 8:
- f_hash = hash_default_key8;
+ f_hash = rte_table_hash_crc_key8;
break;
case 16:
- f_hash = hash_default_key16;
+ f_hash = rte_table_hash_crc_key16;
break;
case 24:
- f_hash = hash_default_key24;
+ f_hash = rte_table_hash_crc_key24;
break;
case 32:
- f_hash = hash_default_key32;
+ f_hash = rte_table_hash_crc_key32;
break;
case 40:
- f_hash = hash_default_key40;
+ f_hash = rte_table_hash_crc_key40;
break;
case 48:
- f_hash = hash_default_key48;
+ f_hash = rte_table_hash_crc_key48;
break;
case 56:
- f_hash = hash_default_key56;
+ f_hash = rte_table_hash_crc_key56;
break;
case 64:
- f_hash = hash_default_key64;
+ f_hash = rte_table_hash_crc_key64;
break;
default:
return -1;
@@ -960,7 +1053,51 @@ softnic_pipeline_table_create(struct pmd_internals *softnic,
memcpy(&table->params, params, sizeof(*params));
table->ap = ap;
table->a = action;
+ TAILQ_INIT(&table->flows);
+ TAILQ_INIT(&table->meter_profiles);
+ memset(&table->dscp_table, 0, sizeof(table->dscp_table));
pipeline->n_tables++;
return 0;
}
+
+/* Look up a pipeline output port by the name of the device it is connected
+ * to. On success, store the port index in *port_id and return 0; return -1
+ * on invalid arguments, unknown pipeline, or no matching port.
+ */
+int
+softnic_pipeline_port_out_find(struct pmd_internals *softnic,
+	const char *pipeline_name,
+	const char *name,
+	uint32_t *port_id)
+{
+	struct pipeline *pl;
+	uint32_t idx;
+
+	/* Check input params */
+	if (softnic == NULL ||
+		pipeline_name == NULL ||
+		name == NULL ||
+		port_id == NULL)
+		return -1;
+
+	pl = softnic_pipeline_find(softnic, pipeline_name);
+	if (pl == NULL)
+		return -1;
+
+	for (idx = 0; idx < pl->n_ports_out; idx++) {
+		if (strcmp(pl->port_out[idx].params.dev_name, name) == 0) {
+			*port_id = idx;
+			return 0;
+		}
+	}
+
+	return -1;
+}
+
+/* Return the table-local meter profile entry with the given id, or NULL
+ * when the profile has not been registered with @table.
+ */
+struct softnic_table_meter_profile *
+softnic_pipeline_table_meter_profile_find(struct softnic_table *table,
+	uint32_t meter_profile_id)
+{
+	struct softnic_table_meter_profile *profile;
+
+	for (profile = TAILQ_FIRST(&table->meter_profiles);
+		profile != NULL;
+		profile = TAILQ_NEXT(profile, node))
+		if (profile->meter_profile_id == meter_profile_id)
+			return profile;
+
+	return NULL;
+}
diff --git a/drivers/net/softnic/rte_eth_softnic_thread.c b/drivers/net/softnic/rte_eth_softnic_thread.c
index 8a150903..4572adfa 100644
--- a/drivers/net/softnic/rte_eth_softnic_thread.c
+++ b/drivers/net/softnic/rte_eth_softnic_thread.c
@@ -1680,6 +1680,8 @@ softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
struct pipeline *p;
struct pipeline_msg_req *req;
struct pipeline_msg_rsp *rsp;
+ struct softnic_table *table;
+ struct softnic_table_meter_profile *mp;
int status;
/* Check input params */
@@ -1692,20 +1694,40 @@ softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
table_id >= p->n_tables)
return -1;
- if (!pipeline_is_running(p)) {
- struct rte_table_action *a = p->table[table_id].a;
+ table = &p->table[table_id];
+ mp = softnic_pipeline_table_meter_profile_find(table, meter_profile_id);
+ if (mp)
+ return -1;
- status = rte_table_action_meter_profile_add(a,
+ /* Resource Allocation */
+ mp = calloc(1, sizeof(struct softnic_table_meter_profile));
+ if (mp == NULL)
+ return -1;
+
+ mp->meter_profile_id = meter_profile_id;
+ memcpy(&mp->profile, profile, sizeof(mp->profile));
+
+ if (!pipeline_is_running(p)) {
+ status = rte_table_action_meter_profile_add(table->a,
meter_profile_id,
profile);
+ if (status) {
+ free(mp);
+ return status;
+ }
+
+ /* Add profile to the table. */
+ TAILQ_INSERT_TAIL(&table->meter_profiles, mp, node);
return status;
}
/* Allocate request */
req = pipeline_msg_alloc();
- if (req == NULL)
+ if (req == NULL) {
+ free(mp);
return -1;
+ }
/* Write request */
req->type = PIPELINE_REQ_TABLE_MTR_PROFILE_ADD;
@@ -1715,11 +1737,17 @@ softnic_pipeline_table_mtr_profile_add(struct pmd_internals *softnic,
/* Send request and wait for response */
rsp = pipeline_msg_send_recv(p, req);
- if (rsp == NULL)
+ if (rsp == NULL) {
+ free(mp);
return -1;
+ }
/* Read response */
status = rsp->status;
+ if (status == 0)
+ TAILQ_INSERT_TAIL(&table->meter_profiles, mp, node);
+ else
+ free(mp);
/* Free response */
pipeline_msg_free(rsp);
@@ -1874,6 +1902,11 @@ softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic,
dscp_mask,
dscp_table);
+ /* Update table dscp table */
+ if (!status)
+ memcpy(&p->table[table_id].dscp_table, dscp_table,
+ sizeof(p->table[table_id].dscp_table));
+
return status;
}
@@ -1897,6 +1930,11 @@ softnic_pipeline_table_dscp_table_update(struct pmd_internals *softnic,
/* Read response */
status = rsp->status;
+ /* Update table dscp table */
+ if (!status)
+ memcpy(&p->table[table_id].dscp_table, dscp_table,
+ sizeof(p->table[table_id].dscp_table));
+
/* Free response */
pipeline_msg_free(rsp);
@@ -2202,29 +2240,37 @@ match_convert(struct softnic_table_rule_match *mh,
ml->acl_add.field_value[0].mask_range.u8 =
mh->match.acl.proto_mask;
- ml->acl_add.field_value[1].value.u32 = sa32[0];
+ ml->acl_add.field_value[1].value.u32 =
+ rte_be_to_cpu_32(sa32[0]);
ml->acl_add.field_value[1].mask_range.u32 =
sa32_depth[0];
- ml->acl_add.field_value[2].value.u32 = sa32[1];
+ ml->acl_add.field_value[2].value.u32 =
+ rte_be_to_cpu_32(sa32[1]);
ml->acl_add.field_value[2].mask_range.u32 =
sa32_depth[1];
- ml->acl_add.field_value[3].value.u32 = sa32[2];
+ ml->acl_add.field_value[3].value.u32 =
+ rte_be_to_cpu_32(sa32[2]);
ml->acl_add.field_value[3].mask_range.u32 =
sa32_depth[2];
- ml->acl_add.field_value[4].value.u32 = sa32[3];
+ ml->acl_add.field_value[4].value.u32 =
+ rte_be_to_cpu_32(sa32[3]);
ml->acl_add.field_value[4].mask_range.u32 =
sa32_depth[3];
- ml->acl_add.field_value[5].value.u32 = da32[0];
+ ml->acl_add.field_value[5].value.u32 =
+ rte_be_to_cpu_32(da32[0]);
ml->acl_add.field_value[5].mask_range.u32 =
da32_depth[0];
- ml->acl_add.field_value[6].value.u32 = da32[1];
+ ml->acl_add.field_value[6].value.u32 =
+ rte_be_to_cpu_32(da32[1]);
ml->acl_add.field_value[6].mask_range.u32 =
da32_depth[1];
- ml->acl_add.field_value[7].value.u32 = da32[2];
+ ml->acl_add.field_value[7].value.u32 =
+ rte_be_to_cpu_32(da32[2]);
ml->acl_add.field_value[7].mask_range.u32 =
da32_depth[2];
- ml->acl_add.field_value[8].value.u32 = da32[3];
+ ml->acl_add.field_value[8].value.u32 =
+ rte_be_to_cpu_32(da32[3]);
ml->acl_add.field_value[8].mask_range.u32 =
da32_depth[3];
@@ -2264,36 +2310,36 @@ match_convert(struct softnic_table_rule_match *mh,
mh->match.acl.proto_mask;
ml->acl_delete.field_value[1].value.u32 =
- sa32[0];
+ rte_be_to_cpu_32(sa32[0]);
ml->acl_delete.field_value[1].mask_range.u32 =
sa32_depth[0];
ml->acl_delete.field_value[2].value.u32 =
- sa32[1];
+ rte_be_to_cpu_32(sa32[1]);
ml->acl_delete.field_value[2].mask_range.u32 =
sa32_depth[1];
ml->acl_delete.field_value[3].value.u32 =
- sa32[2];
+ rte_be_to_cpu_32(sa32[2]);
ml->acl_delete.field_value[3].mask_range.u32 =
sa32_depth[2];
ml->acl_delete.field_value[4].value.u32 =
- sa32[3];
+ rte_be_to_cpu_32(sa32[3]);
ml->acl_delete.field_value[4].mask_range.u32 =
sa32_depth[3];
ml->acl_delete.field_value[5].value.u32 =
- da32[0];
+ rte_be_to_cpu_32(da32[0]);
ml->acl_delete.field_value[5].mask_range.u32 =
da32_depth[0];
ml->acl_delete.field_value[6].value.u32 =
- da32[1];
+ rte_be_to_cpu_32(da32[1]);
ml->acl_delete.field_value[6].mask_range.u32 =
da32_depth[1];
ml->acl_delete.field_value[7].value.u32 =
- da32[2];
+ rte_be_to_cpu_32(da32[2]);
ml->acl_delete.field_value[7].mask_range.u32 =
da32_depth[2];
ml->acl_delete.field_value[8].value.u32 =
- da32[3];
+ rte_be_to_cpu_32(da32[3]);
ml->acl_delete.field_value[8].mask_range.u32 =
da32_depth[3];
@@ -2432,6 +2478,36 @@ action_convert(struct rte_table_action *a,
return status;
}
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_TAG,
+ &action->tag);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_DECAP,
+ &action->decap);
+
+ if (status)
+ return status;
+ }
+
+ if (action->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ status = rte_table_action_apply(a,
+ data,
+ RTE_TABLE_ACTION_SYM_CRYPTO,
+ &action->sym_crypto);
+
+ if (status)
+ return status;
+ }
+
return 0;
}