author    Luca Boccassi <luca.boccassi@gmail.com>  2018-11-01 11:59:50 +0000
committer Luca Boccassi <luca.boccassi@gmail.com>  2018-11-01 12:00:19 +0000
commit    8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)
tree      208e3bc33c220854d89d010e3abf720a2e62e546 /lib/librte_pipeline/rte_table_action.c
parent    b63264c8342e6a1b6971c79550d2af2024b6a4de (diff)

New upstream version 18.11-rc1 (tag: upstream/18.11-rc1)

Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'lib/librte_pipeline/rte_table_action.c')
-rw-r--r--  lib/librte_pipeline/rte_table_action.c | 1048
1 file changed, 1047 insertions(+), 1 deletion(-)
diff --git a/lib/librte_pipeline/rte_table_action.c b/lib/librte_pipeline/rte_table_action.c
index 83ffa5de..537e6593 100644
--- a/lib/librte_pipeline/rte_table_action.c
+++ b/lib/librte_pipeline/rte_table_action.c
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2010-2018 Intel Corporation
*/
-
#include <stdlib.h>
#include <string.h>
@@ -15,6 +14,8 @@
#include <rte_esp.h>
#include <rte_tcp.h>
#include <rte_udp.h>
+#include <rte_cryptodev.h>
+#include <rte_cryptodev_pmd.h>
#include "rte_table_action.h"
@@ -430,6 +431,7 @@ encap_valid(enum rte_table_action_encap_type encap)
case RTE_TABLE_ACTION_ENCAP_QINQ:
case RTE_TABLE_ACTION_ENCAP_MPLS:
case RTE_TABLE_ACTION_ENCAP_PPPOE:
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
return 1;
default:
return 0;
@@ -498,6 +500,38 @@ struct encap_pppoe_data {
struct pppoe_ppp_hdr pppoe_ppp;
} __attribute__((__packed__));
+#define IP_PROTO_UDP 17
+
+struct encap_vxlan_ipv4_data {
+ struct ether_hdr ether;
+ struct ipv4_hdr ipv4;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
+struct encap_vxlan_ipv4_vlan_data {
+ struct ether_hdr ether;
+ struct vlan_hdr vlan;
+ struct ipv4_hdr ipv4;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
+struct encap_vxlan_ipv6_data {
+ struct ether_hdr ether;
+ struct ipv6_hdr ipv6;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
+struct encap_vxlan_ipv6_vlan_data {
+ struct ether_hdr ether;
+ struct vlan_hdr vlan;
+ struct ipv6_hdr ipv6;
+ struct udp_hdr udp;
+ struct vxlan_hdr vxlan;
+} __attribute__((__packed__));
+
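
For reference, these packed templates have fixed on-wire sizes: Ethernet 14, VLAN tag 4, IPv4 20, IPv6 40, UDP 8 and VXLAN 8 bytes. A compile-time sanity check (a sketch using C11 static_assert, not part of the patch):

#include <assert.h>

/* 14 + 20 + 8 + 8 = 50 bytes of outer headers for IPv4, no VLAN */
static_assert(sizeof(struct encap_vxlan_ipv4_data) == 50, "bad packing");
static_assert(sizeof(struct encap_vxlan_ipv4_vlan_data) == 54, "bad packing");
static_assert(sizeof(struct encap_vxlan_ipv6_data) == 70, "bad packing");
static_assert(sizeof(struct encap_vxlan_ipv6_vlan_data) == 74, "bad packing");
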
static size_t
encap_data_size(struct rte_table_action_encap_config *encap)
{
@@ -517,6 +551,18 @@ encap_data_size(struct rte_table_action_encap_config *encap)
case 1LLU << RTE_TABLE_ACTION_ENCAP_PPPOE:
return sizeof(struct encap_pppoe_data);
+	case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
+		if (encap->vxlan.ip_version) {
+			if (encap->vxlan.vlan)
+				return sizeof(struct encap_vxlan_ipv4_vlan_data);
+			else
+				return sizeof(struct encap_vxlan_ipv4_data);
+		} else {
+			if (encap->vxlan.vlan)
+				return sizeof(struct encap_vxlan_ipv6_vlan_data);
+			else
+				return sizeof(struct encap_vxlan_ipv6_data);
+		}
+
default:
return 0;
}
@@ -550,6 +596,9 @@ encap_apply_check(struct rte_table_action_encap_params *p,
case RTE_TABLE_ACTION_ENCAP_PPPOE:
return 0;
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ return 0;
+
default:
return -EINVAL;
}
@@ -679,6 +728,168 @@ encap_pppoe_apply(void *data,
}
static int
+encap_vxlan_apply(void *data,
+ struct rte_table_action_encap_params *p,
+ struct rte_table_action_encap_config *cfg)
+{
+ if ((p->vxlan.vxlan.vni > 0xFFFFFF) ||
+ (cfg->vxlan.ip_version && (p->vxlan.ipv4.dscp > 0x3F)) ||
+ (!cfg->vxlan.ip_version && (p->vxlan.ipv6.flow_label > 0xFFFFF)) ||
+ (!cfg->vxlan.ip_version && (p->vxlan.ipv6.dscp > 0x3F)) ||
+ (cfg->vxlan.vlan && (p->vxlan.vlan.vid > 0xFFF)))
+ return -1;
+
+	if (cfg->vxlan.ip_version) {
+		if (cfg->vxlan.vlan) {
+ struct encap_vxlan_ipv4_vlan_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
+ p->vxlan.vlan.dei,
+ p->vxlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv4);
+
+ /* IPv4 */
+ d->ipv4.version_ihl = 0x45;
+ d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
+ d->ipv4.total_length = 0; /* not pre-computed */
+ d->ipv4.packet_id = 0;
+ d->ipv4.fragment_offset = 0;
+ d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
+ d->ipv4.next_proto_id = IP_PROTO_UDP;
+ d->ipv4.hdr_checksum = 0;
+ d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
+ d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
+
+ d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ } else {
+ struct encap_vxlan_ipv4_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_IPv4);
+
+ /* IPv4 */
+ d->ipv4.version_ihl = 0x45;
+ d->ipv4.type_of_service = p->vxlan.ipv4.dscp << 2;
+ d->ipv4.total_length = 0; /* not pre-computed */
+ d->ipv4.packet_id = 0;
+ d->ipv4.fragment_offset = 0;
+ d->ipv4.time_to_live = p->vxlan.ipv4.ttl;
+ d->ipv4.next_proto_id = IP_PROTO_UDP;
+ d->ipv4.hdr_checksum = 0;
+ d->ipv4.src_addr = rte_htonl(p->vxlan.ipv4.sa);
+ d->ipv4.dst_addr = rte_htonl(p->vxlan.ipv4.da);
+
+ d->ipv4.hdr_checksum = rte_ipv4_cksum(&d->ipv4);
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ }
+	} else {
+		if (cfg->vxlan.vlan) {
+ struct encap_vxlan_ipv6_vlan_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_VLAN);
+
+ /* VLAN */
+ d->vlan.vlan_tci = rte_htons(VLAN(p->vxlan.vlan.pcp,
+ p->vxlan.vlan.dei,
+ p->vxlan.vlan.vid));
+ d->vlan.eth_proto = rte_htons(ETHER_TYPE_IPv6);
+
+ /* IPv6 */
+ d->ipv6.vtc_flow = rte_htonl((6 << 28) |
+ (p->vxlan.ipv6.dscp << 22) |
+ p->vxlan.ipv6.flow_label);
+ d->ipv6.payload_len = 0; /* not pre-computed */
+ d->ipv6.proto = IP_PROTO_UDP;
+ d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
+ memcpy(d->ipv6.src_addr,
+ p->vxlan.ipv6.sa,
+ sizeof(p->vxlan.ipv6.sa));
+ memcpy(d->ipv6.dst_addr,
+ p->vxlan.ipv6.da,
+ sizeof(p->vxlan.ipv6.da));
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+ } else {
+ struct encap_vxlan_ipv6_data *d = data;
+
+ /* Ethernet */
+ ether_addr_copy(&p->vxlan.ether.da, &d->ether.d_addr);
+ ether_addr_copy(&p->vxlan.ether.sa, &d->ether.s_addr);
+ d->ether.ether_type = rte_htons(ETHER_TYPE_IPv6);
+
+ /* IPv6 */
+ d->ipv6.vtc_flow = rte_htonl((6 << 28) |
+ (p->vxlan.ipv6.dscp << 22) |
+ p->vxlan.ipv6.flow_label);
+ d->ipv6.payload_len = 0; /* not pre-computed */
+ d->ipv6.proto = IP_PROTO_UDP;
+ d->ipv6.hop_limits = p->vxlan.ipv6.hop_limit;
+ memcpy(d->ipv6.src_addr,
+ p->vxlan.ipv6.sa,
+ sizeof(p->vxlan.ipv6.sa));
+ memcpy(d->ipv6.dst_addr,
+ p->vxlan.ipv6.da,
+ sizeof(p->vxlan.ipv6.da));
+
+ /* UDP */
+ d->udp.src_port = rte_htons(p->vxlan.udp.sp);
+ d->udp.dst_port = rte_htons(p->vxlan.udp.dp);
+ d->udp.dgram_len = 0; /* not pre-computed */
+ d->udp.dgram_cksum = 0;
+
+ /* VXLAN */
+ d->vxlan.vx_flags = rte_htonl(0x08000000);
+ d->vxlan.vx_vni = rte_htonl(p->vxlan.vxlan.vni << 8);
+
+ return 0;
+		}
+	}
+}
+
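
For context, a hypothetical table-entry setup that exercises the IPv4/no-VLAN branch above; the field names are the ones this patch reads (p->vxlan.*), while the MAC addresses, IPs, ports and VNI are purely illustrative:

struct rte_table_action_encap_params ep = {
	.type = RTE_TABLE_ACTION_ENCAP_VXLAN,
	.vxlan = {
		.ether = {
			.da = { .addr_bytes = { 0x02, 0, 0, 0, 0, 2 } },
			.sa = { .addr_bytes = { 0x02, 0, 0, 0, 0, 1 } },
		},
		.ipv4 = {
			.sa = IPv4(10, 0, 0, 1),	/* host byte order */
			.da = IPv4(10, 0, 0, 2),
			.dscp = 0,
			.ttl = 64,
		},
		.udp = { .sp = 4789, .dp = 4789 },	/* IANA VXLAN port */
		.vxlan = { .vni = 100 },
	},
};
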
+static int
encap_apply(void *data,
struct rte_table_action_encap_params *p,
struct rte_table_action_encap_config *cfg,
@@ -707,11 +918,31 @@ encap_apply(void *data,
case RTE_TABLE_ACTION_ENCAP_PPPOE:
return encap_pppoe_apply(data, p);
+ case RTE_TABLE_ACTION_ENCAP_VXLAN:
+ return encap_vxlan_apply(data, p, cfg);
+
default:
return -EINVAL;
}
}
+static __rte_always_inline uint16_t
+encap_vxlan_ipv4_checksum_update(uint16_t cksum0,
+ uint16_t total_length)
+{
+ int32_t cksum1;
+
+ cksum1 = cksum0;
+ cksum1 = ~cksum1 & 0xFFFF;
+
+ /* Add total length (one's complement logic) */
+ cksum1 += total_length;
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+ cksum1 = (cksum1 & 0xFFFF) + (cksum1 >> 16);
+
+ return (uint16_t)(~cksum1);
+}
+
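
The function above is the RFC 1624 incremental checksum update: the template stores total_length = 0, so adding the real length m to the one's-complement sum gives HC' = ~(~HC + m), with carries folded back in twice. A standalone sketch of the same arithmetic:

#include <stdint.h>

/* One's-complement checksum update for a 16-bit header field that was 0
 * in the precomputed template (RFC 1624 with m = 0, m' = add). */
static uint16_t
cksum_add16(uint16_t cksum, uint16_t add)
{
	uint32_t sum = (uint16_t)~cksum;	/* back to the running sum */

	sum += add;				/* add the new field value */
	sum = (sum & 0xFFFF) + (sum >> 16);	/* fold carry */
	sum = (sum & 0xFFFF) + (sum >> 16);	/* fold again if it re-carries */
	return (uint16_t)~sum;
}
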
static __rte_always_inline void *
encap(void *dst, const void *src, size_t n)
{
@@ -720,6 +951,118 @@ encap(void *dst, const void *src, size_t n)
}
static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr) +
+ sizeof(struct ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
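+	/* cfg->vxlan.data_offset is measured from the start of the mbuf
+	 * structure (metadata addressing), while data_off is relative to the
+	 * data buffer that follows it, hence the sizeof(struct rte_mbuf)
+	 * adjustment; this assumes the mbuf has no private data area.
+	 */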
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv4_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv4_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv4_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv4_total_length, ipv4_hdr_cksum, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv4_total_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr) +
+ sizeof(struct ipv4_hdr));
+ ipv4_hdr_cksum = encap_vxlan_ipv4_checksum_update(vxlan_tbl->ipv4.hdr_checksum,
+ rte_htons(ipv4_total_length));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv4.total_length = rte_htons(ipv4_total_length);
+ vxlan_pkt->ipv4.hdr_checksum = ipv4_hdr_cksum;
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
+pkt_work_encap_vxlan_ipv6_vlan(struct rte_mbuf *mbuf,
+ struct encap_vxlan_ipv6_vlan_data *vxlan_tbl,
+ struct rte_table_action_encap_config *cfg)
+{
+ uint32_t ether_offset = cfg->vxlan.data_offset;
+ void *ether = RTE_MBUF_METADATA_UINT32_PTR(mbuf, ether_offset);
+ struct encap_vxlan_ipv6_vlan_data *vxlan_pkt;
+ uint16_t ether_length, ipv6_payload_length, udp_length;
+
+ ether_length = (uint16_t)mbuf->pkt_len;
+ ipv6_payload_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+ udp_length = ether_length +
+ (sizeof(struct vxlan_hdr) +
+ sizeof(struct udp_hdr));
+
+ vxlan_pkt = encap(ether, vxlan_tbl, sizeof(*vxlan_tbl));
+ vxlan_pkt->ipv6.payload_len = rte_htons(ipv6_payload_length);
+ vxlan_pkt->udp.dgram_len = rte_htons(udp_length);
+
+ mbuf->data_off = ether_offset - (sizeof(struct rte_mbuf) + sizeof(*vxlan_pkt));
+ mbuf->pkt_len = mbuf->data_len = ether_length + sizeof(*vxlan_pkt);
+}
+
+static __rte_always_inline void
pkt_work_encap(struct rte_mbuf *mbuf,
void *data,
struct rte_table_action_encap_config *cfg,
@@ -776,6 +1119,20 @@ pkt_work_encap(struct rte_mbuf *mbuf,
break;
}
+ case 1LLU << RTE_TABLE_ACTION_ENCAP_VXLAN:
+ {
+		if (cfg->vxlan.ip_version) {
+			if (cfg->vxlan.vlan)
+				pkt_work_encap_vxlan_ipv4_vlan(mbuf, data, cfg);
+			else
+				pkt_work_encap_vxlan_ipv4(mbuf, data, cfg);
+		} else {
+			if (cfg->vxlan.vlan)
+				pkt_work_encap_vxlan_ipv6_vlan(mbuf, data, cfg);
+			else
+				pkt_work_encap_vxlan_ipv6(mbuf, data, cfg);
+		}
+		break;
+	}
+
default:
break;
}
@@ -1219,6 +1576,562 @@ pkt_work_time(struct time_data *data,
data->time = time;
}
+
+/**
+ * RTE_TABLE_ACTION_SYM_CRYPTO
+ */
+
+#define CRYPTO_OP_MASK_CIPHER 0x1
+#define CRYPTO_OP_MASK_AUTH 0x2
+#define CRYPTO_OP_MASK_AEAD 0x4
+
+struct crypto_op_sym_iv_aad {
+ struct rte_crypto_op op;
+ struct rte_crypto_sym_op sym_op;
+ union {
+ struct {
+ uint8_t cipher_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t auth_iv[
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ } cipher_auth;
+
+ struct {
+ uint8_t iv[RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX];
+ uint8_t aad[RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX];
+ } aead_iv_aad;
+
+ } iv_aad;
+};
+
+struct sym_crypto_data {
+
+ union {
+ struct {
+
+ /** Length of cipher iv. */
+ uint16_t cipher_iv_len;
+
+ /** Offset from start of IP header to the cipher iv. */
+ uint16_t cipher_iv_data_offset;
+
+ /** Length of cipher iv to be updated in the mbuf. */
+ uint16_t cipher_iv_update_len;
+
+ /** Offset from start of IP header to the auth iv. */
+ uint16_t auth_iv_data_offset;
+
+ /** Length of auth iv in the mbuf. */
+ uint16_t auth_iv_len;
+
+ /** Length of auth iv to be updated in the mbuf. */
+ uint16_t auth_iv_update_len;
+
+ } cipher_auth;
+ struct {
+
+ /** Length of iv. */
+ uint16_t iv_len;
+
+ /** Offset from start of IP header to the aead iv. */
+ uint16_t iv_data_offset;
+
+ /** Length of iv to be updated in the mbuf. */
+ uint16_t iv_update_len;
+
+ /** Length of aad */
+ uint16_t aad_len;
+
+ /** Offset from start of IP header to the aad. */
+ uint16_t aad_data_offset;
+
+ /** Length of aad to be updated in the mbuf. */
+ uint16_t aad_update_len;
+
+ } aead;
+ };
+
+ /** Offset from start of IP header to the data. */
+ uint16_t data_offset;
+
+ /** Digest length. */
+ uint16_t digest_len;
+
+	/** Block size. */
+	uint16_t block_size;
+
+	/** Mask of crypto operations. */
+	uint16_t op_mask;
+
+ /** Session pointer. */
+ struct rte_cryptodev_sym_session *session;
+
+ /** Direction of crypto, encrypt or decrypt */
+ uint16_t direction;
+
+ /** Private data area storing the cipher iv / aad templates. */
+ uint8_t iv_aad_data[32];
+
+} __attribute__((__packed__));
+
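
How these pieces are meant to be wired up, as a sketch: the config fields below are the ones this patch dereferences (cfg->cryptodev_id, cfg->op_offset, cfg->mp_create, cfg->mp_init); cdev_id, op_offset and the two mempools (session-header and device-private session data) are placeholders the application provides:

struct rte_table_action_sym_crypto_config cc = {
	.cryptodev_id = cdev_id,	/* a configured crypto device */
	.op_offset = op_offset,		/* mbuf offset reserved for the
					 * crypto_op_sym_iv_aad blob */
	.mp_create = session_pool,	/* rte_cryptodev_sym_session objects */
	.mp_init = session_priv_pool,	/* device-private session data */
};
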
+static int
+sym_crypto_cfg_check(struct rte_table_action_sym_crypto_config *cfg)
+{
+ if (!rte_cryptodev_pmd_is_valid_dev(cfg->cryptodev_id))
+ return -EINVAL;
+ if (cfg->mp_create == NULL || cfg->mp_init == NULL)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int
+get_block_size(const struct rte_crypto_sym_xform *xform, uint8_t cdev_id)
+{
+ struct rte_cryptodev_info dev_info;
+ const struct rte_cryptodev_capabilities *cap;
+ uint32_t i;
+
+ rte_cryptodev_info_get(cdev_id, &dev_info);
+
+	for (i = 0;; i++) {
+		cap = &dev_info.capabilities[i];
+		/* &capabilities[i] can never be NULL; the array is terminated
+		 * by an entry with op == RTE_CRYPTO_OP_TYPE_UNDEFINED.
+		 */
+		if (cap->op == RTE_CRYPTO_OP_TYPE_UNDEFINED)
+			break;
+
+ if (cap->sym.xform_type != xform->type)
+ continue;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
+ (cap->sym.cipher.algo == xform->cipher.algo))
+ return cap->sym.cipher.block_size;
+
+ if ((xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
+ (cap->sym.aead.algo == xform->aead.algo))
+ return cap->sym.aead.block_size;
+
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_NOT_SPECIFIED)
+ break;
+ }
+
+ return -1;
+}
+
+static int
+sym_crypto_apply(struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ struct rte_table_action_sym_crypto_params *p)
+{
+ const struct rte_crypto_cipher_xform *cipher_xform = NULL;
+ const struct rte_crypto_auth_xform *auth_xform = NULL;
+ const struct rte_crypto_aead_xform *aead_xform = NULL;
+ struct rte_crypto_sym_xform *xform = p->xform;
+ struct rte_cryptodev_sym_session *session;
+ int ret;
+
+ memset(data, 0, sizeof(*data));
+
+ while (xform) {
+ if (xform->type == RTE_CRYPTO_SYM_XFORM_CIPHER) {
+ cipher_xform = &xform->cipher;
+
+ if (cipher_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ if (cipher_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_CIPHER;
+
+ data->cipher_auth.cipher_iv_len =
+ cipher_xform->iv.length;
+ data->cipher_auth.cipher_iv_data_offset = (uint16_t)
+ p->cipher_auth.cipher_iv_update.offset;
+ data->cipher_auth.cipher_iv_update_len = (uint16_t)
+ p->cipher_auth.cipher_iv_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.cipher_iv.val,
+ p->cipher_auth.cipher_iv.length);
+
+ data->direction = cipher_xform->op;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AUTH) {
+ auth_xform = &xform->auth;
+ if (auth_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX)
+ return -ENOMEM;
+ data->op_mask |= CRYPTO_OP_MASK_AUTH;
+
+ data->cipher_auth.auth_iv_len = auth_xform->iv.length;
+ data->cipher_auth.auth_iv_data_offset = (uint16_t)
+ p->cipher_auth.auth_iv_update.offset;
+ data->cipher_auth.auth_iv_update_len = (uint16_t)
+ p->cipher_auth.auth_iv_update.length;
+ data->digest_len = auth_xform->digest_length;
+
+ data->direction = (auth_xform->op ==
+ RTE_CRYPTO_AUTH_OP_GENERATE) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+
+ } else if (xform->type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ aead_xform = &xform->aead;
+
+ if ((aead_xform->iv.length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_SIZE_MAX) || (
+ aead_xform->aad_length >
+ RTE_TABLE_ACTION_SYM_CRYPTO_AAD_SIZE_MAX))
+ return -EINVAL;
+ if (aead_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET)
+ return -EINVAL;
+
+ ret = get_block_size(xform, cfg->cryptodev_id);
+ if (ret < 0)
+ return -1;
+ data->block_size = (uint16_t)ret;
+ data->op_mask |= CRYPTO_OP_MASK_AEAD;
+
+ data->digest_len = aead_xform->digest_length;
+ data->aead.iv_len = aead_xform->iv.length;
+ data->aead.aad_len = aead_xform->aad_length;
+
+ data->aead.iv_data_offset = (uint16_t)
+ p->aead.iv_update.offset;
+ data->aead.iv_update_len = (uint16_t)
+ p->aead.iv_update.length;
+ data->aead.aad_data_offset = (uint16_t)
+ p->aead.aad_update.offset;
+ data->aead.aad_update_len = (uint16_t)
+ p->aead.aad_update.length;
+
+ rte_memcpy(data->iv_aad_data,
+ p->aead.iv.val,
+ p->aead.iv.length);
+
+ rte_memcpy(data->iv_aad_data + p->aead.iv.length,
+ p->aead.aad.val,
+ p->aead.aad.length);
+
+ data->direction = (aead_xform->op ==
+ RTE_CRYPTO_AEAD_OP_ENCRYPT) ?
+ RTE_CRYPTO_CIPHER_OP_ENCRYPT :
+ RTE_CRYPTO_CIPHER_OP_DECRYPT;
+ } else
+ return -EINVAL;
+
+ xform = xform->next;
+ }
+
+ if (auth_xform && auth_xform->iv.length) {
+ if (cipher_xform) {
+ if (auth_xform->iv.offset !=
+ RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET +
+ cipher_xform->iv.length)
+ return -EINVAL;
+
+ rte_memcpy(data->iv_aad_data + cipher_xform->iv.length,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ } else {
+ rte_memcpy(data->iv_aad_data,
+ p->cipher_auth.auth_iv.val,
+ p->cipher_auth.auth_iv.length);
+ }
+ }
+
+ session = rte_cryptodev_sym_session_create(cfg->mp_create);
+ if (!session)
+ return -ENOMEM;
+
+ ret = rte_cryptodev_sym_session_init(cfg->cryptodev_id, session,
+ p->xform, cfg->mp_init);
+ if (ret < 0) {
+ rte_cryptodev_sym_session_free(session);
+ return ret;
+ }
+
+ data->data_offset = (uint16_t)p->data_offset;
+ data->session = session;
+
+ return 0;
+}
+
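
A hedged usage sketch for the AEAD path of sym_crypto_apply(): the xform layout is the standard rte_cryptodev one, the params fields are those this patch reads, and the key/IV/AAD buffers, data_offset and lengths are illustrative:

uint8_t key[16], iv[12], aad[8];	/* filled by the application */

struct rte_crypto_sym_xform xform = {
	.type = RTE_CRYPTO_SYM_XFORM_AEAD,
	.aead = {
		.op = RTE_CRYPTO_AEAD_OP_ENCRYPT,
		.algo = RTE_CRYPTO_AEAD_AES_GCM,
		.key = { .data = key, .length = sizeof(key) },
		.iv = {
			.offset = RTE_TABLE_ACTION_SYM_CRYPTO_IV_OFFSET,
			.length = sizeof(iv),
		},
		.digest_length = 16,
		.aad_length = sizeof(aad),
	},
};

struct rte_table_action_sym_crypto_params sp = {
	.xform = &xform,
	.data_offset = data_offset,	/* start of the ciphered payload */
	.aead = {
		.iv = { .val = iv, .length = sizeof(iv) },
		.aad = { .val = aad, .length = sizeof(aad) },
	},
};
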
+static __rte_always_inline uint64_t
+pkt_work_sym_crypto(struct rte_mbuf *mbuf, struct sym_crypto_data *data,
+ struct rte_table_action_sym_crypto_config *cfg,
+ uint16_t ip_offset)
+{
+ struct crypto_op_sym_iv_aad *crypto_op = (struct crypto_op_sym_iv_aad *)
+ RTE_MBUF_METADATA_UINT8_PTR(mbuf, cfg->op_offset);
+ struct rte_crypto_op *op = &crypto_op->op;
+ struct rte_crypto_sym_op *sym = op->sym;
+ uint32_t pkt_offset = sizeof(*mbuf) + mbuf->data_off;
+ uint32_t payload_len = pkt_offset + mbuf->data_len - data->data_offset;
+
+ op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
+ op->phys_addr = mbuf->buf_iova + cfg->op_offset - sizeof(*mbuf);
+ op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ sym->m_src = mbuf;
+ sym->m_dst = NULL;
+ sym->session = data->session;
+
+ /** pad the packet */
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT) {
+ uint32_t append_len = RTE_ALIGN_CEIL(payload_len,
+ data->block_size) - payload_len;
+
+ if (unlikely(rte_pktmbuf_append(mbuf, append_len +
+ data->digest_len) == NULL))
+ return 1;
+
+ payload_len += append_len;
+ } else
+ payload_len -= data->digest_len;
+
+ if (data->op_mask & CRYPTO_OP_MASK_CIPHER) {
+ /** prepare cipher op */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.cipher_iv;
+
+ sym->cipher.data.length = payload_len;
+ sym->cipher.data.offset = data->data_offset - pkt_offset;
+
+ if (data->cipher_auth.cipher_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.cipher_iv_data_offset
+ + ip_offset);
+
+			/* For encryption, update the packet IV field;
+			 * otherwise update the local iv_aad_data copy.
+			 */
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_update_len);
+ else
+ rte_memcpy(data->iv_aad_data, pkt_iv,
+ data->cipher_auth.cipher_iv_update_len);
+ }
+
+ /** write iv */
+ rte_memcpy(iv, data->iv_aad_data,
+ data->cipher_auth.cipher_iv_len);
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AUTH) {
+		/* Authentication always starts from the IP header. */
+ sym->auth.data.offset = ip_offset - pkt_offset;
+ sym->auth.data.length = mbuf->data_len - sym->auth.data.offset -
+ data->digest_len;
+ sym->auth.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+
+ if (data->cipher_auth.auth_iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->cipher_auth.auth_iv_data_offset
+ + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->cipher_auth.auth_iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->cipher_auth.auth_iv_update_len);
+ }
+
+ if (data->cipher_auth.auth_iv_len) {
+			/* write the auth iv into the op */
+ uint8_t *iv = crypto_op->iv_aad.cipher_auth.auth_iv;
+
+ rte_memcpy(iv, data->iv_aad_data +
+ data->cipher_auth.cipher_iv_len,
+ data->cipher_auth.auth_iv_len);
+ }
+ }
+
+ if (data->op_mask & CRYPTO_OP_MASK_AEAD) {
+ uint8_t *iv = crypto_op->iv_aad.aead_iv_aad.iv;
+ uint8_t *aad = crypto_op->iv_aad.aead_iv_aad.aad;
+
+ sym->aead.aad.data = aad;
+ sym->aead.aad.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ aad - rte_pktmbuf_mtod(mbuf, uint8_t *));
+ sym->aead.digest.data = rte_pktmbuf_mtod_offset(mbuf,
+ uint8_t *, rte_pktmbuf_pkt_len(mbuf) -
+ data->digest_len);
+ sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(mbuf,
+ rte_pktmbuf_pkt_len(mbuf) - data->digest_len);
+ sym->aead.data.offset = data->data_offset - pkt_offset;
+ sym->aead.data.length = payload_len;
+
+ if (data->aead.iv_update_len) {
+ uint8_t *pkt_iv = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.iv_data_offset + ip_offset);
+ uint8_t *data_iv = data->iv_aad_data;
+
+ if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+ rte_memcpy(pkt_iv, data_iv,
+ data->aead.iv_update_len);
+ else
+ rte_memcpy(data_iv, pkt_iv,
+ data->aead.iv_update_len);
+ }
+
+ rte_memcpy(iv, data->iv_aad_data, data->aead.iv_len);
+
+ if (data->aead.aad_update_len) {
+ uint8_t *pkt_aad = RTE_MBUF_METADATA_UINT8_PTR(mbuf,
+ data->aead.aad_data_offset + ip_offset);
+ uint8_t *data_aad = data->iv_aad_data +
+ data->aead.iv_len;
+
+			if (data->direction == RTE_CRYPTO_CIPHER_OP_ENCRYPT)
+				rte_memcpy(pkt_aad, data_aad,
+						data->aead.aad_update_len);
+			else
+				rte_memcpy(data_aad, pkt_aad,
+						data->aead.aad_update_len);
+ }
+
+ rte_memcpy(aad, data->iv_aad_data + data->aead.iv_len,
+ data->aead.aad_len);
+ }
+
+ return 0;
+}
+
+/**
+ * RTE_TABLE_ACTION_TAG
+ */
+struct tag_data {
+ uint32_t tag;
+} __attribute__((__packed__));
+
+static int
+tag_apply(struct tag_data *data,
+ struct rte_table_action_tag_params *p)
+{
+ data->tag = p->tag;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_tag(struct rte_mbuf *mbuf,
+ struct tag_data *data)
+{
+ mbuf->hash.fdir.hi = data->tag;
+ mbuf->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
+static __rte_always_inline void
+pkt4_work_tag(struct rte_mbuf *mbuf0,
+ struct rte_mbuf *mbuf1,
+ struct rte_mbuf *mbuf2,
+ struct rte_mbuf *mbuf3,
+ struct tag_data *data0,
+ struct tag_data *data1,
+ struct tag_data *data2,
+ struct tag_data *data3)
+{
+ mbuf0->hash.fdir.hi = data0->tag;
+ mbuf1->hash.fdir.hi = data1->tag;
+ mbuf2->hash.fdir.hi = data2->tag;
+ mbuf3->hash.fdir.hi = data3->tag;
+
+ mbuf0->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf1->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf2->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+ mbuf3->ol_flags |= PKT_RX_FDIR | PKT_RX_FDIR_ID;
+}
+
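
The tag rides in the mbuf like a flow-director ID, so any stage downstream of the pipeline can read it back; a minimal sketch:

static inline uint32_t
pkt_tag_read(const struct rte_mbuf *m)
{
	/* The flags are set only if the TAG action (or real FDIR) ran. */
	return (m->ol_flags & PKT_RX_FDIR_ID) ? m->hash.fdir.hi : 0;
}
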
+/**
+ * RTE_TABLE_ACTION_DECAP
+ */
+struct decap_data {
+ uint16_t n;
+} __attribute__((__packed__));
+
+static int
+decap_apply(struct decap_data *data,
+ struct rte_table_action_decap_params *p)
+{
+ data->n = p->n;
+ return 0;
+}
+
+static __rte_always_inline void
+pkt_work_decap(struct rte_mbuf *mbuf,
+ struct decap_data *data)
+{
+ uint16_t data_off = mbuf->data_off;
+ uint16_t data_len = mbuf->data_len;
+ uint32_t pkt_len = mbuf->pkt_len;
+ uint16_t n = data->n;
+
+ mbuf->data_off = data_off + n;
+ mbuf->data_len = data_len - n;
+ mbuf->pkt_len = pkt_len - n;
+}
+
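
For example, an entry that undoes the VXLAN/IPv4 encapsulation above would strip the 50 bytes of outer headers (a sketch; the value must match the actual outer header length):

struct rte_table_action_decap_params dp = {
	.n = 14 + 20 + 8 + 8,	/* Ethernet + IPv4 + UDP + VXLAN */
};
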
+static __rte_always_inline void
+pkt4_work_decap(struct rte_mbuf *mbuf0,
+ struct rte_mbuf *mbuf1,
+ struct rte_mbuf *mbuf2,
+ struct rte_mbuf *mbuf3,
+ struct decap_data *data0,
+ struct decap_data *data1,
+ struct decap_data *data2,
+ struct decap_data *data3)
+{
+ uint16_t data_off0 = mbuf0->data_off;
+ uint16_t data_len0 = mbuf0->data_len;
+ uint32_t pkt_len0 = mbuf0->pkt_len;
+
+ uint16_t data_off1 = mbuf1->data_off;
+ uint16_t data_len1 = mbuf1->data_len;
+ uint32_t pkt_len1 = mbuf1->pkt_len;
+
+ uint16_t data_off2 = mbuf2->data_off;
+ uint16_t data_len2 = mbuf2->data_len;
+ uint32_t pkt_len2 = mbuf2->pkt_len;
+
+ uint16_t data_off3 = mbuf3->data_off;
+ uint16_t data_len3 = mbuf3->data_len;
+ uint32_t pkt_len3 = mbuf3->pkt_len;
+
+ uint16_t n0 = data0->n;
+ uint16_t n1 = data1->n;
+ uint16_t n2 = data2->n;
+ uint16_t n3 = data3->n;
+
+ mbuf0->data_off = data_off0 + n0;
+ mbuf0->data_len = data_len0 - n0;
+ mbuf0->pkt_len = pkt_len0 - n0;
+
+ mbuf1->data_off = data_off1 + n1;
+ mbuf1->data_len = data_len1 - n1;
+ mbuf1->pkt_len = pkt_len1 - n1;
+
+ mbuf2->data_off = data_off2 + n2;
+ mbuf2->data_len = data_len2 - n2;
+ mbuf2->pkt_len = pkt_len2 - n2;
+
+ mbuf3->data_off = data_off3 + n3;
+ mbuf3->data_len = data_len3 - n3;
+ mbuf3->pkt_len = pkt_len3 - n3;
+}
+
/**
* Action profile
*/
@@ -1235,6 +2148,9 @@ action_valid(enum rte_table_action_type action)
case RTE_TABLE_ACTION_TTL:
case RTE_TABLE_ACTION_STATS:
case RTE_TABLE_ACTION_TIME:
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ case RTE_TABLE_ACTION_TAG:
+ case RTE_TABLE_ACTION_DECAP:
return 1;
default:
return 0;
@@ -1254,6 +2170,7 @@ struct ap_config {
struct rte_table_action_nat_config nat;
struct rte_table_action_ttl_config ttl;
struct rte_table_action_stats_config stats;
+ struct rte_table_action_sym_crypto_config sym_crypto;
};
static size_t
@@ -1274,6 +2191,8 @@ action_cfg_size(enum rte_table_action_type action)
return sizeof(struct rte_table_action_ttl_config);
case RTE_TABLE_ACTION_STATS:
return sizeof(struct rte_table_action_stats_config);
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return sizeof(struct rte_table_action_sym_crypto_config);
default:
return 0;
}
@@ -1305,6 +2224,8 @@ action_cfg_get(struct ap_config *ap_config,
case RTE_TABLE_ACTION_STATS:
return &ap_config->stats;
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return &ap_config->sym_crypto;
default:
return NULL;
}
@@ -1361,6 +2282,15 @@ action_data_size(enum rte_table_action_type action,
case RTE_TABLE_ACTION_TIME:
return sizeof(struct time_data);
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return (sizeof(struct sym_crypto_data));
+
+ case RTE_TABLE_ACTION_TAG:
+ return sizeof(struct tag_data);
+
+ case RTE_TABLE_ACTION_DECAP:
+ return sizeof(struct decap_data);
+
default:
return 0;
}
@@ -1460,6 +2390,10 @@ rte_table_action_profile_action_register(struct rte_table_action_profile *profil
status = stats_cfg_check(action_config);
break;
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ status = sym_crypto_cfg_check(action_config);
+ break;
+
default:
status = 0;
break;
@@ -1609,6 +2543,19 @@ rte_table_action_apply(struct rte_table_action *action,
return time_apply(action_data,
action_params);
+ case RTE_TABLE_ACTION_SYM_CRYPTO:
+ return sym_crypto_apply(action_data,
+ &action->cfg.sym_crypto,
+ action_params);
+
+ case RTE_TABLE_ACTION_TAG:
+ return tag_apply(action_data,
+ action_params);
+
+ case RTE_TABLE_ACTION_DECAP:
+ return decap_apply(action_data,
+ action_params);
+
default:
return -EINVAL;
}
@@ -1861,6 +2808,25 @@ rte_table_action_time_read(struct rte_table_action *action,
return 0;
}
+struct rte_cryptodev_sym_session *
+rte_table_action_crypto_sym_session_get(struct rte_table_action *action,
+ void *data)
+{
+ struct sym_crypto_data *sym_crypto_data;
+
+ /* Check input arguments */
+ if ((action == NULL) ||
+ ((action->cfg.action_mask &
+ (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) == 0) ||
+ (data == NULL))
+ return NULL;
+
+ sym_crypto_data = action_data_get(data, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+
+ return sym_crypto_data->session;
+}
+
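
A hedged sketch of the accessor's intended use, e.g. releasing the session when an entry is removed: entry_data stands for the entry's action data pointer obtained from the pipeline table API, and the clear/free pairing follows the standard cryptodev session lifecycle:

struct rte_cryptodev_sym_session *sess =
	rte_table_action_crypto_sym_session_get(action, entry_data);

if (sess != NULL) {
	rte_cryptodev_sym_session_clear(cdev_id, sess);
	rte_cryptodev_sym_session_free(sess);
}
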
static __rte_always_inline uint64_t
pkt_work(struct rte_mbuf *mbuf,
struct rte_pipeline_table_entry *table_entry,
@@ -1920,6 +2886,14 @@ pkt_work(struct rte_mbuf *mbuf,
dscp);
}
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ void *data = action_data_get(table_entry,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+
+ pkt_work_decap(mbuf, data);
+ }
+
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
void *data =
action_data_get(table_entry, action, RTE_TABLE_ACTION_ENCAP);
@@ -1966,6 +2940,22 @@ pkt_work(struct rte_mbuf *mbuf,
pkt_work_time(data, time);
}
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ void *data = action_data_get(table_entry, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+
+ drop_mask |= pkt_work_sym_crypto(mbuf, data, &cfg->sym_crypto,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ void *data = action_data_get(table_entry,
+ action,
+ RTE_TABLE_ACTION_TAG);
+
+ pkt_work_tag(mbuf, data);
+ }
+
return drop_mask;
}
@@ -2137,6 +3127,24 @@ pkt4_work(struct rte_mbuf **mbufs,
dscp3);
}
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_DECAP)) {
+ void *data0 = action_data_get(table_entry0,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+ void *data1 = action_data_get(table_entry1,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+ void *data2 = action_data_get(table_entry2,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+ void *data3 = action_data_get(table_entry3,
+ action,
+ RTE_TABLE_ACTION_DECAP);
+
+ pkt4_work_decap(mbuf0, mbuf1, mbuf2, mbuf3,
+ data0, data1, data2, data3);
+ }
+
if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_ENCAP)) {
void *data0 =
action_data_get(table_entry0, action, RTE_TABLE_ACTION_ENCAP);
@@ -2254,6 +3262,44 @@ pkt4_work(struct rte_mbuf **mbufs,
pkt_work_time(data3, time);
}
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_SYM_CRYPTO)) {
+ void *data0 = action_data_get(table_entry0, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+ void *data1 = action_data_get(table_entry1, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+ void *data2 = action_data_get(table_entry2, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+ void *data3 = action_data_get(table_entry3, action,
+ RTE_TABLE_ACTION_SYM_CRYPTO);
+
+ drop_mask0 |= pkt_work_sym_crypto(mbuf0, data0, &cfg->sym_crypto,
+ ip_offset);
+ drop_mask1 |= pkt_work_sym_crypto(mbuf1, data1, &cfg->sym_crypto,
+ ip_offset);
+ drop_mask2 |= pkt_work_sym_crypto(mbuf2, data2, &cfg->sym_crypto,
+ ip_offset);
+ drop_mask3 |= pkt_work_sym_crypto(mbuf3, data3, &cfg->sym_crypto,
+ ip_offset);
+ }
+
+ if (cfg->action_mask & (1LLU << RTE_TABLE_ACTION_TAG)) {
+ void *data0 = action_data_get(table_entry0,
+ action,
+ RTE_TABLE_ACTION_TAG);
+ void *data1 = action_data_get(table_entry1,
+ action,
+ RTE_TABLE_ACTION_TAG);
+ void *data2 = action_data_get(table_entry2,
+ action,
+ RTE_TABLE_ACTION_TAG);
+ void *data3 = action_data_get(table_entry3,
+ action,
+ RTE_TABLE_ACTION_TAG);
+
+ pkt4_work_tag(mbuf0, mbuf1, mbuf2, mbuf3,
+ data0, data1, data2, data3);
+ }
+
return drop_mask0 |
(drop_mask1 << 1) |
(drop_mask2 << 2) |