From 055c52583a2794da8ba1e85a48cce3832372b12f Mon Sep 17 00:00:00 2001
From: Luca Boccassi
Date: Wed, 8 Nov 2017 14:15:11 +0000
Subject: New upstream version 17.11-rc3

Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685
Signed-off-by: Luca Boccassi
---
 examples/ipsec-secgw/Makefile      |   6 +
 examples/ipsec-secgw/esp.c         | 144 ++++++++++++----
 examples/ipsec-secgw/esp.h         |  10 --
 examples/ipsec-secgw/ipip.h        |   3 +-
 examples/ipsec-secgw/ipsec-secgw.c | 105 +++++++++----
 examples/ipsec-secgw/ipsec.c       | 306 +++++++++++++++++++++++++++++++------
 examples/ipsec-secgw/ipsec.h       |  33 +++-
 examples/ipsec-secgw/sa.c          | 154 ++++++++++++++-----
 8 files changed, 590 insertions(+), 171 deletions(-)

diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
index 17e91551..9fd33cb7 100644
--- a/examples/ipsec-secgw/Makefile
+++ b/examples/ipsec-secgw/Makefile
@@ -38,6 +38,12 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
 
 include $(RTE_SDK)/mk/rte.vars.mk
 
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+$(error "RTE_LIBRTE_SECURITY is required to build ipsec-secgw")
+endif
+endif
+
 APP = ipsec-secgw
 
 CFLAGS += -O3 -gdwarf-2
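librte_security is new in DPDK 17.11, and the guard above makes it a hard build
dependency of this example (make clean is deliberately exempted so a tree can
always be cleaned). On targets where the library is not already enabled the
build stops here; a minimal sketch of the configuration this check expects,
assuming the usual config/common_base convention:

	CONFIG_RTE_LIBRTE_SECURITY=y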
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 70bb81f7..c3efe52b 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -58,8 +58,11 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	struct rte_crypto_sym_op *sym_cop;
 	int32_t payload_len, ip_hdr_len;
 
-	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+		return 0;
+
+	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(cop != NULL);
 
 	ip4 = rte_pktmbuf_mtod(m, struct ip *);
@@ -103,12 +106,12 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		aad = get_aad(m);
 		memcpy(aad, iv - sizeof(struct esp_hdr), 8);
 		sym_cop->aead.aad.data = aad;
-		sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
 				aad - rte_pktmbuf_mtod(m, uint8_t *));
 
 		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
-		sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	} else {
 		sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
@@ -154,7 +157,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
-		sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	}
 
@@ -175,29 +178,44 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 	RTE_ASSERT(sa != NULL);
 	RTE_ASSERT(cop != NULL);
 
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+		if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
+			if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+				cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+			else
+				cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+		} else
+			cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+	}
+
 	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
 		RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
 		return -1;
 	}
 
-	nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
-			rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
-	pad_len = nexthdr - 1;
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+			sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
+		nexthdr = &m->inner_esp_next_proto;
+	} else {
+		nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
+				rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
+		pad_len = nexthdr - 1;
+
+		padding = pad_len - *pad_len;
+		for (i = 0; i < *pad_len; i++) {
+			if (padding[i] != i + 1) {
+				RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+				return -EINVAL;
+			}
+		}
 
-	padding = pad_len - *pad_len;
-	for (i = 0; i < *pad_len; i++) {
-		if (padding[i] != i + 1) {
-			RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+		if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
+			RTE_LOG(ERR, IPSEC_ESP,
+					"failed to remove pad_len + digest\n");
 			return -EINVAL;
 		}
 	}
 
-	if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
-		RTE_LOG(ERR, IPSEC_ESP,
-				"failed to remove pad_len + digest\n");
-		return -EINVAL;
-	}
-
 	if (unlikely(sa->flags == TRANSPORT)) {
 		ip = rte_pktmbuf_mtod(m, struct ip *);
 		ip4 = (struct ip *)rte_pktmbuf_adj(m,
@@ -211,7 +229,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
 			/* XXX No option headers supported */
 			memmove(ip6, ip, sizeof(struct ip6_hdr));
 			ip6->ip6_nxt = *nexthdr;
-			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+					sizeof(struct ip6_hdr));
 		}
 	} else
 		ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
@@ -226,14 +245,13 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	struct ip *ip4;
 	struct ip6_hdr *ip6;
 	struct esp_hdr *esp = NULL;
-	uint8_t *padding, *new_ip, nlp;
+	uint8_t *padding = NULL, *new_ip, nlp;
 	struct rte_crypto_sym_op *sym_cop;
 	int32_t i;
 	uint16_t pad_payload_len, pad_len, ip_hdr_len;
 
 	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
-	RTE_ASSERT(cop != NULL);
 
 	ip_hdr_len = 0;
@@ -283,12 +301,19 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		return -EINVAL;
 	}
 
-	padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
-	if (unlikely(padding == NULL)) {
-		RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
-		return -ENOSPC;
+	/* Add trailer padding if it is not constructed by HW */
+	if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+			(sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+			 !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
+		padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
+				sa->digest_len);
+		if (unlikely(padding == NULL)) {
+			RTE_LOG(ERR, IPSEC_ESP,
+					"not enough mbuf trailing space\n");
+			return -ENOSPC;
+		}
+		rte_prefetch0(padding);
 	}
-	rte_prefetch0(padding);
 
 	switch (sa->flags) {
 	case IP4_TUNNEL:
@@ -306,14 +331,15 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 				sizeof(struct esp_hdr) + sa->iv_len);
 		memmove(new_ip, ip4, ip_hdr_len);
 		esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+		ip4 = (struct ip *)new_ip;
 		if (likely(ip4->ip_v == IPVERSION)) {
-			ip4 = (struct ip *)new_ip;
 			ip4->ip_p = IPPROTO_ESP;
 			ip4->ip_len = htons(rte_pktmbuf_data_len(m));
 		} else {
 			ip6 = (struct ip6_hdr *)new_ip;
 			ip6->ip6_nxt = IPPROTO_ESP;
-			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+			ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+					sizeof(struct ip6_hdr));
 		}
 	}
 
@@ -321,15 +347,46 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 	esp->spi = rte_cpu_to_be_32(sa->spi);
 	esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
 
+	/* set iv */
 	uint64_t *iv = (uint64_t *)(esp + 1);
+
+	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+		*iv = rte_cpu_to_be_64(sa->seq);
+	} else {
+		switch (sa->cipher_algo) {
+		case RTE_CRYPTO_CIPHER_NULL:
+		case RTE_CRYPTO_CIPHER_AES_CBC:
+			memset(iv, 0, sa->iv_len);
+			break;
+		case RTE_CRYPTO_CIPHER_AES_CTR:
+			*iv = rte_cpu_to_be_64(sa->seq);
+			break;
+		default:
+			RTE_LOG(ERR, IPSEC_ESP,
+				"unsupported cipher algorithm %u\n",
+				sa->cipher_algo);
+			return -EINVAL;
+		}
+	}
+
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+		if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
+			/* Set the inner esp next protocol for HW trailer */
+			m->inner_esp_next_proto = nlp;
+			m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
+		} else {
+			padding[pad_len - 2] = pad_len - 2;
+			padding[pad_len - 1] = nlp;
+		}
+		goto done;
+	}
+
+	RTE_ASSERT(cop != NULL);
 	sym_cop = get_sym_cop(cop);
 	sym_cop->m_src = m;
 
 	if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
 		uint8_t *aad;
 
-		*iv = sa->seq;
 		sym_cop->aead.data.offset = ip_hdr_len +
 			sizeof(struct esp_hdr) + sa->iv_len;
 		sym_cop->aead.data.length = pad_payload_len;
@@ -342,30 +399,28 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 		struct cnt_blk *icb = get_cnt_blk(m);
 		icb->salt = sa->salt;
-		icb->iv = sa->seq;
+		icb->iv = rte_cpu_to_be_64(sa->seq);
 		icb->cnt = rte_cpu_to_be_32(1);
 
 		aad = get_aad(m);
 		memcpy(aad, esp, 8);
 		sym_cop->aead.aad.data = aad;
-		sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
 				aad - rte_pktmbuf_mtod(m, uint8_t *));
 
 		sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
-		sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	} else {
 		switch (sa->cipher_algo) {
 		case RTE_CRYPTO_CIPHER_NULL:
 		case RTE_CRYPTO_CIPHER_AES_CBC:
-			memset(iv, 0, sa->iv_len);
 			sym_cop->cipher.data.offset = ip_hdr_len +
 				sizeof(struct esp_hdr);
 			sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
 			break;
 		case RTE_CRYPTO_CIPHER_AES_CTR:
-			*iv = sa->seq;
 			sym_cop->cipher.data.offset = ip_hdr_len +
 				sizeof(struct esp_hdr) + sa->iv_len;
 			sym_cop->cipher.data.length = pad_payload_len;
 			break;
@@ -384,7 +439,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 		struct cnt_blk *icb = get_cnt_blk(m);
 		icb->salt = sa->salt;
-		icb->iv = sa->seq;
+		icb->iv = rte_cpu_to_be_64(sa->seq);
 		icb->cnt = rte_cpu_to_be_32(1);
 
 		switch (sa->auth_algo) {
@@ -403,25 +458,30 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
 
 		sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
-		sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+		sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
 				rte_pktmbuf_pkt_len(m) - sa->digest_len);
 	}
 
+done:
 	return 0;
 }
 
 int
-esp_outbound_post(struct rte_mbuf *m __rte_unused,
-		struct ipsec_sa *sa __rte_unused,
-		struct rte_crypto_op *cop)
+esp_outbound_post(struct rte_mbuf *m,
+		struct ipsec_sa *sa,
+		struct rte_crypto_op *cop)
 {
 	RTE_ASSERT(m != NULL);
 	RTE_ASSERT(sa != NULL);
-	RTE_ASSERT(cop != NULL);
 
-	if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
-		RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
-		return -1;
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+		m->ol_flags |= PKT_TX_SEC_OFFLOAD;
+	} else {
+		RTE_ASSERT(cop != NULL);
+		if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+			RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+			return -1;
+		}
 	}
 
 	return 0;
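With inline crypto the NIC performs the ESP transform itself, so the code above
no longer builds a crypto op for such SAs: esp_inbound() returns early and
esp_inbound_post() derives a status from the mbuf's RX offload flags instead of
from a cryptodev completion. A minimal standalone sketch of that flag-to-status
mapping (same logic as the hunk above; the helper name is hypothetical):

	#include <rte_mbuf.h>
	#include <rte_crypto.h>

	/* Map inline-crypto RX offload flags to a crypto-op status. */
	static inline enum rte_crypto_op_status
	inline_rx_status(const struct rte_mbuf *m)
	{
		if (!(m->ol_flags & PKT_RX_SEC_OFFLOAD))
			return RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
		if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
			return RTE_CRYPTO_OP_STATUS_ERROR;
		return RTE_CRYPTO_OP_STATUS_SUCCESS;
	}

Note also that the per-packet IV and the counter block now use
rte_cpu_to_be_64(sa->seq) rather than the raw host-endian sequence number, so
the on-wire IV is byte-order stable across architectures.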
diff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h
index fa5cc8af..23601e37 100644
--- a/examples/ipsec-secgw/esp.h
+++ b/examples/ipsec-secgw/esp.h
@@ -35,16 +35,6 @@
 
 struct mbuf;
 
-/* RFC4303 */
-struct esp_hdr {
-	uint32_t spi;
-	uint32_t seq;
-	/* Payload */
-	/* Padding */
-	/* Pad Length */
-	/* Next Header */
-	/* Integrity Check Value - ICV */
-};
 
 int
 esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
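The local RFC 4303 header definition is dropped here; since 17.08 librte_net
ships an equivalent shared definition, which is presumably what the code relies
on now. For reference, a sketch of that layout (field names as in rte_esp.h;
verify against the tree in use):

	#include <rte_byteorder.h>

	/* ESP header (RFC 4303). What follows it on the wire - payload,
	 * padding, pad length, next header, ICV - has no fixed offset and
	 * so cannot be expressed as struct fields. */
	struct esp_hdr {
		rte_be32_t spi;
		rte_be32_t seq;
	} __attribute__((__packed__));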
diff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h
index ff1dccdb..93393d52 100644
--- a/examples/ipsec-secgw/ipip.h
+++ b/examples/ipsec-secgw/ipip.h
@@ -72,7 +72,8 @@ ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6,
 
 		/* Per RFC4301 5.1.2.1 */
 		outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20);
-		outip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+		outip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+				sizeof(struct ip6_hdr));
 
 		outip6->ip6_nxt = IPPROTO_ESP;
 		outip6->ip6_hops = IPDEFTTL;
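This ip6_plen fix recurs throughout the patch: RFC 2460 defines the IPv6
payload length as the octets after the fixed 40-byte header, whereas the old
code wrote the full segment length. A one-line helper capturing the corrected
computation (the helper name is hypothetical):

	#include <netinet/ip6.h>
	#include <arpa/inet.h>
	#include <rte_mbuf.h>

	/* IPv6 payload length excludes the 40-byte fixed header. */
	static inline void
	set_ip6_plen(struct ip6_hdr *ip6, const struct rte_mbuf *m)
	{
		ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
				sizeof(struct ip6_hdr));
	}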
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 99dc270c..c98454a9 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -57,7 +57,6 @@
 #include
 #include
 #include
-#include
 #include
 #include
 #include
@@ -161,14 +160,15 @@ static int32_t numa_on = 1; /**< NUMA is enabled by default. */
 static uint32_t nb_lcores;
 static uint32_t single_sa;
 static uint32_t single_sa_idx;
+static uint32_t frame_size;
 
 struct lcore_rx_queue {
-	uint8_t port_id;
+	uint16_t port_id;
 	uint8_t queue_id;
 } __rte_cache_aligned;
 
 struct lcore_params {
-	uint8_t port_id;
+	uint16_t port_id;
 	uint8_t queue_id;
 	uint8_t lcore_id;
 } __rte_cache_aligned;
@@ -204,11 +204,9 @@ static struct rte_eth_conf port_conf = {
 		.mq_mode = ETH_MQ_RX_RSS,
 		.max_rx_pkt_len = ETHER_MAX_LEN,
 		.split_hdr_size = 0,
-		.header_split   = 0, /**< Header Split disabled */
-		.hw_ip_checksum = 1, /**< IP checksum offload enabled */
-		.hw_vlan_filter = 0, /**< VLAN filtering disabled */
-		.jumbo_frame    = 0, /**< Jumbo Frame Support disabled */
-		.hw_strip_crc   = 1, /**< CRC stripped by hardware */
+		.offloads = DEV_RX_OFFLOAD_CHECKSUM |
+			    DEV_RX_OFFLOAD_CRC_STRIP,
+		.ignore_offload_bitfield = 1,
 	},
 	.rx_adv_conf = {
 		.rss_conf = {
@@ -290,7 +288,7 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
 }
 
 static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
 {
 	struct ip *ip;
 	struct ether_hdr *ethhdr;
@@ -320,7 +318,7 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
 }
 
 static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
 {
 	int32_t i;
 	const int32_t prefetch_offset = 2;
@@ -336,7 +334,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
 
 /* Send burst of packets on an output interface */
 static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
 {
 	struct rte_mbuf **m_table;
 	int32_t ret;
@@ -359,7 +357,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
 
 /* Enqueue a single packet, and send burst if queue is filled */
 static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
 {
 	uint32_t lcore_id;
 	uint16_t len;
@@ -646,7 +644,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
 
 static inline void
 process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
-		uint8_t nb_pkts, uint8_t portid)
+		uint8_t nb_pkts, uint16_t portid)
 {
 	struct ipsec_traffic traffic;
@@ -691,7 +689,8 @@ main_loop(__attribute__((unused)) void *dummy)
 	uint32_t lcore_id;
 	uint64_t prev_tsc, diff_tsc, cur_tsc;
 	int32_t i, nb_rx;
-	uint8_t portid, queueid;
+	uint16_t portid;
+	uint8_t queueid;
 	struct lcore_conf *qconf;
 	int32_t socket_id;
 	const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
("full-duplex") : ("half-duplex\n")); else - printf("Port %d Link Down\n", - (uint8_t)portid); + printf("Port %d Link Down\n", portid); continue; } /* clear all_ports_up flag if any link down */ @@ -1113,7 +1131,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id, uint16_t qp, struct lcore_params *params, struct ipsec_ctx *ipsec_ctx, const struct rte_cryptodev_capabilities *cipher, - const struct rte_cryptodev_capabilities *auth) + const struct rte_cryptodev_capabilities *auth, + const struct rte_cryptodev_capabilities *aead) { int32_t ret = 0; unsigned long i; @@ -1124,6 +1143,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id, key.cipher_algo = cipher->sym.cipher.algo; if (auth) key.auth_algo = auth->sym.auth.algo; + if (aead) + key.aead_algo = aead->sym.aead.algo; ret = rte_hash_lookup(map, &key); if (ret != -ENOENT) @@ -1192,6 +1213,12 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id, if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) continue; + if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) { + ret |= add_mapping(map, str, cdev_id, qp, params, + ipsec_ctx, NULL, NULL, i); + continue; + } + if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) continue; @@ -1204,7 +1231,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id, continue; ret |= add_mapping(map, str, cdev_id, qp, params, - ipsec_ctx, i, j); + ipsec_ctx, i, j, NULL); } } @@ -1322,7 +1349,7 @@ cryptodevs_init(void) } static void -port_init(uint8_t portid) +port_init(uint16_t portid) { struct rte_eth_dev_info dev_info; struct rte_eth_txconf *txconf; @@ -1357,6 +1384,16 @@ port_init(uint8_t portid) printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n", nb_rx_queue, nb_tx_queue); + if (frame_size) { + port_conf.rxmode.max_rx_pkt_len = frame_size; + port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME; + } + + if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY) + port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY; + if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY) + port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY; + ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue, &port_conf); if (ret < 0) @@ -1421,11 +1458,14 @@ static void pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf) { char s[64]; + uint32_t buff_size = frame_size ? 
@@ -1113,7 +1131,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
 		uint16_t qp, struct lcore_params *params,
 		struct ipsec_ctx *ipsec_ctx,
 		const struct rte_cryptodev_capabilities *cipher,
-		const struct rte_cryptodev_capabilities *auth)
+		const struct rte_cryptodev_capabilities *auth,
+		const struct rte_cryptodev_capabilities *aead)
 {
 	int32_t ret = 0;
 	unsigned long i;
@@ -1124,6 +1143,8 @@
 		key.cipher_algo = cipher->sym.cipher.algo;
 	if (auth)
 		key.auth_algo = auth->sym.auth.algo;
+	if (aead)
+		key.aead_algo = aead->sym.aead.algo;
 
 	ret = rte_hash_lookup(map, &key);
 	if (ret != -ENOENT)
@@ -1192,6 +1213,12 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
 		if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
 			continue;
 
+		if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+			ret |= add_mapping(map, str, cdev_id, qp, params,
+					ipsec_ctx, NULL, NULL, i);
+			continue;
+		}
+
 		if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
 			continue;
@@ -1204,7 +1231,7 @@
 				continue;
 
 			ret |= add_mapping(map, str, cdev_id, qp, params,
-					ipsec_ctx, i, j);
+					ipsec_ctx, i, j, NULL);
 		}
 	}
@@ -1322,7 +1349,7 @@ cryptodevs_init(void)
 }
 
 static void
-port_init(uint8_t portid)
+port_init(uint16_t portid)
 {
 	struct rte_eth_dev_info dev_info;
 	struct rte_eth_txconf *txconf;
@@ -1357,6 +1384,16 @@
 	printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
 			nb_rx_queue, nb_tx_queue);
 
+	if (frame_size) {
+		port_conf.rxmode.max_rx_pkt_len = frame_size;
+		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+	}
+
+	if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
+	if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+		port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+
 	ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
 			&port_conf);
 	if (ret < 0)
@@ -1421,11 +1458,14 @@
 static void
 pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
 {
 	char s[64];
+	uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
+			RTE_MBUF_DEFAULT_BUF_SIZE;
+
 	snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
 	ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
 			MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
-			RTE_MBUF_DEFAULT_BUF_SIZE,
+			buff_size,
 			socket_id);
 	if (ctx->mbuf_pool == NULL)
 		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
@@ -1438,8 +1478,9 @@
 int32_t
 main(int32_t argc, char **argv)
 {
 	int32_t ret;
-	uint32_t lcore_id, nb_ports;
-	uint8_t portid, socket_id;
+	uint32_t lcore_id;
+	uint8_t socket_id;
+	uint16_t portid, nb_ports;
 
 	/* init EAL */
 	ret = rte_eal_init(argc, argv);
@@ -1522,7 +1563,7 @@
 		rte_eth_promiscuous_enable(portid);
 	}
 
-	check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+	check_all_ports_link_status(nb_ports, enabled_port_mask);
 
 	/* launch per-lcore init on every lcore */
 	rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
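port_init() now enables the new 17.11 security offloads only where the device
reports them, so ports without the capability keep working in lookaside mode.
A standalone sketch of that probe (hypothetical helper, same calls as the hunk
above):

	#include <rte_ethdev.h>

	static void
	enable_security_offloads(uint16_t portid, struct rte_eth_conf *conf)
	{
		struct rte_eth_dev_info dev_info;

		rte_eth_dev_info_get(portid, &dev_info);
		if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
			conf->rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
			conf->txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
	}

The new -j option feeds the same path: a value above 1518 becomes
max_rx_pkt_len with DEV_RX_OFFLOAD_JUMBO_FRAME set, and pool_init() sizes
mbufs as frame_size + RTE_PKTMBUF_HEADROOM to match.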
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 0afb9d67..c24284d6 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -37,7 +37,9 @@
 #include
 #include
 #include
+#include
 #include
+#include
 #include
 #include
@@ -49,21 +51,28 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 {
 	struct rte_cryptodev_info cdev_info;
 	unsigned long cdev_id_qp = 0;
-	int32_t ret;
+	int32_t ret = 0;
 	struct cdev_key key = { 0 };
 
 	key.lcore_id = (uint8_t)rte_lcore_id();
 	key.cipher_algo = (uint8_t)sa->cipher_algo;
 	key.auth_algo = (uint8_t)sa->auth_algo;
+	key.aead_algo = (uint8_t)sa->aead_algo;
 
-	ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
-			(void **)&cdev_id_qp);
-	if (ret < 0) {
-		RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
-				"auth_algo %u\n", key.lcore_id, key.cipher_algo,
-				key.auth_algo);
-		return -1;
+	if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+		ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
+				(void **)&cdev_id_qp);
+		if (ret < 0) {
+			RTE_LOG(ERR, IPSEC,
+				"No cryptodev: core %u, cipher_algo %u, "
+				"auth_algo %u, aead_algo %u\n",
+				key.lcore_id,
+				key.cipher_algo,
+				key.auth_algo,
+				key.aead_algo);
+			return -1;
+		}
 	}
 
 	RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
@@ -71,23 +80,153 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
 			ipsec_ctx->tbl[cdev_id_qp].id,
 			ipsec_ctx->tbl[cdev_id_qp].qp);
 
-	sa->crypto_session = rte_cryptodev_sym_session_create(
-			ipsec_ctx->session_pool);
-	rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
-			sa->crypto_session, sa->xforms,
-			ipsec_ctx->session_pool);
-
-	rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
-	if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
-		ret = rte_cryptodev_queue_pair_attach_sym_session(
-				ipsec_ctx->tbl[cdev_id_qp].id,
-				ipsec_ctx->tbl[cdev_id_qp].qp,
-				sa->crypto_session);
-		if (ret < 0) {
-			RTE_LOG(ERR, IPSEC,
-				"Session cannot be attached to qp %u ",
-				ipsec_ctx->tbl[cdev_id_qp].qp);
-			return -1;
+	if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
+		struct rte_security_session_conf sess_conf = {
+			.action_type = sa->type,
+			.protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+			.ipsec = {
+				.spi = sa->spi,
+				.salt = sa->salt,
+				.options = { 0 },
+				.direction = sa->direction,
+				.proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+				.mode = (sa->flags == IP4_TUNNEL ||
+						sa->flags == IP6_TUNNEL) ?
+					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
+					RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+			},
+			.crypto_xform = sa->xforms
+
+		};
+
+		if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
+			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+						rte_cryptodev_get_sec_ctx(
+						ipsec_ctx->tbl[cdev_id_qp].id);
+
+			if (sess_conf.ipsec.mode ==
+					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+				struct rte_security_ipsec_tunnel_param *tunnel =
+						&sess_conf.ipsec.tunnel;
+				if (sa->flags == IP4_TUNNEL) {
+					tunnel->type =
+						RTE_SECURITY_IPSEC_TUNNEL_IPV4;
+					tunnel->ipv4.ttl = IPDEFTTL;
+
+					memcpy((uint8_t *)&tunnel->ipv4.src_ip,
+						(uint8_t *)&sa->src.ip.ip4, 4);
+
+					memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
+						(uint8_t *)&sa->dst.ip.ip4, 4);
+				}
+				/* TODO support for Transport and IPV6 tunnel */
+			}
+
+			sa->sec_session = rte_security_session_create(ctx,
+					&sess_conf, ipsec_ctx->session_pool);
+			if (sa->sec_session == NULL) {
+				RTE_LOG(ERR, IPSEC,
+				"SEC Session init failed: err: %d\n", ret);
+				return -1;
+			}
+		} else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+			struct rte_flow_error err;
+			struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+						rte_eth_dev_get_sec_ctx(
+						sa->portid);
+			const struct rte_security_capability *sec_cap;
+
+			sa->sec_session = rte_security_session_create(ctx,
+					&sess_conf, ipsec_ctx->session_pool);
+			if (sa->sec_session == NULL) {
+				RTE_LOG(ERR, IPSEC,
+				"SEC Session init failed: err: %d\n", ret);
+				return -1;
+			}
+
+			sec_cap = rte_security_capabilities_get(ctx);
+
+			/* iterate until ESP tunnel*/
+			while (sec_cap->action !=
+					RTE_SECURITY_ACTION_TYPE_NONE) {
+
+				if (sec_cap->action == sa->type &&
+				    sec_cap->protocol ==
+					RTE_SECURITY_PROTOCOL_IPSEC &&
+				    sec_cap->ipsec.mode ==
+					RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+				    sec_cap->ipsec.direction == sa->direction)
+					break;
+				sec_cap++;
+			}
+
+			if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+				RTE_LOG(ERR, IPSEC,
+				"No suitable security capability found\n");
+				return -1;
+			}
+
+			sa->ol_flags = sec_cap->ol_flags;
+			sa->security_ctx = ctx;
+			sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+			sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+			sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+			if (sa->flags & IP6_TUNNEL) {
+				sa->pattern[1].spec = &sa->ipv6_spec;
+				memcpy(sa->ipv6_spec.hdr.dst_addr,
+					sa->dst.ip.ip6.ip6_b, 16);
+				memcpy(sa->ipv6_spec.hdr.src_addr,
+					sa->src.ip.ip6.ip6_b, 16);
+			} else {
+				sa->pattern[1].spec = &sa->ipv4_spec;
+				sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
+				sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
+			}
+
+			sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+			sa->pattern[2].spec = &sa->esp_spec;
+			sa->pattern[2].mask = &rte_flow_item_esp_mask;
+			sa->esp_spec.hdr.spi = sa->spi;
+
+			sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+			sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+			sa->action[0].conf = sa->sec_session;
+
+			sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+			sa->attr.egress = (sa->direction ==
+					RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
+			sa->flow = rte_flow_create(sa->portid,
+				&sa->attr, sa->pattern, sa->action, &err);
+			if (sa->flow == NULL) {
+				RTE_LOG(ERR, IPSEC,
+					"Failed to create ipsec flow msg: %s\n",
+					err.message);
+				return -1;
+			}
+		}
+	} else {
+		sa->crypto_session = rte_cryptodev_sym_session_create(
+				ipsec_ctx->session_pool);
+		rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+				sa->crypto_session, sa->xforms,
+				ipsec_ctx->session_pool);
+
+		rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
+				&cdev_info);
+		if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
+			ret = rte_cryptodev_queue_pair_attach_sym_session(
+					ipsec_ctx->tbl[cdev_id_qp].id,
+					ipsec_ctx->tbl[cdev_id_qp].qp,
+					sa->crypto_session);
+			if (ret < 0) {
+				RTE_LOG(ERR, IPSEC,
+					"Session cannot be attached to qp %u\n",
+					ipsec_ctx->tbl[cdev_id_qp].qp);
+				return -1;
+			}
 		}
 	}
 	sa->cdev_id_qp = cdev_id_qp;
@@ -125,7 +264,9 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 {
 	int32_t ret = 0, i;
 	struct ipsec_mbuf_metadata *priv;
+	struct rte_crypto_sym_op *sym_cop;
 	struct ipsec_sa *sa;
+	struct cdev_qp *cqp;
 
 	for (i = 0; i < nb_pkts; i++) {
 		if (unlikely(sas[i] == NULL)) {
@@ -140,23 +281,76 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		sa = sas[i];
 		priv->sa = sa;
 
-		priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
-		priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
-		rte_prefetch0(&priv->sym_cop);
-
-		if ((unlikely(sa->crypto_session == NULL)) &&
-				create_session(ipsec_ctx, sa)) {
-			rte_pktmbuf_free(pkts[i]);
-			continue;
-		}
-
-		rte_crypto_op_attach_sym_session(&priv->cop,
-				sa->crypto_session);
-
-		ret = xform_func(pkts[i], sa, &priv->cop);
-		if (unlikely(ret)) {
-			rte_pktmbuf_free(pkts[i]);
+		switch (sa->type) {
+		case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+			rte_prefetch0(&priv->sym_cop);
+
+			if ((unlikely(sa->sec_session == NULL)) &&
+					create_session(ipsec_ctx, sa)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			sym_cop = get_sym_cop(&priv->cop);
+			sym_cop->m_src = pkts[i];
+
+			rte_security_attach_session(&priv->cop,
+					sa->sec_session);
+			break;
+		case RTE_SECURITY_ACTION_TYPE_NONE:
+
+			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+			rte_prefetch0(&priv->sym_cop);
+
+			if ((unlikely(sa->crypto_session == NULL)) &&
+					create_session(ipsec_ctx, sa)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			rte_crypto_op_attach_sym_session(&priv->cop,
+					sa->crypto_session);
+
+			ret = xform_func(pkts[i], sa, &priv->cop);
+			if (unlikely(ret)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+			break;
+		case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+			priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+			priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+			rte_prefetch0(&priv->sym_cop);
+
+			if ((unlikely(sa->sec_session == NULL)) &&
+					create_session(ipsec_ctx, sa)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			rte_security_attach_session(&priv->cop,
+					sa->sec_session);
+
+			ret = xform_func(pkts[i], sa, &priv->cop);
+			if (unlikely(ret)) {
+				rte_pktmbuf_free(pkts[i]);
+				continue;
+			}
+
+			cqp = &ipsec_ctx->tbl[sa->cdev_id_qp];
+			cqp->ol_pkts[cqp->ol_pkts_cnt++] = pkts[i];
+			if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
+				rte_security_set_pkt_metadata(
+						sa->security_ctx,
+						sa->sec_session, pkts[i], NULL);
 			continue;
 		}
@@ -167,7 +361,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 static inline int
 ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
-	      struct rte_mbuf *pkts[], uint16_t max_pkts)
+		struct rte_mbuf *pkts[], uint16_t max_pkts)
 {
 	int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
 	struct ipsec_mbuf_metadata *priv;
@@ -182,6 +376,19 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 		if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
 			ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
 
+		while (cqp->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+			pkt = cqp->ol_pkts[--cqp->ol_pkts_cnt];
+			rte_prefetch0(pkt);
+			priv = get_priv(pkt);
+			sa = priv->sa;
+			ret = xform_func(pkt, sa, &priv->cop);
+			if (unlikely(ret)) {
+				rte_pktmbuf_free(pkt);
+				continue;
+			}
+			pkts[nb_pkts++] = pkt;
+		}
+
 		if (cqp->in_flight == 0)
 			continue;
@@ -199,11 +406,14 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
 
 			RTE_ASSERT(sa != NULL);
 
-			ret = xform_func(pkt, sa, cops[j]);
-			if (unlikely(ret))
-				rte_pktmbuf_free(pkt);
-			else
-				pkts[nb_pkts++] = pkt;
+			if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+				ret = xform_func(pkt, sa, cops[j]);
+				if (unlikely(ret)) {
+					rte_pktmbuf_free(pkt);
+					continue;
+				}
+			}
+			pkts[nb_pkts++] = pkt;
 		}
 	}
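For inline crypto, create_session() obtains the security context from the
ethdev rather than from a cryptodev, creates the session against it, and then
installs an ETH/IPv4/ESP rte_flow rule whose SECURITY action binds matching
SPIs to that session. A condensed sketch of the session half (hypothetical
helper; assumes a fully populated conf, error handling trimmed):

	#include <rte_ethdev.h>
	#include <rte_security.h>

	static struct rte_security_session *
	inline_session_create(uint16_t portid,
			struct rte_security_session_conf *conf,
			struct rte_mempool *sess_pool)
	{
		struct rte_security_ctx *ctx = (struct rte_security_ctx *)
				rte_eth_dev_get_sec_ctx(portid);

		if (ctx == NULL)
			return NULL; /* port exposes no security context */
		return rte_security_session_create(ctx, conf, sess_pool);
	}

On the datapath, ipsec_enqueue() then bypasses the cryptodev for such SAs: the
packet goes to the per-queue ol_pkts[] array, gets
rte_security_set_pkt_metadata() where the PMD requests it, and ipsec_dequeue()
drains that array without polling a crypto queue pair.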
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index da1fb1b2..775b316f 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -38,6 +38,8 @@
 
 #include
 #include
+#include
+#include
 
 #define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
 #define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2
@@ -99,7 +101,10 @@ struct ipsec_sa {
 	uint32_t cdev_id_qp;
 	uint64_t seq;
 	uint32_t salt;
-	struct rte_cryptodev_sym_session *crypto_session;
+	union {
+		struct rte_cryptodev_sym_session *crypto_session;
+		struct rte_security_session *sec_session;
+	};
 	enum rte_crypto_cipher_algorithm cipher_algo;
 	enum rte_crypto_auth_algorithm auth_algo;
 	enum rte_crypto_aead_algorithm aead_algo;
@@ -117,7 +122,28 @@ struct ipsec_sa {
 	uint8_t auth_key[MAX_KEY_SIZE];
 	uint16_t auth_key_len;
 	uint16_t aad_len;
-	struct rte_crypto_sym_xform *xforms;
+	union {
+		struct rte_crypto_sym_xform *xforms;
+		struct rte_security_ipsec_xform *sec_xform;
+	};
+	enum rte_security_session_action_type type;
+	enum rte_security_ipsec_sa_direction direction;
+	uint16_t portid;
+	struct rte_security_ctx *security_ctx;
+	uint32_t ol_flags;
+
+#define MAX_RTE_FLOW_PATTERN (4)
+#define MAX_RTE_FLOW_ACTIONS (2)
+	struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN];
+	struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS];
+	struct rte_flow_attr attr;
+	union {
+		struct rte_flow_item_ipv4 ipv4_spec;
+		struct rte_flow_item_ipv6 ipv6_spec;
+	};
+	struct rte_flow_item_esp esp_spec;
+	struct rte_flow *flow;
+	struct rte_security_session_conf sess_conf;
 } __rte_cache_aligned;
 
 struct ipsec_mbuf_metadata {
@@ -133,6 +159,8 @@ struct cdev_qp {
 	uint16_t in_flight;
 	uint16_t len;
 	struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+	struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+	uint16_t ol_pkts_cnt;
 };
 
 struct ipsec_ctx {
@@ -150,6 +178,7 @@ struct cdev_key {
 	uint16_t lcore_id;
 	uint8_t cipher_algo;
 	uint8_t auth_algo;
+	uint8_t aead_algo;
 };
 
 struct socket_ctx {
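Because crypto_session and sec_session now overlay each other (as do xforms
and sec_xform), sa->type is the discriminant: RTE_SECURITY_ACTION_TYPE_NONE
means the cryptodev members are live, anything else means the rte_security
members are. A sketch of the access rule the code above follows (hypothetical
helper):

	/* Valid only after create_session() has populated the SA. */
	static inline void *
	sa_active_session(struct ipsec_sa *sa)
	{
		if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE)
			return sa->crypto_session; /* cryptodev lookaside */
		return sa->sec_session; /* rte_security session */
	}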
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 7be0e628..4c448e5c 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -41,16 +41,20 @@
 
 #include
 #include
+#include
 #include
 #include
 #include
 #include
 #include
+#include
 
 #include "ipsec.h"
 #include "esp.h"
 #include "parser.h"
 
+#define IPDEFTTL 64
+
 struct supported_cipher_algo {
 	const char *keyword;
 	enum rte_crypto_cipher_algorithm algo;
@@ -238,6 +242,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	uint32_t src_p = 0;
 	uint32_t dst_p = 0;
 	uint32_t mode_p = 0;
+	uint32_t type_p = 0;
+	uint32_t portid_p = 0;
 
 	if (strcmp(tokens[0], "in") == 0) {
 		ri = &nb_sa_in;
@@ -375,7 +381,6 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			rule->auth_algo = algo->algo;
 			rule->auth_key_len = algo->key_len;
 			rule->digest_len = algo->digest_len;
-			rule->aad_len = algo->key_len;
 
 			/* NULL algorithm and combined algos do not
 			 * require auth key
@@ -431,7 +436,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			rule->aead_algo = algo->algo;
 			rule->cipher_key_len = algo->key_len;
 			rule->digest_len = algo->digest_len;
-			rule->aad_len = algo->key_len;
+			rule->aad_len = algo->aad_len;
 			rule->block_size = algo->block_size;
 			rule->iv_len = algo->iv_len;
@@ -550,6 +555,52 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 			continue;
 		}
 
+		if (strcmp(tokens[ti], "type") == 0) {
+			APP_CHECK_PRESENCE(type_p, tokens[ti], status);
+			if (status->status < 0)
+				return;
+
+			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+			if (status->status < 0)
+				return;
+
+			if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
+				rule->type =
+					RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
+			else if (strcmp(tokens[ti],
+					"inline-protocol-offload") == 0)
+				rule->type =
+				RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
+			else if (strcmp(tokens[ti],
+					"lookaside-protocol-offload") == 0)
+				rule->type =
+				RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
+			else if (strcmp(tokens[ti], "no-offload") == 0)
+				rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+			else {
+				APP_CHECK(0, status, "Invalid input \"%s\"",
+						tokens[ti]);
+				return;
+			}
+
+			type_p = 1;
+			continue;
+		}
+
+		if (strcmp(tokens[ti], "port_id") == 0) {
+			APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
+			if (status->status < 0)
+				return;
+			INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+			if (status->status < 0)
+				return;
+			rule->portid = atoi(tokens[ti]);
+			if (status->status < 0)
+				return;
+			portid_p = 1;
+			continue;
+		}
+
 		/* unrecognizeable input */
 		APP_CHECK(0, status, "unrecognized input \"%s\"",
 			tokens[ti]);
@@ -580,6 +631,14 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
 	if (status->status < 0)
 		return;
 
+	if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+		printf("Missing portid option, falling back to non-offload\n");
+
+	if (!type_p || !portid_p) {
+		rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+		rule->portid = -1;
+	}
+
 	*ri = *ri + 1;
 }
@@ -647,9 +706,11 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
 
 struct sa_ctx {
 	struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
-	struct {
-		struct rte_crypto_sym_xform a;
-		struct rte_crypto_sym_xform b;
+	union {
+		struct {
+			struct rte_crypto_sym_xform a;
+			struct rte_crypto_sym_xform b;
+		};
 	} xf[IPSEC_SA_MAX_ENTRIES];
 };
@@ -681,6 +742,33 @@ sa_create(const char *name, int32_t socket_id)
 	return sa_ctx;
 }
 
+static int
+check_eth_dev_caps(uint16_t portid, uint32_t inbound)
+{
+	struct rte_eth_dev_info dev_info;
+
+	rte_eth_dev_info_get(portid, &dev_info);
+
+	if (inbound) {
+		if ((dev_info.rx_offload_capa &
+				DEV_RX_OFFLOAD_SECURITY) == 0) {
+			RTE_LOG(WARNING, PORT,
+				"hardware RX IPSec offload is not supported\n");
+			return -EINVAL;
+		}
+
+	} else { /* outbound */
+		if ((dev_info.tx_offload_capa &
+				DEV_TX_OFFLOAD_SECURITY) == 0) {
+			RTE_LOG(WARNING, PORT,
+				"hardware TX IPSec offload is not supported\n");
+			return -EINVAL;
+		}
+	}
+	return 0;
+}
+
+
 static int
 sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		uint32_t nb_entries, uint32_t inbound)
@@ -700,6 +788,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 		*sa = entries[i];
 		sa->seq = 0;
 
+		if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+			sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+			if (check_eth_dev_caps(sa->portid, inbound))
+				return -EINVAL;
+		}
+
+		sa->direction = (inbound == 1) ?
+				RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
+				RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
+
 		switch (sa->flags) {
 		case IP4_TUNNEL:
 			sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
@@ -709,37 +807,21 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
 
 		if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
 			iv_length = 16;
 
-			if (inbound) {
-				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
-				sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
-				sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
-				sa_ctx->xf[idx].a.aead.key.length =
-					sa->cipher_key_len;
-				sa_ctx->xf[idx].a.aead.op =
-					RTE_CRYPTO_AEAD_OP_DECRYPT;
-				sa_ctx->xf[idx].a.next = NULL;
-				sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
-				sa_ctx->xf[idx].a.aead.iv.length = iv_length;
-				sa_ctx->xf[idx].a.aead.aad_length =
-					sa->aad_len;
-				sa_ctx->xf[idx].a.aead.digest_length =
-					sa->digest_len;
-			} else { /* outbound */
-				sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
-				sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
-				sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
-				sa_ctx->xf[idx].a.aead.key.length =
-					sa->cipher_key_len;
-				sa_ctx->xf[idx].a.aead.op =
-					RTE_CRYPTO_AEAD_OP_ENCRYPT;
-				sa_ctx->xf[idx].a.next = NULL;
-				sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
-				sa_ctx->xf[idx].a.aead.iv.length = iv_length;
-				sa_ctx->xf[idx].a.aead.aad_length =
-					sa->aad_len;
-				sa_ctx->xf[idx].a.aead.digest_length =
-					sa->digest_len;
-			}
+			sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+			sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+			sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+			sa_ctx->xf[idx].a.aead.key.length =
+				sa->cipher_key_len;
+			sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
+				RTE_CRYPTO_AEAD_OP_DECRYPT :
+				RTE_CRYPTO_AEAD_OP_ENCRYPT;
+			sa_ctx->xf[idx].a.next = NULL;
+			sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+			sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+			sa_ctx->xf[idx].a.aead.aad_length =
+				sa->aad_len;
+			sa_ctx->xf[idx].a.aead.digest_length =
+				sa->digest_len;
 
 			sa->xforms = &sa_ctx->xf[idx].a;
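The parser additions give SA rules two new optional tokens, "type" and
"port_id"; when either is missing the rule falls back to
RTE_SECURITY_ACTION_TYPE_NONE. A hypothetical rule using them, following the
cfg grammar the parser above defines (one logical line, wrapped here for
readability; addresses and key bytes are placeholders):

	sa out 5 aead_algo aes-128-gcm
	    aead_key 2b:7e:15:16:28:ae:d2:a6:ab:f7:15:88:09:cf:4f:3d:de:ad:be:ef
	    mode ipv4-tunnel src 172.16.1.5 dst 172.16.2.5
	    type inline-crypto-offload port_id 0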