Diffstat (limited to 'examples/ipsec-secgw')
-rw-r--r--  examples/ipsec-secgw/ipsec-secgw.c  177
-rw-r--r--  examples/ipsec-secgw/ipsec.c         99
-rw-r--r--  examples/ipsec-secgw/ipsec.h         11
-rw-r--r--  examples/ipsec-secgw/sa.c             7
-rw-r--r--  examples/ipsec-secgw/sp4.c           10
-rw-r--r--  examples/ipsec-secgw/sp6.c           10
6 files changed, 237 insertions, 77 deletions
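
Before the per-file hunks, a quick orientation: this change factors packet classification out into split46_traffic(), exports enqueue_cop_burst(), and makes the main loop drain the cryptodev queues explicitly instead of relying on the enqueue path to also dequeue. A condensed sketch of the resulting main-loop flow, assembled from the hunks below (error handling, statistics and unrelated code omitted; not a drop-in replacement):

	/* main_loop() after this patch, per iteration (sketch) */
	if (unlikely(diff_tsc > drain_tsc)) {
		drain_tx_buffers(qconf);       /* was drain_buffers() */
		drain_crypto_buffers(qconf);   /* flush partially filled crypto-op bursts */
		prev_tsc = cur_tsc;
	}

	for (i = 0; i < qconf->nb_rx_queue; ++i) {
		portid = rxql[i].port_id;
		queueid = rxql[i].queue_id;
		nb_rx = rte_eth_rx_burst(portid, queueid, pkts, MAX_PKT_BURST);
		if (nb_rx > 0)
			process_pkts(qconf, pkts, nb_rx, portid);

		/* dequeue and process completed crypto-ops for this direction */
		if (UNPROTECTED_PORT(portid))
			drain_inbound_crypto_queues(qconf, &qconf->inbound);
		else
			drain_outbound_crypto_queues(qconf, &qconf->outbound);
	}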
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 1bc0b5b5..f88fdb4c 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -451,38 +451,55 @@ inbound_sp_sa(struct sp_ctx *sp, struct sa_ctx *sa, struct traffic_type *ip,
ip->num = j;
}
-static inline void
-process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
- struct ipsec_traffic *traffic)
+static void
+split46_traffic(struct ipsec_traffic *trf, struct rte_mbuf *mb[], uint32_t num)
{
+ uint32_t i, n4, n6;
+ struct ip *ip;
struct rte_mbuf *m;
- uint16_t idx, nb_pkts_in, i, n_ip4, n_ip6;
- nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
- traffic->ipsec.num, MAX_PKT_BURST);
+ n4 = trf->ip4.num;
+ n6 = trf->ip6.num;
- n_ip4 = traffic->ip4.num;
- n_ip6 = traffic->ip6.num;
+ for (i = 0; i < num; i++) {
+
+ m = mb[i];
+ ip = rte_pktmbuf_mtod(m, struct ip *);
- /* SP/ACL Inbound check ipsec and ip4 */
- for (i = 0; i < nb_pkts_in; i++) {
- m = traffic->ipsec.pkts[i];
- struct ip *ip = rte_pktmbuf_mtod(m, struct ip *);
if (ip->ip_v == IPVERSION) {
- idx = traffic->ip4.num++;
- traffic->ip4.pkts[idx] = m;
- traffic->ip4.data[idx] = rte_pktmbuf_mtod_offset(m,
+ trf->ip4.pkts[n4] = m;
+ trf->ip4.data[n4] = rte_pktmbuf_mtod_offset(m,
uint8_t *, offsetof(struct ip, ip_p));
+ n4++;
} else if (ip->ip_v == IP6_VERSION) {
- idx = traffic->ip6.num++;
- traffic->ip6.pkts[idx] = m;
- traffic->ip6.data[idx] = rte_pktmbuf_mtod_offset(m,
+ trf->ip6.pkts[n6] = m;
+ trf->ip6.data[n6] = rte_pktmbuf_mtod_offset(m,
uint8_t *,
offsetof(struct ip6_hdr, ip6_nxt));
+ n6++;
} else
rte_pktmbuf_free(m);
}
+ trf->ip4.num = n4;
+ trf->ip6.num = n6;
+}
+
+
+static inline void
+process_pkts_inbound(struct ipsec_ctx *ipsec_ctx,
+ struct ipsec_traffic *traffic)
+{
+ uint16_t nb_pkts_in, n_ip4, n_ip6;
+
+ n_ip4 = traffic->ip4.num;
+ n_ip6 = traffic->ip6.num;
+
+ nb_pkts_in = ipsec_inbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.num, MAX_PKT_BURST);
+
+ split46_traffic(traffic, traffic->ipsec.pkts, nb_pkts_in);
+
inbound_sp_sa(ipsec_ctx->sp4_ctx, ipsec_ctx->sa_ctx, &traffic->ip4,
n_ip4);
@@ -594,32 +611,45 @@ process_pkts_outbound_nosp(struct ipsec_ctx *ipsec_ctx,
struct ipsec_traffic *traffic)
{
struct rte_mbuf *m;
- uint32_t nb_pkts_out, i;
+ uint32_t nb_pkts_out, i, n;
struct ip *ip;
/* Drop any IPsec traffic from protected ports */
for (i = 0; i < traffic->ipsec.num; i++)
rte_pktmbuf_free(traffic->ipsec.pkts[i]);
- traffic->ipsec.num = 0;
+ n = 0;
- for (i = 0; i < traffic->ip4.num; i++)
- traffic->ip4.res[i] = single_sa_idx;
+ for (i = 0; i < traffic->ip4.num; i++) {
+ traffic->ipsec.pkts[n] = traffic->ip4.pkts[i];
+ traffic->ipsec.res[n++] = single_sa_idx;
+ }
- for (i = 0; i < traffic->ip6.num; i++)
- traffic->ip6.res[i] = single_sa_idx;
+ for (i = 0; i < traffic->ip6.num; i++) {
+ traffic->ipsec.pkts[n] = traffic->ip6.pkts[i];
+ traffic->ipsec.res[n++] = single_sa_idx;
+ }
+
+ traffic->ip4.num = 0;
+ traffic->ip6.num = 0;
+ traffic->ipsec.num = n;
- nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ip4.pkts,
- traffic->ip4.res, traffic->ip4.num,
+ nb_pkts_out = ipsec_outbound(ipsec_ctx, traffic->ipsec.pkts,
+ traffic->ipsec.res, traffic->ipsec.num,
MAX_PKT_BURST);
/* They all use the same SA (ip4 or ip6 tunnel) */
m = traffic->ipsec.pkts[i];
ip = rte_pktmbuf_mtod(m, struct ip *);
- if (ip->ip_v == IPVERSION)
+ if (ip->ip_v == IPVERSION) {
traffic->ip4.num = nb_pkts_out;
- else
+ for (i = 0; i < nb_pkts_out; i++)
+ traffic->ip4.pkts[i] = traffic->ipsec.pkts[i];
+ } else {
traffic->ip6.num = nb_pkts_out;
+ for (i = 0; i < nb_pkts_out; i++)
+ traffic->ip6.pkts[i] = traffic->ipsec.pkts[i];
+ }
}
static inline int32_t
@@ -777,7 +807,7 @@ process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
}
static inline void
-drain_buffers(struct lcore_conf *qconf)
+drain_tx_buffers(struct lcore_conf *qconf)
{
struct buffer *buf;
uint32_t portid;
@@ -791,6 +821,81 @@ drain_buffers(struct lcore_conf *qconf)
}
}
+static inline void
+drain_crypto_buffers(struct lcore_conf *qconf)
+{
+ uint32_t i;
+ struct ipsec_ctx *ctx;
+
+ /* drain inbound buffers */
+ ctx = &qconf->inbound;
+ for (i = 0; i != ctx->nb_qps; i++) {
+ if (ctx->tbl[i].len != 0)
+ enqueue_cop_burst(ctx->tbl + i);
+ }
+
+ /* drain outbound buffers */
+ ctx = &qconf->outbound;
+ for (i = 0; i != ctx->nb_qps; i++) {
+ if (ctx->tbl[i].len != 0)
+ enqueue_cop_burst(ctx->tbl + i);
+ }
+}
+
+static void
+drain_inbound_crypto_queues(const struct lcore_conf *qconf,
+ struct ipsec_ctx *ctx)
+{
+ uint32_t n;
+ struct ipsec_traffic trf;
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_inbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ RTE_DIM(trf.ipsec.pkts));
+ if (n == 0)
+ return;
+
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
+
+ /* split traffic by ipv4-ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+
+ /* process ipv4 packets */
+ inbound_sp_sa(ctx->sp4_ctx, ctx->sa_ctx, &trf.ip4, 0);
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+
+ /* process ipv6 packets */
+ inbound_sp_sa(ctx->sp6_ctx, ctx->sa_ctx, &trf.ip6, 0);
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+}
+
+static void
+drain_outbound_crypto_queues(const struct lcore_conf *qconf,
+ struct ipsec_ctx *ctx)
+{
+ uint32_t n;
+ struct ipsec_traffic trf;
+
+ /* dequeue packets from crypto-queue */
+ n = ipsec_outbound_cqp_dequeue(ctx, trf.ipsec.pkts,
+ RTE_DIM(trf.ipsec.pkts));
+ if (n == 0)
+ return;
+
+ trf.ip4.num = 0;
+ trf.ip6.num = 0;
+
+ /* split traffic by ipv4-ipv6 */
+ split46_traffic(&trf, trf.ipsec.pkts, n);
+
+ /* process ipv4 packets */
+ route4_pkts(qconf->rt4_ctx, trf.ip4.pkts, trf.ip4.num);
+
+ /* process ipv6 packets */
+ route6_pkts(qconf->rt6_ctx, trf.ip6.pkts, trf.ip6.num);
+}
+
/* main processing loop */
static int32_t
main_loop(__attribute__((unused)) void *dummy)
@@ -848,12 +953,14 @@ main_loop(__attribute__((unused)) void *dummy)
diff_tsc = cur_tsc - prev_tsc;
if (unlikely(diff_tsc > drain_tsc)) {
- drain_buffers(qconf);
+ drain_tx_buffers(qconf);
+ drain_crypto_buffers(qconf);
prev_tsc = cur_tsc;
}
- /* Read packet from RX queues */
for (i = 0; i < qconf->nb_rx_queue; ++i) {
+
+ /* Read packets from RX queues */
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
nb_rx = rte_eth_rx_burst(portid, queueid,
@@ -861,6 +968,14 @@ main_loop(__attribute__((unused)) void *dummy)
if (nb_rx > 0)
process_pkts(qconf, pkts, nb_rx, portid);
+
+ /* dequeue and process completed crypto-ops */
+ if (UNPROTECTED_PORT(portid))
+ drain_inbound_crypto_queues(qconf,
+ &qconf->inbound);
+ else
+ drain_outbound_crypto_queues(qconf,
+ &qconf->outbound);
}
}
}
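
split46_traffic() and the two drain_*_crypto_queues() helpers above work on the ipsec_traffic/traffic_type containers this example already defines. For readers without the full sources handy, the layout they assume looks roughly like the following (a sketch; the real definitions live in ipsec-secgw.c and the exact array sizes and element types may differ):

	struct traffic_type {
		const uint8_t *data[MAX_PKT_BURST * 2]; /* pointer to the protocol byte of each header */
		struct rte_mbuf *pkts[MAX_PKT_BURST * 2];
		uint32_t res[MAX_PKT_BURST * 2];        /* SP/SA lookup result per packet */
		uint32_t num;
	};

	struct ipsec_traffic {
		struct traffic_type ipsec; /* packets going to / coming back from crypto */
		struct traffic_type ip4;
		struct traffic_type ip6;
	};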
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 3d415f1a..72a29bcb 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -333,33 +333,35 @@ flow_create_failure:
return 0;
}
+/*
+ * queue crypto-ops into PMD queue.
+ */
+void
+enqueue_cop_burst(struct cdev_qp *cqp)
+{
+ uint32_t i, len, ret;
+
+ len = cqp->len;
+ ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp, cqp->buf, len);
+ if (ret < len) {
+ RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
+ " enqueued %u crypto ops out of %u\n",
+ cqp->id, cqp->qp, ret, len);
+ /* drop packets that we fail to enqueue */
+ for (i = ret; i < len; i++)
+ rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
+ }
+ cqp->in_flight += ret;
+ cqp->len = 0;
+}
+
static inline void
enqueue_cop(struct cdev_qp *cqp, struct rte_crypto_op *cop)
{
- int32_t ret = 0, i;
-
cqp->buf[cqp->len++] = cop;
- if (cqp->len == MAX_PKT_BURST) {
- int enq_size = cqp->len;
- if ((cqp->in_flight + enq_size) > MAX_INFLIGHT)
- enq_size -=
- (int)((cqp->in_flight + enq_size) - MAX_INFLIGHT);
-
- if (enq_size > 0)
- ret = rte_cryptodev_enqueue_burst(cqp->id, cqp->qp,
- cqp->buf, enq_size);
- if (ret < cqp->len) {
- RTE_LOG_DP(DEBUG, IPSEC, "Cryptodev %u queue %u:"
- " enqueued %u crypto ops out of %u\n",
- cqp->id, cqp->qp,
- ret, cqp->len);
- for (i = ret; i < cqp->len; i++)
- rte_pktmbuf_free(cqp->buf[i]->sym->m_src);
- }
- cqp->in_flight += ret;
- cqp->len = 0;
- }
+ if (cqp->len == MAX_PKT_BURST)
+ enqueue_cop_burst(cqp);
}
static inline void
@@ -473,6 +475,32 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
}
}
+static inline int32_t
+ipsec_inline_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
+ struct rte_mbuf *pkts[], uint16_t max_pkts)
+{
+ int32_t nb_pkts, ret;
+ struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
+ struct rte_mbuf *pkt;
+
+ nb_pkts = 0;
+ while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+ pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
+ rte_prefetch0(pkt);
+ priv = get_priv(pkt);
+ sa = priv->sa;
+ ret = xform_func(pkt, sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+ pkts[nb_pkts++] = pkt;
+ }
+
+ return nb_pkts;
+}
+
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
struct rte_mbuf *pkts[], uint16_t max_pkts)
@@ -490,19 +518,6 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
- while (ipsec_ctx->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
- pkt = ipsec_ctx->ol_pkts[--ipsec_ctx->ol_pkts_cnt];
- rte_prefetch0(pkt);
- priv = get_priv(pkt);
- sa = priv->sa;
- ret = xform_func(pkt, sa, &priv->cop);
- if (unlikely(ret)) {
- rte_pktmbuf_free(pkt);
- continue;
- }
- pkts[nb_pkts++] = pkt;
- }
-
if (cqp->in_flight == 0)
continue;
@@ -545,6 +560,13 @@ ipsec_inbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
ipsec_enqueue(esp_inbound, ctx, pkts, sas, nb_pkts);
+ return ipsec_inline_dequeue(esp_inbound_post, ctx, pkts, len);
+}
+
+uint16_t
+ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+ uint16_t len)
+{
return ipsec_dequeue(esp_inbound_post, ctx, pkts, len);
}
@@ -558,5 +580,12 @@ ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
ipsec_enqueue(esp_outbound, ctx, pkts, sas, nb_pkts);
+ return ipsec_inline_dequeue(esp_outbound_post, ctx, pkts, len);
+}
+
+uint16_t
+ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+ uint16_t len)
+{
return ipsec_dequeue(esp_outbound_post, ctx, pkts, len);
}
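
The newly exported enqueue_cop_burst() and the *_cqp_dequeue() wrappers operate on the per-queue-pair cdev_qp state that enqueue_cop() already fills. Every field used above is visible in the hunks; as a reminder, the structure is roughly (a sketch; see ipsec.h for the authoritative definition):

	struct cdev_qp {
		uint16_t id;         /* crypto device id */
		uint16_t qp;         /* queue pair index on that device */
		uint16_t in_flight;  /* ops enqueued to the PMD, not yet dequeued */
		uint16_t len;        /* ops buffered in buf[], waiting for enqueue */
		struct rte_crypto_op *buf[MAX_PKT_BURST];
	};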
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index c998c807..508d87af 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -182,6 +182,14 @@ uint16_t
ipsec_outbound(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
uint32_t sa_idx[], uint16_t nb_pkts, uint16_t len);
+uint16_t
+ipsec_inbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+ uint16_t len);
+
+uint16_t
+ipsec_outbound_cqp_dequeue(struct ipsec_ctx *ctx, struct rte_mbuf *pkts[],
+ uint16_t len);
+
static inline uint16_t
ipsec_metadata_size(void)
{
@@ -239,4 +247,7 @@ sa_init(struct socket_ctx *ctx, int32_t socket_id);
void
rt_init(struct socket_ctx *ctx, int32_t socket_id);
+void
+enqueue_cop_burst(struct cdev_qp *cqp);
+
#endif /* __IPSEC_H__ */
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index d2d3550a..640f1d79 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -947,10 +947,15 @@ int
inbound_sa_check(struct sa_ctx *sa_ctx, struct rte_mbuf *m, uint32_t sa_idx)
{
struct ipsec_mbuf_metadata *priv;
+ struct ipsec_sa *sa;
priv = get_priv(m);
+ sa = priv->sa;
+ if (sa != NULL)
+ return (sa_ctx->sa[sa_idx].spi == sa->spi);
- return (sa_ctx->sa[sa_idx].spi == priv->sa->spi);
+ RTE_LOG(ERR, IPSEC, "SA not saved in private data\n");
+ return 0;
}
static inline void
diff --git a/examples/ipsec-secgw/sp4.c b/examples/ipsec-secgw/sp4.c
index 8d3d3d8e..6b05daaa 100644
--- a/examples/ipsec-secgw/sp4.c
+++ b/examples/ipsec-secgw/sp4.c
@@ -44,7 +44,7 @@ enum {
RTE_ACL_IPV4_NUM
};
-struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = {
+static struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
@@ -85,11 +85,11 @@ struct rte_acl_field_def ip4_defs[NUM_FIELDS_IPV4] = {
RTE_ACL_RULE_DEF(acl4_rules, RTE_DIM(ip4_defs));
-struct acl4_rules acl4_rules_out[MAX_ACL_RULE_NUM];
-uint32_t nb_acl4_rules_out;
+static struct acl4_rules acl4_rules_out[MAX_ACL_RULE_NUM];
+static uint32_t nb_acl4_rules_out;
-struct acl4_rules acl4_rules_in[MAX_ACL_RULE_NUM];
-uint32_t nb_acl4_rules_in;
+static struct acl4_rules acl4_rules_in[MAX_ACL_RULE_NUM];
+static uint32_t nb_acl4_rules_in;
void
parse_sp4_tokens(char **tokens, uint32_t n_tokens,
diff --git a/examples/ipsec-secgw/sp6.c b/examples/ipsec-secgw/sp6.c
index 6002afef..dc5b94c6 100644
--- a/examples/ipsec-secgw/sp6.c
+++ b/examples/ipsec-secgw/sp6.c
@@ -34,7 +34,7 @@ enum {
#define IP6_ADDR_SIZE 16
-struct rte_acl_field_def ip6_defs[IP6_NUM] = {
+static struct rte_acl_field_def ip6_defs[IP6_NUM] = {
{
.type = RTE_ACL_FIELD_TYPE_BITMASK,
.size = sizeof(uint8_t),
@@ -116,11 +116,11 @@ struct rte_acl_field_def ip6_defs[IP6_NUM] = {
RTE_ACL_RULE_DEF(acl6_rules, RTE_DIM(ip6_defs));
-struct acl6_rules acl6_rules_out[MAX_ACL_RULE_NUM];
-uint32_t nb_acl6_rules_out;
+static struct acl6_rules acl6_rules_out[MAX_ACL_RULE_NUM];
+static uint32_t nb_acl6_rules_out;
-struct acl6_rules acl6_rules_in[MAX_ACL_RULE_NUM];
-uint32_t nb_acl6_rules_in;
+static struct acl6_rules acl6_rules_in[MAX_ACL_RULE_NUM];
+static uint32_t nb_acl6_rules_in;
void
parse_sp6_tokens(char **tokens, uint32_t n_tokens,