Diffstat (limited to 'src/plugins/dpdk/ipsec')
-rw-r--r--  src/plugins/dpdk/ipsec/cli.c           8
-rw-r--r--  src/plugins/dpdk/ipsec/crypto_node.c  41
-rw-r--r--  src/plugins/dpdk/ipsec/esp_decrypt.c   5
-rw-r--r--  src/plugins/dpdk/ipsec/esp_encrypt.c   5
-rw-r--r--  src/plugins/dpdk/ipsec/ipsec.c         3
-rw-r--r--  src/plugins/dpdk/ipsec/ipsec.h        35
6 files changed, 58 insertions(+), 39 deletions(-)
diff --git a/src/plugins/dpdk/ipsec/cli.c b/src/plugins/dpdk/ipsec/cli.c
index 01ef985af84..a2edc8b96c0 100644
--- a/src/plugins/dpdk/ipsec/cli.c
+++ b/src/plugins/dpdk/ipsec/cli.c
@@ -27,10 +27,10 @@ format_crypto_resource (u8 * s, va_list * args)
crypto_resource_t *res = vec_elt_at_index (dcm->resource, res_idx);
- s =
- format (s, "%U thr_id %3d qp %2u inflight %u\n",
- format_white_space, indent, (i16) res->thread_idx,
- res->qp_id, res->inflights);
+
+ s = format (s, "%U thr_id %3d qp %2u enc_inflight %u, dec_inflights %u\n",
+ format_white_space, indent, (i16) res->thread_idx,
+ res->qp_id, res->inflights[0], res->inflights[1]);
return s;
}
diff --git a/src/plugins/dpdk/ipsec/crypto_node.c b/src/plugins/dpdk/ipsec/crypto_node.c
index 9c22e93beed..7fae7d65581 100644
--- a/src/plugins/dpdk/ipsec/crypto_node.c
+++ b/src/plugins/dpdk/ipsec/crypto_node.c
@@ -127,25 +127,24 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
vlib_node_runtime_t * node, crypto_resource_t * res)
{
u8 numa = rte_socket_id ();
- u32 n_ops, n_deq;
+ u32 n_ops, total_n_deq, n_deq[2];
u32 bis[VLIB_FRAME_SIZE], *bi;
u16 nexts[VLIB_FRAME_SIZE], *next;
struct rte_crypto_op **ops;
+ n_deq[0] = 0;
+ n_deq[1] = 0;
bi = bis;
next = nexts;
ops = cwm->ops;
- n_ops = n_deq = rte_cryptodev_dequeue_burst (res->dev_id,
- res->qp_id,
- ops, VLIB_FRAME_SIZE);
-
+ n_ops = total_n_deq = rte_cryptodev_dequeue_burst (res->dev_id,
+ res->qp_id,
+ ops, VLIB_FRAME_SIZE);
/* no op dequeued, do not proceed */
- if (n_deq == 0)
+ if (n_ops == 0)
return 0;
- res->inflights -= n_ops;
-
while (n_ops >= 4)
{
struct rte_crypto_op *op0, *op1, *op2, *op3;
@@ -183,6 +182,11 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
bi[2] = crypto_op_get_priv (op2)->bi;
bi[3] = crypto_op_get_priv (op3)->bi;
+ n_deq[crypto_op_get_priv (op0)->encrypt] += 1;
+ n_deq[crypto_op_get_priv (op1)->encrypt] += 1;
+ n_deq[crypto_op_get_priv (op2)->encrypt] += 1;
+ n_deq[crypto_op_get_priv (op3)->encrypt] += 1;
+
dpdk_crypto_input_check_op (vm, node, op0, next + 0);
dpdk_crypto_input_check_op (vm, node, op1, next + 1);
dpdk_crypto_input_check_op (vm, node, op2, next + 2);
@@ -208,6 +212,8 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
next[0] = crypto_op_get_priv (op0)->next;
bi[0] = crypto_op_get_priv (op0)->bi;
+ n_deq[crypto_op_get_priv (op0)->encrypt] += 1;
+
dpdk_crypto_input_check_op (vm, node, op0, next + 0);
op0->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
@@ -220,15 +226,18 @@ dpdk_crypto_dequeue (vlib_main_t * vm, crypto_worker_main_t * cwm,
}
vlib_node_increment_counter (vm, node->node_index,
- DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, n_deq);
+ DPDK_CRYPTO_INPUT_ERROR_DQ_COPS, total_n_deq);
- vlib_buffer_enqueue_to_next (vm, node, bis, nexts, n_deq);
+ res->inflights[0] -= n_deq[0];
+ res->inflights[1] -= n_deq[1];
- dpdk_crypto_input_trace (vm, node, res->dev_id, bis, nexts, n_deq);
+ vlib_buffer_enqueue_to_next (vm, node, bis, nexts, total_n_deq);
- crypto_free_ops (numa, cwm->ops, n_deq);
+ dpdk_crypto_input_trace (vm, node, res->dev_id, bis, nexts, total_n_deq);
- return n_deq;
+ crypto_free_ops (numa, cwm->ops, total_n_deq);
+
+ return total_n_deq;
}
static_always_inline uword
@@ -246,11 +255,13 @@ dpdk_crypto_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
vec_foreach (res_idx, cwm->resource_idx)
{
res = vec_elt_at_index (dcm->resource, res_idx[0]);
+ u32 inflights = res->inflights[0] + res->inflights[1];
- if (res->inflights)
+ if (inflights)
n_deq += dpdk_crypto_dequeue (vm, cwm, node, res);
- if (PREDICT_FALSE (res->remove && !(res->inflights)))
+ inflights = res->inflights[0] + res->inflights[1];
+ if (PREDICT_FALSE (res->remove && !(inflights)))
vec_add1 (remove, res_idx[0]);
}
/* *INDENT-ON* */
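
With this change, dpdk_crypto_dequeue () keeps one inflight counter per direction: completions are tallied into n_deq[priv->encrypt] while walking the burst, and both counters are decremented only after the whole burst has been processed. The following is a compilable sketch of that accounting with toy stand-in types (op_priv_t, resource_t and account_dequeued () are illustrative, not the plugin's API; index 0 is the decrypt direction and 1 the encrypt direction, following the flag set in the ESP node hunks below).

#include <stdint.h>
#include <stdio.h>

typedef struct { uint8_t encrypt; } op_priv_t;		/* 1 = encrypt, 0 = decrypt */
typedef struct { uint16_t inflights[2]; } resource_t;	/* per-direction credits */

static uint32_t
account_dequeued (resource_t * res, op_priv_t * ops, uint32_t n_ops)
{
  uint32_t n_deq[2] = { 0, 0 };

  /* walk the dequeued burst once, counting completions per direction */
  for (uint32_t i = 0; i < n_ops; i++)
    n_deq[ops[i].encrypt] += 1;

  /* release queue-pair credits for each direction separately */
  res->inflights[0] -= n_deq[0];
  res->inflights[1] -= n_deq[1];

  return n_deq[0] + n_deq[1];	/* total_n_deq in the patch */
}

int
main (void)
{
  op_priv_t ops[4] = { {1}, {0}, {1}, {1} };
  resource_t res = { .inflights = { 5, 7 } };
  uint32_t total = account_dequeued (&res, ops, 4);

  printf ("dequeued %u, dec inflight %u, enc inflight %u\n",
	  (unsigned) total, (unsigned) res.inflights[0],
	  (unsigned) res.inflights[1]);
  return 0;
}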
diff --git a/src/plugins/dpdk/ipsec/esp_decrypt.c b/src/plugins/dpdk/ipsec/esp_decrypt.c
index 10bfe98cf2c..4982db7ee6d 100644
--- a/src/plugins/dpdk/ipsec/esp_decrypt.c
+++ b/src/plugins/dpdk/ipsec/esp_decrypt.c
@@ -168,6 +168,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
dpdk_op_priv_t *priv = crypto_op_get_priv (op);
/* store bi in op private */
priv->bi = bi0;
+ priv->encrypt = 0;
u16 op_len =
sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
@@ -372,7 +373,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
from_frame->n_vectors);
crypto_enqueue_ops (vm, cwm, dpdk_esp6_decrypt_node.index,
- ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
+ ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
}
else
{
@@ -381,7 +382,7 @@ dpdk_esp_decrypt_inline (vlib_main_t * vm,
from_frame->n_vectors);
crypto_enqueue_ops (vm, cwm, dpdk_esp4_decrypt_node.index,
- ESP_DECRYPT_ERROR_ENQ_FAIL, numa);
+ ESP_DECRYPT_ERROR_ENQ_FAIL, numa, 0 /* encrypt */ );
}
crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);
diff --git a/src/plugins/dpdk/ipsec/esp_encrypt.c b/src/plugins/dpdk/ipsec/esp_encrypt.c
index 2ff7e41c513..cd751d34e6b 100644
--- a/src/plugins/dpdk/ipsec/esp_encrypt.c
+++ b/src/plugins/dpdk/ipsec/esp_encrypt.c
@@ -207,6 +207,7 @@ dpdk_esp_encrypt_inline (vlib_main_t * vm,
dpdk_op_priv_t *priv = crypto_op_get_priv (op);
/* store bi in op private */
priv->bi = bi0;
+ priv->encrypt = 1;
u16 op_len =
sizeof (op[0]) + sizeof (op[0].sym[0]) + sizeof (priv[0]);
@@ -583,7 +584,7 @@ dpdk_esp_encrypt_inline (vlib_main_t * vm,
from_frame->n_vectors);
crypto_enqueue_ops (vm, cwm, dpdk_esp6_encrypt_node.index,
- ESP_ENCRYPT_ERROR_ENQ_FAIL, numa);
+ ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ );
}
else
{
@@ -592,7 +593,7 @@ dpdk_esp_encrypt_inline (vlib_main_t * vm,
from_frame->n_vectors);
crypto_enqueue_ops (vm, cwm, dpdk_esp4_encrypt_node.index,
- ESP_ENCRYPT_ERROR_ENQ_FAIL, numa);
+ ESP_ENCRYPT_ERROR_ENQ_FAIL, numa, 1 /* encrypt */ );
}
crypto_free_ops (numa, ops, cwm->ops + from_frame->n_vectors - ops);
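
Both ESP nodes now record the direction in the op's private data when the op is prepared, and pass the same flag to crypto_enqueue_ops (), so the shared enqueue and dequeue code can attribute each op to the right inflight counter. Below is a reduced sketch of that tagging; op_priv_t and prepare_op () are illustrative stand-ins for dpdk_op_priv_t and the per-packet setup in the ESP nodes.

#include <stdint.h>
#include <stdio.h>

/* illustrative stand-in for dpdk_op_priv_t */
typedef struct
{
  uint32_t next;
  uint32_t bi;
  uint8_t encrypt;		/* 1 in the encrypt node, 0 in the decrypt node */
} op_priv_t;

/* what each ESP node does per packet before handing the op to the cryptodev */
static void
prepare_op (op_priv_t * priv, uint32_t buffer_index, uint8_t is_encrypt)
{
  priv->bi = buffer_index;	/* remember the vlib buffer for the dequeue path */
  priv->encrypt = is_encrypt;	/* direction, consumed by dequeue and enqueue */
}

int
main (void)
{
  op_priv_t priv;
  prepare_op (&priv, 42, 1);	/* encrypt-path example */
  printf ("bi %u encrypt %u\n", (unsigned) priv.bi, (unsigned) priv.encrypt);
  return 0;
}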
diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c
index 88fd75dcf1a..93efc6bcf7e 100644
--- a/src/plugins/dpdk/ipsec/ipsec.c
+++ b/src/plugins/dpdk/ipsec/ipsec.c
@@ -639,9 +639,6 @@ crypto_parse_capabilities (crypto_dev_t * dev,
}
}
-#define DPDK_CRYPTO_N_QUEUE_DESC 2048
-#define DPDK_CRYPTO_NB_SESS_OBJS 20000
-
static clib_error_t *
crypto_dev_conf (u8 dev, u16 n_qp, u8 numa)
{
diff --git a/src/plugins/dpdk/ipsec/ipsec.h b/src/plugins/dpdk/ipsec/ipsec.h
index 4866142630b..572845927c8 100644
--- a/src/plugins/dpdk/ipsec/ipsec.h
+++ b/src/plugins/dpdk/ipsec/ipsec.h
@@ -30,6 +30,9 @@
#define always_inline static inline __attribute__ ((__always_inline__))
#endif
+#define DPDK_CRYPTO_N_QUEUE_DESC 2048
+#define DPDK_CRYPTO_NB_SESS_OBJS 20000
+
#define foreach_dpdk_crypto_input_next \
_(DROP, "error-drop") \
_(IP4_LOOKUP, "ip4-lookup") \
@@ -59,9 +62,11 @@ typedef struct
{
u32 next;
u32 bi;
- dpdk_gcm_cnt_blk cb __attribute__ ((aligned (16)));
+ u8 encrypt;
+ CLIB_ALIGN_MARK (mark0, 16);
+ dpdk_gcm_cnt_blk cb;
u8 aad[16];
- u8 icv[32];
+ u8 icv[32]; /* XXX last 16B in next cache line */
} dpdk_op_priv_t;
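
The new encrypt byte sits before an explicit 16-byte alignment marker, so the GCM counter block cb keeps its 16-byte alignment instead of relying on the attribute previously attached to cb itself; the trailing comment flags that icv now ends 16 bytes into the following cache line. A plain-C11 approximation of that layout intent (using _Alignas instead of CLIB_ALIGN_MARK, and a 16-byte array standing in for dpdk_gcm_cnt_blk):

#include <assert.h>
#include <stdalign.h>
#include <stddef.h>
#include <stdint.h>

/* approximation of the reworked dpdk_op_priv_t layout */
typedef struct
{
  uint32_t next;
  uint32_t bi;
  uint8_t encrypt;
  _Alignas (16) uint8_t cb[16];	/* stand-in for dpdk_gcm_cnt_blk */
  uint8_t aad[16];
  uint8_t icv[32];
} op_priv_layout_t;

/* cb stays 16-byte aligned despite the extra encrypt byte before it */
static_assert (offsetof (op_priv_layout_t, cb) % 16 == 0, "cb misaligned");
/* icv spans bytes 48..79: if the struct starts on a cache line boundary,
 * its last 16 bytes land in the next line, as the XXX comment notes */
static_assert (offsetof (op_priv_layout_t, icv) == 48, "unexpected layout");

int
main (void)
{
  return 0;
}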
typedef struct
@@ -70,8 +75,8 @@ typedef struct
struct rte_crypto_op **ops;
u16 cipher_resource_idx[IPSEC_CRYPTO_N_ALG];
u16 auth_resource_idx[IPSEC_INTEG_N_ALG];
- CLIB_CACHE_LINE_ALIGN_MARK (pad);
-} crypto_worker_main_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_worker_main_t;
typedef struct
{
@@ -115,12 +120,13 @@ typedef struct
u8 dev_id;
u8 numa;
u16 qp_id;
- u16 inflights;
+ u16 inflights[2];
u16 n_ops;
u16 __unused;
struct rte_crypto_op *ops[VLIB_FRAME_SIZE];
u32 bi[VLIB_FRAME_SIZE];
-} crypto_resource_t __attribute__ ((aligned (CLIB_CACHE_LINE_BYTES)));
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+} crypto_resource_t;
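
Splitting inflights into a two-entry array (indexed by the same encrypt flag) lets crypto_enqueue_ops () budget each direction separately against the 2048-descriptor queue pair defined by the macro moved into this header: the burst is clamped to (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt], so neither direction can occupy more than half the descriptors. A minimal sketch of that clamp follows; the enqueue_budget () helper is illustrative, not plugin API.

#include <stdint.h>
#include <stdio.h>

#define DPDK_CRYPTO_N_QUEUE_DESC 2048

/* Clamp a burst so one direction never holds more than half the queue pair,
 * mirroring the limit added in crypto_enqueue_ops (). Because every enqueue
 * is clamped this way, inflights_this_dir never exceeds half the queue depth
 * and the subtraction cannot underflow. */
static uint16_t
enqueue_budget (uint16_t pending_ops, uint16_t inflights_this_dir)
{
  uint16_t room = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - inflights_this_dir;
  return pending_ops < room ? pending_ops : room;
}

int
main (void)
{
  /* illustrative values: 300 ops pending, 900 already in flight */
  printf ("budget = %u\n", (unsigned) enqueue_budget (300, 900));	/* 124 */
  return 0;
}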
typedef struct
{
@@ -130,15 +136,13 @@ typedef struct
typedef struct
{
- CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */
struct rte_cryptodev_sym_session *session;
u64 dev_mask;
+ CLIB_ALIGN_MARK (pad, 16); /* align up to 16 bytes for 32bit builds */
} crypto_session_by_drv_t;
typedef struct
{
- /* Required for vec_validate_aligned */
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
struct rte_mempool *crypto_op;
struct rte_mempool *session_h;
struct rte_mempool **session_drv;
@@ -149,6 +153,8 @@ typedef struct
u64 *session_drv_failed;
crypto_session_by_drv_t *session_by_drv_id_and_sa_index;
clib_spinlock_t lockp;
+ /* Required for vec_validate_aligned */
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
} crypto_data_t;
typedef struct
@@ -303,7 +309,7 @@ crypto_free_ops (u8 numa, struct rte_crypto_op **ops, u32 n)
static_always_inline void
crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
- u32 node_index, u32 error, u8 numa)
+ u32 node_index, u32 error, u8 numa, u8 encrypt)
{
dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
crypto_resource_t *res;
@@ -312,15 +318,18 @@ crypto_enqueue_ops (vlib_main_t * vm, crypto_worker_main_t * cwm,
/* *INDENT-OFF* */
vec_foreach (res_idx, cwm->resource_idx)
{
- u16 enq;
+ u16 enq, n_ops;
res = vec_elt_at_index (dcm->resource, res_idx[0]);
if (!res->n_ops)
continue;
+ n_ops = (DPDK_CRYPTO_N_QUEUE_DESC / 2) - res->inflights[encrypt];
+ n_ops = res->n_ops < n_ops ? res->n_ops : n_ops;
enq = rte_cryptodev_enqueue_burst (res->dev_id, res->qp_id,
- res->ops, res->n_ops);
- res->inflights += enq;
+ res->ops, n_ops);
+ ASSERT (n_ops == enq);
+ res->inflights[encrypt] += enq;
if (PREDICT_FALSE (enq < res->n_ops))
{