author | Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com> | 2017-08-26 15:22:05 +0100 |
---|---|---|
committer | Damjan Marion <dmarion.lists@gmail.com> | 2017-10-05 09:54:34 +0000 |
commit | db93cd971320301eb21403caabada7a3ec6a4cce (patch) | |
tree | 01c57f4f476a97805411a74d665eedb72331f91e /src/plugins/dpdk/ipsec/ipsec.c | |
parent | 7939f904600018aeed9d8cc9d19ca37c7e96f3d1 (diff) |
dpdk/ipsec: rework plus improved cli commands
This patch reworks the DPDK IPsec implementation, including the cryptodev
management, and replaces the existing CLI commands with new ones for better
usability.
For the data path:
- The dpdk-esp-encrypt-post node is no longer necessary.
- IPv4 packets in the decrypt path are sent to ip4-input-no-checksum instead
of ip4-input, so the IPv4 header checksum is not re-validated after
decryption (node wiring sketched below).
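As context for the decrypt-path change, VPP routes packets between graph
nodes via "next" arcs set up at init time. A minimal sketch of such wiring,
using the vlib helpers that appear in this patch (the decrypt-node name and
exact call site are assumptions for illustration; error handling elided):

    vlib_node_t *node, *next_node;

    /* resolve both graph nodes by name */
    next_node = vlib_get_node_by_name (vm, (u8 *) "ip4-input-no-checksum");
    node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-decrypt");

    /* arc from the decrypt node to ip4-input-no-checksum: the ESP
     * integrity check already covers the inner packet, so re-running
     * the IPv4 checksum validation in ip4-input is redundant */
    vlib_node_add_next (vm, node->index, next_node->index);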
The DPDK cryptodev CLI commands are replaced by the following new commands
(a usage sketch follows the list):
- show dpdk crypto devices
- show dpdk crypto placement [verbose]
- set dpdk crypto placement (<device> <thread> | auto)
- clear dpdk crypto placement <device> [<thread>]
- show dpdk crypto pools
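A typical session with the new commands might look like this (the PCI
address and thread index are illustrative placeholders, not captured
output):

    vpp# show dpdk crypto devices
    vpp# set dpdk crypto placement auto
    vpp# show dpdk crypto placement verbose
    vpp# set dpdk crypto placement 0000:85:01.0 2
    vpp# clear dpdk crypto placement 0000:85:01.0
    vpp# show dpdk crypto pools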
Change-Id: I47324517ede82d3e6e0e9f9c71c1a3433714b27b
Signed-off-by: Sergio Gonzalez Monroy <sergio.gonzalez.monroy@intel.com>
Diffstat (limited to 'src/plugins/dpdk/ipsec/ipsec.c')
-rw-r--r-- | src/plugins/dpdk/ipsec/ipsec.c | 1269 |
1 file changed, 940 insertions(+), 329 deletions(-)
diff --git a/src/plugins/dpdk/ipsec/ipsec.c b/src/plugins/dpdk/ipsec/ipsec.c index c9fce3d8392..2fd331c1ccd 100644 --- a/src/plugins/dpdk/ipsec/ipsec.c +++ b/src/plugins/dpdk/ipsec/ipsec.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 2016 Intel and/or its affiliates. + * Copyright (c) 2017 Intel and/or its affiliates. * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at: @@ -20,426 +20,1058 @@ #include <dpdk/device/dpdk.h> #include <dpdk/ipsec/ipsec.h> -#include <dpdk/ipsec/esp.h> -#define DPDK_CRYPTO_NB_SESS_OBJS 20000 -#define DPDK_CRYPTO_CACHE_SIZE 512 -#define DPDK_CRYPTO_PRIV_SIZE 128 -#define DPDK_CRYPTO_N_QUEUE_DESC 1024 -#define DPDK_CRYPTO_NB_COPS (1024 * 4) +#define EMPTY_STRUCT {0} -static int -add_del_sa_sess (u32 sa_index, u8 is_add) +static void +algos_init (u32 n_mains) { dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - crypto_worker_main_t *cwm; - u8 skip_master = vlib_num_workers () > 0; + crypto_alg_t *a; + + vec_validate_aligned (dcm->cipher_algs, IPSEC_CRYPTO_N_ALG - 1, 8); + + { +#define _(v,f,str) \ + dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].name = str; \ + dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].disabled = n_mains; + foreach_ipsec_crypto_alg +#undef _ + } + + /* Minimum boundary for ciphers is 4B, required by ESP */ + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_NONE]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_NULL; + a->boundary = 4; /* 1 */ + a->key_len = 0; + a->iv_len = 0; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_128]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 16; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_192]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 24; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_256]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CBC; + a->boundary = 16; + a->key_len = 32; + a->iv_len = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_128]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 16; + a->iv_len = 8; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_192]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 24; + a->iv_len = 8; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_256]; + a->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + a->alg = RTE_CRYPTO_CIPHER_AES_CTR; + a->boundary = 4; /* 1 */ + a->key_len = 32; + a->iv_len = 8; - /* *INDENT-OFF* */ - vec_foreach (cwm, dcm->workers_main) - { - crypto_sa_session_t *sa_sess; - u8 is_outbound; +#if DPDK_NO_AEAD +#define AES_GCM_TYPE RTE_CRYPTO_SYM_XFORM_CIPHER +#define AES_GCM_ALG RTE_CRYPTO_CIPHER_AES_GCM +#else +#define AES_GCM_TYPE RTE_CRYPTO_SYM_XFORM_AEAD +#define AES_GCM_ALG RTE_CRYPTO_AEAD_AES_GCM +#endif - if (skip_master) - { - skip_master = 0; - continue; - } + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_128]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 16; + a->iv_len = 8; + a->trunc_size = 16; + + a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_192]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 24; + a->iv_len = 8; + a->trunc_size = 16; + + a = 
&dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_256]; + a->type = AES_GCM_TYPE; + a->alg = AES_GCM_ALG; + a->boundary = 4; /* 1 */ + a->key_len = 32; + a->iv_len = 8; + a->trunc_size = 16; + + vec_validate (dcm->auth_algs, IPSEC_INTEG_N_ALG - 1); + + { +#define _(v,f,str) \ + dcm->auth_algs[IPSEC_INTEG_ALG_##f].name = str; \ + dcm->auth_algs[IPSEC_INTEG_ALG_##f].disabled = n_mains; + foreach_ipsec_integ_alg +#undef _ + } + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_NONE]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_NULL; + a->key_len = 0; + a->trunc_size = 0; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_MD5_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_MD5_HMAC; + a->key_len = 16; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA1_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA1_HMAC; + a->key_len = 20; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_96]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; + a->key_len = 32; + a->trunc_size = 12; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_128]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC; + a->key_len = 32; + a->trunc_size = 16; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_384_192]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA384_HMAC; + a->key_len = 48; + a->trunc_size = 24; + + a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_512_256]; + a->type = RTE_CRYPTO_SYM_XFORM_AUTH; + a->alg = RTE_CRYPTO_AUTH_SHA512_HMAC; + a->key_len = 64; + a->trunc_size = 32; +} - for (is_outbound = 0; is_outbound < 2; is_outbound++) - { - if (is_add) - { - pool_get (cwm->sa_sess_d[is_outbound], sa_sess); - } - else - { - u8 dev_id; - i32 ret; +static u8 +cipher_alg_index (const crypto_alg_t * alg) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - sa_sess = pool_elt_at_index (cwm->sa_sess_d[is_outbound], sa_index); - dev_id = cwm->qp_data[sa_sess->qp_index].dev_id; + return (alg - dcm->cipher_algs); +} - if (!sa_sess->sess) - continue; -#if DPDK_NO_AEAD - ret = (rte_cryptodev_sym_session_free(dev_id, sa_sess->sess) == NULL); - ASSERT (ret); -#else - ret = rte_cryptodev_sym_session_clear(dev_id, sa_sess->sess); - ASSERT (!ret); +static u8 +auth_alg_index (const crypto_alg_t * alg) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + return (alg - dcm->auth_algs); +} + +static crypto_alg_t * +cipher_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 key_len) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + + if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) + return NULL; - ret = rte_cryptodev_sym_session_free(sa_sess->sess); - ASSERT (!ret); + /* *INDENT-OFF* */ + vec_foreach (alg, dcm->cipher_algs) + { + if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (alg->type == RTE_CRYPTO_SYM_XFORM_CIPHER) && + (cap->sym.cipher.algo == alg->alg) && + (alg->key_len == key_len)) + return alg; +#if ! 
DPDK_NO_AEAD + if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) && + (alg->type == RTE_CRYPTO_SYM_XFORM_AEAD) && + (cap->sym.aead.algo == alg->alg) && + (alg->key_len == key_len)) + return alg; #endif - memset(sa_sess, 0, sizeof(sa_sess[0])); - } - } } - /* *INDENT-OFF* */ + /* *INDENT-ON* */ - return 0; + return NULL; } -static void -update_qp_data (crypto_worker_main_t * cwm, - u8 cdev_id, u16 qp_id, u8 is_outbound, u16 * idx) +static crypto_alg_t * +auth_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 trunc_size) { - crypto_qp_data_t *qpd; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + + if ((cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) || + (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH)) + return NULL; /* *INDENT-OFF* */ - vec_foreach_index (*idx, cwm->qp_data) + vec_foreach (alg, dcm->auth_algs) { - qpd = vec_elt_at_index(cwm->qp_data, *idx); - - if (qpd->dev_id == cdev_id && qpd->qp_id == qp_id && - qpd->is_outbound == is_outbound) - return; + if ((cap->sym.auth.algo == alg->alg) && + (alg->trunc_size == trunc_size)) + return alg; } /* *INDENT-ON* */ - vec_add2_aligned (cwm->qp_data, qpd, 1, CLIB_CACHE_LINE_BYTES); + return NULL; +} + +#if ! DPDK_NO_AEAD +static void +crypto_set_aead_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *c; + + c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); + + ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_AEAD); + + xform->type = RTE_CRYPTO_SYM_XFORM_AEAD; + xform->aead.algo = c->alg; + xform->aead.key.data = sa->crypto_key; + xform->aead.key.length = c->key_len; + xform->aead.iv.offset = + crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); + xform->aead.iv.length = 12; + xform->aead.digest_length = c->trunc_size; + xform->aead.aad_length = sa->use_esn ? 12 : 8; + xform->next = NULL; - qpd->dev_id = cdev_id; - qpd->qp_id = qp_id; - qpd->is_outbound = is_outbound; + if (is_outbound) + xform->aead.op = RTE_CRYPTO_AEAD_OP_ENCRYPT; + else + xform->aead.op = RTE_CRYPTO_AEAD_OP_DECRYPT; } +#endif -/* - * return: - * 0: already exist - * 1: mapped - */ -static int -add_mapping (crypto_worker_main_t * cwm, - u8 cdev_id, u16 qp, u8 is_outbound, - const struct rte_cryptodev_capabilities *cipher_cap, - const struct rte_cryptodev_capabilities *auth_cap) +static void +crypto_set_cipher_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) { - u16 qp_index; - uword key = 0, data, *ret; - crypto_worker_qp_key_t *p_key = (crypto_worker_qp_key_t *) & key; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *c; - p_key->cipher_algo = (u8) cipher_cap->sym.cipher.algo; - p_key->auth_algo = (u8) auth_cap->sym.auth.algo; - p_key->is_outbound = is_outbound; + c = vec_elt_at_index (dcm->cipher_algs, sa->crypto_alg); + + ASSERT (c->type == RTE_CRYPTO_SYM_XFORM_CIPHER); + + xform->type = RTE_CRYPTO_SYM_XFORM_CIPHER; + xform->cipher.algo = c->alg; + xform->cipher.key.data = sa->crypto_key; + xform->cipher.key.length = c->key_len; #if ! 
DPDK_NO_AEAD - p_key->is_aead = cipher_cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD; + xform->cipher.iv.offset = + crypto_op_get_priv_offset () + offsetof (dpdk_op_priv_t, cb); + xform->cipher.iv.length = c->iv_len; #endif + xform->next = NULL; - ret = hash_get (cwm->algo_qp_map, key); - if (ret) - return 0; + if (is_outbound) + xform->cipher.op = RTE_CRYPTO_CIPHER_OP_ENCRYPT; + else + xform->cipher.op = RTE_CRYPTO_CIPHER_OP_DECRYPT; +} - update_qp_data (cwm, cdev_id, qp, is_outbound, &qp_index); +static void +crypto_set_auth_xform (struct rte_crypto_sym_xform *xform, + ipsec_sa_t * sa, u8 is_outbound) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *a; + + a = vec_elt_at_index (dcm->auth_algs, sa->integ_alg); - data = (uword) qp_index; - hash_set (cwm->algo_qp_map, key, data); + ASSERT (a->type == RTE_CRYPTO_SYM_XFORM_AUTH); + + xform->type = RTE_CRYPTO_SYM_XFORM_AUTH; + xform->auth.algo = a->alg; + xform->auth.key.data = sa->integ_key; + xform->auth.key.length = a->key_len; + xform->auth.digest_length = a->trunc_size; +#if DPDK_NO_AEAD + if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128 | + sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192 | + sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256) + xform->auth.algo = RTE_CRYPTO_AUTH_AES_GCM; + xform->auth.add_auth_data_length = sa->use_esn ? 12 : 8; +#else +#if 0 + xform->auth.iv.offset = + sizeof (struct rte_crypto_op) + sizeof (struct rte_crypto_sym_op) + + offsetof (dpdk_op_priv_t, cb); + xform->auth.iv.length = a->iv_len; +#endif +#endif + xform->next = NULL; - return 1; + if (is_outbound) + xform->auth.op = RTE_CRYPTO_AUTH_OP_GENERATE; + else + xform->auth.op = RTE_CRYPTO_AUTH_OP_VERIFY; } -/* - * return: - * 0: already exist - * 1: mapped - */ -static int -add_cdev_mapping (crypto_worker_main_t * cwm, - struct rte_cryptodev_info *dev_info, u8 cdev_id, - u16 qp, u8 is_outbound) +clib_error_t * +create_sym_session (struct rte_cryptodev_sym_session **session, + u32 sa_idx, + crypto_resource_t * res, + crypto_worker_main_t * cwm, u8 is_outbound) { - const struct rte_cryptodev_capabilities *i, *j; - u32 mapped = 0; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + ipsec_main_t *im = &ipsec_main; + crypto_data_t *data; + ipsec_sa_t *sa; + struct rte_crypto_sym_xform cipher_xform = { 0 }; + struct rte_crypto_sym_xform auth_xform = { 0 }; + struct rte_crypto_sym_xform *xfs; + crypto_session_key_t key = { 0 }; + + key.drv_id = res->drv_id; + key.sa_idx = sa_idx; + + sa = pool_elt_at_index (im->sad, sa_idx); - for (i = dev_info->capabilities; i->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; i++) - { #if ! DPDK_NO_AEAD - if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) + if ((sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) | + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) | + (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)) + { + crypto_set_aead_xform (&cipher_xform, sa, is_outbound); + xfs = &cipher_xform; + } + else +#endif /* ! 
DPDK_NO_AEAD */ + { + crypto_set_cipher_xform (&cipher_xform, sa, is_outbound); + crypto_set_auth_xform (&auth_xform, sa, is_outbound); + + if (is_outbound) + { + cipher_xform.next = &auth_xform; + xfs = &cipher_xform; + } + else { - struct rte_cryptodev_capabilities none = { 0 }; + auth_xform.next = &cipher_xform; + xfs = &auth_xform; + } + } - if (check_algo_is_supported (i, NULL) != 0) - continue; + data = vec_elt_at_index (dcm->data, res->numa); + +#if DPDK_NO_AEAD + /* + * DPDK_VER <= 1705: + * Each worker/thread has its own session per device driver + */ + session[0] = rte_cryptodev_sym_session_create (res->dev_id, xfs); + if (!session[0]) + { + data->session_drv_failed[res->drv_id] += 1; + return clib_error_return (0, "failed to create session for dev %u", + res->dev_id); + } +#else + /* + * DPDK_VER >= 1708: + * Multiple worker/threads share the session for an SA + * Single session per SA, initialized for each device driver + */ + session[0] = (void *) hash_get (data->session_by_sa_index, sa_idx); + + if (!session[0]) + { + session[0] = rte_cryptodev_sym_session_create (data->session_h); + if (!session[0]) + { + data->session_h_failed += 1; + return clib_error_return (0, "failed to create session header"); + } + hash_set (data->session_by_sa_index, sa_idx, session[0]); + } - none.sym.auth.algo = RTE_CRYPTO_AUTH_NULL; + struct rte_mempool **mp; + mp = vec_elt_at_index (data->session_drv, res->drv_id); + ASSERT (mp[0] != NULL); - mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, &none); - continue; + i32 ret = + rte_cryptodev_sym_session_init (res->dev_id, session[0], xfs, mp[0]); + if (ret) + { + data->session_drv_failed[res->drv_id] += 1; + return clib_error_return (0, "failed to init session for drv %u", + res->drv_id); + } +#endif /* DPDK_NO_AEAD */ + + hash_set (cwm->session_by_drv_id_and_sa_index, key.val, session[0]); + + return 0; +} + +static void __attribute__ ((unused)) clear_and_free_obj (void *obj) +{ + struct rte_mempool *mp = rte_mempool_from_obj (obj); + + memset (obj, 0, mp->elt_size); + + rte_mempool_put (mp, obj); +} + +#if ! DPDK_NO_AEAD +/* This is from rte_cryptodev_pmd.h */ +static inline void * +get_session_private_data (const struct rte_cryptodev_sym_session *sess, + uint8_t driver_id) +{ + return sess->sess_private_data[driver_id]; +} + +/* This is from rte_cryptodev_pmd.h */ +static inline void +set_session_private_data (struct rte_cryptodev_sym_session *sess, + uint8_t driver_id, void *private_data) +{ + sess->sess_private_data[driver_id] = private_data; +} +#endif + +static clib_error_t * +add_del_sa_session (u32 sa_index, u8 is_add) +{ + ipsec_main_t *im = &ipsec_main; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_worker_main_t *cwm; + struct rte_cryptodev_sym_session *s; + crypto_session_key_t key = { 0 }; + uword *val; + u32 drv_id; + i32 ret; + + key.sa_idx = sa_index; + + if (is_add) + { +#if 1 + ipsec_sa_t *sa = pool_elt_at_index (im->sad, sa_index); + u32 seed; + switch (sa->crypto_alg) + { + case IPSEC_CRYPTO_ALG_AES_GCM_128: + case IPSEC_CRYPTO_ALG_AES_GCM_192: + case IPSEC_CRYPTO_ALG_AES_GCM_256: + clib_memcpy (&sa->salt, &sa->crypto_key[sa->crypto_key_len - 4], 4); + break; + default: + seed = (u32) clib_cpu_time_now (); + sa->salt = random_u32 (&seed); } #endif - if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER) - continue; + return 0; + } - if (check_algo_is_supported (i, NULL) != 0) - continue; + /* XXX Wait N cycles to be sure session is not in use OR + * keep refcnt at SA level per worker/thread ? 
*/ + unix_sleep (0.2); - for (j = dev_info->capabilities; j->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; - j++) + /* *INDENT-OFF* */ + vec_foreach (cwm, dcm->workers_main) + { + for (drv_id = 0; drv_id < dcm->max_drv_id; drv_id++) { - if (j->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH) - continue; + key.drv_id = drv_id; + val = hash_get (cwm->session_by_drv_id_and_sa_index, key.val); + s = (struct rte_cryptodev_sym_session *) val; - if (check_algo_is_supported (j, NULL) != 0) + if (!s) continue; - mapped |= add_mapping (cwm, cdev_id, qp, is_outbound, i, j); +#if DPDK_NO_AEAD + ret = (rte_cryptodev_sym_session_free (s->dev_id, s) == NULL); + ASSERT (ret); +#endif + hash_unset (cwm->session_by_drv_id_and_sa_index, key.val); } } + /* *INDENT-ON* */ - return mapped; -} +#if ! DPDK_NO_AEAD + crypto_data_t *data; + /* *INDENT-OFF* */ + vec_foreach (data, dcm->data) + { + val = hash_get (data->session_by_sa_index, sa_index); + s = (struct rte_cryptodev_sym_session *) val; -static int -check_cryptodev_queues () -{ - u32 n_qs = 0; - u8 cdev_id; - u32 n_req_qs = 2; + if (!s) + continue; - if (vlib_num_workers () > 0) - n_req_qs = vlib_num_workers () * 2; + hash_unset (data->session_by_sa_index, sa_index); - for (cdev_id = 0; cdev_id < rte_cryptodev_count (); cdev_id++) - { - struct rte_cryptodev_info cdev_info; + void *drv_session; + vec_foreach_index (drv_id, dcm->drv) + { + drv_session = get_session_private_data (s, drv_id); + if (!drv_session) + continue; - rte_cryptodev_info_get (cdev_id, &cdev_info); + /* + * Custom clear to avoid finding a dev_id for drv_id: + * ret = rte_cryptodev_sym_session_clear (dev_id, drv_session); + * ASSERT (!ret); + */ + clear_and_free_obj (drv_session); - if (! - (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) - continue; + set_session_private_data (s, drv_id, NULL); + } - n_qs += cdev_info.max_nb_queue_pairs; + ret = rte_cryptodev_sym_session_free(s); + ASSERT (!ret); } + /* *INDENT-ON* */ +#endif - if (n_qs >= n_req_qs) - return 0; - else - return -1; + return 0; } static clib_error_t * dpdk_ipsec_check_support (ipsec_sa_t * sa) { - if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + + if (sa->integ_alg == IPSEC_INTEG_ALG_NONE) + switch (sa->crypto_alg) + { + case IPSEC_CRYPTO_ALG_AES_GCM_128: + case IPSEC_CRYPTO_ALG_AES_GCM_192: + case IPSEC_CRYPTO_ALG_AES_GCM_256: + break; + default: + return clib_error_return (0, "unsupported integ-alg %U crypto-alg %U", + format_ipsec_integ_alg, sa->integ_alg, + format_ipsec_crypto_alg, sa->crypto_alg); + } + + /* XXX do we need the NONE check? */ + if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE && + dcm->cipher_algs[sa->crypto_alg].disabled) + return clib_error_return (0, "disabled crypto-alg %U", + format_ipsec_crypto_alg, sa->crypto_alg); + + /* XXX do we need the NONE check? 
*/ + if (sa->integ_alg != IPSEC_INTEG_ALG_NONE && + dcm->auth_algs[sa->integ_alg].disabled) + return clib_error_return (0, "disabled integ-alg %U", + format_ipsec_integ_alg, sa->integ_alg); + return NULL; +} + +static void +crypto_parse_capabilities (crypto_dev_t * dev, + const struct rte_cryptodev_capabilities *cap, + u32 n_mains) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_alg_t *alg; + u8 len, inc; + + for (; cap->op != RTE_CRYPTO_OP_TYPE_UNDEFINED; cap++) { - if (sa->integ_alg != IPSEC_INTEG_ALG_NONE) - return clib_error_return (0, "unsupported integ-alg %U with " - "crypto-alg aes-gcm-128", - format_ipsec_integ_alg, sa->integ_alg); -#if DPDK_NO_AEAD - sa->integ_alg = IPSEC_INTEG_ALG_AES_GCM_128; + /* A single capability maps to multiple cipher/auth algorithms */ + switch (cap->sym.xform_type) + { +#if ! DPDK_NO_AEAD + case RTE_CRYPTO_SYM_XFORM_AEAD: #endif + case RTE_CRYPTO_SYM_XFORM_CIPHER: + inc = cap->sym.cipher.key_size.increment; + inc = inc ? inc : 1; + for (len = cap->sym.cipher.key_size.min; + len <= cap->sym.cipher.key_size.max; len += inc) + { + alg = cipher_cap_to_alg (cap, len); + if (!alg) + continue; + dev->cipher_support[cipher_alg_index (alg)] = 1; + alg->resources += vec_len (dev->free_resources); + /* At least enough resources to support one algo */ + dcm->enabled |= (alg->resources >= n_mains); + } + break; + case RTE_CRYPTO_SYM_XFORM_AUTH: + inc = cap->sym.auth.digest_size.increment; + inc = inc ? inc : 1; + for (len = cap->sym.auth.digest_size.min; + len <= cap->sym.auth.digest_size.max; len += inc) + { + alg = auth_cap_to_alg (cap, len); + if (!alg) + continue; + dev->auth_support[auth_alg_index (alg)] = 1; + alg->resources += vec_len (dev->free_resources); + /* At least enough resources to support one algo */ + dcm->enabled |= (alg->resources >= n_mains); + } + break; + default: + ; + } } +} + +#define DPDK_CRYPTO_N_QUEUE_DESC 2048 +#define DPDK_CRYPTO_NB_SESS_OBJS 20000 + +static clib_error_t * +crypto_dev_conf (u8 dev, u16 n_qp, u8 numa) +{ + struct rte_cryptodev_config dev_conf; + struct rte_cryptodev_qp_conf qp_conf; + i32 ret; + u16 qp; + i8 *error_str; + + dev_conf.socket_id = numa; + dev_conf.nb_queue_pairs = n_qp; +#if DPDK_NO_AEAD + dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS; + dev_conf.session_mp.cache_size = 512; +#endif + + error_str = "failed to configure crypto device %u"; + ret = rte_cryptodev_configure (dev, &dev_conf); + if (ret < 0) + return clib_error_return (0, error_str, dev); + + error_str = "failed to setup crypto device %u queue pair %u"; + qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; + for (qp = 0; qp < n_qp; qp++) + { #if DPDK_NO_AEAD - else if (sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE || - sa->integ_alg == IPSEC_INTEG_ALG_NONE || - sa->integ_alg == IPSEC_INTEG_ALG_AES_GCM_128) + ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa); #else - else if (sa->integ_alg == IPSEC_INTEG_ALG_NONE) + ret = rte_cryptodev_queue_pair_setup (dev, qp, &qp_conf, numa, NULL); #endif - return clib_error_return (0, - "unsupported integ-alg %U with crypto-alg %U", - format_ipsec_integ_alg, sa->integ_alg, - format_ipsec_crypto_alg, sa->crypto_alg); + if (ret < 0) + return clib_error_return (0, error_str, dev, qp); + } return 0; } -static uword -dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, - vlib_frame_t * f) +static void +crypto_scan_devs (u32 n_mains) { - ipsec_main_t *im = &ipsec_main; dpdk_crypto_main_t *dcm = &dpdk_crypto_main; - vlib_thread_main_t *tm = vlib_get_thread_main (); - struct 
rte_cryptodev_config dev_conf; - struct rte_cryptodev_qp_conf qp_conf; - struct rte_cryptodev_info cdev_info; - struct rte_mempool *rmp; - i32 dev_id, ret; - u32 i, skip_master; -#if ! DPDK_NO_AEAD - u32 max_sess_size = 0, sess_size; - i8 socket_id; + struct rte_cryptodev *cryptodev; + struct rte_cryptodev_info info; + crypto_dev_t *dev; + crypto_resource_t *res; + clib_error_t *error; + u32 i; + u16 max_res_idx, res_idx, j; + u8 drv_id; + + vec_validate_init_empty (dcm->dev, rte_cryptodev_count () - 1, + (crypto_dev_t) EMPTY_STRUCT); + + for (i = 0; i < rte_cryptodev_count (); i++) + { + dev = vec_elt_at_index (dcm->dev, i); + + cryptodev = &rte_cryptodevs[i]; + rte_cryptodev_info_get (i, &info); + + dev->id = i; + dev->name = cryptodev->data->name; + dev->numa = rte_cryptodev_socket_id (i); + dev->features = info.feature_flags; + dev->max_qp = info.max_nb_queue_pairs; +#if DPDK_NO_AEAD + drv_id = cryptodev->dev_type; +#else + drv_id = info.driver_id; #endif + if (drv_id >= vec_len (dcm->drv)) + vec_validate_init_empty (dcm->drv, drv_id, + (crypto_drv_t) EMPTY_STRUCT); + vec_elt_at_index (dcm->drv, drv_id)->name = info.driver_name; + dev->drv_id = drv_id; + vec_add1 (vec_elt_at_index (dcm->drv, drv_id)->devs, i); + + if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) + continue; - if (check_cryptodev_queues () < 0) - { - clib_warning ("not enough Cryptodevs, default to OpenSSL IPsec"); - return 0; - } - dcm->enabled = 1; + if ((error = crypto_dev_conf (i, dev->max_qp, dev->numa))) + { + clib_error_report (error); + continue; + } - vec_alloc (dcm->workers_main, tm->n_vlib_mains); - _vec_len (dcm->workers_main) = tm->n_vlib_mains; + max_res_idx = (dev->max_qp / 2) - 1; - skip_master = vlib_num_workers () > 0; + vec_validate (dev->free_resources, max_res_idx); - fprintf (stdout, "DPDK Cryptodevs info:\n"); - fprintf (stdout, "dev_id\tn_qp\tnb_obj\tcache_size\n"); - /* HW cryptodevs have higher dev_id, use HW first */ - for (dev_id = rte_cryptodev_count () - 1; dev_id >= 0; dev_id--) - { - u16 max_nb_qp, qp = 0; + res_idx = vec_len (dcm->resource); + vec_validate_init_empty_aligned (dcm->resource, res_idx + max_res_idx, + (crypto_resource_t) EMPTY_STRUCT, + CLIB_CACHE_LINE_BYTES); - rte_cryptodev_info_get (dev_id, &cdev_info); + for (j = 0; j <= max_res_idx; j++, res_idx++) + { + vec_elt (dev->free_resources, max_res_idx - j) = res_idx; + res = &dcm->resource[res_idx]; + res->dev_id = i; + res->drv_id = drv_id; + res->qp_id = j * 2; + res->numa = dev->numa; + res->thread_idx = (u16) ~ 0; + } - if (! 
- (cdev_info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING)) - continue; + crypto_parse_capabilities (dev, info.capabilities, n_mains); + } +} - max_nb_qp = cdev_info.max_nb_queue_pairs; +void +crypto_auto_placement (void) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_resource_t *res; + crypto_worker_main_t *cwm; + crypto_dev_t *dev; + u32 thread_idx, skip_master; + u16 res_idx, *idx; + u8 used; + u16 i; - for (i = 0; i < tm->n_vlib_mains; i++) + skip_master = vlib_num_workers () > 0; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + { + vec_foreach_index (thread_idx, dcm->workers_main) { - u8 is_outbound; - crypto_worker_main_t *cwm; - uword *map; + if (vec_len (dev->free_resources) == 0) + break; + + if (thread_idx < skip_master) + continue; + + /* Check thread is not already using the device */ + vec_foreach (idx, dev->used_resources) + if (dcm->resource[idx[0]].thread_idx == thread_idx) + continue; - if (skip_master) + cwm = vec_elt_at_index (dcm->workers_main, thread_idx); + + used = 0; + res_idx = vec_pop (dev->free_resources); + + /* Set device only for supported algos */ + for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++) + if (dev->cipher_support[i] && + cwm->cipher_resource_idx[i] == (u16) ~0) + { + dcm->cipher_algs[i].disabled--; + cwm->cipher_resource_idx[i] = res_idx; + used = 1; + } + + for (i = 0; i < IPSEC_INTEG_N_ALG; i++) + if (dev->auth_support[i] && + cwm->auth_resource_idx[i] == (u16) ~0) + { + dcm->auth_algs[i].disabled--; + cwm->auth_resource_idx[i] = res_idx; + used = 1; + } + + if (!used) { - skip_master = 0; + vec_add1 (dev->free_resources, res_idx); continue; } - cwm = vec_elt_at_index (dcm->workers_main, i); - map = cwm->algo_qp_map; + vec_add1 (dev->used_resources, res_idx); - if (!map) - { - map = hash_create (0, sizeof (crypto_worker_qp_key_t)); - if (!map) - { - clib_warning ("unable to create hash table for worker %u", - vlib_mains[i]->thread_index); - goto error; - } - cwm->algo_qp_map = map; - } + res = vec_elt_at_index (dcm->resource, res_idx); + + ASSERT (res->thread_idx == (u16) ~0); + res->thread_idx = thread_idx; - for (is_outbound = 0; is_outbound < 2 && qp < max_nb_qp; - is_outbound++) - qp += add_cdev_mapping (cwm, &cdev_info, dev_id, qp, is_outbound); + /* Add device to vector of polling resources */ + vec_add1 (cwm->resource_idx, res_idx); } + } + /* *INDENT-ON* */ +} - if (qp == 0) - continue; +static void +crypto_op_init (struct rte_mempool *mempool, + void *_arg __attribute__ ((unused)), + void *_obj, unsigned i __attribute__ ((unused))) +{ + struct rte_crypto_op *op = _obj; - dev_conf.socket_id = rte_cryptodev_socket_id (dev_id); - dev_conf.nb_queue_pairs = cdev_info.max_nb_queue_pairs; #if DPDK_NO_AEAD - dev_conf.session_mp.nb_objs = DPDK_CRYPTO_NB_SESS_OBJS; - dev_conf.session_mp.cache_size = DPDK_CRYPTO_CACHE_SIZE; + op->sym = (struct rte_crypto_sym_op *) (op + 1); + op->sym->sess_type = RTE_CRYPTO_SYM_OP_WITH_SESSION; +#else + op->sess_type = RTE_CRYPTO_OP_WITH_SESSION; #endif - ret = rte_cryptodev_configure (dev_id, &dev_conf); - if (ret < 0) - { - clib_warning ("cryptodev %u config error", dev_id); - goto error; - } + op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED; + op->phys_addr = rte_mem_virt2phy (_obj); + op->mempool = mempool; +} - qp_conf.nb_descriptors = DPDK_CRYPTO_N_QUEUE_DESC; - for (qp = 0; qp < dev_conf.nb_queue_pairs; qp++) - { +static clib_error_t * +crypto_create_crypto_op_pool (u8 numa) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + dpdk_config_main_t 
*conf = &dpdk_config_main; + crypto_data_t *data; + u8 *pool_name; + u32 pool_priv_size = sizeof (struct rte_crypto_op_pool_private); + struct rte_crypto_op_pool_private *priv; + clib_error_t *error = NULL; + + data = vec_elt_at_index (dcm->data, numa); + + if (data->crypto_op) + return NULL; + + pool_name = format (0, "crypto_pool_numa%u%c", numa, 0); + + data->crypto_op = + rte_mempool_create ((i8 *) pool_name, conf->num_mbufs, crypto_op_len (), + 512, pool_priv_size, NULL, NULL, crypto_op_init, NULL, + numa, 0); + + if (!data->crypto_op) + { + error = clib_error_return (0, "failed to allocate %s", pool_name); + goto done; + } + + priv = rte_mempool_get_priv (data->crypto_op); + + priv->priv_size = pool_priv_size; + priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC; + +done: + vec_free (pool_name); + + return error; +} + +static clib_error_t * +crypto_create_session_h_pool (u8 numa) +{ #if DPDK_NO_AEAD - ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf, - dev_conf.socket_id); + return NULL; #else - ret = rte_cryptodev_queue_pair_setup (dev_id, qp, &qp_conf, - dev_conf.socket_id, NULL); + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 *pool_name; + u32 elt_size; + clib_error_t *error = NULL; + + data = vec_elt_at_index (dcm->data, numa); + + if (data->session_h) + return NULL; + + pool_name = format (0, "session_h_pool_numa%u%c", numa, 0); + elt_size = rte_cryptodev_get_header_session_size (); + + data->session_h = + rte_mempool_create ((i8 *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, elt_size, + 512, 0, NULL, NULL, NULL, NULL, numa, 0); + + if (!data->session_h) + error = clib_error_return (0, "failed to allocate %s", pool_name); + + vec_free (pool_name); + + return error; #endif - if (ret < 0) - { - clib_warning ("cryptodev %u qp %u setup error", dev_id, qp); - goto error; - } - } - vec_validate (dcm->cop_pools, dev_conf.socket_id); +} -#if ! 
DPDK_NO_AEAD - sess_size = rte_cryptodev_get_private_session_size (dev_id); - if (sess_size > max_sess_size) - max_sess_size = sess_size; +static clib_error_t * +crypto_create_session_drv_pool (crypto_dev_t * dev) +{ +#if DPDK_NO_AEAD + return NULL; +#else + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 *pool_name; + u32 elt_size; + clib_error_t *error = NULL; + u8 numa = dev->numa; + + data = vec_elt_at_index (dcm->data, numa); + + vec_validate (data->session_drv, dev->drv_id); + vec_validate (data->session_drv_failed, dev->drv_id); + + if (data->session_drv[dev->drv_id]) + return NULL; + + pool_name = format (0, "session_drv%u_pool_numa%u%c", dev->drv_id, numa, 0); + elt_size = rte_cryptodev_get_private_session_size (dev->id); + + data->session_drv[dev->drv_id] = + rte_mempool_create ((i8 *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS, elt_size, + 512, 0, NULL, NULL, NULL, NULL, numa, 0); + + if (!data->session_drv[dev->drv_id]) + error = clib_error_return (0, "failed to allocate %s", pool_name); + + vec_free (pool_name); + + return error; #endif +} - if (!vec_elt (dcm->cop_pools, dev_conf.socket_id)) - { - u8 *pool_name = format (0, "crypto_op_pool_socket%u%c", - dev_conf.socket_id, 0); - - rmp = rte_crypto_op_pool_create ((char *) pool_name, - RTE_CRYPTO_OP_TYPE_SYMMETRIC, - DPDK_CRYPTO_NB_COPS * - (1 + vlib_num_workers ()), - DPDK_CRYPTO_CACHE_SIZE, - DPDK_CRYPTO_PRIV_SIZE, - dev_conf.socket_id); - - if (!rmp) - { - clib_warning ("failed to allocate %s", pool_name); - vec_free (pool_name); - goto error; - } - vec_free (pool_name); - vec_elt (dcm->cop_pools, dev_conf.socket_id) = rmp; - } +static clib_error_t * +crypto_create_pools (void) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + clib_error_t *error = NULL; + crypto_dev_t *dev; + + /* *INDENT-OFF* */ + vec_foreach (dev, dcm->dev) + { + vec_validate (dcm->data, dev->numa); + + error = crypto_create_crypto_op_pool (dev->numa); + if (error) + return error; + + error = crypto_create_session_h_pool (dev->numa); + if (error) + return error; - fprintf (stdout, "%u\t%u\t%u\t%u\n", dev_id, dev_conf.nb_queue_pairs, - DPDK_CRYPTO_NB_SESS_OBJS, DPDK_CRYPTO_CACHE_SIZE); + error = crypto_create_session_drv_pool (dev); + if (error) + return error; } + /* *INDENT-ON* */ + + return NULL; +} + +static void +crypto_disable (void) +{ + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + crypto_data_t *data; + u8 i; + + dcm->enabled = 0; -#if ! 
DPDK_NO_AEAD /* *INDENT-OFF* */ - vec_foreach_index (socket_id, dcm->cop_pools) + vec_foreach (data, dcm->data) { - u8 *pool_name; + rte_mempool_free (data->crypto_op); + rte_mempool_free (data->session_h); - if (!vec_elt (dcm->cop_pools, socket_id)) - continue; + vec_foreach_index (i, data->session_drv) + rte_mempool_free (data->session_drv[i]); - vec_validate (dcm->sess_h_pools, socket_id); - pool_name = format (0, "crypto_sess_h_socket%u%c", - socket_id, 0); - rmp = - rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS, - rte_cryptodev_get_header_session_size (), - 512, 0, NULL, NULL, NULL, NULL, - socket_id, 0); - if (!rmp) - { - clib_warning ("failed to allocate %s", pool_name); - vec_free (pool_name); - goto error; - } - vec_free (pool_name); - vec_elt (dcm->sess_h_pools, socket_id) = rmp; - - vec_validate (dcm->sess_pools, socket_id); - pool_name = format (0, "crypto_sess_socket%u%c", - socket_id, 0); - rmp = - rte_mempool_create((i8 *)pool_name, DPDK_CRYPTO_NB_SESS_OBJS, - max_sess_size, 512, 0, NULL, NULL, NULL, NULL, - socket_id, 0); - if (!rmp) - { - clib_warning ("failed to allocate %s", pool_name); - vec_free (pool_name); - goto error; - } - vec_free (pool_name); - vec_elt (dcm->sess_pools, socket_id) = rmp; + vec_free (data->session_drv); } /* *INDENT-ON* */ -#endif - dpdk_esp_init (); + vec_free (dcm->data); - /* Add new next node and set as default */ + vec_free (dcm->workers_main); + vec_free (dcm->sa_session); + vec_free (dcm->dev); + vec_free (dcm->resource); + vec_free (dcm->cipher_algs); + vec_free (dcm->auth_algs); +} + +static uword +dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, + vlib_frame_t * f) +{ + ipsec_main_t *im = &ipsec_main; + dpdk_crypto_main_t *dcm = &dpdk_crypto_main; + vlib_thread_main_t *tm = vlib_get_thread_main (); + crypto_worker_main_t *cwm; + clib_error_t *error = NULL; + u32 i, skip_master, n_mains; + + n_mains = tm->n_vlib_mains; + skip_master = vlib_num_workers () > 0; + + algos_init (n_mains - skip_master); + + crypto_scan_devs (n_mains - skip_master); + + if (!(dcm->enabled)) + { + clib_warning ("not enough DPDK crypto resources, default to OpenSSL"); + crypto_disable (); + return 0; + } + + vec_validate_init_empty (dcm->workers_main, n_mains - 1, + (crypto_worker_main_t) EMPTY_STRUCT); + + /* *INDENT-OFF* */ + vec_foreach (cwm, dcm->workers_main) + { + memset (cwm->cipher_resource_idx, ~0, + IPSEC_CRYPTO_N_ALG * sizeof(*cwm->cipher_resource_idx)); + memset (cwm->auth_resource_idx, ~0, + IPSEC_INTEG_N_ALG * sizeof(*cwm->auth_resource_idx)); + } + /* *INDENT-ON* */ + + crypto_auto_placement (); + + error = crypto_create_pools (); + if (error) + { + clib_error_report (error); + crypto_disable (); + return 0; + } + + /* Add new next node and set it as default */ vlib_node_t *node, *next_node; next_node = vlib_get_node_by_name (vm, (u8 *) "dpdk-esp-encrypt"); @@ -459,33 +1091,12 @@ dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt, vlib_node_add_next (vm, node->index, next_node->index); im->cb.check_support_cb = dpdk_ipsec_check_support; - im->cb.add_del_sa_sess_cb = add_del_sa_sess; - - for (i = skip_master; i < tm->n_vlib_mains; i++) - vlib_node_set_state (vlib_mains[i], dpdk_crypto_input_node.index, - VLIB_NODE_STATE_POLLING); - - /* TODO cryptodev counters */ - - return 0; - -error: - ; - crypto_worker_main_t *cwm; - struct rte_mempool **mp; - /* *INDENT-OFF* */ - vec_foreach (cwm, dcm->workers_main) - hash_free (cwm->algo_qp_map); - - vec_foreach (mp, dcm->cop_pools) - { - if (mp) - rte_mempool_free 
(mp[0]); - } - /* *INDENT-ON* */ - vec_free (dcm->workers_main); - vec_free (dcm->cop_pools); + im->cb.add_del_sa_sess_cb = add_del_sa_session; + node = vlib_get_node_by_name (vm, (u8 *) "dpdk-crypto-input"); + ASSERT (node); + for (i = skip_master; i < n_mains; i++) + vlib_node_set_state (vlib_mains[i], node->index, VLIB_NODE_STATE_POLLING); return 0; } |