/*
 * Copyright (c) 2017 Intel and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vnet/ip/ip.h>
#include <vnet/api_errno.h>
#include <vnet/ipsec/ipsec.h>
#include <vlib/node_funcs.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/ipsec/ipsec.h>

dpdk_crypto_main_t dpdk_crypto_main;

#define EMPTY_STRUCT {0}

static void
algos_init (u32 n_mains)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_alg_t *a;

  vec_validate_aligned (dcm->cipher_algs, IPSEC_CRYPTO_N_ALG - 1, 8);

  {
#define _(v,f,str) \
  dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].name = str; \
  dcm->cipher_algs[IPSEC_CRYPTO_ALG_##f].disabled = n_mains;
    foreach_ipsec_crypto_alg
#undef _
  }

  /* Minimum boundary for ciphers is 4B, required by ESP */
  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_NONE];
  a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  a->alg = RTE_CRYPTO_CIPHER_NULL;
  a->boundary = 4;		/* 1 */
  a->key_len = 0;
  a->iv_len = 0;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_128];
  a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  a->alg = RTE_CRYPTO_CIPHER_AES_CBC;
  a->boundary = 16;
  a->key_len = 16;
  a->iv_len = 16;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_192];
  a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  a->alg = RTE_CRYPTO_CIPHER_AES_CBC;
  a->boundary = 16;
  a->key_len = 24;
  a->iv_len = 16;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CBC_256];
  a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  a->alg = RTE_CRYPTO_CIPHER_AES_CBC;
  a->boundary = 16;
  a->key_len = 32;
  a->iv_len = 16;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_128];
  a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  a->alg = RTE_CRYPTO_CIPHER_AES_CTR;
  a->boundary = 4;		/* 1 */
  a->key_len = 16;
  a->iv_len = 8;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_192];
  a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  a->alg = RTE_CRYPTO_CIPHER_AES_CTR;
  a->boundary = 4;		/* 1 */
  a->key_len = 24;
  a->iv_len = 8;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_CTR_256];
  a->type = RTE_CRYPTO_SYM_XFORM_CIPHER;
  a->alg = RTE_CRYPTO_CIPHER_AES_CTR;
  a->boundary = 4;		/* 1 */
  a->key_len = 32;
  a->iv_len = 8;

#define AES_GCM_TYPE RTE_CRYPTO_SYM_XFORM_AEAD
#define AES_GCM_ALG RTE_CRYPTO_AEAD_AES_GCM

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_128];
  a->type = AES_GCM_TYPE;
  a->alg = AES_GCM_ALG;
  a->boundary = 4;		/* 1 */
  a->key_len = 16;
  a->iv_len = 8;
  a->trunc_size = 16;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_192];
  a->type = AES_GCM_TYPE;
  a->alg = AES_GCM_ALG;
  a->boundary = 4;		/* 1 */
  a->key_len = 24;
  a->iv_len = 8;
  a->trunc_size = 16;

  a = &dcm->cipher_algs[IPSEC_CRYPTO_ALG_AES_GCM_256];
  a->type = AES_GCM_TYPE;
  a->alg = AES_GCM_ALG;
  a->boundary = 4;		/* 1 */
  a->key_len = 32;
  a->iv_len = 8;
  a->trunc_size = 16;

  vec_validate (dcm->auth_algs, IPSEC_INTEG_N_ALG - 1);

  {
#define _(v,f,str) \
  dcm->auth_algs[IPSEC_INTEG_ALG_##f].name = str; \
  dcm->auth_algs[IPSEC_INTEG_ALG_##f].disabled = n_mains;
    foreach_ipsec_integ_alg
#undef _
  }

  a = &dcm->auth_algs[IPSEC_INTEG_ALG_NONE];
  a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  a->alg = RTE_CRYPTO_AUTH_NULL;
  a->key_len = 0;
  a->trunc_size = 0;

  a = &dcm->auth_algs[IPSEC_INTEG_ALG_MD5_96];
  a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  a->alg = RTE_CRYPTO_AUTH_MD5_HMAC;
  a->key_len = 16;
  a->trunc_size = 12;
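  /*
   * Note: for the HMAC entries below, key_len is the full HMAC key size
   * and trunc_size is the truncated ICV length carried in the ESP
   * trailer; the 96-bit truncations follow RFC 2404-style usage, the
   * 128/192/256-bit ones RFC 4868.
   */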
  a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA1_96];
  a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  a->alg = RTE_CRYPTO_AUTH_SHA1_HMAC;
  a->key_len = 20;
  a->trunc_size = 12;

  a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_96];
  a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
  a->key_len = 32;
  a->trunc_size = 12;

  a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_256_128];
  a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  a->alg = RTE_CRYPTO_AUTH_SHA256_HMAC;
  a->key_len = 32;
  a->trunc_size = 16;

  a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_384_192];
  a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  a->alg = RTE_CRYPTO_AUTH_SHA384_HMAC;
  a->key_len = 48;
  a->trunc_size = 24;

  a = &dcm->auth_algs[IPSEC_INTEG_ALG_SHA_512_256];
  a->type = RTE_CRYPTO_SYM_XFORM_AUTH;
  a->alg = RTE_CRYPTO_AUTH_SHA512_HMAC;
  a->key_len = 64;
  a->trunc_size = 32;
}

static u8
cipher_alg_index (const crypto_alg_t * alg)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;

  return (alg - dcm->cipher_algs);
}

static u8
auth_alg_index (const crypto_alg_t * alg)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;

  return (alg - dcm->auth_algs);
}

static crypto_alg_t *
cipher_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 key_len)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_alg_t *alg;

  if (cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
    return NULL;

  /* *INDENT-OFF* */
  vec_foreach (alg, dcm->cipher_algs)
    {
      if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	  (alg->type == RTE_CRYPTO_SYM_XFORM_CIPHER) &&
	  (cap->sym.cipher.algo == alg->alg) &&
	  (alg->key_len == key_len))
	return alg;
      if ((cap->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
	  (alg->type == RTE_CRYPTO_SYM_XFORM_AEAD) &&
	  (cap->sym.aead.algo == alg->alg) &&
	  (alg->key_len == key_len))
	return alg;
    }
  /* *INDENT-ON* */

  return NULL;
}

static crypto_alg_t *
auth_cap_to_alg (const struct rte_cryptodev_capabilities *cap, u8 trunc_size)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_alg_t *alg;

  if ((cap->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC) ||
      (cap->sym.xform_type != RTE_CRYPTO_SYM_XFORM_AUTH))
    return NULL;

  /* *INDENT-OFF* */
  vec_foreach (alg, dcm->auth_algs)
    {
      if ((cap->sym.auth.algo == alg->alg) &&
	  (alg->trunc_size == trunc_size))
	return alg;
    }
  /* *INDENT-ON* */

  return NULL;
}
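/*
 * Each crypto_resource_t owns two consecutive device queue pairs
 * (qp_id = j * 2), so a device advertising max_qp queue pairs yields
 * max_qp / 2 schedulable resources; each resource is later bound to at
 * most one worker thread by crypto_auto_placement ().
 */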
static void
crypto_scan_devs (u32 n_mains)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  struct rte_cryptodev *cryptodev;
  struct rte_cryptodev_info info;
  crypto_dev_t *dev;
  crypto_resource_t *res;
  clib_error_t *error;
  u32 i;
  u16 max_res_idx, res_idx, j;
  u8 drv_id;

  vec_validate_init_empty (dcm->dev, rte_cryptodev_count () - 1,
			   (crypto_dev_t) EMPTY_STRUCT);

  for (i = 0; i < rte_cryptodev_count (); i++)
    {
      dev = vec_elt_at_index (dcm->dev, i);
      cryptodev = &rte_cryptodevs[i];
      rte_cryptodev_info_get (i, &info);

      dev->id = i;
      dev->name = cryptodev->data->name;
      dev->numa = rte_cryptodev_socket_id (i);
      dev->features = info.feature_flags;
      dev->max_qp = info.max_nb_queue_pairs;
      drv_id = info.driver_id;
      if (drv_id >= vec_len (dcm->drv))
	vec_validate_init_empty (dcm->drv, drv_id,
				 (crypto_drv_t) EMPTY_STRUCT);
      vec_elt_at_index (dcm->drv, drv_id)->name = info.driver_name;
      dev->drv_id = drv_id;
      vec_add1 (vec_elt_at_index (dcm->drv, drv_id)->devs, i);

      if (!(info.feature_flags & RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING))
	continue;

      if ((error = crypto_dev_conf (i, dev->max_qp, dev->numa)))
	{
	  clib_error_report (error);
	  continue;
	}

      max_res_idx = (dev->max_qp / 2) - 1;

      vec_validate (dev->free_resources, max_res_idx);

      res_idx = vec_len (dcm->resource);
      vec_validate_init_empty_aligned (dcm->resource, res_idx + max_res_idx,
				       (crypto_resource_t) EMPTY_STRUCT,
				       CLIB_CACHE_LINE_BYTES);

      for (j = 0; j <= max_res_idx; j++, res_idx++)
	{
	  vec_elt (dev->free_resources, max_res_idx - j) = res_idx;
	  res = &dcm->resource[res_idx];
	  res->dev_id = i;
	  res->drv_id = drv_id;
	  res->qp_id = j * 2;
	  res->numa = dev->numa;
	  res->thread_idx = (u16) ~0;
	}

      crypto_parse_capabilities (dev, info.capabilities, n_mains);
    }
}

void
crypto_auto_placement (void)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_resource_t *res;
  crypto_worker_main_t *cwm;
  crypto_dev_t *dev;
  u32 thread_idx, skip_master;
  u16 res_idx, *idx;
  u8 used;
  u16 i;

  skip_master = vlib_num_workers () > 0;

  /* *INDENT-OFF* */
  vec_foreach (dev, dcm->dev)
    {
      vec_foreach_index (thread_idx, dcm->workers_main)
	{
	  if (vec_len (dev->free_resources) == 0)
	    break;

	  if (thread_idx < skip_master)
	    continue;

	  /* Check thread is not already using the device */
	  vec_foreach (idx, dev->used_resources)
	    if (dcm->resource[idx[0]].thread_idx == thread_idx)
	      continue;

	  cwm = vec_elt_at_index (dcm->workers_main, thread_idx);

	  used = 0;
	  res_idx = vec_pop (dev->free_resources);

	  /* Set device only for supported algos */
	  for (i = 0; i < IPSEC_CRYPTO_N_ALG; i++)
	    if (dev->cipher_support[i] &&
		cwm->cipher_resource_idx[i] == (u16) ~0)
	      {
		dcm->cipher_algs[i].disabled--;
		cwm->cipher_resource_idx[i] = res_idx;
		used = 1;
	      }

	  for (i = 0; i < IPSEC_INTEG_N_ALG; i++)
	    if (dev->auth_support[i] &&
		cwm->auth_resource_idx[i] == (u16) ~0)
	      {
		dcm->auth_algs[i].disabled--;
		cwm->auth_resource_idx[i] = res_idx;
		used = 1;
	      }

	  if (!used)
	    {
	      vec_add1 (dev->free_resources, res_idx);
	      continue;
	    }

	  vec_add1 (dev->used_resources, res_idx);

	  res = vec_elt_at_index (dcm->resource, res_idx);

	  ASSERT (res->thread_idx == (u16) ~0);
	  res->thread_idx = thread_idx;

	  /* Add device to vector of polling resources */
	  vec_add1 (cwm->resource_idx, res_idx);
	}
    }
  /* *INDENT-ON* */
}

static void
crypto_op_init (struct rte_mempool *mempool,
		void *_arg __attribute__ ((unused)),
		void *_obj, unsigned i __attribute__ ((unused)))
{
  struct rte_crypto_op *op = _obj;

  op->sess_type = RTE_CRYPTO_OP_WITH_SESSION;
  op->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
  op->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
  op->phys_addr = rte_mempool_virt2iova (_obj);
  op->mempool = mempool;
}
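/*
 * All DPDK objects handed to the workers come from per-NUMA mempools:
 * one rte_crypto_op pool, one session-header pool, and one
 * session-private pool per driver, so ops and sessions stay on the
 * memory node of the device that processes them.
 */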
static clib_error_t *
crypto_create_crypto_op_pool (vlib_main_t * vm, u8 numa)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  dpdk_config_main_t *conf = &dpdk_config_main;
  crypto_data_t *data;
  u8 *pool_name;
  u32 pool_priv_size = sizeof (struct rte_crypto_op_pool_private);
  struct rte_crypto_op_pool_private *priv;
  struct rte_mempool *mp;

  data = vec_elt_at_index (dcm->data, numa);

  /* Already allocated */
  if (data->crypto_op)
    return NULL;

  pool_name = format (0, "crypto_pool_numa%u%c", numa, 0);

  mp = rte_mempool_create ((char *) pool_name, conf->num_mbufs,
			   crypto_op_len (), 512, pool_priv_size, NULL, NULL,
			   crypto_op_init, NULL, numa, 0);

  vec_free (pool_name);

  if (!mp)
    return clib_error_return (0, "failed to create crypto op mempool");

  /* Initialize mempool private data */
  priv = rte_mempool_get_priv (mp);
  priv->priv_size = pool_priv_size;
  priv->type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;

  data->crypto_op = mp;

  return NULL;
}

static clib_error_t *
crypto_create_session_h_pool (vlib_main_t * vm, u8 numa)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data;
  u8 *pool_name;
  struct rte_mempool *mp;
  u32 elt_size;

  data = vec_elt_at_index (dcm->data, numa);

  if (data->session_h)
    return NULL;

  pool_name = format (0, "session_h_pool_numa%u%c", numa, 0);

  elt_size = rte_cryptodev_sym_get_header_session_size ();

  mp = rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS,
			   elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0);

  vec_free (pool_name);

  if (!mp)
    return clib_error_return (0, "failed to create crypto session mempool");

  data->session_h = mp;

  return NULL;
}

static clib_error_t *
crypto_create_session_drv_pool (vlib_main_t * vm, crypto_dev_t * dev)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data;
  u8 *pool_name;
  struct rte_mempool *mp;
  u32 elt_size;
  u8 numa = dev->numa;

  data = vec_elt_at_index (dcm->data, numa);

  vec_validate (data->session_drv, dev->drv_id);
  vec_validate (data->session_drv_failed, dev->drv_id);
  vec_validate_aligned (data->session_by_drv_id_and_sa_index, 32,
			CLIB_CACHE_LINE_BYTES);

  if (data->session_drv[dev->drv_id])
    return NULL;

  pool_name = format (0, "session_drv%u_pool_numa%u%c", dev->drv_id, numa, 0);

  elt_size = rte_cryptodev_sym_get_private_session_size (dev->id);

  mp = rte_mempool_create ((char *) pool_name, DPDK_CRYPTO_NB_SESS_OBJS,
			   elt_size, 512, 0, NULL, NULL, NULL, NULL, numa, 0);

  vec_free (pool_name);

  if (!mp)
    return clib_error_return (0, "failed to create session drv mempool");

  data->session_drv[dev->drv_id] = mp;
  clib_spinlock_init (&data->lockp);

  return NULL;
}

static clib_error_t *
crypto_create_pools (vlib_main_t * vm)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  clib_error_t *error = NULL;
  crypto_dev_t *dev;

  /* *INDENT-OFF* */
  vec_foreach (dev, dcm->dev)
    {
      vec_validate_aligned (dcm->data, dev->numa, CLIB_CACHE_LINE_BYTES);

      error = crypto_create_crypto_op_pool (vm, dev->numa);
      if (error)
	return error;

      error = crypto_create_session_h_pool (vm, dev->numa);
      if (error)
	return error;

      error = crypto_create_session_drv_pool (vm, dev);
      if (error)
	return error;
    }
  /* *INDENT-ON* */

  return NULL;
}
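/*
 * If the device scan or pool creation fails, crypto_disable () below
 * frees every pool and vector and leaves dcm->enabled = 0, so IPsec
 * keeps the default (OpenSSL-based) ESP backend instead of the DPDK
 * one; see dpdk_ipsec_process ().
 */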
static void
crypto_disable (void)
{
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  crypto_data_t *data;
  u8 i;

  dcm->enabled = 0;

  /* *INDENT-OFF* */
  vec_foreach (data, dcm->data)
    {
      rte_mempool_free (data->crypto_op);
      rte_mempool_free (data->session_h);

      vec_foreach_index (i, data->session_drv)
	rte_mempool_free (data->session_drv[i]);

      vec_free (data->session_drv);
      clib_spinlock_free (&data->lockp);
    }
  /* *INDENT-ON* */

  vec_free (dcm->data);
  vec_free (dcm->workers_main);
  vec_free (dcm->dev);
  vec_free (dcm->resource);
  vec_free (dcm->cipher_algs);
  vec_free (dcm->auth_algs);
}

static uword
dpdk_ipsec_process (vlib_main_t * vm, vlib_node_runtime_t * rt,
		    vlib_frame_t * f)
{
  ipsec_main_t *im = &ipsec_main;
  dpdk_crypto_main_t *dcm = &dpdk_crypto_main;
  vlib_thread_main_t *tm = vlib_get_thread_main ();
  crypto_worker_main_t *cwm;
  clib_error_t *error = NULL;
  u32 i, skip_master, n_mains;

  n_mains = tm->n_vlib_mains;
  skip_master = vlib_num_workers () > 0;

  algos_init (n_mains - skip_master);

  crypto_scan_devs (n_mains - skip_master);

  if (!(dcm->enabled))
    {
      clib_warning ("not enough DPDK crypto resources, default to OpenSSL");
      crypto_disable ();
      return 0;
    }

  dcm->session_timeout = 10e9;

  vec_validate_init_empty_aligned (dcm->workers_main, n_mains - 1,
				   (crypto_worker_main_t) EMPTY_STRUCT,
				   CLIB_CACHE_LINE_BYTES);

  /* *INDENT-OFF* */
  vec_foreach (cwm, dcm->workers_main)
    {
      vec_validate_init_empty_aligned (cwm->ops, VLIB_FRAME_SIZE - 1, 0,
				       CLIB_CACHE_LINE_BYTES);
      clib_memset (cwm->cipher_resource_idx, ~0,
		   IPSEC_CRYPTO_N_ALG * sizeof (*cwm->cipher_resource_idx));
      clib_memset (cwm->auth_resource_idx, ~0,
		   IPSEC_INTEG_N_ALG * sizeof (*cwm->auth_resource_idx));
    }
  /* *INDENT-ON* */

  crypto_auto_placement ();

  error = crypto_create_pools (vm);
  if (error)
    {
      clib_error_report (error);
      crypto_disable ();
      return 0;
    }

  ipsec_register_esp_backend (vm, im, "dpdk backend",
			      "dpdk-esp4-encrypt",
			      "dpdk-esp4-decrypt",
			      "dpdk-esp6-encrypt",
			      "dpdk-esp6-decrypt",
			      dpdk_ipsec_check_support, add_del_sa_session);

  vlib_node_t *node = vlib_get_node_by_name (vm, (u8 *) "dpdk-crypto-input");
  ASSERT (node);
  for (i = skip_master; i < n_mains; i++)
    vlib_node_set_state (vlib_mains[i], node->index,
			 VLIB_NODE_STATE_POLLING);

  return 0;
}

/* *INDENT-OFF* */
VLIB_REGISTER_NODE (dpdk_ipsec_process_node, static) = {
    .function = dpdk_ipsec_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "dpdk-ipsec-process",
    .process_log2_n_stack_bytes = 17,
};
/* *INDENT-ON* */

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */