author    | Filip Tehlar <ftehlar@cisco.com> | 2020-02-04 09:36:04 +0000
committer | Damjan Marion <dmarion@me.com>   | 2020-02-11 23:07:38 +0000
commit    | efcad1a9d22c4a664f3004cafe09d9c3a68e1620 (patch)
tree      | 5d0668c307083f096f6034d5ae8a608078640d18 /src/vnet
parent    | 16d974ec59776f0103ad62d0d04dc57989eef7ed (diff)
ipsec: add support for chained buffers
Type: feature
Change-Id: Ie072a7c2bbb1e4a77f7001754f01897efd30fc53
Signed-off-by: Filip Tehlar <ftehlar@cisco.com>
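For orientation, the core of the change is a new chunk descriptor and a chained-ops entry point in src/vnet/crypto/crypto.h (see the hunks below). The fragment that follows is only an illustrative sketch of how a caller could describe a payload spanning two buffer segments and submit it; the segment pointers, lengths, key index and the choice of AES-128-CBC are placeholders, while the types, flag and functions are the ones added by this patch.

/* Illustrative sketch only: seg0/seg1, their lengths, key_index and iv are
 * hypothetical inputs; the API is the one introduced by this patch. */
static u32
encrypt_two_segments (vlib_main_t * vm, u8 * seg0, u32 len0,
                      u8 * seg1, u32 len1, u32 key_index, u8 * iv)
{
  vnet_crypto_op_t _op, *op = &_op;
  vnet_crypto_op_chunk_t chunks[2];

  vnet_crypto_op_init (op, VNET_CRYPTO_OP_AES_128_CBC_ENC);
  op->key_index = key_index;
  op->iv = iv;
  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
  op->chunk_index = 0;          /* first chunk of this op in the chunk array */
  op->n_chunks = 2;             /* replaces op->len for chained ops */

  chunks[0].src = chunks[0].dst = seg0;
  chunks[0].len = len0;
  chunks[1].src = chunks[1].dst = seg1;
  chunks[1].len = len1;

  /* dispatched to the active engine's chained_ops handler */
  return vnet_crypto_process_chained_ops (vm, op, chunks, 1);
}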
Diffstat (limited to 'src/vnet')
-rw-r--r-- | src/vnet/crypto/cli.c        |  30
-rw-r--r-- | src/vnet/crypto/crypto.c     | 183
-rw-r--r-- | src/vnet/crypto/crypto.h     |  95
-rw-r--r-- | src/vnet/crypto/format.c     |   5
-rw-r--r-- | src/vnet/ipsec/esp_decrypt.c | 444
-rw-r--r-- | src/vnet/ipsec/esp_encrypt.c | 183
-rw-r--r-- | src/vnet/ipsec/ipsec.h       |   4
7 files changed, 790 insertions(+), 154 deletions(-)
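Most of the insertions extend engine registration and handler selection so that every op id can carry both a "simple" and a "chained" active engine. As a rough sketch of the new registration path (the engine name, priority and handler bodies are placeholders, and real handlers would perform the crypto work and set each op's status), an engine advertising scatter-gather support would now do something like:

static u32
my_aes_cbc_enc (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops)
{
  /* contiguous-buffer path; a real handler encrypts and sets op->status */
  return n_ops;
}

static u32
my_aes_cbc_enc_chained (vlib_main_t * vm, vnet_crypto_op_t * ops[],
                        vnet_crypto_op_chunk_t * chunks, u32 n_ops)
{
  /* scatter-gather path: each op covers chunks[op->chunk_index ..
     op->chunk_index + op->n_chunks) */
  return n_ops;
}

static clib_error_t *
my_engine_init (vlib_main_t * vm)
{
  u32 eidx = vnet_crypto_register_engine (vm, "my-engine", 80,
                                          "example engine");
  /* registers the simple and the chained variant in one call */
  vnet_crypto_register_ops_handlers (vm, eidx, VNET_CRYPTO_OP_AES_128_CBC_ENC,
                                     my_aes_cbc_enc, my_aes_cbc_enc_chained);
  return 0;
}

With the cli.c extension, the chained handler can also be pinned independently of the simple one, e.g. "set crypto handler aes-128-cbc my-engine chained" (the engine name is again a placeholder).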
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c index 8d523aec484..ef6371ad3dc 100644 --- a/src/vnet/crypto/cli.c +++ b/src/vnet/crypto/cli.c @@ -74,14 +74,24 @@ format_vnet_crypto_handlers (u8 * s, va_list * args) od = cm->opt_data + id; if (first == 0) s = format (s, "\n%U", format_white_space, indent); - s = format (s, "%-20U%-20U", format_vnet_crypto_op_type, od->type, - format_vnet_crypto_engine, od->active_engine_index,s); + s = format (s, "%-22U%-20U", format_vnet_crypto_op_type, od->type, 0, + format_vnet_crypto_engine, od->active_engine_index_simple,s); vec_foreach (e, cm->engines) { if (e->ops_handlers[id] != 0) s = format (s, "%U ", format_vnet_crypto_engine, e - cm->engines); } + + s = format (s, "\n%U", format_white_space, indent); + s = format (s, "%-22U%-20U", format_vnet_crypto_op_type, od->type, 1, + format_vnet_crypto_engine, + od->active_engine_index_chained); + vec_foreach (e, cm->engines) + { + if (e->chained_ops_handlers[id] != 0) + s = format (s, "%U ", format_vnet_crypto_engine, e - cm->engines); + } first = 0; } return s; @@ -98,7 +108,7 @@ show_crypto_handlers_command_fn (vlib_main_t * vm, if (unformat_user (input, unformat_line_input, line_input)) unformat_free (line_input); - vlib_cli_output (vm, "%-20s%-20s%-20s%s", "Algo", "Type", "Active", + vlib_cli_output (vm, "%-20s%-22s%-20s%s", "Algo", "Type", "Active", "Candidates"); for (i = 0; i < VNET_CRYPTO_N_ALGS; i++) @@ -128,6 +138,7 @@ set_crypto_handler_command_fn (vlib_main_t * vm, char **args = 0, *s, **arg, *engine = 0; int all = 0; clib_error_t *error = 0; + crypto_op_class_type_t oct = CRYPTO_OP_BOTH; if (!unformat_user (input, unformat_line_input, line_input)) return 0; @@ -136,6 +147,12 @@ set_crypto_handler_command_fn (vlib_main_t * vm, { if (unformat (line_input, "all")) all = 1; + else if (unformat (line_input, "simple")) + oct = CRYPTO_OP_SIMPLE; + else if (unformat (line_input, "chained")) + oct = CRYPTO_OP_CHAINED; + else if (unformat (line_input, "both")) + oct = CRYPTO_OP_BOTH; else if (unformat (line_input, "%s", &s)) vec_add1 (args, s); else @@ -163,7 +180,7 @@ set_crypto_handler_command_fn (vlib_main_t * vm, hash_foreach_mem (key, value, cm->alg_index_by_name, ({ (void) value; - rc += vnet_crypto_set_handler (key, engine); + rc += vnet_crypto_set_handler2 (key, engine, oct); })); /* *INDENT-ON* */ @@ -174,7 +191,7 @@ set_crypto_handler_command_fn (vlib_main_t * vm, { vec_foreach (arg, args) { - rc = vnet_crypto_set_handler (arg[0], engine); + rc = vnet_crypto_set_handler2 (arg[0], engine, oct); if (rc) { vlib_cli_output (vm, "failed to set engine %s for %s!", @@ -195,7 +212,8 @@ done: VLIB_CLI_COMMAND (set_crypto_handler_command, static) = { .path = "set crypto handler", - .short_help = "set crypto handler cipher [cipher2 cipher3 ...] engine", + .short_help = "set crypto handler cipher [cipher2 cipher3 ...] 
engine" + " [simple|chained]", .function = set_crypto_handler_command_fn, }; /* *INDENT-ON* */ diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c index 6cd12108f6c..4458bfcff4d 100644 --- a/src/vnet/crypto/crypto.c +++ b/src/vnet/crypto/crypto.c @@ -19,31 +19,52 @@ vnet_crypto_main_t crypto_main; +static_always_inline void +crypto_set_op_status (vnet_crypto_op_t * ops[], u32 n_ops, int status) +{ + while (n_ops--) + { + ops[0]->status = status; + ops++; + } +} + static_always_inline u32 vnet_crypto_process_ops_call_handler (vlib_main_t * vm, vnet_crypto_main_t * cm, vnet_crypto_op_id_t opt, - vnet_crypto_op_t * ops[], u32 n_ops) + vnet_crypto_op_t * ops[], + vnet_crypto_op_chunk_t * chunks, + u32 n_ops) { + u32 rv = 0; if (n_ops == 0) return 0; - if (cm->ops_handlers[opt] == 0) + if (chunks) { - while (n_ops--) - { - ops[0]->status = VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER; - ops++; - } - return 0; - } - return (cm->ops_handlers[opt]) (vm, ops, n_ops); + if (cm->chained_ops_handlers[opt] == 0) + crypto_set_op_status (ops, n_ops, + VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER); + else + rv = (cm->chained_ops_handlers[opt]) (vm, ops, chunks, n_ops); + } + else + { + if (cm->ops_handlers[opt] == 0) + crypto_set_op_status (ops, n_ops, + VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER); + else + rv = (cm->ops_handlers[opt]) (vm, ops, n_ops); + } + return rv; } -u32 -vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops) +static_always_inline u32 +vnet_crypto_process_ops_inline (vlib_main_t * vm, vnet_crypto_op_t ops[], + vnet_crypto_op_chunk_t * chunks, u32 n_ops) { vnet_crypto_main_t *cm = &crypto_main; const int op_q_size = VLIB_FRAME_SIZE; @@ -61,7 +82,8 @@ vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops) if (current_op_type != opt || n_op_queue >= op_q_size) { rv += vnet_crypto_process_ops_call_handler (vm, cm, current_op_type, - op_queue, n_op_queue); + op_queue, chunks, + n_op_queue); n_op_queue = 0; current_op_type = opt; } @@ -70,11 +92,24 @@ vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops) } rv += vnet_crypto_process_ops_call_handler (vm, cm, current_op_type, - op_queue, n_op_queue); + op_queue, chunks, n_op_queue); return rv; } u32 +vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops) +{ + return vnet_crypto_process_ops_inline (vm, ops, 0, n_ops); +} + +u32 +vnet_crypto_process_chained_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], + vnet_crypto_op_chunk_t * chunks, u32 n_ops) +{ + return vnet_crypto_process_ops_inline (vm, ops, chunks, n_ops); +} + +u32 vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio, char *desc) { @@ -91,13 +126,40 @@ vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio, return p - cm->engines; } +static_always_inline void +crypto_set_active_engine (vnet_crypto_op_data_t * od, + vnet_crypto_op_id_t id, u32 ei, + crypto_op_class_type_t oct) +{ + vnet_crypto_main_t *cm = &crypto_main; + vnet_crypto_engine_t *ce = vec_elt_at_index (cm->engines, ei); + + if (oct == CRYPTO_OP_BOTH || oct == CRYPTO_OP_CHAINED) + { + if (ce->chained_ops_handlers[id]) + { + od->active_engine_index_chained = ei; + cm->chained_ops_handlers[id] = ce->chained_ops_handlers[id]; + } + } + + if (oct == CRYPTO_OP_BOTH || oct == CRYPTO_OP_SIMPLE) + { + if (ce->ops_handlers[id]) + { + od->active_engine_index_simple = ei; + cm->ops_handlers[id] = ce->ops_handlers[id]; + } + } +} + int -vnet_crypto_set_handler (char *alg_name, char *engine) +vnet_crypto_set_handler2 
(char *alg_name, char *engine, + crypto_op_class_type_t oct) { uword *p; vnet_crypto_main_t *cm = &crypto_main; vnet_crypto_alg_data_t *ad; - vnet_crypto_engine_t *ce; int i; p = hash_get_mem (cm->alg_index_by_name, alg_name); @@ -110,20 +172,15 @@ vnet_crypto_set_handler (char *alg_name, char *engine) if (!p) return -1; - ce = vec_elt_at_index (cm->engines, p[0]); - - for (i = 0; i < VNET_CRYPTO_OP_N_TYPES; i++) + for (i = 0; i < VNET_CRYPTO_OP_N_TYPES; i += 2) { vnet_crypto_op_data_t *od; vnet_crypto_op_id_t id = ad->op_by_type[i]; if (id == 0) continue; + od = cm->opt_data + id; - if (ce->ops_handlers[id]) - { - od->active_engine_index = p[0]; - cm->ops_handlers[id] = ce->ops_handlers[id]; - } + crypto_set_active_engine (od, id, p[0], oct); } return 0; @@ -138,34 +195,78 @@ vnet_crypto_is_set_handler (vnet_crypto_alg_t alg) } void -vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index, - vnet_crypto_op_id_t opt, - vnet_crypto_ops_handler_t * fn) +vnet_crypto_register_ops_handler_inline (vlib_main_t * vm, u32 engine_index, + vnet_crypto_op_id_t opt, + vnet_crypto_ops_handler_t * fn, + vnet_crypto_chained_ops_handler_t * + cfn) { vnet_crypto_main_t *cm = &crypto_main; vnet_crypto_engine_t *ae, *e = vec_elt_at_index (cm->engines, engine_index); vnet_crypto_op_data_t *otd = cm->opt_data + opt; vec_validate_aligned (cm->ops_handlers, VNET_CRYPTO_N_OP_IDS - 1, CLIB_CACHE_LINE_BYTES); - e->ops_handlers[opt] = fn; + vec_validate_aligned (cm->chained_ops_handlers, VNET_CRYPTO_N_OP_IDS - 1, + CLIB_CACHE_LINE_BYTES); - if (otd->active_engine_index == ~0) + if (fn) { - otd->active_engine_index = engine_index; - cm->ops_handlers[opt] = fn; - return; + e->ops_handlers[opt] = fn; + if (otd->active_engine_index_simple == ~0) + { + otd->active_engine_index_simple = engine_index; + cm->ops_handlers[opt] = fn; + } + + ae = vec_elt_at_index (cm->engines, otd->active_engine_index_simple); + if (ae->priority < e->priority) + crypto_set_active_engine (otd, opt, engine_index, CRYPTO_OP_SIMPLE); } - ae = vec_elt_at_index (cm->engines, otd->active_engine_index); - if (ae->priority < e->priority) + + if (cfn) { - otd->active_engine_index = engine_index; - cm->ops_handlers[opt] = fn; + e->chained_ops_handlers[opt] = cfn; + if (otd->active_engine_index_chained == ~0) + { + otd->active_engine_index_chained = engine_index; + cm->chained_ops_handlers[opt] = cfn; + } + + ae = vec_elt_at_index (cm->engines, otd->active_engine_index_chained); + if (ae->priority < e->priority) + crypto_set_active_engine (otd, opt, engine_index, CRYPTO_OP_CHAINED); } return; } void +vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index, + vnet_crypto_op_id_t opt, + vnet_crypto_ops_handler_t * fn) +{ + vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, fn, 0); +} + +void +vnet_crypto_register_chained_ops_handler (vlib_main_t * vm, u32 engine_index, + vnet_crypto_op_id_t opt, + vnet_crypto_chained_ops_handler_t * + fn) +{ + vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, 0, fn); +} + +void +vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index, + vnet_crypto_op_id_t opt, + vnet_crypto_ops_handler_t * fn, + vnet_crypto_chained_ops_handler_t * cfn) +{ + vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, fn, cfn); +} + +void vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index, vnet_crypto_key_handler_t * key_handler) { @@ -253,10 +354,13 @@ vnet_crypto_init_cipher_data (vnet_crypto_alg_t alg, vnet_crypto_op_id_t eid, { vnet_crypto_op_type_t 
eopt, dopt; vnet_crypto_main_t *cm = &crypto_main; + cm->algs[alg].name = name; cm->opt_data[eid].alg = cm->opt_data[did].alg = alg; - cm->opt_data[eid].active_engine_index = ~0; - cm->opt_data[did].active_engine_index = ~0; + cm->opt_data[eid].active_engine_index_simple = ~0; + cm->opt_data[did].active_engine_index_simple = ~0; + cm->opt_data[eid].active_engine_index_chained = ~0; + cm->opt_data[did].active_engine_index_chained = ~0; if (is_aead) { eopt = VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT; @@ -282,7 +386,8 @@ vnet_crypto_init_hmac_data (vnet_crypto_alg_t alg, cm->algs[alg].name = name; cm->algs[alg].op_by_type[VNET_CRYPTO_OP_TYPE_HMAC] = id; cm->opt_data[id].alg = alg; - cm->opt_data[id].active_engine_index = ~0; + cm->opt_data[id].active_engine_index_simple = ~0; + cm->opt_data[id].active_engine_index_chained = ~0; cm->opt_data[id].type = VNET_CRYPTO_OP_TYPE_HMAC; hash_set_mem (cm->alg_index_by_name, name, alg); } diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h index 626e71d5030..f89ecf9f3d5 100644 --- a/src/vnet/crypto/crypto.h +++ b/src/vnet/crypto/crypto.h @@ -116,6 +116,13 @@ typedef enum } vnet_crypto_op_id_t; /* *INDENT-ON* */ +typedef enum +{ + CRYPTO_OP_SIMPLE, + CRYPTO_OP_CHAINED, + CRYPTO_OP_BOTH, +} crypto_op_class_type_t; + typedef struct { char *name; @@ -124,30 +131,67 @@ typedef struct typedef struct { + u8 *src; + u8 *dst; + u32 len; +} vnet_crypto_op_chunk_t; + +typedef struct +{ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); + uword user_data; vnet_crypto_op_id_t op:16; vnet_crypto_op_status_t status:8; u8 flags; #define VNET_CRYPTO_OP_FLAG_INIT_IV (1 << 0) #define VNET_CRYPTO_OP_FLAG_HMAC_CHECK (1 << 1) - u32 key_index; - u32 len; +#define VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS (1 << 2) + + union + { + u8 digest_len; + u8 tag_len; + }; u16 aad_len; - u8 digest_len, tag_len; + + union + { + struct + { + u8 *src; + u8 *dst; + }; + + /* valid if VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS is set */ + u16 n_chunks; + }; + + union + { + u32 len; + /* valid if VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS is set */ + u32 chunk_index; + }; + + u32 key_index; u8 *iv; - u8 *src; - u8 *dst; u8 *aad; - u8 *tag; - u8 *digest; - uword user_data; + + union + { + u8 *tag; + u8 *digest; + }; } vnet_crypto_op_t; +STATIC_ASSERT_SIZEOF (vnet_crypto_op_t, CLIB_CACHE_LINE_BYTES); + typedef struct { vnet_crypto_op_type_t type; vnet_crypto_alg_t alg; - u32 active_engine_index; + u32 active_engine_index_simple; + u32 active_engine_index_chained; } vnet_crypto_op_data_t; typedef struct @@ -158,6 +202,11 @@ typedef struct typedef u32 vnet_crypto_key_index_t; +typedef u32 (vnet_crypto_chained_ops_handler_t) (vlib_main_t * vm, + vnet_crypto_op_t * ops[], + vnet_crypto_op_chunk_t * + chunks, u32 n_ops); + typedef u32 (vnet_crypto_ops_handler_t) (vlib_main_t * vm, vnet_crypto_op_t * ops[], u32 n_ops); @@ -171,6 +220,18 @@ u32 vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio, void vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index, vnet_crypto_op_id_t opt, vnet_crypto_ops_handler_t * oph); + +void vnet_crypto_register_chained_ops_handler (vlib_main_t * vm, + u32 engine_index, + vnet_crypto_op_id_t opt, + vnet_crypto_chained_ops_handler_t + * oph); +void vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index, + vnet_crypto_op_id_t opt, + vnet_crypto_ops_handler_t * fn, + vnet_crypto_chained_ops_handler_t * + cfn); + void vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index, vnet_crypto_key_handler_t * keyh); @@ -181,6 +242,8 @@ typedef struct 
int priority; vnet_crypto_key_handler_t *key_op_handler; vnet_crypto_ops_handler_t *ops_handlers[VNET_CRYPTO_N_OP_IDS]; + vnet_crypto_chained_ops_handler_t + * chained_ops_handlers[VNET_CRYPTO_N_OP_IDS]; } vnet_crypto_engine_t; typedef struct @@ -188,6 +251,7 @@ typedef struct vnet_crypto_alg_data_t *algs; vnet_crypto_thread_t *threads; vnet_crypto_ops_handler_t **ops_handlers; + vnet_crypto_chained_ops_handler_t **chained_ops_handlers; vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS]; vnet_crypto_engine_t *engines; vnet_crypto_key_t *keys; @@ -200,10 +264,14 @@ extern vnet_crypto_main_t crypto_main; u32 vnet_crypto_submit_ops (vlib_main_t * vm, vnet_crypto_op_t ** jobs, u32 n_jobs); +u32 vnet_crypto_process_chained_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], + vnet_crypto_op_chunk_t * chunks, + u32 n_ops); u32 vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[], u32 n_ops); -int vnet_crypto_set_handler (char *ops_handler_name, char *engine); +int vnet_crypto_set_handler2 (char *ops_handler_name, char *engine, + crypto_op_class_type_t oct); int vnet_crypto_is_set_handler (vnet_crypto_alg_t alg); u32 vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg, @@ -225,6 +293,7 @@ vnet_crypto_op_init (vnet_crypto_op_t * op, vnet_crypto_op_id_t type) op->op = type; op->flags = 0; op->key_index = ~0; + op->n_chunks = 0; } static_always_inline vnet_crypto_op_type_t @@ -243,6 +312,12 @@ vnet_crypto_get_key (vnet_crypto_key_index_t index) return vec_elt_at_index (cm->keys, index); } +static_always_inline int +vnet_crypto_set_handler (char *alg_name, char *engine) +{ + return vnet_crypto_set_handler2 (alg_name, engine, CRYPTO_OP_BOTH); +} + #endif /* included_vnet_crypto_crypto_h */ /* diff --git a/src/vnet/crypto/format.c b/src/vnet/crypto/format.c index 715941e0ee7..3210ab9eac0 100644 --- a/src/vnet/crypto/format.c +++ b/src/vnet/crypto/format.c @@ -54,7 +54,7 @@ format_vnet_crypto_op (u8 * s, va_list * args) vnet_crypto_op_id_t op = va_arg (*args, int); // vnet_crypto_op_id_t); vnet_crypto_op_data_t *otd = cm->opt_data + op; - return format (s, "%U-%U", format_vnet_crypto_op_type, otd->type, + return format (s, "%U-%U", format_vnet_crypto_op_type, otd->type, 0, format_vnet_crypto_alg, otd->alg); } @@ -62,6 +62,7 @@ u8 * format_vnet_crypto_op_type (u8 * s, va_list * args) { vnet_crypto_op_type_t opt = va_arg (*args, vnet_crypto_op_type_t); + int is_chained = va_arg (*args, int); char *strings[] = { #define _(n, s) [VNET_CRYPTO_OP_TYPE_##n] = s, foreach_crypto_op_type @@ -71,7 +72,7 @@ format_vnet_crypto_op_type (u8 * s, va_list * args) if (opt >= VNET_CRYPTO_OP_N_TYPES) return format (s, "unknown"); - return format (s, "%s", strings[opt]); + return format (s, "%s%s", strings[opt], is_chained ? 
"-chained" : ""); } u8 * diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c index 56724c00239..f29dacb3dc7 100644 --- a/src/vnet/ipsec/esp_decrypt.c +++ b/src/vnet/ipsec/esp_decrypt.c @@ -50,7 +50,7 @@ typedef enum _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \ _(REPLAY, "SA replayed packet") \ _(RUNT, "undersized packet") \ - _(CHAINED_BUFFER, "chained buffers (packet dropped)") \ + _(NO_BUFFERS, "no buffers (packet dropped)") \ _(OVERSIZED_HEADER, "buffer with oversized header (dropped)") \ _(NO_TAIL_SPACE, "no enough buffer tail space (dropped)") \ _(TUN_NO_PROTO, "no tunnel protocol") \ @@ -114,12 +114,130 @@ typedef struct i16 current_data; i16 current_length; u16 hdr_sz; + vlib_buffer_t *lb; + u32 free_buffer_index; + u8 icv_removed; } esp_decrypt_packet_data_t; -STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 3 * sizeof (u64)); +STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 5 * sizeof (u64)); #define ESP_ENCRYPT_PD_F_FD_TRANSPORT (1 << 2) +static_always_inline void +esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node, + vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts, + int e) +{ + vnet_crypto_op_t *op = ops; + u32 n_fail, n_ops = vec_len (ops); + + if (n_ops == 0) + return; + + n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops); + + while (n_fail) + { + ASSERT (op - ops < n_ops); + if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) + { + u32 err, bi = op->user_data; + if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC) + err = e; + else + err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR; + b[bi]->error = node->errors[err]; + nexts[bi] = ESP_DECRYPT_NEXT_DROP; + n_fail--; + } + op++; + } +} + +static_always_inline void +esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node, + vnet_crypto_op_t * ops, vlib_buffer_t * b[], + u16 * nexts, vnet_crypto_op_chunk_t * chunks, int e) +{ + + vnet_crypto_op_t *op = ops; + u32 n_fail, n_ops = vec_len (ops); + + if (n_ops == 0) + return; + + n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops); + + while (n_fail) + { + ASSERT (op - ops < n_ops); + if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) + { + u32 err, bi = op->user_data; + if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC) + err = e; + else + err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR; + b[bi]->error = node->errors[err]; + nexts[bi] = ESP_DECRYPT_NEXT_DROP; + n_fail--; + } + op++; + } +} + +always_inline void +esp_remove_tail (vlib_main_t * vm, vlib_buffer_t * b, vlib_buffer_t * last, + u16 tail) +{ + vlib_buffer_t *before_last = b; + + if (last->current_length > tail) + { + last->current_length -= tail; + return; + } + ASSERT (b->flags & VLIB_BUFFER_NEXT_PRESENT); + + while (b->flags & VLIB_BUFFER_NEXT_PRESENT) + { + before_last = b; + b = vlib_get_buffer (vm, b->next_buffer); + } + before_last->current_length -= tail - last->current_length; + vlib_buffer_free_one (vm, before_last->next_buffer); + before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT; +} + +/* ICV is splitted in last two buffers so move it to the last buffer and + return pointer to it */ +static_always_inline u8 * +esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first, + esp_decrypt_packet_data_t * pd, u16 icv_sz) +{ + vlib_buffer_t *before_last, *bp; + u16 last_sz = pd->lb->current_length; + u16 first_sz = icv_sz - last_sz; + + bp = before_last = first; + while (bp->flags & VLIB_BUFFER_NEXT_PRESENT) + { + before_last = bp; + bp = vlib_get_buffer (vm, bp->next_buffer); + } + + u8 *lb_curr = vlib_buffer_get_current 
(pd->lb); + memmove (lb_curr + first_sz, lb_curr, last_sz); + clib_memcpy_fast (lb_curr, vlib_buffer_get_tail (before_last) - first_sz, + first_sz); + before_last->current_length -= first_sz; + pd->lb = before_last; + pd->icv_removed = 1; + pd->free_buffer_index = before_last->next_buffer; + before_last->flags &= ~VLIB_BUFFER_NEXT_PRESENT; + return lb_curr; +} + always_inline uword esp_decrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_frame_t * from_frame, @@ -131,7 +249,7 @@ esp_decrypt_inline (vlib_main_t * vm, u16 len; ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index); u32 *from = vlib_frame_vector_args (from_frame); - u32 n, n_left = from_frame->n_vectors; + u32 n_left = from_frame->n_vectors; vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs; u16 nexts[VLIB_FRAME_SIZE], *next = nexts; esp_decrypt_packet_data_t pkt_data[VLIB_FRAME_SIZE], *pd = pkt_data; @@ -139,10 +257,16 @@ esp_decrypt_inline (vlib_main_t * vm, u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0; const u8 esp_sz = sizeof (esp_header_t); ipsec_sa_t *sa0 = 0; + vnet_crypto_op_chunk_t *ch; + vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops; + vnet_crypto_op_t **integ_ops = &ptd->integ_ops; vlib_get_buffers (vm, from, b, n_left); vec_reset_length (ptd->crypto_ops); vec_reset_length (ptd->integ_ops); + vec_reset_length (ptd->chained_crypto_ops); + vec_reset_length (ptd->chained_integ_ops); + vec_reset_length (ptd->chunks); clib_memset_u16 (nexts, -1, n_left); while (n_left > 0) @@ -159,9 +283,10 @@ esp_decrypt_inline (vlib_main_t * vm, CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD); } - if (vlib_buffer_chain_linearize (vm, b[0]) != 1) + u32 n_bufs = vlib_buffer_chain_linearize (vm, b[0]); + if (n_bufs == 0) { - b[0]->error = node->errors[ESP_DECRYPT_ERROR_CHAINED_BUFFER]; + b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_BUFFERS]; next[0] = ESP_DECRYPT_NEXT_DROP; goto next; } @@ -205,10 +330,26 @@ esp_decrypt_inline (vlib_main_t * vm, pd->hdr_sz = pd->current_data - vnet_buffer (b[0])->l3_hdr_offset; payload = b[0]->data + pd->current_data; pd->seq = clib_host_to_net_u32 (((esp_header_t *) payload)->seq); + pd->free_buffer_index = 0; + pd->icv_removed = 0; + + pd->lb = b[0]; + if (n_bufs > 1) + { + /* find last buffer in the chain */ + while (pd->lb->flags & VLIB_BUFFER_NEXT_PRESENT) + pd->lb = vlib_get_buffer (vm, pd->lb->next_buffer); + + crypto_ops = &ptd->chained_crypto_ops; + integ_ops = &ptd->chained_integ_ops; + } + pd->current_length = b[0]->current_length; /* we need 4 extra bytes for HMAC calculation when ESN are used */ - if (ipsec_sa_is_set_USE_ESN (sa0) && pd->icv_sz && - (pd->current_data + pd->current_length + 4 > buffer_data_size)) + /* Chained buffers can process ESN as a separate chunk */ + if (pd->lb == b[0] && ipsec_sa_is_set_USE_ESN (sa0) && cpd.icv_sz && + (pd->lb->current_data + pd->lb->current_length + 4 + > buffer_data_size)) { b[0]->error = node->errors[ESP_DECRYPT_ERROR_NO_TAIL_SPACE]; next[0] = ESP_DECRYPT_NEXT_DROP; @@ -232,12 +373,12 @@ esp_decrypt_inline (vlib_main_t * vm, len = pd->current_length - cpd.icv_sz; current_sa_pkts += 1; - current_sa_bytes += pd->current_length; + current_sa_bytes += vlib_buffer_length_in_chain (vm, b[0]); if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE)) { vnet_crypto_op_t *op; - vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES); + vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); vnet_crypto_op_init (op, sa0->integ_op_id); op->key_index = sa0->integ_key_index; @@ -247,7 
+388,100 @@ esp_decrypt_inline (vlib_main_t * vm, op->digest = payload + len; op->digest_len = cpd.icv_sz; op->len = len; - if (ipsec_sa_is_set_USE_ESN (sa0)) + + if (pd->lb != b[0]) + { + /* buffer is chained */ + vlib_buffer_t *cb = b[0]; + op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; + op->chunk_index = vec_len (ptd->chunks); + + if (pd->lb->current_length < cpd.icv_sz) + op->digest = esp_move_icv (vm, b[0], pd, cpd.icv_sz); + else + op->digest = vlib_buffer_get_tail (pd->lb) - cpd.icv_sz; + + vec_add2 (ptd->chunks, ch, 1); + ch->len = pd->current_length; + ch->src = payload; + cb = vlib_get_buffer (vm, cb->next_buffer); + op->n_chunks = 1; + while (1) + { + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + ch->src = vlib_buffer_get_current (cb); + if (pd->lb == cb) + { + if (pd->icv_removed) + ch->len = cb->current_length; + else + ch->len = cb->current_length - cpd.icv_sz; + if (ipsec_sa_is_set_USE_ESN (sa0)) + { + u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi); + u8 tmp[ESP_MAX_ICV_SIZE], sz = sizeof (sa0->seq_hi); + u8 *esn; + vlib_buffer_t *tmp_b; + u16 space_left = vlib_buffer_space_left_at_end + (vm, pd->lb); + if (space_left < sz) + { + if (pd->icv_removed) + { + /* use pre-data area from the last bufer + that was removed from the chain */ + tmp_b = + vlib_get_buffer (vm, + pd->free_buffer_index); + esn = tmp_b->data - sz; + } + else + { + /* no space, need to allocate new buffer */ + u32 tmp_bi = 0; + vlib_buffer_alloc (vm, &tmp_bi, 1); + tmp_b = vlib_get_buffer (vm, tmp_bi); + esn = tmp_b->data; + pd->free_buffer_index = tmp_bi; + } + clib_memcpy_fast (esn, &seq_hi, sz); + + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + ch->src = esn; + ch->len = sz; + } + else + { + if (pd->icv_removed) + { + clib_memcpy_fast (vlib_buffer_get_tail + (pd->lb), &seq_hi, sz); + } + else + { + clib_memcpy_fast (tmp, op->digest, + ESP_MAX_ICV_SIZE); + clib_memcpy_fast (op->digest, &seq_hi, sz); + clib_memcpy_fast (op->digest + sz, tmp, + ESP_MAX_ICV_SIZE); + op->digest += sz; + } + ch->len += sz; + } + } + } + else + ch->len = cb->current_length; + + if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) + break; + + cb = vlib_get_buffer (vm, cb->next_buffer); + } + } + else if (ipsec_sa_is_set_USE_ESN (sa0)) { /* shift ICV by 4 bytes to insert ESN */ u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi); @@ -263,10 +497,10 @@ esp_decrypt_inline (vlib_main_t * vm, payload += esp_sz; len -= esp_sz; - if (sa0->crypto_enc_op_id != VNET_CRYPTO_OP_NONE) + if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE) { vnet_crypto_op_t *op; - vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES); + vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); vnet_crypto_op_init (op, sa0->crypto_dec_op_id); op->key_index = sa0->crypto_key_index; op->iv = payload; @@ -303,6 +537,61 @@ esp_decrypt_inline (vlib_main_t * vm, op->src = op->dst = payload += cpd.iv_sz; op->len = len - cpd.iv_sz; op->user_data = b - bufs; + + if (pd->lb != b[0]) + { + /* buffer is chained */ + vlib_buffer_t *cb = b[0]; + op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; + op->chunk_index = vec_len (ptd->chunks); + vec_add2 (ptd->chunks, ch, 1); + ch->len = len - cpd.iv_sz + cpd.icv_sz; + ch->src = ch->dst = payload; + cb = vlib_get_buffer (vm, cb->next_buffer); + op->n_chunks = 1; + + while (1) + { + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + ch->src = ch->dst = vlib_buffer_get_current (cb); + if (pd->lb == cb) + { + if (ipsec_sa_is_set_IS_AEAD (sa0)) + { + if (pd->lb->current_length < cpd.icv_sz) + { + op->tag = 
+ esp_move_icv (vm, b[0], pd, cpd.icv_sz); + + /* this chunk does not contain crypto data */ + op->n_chunks -= 1; + + /* and fix previous chunk's length as it might have + been changed */ + ASSERT (op->n_chunks > 0); + ch[-1].len = pd->lb->current_length; + break; + } + else + op->tag = + vlib_buffer_get_tail (pd->lb) - cpd.icv_sz; + } + + if (pd->icv_removed) + ch->len = cb->current_length; + else + ch->len = cb->current_length - cpd.icv_sz; + } + else + ch->len = cb->current_length; + + if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) + break; + + cb = vlib_get_buffer (vm, cb->next_buffer); + } + } } /* next */ @@ -318,52 +607,15 @@ esp_decrypt_inline (vlib_main_t * vm, current_sa_index, current_sa_pkts, current_sa_bytes); - if ((n = vec_len (ptd->integ_ops))) - { - vnet_crypto_op_t *op = ptd->integ_ops; - n -= vnet_crypto_process_ops (vm, op, n); - while (n) - { - ASSERT (op - ptd->integ_ops < vec_len (ptd->integ_ops)); - if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) - { - u32 err, bi = op->user_data; - if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC) - err = ESP_DECRYPT_ERROR_INTEG_ERROR; - else - err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR; - bufs[bi]->error = node->errors[err]; - nexts[bi] = ESP_DECRYPT_NEXT_DROP; - n--; - } - op++; - } - } - if ((n = vec_len (ptd->crypto_ops))) - { - vnet_crypto_op_t *op = ptd->crypto_ops; - n -= vnet_crypto_process_ops (vm, op, n); - while (n) - { - ASSERT (op - ptd->crypto_ops < vec_len (ptd->crypto_ops)); - if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) - { - u32 err, bi; - - bi = op->user_data; - - if (op->status == VNET_CRYPTO_OP_STATUS_FAIL_BAD_HMAC) - err = ESP_DECRYPT_ERROR_DECRYPTION_FAILED; - else - err = ESP_DECRYPT_ERROR_CRYPTO_ENGINE_ERROR; + esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts, + ESP_DECRYPT_ERROR_INTEG_ERROR); + esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts, + ptd->chunks, ESP_DECRYPT_ERROR_INTEG_ERROR); - bufs[bi]->error = node->errors[err]; - nexts[bi] = ESP_DECRYPT_NEXT_DROP; - n--; - } - op++; - } - } + esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts, + ESP_DECRYPT_ERROR_DECRYPTION_FAILED); + esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts, + ptd->chunks, ESP_DECRYPT_ERROR_DECRYPTION_FAILED); /* Post decryption ronud - adjust packet data start and length and next node */ @@ -430,11 +682,51 @@ esp_decrypt_inline (vlib_main_t * vm, ipsec_sa_anti_replay_advance (sa0, pd->seq); - esp_footer_t *f = (esp_footer_t *) (b[0]->data + pd->current_data + - pd->current_length - sizeof (*f) - - pd->icv_sz); + u8 pad_length = 0, next_header = 0; + u16 icv_sz = pd->icv_removed ? 
0 : pd->icv_sz; + + if (pd->free_buffer_index) + vlib_buffer_free_one (vm, pd->free_buffer_index); + + if (pd->lb->current_length < sizeof (esp_footer_t) + icv_sz) + { + /* esp footer is either splitted in two buffers or in the before + * last buffer */ + + vlib_buffer_t *before_last = b[0], *bp = b[0]; + while (bp->flags & VLIB_BUFFER_NEXT_PRESENT) + { + before_last = bp; + bp = vlib_get_buffer (vm, bp->next_buffer); + } + u8 *bt = vlib_buffer_get_tail (before_last); + + if (pd->lb->current_length == icv_sz) + { + esp_footer_t *f = (esp_footer_t *) (bt - sizeof (*f)); + pad_length = f->pad_length; + next_header = f->next_header; + } + else + { + pad_length = (bt - 1)[0]; + next_header = ((u8 *) vlib_buffer_get_current (pd->lb))[0]; + } + } + else + { + esp_footer_t *f = + (esp_footer_t *) (pd->lb->data + pd->lb->current_data + + pd->lb->current_length - sizeof (esp_footer_t) - + icv_sz); + pad_length = f->pad_length; + next_header = f->next_header; + } + u16 adv = pd->iv_sz + esp_sz; - u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz; + u16 tail = sizeof (esp_footer_t) + pad_length + icv_sz; + u16 tail_orig = sizeof (esp_footer_t) + pad_length + pd->icv_sz; + b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID; if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */ { @@ -450,15 +742,16 @@ esp_decrypt_inline (vlib_main_t * vm, clib_memcpy_le64 (ip, old_ip, ip_hdr_sz); b[0]->current_data = pd->current_data + adv - ip_hdr_sz; - b[0]->current_length = pd->current_length + ip_hdr_sz - tail - adv; + b[0]->current_length = pd->current_length + ip_hdr_sz - adv; + esp_remove_tail (vm, b[0], pd->lb, tail); if (is_ip6) { ip6_header_t *ip6 = (ip6_header_t *) ip; u16 len = clib_net_to_host_u16 (ip6->payload_length); - len -= adv + tail; + len -= adv + tail_orig; ip6->payload_length = clib_host_to_net_u16 (len); - ip6->protocol = f->next_header; + ip6->protocol = next_header; next[0] = ESP_DECRYPT_NEXT_IP6_INPUT; } else @@ -466,34 +759,36 @@ esp_decrypt_inline (vlib_main_t * vm, ip4_header_t *ip4 = (ip4_header_t *) ip; ip_csum_t sum = ip4->checksum; u16 len = clib_net_to_host_u16 (ip4->length); - len = clib_host_to_net_u16 (len - adv - tail - udp_sz); - sum = ip_csum_update (sum, ip4->protocol, f->next_header, + len = clib_host_to_net_u16 (len - adv - tail_orig - udp_sz); + sum = ip_csum_update (sum, ip4->protocol, next_header, ip4_header_t, protocol); sum = ip_csum_update (sum, ip4->length, len, ip4_header_t, length); ip4->checksum = ip_csum_fold (sum); - ip4->protocol = f->next_header; + ip4->protocol = next_header; ip4->length = len; next[0] = ESP_DECRYPT_NEXT_IP4_INPUT; } } else { - if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP)) + if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP)) { next[0] = ESP_DECRYPT_NEXT_IP4_INPUT; b[0]->current_data = pd->current_data + adv; - b[0]->current_length = pd->current_length - adv - tail; + b[0]->current_length = pd->current_length - adv; + esp_remove_tail (vm, b[0], pd->lb, tail); } - else if (f->next_header == IP_PROTOCOL_IPV6) + else if (next_header == IP_PROTOCOL_IPV6) { next[0] = ESP_DECRYPT_NEXT_IP6_INPUT; b[0]->current_data = pd->current_data + adv; - b[0]->current_length = pd->current_length - adv - tail; + b[0]->current_length = pd->current_length - adv; + esp_remove_tail (vm, b[0], pd->lb, tail); } else { - if (is_tun && f->next_header == IP_PROTOCOL_GRE) + if (is_tun && next_header == IP_PROTOCOL_GRE) { gre_header_t *gre; @@ -555,7 +850,7 @@ esp_decrypt_inline (vlib_main_t * vm, itp = ipsec_tun_protect_get (vnet_buffer 
(b[0])->ipsec.protect_index); - if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP)) + if (PREDICT_TRUE (next_header == IP_PROTOCOL_IP_IN_IP)) { const ip4_header_t *ip4; @@ -571,7 +866,7 @@ esp_decrypt_inline (vlib_main_t * vm, node->errors[ESP_DECRYPT_ERROR_TUN_NO_PROTO]; } } - else if (f->next_header == IP_PROTOCOL_IPV6) + else if (next_header == IP_PROTOCOL_IPV6) { const ip6_header_t *ip6; @@ -618,7 +913,6 @@ esp_decrypt_inline (vlib_main_t * vm, vlib_buffer_enqueue_to_next (vm, node, from, nexts, n_left); - b = bufs; return n_left; } diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c index 3c2fdf4ec3c..2c4da5dafa8 100644 --- a/src/vnet/ipsec/esp_encrypt.c +++ b/src/vnet/ipsec/esp_encrypt.c @@ -42,7 +42,7 @@ typedef enum _(RX_PKTS, "ESP pkts received") \ _(SEQ_CYCLED, "sequence number cycled (packet dropped)") \ _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \ - _(CHAINED_BUFFER, "chained buffers (packet dropped)") \ + _(NO_BUFFERS, "no buffers (packet dropped)") \ _(NO_TRAILER_SPACE, "no trailer space (packet dropped)") typedef enum @@ -92,21 +92,23 @@ format_esp_encrypt_trace (u8 * s, va_list * args) static_always_inline u8 * esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz, u16 * next, vlib_node_runtime_t * node, - u16 buffer_data_size) + u16 buffer_data_size, uword total_len) { static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00, }; - u16 min_length = b->current_length + sizeof (esp_footer_t); + u16 min_length = total_len + sizeof (esp_footer_t); u16 new_length = round_pow2 (min_length, block_size); u8 pad_bytes = new_length - min_length; esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) + - new_length - sizeof (esp_footer_t)); + b->current_length + pad_bytes); + u16 tail_sz = sizeof (esp_footer_t) + pad_bytes + icv_sz; - if (b->current_data + new_length + icv_sz > buffer_data_size) + if (b->current_data + tail_sz > buffer_data_size) { + // TODO alloc new buffer b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE]; next[0] = ESP_ENCRYPT_NEXT_DROP; return 0; @@ -120,7 +122,7 @@ esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz, } f->pad_length = pad_bytes; - b->current_length = new_length + icv_sz; + b->current_length += tail_sz; return &f->next_header; } @@ -204,6 +206,34 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr) } static_always_inline void +esp_process_chained_ops (vlib_main_t * vm, vlib_node_runtime_t * node, + vnet_crypto_op_t * ops, vlib_buffer_t * b[], + u16 * nexts, vnet_crypto_op_chunk_t * chunks) +{ + u32 n_fail, n_ops = vec_len (ops); + vnet_crypto_op_t *op = ops; + + if (n_ops == 0) + return; + + n_fail = n_ops - vnet_crypto_process_chained_ops (vm, op, chunks, n_ops); + + while (n_fail) + { + ASSERT (op - ops < n_ops); + + if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED) + { + u32 bi = op->user_data; + b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR]; + nexts[bi] = ESP_ENCRYPT_NEXT_DROP; + n_fail--; + } + op++; + } +} + +static_always_inline void esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node, vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts) { @@ -255,10 +285,17 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u32 current_sa_bytes = 0, spi = 0; u8 block_sz = 0, iv_sz = 0, icv_sz = 0; ipsec_sa_t *sa0 = 0; + vnet_crypto_op_chunk_t *ch; + vlib_buffer_t *lb; + vnet_crypto_op_t **crypto_ops = 
&ptd->crypto_ops; + vnet_crypto_op_t **integ_ops = &ptd->integ_ops; vlib_get_buffers (vm, from, b, n_left); vec_reset_length (ptd->crypto_ops); vec_reset_length (ptd->integ_ops); + vec_reset_length (ptd->chained_crypto_ops); + vec_reset_length (ptd->chained_integ_ops); + vec_reset_length (ptd->chunks); while (n_left > 0) { @@ -266,7 +303,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, dpo_id_t *dpo; esp_header_t *esp; u8 *payload, *next_hdr_ptr; - u16 payload_len; + u16 payload_len, payload_len_total, n_bufs; u32 hdr_len, config_index; if (n_left > 2) @@ -329,13 +366,30 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, goto trace; } - if (vlib_buffer_chain_linearize (vm, b[0]) != 1) + lb = b[0]; + n_bufs = vlib_buffer_chain_linearize (vm, b[0]); + if (n_bufs == 0) { - b[0]->error = node->errors[ESP_ENCRYPT_ERROR_CHAINED_BUFFER]; + b[0]->error = node->errors[ESP_ENCRYPT_ERROR_NO_BUFFERS]; next[0] = ESP_ENCRYPT_NEXT_DROP; goto trace; } + if (n_bufs > 1) + { + crypto_ops = &ptd->chained_crypto_ops; + integ_ops = &ptd->chained_integ_ops; + + /* find last buffer in the chain */ + while (lb->flags & VLIB_BUFFER_NEXT_PRESENT) + lb = vlib_get_buffer (vm, lb->next_buffer); + } + else + { + crypto_ops = &ptd->crypto_ops; + integ_ops = &ptd->integ_ops; + } + if (PREDICT_FALSE (esp_seq_advance (sa0))) { b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED]; @@ -349,12 +403,16 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (ipsec_sa_is_set_IS_TUNNEL (sa0)) { payload = vlib_buffer_get_current (b[0]); - next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz, + next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz, next, node, - buffer_data_size); + buffer_data_size, + vlib_buffer_length_in_chain + (vm, b[0])); if (!next_hdr_ptr) goto trace; + b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID; payload_len = b[0]->current_length; + payload_len_total = vlib_buffer_length_in_chain (vm, b[0]); /* ESP header */ hdr_len += sizeof (*esp); @@ -365,7 +423,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, { hdr_len += sizeof (udp_header_t); esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len), - payload_len + hdr_len); + payload_len_total + hdr_len); } /* IP header */ @@ -378,7 +436,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, clib_memcpy_fast (ip6, &sa0->ip6_hdr, len); *next_hdr_ptr = (is_ip6 ? IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); - len = payload_len + hdr_len - len; + len = payload_len_total + hdr_len - len; ip6->payload_length = clib_net_to_host_u16 (len); } else @@ -390,7 +448,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, clib_memcpy_fast (ip4, &sa0->ip4_hdr, len); *next_hdr_ptr = (is_ip6 ? 
IP_PROTOCOL_IPV6 : IP_PROTOCOL_IP_IN_IP); - len = payload_len + hdr_len; + len = payload_len_total + hdr_len; esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0); } @@ -414,12 +472,17 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_advance (b[0], ip_len); payload = vlib_buffer_get_current (b[0]); - next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz, + next_hdr_ptr = esp_add_footer_and_icv (lb, block_sz, icv_sz, next, node, - buffer_data_size); + buffer_data_size, + vlib_buffer_length_in_chain + (vm, b[0])); if (!next_hdr_ptr) goto trace; + + b[0]->flags &= ~VLIB_BUFFER_TOTAL_LENGTH_VALID; payload_len = b[0]->current_length; + payload_len_total = vlib_buffer_length_in_chain (vm, b[0]); /* ESP header */ hdr_len += sizeof (*esp); @@ -463,7 +526,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, ext_hdr->next_hdr = IP_PROTOCOL_IPSEC_ESP; } ip6->payload_length = - clib_host_to_net_u16 (payload_len + hdr_len - l2_len - + clib_host_to_net_u16 (payload_len_total + hdr_len - l2_len - sizeof (ip6_header_t)); } else @@ -471,7 +534,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, u16 len; ip4_header_t *ip4 = (ip4_header_t *) (old_ip_hdr); *next_hdr_ptr = ip4->protocol; - len = payload_len + hdr_len - l2_len; + len = payload_len_total + hdr_len - l2_len; if (udp) { esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1); @@ -493,8 +556,9 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, if (sa0->crypto_enc_op_id) { vnet_crypto_op_t *op; - vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES); + vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); vnet_crypto_op_init (op, sa0->crypto_enc_op_id); + op->src = op->dst = payload; op->key_index = sa0->crypto_key_index; op->len = payload_len - icv_sz; @@ -524,12 +588,42 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, op->iv = payload - iv_sz; op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV; } + + if (lb != b[0]) + { + /* is chained */ + vlib_buffer_t *cb = b[0]; + op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; + op->chunk_index = vec_len (ptd->chunks); + op->tag = vlib_buffer_get_tail (lb) - icv_sz; + vec_add2 (ptd->chunks, ch, 1); + ch->len = payload_len; + ch->src = ch->dst = payload; + cb = vlib_get_buffer (vm, cb->next_buffer); + op->n_chunks = 1; + + while (1) + { + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + if (lb == cb) + ch->len = cb->current_length - icv_sz; + else + ch->len = cb->current_length; + ch->src = ch->dst = vlib_buffer_get_current (cb); + + if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) + break; + + cb = vlib_get_buffer (vm, cb->next_buffer); + } + } } if (sa0->integ_op_id) { vnet_crypto_op_t *op; - vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES); + vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES); vnet_crypto_op_init (op, sa0->integ_op_id); op->src = payload - iv_sz - sizeof (esp_header_t); op->digest = payload + payload_len - icv_sz; @@ -537,7 +631,46 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, op->digest_len = icv_sz; op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t); op->user_data = b - bufs; - if (ipsec_sa_is_set_USE_ESN (sa0)) + + if (lb != b[0]) + { + /* is chained */ + op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS; + vlib_buffer_t *cb = b[0]; + op->chunk_index = vec_len (ptd->chunks); + op->digest = vlib_buffer_get_tail (lb) - icv_sz; + vec_add2 (ptd->chunks, ch, 1); + ch->len = payload_len + iv_sz + sizeof 
(esp_header_t); + ch->src = payload - iv_sz - sizeof (esp_header_t); + cb = vlib_get_buffer (vm, cb->next_buffer); + op->n_chunks = 1; + + while (1) + { + vec_add2 (ptd->chunks, ch, 1); + op->n_chunks += 1; + if (lb == cb) + { + ch->len = cb->current_length - icv_sz; + if (ipsec_sa_is_set_USE_ESN (sa0)) + { + u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi); + clib_memcpy_fast (op->digest, &seq_hi, + sizeof (seq_hi)); + ch->len += sizeof (seq_hi); + } + } + else + ch->len = cb->current_length; + ch->src = vlib_buffer_get_current (cb); + + if (!(cb->flags & VLIB_BUFFER_NEXT_PRESENT)) + break; + + cb = vlib_get_buffer (vm, cb->next_buffer); + } + } + else if (ipsec_sa_is_set_USE_ESN (sa0)) { u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi); clib_memcpy_fast (op->digest, &seq_hi, sizeof (seq_hi)); @@ -548,7 +681,7 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_buffer_advance (b[0], 0LL - hdr_len); current_sa_packets += 1; - current_sa_bytes += payload_len; + current_sa_bytes += payload_len_total; trace: if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED)) @@ -572,8 +705,14 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node, vlib_increment_combined_counter (&ipsec_sa_counters, thread_index, current_sa_index, current_sa_packets, current_sa_bytes); + esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts); + esp_process_chained_ops (vm, node, ptd->chained_crypto_ops, bufs, nexts, + ptd->chunks); + esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts); + esp_process_chained_ops (vm, node, ptd->chained_integ_ops, bufs, nexts, + ptd->chunks); vlib_node_increment_counter (vm, node->node_index, ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors); diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h index 0c3e5778e6d..f1b7dafa9be 100644 --- a/src/vnet/ipsec/ipsec.h +++ b/src/vnet/ipsec/ipsec.h @@ -90,8 +90,12 @@ typedef struct typedef struct { + CLIB_CACHE_LINE_ALIGN_MARK (cacheline0); vnet_crypto_op_t *crypto_ops; vnet_crypto_op_t *integ_ops; + vnet_crypto_op_t *chained_crypto_ops; + vnet_crypto_op_t *chained_integ_ops; + vnet_crypto_op_chunk_t *chunks; } ipsec_per_thread_data_t; typedef struct |
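The ipsec.h hunk adds per-thread vectors for the chained ops and their chunks. Both ESP nodes then follow the same pattern when a packet cannot be linearized: walk the buffer chain and emit one chunk per segment. The loop below is a deliberately simplified sketch of that pattern, modelled on the esp_encrypt.c hunks above; ICV relocation, ESN handling and the last-buffer special cases are omitted.

/* Simplified sketch: one vnet_crypto_op_chunk_t per buffer segment.
 * 'payload'/'first_len' describe the portion of the head buffer to cover;
 * error handling, ICV and ESN details from the real nodes are left out. */
static void
build_chunks_for_chain (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
                        vnet_crypto_op_t * op, vlib_buffer_t * b,
                        u8 * payload, u16 first_len)
{
  vnet_crypto_op_chunk_t *ch;

  op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
  op->chunk_index = vec_len (ptd->chunks);

  vec_add2 (ptd->chunks, ch, 1);
  ch->src = ch->dst = payload;  /* head segment starts at the ESP payload */
  ch->len = first_len;
  op->n_chunks = 1;

  while (b->flags & VLIB_BUFFER_NEXT_PRESENT)
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      vec_add2 (ptd->chunks, ch, 1);
      ch->src = ch->dst = vlib_buffer_get_current (b);
      ch->len = b->current_length;
      op->n_chunks += 1;
    }
}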