Diffstat (limited to 'src/vnet')
-rw-r--r--  src/vnet/CMakeLists.txt | 4
-rw-r--r--  src/vnet/arp/arp_proxy.c | 3
-rw-r--r--  src/vnet/crypto/cli.c | 319
-rw-r--r--  src/vnet/crypto/crypto.c | 484
-rw-r--r--  src/vnet/crypto/crypto.h | 320
-rw-r--r--  src/vnet/crypto/crypto_api.c | 29
-rw-r--r--  src/vnet/crypto/engine.h | 6
-rw-r--r--  src/vnet/crypto/format.c | 6
-rw-r--r--  src/vnet/crypto/main.c | 108
-rw-r--r--  src/vnet/crypto/node.c | 9
-rw-r--r--  src/vnet/dev/port.c | 14
-rw-r--r--  src/vnet/ip/ip4_forward.c | 17
-rw-r--r--  src/vnet/ipsec/ah_decrypt.c | 55
-rw-r--r--  src/vnet/ipsec/ah_encrypt.c | 106
-rw-r--r--  src/vnet/ipsec/esp.h | 35
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c | 216
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c | 234
-rw-r--r--  src/vnet/ipsec/ipsec.c | 157
-rw-r--r--  src/vnet/ipsec/ipsec.h | 31
-rw-r--r--  src/vnet/ipsec/ipsec_api.c | 179
-rw-r--r--  src/vnet/ipsec/ipsec_cli.c | 7
-rw-r--r--  src/vnet/ipsec/ipsec_format.c | 33
-rw-r--r--  src/vnet/ipsec/ipsec_funcs.h | 41
-rw-r--r--  src/vnet/ipsec/ipsec_sa.c | 353
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h | 319
-rw-r--r--  src/vnet/ipsec/ipsec_tun.c | 13
-rw-r--r--  src/vnet/ipsec/main.c | 201
-rw-r--r--  src/vnet/session/application_interface.h | 8
-rw-r--r--  src/vnet/session/application_local.c | 4
-rw-r--r--  src/vnet/session/application_worker.c | 8
-rw-r--r--  src/vnet/session/segment_manager.c | 8
-rw-r--r--  src/vnet/session/session.c | 12
-rw-r--r--  src/vnet/session/session.h | 6
-rw-r--r--  src/vnet/session/session_api.c | 9
-rw-r--r--  src/vnet/session/session_debug.c | 2
-rw-r--r--  src/vnet/session/session_node.c | 8
-rw-r--r--  src/vnet/session/transport.c | 42
-rw-r--r--  src/vnet/session/transport.h | 8
-rw-r--r--  src/vnet/srv6/sr_localsid.c | 1
-rw-r--r--  src/vnet/tcp/tcp.c | 4
-rw-r--r--  src/vnet/tls/tls.c | 6
-rw-r--r--  src/vnet/udp/udp.c | 12
42 files changed, 1686 insertions, 1751 deletions
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index 46b1a870e9e..a071709542a 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -524,6 +524,7 @@ list(APPEND VNET_SOURCES
crypto/cli.c
crypto/crypto.c
crypto/format.c
+ crypto/main.c
crypto/node.c
crypto/crypto_api.c
)
@@ -552,6 +553,7 @@ list(APPEND VNET_SOURCES
ipsec/ipsec_spd_policy.c
ipsec/ipsec_tun.c
ipsec/ipsec_tun_in.c
+ ipsec/main.c
ipsec/esp_format.c
ipsec/esp_encrypt.c
ipsec/esp_decrypt.c
@@ -587,6 +589,7 @@ list(APPEND VNET_HEADERS
ipsec/ipsec_tun.h
ipsec/ipsec_types_api.h
ipsec/ipsec_punt.h
+ ipsec/ipsec_funcs.h
ipsec/esp.h
ipsec/ah.h
)
@@ -625,6 +628,7 @@ list(APPEND VNET_HEADERS
tcp/tcp_debug.h
tcp/tcp_inlines.h
tcp/tcp_sack.h
+ tcp/tcp_sdl.h
tcp/tcp_types.h
tcp/tcp.h
tcp/tcp_error.def
diff --git a/src/vnet/arp/arp_proxy.c b/src/vnet/arp/arp_proxy.c
index 39f624d5a1d..16ed09bee14 100644
--- a/src/vnet/arp/arp_proxy.c
+++ b/src/vnet/arp/arp_proxy.c
@@ -251,7 +251,8 @@ VLIB_CLI_COMMAND (set_int_proxy_enable_command, static) = {
VLIB_CLI_COMMAND (set_arp_proxy_command, static) = {
.path = "set arp proxy",
- .short_help = "set arp proxy [del] table-ID <table-ID> start <start-address> end <end-addres>",
+ .short_help = "set arp proxy [del] table-id <table-id> start "
+ "<start-address> end <end-addres>",
.function = set_arp_proxy,
};
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c
index 2ca66f228c3..4cfa1bb1abc 100644
--- a/src/vnet/crypto/cli.c
+++ b/src/vnet/crypto/cli.c
@@ -1,19 +1,7 @@
-/*
- * Copyright (c) 2019 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
*/
-#include <stdbool.h>
#include <vlib/vlib.h>
#include <vnet/crypto/crypto.h>
@@ -38,7 +26,8 @@ show_crypto_engines_command_fn (vlib_main_t * vm,
vlib_cli_output (vm, "%-20s%-8s%s", "Name", "Prio", "Description");
vec_foreach (p, cm->engines)
{
- vlib_cli_output (vm, "%-20s%-8u%s", p->name, p->priority, p->desc);
+ if (p->name)
+ vlib_cli_output (vm, "%-20s%-8u%s", p->name, p->priority, p->desc);
}
return 0;
}
@@ -50,100 +39,53 @@ VLIB_CLI_COMMAND (show_crypto_engines_command, static) =
.function = show_crypto_engines_command_fn,
};
-static u8 *
-format_vnet_crypto_engine_candidates (u8 * s, va_list * args)
-{
- vnet_crypto_engine_t *e;
- vnet_crypto_main_t *cm = &crypto_main;
- u32 id = va_arg (*args, u32);
- u32 ei = va_arg (*args, u32);
- int is_chained = va_arg (*args, int);
- int is_async = va_arg (*args, int);
-
- if (is_async)
- {
- vec_foreach (e, cm->engines)
- {
- if (e->enqueue_handlers[id] && e->dequeue_handler)
- {
- s = format (s, "%U", format_vnet_crypto_engine, e - cm->engines);
- if (ei == e - cm->engines)
- s = format (s, "%c ", '*');
- else
- s = format (s, " ");
- }
- }
-
- return s;
- }
- else
- {
- vec_foreach (e, cm->engines)
- {
- void * h = is_chained ? (void *) e->chained_ops_handlers[id]
- : (void *) e->ops_handlers[id];
-
- if (h)
- {
- s = format (s, "%U", format_vnet_crypto_engine, e - cm->engines);
- if (ei == e - cm->engines)
- s = format (s, "%c ", '*');
- else
- s = format (s, " ");
- }
- }
- return s;
- }
-}
-
-static u8 *
-format_vnet_crypto_handlers (u8 * s, va_list * args)
-{
- vnet_crypto_alg_t alg = va_arg (*args, vnet_crypto_alg_t);
- vnet_crypto_main_t *cm = &crypto_main;
- vnet_crypto_alg_data_t *d = vec_elt_at_index (cm->algs, alg);
- u32 indent = format_get_indent (s);
- int i, first = 1;
-
- for (i = 0; i < VNET_CRYPTO_OP_N_TYPES; i++)
- {
- vnet_crypto_op_data_t *od;
- vnet_crypto_op_id_t id = d->op_by_type[i];
-
- if (id == 0)
- continue;
-
- od = cm->opt_data + id;
- if (first == 0)
- s = format (s, "\n%U", format_white_space, indent);
- s = format (s, "%-16U", format_vnet_crypto_op_type, od->type);
-
- s = format (s, "%-28U", format_vnet_crypto_engine_candidates, id,
- od->active_engine_index_simple, 0, 0);
- s = format (s, "%U", format_vnet_crypto_engine_candidates, id,
- od->active_engine_index_chained, 1, 0);
- first = 0;
- }
- return s;
-}
-
-
static clib_error_t *
show_crypto_handlers_command_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
+ vnet_crypto_main_t *cm = &crypto_main;
unformat_input_t _line_input, *line_input = &_line_input;
- int i;
+ u8 *s = 0;
+ char *handler_type_str[] = {
+#define _(n, s) [VNET_CRYPTO_HANDLER_TYPE_##n] = s,
+ foreach_crypto_handler_type
+ };
if (unformat_user (input, unformat_line_input, line_input))
unformat_free (line_input);
- vlib_cli_output (vm, "%-16s%-16s%-28s%s", "Algo", "Type", "Simple",
- "Chained");
+ FOREACH_ARRAY_ELT (a, cm->algs)
+ {
+ if (a == cm->algs)
+ continue;
- for (i = 0; i < VNET_CRYPTO_N_ALGS; i++)
- vlib_cli_output (vm, "%-20U%U", format_vnet_crypto_alg, i,
- format_vnet_crypto_handlers, i);
+ vlib_cli_output (vm, "\n%s:", a->name);
+ for (u32 i = 0; i < VNET_CRYPTO_OP_N_TYPES; i++)
+ if (a->op_by_type[i] != VNET_CRYPTO_OP_NONE)
+ {
+ vlib_cli_output (vm, " %U:", format_vnet_crypto_op_type, i);
+ vnet_crypto_op_id_t id = a->op_by_type[i];
+ vnet_crypto_op_data_t *od = cm->opt_data + id;
+ vnet_crypto_engine_t *e;
+
+ for (u32 i = 0; i < VNET_CRYPTO_HANDLER_N_TYPES; i++)
+ {
+ vec_foreach (e, cm->engines)
+ {
+ if (e->ops[id].handlers[i])
+ {
+ s = format (s, " %s", e->name);
+ if (e->ops[id].handlers[i] == od->handlers[i])
+ s = format (s, "*");
+ }
+ }
+
+ vlib_cli_output (vm, " %s:%v", handler_type_str[i], s);
+ vec_reset_length (s);
+ }
+ }
+ }
+ vec_free (s);
return 0;
}
@@ -163,10 +105,10 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
unformat_input_t _line_input, *line_input = &_line_input;
vnet_crypto_main_t *cm = &crypto_main;
int rc = 0;
- char **args = 0, *s, **arg, *engine = 0;
+ char **args = 0, *s, **arg;
int all = 0;
clib_error_t *error = 0;
- crypto_op_class_type_t oct = CRYPTO_OP_BOTH;
+ vnet_crypto_set_handlers_args_t ha = {};
if (!unformat_user (input, unformat_line_input, line_input))
return 0;
@@ -176,11 +118,13 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
if (unformat (line_input, "all"))
all = 1;
else if (unformat (line_input, "simple"))
- oct = CRYPTO_OP_SIMPLE;
+ ha.set_simple = 1;
else if (unformat (line_input, "chained"))
- oct = CRYPTO_OP_CHAINED;
+ ha.set_chained = 1;
else if (unformat (line_input, "both"))
- oct = CRYPTO_OP_BOTH;
+ ha.set_simple = ha.set_chained = 1;
+ else if (unformat (line_input, "async"))
+ ha.set_async = 1;
else if (unformat (line_input, "%s", &s))
vec_add1 (args, s);
else
@@ -196,7 +140,7 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
goto done;
}
- engine = vec_elt_at_index (args, vec_len (args) - 1)[0];
+ ha.engine = vec_elt_at_index (args, vec_len (args) - 1)[0];
vec_del1 (args, vec_len (args) - 1);
if (all)
@@ -207,7 +151,8 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
hash_foreach_mem (key, value, cm->alg_index_by_name,
({
(void) value;
- rc += vnet_crypto_set_handler2 (key, engine, oct);
+ ha.handler_name = key;
+ rc += vnet_crypto_set_handlers (&ha);
}));
if (rc)
@@ -217,88 +162,29 @@ set_crypto_handler_command_fn (vlib_main_t * vm,
{
vec_foreach (arg, args)
{
- rc = vnet_crypto_set_handler2 (arg[0], engine, oct);
- if (rc)
- {
- vlib_cli_output (vm, "failed to set engine %s for %s!",
- engine, arg[0]);
- }
+ ha.handler_name = arg[0];
+ rc = vnet_crypto_set_handlers (&ha);
+ if (rc)
+ vlib_cli_output (vm, "failed to set engine %s for %s!", ha.engine,
+ arg[0]);
}
}
done:
- vec_free (engine);
+ vec_free (ha.engine);
vec_foreach (arg, args) vec_free (arg[0]);
vec_free (args);
unformat_free (line_input);
return error;
}
-VLIB_CLI_COMMAND (set_crypto_handler_command, static) =
-{
+VLIB_CLI_COMMAND (set_crypto_handler_command, static) = {
.path = "set crypto handler",
.short_help = "set crypto handler cipher [cipher2 cipher3 ...] engine"
- " [simple|chained]",
+ " [simple|chained|async]",
.function = set_crypto_handler_command_fn,
};
-static u8 *
-format_vnet_crypto_async_handlers (u8 * s, va_list * args)
-{
- vnet_crypto_async_alg_t alg = va_arg (*args, vnet_crypto_async_alg_t);
- vnet_crypto_main_t *cm = &crypto_main;
- vnet_crypto_async_alg_data_t *d = vec_elt_at_index (cm->async_algs, alg);
- u32 indent = format_get_indent (s);
- int i, first = 1;
-
- for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_TYPES; i++)
- {
- vnet_crypto_async_op_data_t *od;
- vnet_crypto_async_op_id_t id = d->op_by_type[i];
-
- if (id == 0)
- continue;
-
- od = cm->async_opt_data + id;
- if (first == 0)
- s = format (s, "\n%U", format_white_space, indent);
- s = format (s, "%-16U", format_vnet_crypto_async_op_type, od->type);
-
- s = format (s, "%U", format_vnet_crypto_engine_candidates, id,
- od->active_engine_index_async, 0, 1);
- first = 0;
- }
- return s;
-}
-
-static clib_error_t *
-show_crypto_async_handlers_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- unformat_input_t _line_input, *line_input = &_line_input;
- int i;
-
- if (unformat_user (input, unformat_line_input, line_input))
- unformat_free (line_input);
-
- vlib_cli_output (vm, "%-28s%-16s%s", "Algo", "Type", "Handler");
-
- for (i = 0; i < VNET_CRYPTO_N_ASYNC_ALGS; i++)
- vlib_cli_output (vm, "%-28U%U", format_vnet_crypto_async_alg, i,
- format_vnet_crypto_async_handlers, i);
-
- return 0;
-}
-
-VLIB_CLI_COMMAND (show_crypto_async_handlers_command, static) =
-{
- .path = "show crypto async handlers",
- .short_help = "show crypto async handlers",
- .function = show_crypto_async_handlers_command_fn,
-};
-
-
static clib_error_t *
show_crypto_async_status_command_fn (vlib_main_t * vm,
unformat_input_t * input,
@@ -334,85 +220,6 @@ VLIB_CLI_COMMAND (show_crypto_async_status_command, static) =
};
static clib_error_t *
-set_crypto_async_handler_command_fn (vlib_main_t * vm,
- unformat_input_t * input,
- vlib_cli_command_t * cmd)
-{
- unformat_input_t _line_input, *line_input = &_line_input;
- vnet_crypto_main_t *cm = &crypto_main;
- int rc = 0;
- char **args = 0, *s, **arg, *engine = 0;
- int all = 0;
- clib_error_t *error = 0;
-
- if (!unformat_user (input, unformat_line_input, line_input))
- return 0;
-
- while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (line_input, "all"))
- all = 1;
- else if (unformat (line_input, "%s", &s))
- vec_add1 (args, s);
- else
- {
- error = clib_error_return (0, "invalid params");
- goto done;
- }
- }
-
- if ((vec_len (args) < 2 && !all) || (vec_len (args) == 0 && all))
- {
- error = clib_error_return (0, "missing cipher or engine!");
- goto done;
- }
-
- engine = vec_elt_at_index (args, vec_len (args) - 1)[0];
- vec_del1 (args, vec_len (args) - 1);
-
- if (all)
- {
- char *key;
- u8 *value;
-
- hash_foreach_mem (key, value, cm->async_alg_index_by_name,
- ({
- (void) value;
- rc += vnet_crypto_set_async_handler2 (key, engine);
- }));
-
- if (rc)
- vlib_cli_output (vm, "failed to set crypto engine!");
- }
- else
- {
- vec_foreach (arg, args)
- {
- rc = vnet_crypto_set_async_handler2 (arg[0], engine);
- if (rc)
- {
- vlib_cli_output (vm, "failed to set engine %s for %s!",
- engine, arg[0]);
- }
- }
- }
-
-done:
- vec_free (engine);
- vec_foreach (arg, args) vec_free (arg[0]);
- vec_free (args);
- unformat_free (line_input);
- return error;
-}
-
-VLIB_CLI_COMMAND (set_crypto_async_handler_command, static) =
-{
- .path = "set crypto async handler",
- .short_help = "set crypto async handler type [type2 type3 ...] engine",
- .function = set_crypto_async_handler_command_fn,
-};
-
-static clib_error_t *
set_crypto_async_dispatch_command_fn (vlib_main_t *vm, unformat_input_t *input,
vlib_cli_command_t *cmd)
{
@@ -450,11 +257,3 @@ VLIB_CLI_COMMAND (set_crypto_async_dispatch_mode_command, static) = {
.short_help = "set crypto async dispatch mode <polling|interrupt|adaptive>",
.function = set_crypto_async_dispatch_command_fn,
};
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c
index f46e307af89..d1a6a6b12a1 100644
--- a/src/vnet/crypto/crypto.c
+++ b/src/vnet/crypto/crypto.c
@@ -1,16 +1,5 @@
-/*
- * Copyright (c) 2018 Cisco and/or its affiliates.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
*/
#include <stdbool.h>
@@ -22,8 +11,6 @@
#include <dlfcn.h>
#include <dirent.h>
-vnet_crypto_main_t crypto_main;
-
VLIB_REGISTER_LOG_CLASS (crypto_main_log, static) = {
.class_name = "crypto",
.subclass_name = "main",
@@ -52,26 +39,31 @@ vnet_crypto_process_ops_call_handler (vlib_main_t * vm,
vnet_crypto_op_chunk_t * chunks,
u32 n_ops)
{
+ vnet_crypto_op_data_t *od = cm->opt_data + opt;
u32 rv = 0;
if (n_ops == 0)
return 0;
if (chunks)
{
+ vnet_crypto_chained_op_fn_t *fn =
+ od->handlers[VNET_CRYPTO_HANDLER_TYPE_CHAINED];
- if (cm->chained_ops_handlers[opt] == 0)
+ if (fn == 0)
crypto_set_op_status (ops, n_ops,
VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER);
else
- rv = (cm->chained_ops_handlers[opt]) (vm, ops, chunks, n_ops);
+ rv = fn (vm, ops, chunks, n_ops);
}
else
{
- if (cm->ops_handlers[opt] == 0)
+ vnet_crypto_simple_op_fn_t *fn =
+ od->handlers[VNET_CRYPTO_HANDLER_TYPE_SIMPLE];
+ if (fn == 0)
crypto_set_op_status (ops, n_ops,
VNET_CRYPTO_OP_STATUS_FAIL_NO_HANDLER);
else
- rv = (cm->ops_handlers[opt]) (vm, ops, n_ops);
+ rv = fn (vm, ops, n_ops);
}
return rv;
}
@@ -141,48 +133,34 @@ vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
}
static_always_inline void
-crypto_set_active_engine (vnet_crypto_op_data_t * od,
- vnet_crypto_op_id_t id, u32 ei,
- crypto_op_class_type_t oct)
+crypto_set_active_engine (vnet_crypto_op_data_t *od, vnet_crypto_op_id_t id,
+ u32 ei, vnet_crypto_handler_type_t t)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *ce = vec_elt_at_index (cm->engines, ei);
- if (oct == CRYPTO_OP_BOTH || oct == CRYPTO_OP_CHAINED)
+ if (ce->ops[id].handlers[t])
{
- if (ce->chained_ops_handlers[id])
- {
- od->active_engine_index_chained = ei;
- cm->chained_ops_handlers[id] = ce->chained_ops_handlers[id];
- }
- }
-
- if (oct == CRYPTO_OP_BOTH || oct == CRYPTO_OP_SIMPLE)
- {
- if (ce->ops_handlers[id])
- {
- od->active_engine_index_simple = ei;
- cm->ops_handlers[id] = ce->ops_handlers[id];
- }
+ od->active_engine_index[t] = ei;
+ cm->opt_data[id].handlers[t] = ce->ops[id].handlers[t];
}
}
int
-vnet_crypto_set_handler2 (char *alg_name, char *engine,
- crypto_op_class_type_t oct)
+vnet_crypto_set_handlers (vnet_crypto_set_handlers_args_t *a)
{
uword *p;
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_alg_data_t *ad;
int i;
- p = hash_get_mem (cm->alg_index_by_name, alg_name);
+ p = hash_get_mem (cm->alg_index_by_name, a->handler_name);
if (!p)
return -1;
- ad = vec_elt_at_index (cm->algs, p[0]);
+ ad = cm->algs + p[0];
- p = hash_get_mem (cm->engine_index_by_name, engine);
+ p = hash_get_mem (cm->engine_index_by_name, a->engine);
if (!p)
return -1;
@@ -194,7 +172,15 @@ vnet_crypto_set_handler2 (char *alg_name, char *engine,
continue;
od = cm->opt_data + id;
- crypto_set_active_engine (od, id, p[0], oct);
+ if (a->set_async)
+ crypto_set_active_engine (od, id, p[0],
+ VNET_CRYPTO_HANDLER_TYPE_ASYNC);
+ if (a->set_simple)
+ crypto_set_active_engine (od, id, p[0],
+ VNET_CRYPTO_HANDLER_TYPE_SIMPLE);
+ if (a->set_chained)
+ crypto_set_active_engine (od, id, p[0],
+ VNET_CRYPTO_HANDLER_TYPE_CHAINED);
}
return 0;
@@ -207,117 +193,109 @@ vnet_crypto_is_set_handler (vnet_crypto_alg_t alg)
vnet_crypto_op_id_t opt = 0;
int i;
- if (alg >= vec_len (cm->algs))
+ if (alg >= ARRAY_LEN (cm->algs))
return 0;
for (i = 0; i < VNET_CRYPTO_OP_N_TYPES; i++)
if ((opt = cm->algs[alg].op_by_type[i]) != 0)
break;
- if (opt >= vec_len (cm->ops_handlers))
- return 0;
-
- return NULL != cm->ops_handlers[opt];
+ return NULL != cm->opt_data[opt].handlers[VNET_CRYPTO_HANDLER_TYPE_SIMPLE];
}
void
-vnet_crypto_register_ops_handler_inline (vlib_main_t * vm, u32 engine_index,
+vnet_crypto_register_ops_handler_inline (vlib_main_t *vm, u32 engine_index,
vnet_crypto_op_id_t opt,
- vnet_crypto_ops_handler_t * fn,
- vnet_crypto_chained_ops_handler_t *
- cfn)
+ vnet_crypto_simple_op_fn_t *fn,
+ vnet_crypto_chained_op_fn_t *cfn)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *ae, *e = vec_elt_at_index (cm->engines, engine_index);
vnet_crypto_op_data_t *otd = cm->opt_data + opt;
- vec_validate_aligned (cm->ops_handlers, VNET_CRYPTO_N_OP_IDS - 1,
- CLIB_CACHE_LINE_BYTES);
- vec_validate_aligned (cm->chained_ops_handlers, VNET_CRYPTO_N_OP_IDS - 1,
- CLIB_CACHE_LINE_BYTES);
if (fn)
{
- e->ops_handlers[opt] = fn;
- if (otd->active_engine_index_simple == ~0)
+ vnet_crypto_handler_type_t t = VNET_CRYPTO_HANDLER_TYPE_SIMPLE;
+ e->ops[opt].handlers[t] = fn;
+ if (!otd->active_engine_index[t])
{
- otd->active_engine_index_simple = engine_index;
- cm->ops_handlers[opt] = fn;
+ otd->active_engine_index[t] = engine_index;
+ cm->opt_data[opt].handlers[t] = fn;
}
- ae = vec_elt_at_index (cm->engines, otd->active_engine_index_simple);
+ ae = vec_elt_at_index (cm->engines, otd->active_engine_index[t]);
if (ae->priority < e->priority)
- crypto_set_active_engine (otd, opt, engine_index, CRYPTO_OP_SIMPLE);
+ crypto_set_active_engine (otd, opt, engine_index, t);
}
if (cfn)
{
- e->chained_ops_handlers[opt] = cfn;
- if (otd->active_engine_index_chained == ~0)
+ vnet_crypto_handler_type_t t = VNET_CRYPTO_HANDLER_TYPE_CHAINED;
+ e->ops[opt].handlers[t] = cfn;
+ if (otd->active_engine_index[t])
{
- otd->active_engine_index_chained = engine_index;
- cm->chained_ops_handlers[opt] = cfn;
+ otd->active_engine_index[t] = engine_index;
+ cm->opt_data[opt].handlers[t] = cfn;
}
- ae = vec_elt_at_index (cm->engines, otd->active_engine_index_chained);
+ ae = vec_elt_at_index (cm->engines, otd->active_engine_index[t]);
if (ae->priority < e->priority)
- crypto_set_active_engine (otd, opt, engine_index, CRYPTO_OP_CHAINED);
+ crypto_set_active_engine (otd, opt, engine_index, t);
}
return;
}
void
-vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index,
+vnet_crypto_register_ops_handler (vlib_main_t *vm, u32 engine_index,
vnet_crypto_op_id_t opt,
- vnet_crypto_ops_handler_t * fn)
+ vnet_crypto_simple_op_fn_t *fn)
{
vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, fn, 0);
}
void
-vnet_crypto_register_chained_ops_handler (vlib_main_t * vm, u32 engine_index,
+vnet_crypto_register_chained_ops_handler (vlib_main_t *vm, u32 engine_index,
vnet_crypto_op_id_t opt,
- vnet_crypto_chained_ops_handler_t *
- fn)
+ vnet_crypto_chained_op_fn_t *fn)
{
vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, 0, fn);
}
void
-vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index,
+vnet_crypto_register_ops_handlers (vlib_main_t *vm, u32 engine_index,
vnet_crypto_op_id_t opt,
- vnet_crypto_ops_handler_t * fn,
- vnet_crypto_chained_ops_handler_t * cfn)
+ vnet_crypto_simple_op_fn_t *fn,
+ vnet_crypto_chained_op_fn_t *cfn)
{
vnet_crypto_register_ops_handler_inline (vm, engine_index, opt, fn, cfn);
}
void
vnet_crypto_register_enqueue_handler (vlib_main_t *vm, u32 engine_index,
- vnet_crypto_async_op_id_t opt,
- vnet_crypto_frame_enqueue_t *enqueue_hdl)
+ vnet_crypto_op_id_t opt,
+ vnet_crypto_frame_enq_fn_t *enqueue_hdl)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *ae, *e = vec_elt_at_index (cm->engines, engine_index);
- vnet_crypto_async_op_data_t *otd = cm->async_opt_data + opt;
- vec_validate_aligned (cm->enqueue_handlers, VNET_CRYPTO_ASYNC_OP_N_IDS,
- CLIB_CACHE_LINE_BYTES);
+ vnet_crypto_op_data_t *otd = cm->opt_data + opt;
+ vnet_crypto_handler_type_t t = VNET_CRYPTO_HANDLER_TYPE_ASYNC;
if (!enqueue_hdl)
return;
- e->enqueue_handlers[opt] = enqueue_hdl;
- if (otd->active_engine_index_async == ~0)
+ e->ops[opt].handlers[t] = enqueue_hdl;
+ if (!otd->active_engine_index[t])
{
- otd->active_engine_index_async = engine_index;
- cm->enqueue_handlers[opt] = enqueue_hdl;
+ otd->active_engine_index[t] = engine_index;
+ otd->handlers[t] = enqueue_hdl;
}
- ae = vec_elt_at_index (cm->engines, otd->active_engine_index_async);
+ ae = vec_elt_at_index (cm->engines, otd->active_engine_index[t]);
if (ae->priority <= e->priority)
{
- otd->active_engine_index_async = engine_index;
- cm->enqueue_handlers[opt] = enqueue_hdl;
+ otd->active_engine_index[t] = engine_index;
+ otd->handlers[t] = enqueue_hdl;
}
return;
@@ -340,21 +318,23 @@ static void
vnet_crypto_update_cm_dequeue_handlers (void)
{
vnet_crypto_main_t *cm = &crypto_main;
- vnet_crypto_async_op_data_t *otd;
+ vnet_crypto_op_data_t *otd;
vnet_crypto_engine_t *e;
u32 *active_engines = 0, *ei, last_ei = ~0, i;
vec_reset_length (cm->dequeue_handlers);
- for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_IDS; i++)
+ for (i = 0; i < VNET_CRYPTO_N_OP_IDS; i++)
{
- otd = cm->async_opt_data + i;
- if (otd->active_engine_index_async == ~0)
+ otd = cm->opt_data + i;
+ if (!otd->active_engine_index[VNET_CRYPTO_HANDLER_TYPE_ASYNC])
continue;
- e = cm->engines + otd->active_engine_index_async;
+ e =
+ cm->engines + otd->active_engine_index[VNET_CRYPTO_HANDLER_TYPE_ASYNC];
if (!e->dequeue_handler)
continue;
- vec_add1 (active_engines, otd->active_engine_index_async);
+ vec_add1 (active_engines,
+ otd->active_engine_index[VNET_CRYPTO_HANDLER_TYPE_ASYNC]);
}
vec_sort_with_function (active_engines, engine_index_cmp);
@@ -392,8 +372,8 @@ vnet_crypto_register_dequeue_handler (vlib_main_t *vm, u32 engine_index,
}
void
-vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
- vnet_crypto_key_handler_t * key_handler)
+vnet_crypto_register_key_handler (vlib_main_t *vm, u32 engine_index,
+ vnet_crypto_key_fn_t *key_handler)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *e = vec_elt_at_index (cm->engines, engine_index);
@@ -401,73 +381,67 @@ vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
return;
}
-static int
-vnet_crypto_key_len_check (vnet_crypto_alg_t alg, u16 length)
+static vnet_crypto_key_t *
+vnet_crypoto_key_alloc (u32 length)
{
- switch (alg)
- {
- case VNET_CRYPTO_N_ALGS:
- return 0;
- case VNET_CRYPTO_ALG_NONE:
- return 1;
-
-#define _(n, s, l) \
- case VNET_CRYPTO_ALG_##n: \
- if ((l) == length) \
- return 1; \
- break;
- foreach_crypto_cipher_alg foreach_crypto_aead_alg
-#undef _
- /* HMAC allows any key length */
-#define _(n, s) \
- case VNET_CRYPTO_ALG_HMAC_##n: \
- return 1;
- foreach_crypto_hmac_alg
-#undef _
+ vnet_crypto_main_t *cm = &crypto_main;
+ u8 expected = 0;
+ vnet_crypto_key_t *k, **kp;
+ u32 alloc_sz = sizeof (vnet_crypto_key_t) + round_pow2 (length, 16);
-#define _(n, s) \
- case VNET_CRYPTO_ALG_HASH_##n: \
- return 1;
- foreach_crypto_hash_alg
-#undef _
+ while (!__atomic_compare_exchange_n (&cm->keys_lock, &expected, 1, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+ {
+ while (__atomic_load_n (&cm->keys_lock, __ATOMIC_RELAXED))
+ CLIB_PAUSE ();
+ expected = 0;
}
- return 0;
+ pool_get (cm->keys, kp);
+
+ __atomic_store_n (&cm->keys_lock, 0, __ATOMIC_RELEASE);
+
+ k = clib_mem_alloc_aligned (alloc_sz, alignof (vnet_crypto_key_t));
+ kp[0] = k;
+ *k = (vnet_crypto_key_t){
+ .index = kp - cm->keys,
+ .length = length,
+ };
+
+ return k;
}
u32
vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg, u8 * data,
u16 length)
{
- u32 index;
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
vnet_crypto_key_t *key;
+ vnet_crypto_alg_data_t *ad = cm->algs + alg;
- u8 need_barrier_sync = 0;
+ ASSERT (alg != 0);
- if (!vnet_crypto_key_len_check (alg, length))
+ if (length == 0)
return ~0;
- need_barrier_sync = pool_get_will_expand (cm->keys);
- /* If the cm->keys will expand, stop the parade. */
- if (need_barrier_sync)
- vlib_worker_thread_barrier_sync (vm);
-
- pool_get_zero (cm->keys, key);
+ if (ad->variable_key_length == 0)
+ {
+ if (ad->key_length == 0)
+ return ~0;
- if (need_barrier_sync)
- vlib_worker_thread_barrier_release (vm);
+ if (ad->key_length != length)
+ return ~0;
+ }
- index = key - cm->keys;
- key->type = VNET_CRYPTO_KEY_TYPE_DATA;
+ key = vnet_crypoto_key_alloc (length);
key->alg = alg;
- vec_validate_aligned (key->data, length - 1, CLIB_CACHE_LINE_BYTES);
+
clib_memcpy (key->data, data, length);
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
- engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, index);
- return index;
+ engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, key->index);
+ return key->index;
}
void
@@ -475,23 +449,16 @@ vnet_crypto_key_del (vlib_main_t * vm, vnet_crypto_key_index_t index)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
- vnet_crypto_key_t *key = pool_elt_at_index (cm->keys, index);
+ vnet_crypto_key_t *key = cm->keys[index];
+ u32 sz = sizeof (vnet_crypto_key_t) + round_pow2 (key->length, 16);
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
engine->key_op_handler (VNET_CRYPTO_KEY_OP_DEL, index);
- if (key->type == VNET_CRYPTO_KEY_TYPE_DATA)
- {
- clib_memset (key->data, 0xfe, vec_len (key->data));
- vec_free (key->data);
- }
- else if (key->type == VNET_CRYPTO_KEY_TYPE_LINK)
- {
- key->index_crypto = key->index_integ = ~0;
- }
-
- pool_put (cm->keys, key);
+ clib_memset (key, 0xfe, sz);
+ clib_mem_free (key);
+ pool_put_index (cm->keys, index);
}
void
@@ -505,7 +472,7 @@ vnet_crypto_key_update (vlib_main_t *vm, vnet_crypto_key_index_t index)
engine->key_op_handler (VNET_CRYPTO_KEY_OP_MODIFY, index);
}
-vnet_crypto_async_alg_t
+vnet_crypto_alg_t
vnet_crypto_link_algs (vnet_crypto_alg_t crypto_alg,
vnet_crypto_alg_t integ_alg)
{
@@ -523,79 +490,29 @@ vnet_crypto_key_add_linked (vlib_main_t * vm,
vnet_crypto_key_index_t index_crypto,
vnet_crypto_key_index_t index_integ)
{
- u32 index;
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
vnet_crypto_key_t *key_crypto, *key_integ, *key;
- vnet_crypto_async_alg_t linked_alg;
+ vnet_crypto_alg_t linked_alg;
- key_crypto = pool_elt_at_index (cm->keys, index_crypto);
- key_integ = pool_elt_at_index (cm->keys, index_integ);
+ key_crypto = cm->keys[index_crypto];
+ key_integ = cm->keys[index_integ];
linked_alg = vnet_crypto_link_algs (key_crypto->alg, key_integ->alg);
if (linked_alg == ~0)
return ~0;
- pool_get_zero (cm->keys, key);
- index = key - cm->keys;
- key->type = VNET_CRYPTO_KEY_TYPE_LINK;
+ key = vnet_crypoto_key_alloc (0);
+ key->is_link = 1;
key->index_crypto = index_crypto;
key->index_integ = index_integ;
- key->async_alg = linked_alg;
+ key->alg = linked_alg;
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
- engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, index);
-
- return index;
-}
-
-static_always_inline void
-crypto_set_active_async_engine (vnet_crypto_async_op_data_t * od,
- vnet_crypto_async_op_id_t id, u32 ei)
-{
- vnet_crypto_main_t *cm = &crypto_main;
- vnet_crypto_engine_t *ce = vec_elt_at_index (cm->engines, ei);
-
- if (ce->enqueue_handlers[id] && ce->dequeue_handler)
- {
- od->active_engine_index_async = ei;
- cm->enqueue_handlers[id] = ce->enqueue_handlers[id];
- }
-}
-
-int
-vnet_crypto_set_async_handler2 (char *alg_name, char *engine)
-{
- uword *p;
- vnet_crypto_main_t *cm = &crypto_main;
- vnet_crypto_async_alg_data_t *ad;
- int i;
-
- p = hash_get_mem (cm->async_alg_index_by_name, alg_name);
- if (!p)
- return -1;
-
- ad = vec_elt_at_index (cm->async_algs, p[0]);
-
- p = hash_get_mem (cm->engine_index_by_name, engine);
- if (!p)
- return -1;
-
- for (i = 0; i < VNET_CRYPTO_ASYNC_OP_N_TYPES; i++)
- {
- vnet_crypto_async_op_data_t *od;
- vnet_crypto_async_op_id_t id = ad->op_by_type[i];
- if (id == 0)
- continue;
-
- od = cm->async_opt_data + id;
- crypto_set_active_async_engine (od, id, p[0]);
- }
+ engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, key->index);
- vnet_crypto_update_cm_dequeue_handlers ();
-
- return 0;
+ return key->index;
}
u32
@@ -643,94 +560,6 @@ vnet_crypto_set_async_dispatch (u8 mode, u8 adaptive)
}
}
-int
-vnet_crypto_is_set_async_handler (vnet_crypto_async_op_id_t op)
-{
- vnet_crypto_main_t *cm = &crypto_main;
-
- return (op < vec_len (cm->enqueue_handlers) &&
- NULL != cm->enqueue_handlers[op]);
-}
-
-static void
-vnet_crypto_init_cipher_data (vnet_crypto_alg_t alg, vnet_crypto_op_id_t eid,
- vnet_crypto_op_id_t did, char *name, u8 is_aead)
-{
- vnet_crypto_op_type_t eopt, dopt;
- vnet_crypto_main_t *cm = &crypto_main;
-
- cm->algs[alg].name = name;
- cm->opt_data[eid].alg = cm->opt_data[did].alg = alg;
- cm->opt_data[eid].active_engine_index_simple = ~0;
- cm->opt_data[did].active_engine_index_simple = ~0;
- cm->opt_data[eid].active_engine_index_chained = ~0;
- cm->opt_data[did].active_engine_index_chained = ~0;
- if (is_aead)
- {
- eopt = VNET_CRYPTO_OP_TYPE_AEAD_ENCRYPT;
- dopt = VNET_CRYPTO_OP_TYPE_AEAD_DECRYPT;
- }
- else
- {
- eopt = VNET_CRYPTO_OP_TYPE_ENCRYPT;
- dopt = VNET_CRYPTO_OP_TYPE_DECRYPT;
- }
- cm->opt_data[eid].type = eopt;
- cm->opt_data[did].type = dopt;
- cm->algs[alg].op_by_type[eopt] = eid;
- cm->algs[alg].op_by_type[dopt] = did;
- hash_set_mem (cm->alg_index_by_name, name, alg);
-}
-
-static void
-vnet_crypto_init_hash_data (vnet_crypto_alg_t alg, vnet_crypto_op_id_t id,
- char *name)
-{
- vnet_crypto_main_t *cm = &crypto_main;
- cm->algs[alg].name = name;
- cm->algs[alg].op_by_type[VNET_CRYPTO_OP_TYPE_HASH] = id;
- cm->opt_data[id].alg = alg;
- cm->opt_data[id].active_engine_index_simple = ~0;
- cm->opt_data[id].active_engine_index_chained = ~0;
- cm->opt_data[id].type = VNET_CRYPTO_OP_TYPE_HASH;
- hash_set_mem (cm->alg_index_by_name, name, alg);
-}
-
-static void
-vnet_crypto_init_hmac_data (vnet_crypto_alg_t alg,
- vnet_crypto_op_id_t id, char *name)
-{
- vnet_crypto_main_t *cm = &crypto_main;
- cm->algs[alg].name = name;
- cm->algs[alg].op_by_type[VNET_CRYPTO_OP_TYPE_HMAC] = id;
- cm->opt_data[id].alg = alg;
- cm->opt_data[id].active_engine_index_simple = ~0;
- cm->opt_data[id].active_engine_index_chained = ~0;
- cm->opt_data[id].type = VNET_CRYPTO_OP_TYPE_HMAC;
- hash_set_mem (cm->alg_index_by_name, name, alg);
-}
-
-static void
-vnet_crypto_init_async_data (vnet_crypto_async_alg_t alg,
- vnet_crypto_async_op_id_t eid,
- vnet_crypto_async_op_id_t did, char *name)
-{
- vnet_crypto_main_t *cm = &crypto_main;
-
- cm->async_algs[alg].name = name;
- cm->async_algs[alg].op_by_type[VNET_CRYPTO_ASYNC_OP_TYPE_ENCRYPT] = eid;
- cm->async_algs[alg].op_by_type[VNET_CRYPTO_ASYNC_OP_TYPE_DECRYPT] = did;
- cm->async_opt_data[eid].type = VNET_CRYPTO_ASYNC_OP_TYPE_ENCRYPT;
- cm->async_opt_data[eid].alg = alg;
- cm->async_opt_data[eid].active_engine_index_async = ~0;
- cm->async_opt_data[eid].active_engine_index_async = ~0;
- cm->async_opt_data[did].type = VNET_CRYPTO_ASYNC_OP_TYPE_DECRYPT;
- cm->async_opt_data[did].alg = alg;
- cm->async_opt_data[did].active_engine_index_async = ~0;
- cm->async_opt_data[did].active_engine_index_async = ~0;
- hash_set_mem (cm->async_alg_index_by_name, name, alg);
-}
-
static void
vnet_crypto_load_engines (vlib_main_t *vm)
{
@@ -848,54 +677,21 @@ vnet_crypto_init (vlib_main_t * vm)
vnet_crypto_main_t *cm = &crypto_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
vnet_crypto_thread_t *ct = 0;
+ vnet_crypto_engine_t *p;
+ vec_add2 (cm->engines, p, 1);
cm->engine_index_by_name = hash_create_string ( /* size */ 0,
sizeof (uword));
cm->alg_index_by_name = hash_create_string (0, sizeof (uword));
- cm->async_alg_index_by_name = hash_create_string (0, sizeof (uword));
vec_validate_aligned (cm->threads, tm->n_vlib_mains, CLIB_CACHE_LINE_BYTES);
vec_foreach (ct, cm->threads)
pool_init_fixed (ct->frame_pool, VNET_CRYPTO_FRAME_POOL_SIZE);
- vec_validate (cm->algs, VNET_CRYPTO_N_ALGS);
- vec_validate (cm->async_algs, VNET_CRYPTO_N_ASYNC_ALGS);
-
-#define _(n, s, l) \
- vnet_crypto_init_cipher_data (VNET_CRYPTO_ALG_##n, \
- VNET_CRYPTO_OP_##n##_ENC, \
- VNET_CRYPTO_OP_##n##_DEC, s, 0);
- foreach_crypto_cipher_alg;
-#undef _
-#define _(n, s, l) \
- vnet_crypto_init_cipher_data (VNET_CRYPTO_ALG_##n, \
- VNET_CRYPTO_OP_##n##_ENC, \
- VNET_CRYPTO_OP_##n##_DEC, s, 1);
- foreach_crypto_aead_alg;
-#undef _
-#define _(n, s) \
- vnet_crypto_init_hmac_data (VNET_CRYPTO_ALG_HMAC_##n, \
- VNET_CRYPTO_OP_##n##_HMAC, "hmac-" s);
- foreach_crypto_hmac_alg;
-#undef _
-#define _(n, s) \
- vnet_crypto_init_hash_data (VNET_CRYPTO_ALG_HASH_##n, \
- VNET_CRYPTO_OP_##n##_HASH, s);
- foreach_crypto_hash_alg;
-#undef _
-#define _(n, s, k, t, a) \
- vnet_crypto_init_async_data (VNET_CRYPTO_ALG_##n##_TAG##t##_AAD##a, \
- VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
- VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
- s);
- foreach_crypto_aead_async_alg
-#undef _
-#define _(c, h, s, k ,d) \
- vnet_crypto_init_async_data (VNET_CRYPTO_ALG_##c##_##h##_TAG##d, \
- VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
- VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
- s);
- foreach_crypto_link_async_alg
-#undef _
- cm->crypto_node_index =
+
+ FOREACH_ARRAY_ELT (e, cm->algs)
+ if (e->name)
+ hash_set_mem (cm->alg_index_by_name, e->name, e - cm->algs);
+
+ cm->crypto_node_index =
vlib_get_node_by_name (vm, (u8 *) "crypto-dispatch")->index;
vnet_crypto_load_engines (vm);
@@ -904,11 +700,3 @@ vnet_crypto_init (vlib_main_t * vm)
}
VLIB_INIT_FUNCTION (vnet_crypto_init);
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 13d08756109..0a021282b5d 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -21,47 +21,38 @@
#define VNET_CRYPTO_FRAME_SIZE 64
#define VNET_CRYPTO_FRAME_POOL_SIZE 1024
-/* CRYPTO_ID, PRETTY_NAME, KEY_LENGTH_IN_BYTES */
-#define foreach_crypto_cipher_alg \
- _(DES_CBC, "des-cbc", 7) \
- _(3DES_CBC, "3des-cbc", 24) \
- _(AES_128_CBC, "aes-128-cbc", 16) \
- _(AES_192_CBC, "aes-192-cbc", 24) \
- _(AES_256_CBC, "aes-256-cbc", 32) \
- _(AES_128_CTR, "aes-128-ctr", 16) \
- _(AES_192_CTR, "aes-192-ctr", 24) \
- _(AES_256_CTR, "aes-256-ctr", 32)
-
-/* CRYPTO_ID, PRETTY_NAME, KEY_LENGTH_IN_BYTES */
+/* CRYPTO_ID, PRETTY_NAME, ARGS*/
+#define foreach_crypto_cipher_alg \
+ _ (DES_CBC, "des-cbc", .key_length = 7) \
+ _ (3DES_CBC, "3des-cbc", .key_length = 24) \
+ _ (AES_128_CBC, "aes-128-cbc", .key_length = 16) \
+ _ (AES_192_CBC, "aes-192-cbc", .key_length = 24) \
+ _ (AES_256_CBC, "aes-256-cbc", .key_length = 32) \
+ _ (AES_128_CTR, "aes-128-ctr", .key_length = 16) \
+ _ (AES_192_CTR, "aes-192-ctr", .key_length = 24) \
+ _ (AES_256_CTR, "aes-256-ctr", .key_length = 32)
+
+/* CRYPTO_ID, PRETTY_NAME, ARGS */
#define foreach_crypto_aead_alg \
- _ (AES_128_GCM, "aes-128-gcm", 16) \
- _ (AES_192_GCM, "aes-192-gcm", 24) \
- _ (AES_256_GCM, "aes-256-gcm", 32) \
- _ (AES_128_NULL_GMAC, "aes-128-null-gmac", 16) \
- _ (AES_192_NULL_GMAC, "aes-192-null-gmac", 24) \
- _ (AES_256_NULL_GMAC, "aes-256-null-gmac", 32) \
- _ (CHACHA20_POLY1305, "chacha20-poly1305", 32)
+ _ (AES_128_GCM, "aes-128-gcm", .is_aead = 1, .key_length = 16) \
+ _ (AES_192_GCM, "aes-192-gcm", .is_aead = 1, .key_length = 24) \
+ _ (AES_256_GCM, "aes-256-gcm", .is_aead = 1, .key_length = 32) \
+ _ (AES_128_NULL_GMAC, "aes-128-null-gmac", .is_aead = 1, .key_length = 16) \
+ _ (AES_192_NULL_GMAC, "aes-192-null-gmac", .is_aead = 1, .key_length = 24) \
+ _ (AES_256_NULL_GMAC, "aes-256-null-gmac", .is_aead = 1, .key_length = 32) \
+ _ (CHACHA20_POLY1305, "chacha20-poly1305", .is_aead = 1, .key_length = 32)
#define foreach_crypto_hash_alg \
+ _ (MD5, "md5") \
_ (SHA1, "sha-1") \
_ (SHA224, "sha-224") \
_ (SHA256, "sha-256") \
_ (SHA384, "sha-384") \
_ (SHA512, "sha-512")
-#define foreach_crypto_hmac_alg \
- _(MD5, "md5") \
- _(SHA1, "sha-1") \
- _(SHA224, "sha-224") \
- _(SHA256, "sha-256") \
- _(SHA384, "sha-384") \
- _(SHA512, "sha-512")
-
#define foreach_crypto_op_type \
_ (ENCRYPT, "encrypt") \
_ (DECRYPT, "decrypt") \
- _ (AEAD_ENCRYPT, "aead-encrypt") \
- _ (AEAD_DECRYPT, "aead-decrypt") \
_ (HMAC, "hmac") \
_ (HASH, "hash")
@@ -100,7 +91,7 @@ typedef enum
_ (AES_256_NULL_GMAC, "aes-256-null-gmac-aad12", 32, 16, 12) \
_ (CHACHA20_POLY1305, "chacha20-poly1305-aad8", 32, 16, 8) \
_ (CHACHA20_POLY1305, "chacha20-poly1305-aad12", 32, 16, 12) \
- _ (CHACHA20_POLY1305, "chacha20-poly1305", 32, 16, 0)
+ _ (CHACHA20_POLY1305, "chacha20-poly1305-aad0", 32, 16, 0)
/* CRYPTO_ID, INTEG_ID, PRETTY_NAME, KEY_LENGTH_IN_BYTES, DIGEST_LEN */
#define foreach_crypto_link_async_alg \
@@ -130,11 +121,16 @@ typedef enum
_ (AES_256_CBC, SHA512, "aes-256-cbc-hmac-sha-512", 32, 32) \
_ (AES_128_CTR, SHA1, "aes-128-ctr-hmac-sha-1", 16, 12) \
_ (AES_192_CTR, SHA1, "aes-192-ctr-hmac-sha-1", 24, 12) \
- _ (AES_256_CTR, SHA1, "aes-256-ctr-hmac-sha-1", 32, 12)
-
-#define foreach_crypto_async_op_type \
- _(ENCRYPT, "async-encrypt") \
- _(DECRYPT, "async-decrypt")
+ _ (AES_256_CTR, SHA1, "aes-256-ctr-hmac-sha-1", 32, 12) \
+ _ (AES_128_CTR, SHA256, "aes-128-ctr-hmac-sha-256", 16, 16) \
+ _ (AES_192_CTR, SHA256, "aes-192-ctr-hmac-sha-256", 24, 16) \
+ _ (AES_256_CTR, SHA256, "aes-256-ctr-hmac-sha-256", 32, 16) \
+ _ (AES_128_CTR, SHA384, "aes-128-ctr-hmac-sha-384", 16, 24) \
+ _ (AES_192_CTR, SHA384, "aes-192-ctr-hmac-sha-384", 24, 24) \
+ _ (AES_256_CTR, SHA384, "aes-256-ctr-hmac-sha-384", 32, 24) \
+ _ (AES_128_CTR, SHA512, "aes-128-ctr-hmac-sha-512", 16, 32) \
+ _ (AES_192_CTR, SHA512, "aes-192-ctr-hmac-sha-512", 24, 32) \
+ _ (AES_256_CTR, SHA512, "aes-256-ctr-hmac-sha-512", 32, 32)
typedef enum
{
@@ -154,102 +150,68 @@ typedef enum
typedef enum
{
VNET_CRYPTO_ALG_NONE = 0,
-#define _(n, s, l) VNET_CRYPTO_ALG_##n,
+#define _(n, s, ...) VNET_CRYPTO_ALG_##n,
foreach_crypto_cipher_alg foreach_crypto_aead_alg
#undef _
-#define _(n, s) VNET_CRYPTO_ALG_HMAC_##n,
- foreach_crypto_hmac_alg
-#undef _
-#define _(n, s) VNET_CRYPTO_ALG_HASH_##n,
- foreach_crypto_hash_alg
-#undef _
- VNET_CRYPTO_N_ALGS,
-} vnet_crypto_alg_t;
-
-typedef enum
-{
-#define _(n, s) VNET_CRYPTO_ASYNC_OP_TYPE_##n,
- foreach_crypto_async_op_type
+#define _(n, s) VNET_CRYPTO_ALG_HASH_##n, VNET_CRYPTO_ALG_HMAC_##n,
+ foreach_crypto_hash_alg
#undef _
- VNET_CRYPTO_ASYNC_OP_N_TYPES,
-} vnet_crypto_async_op_type_t;
-
-typedef enum
-{
- VNET_CRYPTO_ASYNC_ALG_NONE = 0,
#define _(n, s, k, t, a) \
VNET_CRYPTO_ALG_##n##_TAG##t##_AAD##a,
- foreach_crypto_aead_async_alg
+ foreach_crypto_aead_async_alg
#undef _
#define _(c, h, s, k ,d) \
VNET_CRYPTO_ALG_##c##_##h##_TAG##d,
- foreach_crypto_link_async_alg
-#undef _
- VNET_CRYPTO_N_ASYNC_ALGS,
-} vnet_crypto_async_alg_t;
-
-typedef enum
-{
- VNET_CRYPTO_ASYNC_OP_NONE = 0,
-#define _(n, s, k, t, a) \
- VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
- VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,
- foreach_crypto_aead_async_alg
+ foreach_crypto_link_async_alg
#undef _
-#define _(c, h, s, k ,d) \
- VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
- VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,
- foreach_crypto_link_async_alg
-#undef _
- VNET_CRYPTO_ASYNC_OP_N_IDS,
-} vnet_crypto_async_op_id_t;
+ VNET_CRYPTO_N_ALGS,
+} vnet_crypto_alg_t;
typedef struct
{
+ u32 index;
+ u16 length;
+ u8 is_link : 1;
+ vnet_crypto_alg_t alg : 8;
union
{
struct
{
- u8 *data;
- vnet_crypto_alg_t alg:8;
- };
- struct
- {
u32 index_crypto;
u32 index_integ;
- vnet_crypto_async_alg_t async_alg:8;
};
};
-#define VNET_CRYPTO_KEY_TYPE_DATA 0
-#define VNET_CRYPTO_KEY_TYPE_LINK 1
- u8 type;
+ u8 data[];
} vnet_crypto_key_t;
typedef enum
{
VNET_CRYPTO_OP_NONE = 0,
-#define _(n, s, l) VNET_CRYPTO_OP_##n##_ENC, VNET_CRYPTO_OP_##n##_DEC,
+#define _(n, s, ...) VNET_CRYPTO_OP_##n##_ENC, VNET_CRYPTO_OP_##n##_DEC,
foreach_crypto_cipher_alg foreach_crypto_aead_alg
#undef _
-#define _(n, s) VNET_CRYPTO_OP_##n##_HMAC,
- foreach_crypto_hmac_alg
+#define _(n, s) VNET_CRYPTO_OP_##n##_HASH, VNET_CRYPTO_OP_##n##_HMAC,
+ foreach_crypto_hash_alg
#undef _
-#define _(n, s) VNET_CRYPTO_OP_##n##_HASH,
- foreach_crypto_hash_alg
+#define _(n, s, k, t, a) \
+ VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
+ VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC,
+ foreach_crypto_aead_async_alg
#undef _
- VNET_CRYPTO_N_OP_IDS,
-} vnet_crypto_op_id_t;
-
-typedef enum
-{
- CRYPTO_OP_SIMPLE,
- CRYPTO_OP_CHAINED,
- CRYPTO_OP_BOTH,
-} crypto_op_class_type_t;
+#define _(c, h, s, k, d) \
+ VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
+ VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC,
+ foreach_crypto_link_async_alg
+#undef _
+ VNET_CRYPTO_N_OP_IDS,
+} __clib_packed vnet_crypto_op_id_t;
typedef struct
{
char *name;
+ u16 key_length;
+ u8 is_aead : 1;
+ u8 variable_key_length : 1;
vnet_crypto_op_id_t op_by_type[VNET_CRYPTO_OP_N_TYPES];
} vnet_crypto_alg_data_t;
@@ -264,7 +226,7 @@ typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
uword user_data;
- vnet_crypto_op_id_t op:16;
+ vnet_crypto_op_id_t op;
vnet_crypto_op_status_t status:8;
u8 flags;
#define VNET_CRYPTO_OP_FLAG_HMAC_CHECK (1 << 0)
@@ -309,26 +271,19 @@ typedef struct
STATIC_ASSERT_SIZEOF (vnet_crypto_op_t, CLIB_CACHE_LINE_BYTES);
-typedef struct
-{
- vnet_crypto_op_type_t type;
- vnet_crypto_alg_t alg;
- u32 active_engine_index_simple;
- u32 active_engine_index_chained;
-} vnet_crypto_op_data_t;
+#define foreach_crypto_handler_type \
+ _ (SIMPLE, "simple") \
+ _ (CHAINED, "chained") \
+ _ (ASYNC, "async")
-typedef struct
+typedef enum
{
- vnet_crypto_async_op_type_t type;
- vnet_crypto_async_alg_t alg;
- u32 active_engine_index_async;
-} vnet_crypto_async_op_data_t;
+#define _(n, s) VNET_CRYPTO_HANDLER_TYPE_##n,
+ foreach_crypto_handler_type
+#undef _
+ VNET_CRYPTO_HANDLER_N_TYPES
-typedef struct
-{
- char *name;
- vnet_crypto_async_op_id_t op_by_type[VNET_CRYPTO_ASYNC_OP_N_TYPES];
-} vnet_crypto_async_alg_data_t;
+} vnet_crypto_handler_type_t;
typedef struct
{
@@ -366,7 +321,7 @@ typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
vnet_crypto_async_frame_state_t state;
- vnet_crypto_async_op_id_t op:8;
+ vnet_crypto_op_id_t op : 8;
u16 n_elts;
vnet_crypto_async_frame_elt_t elts[VNET_CRYPTO_FRAME_SIZE];
u32 buffer_indices[VNET_CRYPTO_FRAME_SIZE];
@@ -384,21 +339,20 @@ typedef struct
typedef u32 vnet_crypto_key_index_t;
-typedef u32 (vnet_crypto_chained_ops_handler_t) (vlib_main_t * vm,
- vnet_crypto_op_t * ops[],
- vnet_crypto_op_chunk_t *
- chunks, u32 n_ops);
+typedef u32 (vnet_crypto_chained_op_fn_t) (vlib_main_t *vm,
+ vnet_crypto_op_t *ops[],
+ vnet_crypto_op_chunk_t *chunks,
+ u32 n_ops);
-typedef u32 (vnet_crypto_ops_handler_t) (vlib_main_t * vm,
- vnet_crypto_op_t * ops[], u32 n_ops);
+typedef u32 (vnet_crypto_simple_op_fn_t) (vlib_main_t *vm,
+ vnet_crypto_op_t *ops[], u32 n_ops);
-typedef void (vnet_crypto_key_handler_t) (vnet_crypto_key_op_t kop,
- vnet_crypto_key_index_t idx);
+typedef void (vnet_crypto_key_fn_t) (vnet_crypto_key_op_t kop,
+ vnet_crypto_key_index_t idx);
/** async crypto function handlers **/
-typedef int
- (vnet_crypto_frame_enqueue_t) (vlib_main_t * vm,
- vnet_crypto_async_frame_t * frame);
+typedef int (vnet_crypto_frame_enq_fn_t) (vlib_main_t *vm,
+ vnet_crypto_async_frame_t *frame);
typedef vnet_crypto_async_frame_t *
(vnet_crypto_frame_dequeue_t) (vlib_main_t * vm, u32 * nb_elts_processed,
u32 * enqueue_thread_idx);
@@ -407,32 +361,29 @@ u32
vnet_crypto_register_engine (vlib_main_t * vm, char *name, int prio,
char *desc);
-void vnet_crypto_register_ops_handler (vlib_main_t * vm, u32 engine_index,
+void vnet_crypto_register_ops_handler (vlib_main_t *vm, u32 engine_index,
vnet_crypto_op_id_t opt,
- vnet_crypto_ops_handler_t * oph);
+ vnet_crypto_simple_op_fn_t *oph);
-void vnet_crypto_register_chained_ops_handler (vlib_main_t * vm,
- u32 engine_index,
- vnet_crypto_op_id_t opt,
- vnet_crypto_chained_ops_handler_t
- * oph);
+void
+vnet_crypto_register_chained_ops_handler (vlib_main_t *vm, u32 engine_index,
+ vnet_crypto_op_id_t opt,
+ vnet_crypto_chained_op_fn_t *oph);
-void vnet_crypto_register_ops_handlers (vlib_main_t * vm, u32 engine_index,
+void vnet_crypto_register_ops_handlers (vlib_main_t *vm, u32 engine_index,
vnet_crypto_op_id_t opt,
- vnet_crypto_ops_handler_t * fn,
- vnet_crypto_chained_ops_handler_t *
- cfn);
+ vnet_crypto_simple_op_fn_t *fn,
+ vnet_crypto_chained_op_fn_t *cfn);
-void vnet_crypto_register_key_handler (vlib_main_t * vm, u32 engine_index,
- vnet_crypto_key_handler_t * keyh);
+void vnet_crypto_register_key_handler (vlib_main_t *vm, u32 engine_index,
+ vnet_crypto_key_fn_t *keyh);
/** async crypto register functions */
u32 vnet_crypto_register_post_node (vlib_main_t * vm, char *post_node_name);
-void
-vnet_crypto_register_enqueue_handler (vlib_main_t *vm, u32 engine_index,
- vnet_crypto_async_op_id_t opt,
- vnet_crypto_frame_enqueue_t *enq_fn);
+void vnet_crypto_register_enqueue_handler (vlib_main_t *vm, u32 engine_index,
+ vnet_crypto_op_id_t opt,
+ vnet_crypto_frame_enq_fn_t *enq_fn);
void
vnet_crypto_register_dequeue_handler (vlib_main_t *vm, u32 engine_index,
@@ -440,14 +391,16 @@ vnet_crypto_register_dequeue_handler (vlib_main_t *vm, u32 engine_index,
typedef struct
{
+ void *handlers[VNET_CRYPTO_HANDLER_N_TYPES];
+} vnet_crypto_engine_op_t;
+
+typedef struct
+{
char *name;
char *desc;
int priority;
- vnet_crypto_key_handler_t *key_op_handler;
- vnet_crypto_ops_handler_t *ops_handlers[VNET_CRYPTO_N_OP_IDS];
- vnet_crypto_chained_ops_handler_t
- * chained_ops_handlers[VNET_CRYPTO_N_OP_IDS];
- vnet_crypto_frame_enqueue_t *enqueue_handlers[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ vnet_crypto_engine_op_t ops[VNET_CRYPTO_N_OP_IDS];
+ vnet_crypto_key_fn_t *key_op_handler;
vnet_crypto_frame_dequeue_t *dequeue_handler;
} vnet_crypto_engine_t;
@@ -459,22 +412,25 @@ typedef struct
typedef struct
{
- vnet_crypto_alg_data_t *algs;
+ vnet_crypto_op_type_t type;
+ vnet_crypto_alg_t alg;
+ u8 active_engine_index[VNET_CRYPTO_HANDLER_N_TYPES];
+ void *handlers[VNET_CRYPTO_HANDLER_N_TYPES];
+} vnet_crypto_op_data_t;
+
+typedef struct
+{
+ vnet_crypto_key_t **keys;
+ u8 keys_lock;
+ u32 crypto_node_index;
vnet_crypto_thread_t *threads;
- vnet_crypto_ops_handler_t **ops_handlers;
- vnet_crypto_chained_ops_handler_t **chained_ops_handlers;
- vnet_crypto_frame_enqueue_t **enqueue_handlers;
vnet_crypto_frame_dequeue_t **dequeue_handlers;
- vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
- vnet_crypto_async_op_data_t async_opt_data[VNET_CRYPTO_ASYNC_OP_N_IDS];
vnet_crypto_engine_t *engines;
- vnet_crypto_key_t *keys;
uword *engine_index_by_name;
uword *alg_index_by_name;
- uword *async_alg_index_by_name;
- vnet_crypto_async_alg_data_t *async_algs;
vnet_crypto_async_next_node_t *next_nodes;
- u32 crypto_node_index;
+ vnet_crypto_alg_data_t algs[VNET_CRYPTO_N_ALGS];
+ vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
} vnet_crypto_main_t;
extern vnet_crypto_main_t crypto_main;
@@ -486,8 +442,17 @@ u32 vnet_crypto_process_ops (vlib_main_t * vm, vnet_crypto_op_t ops[],
u32 n_ops);
void vnet_crypto_set_async_dispatch (u8 mode, u8 adaptive);
-int vnet_crypto_set_handler2 (char *ops_handler_name, char *engine,
- crypto_op_class_type_t oct);
+
+typedef struct
+{
+ char *handler_name;
+ char *engine;
+ u8 set_simple : 1;
+ u8 set_chained : 1;
+ u8 set_async : 1;
+} vnet_crypto_set_handlers_args_t;
+
+int vnet_crypto_set_handlers (vnet_crypto_set_handlers_args_t *);
int vnet_crypto_is_set_handler (vnet_crypto_alg_t alg);
u32 vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg,
@@ -503,12 +468,8 @@ u32 vnet_crypto_key_add_linked (vlib_main_t * vm,
vnet_crypto_key_index_t index_crypto,
vnet_crypto_key_index_t index_integ);
-int vnet_crypto_set_async_handler2 (char *alg_name, char *engine);
-
-int vnet_crypto_is_set_async_handler (vnet_crypto_async_op_id_t opt);
-
-vnet_crypto_async_alg_t vnet_crypto_link_algs (vnet_crypto_alg_t crypto_alg,
- vnet_crypto_alg_t integ_alg);
+vnet_crypto_alg_t vnet_crypto_link_algs (vnet_crypto_alg_t crypto_alg,
+ vnet_crypto_alg_t integ_alg);
format_function_t format_vnet_crypto_alg;
format_function_t format_vnet_crypto_engine;
@@ -517,10 +478,6 @@ format_function_t format_vnet_crypto_op_type;
format_function_t format_vnet_crypto_op_status;
unformat_function_t unformat_vnet_crypto_alg;
-format_function_t format_vnet_crypto_async_op;
-format_function_t format_vnet_crypto_async_alg;
-format_function_t format_vnet_crypto_async_op_type;
-
static_always_inline void
vnet_crypto_op_init (vnet_crypto_op_t * op, vnet_crypto_op_id_t type)
{
@@ -545,19 +502,13 @@ static_always_inline vnet_crypto_key_t *
vnet_crypto_get_key (vnet_crypto_key_index_t index)
{
vnet_crypto_main_t *cm = &crypto_main;
- return vec_elt_at_index (cm->keys, index);
-}
-
-static_always_inline int
-vnet_crypto_set_handler (char *alg_name, char *engine)
-{
- return vnet_crypto_set_handler2 (alg_name, engine, CRYPTO_OP_BOTH);
+ return cm->keys[index];
}
/** async crypto inline functions **/
static_always_inline vnet_crypto_async_frame_t *
-vnet_crypto_async_get_frame (vlib_main_t * vm, vnet_crypto_async_op_id_t opt)
+vnet_crypto_async_get_frame (vlib_main_t *vm, vnet_crypto_op_id_t opt)
{
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_thread_t *ct = cm->threads + vm->thread_index;
@@ -592,19 +543,22 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
{
vnet_crypto_main_t *cm = &crypto_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
+ vnet_crypto_op_id_t op = frame->op;
+ vnet_crypto_frame_enq_fn_t *fn =
+ cm->opt_data[op].handlers[VNET_CRYPTO_HANDLER_TYPE_ASYNC];
u32 i;
vlib_node_t *n;
frame->state = VNET_CRYPTO_FRAME_STATE_PENDING;
frame->enqueue_thread_index = vm->thread_index;
- if (PREDICT_FALSE (cm->enqueue_handlers == NULL))
+ if (PREDICT_FALSE (fn == 0))
{
frame->state = VNET_CRYPTO_FRAME_STATE_ELT_ERROR;
return -1;
}
- int ret = (cm->enqueue_handlers[frame->op]) (vm, frame);
+ int ret = fn (vm, frame);
if (PREDICT_TRUE (ret == 0))
{
@@ -656,7 +610,7 @@ vnet_crypto_async_add_to_frame (vlib_main_t *vm, vnet_crypto_async_frame_t *f,
static_always_inline void
vnet_crypto_async_reset_frame (vnet_crypto_async_frame_t * f)
{
- vnet_crypto_async_op_id_t opt;
+ vnet_crypto_op_id_t opt;
ASSERT (f != 0);
ASSERT ((f->state == VNET_CRYPTO_FRAME_STATE_NOT_PROCESSED
|| f->state == VNET_CRYPTO_FRAME_STATE_ELT_ERROR));
diff --git a/src/vnet/crypto/crypto_api.c b/src/vnet/crypto/crypto_api.c
index e701864a5ba..e7322cdd553 100644
--- a/src/vnet/crypto/crypto_api.c
+++ b/src/vnet/crypto/crypto_api.c
@@ -68,18 +68,23 @@ vl_api_crypto_set_handler_t_handler (vl_api_crypto_set_handler_t * mp)
{
vl_api_crypto_set_handler_reply_t *rmp;
int rv = 0;
- char *engine;
- char *alg_name;
- crypto_op_class_type_t oct;
-
- engine = (char *) mp->engine;
- alg_name = (char *) mp->alg_name;
- oct = (crypto_op_class_type_t) mp->oct;
-
- if (mp->is_async)
- rv = vnet_crypto_set_async_handler2 (alg_name, engine);
- else
- rv = vnet_crypto_set_handler2 (alg_name, engine, oct);
+
+ enum
+ {
+ CRYPTO_OP_SIMPLE,
+ CRYPTO_OP_CHAINED,
+ CRYPTO_OP_BOTH,
+ } oct = (typeof (oct)) mp->oct;
+
+ vnet_crypto_set_handlers_args_t args = {
+ .engine = (char *) mp->engine,
+ .handler_name = (char *) mp->alg_name,
+ .set_async = mp->is_async != 0,
+ .set_simple = oct == CRYPTO_OP_SIMPLE || oct == CRYPTO_OP_BOTH,
+ .set_chained = oct == CRYPTO_OP_CHAINED || oct == CRYPTO_OP_BOTH,
+ };
+
+ rv = vnet_crypto_set_handlers (&args);
REPLY_MACRO (VL_API_CRYPTO_SET_HANDLER_REPLY);
}
diff --git a/src/vnet/crypto/engine.h b/src/vnet/crypto/engine.h
index 993befb393a..517b6ec3457 100644
--- a/src/vnet/crypto/engine.h
+++ b/src/vnet/crypto/engine.h
@@ -12,8 +12,8 @@ typedef unsigned int u32;
typedef struct
{
vnet_crypto_op_id_t opt;
- vnet_crypto_ops_handler_t *fn;
- vnet_crypto_chained_ops_handler_t *cfn;
+ vnet_crypto_simple_op_fn_t *fn;
+ vnet_crypto_chained_op_fn_t *cfn;
} vnet_crypto_engine_op_handlers_t;
struct vnet_crypto_engine_registration;
@@ -31,7 +31,7 @@ typedef struct vnet_crypto_engine_registration
u32 num_threads;
void *per_thread_data;
vnet_crypto_engine_init_fn_t *init_fn;
- vnet_crypto_key_handler_t *key_handler;
+ vnet_crypto_key_fn_t *key_handler;
vnet_crypto_engine_op_handlers_t *op_handlers;
} vnet_crypto_engine_registration_t;
diff --git a/src/vnet/crypto/format.c b/src/vnet/crypto/format.c
index c503ac81663..cfcee2f4572 100644
--- a/src/vnet/crypto/format.c
+++ b/src/vnet/crypto/format.c
@@ -22,7 +22,7 @@ format_vnet_crypto_alg (u8 * s, va_list * args)
{
vnet_crypto_alg_t alg = va_arg (*args, vnet_crypto_alg_t);
vnet_crypto_main_t *cm = &crypto_main;
- vnet_crypto_alg_data_t *d = vec_elt_at_index (cm->algs, alg);
+ vnet_crypto_alg_data_t *d = cm->algs + alg;
return format (s, "%s", d->name);
}
@@ -105,6 +105,7 @@ format_vnet_crypto_engine (u8 * s, va_list * args)
return format (s, "%s", e->name);
}
+#if 0
u8 *
format_vnet_crypto_async_op_type (u8 * s, va_list * args)
{
@@ -125,7 +126,7 @@ format_vnet_crypto_async_op_type (u8 * s, va_list * args)
u8 *
format_vnet_crypto_async_alg (u8 * s, va_list * args)
{
- vnet_crypto_async_alg_t alg = va_arg (*args, vnet_crypto_async_alg_t);
+ vnet_crypto_alg_t alg = va_arg (*args, vnet_crypto_alg_t);
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_async_alg_data_t *d = vec_elt_at_index (cm->async_algs, alg);
return format (s, "%s", d->name);
@@ -141,6 +142,7 @@ format_vnet_crypto_async_op (u8 * s, va_list * args)
return format (s, "%U-%U", format_vnet_crypto_async_op_type, otd->type,
format_vnet_crypto_async_alg, otd->alg);
}
+#endif
/*
* fd.io coding-style-patch-verification: ON
diff --git a/src/vnet/crypto/main.c b/src/vnet/crypto/main.c
new file mode 100644
index 00000000000..4f00e9b5c62
--- /dev/null
+++ b/src/vnet/crypto/main.c
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/crypto/crypto.h>
+#include <vnet/crypto/engine.h>
+
+vnet_crypto_main_t crypto_main =
+{
+ .algs = {
+#define _(n, s, ...) \
+ [VNET_CRYPTO_ALG_##n] = { \
+ .name = (s), \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_ENCRYPT] = VNET_CRYPTO_OP_##n##_ENC, \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_DECRYPT] = VNET_CRYPTO_OP_##n##_DEC, \
+ __VA_ARGS__, \
+ },
+ foreach_crypto_cipher_alg foreach_crypto_aead_alg
+#undef _
+
+#define _(n, s) \
+ [VNET_CRYPTO_ALG_HASH_##n] = { \
+ .name = (s), \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_HASH] = VNET_CRYPTO_OP_##n##_HASH, \
+ }, \
+ [VNET_CRYPTO_ALG_HMAC_##n] = { \
+ .name = ("hmac-" s), \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_HMAC] = VNET_CRYPTO_OP_##n##_HMAC, \
+ .variable_key_length = 1, \
+ },
+ foreach_crypto_hash_alg
+#undef _
+
+#define _(n, s, k, t, a) \
+ [VNET_CRYPTO_ALG_##n##_TAG##t##_AAD##a] = { \
+ .name = (s), \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_ENCRYPT] = \
+ VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC, \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_DECRYPT] = \
+ VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC, \
+ },
+ foreach_crypto_aead_async_alg
+#undef _
+
+#define _(c, h, s, k, d) \
+ [VNET_CRYPTO_ALG_##c##_##h##_TAG##d] = { \
+ .name = (s), \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_ENCRYPT] = \
+ VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC, \
+ .op_by_type[VNET_CRYPTO_OP_TYPE_DECRYPT] = \
+ VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC, \
+ },
+ foreach_crypto_link_async_alg
+#undef _
+
+ },
+ .opt_data = {
+#define _(n, s, ...) \
+ [VNET_CRYPTO_OP_##n##_ENC] = { \
+ .alg = VNET_CRYPTO_ALG_##n, \
+ .type = VNET_CRYPTO_OP_TYPE_ENCRYPT, \
+ }, \
+ [VNET_CRYPTO_OP_##n##_DEC] = { \
+ .alg = VNET_CRYPTO_ALG_##n, \
+ .type = VNET_CRYPTO_OP_TYPE_DECRYPT, \
+ },
+ foreach_crypto_cipher_alg foreach_crypto_aead_alg
+#undef _
+
+#define _(n, s) \
+ [VNET_CRYPTO_OP_##n##_HASH] = { \
+ .alg = VNET_CRYPTO_ALG_HASH_##n, \
+ .type = VNET_CRYPTO_OP_TYPE_HASH, \
+ }, \
+ [VNET_CRYPTO_OP_##n##_HMAC] = { \
+ .alg = VNET_CRYPTO_ALG_HMAC_##n, \
+ .type = VNET_CRYPTO_OP_TYPE_HMAC, \
+ },
+ foreach_crypto_hash_alg
+#undef _
+
+#define _(n, s, k, t, a) \
+ [VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_ENC] = { \
+ .alg = VNET_CRYPTO_ALG_##n##_TAG##t##_AAD##a, \
+ .type = VNET_CRYPTO_OP_TYPE_ENCRYPT, \
+ }, \
+ [VNET_CRYPTO_OP_##n##_TAG##t##_AAD##a##_DEC] = { \
+ .alg = VNET_CRYPTO_ALG_##n##_TAG##t##_AAD##a, \
+ .type = VNET_CRYPTO_OP_TYPE_DECRYPT, \
+ },
+ foreach_crypto_aead_async_alg
+#undef _
+
+#define _(c, h, s, k, d) \
+ [VNET_CRYPTO_OP_##c##_##h##_TAG##d##_ENC] = { \
+ .alg = VNET_CRYPTO_ALG_##c##_##h##_TAG##d, \
+ .type = VNET_CRYPTO_OP_TYPE_ENCRYPT, \
+ }, \
+ [VNET_CRYPTO_OP_##c##_##h##_TAG##d##_DEC] = { \
+ .alg = VNET_CRYPTO_ALG_##c##_##h##_TAG##d, \
+ .type = VNET_CRYPTO_OP_TYPE_DECRYPT, \
+ },
+ foreach_crypto_link_async_alg
+#undef _
+
+ },
+};
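
The new main.c builds the algorithm and op-data tables entirely at compile time from the foreach_crypto_* X-macro lists. A minimal standalone illustration of the same pattern, with a made-up two-entry algorithm list rather than the real VPP lists:

/* Minimal illustration of the X-macro table pattern used above,
 * with a made-up algorithm list (not the VPP foreach_* lists). */
#include <stdio.h>

#define foreach_demo_alg _ (AES_128, "aes-128") _ (AES_256, "aes-256")

typedef enum
{
#define _(n, s) DEMO_ALG_##n,
  foreach_demo_alg
#undef _
    DEMO_N_ALGS,
} demo_alg_t;

typedef struct { const char *name; } demo_alg_data_t;

static const demo_alg_data_t demo_algs[DEMO_N_ALGS] = {
#define _(n, s) [DEMO_ALG_##n] = { .name = (s) },
  foreach_demo_alg
#undef _
};

int
main (void)
{
  for (int i = 0; i < DEMO_N_ALGS; i++)
    printf ("%d: %s\n", i, demo_algs[i].name);
  return 0;
}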
diff --git a/src/vnet/crypto/node.c b/src/vnet/crypto/node.c
index ee7f344ce68..7d023f3ff9d 100644
--- a/src/vnet/crypto/node.c
+++ b/src/vnet/crypto/node.c
@@ -45,7 +45,7 @@ typedef enum
typedef struct
{
vnet_crypto_op_status_t op_status;
- vnet_crypto_async_op_id_t op;
+ vnet_crypto_op_id_t op;
} crypto_dispatch_trace_t;
static u8 *
@@ -55,15 +55,14 @@ format_crypto_dispatch_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
crypto_dispatch_trace_t *t = va_arg (*args, crypto_dispatch_trace_t *);
- s = format (s, "%U: %U", format_vnet_crypto_async_op, t->op,
+ s = format (s, "%U: %U", format_vnet_crypto_op, t->op,
format_vnet_crypto_op_status, t->op_status);
return s;
}
static void
-vnet_crypto_async_add_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
- vlib_buffer_t * b,
- vnet_crypto_async_op_id_t op_id,
+vnet_crypto_async_add_trace (vlib_main_t *vm, vlib_node_runtime_t *node,
+ vlib_buffer_t *b, vnet_crypto_op_id_t op_id,
vnet_crypto_op_status_t status)
{
crypto_dispatch_trace_t *tr = vlib_add_trace (vm, node, b, sizeof (*tr));
diff --git a/src/vnet/dev/port.c b/src/vnet/dev/port.c
index fccedebdcf4..e538b89a630 100644
--- a/src/vnet/dev/port.c
+++ b/src/vnet/dev/port.c
@@ -564,6 +564,7 @@ vnet_dev_port_if_create (vlib_main_t *vm, vnet_dev_port_t *port, void *ptr)
vnet_dev_port_if_create_args_t *a = ptr;
vnet_dev_port_interfaces_t *ifs = port->interfaces;
vnet_dev_instance_t *di;
+ vnet_dev_tx_queue_t *txq, **qp;
vnet_dev_rv_t rv;
u16 ti = 0;
@@ -614,16 +615,19 @@ vnet_dev_port_if_create (vlib_main_t *vm, vnet_dev_port_t *port, void *ptr)
if ((rv = vnet_dev_tx_queue_alloc (vm, port, ifs->txq_sz)) != VNET_DEV_OK)
goto error;
- foreach_vnet_dev_port_tx_queue (q, port)
+ for (ti = 0; ti < n_threads; ti++)
{
/* if consistent_qp is enabled, we start by assigning queues to workers
* and we end with main */
u16 real_ti = (ti + a->consistent_qp) % n_threads;
- q->assigned_threads = clib_bitmap_set (q->assigned_threads, real_ti, 1);
+ qp = pool_elt_at_index (port->tx_queues, ti % ifs->num_tx_queues);
+ txq = qp[0];
+ txq->assigned_threads =
+ clib_bitmap_set (txq->assigned_threads, real_ti, 1);
log_debug (dev, "port %u tx queue %u assigned to thread %u",
- port->port_id, q->queue_id, real_ti);
- if (++ti >= n_threads)
- break;
+ port->port_id, txq->queue_id, real_ti);
+ if (clib_bitmap_count_set_bits (txq->assigned_threads) > 1)
+ txq->lock_needed = 1;
}
pool_get (dm->dev_instances, di);
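
The rewritten loop iterates over threads rather than queues: thread ti is mapped onto queue ti % num_tx_queues, and a queue ends up lock-protected as soon as more than one thread maps onto it. A simplified model of that assignment, with stand-in types instead of the vnet_dev ones:

/* Simplified model of the tx-queue assignment above: n_threads threads are
 * mapped round-robin onto num_txq queues; a queue used by more than one
 * thread needs a lock. Types and counting are stand-ins, not vnet_dev. */
#include <stdbool.h>
#include <stdio.h>

#define MAX_Q 8

typedef struct { int n_assigned; bool lock_needed; } txq_t;

static void
assign_queues (txq_t *q, int num_txq, int n_threads, int consistent_qp)
{
  for (int ti = 0; ti < n_threads; ti++)
    {
      /* with consistent_qp, workers are assigned first and main last */
      int real_ti = (ti + consistent_qp) % n_threads;
      txq_t *t = &q[ti % num_txq];
      t->n_assigned++;
      if (t->n_assigned > 1)
        t->lock_needed = true;
      printf ("queue %d assigned to thread %d\n", ti % num_txq, real_ti);
    }
}

int
main (void)
{
  txq_t q[MAX_Q] = { 0 };
  assign_queues (q, 2, 5, 1); /* 5 threads share 2 queues -> both locked */
  for (int i = 0; i < 2; i++)
    printf ("queue %d lock_needed=%d\n", i, q[i].lock_needed);
  return 0;
}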
diff --git a/src/vnet/ip/ip4_forward.c b/src/vnet/ip/ip4_forward.c
index a378dc5268a..81d6cd1a0bd 100644
--- a/src/vnet/ip/ip4_forward.c
+++ b/src/vnet/ip/ip4_forward.c
@@ -1550,6 +1550,9 @@ ip4_local_check_src (vlib_buffer_t *b, ip4_header_t *ip0,
lb0 = load_balance_get (lbi0);
dpo0 = load_balance_get_bucket_i (lb0, 0);
+ /* Do not cache result for packets with errors, e.g., invalid csum */
+ last_check->first = *error0 == IP4_ERROR_UNKNOWN_PROTOCOL ? 0 : 1;
+
/*
* Must have a route to source otherwise we drop the packet.
* ip4 broadcasts are accepted, e.g. to make dhcp client work
@@ -1572,7 +1575,6 @@ ip4_local_check_src (vlib_buffer_t *b, ip4_header_t *ip0,
last_check->src.as_u32 = ip0->src_address.as_u32;
last_check->lbi = lbi0;
last_check->error = *error0;
- last_check->first = 0;
last_check->fib_index = vnet_buffer (b)->ip.fib_index;
}
else
@@ -1580,7 +1582,8 @@ ip4_local_check_src (vlib_buffer_t *b, ip4_header_t *ip0,
vnet_buffer (b)->ip.adj_index[VLIB_RX] =
vnet_buffer (b)->ip.adj_index[VLIB_TX];
vnet_buffer (b)->ip.adj_index[VLIB_TX] = last_check->lbi;
- *error0 = last_check->error;
+ *error0 =
+ (*error0 == IP4_ERROR_UNKNOWN_PROTOCOL) ? last_check->error : *error0;
}
}
@@ -1652,6 +1655,9 @@ ip4_local_check_src_x2 (vlib_buffer_t **b, ip4_header_t **ip,
dpo[0] = load_balance_get_bucket_i (lb[0], 0);
dpo[1] = load_balance_get_bucket_i (lb[1], 0);
+ /* Do not cache result for packets with errors, e.g., invalid csum */
+ last_check->first = error[1] == IP4_ERROR_UNKNOWN_PROTOCOL ? 0 : 1;
+
error[0] = ((error[0] == IP4_ERROR_UNKNOWN_PROTOCOL &&
dpo[0]->dpoi_type == DPO_RECEIVE) ?
IP4_ERROR_SPOOFED_LOCAL_PACKETS : error[0]);
@@ -1671,7 +1677,6 @@ ip4_local_check_src_x2 (vlib_buffer_t **b, ip4_header_t **ip,
last_check->src.as_u32 = ip[1]->src_address.as_u32;
last_check->lbi = lbi[1];
last_check->error = error[1];
- last_check->first = 0;
last_check->fib_index = vnet_buffer (b[1])->ip.fib_index;
}
else
@@ -1684,8 +1689,10 @@ ip4_local_check_src_x2 (vlib_buffer_t **b, ip4_header_t **ip,
vnet_buffer (b[1])->ip.adj_index[VLIB_TX];
vnet_buffer (b[1])->ip.adj_index[VLIB_TX] = last_check->lbi;
- error[0] = last_check->error;
- error[1] = last_check->error;
+ error[0] = (error[0] == IP4_ERROR_UNKNOWN_PROTOCOL) ? last_check->error :
+ error[0];
+ error[1] = (error[1] == IP4_ERROR_UNKNOWN_PROTOCOL) ? last_check->error :
+ error[1];
}
}
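
The ip4_forward change narrows the source-check cache: a result is cached only when the packet carried no prior error, and a cached error is applied only while the current error is still IP4_ERROR_UNKNOWN_PROTOCOL. A small standalone sketch of that rule, with illustrative names and error codes:

/* Sketch of the caching rule introduced above: results are cached only for
 * packets whose error is still "unknown protocol" (i.e. no earlier error),
 * and a cached error never overwrites a real one. Names are illustrative. */
#include <stdio.h>

enum { ERR_UNKNOWN_PROTOCOL = 0, ERR_BAD_CSUM = 1, ERR_SPOOFED = 2 };

typedef struct { unsigned src; int error; int first; } last_check_t;

static void
check_src (last_check_t *lc, unsigned src, int *error)
{
  if (lc->first || lc->src != src)
    {
      int computed = (src == 0xc0a80101) ? ERR_SPOOFED : ERR_UNKNOWN_PROTOCOL;
      /* do not cache the result for packets that already carry an error */
      lc->first = (*error == ERR_UNKNOWN_PROTOCOL) ? 0 : 1;
      if (*error == ERR_UNKNOWN_PROTOCOL)
        *error = computed;
      lc->src = src;
      lc->error = *error;
    }
  else
    *error = (*error == ERR_UNKNOWN_PROTOCOL) ? lc->error : *error;
}

int
main (void)
{
  last_check_t lc = { .first = 1 };
  int e1 = ERR_BAD_CSUM, e2 = ERR_UNKNOWN_PROTOCOL;
  check_src (&lc, 0xc0a80101, &e1); /* bad csum: not cached, error kept */
  check_src (&lc, 0xc0a80101, &e2); /* cache still cold, so recomputed */
  printf ("e1=%d e2=%d\n", e1, e2);
  return 0;
}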
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index ec4db0fed57..6b62ff7f05c 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -127,7 +127,7 @@ ah_decrypt_inline (vlib_main_t * vm,
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
from = vlib_frame_vector_args (from_frame);
n_left = from_frame->n_vectors;
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_inb_rt_t *irt = 0;
bool anti_replay_result;
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
@@ -149,25 +149,25 @@ ah_decrypt_inline (vlib_main_t * vm,
current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = ipsec_sa_get (current_sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
vlib_prefetch_combined_counter (&ipsec_sa_counters,
thread_index, current_sa_index);
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == irt->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&irt->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->thread_index))
+ if (PREDICT_TRUE (thread_index != irt->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = irt->thread_index;
next[0] = AH_DECRYPT_NEXT_HANDOFF;
goto next;
}
@@ -202,15 +202,15 @@ ah_decrypt_inline (vlib_main_t * vm,
pd->seq = clib_host_to_net_u32 (ah0->seq_no);
/* anti-replay check */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+ irt, pd->seq, ~0, false, &pd->seq_hi, true);
}
else
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+ irt, pd->seq, ~0, false, &pd->seq_hi, false);
}
if (anti_replay_result)
{
@@ -223,13 +223,14 @@ ah_decrypt_inline (vlib_main_t * vm,
current_sa_bytes += b[0]->current_length;
current_sa_pkts += 1;
- pd->icv_size = sa0->integ_icv_size;
+ pd->icv_size = irt->integ_icv_size;
pd->nexthdr_cached = ah0->nexthdr;
- if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+ if (PREDICT_TRUE (irt->integ_icv_size))
{
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
- pd->current_data + b[0]->current_length
- + sizeof (u32) > buffer_data_size))
+ if (PREDICT_FALSE (irt->use_esn && pd->current_data +
+ b[0]->current_length +
+ sizeof (u32) >
+ buffer_data_size))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_NO_TAIL_SPACE,
@@ -239,16 +240,16 @@ ah_decrypt_inline (vlib_main_t * vm,
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, irt->integ_op_id);
op->src = (u8 *) ih4;
op->len = b[0]->current_length;
op->digest = (u8 *) ih4 - pd->icv_size;
op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
op->digest_len = pd->icv_size;
- op->key_index = sa0->integ_key_index;
+ op->key_index = irt->integ_key_index;
op->user_data = b - bufs;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
@@ -311,15 +312,15 @@ ah_decrypt_inline (vlib_main_t * vm,
if (next[0] < AH_DECRYPT_N_NEXT)
goto trace;
- sa0 = ipsec_sa_get (pd->sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (pd->sa_index);
- if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+ if (PREDICT_TRUE (irt->integ_icv_size))
{
/* redo the anti-reply check. see esp_decrypt for details */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
if (ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, pd->seq_hi, true, NULL, true))
+ irt, pd->seq, pd->seq_hi, true, NULL, true))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
@@ -327,12 +328,12 @@ ah_decrypt_inline (vlib_main_t * vm,
goto trace;
}
n_lost = ipsec_sa_anti_replay_advance (
- sa0, thread_index, pd->seq, pd->seq_hi, true);
+ irt, thread_index, pd->seq, pd->seq_hi, true);
}
else
{
if (ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, pd->seq_hi, true, NULL, false))
+ irt, pd->seq, pd->seq_hi, true, NULL, false))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
@@ -340,7 +341,7 @@ ah_decrypt_inline (vlib_main_t * vm,
goto trace;
}
n_lost = ipsec_sa_anti_replay_advance (
- sa0, thread_index, pd->seq, pd->seq_hi, false);
+ irt, thread_index, pd->seq, pd->seq_hi, false);
}
vlib_prefetch_simple_counter (
&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
@@ -354,7 +355,7 @@ ah_decrypt_inline (vlib_main_t * vm,
b[0]->flags &= ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (irt->is_tunnel))
{ /* tunnel mode */
if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
@@ -424,10 +425,10 @@ ah_decrypt_inline (vlib_main_t * vm,
trace:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- sa0 = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
+ ipsec_sa_t *sa = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
ah_decrypt_trace_t *tr =
vlib_add_trace (vm, node, b[0], sizeof (*tr));
- tr->integ_alg = sa0->integ_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq_num = pd->seq;
}
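
Ownership of an SA by a worker thread follows the same claim-or-handoff pattern in all of these nodes: the first packet claims the SA with a compare-and-swap, and later packets arriving on other threads are handed off to the owner. A standalone sketch using GCC atomics as a stand-in for the clib wrappers:

/* Standalone sketch of the SA thread-ownership claim used in these nodes.
 * Uses __atomic builtins instead of clib_atomic_cmp_and_swap. */
#include <stdint.h>
#include <stdio.h>

#define OWNER_NONE ((uint16_t) ~0)

static uint16_t
claim_or_handoff (uint16_t *owner, uint16_t this_thread)
{
  uint16_t expected = OWNER_NONE;
  /* the claim only succeeds if no thread owns the SA yet */
  __atomic_compare_exchange_n (owner, &expected, this_thread, 0,
                               __ATOMIC_ACQ_REL, __ATOMIC_ACQUIRE);
  return *owner; /* packets go to whichever thread ended up owning it */
}

int
main (void)
{
  uint16_t owner = OWNER_NONE;
  printf ("owner after thread 3 claim: %u\n", claim_or_handoff (&owner, 3));
  printf ("owner after thread 5 claim: %u\n", claim_or_handoff (&owner, 5));
  return 0;
}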
diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c
index 86694660878..1b32b8d2c7c 100644
--- a/src/vnet/ipsec/ah_encrypt.c
+++ b/src/vnet/ipsec/ah_encrypt.c
@@ -43,8 +43,7 @@ typedef struct
{
u32 sa_index;
u32 spi;
- u32 seq_lo;
- u32 seq_hi;
+ u64 seq;
ipsec_integ_alg_t integ_alg;
} ah_encrypt_trace_t;
@@ -56,9 +55,9 @@ format_ah_encrypt_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);
- s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %u:%u integrity %U",
- t->sa_index, t->spi, t->spi, t->seq_hi, t->seq_lo,
- format_ipsec_integ_alg, t->integ_alg);
+ s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %lu integrity %U",
+ t->sa_index, t->spi, t->spi, t->seq, format_ipsec_integ_alg,
+ t->integ_alg);
return s;
}
@@ -128,7 +127,7 @@ ah_encrypt_inline (vlib_main_t * vm,
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_outb_rt_t *ort = 0;
ip4_and_ah_header_t *ih0, *oh0 = 0;
ip6_and_ah_header_t *ih6_0, *oh6_0 = 0;
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
@@ -158,7 +157,7 @@ ah_encrypt_inline (vlib_main_t * vm,
current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = ipsec_sa_get (current_sa_index);
+ ort = ipsec_sa_get_outb_rt_by_index (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
@@ -168,23 +167,23 @@ ah_encrypt_inline (vlib_main_t * vm,
pd->sa_index = current_sa_index;
next[0] = AH_ENCRYPT_NEXT_DROP;
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == ort->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&ort->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->thread_index))
+ if (PREDICT_TRUE (thread_index != ort->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = ort->thread_index;
next[0] = AH_ENCRYPT_NEXT_HANDOFF;
goto next;
}
- if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ if (PREDICT_FALSE (esp_seq_advance (ort)))
{
ah_encrypt_set_next_index (b[0], node, vm->thread_index,
AH_ENCRYPT_ERROR_SEQ_CYCLED, 0, next,
@@ -199,7 +198,7 @@ ah_encrypt_inline (vlib_main_t * vm,
ssize_t adv;
ih0 = vlib_buffer_get_current (b[0]);
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
if (is_ip6)
adv = -sizeof (ip6_and_ah_header_t);
@@ -211,11 +210,11 @@ ah_encrypt_inline (vlib_main_t * vm,
adv = -sizeof (ah_header_t);
}
- icv_size = sa0->integ_icv_size;
+ icv_size = ort->integ_icv_size;
const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6);
adv -= padding_len;
/* transport mode save the eth header before it is overwritten */
- if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (!ort->is_tunnel))
{
const u32 l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
u8 *l2_hdr_in = (u8 *) vlib_buffer_get_current (b[0]) - l2_len;
@@ -238,16 +237,16 @@ ah_encrypt_inline (vlib_main_t * vm,
oh6_0->ip6.ip_version_traffic_class_and_flow_label =
ih6_0->ip6.ip_version_traffic_class_and_flow_label;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (ort->is_tunnel))
{
- ip6_set_dscp_network_order (&oh6_0->ip6, sa0->tunnel.t_dscp);
- tunnel_encap_fixup_6o6 (sa0->tunnel_flags, &ih6_0->ip6,
+ ip6_set_dscp_network_order (&oh6_0->ip6, ort->t_dscp);
+ tunnel_encap_fixup_6o6 (ort->tunnel_flags, &ih6_0->ip6,
&oh6_0->ip6);
}
pd->ip_version_traffic_class_and_flow_label =
oh6_0->ip6.ip_version_traffic_class_and_flow_label;
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
next_hdr_type = IP_PROTOCOL_IPV6;
}
@@ -260,8 +259,8 @@ ah_encrypt_inline (vlib_main_t * vm,
clib_memcpy_fast (&oh6_0->ip6, &ip6_hdr_template, 8);
oh6_0->ah.reserved = 0;
oh6_0->ah.nexthdr = next_hdr_type;
- oh6_0->ah.spi = clib_net_to_host_u32 (sa0->spi);
- oh6_0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
+ oh6_0->ah.spi = ort->spi_be;
+ oh6_0->ah.seq_no = clib_net_to_host_u32 (ort->seq64);
oh6_0->ip6.payload_length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
sizeof (ip6_header_t));
@@ -274,18 +273,18 @@ ah_encrypt_inline (vlib_main_t * vm,
oh0 = vlib_buffer_get_current (b[0]);
pd->ttl = ih0->ip4.ttl;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (ort->is_tunnel))
{
- if (sa0->tunnel.t_dscp)
- pd->tos = sa0->tunnel.t_dscp << 2;
+ if (ort->t_dscp)
+ pd->tos = ort->t_dscp << 2;
else
{
pd->tos = ih0->ip4.tos;
- if (!(sa0->tunnel_flags &
+ if (!(ort->tunnel_flags &
TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP))
pd->tos &= 0x3;
- if (!(sa0->tunnel_flags &
+ if (!(ort->tunnel_flags &
TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN))
pd->tos &= 0xfc;
}
@@ -298,7 +297,7 @@ ah_encrypt_inline (vlib_main_t * vm,
pd->current_data = b[0]->current_data;
clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
next_hdr_type = IP_PROTOCOL_IP_IN_IP;
}
@@ -314,57 +313,51 @@ ah_encrypt_inline (vlib_main_t * vm,
oh0->ip4.length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
- oh0->ah.spi = clib_net_to_host_u32 (sa0->spi);
- oh0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
+ oh0->ah.spi = ort->spi_be;
+ oh0->ah.seq_no = clib_net_to_host_u32 (ort->seq64);
oh0->ah.nexthdr = next_hdr_type;
oh0->ah.hdrlen =
(sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
}
- if (PREDICT_TRUE (!is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
+ if (PREDICT_TRUE (!is_ip6 && ort->is_tunnel && !ort->is_tunnel_v6))
{
- clib_memcpy_fast (&oh0->ip4.address_pair,
- &sa0->ip4_hdr.address_pair,
+ clib_memcpy_fast (&oh0->ip4.address_pair, &ort->ip4_hdr.address_pair,
sizeof (ip4_address_pair_t));
- next[0] = sa0->dpo.dpoi_next_node;
- vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
+ next[0] = ort->dpo.dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = ort->dpo.dpoi_index;
}
- else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
- ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ else if (is_ip6 && ort->is_tunnel && ort->is_tunnel_v6)
{
- clib_memcpy_fast (&oh6_0->ip6.src_address,
- &sa0->ip6_hdr.src_address,
+ clib_memcpy_fast (&oh6_0->ip6.src_address, &ort->ip6_hdr.src_address,
sizeof (ip6_address_t) * 2);
- next[0] = sa0->dpo.dpoi_next_node;
- vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
+ next[0] = ort->dpo.dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = ort->dpo.dpoi_index;
}
- if (PREDICT_TRUE (sa0->integ_op_id))
+ if (PREDICT_TRUE (ort->integ_op_id))
{
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, ort->integ_op_id);
op->src = vlib_buffer_get_current (b[0]);
op->len = b[0]->current_length;
op->digest = vlib_buffer_get_current (b[0]) + ip_hdr_size +
sizeof (ah_header_t);
clib_memset (op->digest, 0, icv_size);
op->digest_len = icv_size;
- op->key_index = sa0->integ_key_index;
+ op->key_index = ort->integ_key_index;
op->user_data = b - bufs;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (ort->use_esn)
{
- u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
-
- op->len += sizeof (seq_hi);
- clib_memcpy (op->src + b[0]->current_length, &seq_hi,
- sizeof (seq_hi));
+ *(u32u *) (op->src + b[0]->current_length) =
+ clib_host_to_net_u32 (ort->seq64 >> 32);
+ op->len += sizeof (u32);
}
}
- if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
+ if (!ort->is_tunnel)
{
next[0] = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT;
vlib_buffer_advance (b[0], -sizeof (ethernet_header_t));
@@ -373,13 +366,14 @@ ah_encrypt_inline (vlib_main_t * vm,
next:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_t *sa = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_outb_rt_t *ort =
+ ipsec_sa_get_outb_rt_by_index (pd->sa_index);
ah_encrypt_trace_t *tr =
vlib_add_trace (vm, node, b[0], sizeof (*tr));
- tr->spi = sa0->spi;
- tr->seq_lo = sa0->seq;
- tr->seq_hi = sa0->seq_hi;
- tr->integ_alg = sa0->integ_alg;
+ tr->spi = sa->spi;
+ tr->seq = ort->seq64;
+ tr->integ_alg = sa->integ_alg;
tr->sa_index = pd->sa_index;
}
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 1c3ce776ad2..a31e3145429 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -79,46 +79,28 @@ typedef struct esp_aead_t_
u32 data[3];
} __clib_packed esp_aead_t;
-#define ESP_SEQ_MAX (4294967295UL)
-
u8 *format_esp_header (u8 * s, va_list * args);
/* TODO seq increment should be atomic to be accessed by multiple workers */
always_inline int
-esp_seq_advance (ipsec_sa_t * sa)
+esp_seq_advance (ipsec_sa_outb_rt_t *ort)
{
- if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
- {
- if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
- {
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
- sa->seq_hi == ESP_SEQ_MAX))
- return 1;
- sa->seq_hi++;
- }
- sa->seq++;
- }
- else
- {
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
- sa->seq == ESP_SEQ_MAX))
- return 1;
- sa->seq++;
- }
-
+ u64 max = ort->use_esn ? CLIB_U64_MAX : CLIB_U32_MAX;
+ if (ort->seq64 == max)
+ return 1;
+ ort->seq64++;
return 0;
}
always_inline u16
-esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
- u32 seq_hi)
+esp_aad_fill (u8 *data, const esp_header_t *esp, int use_esn, u32 seq_hi)
{
esp_aead_t *aad;
aad = (esp_aead_t *) data;
aad->data[0] = esp->spi;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (use_esn)
{
/* SPI, seq-hi, seq-low */
aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
@@ -218,7 +200,8 @@ typedef struct
{
u8 icv_sz;
u8 iv_sz;
- ipsec_sa_flags_t flags;
+ u8 udp_sz;
+ u8 is_transport;
u32 sa_index;
};
u64 sa_data;
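
With the single 64-bit counter introduced here, the low 32 bits are what goes on the wire in esp->seq, while the high 32 bits form the ESN used only for AAD and ICV computation; the cycle limit is 2^32-1 without ESN and 2^64-1 with it. A minimal sketch of that split, with illustrative helper names:

/* Sketch of how the single 64-bit counter maps onto the wire, assuming the
 * semantics above: low 32 bits are the ESP sequence number, high 32 bits
 * the ESN. Helper names here are illustrative, not the VPP ones. */
#include <stdint.h>
#include <stdio.h>

static int
seq_advance (uint64_t *seq64, int use_esn)
{
  uint64_t max = use_esn ? UINT64_MAX : UINT32_MAX;
  if (*seq64 == max)
    return 1; /* cycled */
  (*seq64)++;
  return 0;
}

int
main (void)
{
  uint64_t seq64 = 0xffffffffULL; /* low 32 bits about to wrap */
  seq_advance (&seq64, 1);
  uint32_t seq_lo = (uint32_t) seq64;         /* goes into esp->seq */
  uint32_t seq_hi = (uint32_t) (seq64 >> 32); /* ESN, only in AAD/ICV */
  printf ("seq-lo %u seq-hi %u\n", seq_lo, seq_hi);
  return 0;
}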
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 01b2d2971b0..345a60a7fdd 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -251,11 +251,12 @@ esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
}
static_always_inline u16
-esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
- esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
- u16 *len, vlib_buffer_t *b, u8 *payload)
+esp_insert_esn (vlib_main_t *vm, ipsec_sa_inb_rt_t *irt,
+ esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2,
+ u32 *data_len, u8 **digest, u16 *len, vlib_buffer_t *b,
+ u8 *payload)
{
- if (!ipsec_sa_is_set_USE_ESN (sa))
+ if (!irt->use_esn)
return 0;
/* shift ICV by 4 bytes to insert ESN */
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
@@ -288,17 +289,17 @@ esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
}
static_always_inline u8 *
-esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
- ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
+esp_move_icv_esn (vlib_main_t *vm, vlib_buffer_t *first,
+ esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2, u16 icv_sz,
+ ipsec_sa_inb_rt_t *irt, u8 *extra_esn, u32 *len)
{
u16 dif = 0;
u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
if (dif)
*len -= dif;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
@@ -326,9 +327,9 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
const esp_decrypt_packet_data_t *pd,
- esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
- vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
- u32 start_len, u8 **digest, u16 *n_ch,
+ esp_decrypt_packet_data2_t *pd2,
+ ipsec_sa_inb_rt_t *irt, vlib_buffer_t *b, u8 icv_sz,
+ u8 *start_src, u32 start_len, u8 **digest, u16 *n_ch,
u32 *integ_total_len)
{
vnet_crypto_op_chunk_t *ch;
@@ -350,7 +351,7 @@ esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
ch->len = cb->current_length;
else
ch->len = cb->current_length - icv_sz;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
u8 tmp[ESP_MAX_ICV_SIZE];
@@ -422,11 +423,11 @@ esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
static_always_inline u32
-esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2,
- ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
- u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
+esp_decrypt_chain_crypto (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2,
+ ipsec_sa_inb_rt_t *irt, vlib_buffer_t *b, u8 icv_sz,
+ u8 *start, u32 start_len, u8 **tag, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
@@ -445,7 +446,7 @@ esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
ch->src = ch->dst = vlib_buffer_get_current (cb);
if (pd2->lb == cb)
{
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
if (pd2->lb->current_length < icv_sz)
{
@@ -496,8 +497,9 @@ esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
- ipsec_sa_t *sa0, u8 *payload, u16 len, u8 icv_sz,
- u8 iv_sz, esp_decrypt_packet_data_t *pd,
+ ipsec_sa_inb_rt_t *irt, u8 *payload, u16 len,
+ u8 icv_sz, u8 iv_sz,
+ esp_decrypt_packet_data_t *pd,
esp_decrypt_packet_data2_t *pd2, vlib_buffer_t *b,
u32 index)
{
@@ -506,10 +508,10 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t _op, *op = &_op;
const u8 esp_sz = sizeof (esp_header_t);
- if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+ if (PREDICT_TRUE (irt->integ_op_id != VNET_CRYPTO_OP_NONE))
{
- vnet_crypto_op_init (op, sa0->integ_op_id);
- op->key_index = sa0->integ_key_index;
+ vnet_crypto_op_init (op, irt->integ_op_id);
+ op->key_index = irt->integ_key_index;
op->src = payload;
op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
op->user_data = index;
@@ -531,9 +533,8 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
if (pd2->lb->current_length < icv_sz)
{
u8 extra_esn = 0;
- op->digest =
- esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
- &extra_esn, &op->len);
+ op->digest = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, irt,
+ &extra_esn, &op->len);
if (extra_esn)
{
@@ -558,7 +559,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
- if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, irt, b, icv_sz,
payload, pd->current_length,
&op->digest, &op->n_chunks, 0) < 0)
return ESP_DECRYPT_ERROR_NO_BUFFERS;
@@ -566,7 +567,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
else
{
integ_ops = &ptd->integ_ops;
- esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
+ esp_insert_esn (vm, irt, pd, pd2, &op->len, &op->digest, &len, b,
payload);
}
out:
@@ -576,27 +577,28 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
payload += esp_sz;
len -= esp_sz;
- if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
+ if (irt->cipher_op_id != VNET_CRYPTO_OP_NONE)
{
- vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
- op->key_index = sa0->crypto_key_index;
+ vnet_crypto_op_init (op, irt->cipher_op_id);
+ op->key_index = irt->cipher_key_index;
op->iv = payload;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (irt->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
/* constuct aad in a scratch space in front of the nonce */
esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
+ op->aad_len =
+ esp_aad_fill (op->aad, esp0, irt->use_esn, pd->seq_hi);
op->tag = payload + len;
op->tag_len = 16;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (irt->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
payload -= iv_sz;
@@ -607,7 +609,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
{
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = irt->salt;
ASSERT (sizeof (u64) == iv_sz);
nonce->iv = *(u64 *) op->iv;
op->iv = (u8 *) nonce;
@@ -621,9 +623,9 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
/* buffer is chained */
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
- esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
- payload, len - pd->iv_sz + pd->icv_sz,
- &op->tag, &op->n_chunks);
+ esp_decrypt_chain_crypto (vm, ptd, pd, pd2, irt, b, icv_sz, payload,
+ len - pd->iv_sz + pd->icv_sz, &op->tag,
+ &op->n_chunks);
crypto_ops = &ptd->chained_crypto_ops;
}
else
@@ -639,8 +641,9 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
- vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
- u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
+ vnet_crypto_async_frame_t *f,
+ ipsec_sa_inb_rt_t *irt, u8 *payload, u16 len,
+ u8 icv_sz, u8 iv_sz,
esp_decrypt_packet_data_t *pd,
esp_decrypt_packet_data2_t *pd2, u32 bi,
vlib_buffer_t *b, u16 async_next)
@@ -649,17 +652,17 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
- const u32 key_index = sa0->crypto_key_index;
+ const u32 key_index = irt->cipher_key_index;
u32 crypto_len, integ_len = 0;
i16 crypto_start_offset, integ_start_offset = 0;
u8 flags = 0;
- if (!ipsec_sa_is_set_IS_AEAD (sa0))
+ if (!irt->is_aead)
{
/* linked algs */
integ_start_offset = payload - b->data;
integ_len = len;
- if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+ if (PREDICT_TRUE (irt->integ_op_id != VNET_CRYPTO_OP_NONE))
flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
if (pd->is_chain)
@@ -674,8 +677,8 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
if (pd2->lb->current_length < icv_sz)
{
u8 extra_esn = 0;
- tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
- &extra_esn, &integ_len);
+ tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, irt, &extra_esn,
+ &integ_len);
if (extra_esn)
{
@@ -698,7 +701,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, irt, b, icv_sz,
payload, pd->current_length, &tag, 0,
&integ_len) < 0)
{
@@ -707,7 +710,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
}
else
- esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
+ esp_insert_esn (vm, irt, pd, pd2, &integ_len, &tag, &len, b, payload);
}
out:
@@ -716,19 +719,19 @@ out:
len -= esp_sz;
iv = payload;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (irt->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
/* constuct aad in a scratch space in front of the nonce */
esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
aad = (u8 *) nonce - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
+ esp_aad_fill (aad, esp0, irt->use_esn, pd->seq_hi);
tag = payload + len;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (irt->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
payload -= iv_sz;
@@ -739,7 +742,7 @@ out:
{
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = irt->salt;
ASSERT (sizeof (u64) == iv_sz);
nonce->iv = *(u64 *) iv;
iv = (u8 *) nonce;
@@ -753,10 +756,9 @@ out:
/* buffer is chained */
flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
- payload,
- len - pd->iv_sz + pd->icv_sz,
- &tag, 0);
+ crypto_len =
+ esp_decrypt_chain_crypto (vm, ptd, pd, pd2, irt, b, icv_sz, payload,
+ len - pd->iv_sz + pd->icv_sz, &tag, 0);
}
*async_pd = *pd;
@@ -779,10 +781,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
int is_async)
{
- ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt_by_index (pd->sa_index);
vlib_buffer_t *lb = b;
const u8 esp_sz = sizeof (esp_header_t);
- const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
u8 pad_length = 0, next_header = 0;
u16 icv_sz;
u64 n_lost;
@@ -809,9 +810,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
* a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
* implementation, sequential or batching, from decrypting these.
*/
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ if (ipsec_sa_anti_replay_and_sn_advance (irt, pd->seq, pd->seq_hi, true,
NULL, true))
{
esp_decrypt_set_next_index (b, node, vm->thread_index,
@@ -819,12 +820,12 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
- n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ n_lost = ipsec_sa_anti_replay_advance (irt, vm->thread_index, pd->seq,
pd->seq_hi, true);
}
else
{
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ if (ipsec_sa_anti_replay_and_sn_advance (irt, pd->seq, pd->seq_hi, true,
NULL, false))
{
esp_decrypt_set_next_index (b, node, vm->thread_index,
@@ -832,7 +833,7 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
- n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ n_lost = ipsec_sa_anti_replay_advance (irt, vm->thread_index, pd->seq,
pd->seq_hi, false);
}
@@ -899,10 +900,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
b->flags &=
~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
- if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
+ if (pd->is_transport && !is_tun) /* transport mode */
{
- u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
- sizeof (udp_header_t) : 0;
+ u8 udp_sz = is_ip6 ? 0 : pd->udp_sz;
u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
u8 *ip = old_ip + adv + udp_sz;
@@ -1012,7 +1012,7 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
if (is_tun)
{
- if (ipsec_sa_is_set_IS_PROTECT (sa0))
+ if (irt->is_protect)
{
/*
* There are two encap possibilities
@@ -1101,21 +1101,18 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_decrypt_packet_data_t cpd = { };
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
const u8 esp_sz = sizeof (esp_header_t);
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_inb_rt_t *irt = 0;
bool anti_replay_result;
- int is_async = im->async_mode;
- vnet_crypto_async_op_id_t async_op = ~0;
- vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
+ int is_async = 0;
+ vnet_crypto_op_id_t async_op = ~0;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_N_OP_IDS];
esp_decrypt_error_t err;
vlib_get_buffers (vm, from, b, n_left);
- if (!is_async)
- {
- vec_reset_length (ptd->crypto_ops);
- vec_reset_length (ptd->integ_ops);
- vec_reset_length (ptd->chained_crypto_ops);
- vec_reset_length (ptd->chained_integ_ops);
- }
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
clib_memset (sync_nexts, -1, sizeof (sync_nexts));
@@ -1157,29 +1154,28 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index);
- sa0 = ipsec_sa_get (current_sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (current_sa_index);
- /* fetch the second cacheline ASAP */
- clib_prefetch_load (sa0->cacheline1);
- cpd.icv_sz = sa0->integ_icv_size;
- cpd.iv_sz = sa0->crypto_iv_size;
- cpd.flags = sa0->flags;
+ cpd.icv_sz = irt->integ_icv_size;
+ cpd.iv_sz = irt->cipher_iv_size;
+ cpd.udp_sz = irt->udp_sz;
+ cpd.is_transport = irt->is_transport;
cpd.sa_index = current_sa_index;
- is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ is_async = irt->is_async;
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == irt->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&irt->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ if (PREDICT_FALSE (thread_index != irt->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = irt->thread_index;
err = ESP_DECRYPT_ERROR_HANDOFF;
esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
@@ -1209,15 +1205,15 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
pd->current_length = b[0]->current_length;
/* anti-reply check */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+ irt, pd->seq, ~0, false, &pd->seq_hi, true);
}
else
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+ irt, pd->seq, ~0, false, &pd->seq_hi, false);
}
if (anti_replay_result)
@@ -1244,7 +1240,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (is_async)
{
- async_op = sa0->crypto_async_dec_op_id;
+ async_op = irt->async_op_id;
/* get a frame for this op if we don't yet have one or it's full
*/
@@ -1267,7 +1263,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
err = esp_decrypt_prepare_async_frame (
- vm, ptd, async_frames[async_op], sa0, payload, len, cpd.icv_sz,
+ vm, ptd, async_frames[async_op], irt, payload, len, cpd.icv_sz,
cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
@@ -1278,7 +1274,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
else
{
- err = esp_decrypt_prepare_sync_op (vm, ptd, sa0, payload, len,
+ err = esp_decrypt_prepare_sync_op (vm, ptd, irt, payload, len,
cpd.icv_sz, cpd.iv_sz, pd, pd2,
b[0], n_sync);
if (err != ESP_DECRYPT_ERROR_RX_PKTS)
@@ -1391,12 +1387,14 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
esp_decrypt_trace_t *tr;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = ipsec_sa_get (current_sa_index);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ ipsec_sa_t *sa = ipsec_sa_get (current_sa_index);
+ ipsec_sa_inb_rt_t *irt =
+ ipsec_sa_get_inb_rt_by_index (current_sa_index);
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq = pd->seq;
- tr->sa_seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
+ tr->sa_seq = irt->seq;
+ tr->sa_seq_hi = irt->seq_hi;
tr->pkt_seq_hi = pd->seq_hi;
}
@@ -1456,18 +1454,20 @@ esp_decrypt_post_inline (vlib_main_t * vm,
/*trace: */
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
esp_decrypt_trace_t *tr;
esp_decrypt_packet_data_t *async_pd =
&(esp_post_data (b[0]))->decrypt_data;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = ipsec_sa_get (async_pd->sa_index);
+ sa = ipsec_sa_get (async_pd->sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (async_pd->sa_index);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq = pd->seq;
- tr->sa_seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
+ tr->sa_seq = irt->seq;
+ tr->sa_seq_hi = irt->seq_hi;
}
n_left--;
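
Throughout esp_decrypt the full ipsec_sa_t is now touched only for tracing; the per-packet path reads the inbound runtime struct returned by ipsec_sa_get_inb_rt_by_index(). A rough sketch of that hot/cold split, with illustrative field names rather than the exact layouts:

/* Sketch of the hot/cold split applied to SAs in this series: a small
 * per-direction runtime struct carries only what the packet path reads,
 * while the full SA object is consulted on the slow path (tracing, CLI).
 * Field names are illustrative, not the exact ipsec_sa layout. */
#include <stdint.h>
#include <stdio.h>

typedef struct
{
  uint16_t thread_index;  /* owning worker, ~0 until claimed */
  uint8_t integ_icv_size;
  uint8_t cipher_iv_size;
  uint8_t use_esn : 1;
  uint8_t is_ctr : 1;
  uint8_t is_aead : 1;
  uint8_t is_tunnel : 1;
  uint32_t cipher_key_index;
  uint32_t integ_key_index;
  uint64_t seq64;
} sa_inb_rt_t; /* hot: small enough to stay in one cache line */

typedef struct
{
  uint32_t spi;
  int crypto_alg;
  int integ_alg;
  sa_inb_rt_t *inb_rt; /* data-path view of the same SA */
  /* ... tunnel config, keys, counters, flags ... */
} sa_t; /* cold: control plane and tracing only */

int
main (void)
{
  printf ("runtime struct: %zu bytes, full SA stub: %zu bytes\n",
          sizeof (sa_inb_rt_t), sizeof (sa_t));
  return 0;
}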
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index f6d1ecaed24..8916eb135f8 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -49,8 +49,7 @@ typedef struct
{
u32 sa_index;
u32 spi;
- u32 seq;
- u32 sa_seq_hi;
+ u64 seq;
u8 udp_encap;
ipsec_crypto_alg_t crypto_alg;
ipsec_integ_alg_t integ_alg;
@@ -71,13 +70,11 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
- s =
- format (s,
- "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
- t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
- format_ipsec_crypto_alg,
- t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
- t->udp_encap ? " udp-encap-enabled" : "");
+ s = format (
+ s, "esp: sa-index %d spi %u (0x%08x) seq %lu crypto %U integrity %U%s",
+ t->sa_index, t->spi, t->spi, t->seq, format_ipsec_crypto_alg,
+ t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
+ t->udp_encap ? " udp-encap-enabled" : "");
return s;
}
@@ -162,9 +159,9 @@ esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
}
static_always_inline void
-esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
+esp_fill_udp_hdr (ipsec_sa_outb_rt_t *ort, udp_header_t *udp, u16 len)
{
- clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
+ clib_memcpy_fast (udp, &ort->udp_hdr, sizeof (udp_header_t));
udp->length = clib_net_to_host_u16 (len);
}
@@ -223,12 +220,12 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
* message. You can refer to NIST SP800-38a and NIST SP800-38d for more
* details. */
static_always_inline void *
-esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+esp_generate_iv (ipsec_sa_outb_rt_t *ort, void *payload, int iv_sz)
{
ASSERT (iv_sz >= sizeof (u64));
u64 *iv = (u64 *) (payload - iv_sz);
clib_memset_u8 (iv, 0, iv_sz);
- *iv = clib_pcg64i_random_r (&sa->iv_prng);
+ *iv = clib_pcg64i_random_r (&ort->iv_prng);
return iv;
}
@@ -294,10 +291,9 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
}
static_always_inline u32
-esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- ipsec_sa_t * sa0, vlib_buffer_t * b,
- vlib_buffer_t * lb, u8 icv_sz, u8 * start,
- u32 start_len, u16 * n_ch)
+esp_encrypt_chain_crypto (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb, u8 icv_sz,
+ u8 *start, u32 start_len, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
@@ -331,10 +327,10 @@ esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
}
static_always_inline u32
-esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- ipsec_sa_t * sa0, vlib_buffer_t * b,
- vlib_buffer_t * lb, u8 icv_sz, u8 * start,
- u32 start_len, u8 * digest, u16 * n_ch)
+esp_encrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ ipsec_sa_outb_rt_t *ort, vlib_buffer_t *b,
+ vlib_buffer_t *lb, u8 icv_sz, u8 *start,
+ u32 start_len, u8 *digest, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
@@ -352,12 +348,11 @@ esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
if (lb == cb)
{
total_len += ch->len = cb->current_length - icv_sz;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (ort->use_esn)
{
- u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
- clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
- ch->len += sizeof (seq_hi);
- total_len += sizeof (seq_hi);
+ *(u32u *) digest = clib_net_to_host_u32 (ort->seq64 >> 32);
+ ch->len += sizeof (u32);
+ total_len += sizeof (u32);
}
}
else
@@ -379,16 +374,16 @@ esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t **crypto_ops,
- vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
- u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
- vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
- esp_header_t *esp)
+ vnet_crypto_op_t **integ_ops, ipsec_sa_outb_rt_t *ort,
+ u32 seq_hi, u8 *payload, u16 payload_len, u8 iv_sz,
+ u8 icv_sz, u32 bi, vlib_buffer_t **b, vlib_buffer_t *lb,
+ u32 hdr_len, esp_header_t *esp)
{
- if (sa0->crypto_enc_op_id)
+ if (ort->cipher_op_id)
{
vnet_crypto_op_t *op;
vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+ vnet_crypto_op_init (op, ort->cipher_op_id);
u8 *crypto_start = payload;
/* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
* have enough space for ESP header and footer which includes ICV */
@@ -396,24 +391,24 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
u16 crypto_len = payload_len - icv_sz;
/* generate the IV in front of the payload */
- void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
+ void *pkt_iv = esp_generate_iv (ort, payload, iv_sz);
- op->key_index = sa0->crypto_key_index;
+ op->key_index = ort->cipher_key_index;
op->user_data = bi;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (ort->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (ort->is_aead)
{
/* constuct aad in a scratch space in front of the nonce */
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
+ op->aad_len = esp_aad_fill (op->aad, esp, ort->use_esn, seq_hi);
op->tag = payload + crypto_len;
op->tag_len = 16;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (ort->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
crypto_start -= iv_sz;
@@ -425,7 +420,7 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = ort->salt;
nonce->iv = *(u64 *) pkt_iv;
op->iv = (u8 *) nonce;
}
@@ -445,9 +440,8 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
op->tag = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
- crypto_start, crypto_len + icv_sz,
- &op->n_chunks);
+ esp_encrypt_chain_crypto (vm, ptd, b[0], lb, icv_sz, crypto_start,
+ crypto_len + icv_sz, &op->n_chunks);
}
else
{
@@ -457,14 +451,14 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
}
- if (sa0->integ_op_id)
+ if (ort->integ_op_id)
{
vnet_crypto_op_t *op;
vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, ort->integ_op_id);
op->src = payload - iv_sz - sizeof (esp_header_t);
op->digest = payload + payload_len - icv_sz;
- op->key_index = sa0->integ_key_index;
+ op->key_index = ort->integ_key_index;
op->digest_len = icv_sz;
op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
op->user_data = bi;
@@ -476,13 +470,12 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->chunk_index = vec_len (ptd->chunks);
op->digest = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
+ esp_encrypt_chain_integ (vm, ptd, ort, b[0], lb, icv_sz,
payload - iv_sz - sizeof (esp_header_t),
- payload_len + iv_sz +
- sizeof (esp_header_t), op->digest,
- &op->n_chunks);
+ payload_len + iv_sz + sizeof (esp_header_t),
+ op->digest, &op->n_chunks);
}
- else if (ipsec_sa_is_set_USE_ESN (sa0))
+ else if (ort->use_esn)
{
u32 tmp = clib_net_to_host_u32 (seq_hi);
clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
@@ -494,15 +487,15 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_async_frame_t *async_frame,
- ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
- u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
- u32 bi, u16 next, u32 hdr_len, u16 async_next,
- vlib_buffer_t *lb)
+ ipsec_sa_outb_rt_t *ort, vlib_buffer_t *b,
+ esp_header_t *esp, u8 *payload, u32 payload_len,
+ u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
+ u16 async_next, vlib_buffer_t *lb)
{
esp_post_data_t *post = esp_post_data (b);
u8 *tag, *iv, *aad = 0;
u8 flag = 0;
- const u32 key_index = sa->crypto_key_index;
+ const u32 key_index = ort->cipher_key_index;
i16 crypto_start_offset, integ_start_offset;
u16 crypto_total_len, integ_total_len;
@@ -514,19 +507,19 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
tag = payload + crypto_total_len;
/* generate the IV in front of the payload */
- void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+ void *pkt_iv = esp_generate_iv (ort, payload, iv_sz);
- if (ipsec_sa_is_set_IS_CTR (sa))
+ if (ort->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa))
+ if (ort->is_aead)
{
/* constuct aad in a scratch space in front of the nonce */
aad = (u8 *) nonce - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp, sa, sa->seq_hi);
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
+ esp_aad_fill (aad, esp, ort->use_esn, ort->seq64 >> 32);
+ if (PREDICT_FALSE (ort->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
crypto_start_offset -= iv_sz;
@@ -538,7 +531,7 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa->salt;
+ nonce->salt = ort->salt;
nonce->iv = *(u64 *) pkt_iv;
iv = (u8 *) nonce;
}
@@ -558,11 +551,11 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
tag = vlib_buffer_get_tail (lb) - icv_sz;
crypto_total_len = esp_encrypt_chain_crypto (
- vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+ vm, ptd, b, lb, icv_sz, b->data + crypto_start_offset,
crypto_total_len + icv_sz, 0);
}
- if (sa->integ_op_id)
+ if (ort->integ_op_id)
{
integ_start_offset -= iv_sz + sizeof (esp_header_t);
integ_total_len += iv_sz + sizeof (esp_header_t);
@@ -570,15 +563,14 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
if (b != lb)
{
integ_total_len = esp_encrypt_chain_integ (
- vm, ptd, sa, b, lb, icv_sz,
+ vm, ptd, ort, b, lb, icv_sz,
payload - iv_sz - sizeof (esp_header_t),
payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
}
- else if (ipsec_sa_is_set_USE_ESN (sa))
+ else if (ort->use_esn)
{
- u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
- clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
- integ_total_len += sizeof (seq_hi);
+ *(u32u *) tag = clib_net_to_host_u32 (ort->seq64 >> 32);
+ integ_total_len += sizeof (u32);
}
}
@@ -620,14 +612,13 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 current_sa_index = ~0, current_sa_packets = 0;
u32 current_sa_bytes = 0, spi = 0;
u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
- ipsec_sa_t *sa0 = 0;
- u8 sa_drop_no_crypto = 0;
+ ipsec_sa_outb_rt_t *ort = 0;
vlib_buffer_t *lb;
vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
- vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_ASYNC_OP_N_IDS];
- int is_async = im->async_mode;
- vnet_crypto_async_op_id_t async_op = ~0;
+ vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_N_OP_IDS];
+ int is_async = 0;
+ vnet_crypto_op_id_t async_op = ~0;
u16 drop_next =
(lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
(lt == VNET_LINK_IP4 ? ESP_ENCRYPT_NEXT_DROP4 :
@@ -708,27 +699,20 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
current_sa_packets, current_sa_bytes);
current_sa_packets = current_sa_bytes = 0;
- sa0 = ipsec_sa_get (sa_index0);
+ ort = ipsec_sa_get_outb_rt_by_index (sa_index0);
current_sa_index = sa_index0;
- sa_drop_no_crypto = ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
- sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
- !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0));
-
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index);
- /* fetch the second cacheline ASAP */
- clib_prefetch_load (sa0->cacheline1);
-
- spi = clib_net_to_host_u32 (sa0->spi);
- esp_align = sa0->esp_block_align;
- icv_sz = sa0->integ_icv_size;
- iv_sz = sa0->crypto_iv_size;
- is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ spi = ort->spi_be;
+ icv_sz = ort->integ_icv_size;
+ esp_align = ort->esp_block_align;
+ iv_sz = ort->cipher_iv_size;
+ is_async = ort->is_async;
}
- if (PREDICT_FALSE (sa_drop_no_crypto != 0))
+ if (PREDICT_FALSE (ort->drop_no_crypto != 0))
{
err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
@@ -736,18 +720,18 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
goto trace;
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == ort->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&ort->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ if (PREDICT_FALSE (thread_index != ort->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = ort->thread_index;
err = ESP_ENCRYPT_ERROR_HANDOFF;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
noop_nexts, handoff_next,
@@ -772,7 +756,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
lb = vlib_get_buffer (vm, lb->next_buffer);
}
- if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ if (PREDICT_FALSE (esp_seq_advance (ort)))
{
err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
@@ -783,7 +767,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
/* space for IV */
hdr_len = iv_sz;
- if (ipsec_sa_is_set_IS_TUNNEL (sa0))
+ if (ort->is_tunnel)
{
payload = vlib_buffer_get_current (b[0]);
next_hdr_ptr = esp_add_footer_and_icv (
@@ -806,40 +790,39 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp = (esp_header_t *) (payload - hdr_len);
/* optional UDP header */
- if (ipsec_sa_is_set_UDP_ENCAP (sa0))
+ if (ort->udp_encap)
{
hdr_len += sizeof (udp_header_t);
- esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
+ esp_fill_udp_hdr (ort, (udp_header_t *) (payload - hdr_len),
payload_len_total + hdr_len);
}
/* IP header */
- if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ if (ort->is_tunnel_v6)
{
ip6_header_t *ip6;
u16 len = sizeof (ip6_header_t);
hdr_len += len;
ip6 = (ip6_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
+ clib_memcpy_fast (ip6, &ort->ip6_hdr, sizeof (ip6_header_t));
if (VNET_LINK_IP6 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IPV6;
- tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
- (const ip6_header_t *) payload,
- ip6);
+ tunnel_encap_fixup_6o6 (ort->tunnel_flags,
+ (const ip6_header_t *) payload, ip6);
}
else if (VNET_LINK_IP4 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
- tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
+ tunnel_encap_fixup_4o6 (ort->tunnel_flags, b[0],
(const ip4_header_t *) payload, ip6);
}
else if (VNET_LINK_MPLS == lt)
{
*next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
tunnel_encap_fixup_mplso6 (
- sa0->tunnel_flags, b[0],
+ ort->tunnel_flags, b[0],
(const mpls_unicast_header_t *) payload, ip6);
}
else
@@ -855,27 +838,25 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u16 len = sizeof (ip4_header_t);
hdr_len += len;
ip4 = (ip4_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
+ clib_memcpy_fast (ip4, &ort->ip4_hdr, sizeof (ip4_header_t));
if (VNET_LINK_IP6 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IPV6;
- tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
- (const ip6_header_t *)
- payload, ip4);
+ tunnel_encap_fixup_6o4_w_chksum (
+ ort->tunnel_flags, (const ip6_header_t *) payload, ip4);
}
else if (VNET_LINK_IP4 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
- tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
- (const ip4_header_t *)
- payload, ip4);
+ tunnel_encap_fixup_4o4_w_chksum (
+ ort->tunnel_flags, (const ip4_header_t *) payload, ip4);
}
else if (VNET_LINK_MPLS == lt)
{
*next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
tunnel_encap_fixup_mplso4_w_chksum (
- sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
+ ort->tunnel_flags, (const mpls_unicast_header_t *) payload,
ip4);
}
else
@@ -885,8 +866,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
}
- if (ipsec_sa_is_set_UDP_ENCAP (sa0) &&
- ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ if (ort->udp_encap && ort->is_tunnel_v6)
{
i16 l3_off = b[0]->current_data - hdr_len;
i16 l4_off = l3_off + sizeof (ip6_header_t);
@@ -894,7 +874,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
set_ip6_udp_cksum_offload (b[0], l3_off, l4_off);
}
- dpo = &sa0->dpo;
+ dpo = &ort->dpo;
if (!is_tun)
{
sync_next[0] = dpo->dpoi_next_node;
@@ -953,7 +933,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp = (esp_header_t *) (payload - hdr_len);
/* optional UDP header */
- if (ipsec_sa_is_set_UDP_ENCAP (sa0))
+ if (ort->udp_encap)
{
hdr_len += sizeof (udp_header_t);
udp = (udp_header_t *) (payload - hdr_len);
@@ -1010,7 +990,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (udp)
{
udp_len = len - ip_len;
- esp_fill_udp_hdr (sa0, udp, udp_len);
+ esp_fill_udp_hdr (ort, udp, udp_len);
}
if (udp && (VNET_LINK_IP6 == lt))
@@ -1036,11 +1016,11 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
esp->spi = spi;
- esp->seq = clib_net_to_host_u32 (sa0->seq);
+ esp->seq = clib_net_to_host_u32 (ort->seq64);
if (is_async)
{
- async_op = sa0->crypto_async_enc_op_id;
+ async_op = ort->async_op_id;
/* get a frame for this op if we don't yet have one or it's full
*/
@@ -1063,15 +1043,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vec_add1 (ptd->async_frames, async_frames[async_op]);
}
- esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+ esp_prepare_async_frame (vm, ptd, async_frames[async_op], ort, b[0],
esp, payload, payload_len, iv_sz, icv_sz,
from[b - bufs], sync_next[0], hdr_len,
async_next_node, lb);
}
else
- esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
- payload, payload_len, iv_sz, icv_sz, n_sync, b,
- lb, hdr_len, esp);
+ esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, ort,
+ ort->seq64 >> 32, payload, payload_len, iv_sz,
+ icv_sz, n_sync, b, lb, hdr_len, esp);
vlib_buffer_advance (b[0], 0LL - hdr_len);
@@ -1087,13 +1067,13 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
clib_memset_u8 (tr, 0xff, sizeof (*tr));
else
{
+ ipsec_sa_t *sa = ipsec_sa_get (sa_index0);
tr->sa_index = sa_index0;
- tr->spi = sa0->spi;
- tr->seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
- tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ tr->spi = sa->spi;
+ tr->seq = ort->seq64;
+ tr->udp_encap = ort->udp_encap;
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
}
}
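
The encrypt path above now reads all per-packet state from the outbound runtime (ort) and keeps one 64-bit sequence counter: the ESP header carries the low 32 bits (clib_net_to_host_u32 (ort->seq64)) and the top 32 bits serve as the ESN (ort->seq64 >> 32). A minimal sketch of what the 64-bit advance could look like, assuming only the seq64 and use_esn fields shown in this patch; the name esp_seq_advance_sketch is hypothetical, not the actual esp.h implementation:

/* returns 1 when the counter would cycle, mirroring ESP_ENCRYPT_ERROR_SEQ_CYCLED */
static_always_inline int
esp_seq_advance_sketch (ipsec_sa_outb_rt_t *ort)
{
  u64 max = ort->use_esn ? ~0ULL : 0xffffffffULL;

  if (PREDICT_FALSE (ort->seq64 == max))
    return 1;			/* drop: sequence space exhausted */

  ort->seq64++;			/* low 32 bits -> esp->seq, high 32 bits -> ESN */
  return 0;
}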
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 8b43dd23cc8..b95b65dfeea 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -36,8 +36,6 @@
*/
#define IPSEC4_SPD_DEFAULT_HASH_NUM_BUCKETS (1 << 22)
-ipsec_main_t ipsec_main;
-
esp_async_post_next_t esp_encrypt_async_next;
esp_async_post_next_t esp_decrypt_async_next;
@@ -314,9 +312,9 @@ clib_error_t *
ipsec_rsc_in_use (ipsec_main_t * im)
{
  /* return an error if crypto resources are in use */
- if (pool_elts (ipsec_sa_pool) > 0)
+ if (pool_elts (im->sa_pool) > 0)
return clib_error_return (0, "%d SA entries configured",
- pool_elts (ipsec_sa_pool));
+ pool_elts (im->sa_pool));
if (ipsec_itf_count () > 0)
return clib_error_return (0, "%d IPSec interface configured",
ipsec_itf_count ());
@@ -386,7 +384,7 @@ ipsec_set_async_mode (u32 is_enabled)
im->async_mode = is_enabled;
/* change SA crypto op data */
- pool_foreach (sa, ipsec_sa_pool)
+ pool_foreach (sa, im->sa_pool)
ipsec_sa_set_async_mode (sa, is_enabled);
}
@@ -424,7 +422,6 @@ ipsec_init (vlib_main_t * vm)
{
clib_error_t *error;
ipsec_main_t *im = &ipsec_main;
- ipsec_main_crypto_alg_t *a;
/* Backend registration requires the feature arcs to be set up */
if ((error = vlib_call_init_function (vm, vnet_feature_init)))
@@ -471,154 +468,6 @@ ipsec_init (vlib_main_t * vm)
if ((error = vlib_call_init_function (vm, ipsec_cli_init)))
return error;
- vec_validate (im->crypto_algs, IPSEC_CRYPTO_N_ALG - 1);
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_NONE;
- a->enc_op_id = VNET_CRYPTO_OP_NONE;
- a->dec_op_id = VNET_CRYPTO_OP_NONE;
- a->alg = VNET_CRYPTO_ALG_NONE;
- a->iv_size = 0;
- a->block_align = 1;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_DES_CBC;
- a->enc_op_id = VNET_CRYPTO_OP_DES_CBC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_DES_CBC_DEC;
- a->alg = VNET_CRYPTO_ALG_DES_CBC;
- a->iv_size = a->block_align = 8;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_3DES_CBC;
- a->enc_op_id = VNET_CRYPTO_OP_3DES_CBC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_3DES_CBC_DEC;
- a->alg = VNET_CRYPTO_ALG_3DES_CBC;
- a->iv_size = a->block_align = 8;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_128;
- a->enc_op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_128_CBC_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_128_CBC;
- a->iv_size = a->block_align = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_192;
- a->enc_op_id = VNET_CRYPTO_OP_AES_192_CBC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_192_CBC_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_192_CBC;
- a->iv_size = a->block_align = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CBC_256;
- a->enc_op_id = VNET_CRYPTO_OP_AES_256_CBC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_256_CBC_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_256_CBC;
- a->iv_size = a->block_align = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_128;
- a->enc_op_id = VNET_CRYPTO_OP_AES_128_CTR_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_128_CTR_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_128_CTR;
- a->iv_size = 8;
- a->block_align = 1;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_192;
- a->enc_op_id = VNET_CRYPTO_OP_AES_192_CTR_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_192_CTR_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_192_CTR;
- a->iv_size = 8;
- a->block_align = 1;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_CTR_256;
- a->enc_op_id = VNET_CRYPTO_OP_AES_256_CTR_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_256_CTR_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_256_CTR;
- a->iv_size = 8;
- a->block_align = 1;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_128;
- a->enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_128_GCM;
- a->iv_size = 8;
- a->block_align = 1;
- a->icv_size = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_192;
- a->enc_op_id = VNET_CRYPTO_OP_AES_192_GCM_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_192_GCM_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_192_GCM;
- a->iv_size = 8;
- a->block_align = 1;
- a->icv_size = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_GCM_256;
- a->enc_op_id = VNET_CRYPTO_OP_AES_256_GCM_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_256_GCM_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_256_GCM;
- a->iv_size = 8;
- a->block_align = 1;
- a->icv_size = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_CHACHA20_POLY1305;
- a->enc_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC;
- a->alg = VNET_CRYPTO_ALG_CHACHA20_POLY1305;
- a->iv_size = 8;
- a->icv_size = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128;
- a->enc_op_id = VNET_CRYPTO_OP_AES_128_NULL_GMAC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_128_NULL_GMAC_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_128_GCM;
- a->iv_size = 8;
- a->block_align = 1;
- a->icv_size = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192;
- a->enc_op_id = VNET_CRYPTO_OP_AES_192_NULL_GMAC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_192_NULL_GMAC_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_192_GCM;
- a->iv_size = 8;
- a->block_align = 1;
- a->icv_size = 16;
-
- a = im->crypto_algs + IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256;
- a->enc_op_id = VNET_CRYPTO_OP_AES_256_NULL_GMAC_ENC;
- a->dec_op_id = VNET_CRYPTO_OP_AES_256_NULL_GMAC_DEC;
- a->alg = VNET_CRYPTO_ALG_AES_256_GCM;
- a->iv_size = 8;
- a->block_align = 1;
- a->icv_size = 16;
-
- vec_validate (im->integ_algs, IPSEC_INTEG_N_ALG - 1);
- ipsec_main_integ_alg_t *i;
-
- i = &im->integ_algs[IPSEC_INTEG_ALG_MD5_96];
- i->op_id = VNET_CRYPTO_OP_MD5_HMAC;
- i->alg = VNET_CRYPTO_ALG_HMAC_MD5;
- i->icv_size = 12;
-
- i = &im->integ_algs[IPSEC_INTEG_ALG_SHA1_96];
- i->op_id = VNET_CRYPTO_OP_SHA1_HMAC;
- i->alg = VNET_CRYPTO_ALG_HMAC_SHA1;
- i->icv_size = 12;
-
- i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_96];
- i->op_id = VNET_CRYPTO_OP_SHA1_HMAC;
- i->alg = VNET_CRYPTO_ALG_HMAC_SHA256;
- i->icv_size = 12;
-
- i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_256_128];
- i->op_id = VNET_CRYPTO_OP_SHA256_HMAC;
- i->alg = VNET_CRYPTO_ALG_HMAC_SHA256;
- i->icv_size = 16;
-
- i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_384_192];
- i->op_id = VNET_CRYPTO_OP_SHA384_HMAC;
- i->alg = VNET_CRYPTO_ALG_HMAC_SHA384;
- i->icv_size = 24;
-
- i = &im->integ_algs[IPSEC_INTEG_ALG_SHA_512_256];
- i->op_id = VNET_CRYPTO_OP_SHA512_HMAC;
- i->alg = VNET_CRYPTO_ALG_HMAC_SHA512;
- i->icv_size = 32;
-
vec_validate_aligned (im->ptd, vlib_num_workers (), CLIB_CACHE_LINE_BYTES);
im->async_mode = 0;
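
With the per-algorithm descriptor members becoming const and the arrays moving into ipsec_main_t (see the ipsec.h hunk below), the table torn out of ipsec_init() presumably becomes compile-time data in the new src/vnet/ipsec/main.c, which is not part of this excerpt. A minimal sketch of the shape one such statically initialized entry could take; the exact layout in main.c is an assumption:

ipsec_main_t ipsec_main = {
  .crypto_algs = {
    [IPSEC_CRYPTO_ALG_AES_GCM_128] = { .enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC,
				       .dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC,
				       .alg = VNET_CRYPTO_ALG_AES_128_GCM,
				       .iv_size = 8,
				       .block_align = 1,
				       .icv_size = 16,
				       .is_aead = 1,
				       .is_ctr = 1 },
    /* ... remaining crypto and integrity entries ... */
  },
};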
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 9ab054cf2a9..c4977ddb6b9 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -112,19 +112,22 @@ typedef struct
typedef struct
{
- vnet_crypto_op_id_t enc_op_id;
- vnet_crypto_op_id_t dec_op_id;
- vnet_crypto_alg_t alg;
- u8 iv_size;
- u8 block_align;
- u8 icv_size;
+ const vnet_crypto_op_id_t enc_op_id;
+ const vnet_crypto_op_id_t dec_op_id;
+ const vnet_crypto_alg_t alg;
+ const u8 iv_size;
+ const u8 block_align;
+ const u8 icv_size;
+ const u8 is_aead : 1;
+ const u8 is_ctr : 1;
+ const u8 is_null_gmac : 1;
} ipsec_main_crypto_alg_t;
typedef struct
{
- vnet_crypto_op_id_t op_id;
- vnet_crypto_alg_t alg;
- u8 icv_size;
+ const vnet_crypto_op_id_t op_id;
+ const vnet_crypto_alg_t alg;
+ const u8 icv_size;
} ipsec_main_integ_alg_t;
typedef struct
@@ -224,10 +227,10 @@ typedef struct
u32 esp_default_backend;
/* crypto alg data */
- ipsec_main_crypto_alg_t *crypto_algs;
+ ipsec_main_crypto_alg_t crypto_algs[IPSEC_CRYPTO_N_ALG];
/* crypto integ data */
- ipsec_main_integ_alg_t *integ_algs;
+ ipsec_main_integ_alg_t integ_algs[IPSEC_INTEG_N_ALG];
/* per-thread data */
ipsec_per_thread_data_t *ptd;
@@ -263,6 +266,10 @@ typedef struct
u8 async_mode;
u16 msg_id_base;
+
+ ipsec_sa_t *sa_pool;
+ ipsec_sa_inb_rt_t **inb_sa_runtimes;
+ ipsec_sa_outb_rt_t **outb_sa_runtimes;
} ipsec_main_t;
typedef enum ipsec_format_flags_t_
@@ -396,6 +403,8 @@ extern clib_error_t *ipsec_register_next_header (vlib_main_t *vm,
u8 next_header,
const char *next_node);
+#include <vnet/ipsec/ipsec_funcs.h>
+
#endif /* __IPSEC_H__ */
/*
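
The classification bits added to ipsec_main_crypto_alg_t take over from the IPSEC_CRYPTO_ALG_IS_GCM / _IS_CTR / _IS_NULL_GMAC macros removed in the ipsec_sa.h hunk further down, so property checks become table lookups. A minimal sketch, assuming an SA whose crypto_alg and integ_alg fields are already set (this mirrors ipsec_sa_init_runtime() in the ipsec_sa.c hunk below):

const ipsec_main_crypto_alg_t *alg = &ipsec_main.crypto_algs[sa->crypto_alg];
u8 icv_size;

if (alg->is_aead)	/* GCM, CHACHA20-POLY1305 and NULL-GMAC carry their own ICV */
  icv_size = alg->icv_size;
else			/* otherwise the ICV size comes from the integrity algorithm */
  icv_size = ipsec_main.integ_algs[sa->integ_alg].icv_size;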
diff --git a/src/vnet/ipsec/ipsec_api.c b/src/vnet/ipsec/ipsec_api.c
index 21216b1a614..262a8cb8c88 100644
--- a/src/vnet/ipsec/ipsec_api.c
+++ b/src/vnet/ipsec/ipsec_api.c
@@ -40,6 +40,28 @@
#define REPLY_MSG_ID_BASE ipsec_main.msg_id_base
#include <vlibapi/api_helper_macros.h>
+static inline u64
+ipsec_sa_get_inb_seq (ipsec_sa_t *sa)
+{
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ u64 seq;
+
+ seq = irt->seq;
+ if (ipsec_sa_is_set_USE_ESN (sa))
+ seq |= (u64) irt->seq_hi << 32;
+ return seq;
+}
+
+static inline u64
+ipsec_sa_get_outb_seq (ipsec_sa_t *sa)
+{
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ u64 seq;
+
+ seq = ort->seq64;
+ return seq;
+}
+
static void
vl_api_ipsec_spd_add_del_t_handler (vl_api_ipsec_spd_add_del_t * mp)
{
@@ -950,6 +972,8 @@ ipsec_sa_dump_match_sa (index_t itpi, void *arg)
static walk_rc_t
send_ipsec_sa_details (ipsec_sa_t * sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_details_t *mp;
@@ -975,7 +999,7 @@ send_ipsec_sa_details (ipsec_sa_t * sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -992,22 +1016,16 @@ send_ipsec_sa_details (ipsec_sa_t * sa, void *arg)
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
@@ -1036,6 +1054,8 @@ vl_api_ipsec_sa_dump_t_handler (vl_api_ipsec_sa_dump_t * mp)
static walk_rc_t
send_ipsec_sa_v2_details (ipsec_sa_t * sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v2_details_t *mp;
@@ -1061,7 +1081,7 @@ send_ipsec_sa_v2_details (ipsec_sa_t * sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1078,26 +1098,20 @@ send_ipsec_sa_v2_details (ipsec_sa_t * sa, void *arg)
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
mp->entry.tunnel_flags =
tunnel_encap_decap_flags_encode (sa->tunnel.t_encap_decap_flags);
mp->entry.dscp = ip_dscp_encode (sa->tunnel.t_dscp);
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
@@ -1126,6 +1140,8 @@ vl_api_ipsec_sa_v2_dump_t_handler (vl_api_ipsec_sa_v2_dump_t *mp)
static walk_rc_t
send_ipsec_sa_v3_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v3_details_t *mp;
@@ -1150,7 +1166,7 @@ send_ipsec_sa_v3_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1165,22 +1181,16 @@ send_ipsec_sa_v3_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
@@ -1209,8 +1219,12 @@ vl_api_ipsec_sa_v3_dump_t_handler (vl_api_ipsec_sa_v3_dump_t *mp)
static walk_rc_t
send_ipsec_sa_v4_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v4_details_t *mp;
+ u32 thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
@@ -1233,7 +1247,7 @@ send_ipsec_sa_v4_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1248,24 +1262,23 @@ send_ipsec_sa_v4_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
- mp->thread_index = clib_host_to_net_u32 (sa->thread_index);
+ if (ort)
+ thread_index = ort->thread_index;
+ else if (irt)
+ thread_index = irt->thread_index;
+
+ mp->thread_index = clib_host_to_net_u32 (thread_index);
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);
@@ -1293,8 +1306,12 @@ vl_api_ipsec_sa_v4_dump_t_handler (vl_api_ipsec_sa_v4_dump_t *mp)
static walk_rc_t
send_ipsec_sa_v5_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v5_details_t *mp;
+ u32 thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
@@ -1317,7 +1334,7 @@ send_ipsec_sa_v5_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1332,27 +1349,27 @@ send_ipsec_sa_v5_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
{
mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
-
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->entry.anti_replay_window_size =
- clib_host_to_net_u32 (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa));
+ clib_host_to_net_u32 (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt));
}
- mp->thread_index = clib_host_to_net_u32 (sa->thread_index);
+ if (ort)
+ thread_index = ort->thread_index;
+ else if (irt)
+ thread_index = irt->thread_index;
+
+ mp->thread_index = clib_host_to_net_u32 (thread_index);
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);
@@ -1427,11 +1444,11 @@ vl_api_ipsec_select_backend_t_handler (vl_api_ipsec_select_backend_t * mp)
vl_api_ipsec_select_backend_reply_t *rmp;
ipsec_protocol_t protocol;
int rv = 0;
- if (pool_elts (ipsec_sa_pool) > 0)
- {
- rv = VNET_API_ERROR_INSTANCE_IN_USE;
- goto done;
- }
+ if (pool_elts (im->sa_pool) > 0)
+ {
+ rv = VNET_API_ERROR_INSTANCE_IN_USE;
+ goto done;
+ }
rv = ipsec_proto_decode (mp->protocol, &protocol);
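
The two helpers added at the top of this file give every dump variant a single host-order 64-bit view of the sequence state: the outbound runtime already keeps a native 64-bit counter, while the inbound value is composed from seq plus, when ESN is enabled, seq_hi. A short usage sketch with hypothetical values:

/* e.g. irt->seq = 0x10, irt->seq_hi = 2, ESN enabled */
u64 inb = ipsec_sa_get_inb_seq (sa);			/* 0x0000000200000010 */
mp->last_seq_inbound = clib_host_to_net_u64 (inb);	/* one byteswap for the API */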
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index 07d9df8f204..77a29d263eb 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -473,7 +473,7 @@ ipsec_sa_show_all (vlib_main_t * vm, ipsec_main_t * im, u8 detail)
{
u32 sai;
- pool_foreach_index (sai, ipsec_sa_pool)
+ pool_foreach_index (sai, im->sa_pool)
{
vlib_cli_output (vm, "%U", format_ipsec_sa, sai,
(detail ? IPSEC_FORMAT_DETAIL : IPSEC_FORMAT_BRIEF));
@@ -583,6 +583,7 @@ static clib_error_t *
clear_ipsec_sa_command_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
+ ipsec_main_t *im = &ipsec_main;
u32 sai = ~0;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
@@ -595,14 +596,14 @@ clear_ipsec_sa_command_fn (vlib_main_t * vm,
if (~0 == sai)
{
- pool_foreach_index (sai, ipsec_sa_pool)
+ pool_foreach_index (sai, im->sa_pool)
{
ipsec_sa_clear (sai);
}
}
else
{
- if (pool_is_free_index (ipsec_sa_pool, sai))
+ if (pool_is_free_index (im->sa_pool, sai))
return clib_error_return (0, "unknown SA index: %d", sai);
else
ipsec_sa_clear (sai);
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index e421a0d96b4..0bbdc85aaed 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -441,19 +441,24 @@ format_ipsec_sa_flags (u8 * s, va_list * args)
u8 *
format_ipsec_sa (u8 * s, va_list * args)
{
+ ipsec_main_t *im = &ipsec_main;
u32 sai = va_arg (*args, u32);
ipsec_format_flags_t flags = va_arg (*args, ipsec_format_flags_t);
vlib_counter_t counts;
counter_t errors;
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
- if (pool_is_free_index (ipsec_sa_pool, sai))
+ if (pool_is_free_index (im->sa_pool, sai))
{
s = format (s, "No such SA index: %d", sai);
goto done;
}
sa = ipsec_sa_get (sai);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
s = format (s, "[%d] sa %u (0x%x) spi %u (0x%08x) protocol:%s flags:[%U]",
sai, sa->id, sa->id, sa->spi, sa->spi,
@@ -464,12 +469,21 @@ format_ipsec_sa (u8 * s, va_list * args)
s = format (s, "\n locks %d", sa->node.fn_locks);
s = format (s, "\n salt 0x%x", clib_net_to_host_u32 (sa->salt));
- s = format (s, "\n thread-index:%d", sa->thread_index);
- s = format (s, "\n seq %u seq-hi %u", sa->seq, sa->seq_hi);
- s = format (s, "\n window-size: %llu",
- IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa));
- s = format (s, "\n window: Bl <- %U Tl", format_ipsec_replay_window,
- ipsec_sa_anti_replay_get_64b_window (sa));
+ if (irt)
+ s = format (s, "\n inbound thread-index:%d", irt->thread_index);
+ if (ort)
+ s = format (s, "\n outbound thread-index:%d", ort->thread_index);
+ if (irt)
+ s = format (s, "\n inbound seq %u seq-hi %u", irt->seq, irt->seq_hi);
+ if (ort)
+ s = format (s, "\n outbound seq %lu", ort->seq64);
+ if (irt)
+ {
+ s = format (s, "\n window-size: %llu",
+ IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt));
+ s = format (s, "\n window: Bl <- %U Tl", format_ipsec_replay_window,
+ ipsec_sa_anti_replay_get_64b_window (irt));
+ }
s =
format (s, "\n crypto alg %U", format_ipsec_crypto_alg, sa->crypto_alg);
if (sa->crypto_alg && (flags & IPSEC_FORMAT_INSECURE))
@@ -482,9 +496,8 @@ format_ipsec_sa (u8 * s, va_list * args)
s = format (s, " key %U", format_ipsec_key, &sa->integ_key);
else
s = format (s, " key [redacted]");
- s = format (s, "\n UDP:[src:%d dst:%d]",
- clib_host_to_net_u16 (sa->udp_hdr.src_port),
- clib_host_to_net_u16 (sa->udp_hdr.dst_port));
+ s =
+ format (s, "\n UDP:[src:%d dst:%d]", sa->udp_src_port, sa->udp_dst_port);
vlib_get_combined_counter (&ipsec_sa_counters, sai, &counts);
s = format (s, "\n tx/rx:[packets:%Ld bytes:%Ld]", counts.packets,
diff --git a/src/vnet/ipsec/ipsec_funcs.h b/src/vnet/ipsec/ipsec_funcs.h
new file mode 100644
index 00000000000..29788b3d765
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_funcs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __IPSEC_FUNCS_H__
+#define __IPSEC_FUNCS_H__
+
+#include <vlib/vlib.h>
+#include <vnet/ipsec/ipsec.h>
+
+always_inline ipsec_sa_t *
+ipsec_sa_get (u32 sa_index)
+{
+ return (pool_elt_at_index (ipsec_main.sa_pool, sa_index));
+}
+
+static_always_inline ipsec_sa_outb_rt_t *
+ipsec_sa_get_outb_rt_by_index (u32 sa_index)
+{
+ return ipsec_main.outb_sa_runtimes[sa_index];
+}
+
+static_always_inline ipsec_sa_inb_rt_t *
+ipsec_sa_get_inb_rt_by_index (u32 sa_index)
+{
+ return ipsec_main.inb_sa_runtimes[sa_index];
+}
+
+static_always_inline ipsec_sa_outb_rt_t *
+ipsec_sa_get_outb_rt (ipsec_sa_t *sa)
+{
+ return ipsec_sa_get_outb_rt_by_index (sa - ipsec_main.sa_pool);
+}
+
+static_always_inline ipsec_sa_inb_rt_t *
+ipsec_sa_get_inb_rt (ipsec_sa_t *sa)
+{
+ return ipsec_sa_get_inb_rt_by_index (sa - ipsec_main.sa_pool);
+}
+
+#endif /* __IPSEC_FUNCS_H__ */
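
The new header centralizes SA lookups: the SA pool and the two index-parallel runtime vectors live in ipsec_main, so control-plane code resolves by ipsec_sa_t pointer and data-plane code by SA index. A minimal usage sketch; the wrapper name example_sa_thread_index is hypothetical:

#include <vnet/ipsec/ipsec.h>	/* pulls in ipsec_funcs.h, per the ipsec.h hunk above */

static_always_inline u16
example_sa_thread_index (u32 sa_index, int is_inbound)
{
  if (is_inbound)
    return ipsec_sa_get_inb_rt_by_index (sa_index)->thread_index;
  return ipsec_sa_get_outb_rt_by_index (sa_index)->thread_index;
}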
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index 1d5195ec793..eb4270ac2b4 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -33,8 +33,6 @@ vlib_combined_counter_main_t ipsec_sa_counters = {
/* Per-SA error counters */
vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];
-ipsec_sa_t *ipsec_sa_pool;
-
static clib_error_t *
ipsec_call_add_del_callbacks (ipsec_main_t * im, ipsec_sa_t * sa,
u32 sa_index, int is_add)
@@ -77,39 +75,71 @@ static void
ipsec_sa_stack (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
dpo_id_t tmp = DPO_INVALID;
tunnel_contribute_forwarding (&sa->tunnel, &tmp);
if (IPSEC_PROTOCOL_AH == sa->protocol)
dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
- im->ah6_encrypt_node_index :
- im->ah4_encrypt_node_index), &sa->dpo, &tmp);
+ im->ah6_encrypt_node_index :
+ im->ah4_encrypt_node_index),
+ &ort->dpo, &tmp);
else
dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
- im->esp6_encrypt_node_index :
- im->esp4_encrypt_node_index), &sa->dpo, &tmp);
+ im->esp6_encrypt_node_index :
+ im->esp4_encrypt_node_index),
+ &ort->dpo, &tmp);
dpo_reset (&tmp);
}
void
ipsec_sa_set_async_mode (ipsec_sa_t *sa, int is_enabled)
{
+ u32 cipher_key_index, integ_key_index;
+ vnet_crypto_op_id_t inb_cipher_op_id, outb_cipher_op_id, integ_op_id;
+ u32 is_async;
if (is_enabled)
{
- sa->crypto_key_index = sa->crypto_async_key_index;
- sa->crypto_enc_op_id = sa->crypto_async_enc_op_id;
- sa->crypto_dec_op_id = sa->crypto_async_dec_op_id;
- sa->integ_key_index = ~0;
- sa->integ_op_id = ~0;
+ if (sa->linked_key_index != ~0)
+ cipher_key_index = sa->linked_key_index;
+ else
+ cipher_key_index = sa->crypto_sync_key_index;
+
+ outb_cipher_op_id = sa->crypto_async_enc_op_id;
+ inb_cipher_op_id = sa->crypto_async_dec_op_id;
+ integ_key_index = ~0;
+ integ_op_id = ~0;
+ is_async = 1;
}
else
{
- sa->crypto_key_index = sa->crypto_sync_key_index;
- sa->crypto_enc_op_id = sa->crypto_sync_enc_op_id;
- sa->crypto_dec_op_id = sa->crypto_sync_dec_op_id;
- sa->integ_key_index = sa->integ_sync_key_index;
- sa->integ_op_id = sa->integ_sync_op_id;
+ cipher_key_index = sa->crypto_sync_key_index;
+ outb_cipher_op_id = sa->crypto_sync_enc_op_id;
+ inb_cipher_op_id = sa->crypto_sync_dec_op_id;
+ integ_key_index = sa->integ_sync_key_index;
+ integ_op_id = sa->integ_sync_op_id;
+ is_async = 0;
+ }
+
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->cipher_key_index = cipher_key_index;
+ irt->integ_key_index = integ_key_index;
+ irt->cipher_op_id = inb_cipher_op_id;
+ irt->integ_op_id = integ_op_id;
+ irt->is_async = is_async;
+ }
+
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->cipher_key_index = cipher_key_index;
+ ort->integ_key_index = integ_key_index;
+ ort->cipher_op_id = outb_cipher_op_id;
+ ort->integ_op_id = integ_op_id;
+ ort->is_async = is_async;
}
}
@@ -117,32 +147,11 @@ void
ipsec_sa_set_crypto_alg (ipsec_sa_t * sa, ipsec_crypto_alg_t crypto_alg)
{
ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + crypto_alg;
sa->crypto_alg = crypto_alg;
- sa->crypto_iv_size = im->crypto_algs[crypto_alg].iv_size;
- sa->esp_block_align = clib_max (4, im->crypto_algs[crypto_alg].block_align);
- sa->crypto_sync_enc_op_id = im->crypto_algs[crypto_alg].enc_op_id;
- sa->crypto_sync_dec_op_id = im->crypto_algs[crypto_alg].dec_op_id;
- sa->crypto_calg = im->crypto_algs[crypto_alg].alg;
- ASSERT (sa->crypto_iv_size <= ESP_MAX_IV_SIZE);
- ASSERT (sa->esp_block_align <= ESP_MAX_BLOCK_SIZE);
- if (IPSEC_CRYPTO_ALG_IS_GCM (crypto_alg) ||
- IPSEC_CRYPTO_ALG_CTR_AEAD_OTHERS (crypto_alg))
- {
- sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
- ipsec_sa_set_IS_CTR (sa);
- ipsec_sa_set_IS_AEAD (sa);
- }
- else if (IPSEC_CRYPTO_ALG_IS_CTR (crypto_alg))
- {
- ipsec_sa_set_IS_CTR (sa);
- }
- else if (IPSEC_CRYPTO_ALG_IS_NULL_GMAC (crypto_alg))
- {
- sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
- ipsec_sa_set_IS_CTR (sa);
- ipsec_sa_set_IS_AEAD (sa);
- ipsec_sa_set_IS_NULL_GMAC (sa);
- }
+ sa->crypto_sync_enc_op_id = alg->enc_op_id;
+ sa->crypto_sync_dec_op_id = alg->dec_op_id;
+ sa->crypto_calg = alg->alg;
}
void
@@ -150,18 +159,16 @@ ipsec_sa_set_integ_alg (ipsec_sa_t * sa, ipsec_integ_alg_t integ_alg)
{
ipsec_main_t *im = &ipsec_main;
sa->integ_alg = integ_alg;
- sa->integ_icv_size = im->integ_algs[integ_alg].icv_size;
sa->integ_sync_op_id = im->integ_algs[integ_alg].op_id;
sa->integ_calg = im->integ_algs[integ_alg].alg;
- ASSERT (sa->integ_icv_size <= ESP_MAX_ICV_SIZE);
}
-void
-ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
+static void
+ipsec_sa_set_async_op_ids (ipsec_sa_t *sa)
{
if (ipsec_sa_is_set_USE_ESN (sa))
{
-#define _(n, s, k) \
+#define _(n, s, ...) \
if (sa->crypto_sync_enc_op_id == VNET_CRYPTO_OP_##n##_ENC) \
sa->crypto_async_enc_op_id = VNET_CRYPTO_OP_##n##_TAG16_AAD12_ENC; \
if (sa->crypto_sync_dec_op_id == VNET_CRYPTO_OP_##n##_DEC) \
@@ -171,7 +178,7 @@ ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
}
else
{
-#define _(n, s, k) \
+#define _(n, s, ...) \
if (sa->crypto_sync_enc_op_id == VNET_CRYPTO_OP_##n##_ENC) \
sa->crypto_async_enc_op_id = VNET_CRYPTO_OP_##n##_TAG16_AAD8_ENC; \
if (sa->crypto_sync_dec_op_id == VNET_CRYPTO_OP_##n##_DEC) \
@@ -191,12 +198,90 @@ ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
#undef _
}
+static void
+ipsec_sa_init_runtime (ipsec_sa_t *sa)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + sa->crypto_alg;
+ u8 integ_icv_size;
+
+ if (alg->is_aead)
+ integ_icv_size = im->crypto_algs[sa->crypto_alg].icv_size;
+ else
+ integ_icv_size = im->integ_algs[sa->integ_alg].icv_size;
+ ASSERT (integ_icv_size <= ESP_MAX_ICV_SIZE);
+
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->anti_reply_huge = ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa);
+ irt->use_anti_replay = ipsec_sa_is_set_USE_ANTI_REPLAY (sa);
+ irt->use_esn = ipsec_sa_is_set_USE_ESN (sa);
+ irt->is_tunnel = ipsec_sa_is_set_IS_TUNNEL (sa);
+ irt->is_transport =
+ !(ipsec_sa_is_set_IS_TUNNEL (sa) || ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ irt->udp_sz = ipsec_sa_is_set_UDP_ENCAP (sa) ? sizeof (udp_header_t) : 0;
+ irt->is_ctr = alg->is_ctr;
+ irt->is_aead = alg->is_aead;
+ irt->is_null_gmac = alg->is_null_gmac;
+ irt->cipher_iv_size = im->crypto_algs[sa->crypto_alg].iv_size;
+ irt->integ_icv_size = integ_icv_size;
+ irt->salt = sa->salt;
+ irt->async_op_id = sa->crypto_async_dec_op_id;
+ ASSERT (irt->cipher_iv_size <= ESP_MAX_IV_SIZE);
+ }
+
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->use_anti_replay = ipsec_sa_is_set_USE_ANTI_REPLAY (sa);
+ ort->use_esn = ipsec_sa_is_set_USE_ESN (sa);
+ ort->is_ctr = alg->is_ctr;
+ ort->is_aead = alg->is_aead;
+ ort->is_null_gmac = alg->is_null_gmac;
+ ort->is_tunnel = ipsec_sa_is_set_IS_TUNNEL (sa);
+ ort->is_tunnel_v6 = ipsec_sa_is_set_IS_TUNNEL_V6 (sa);
+ ort->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa);
+ ort->esp_block_align =
+ clib_max (4, im->crypto_algs[sa->crypto_alg].block_align);
+ ort->cipher_iv_size = im->crypto_algs[sa->crypto_alg].iv_size;
+ ort->integ_icv_size = integ_icv_size;
+ ort->salt = sa->salt;
+ ort->spi_be = clib_host_to_net_u32 (sa->spi);
+ ort->tunnel_flags = sa->tunnel.t_encap_decap_flags;
+ ort->async_op_id = sa->crypto_async_enc_op_id;
+ ort->t_dscp = sa->tunnel.t_dscp;
+
+ ASSERT (ort->cipher_iv_size <= ESP_MAX_IV_SIZE);
+ ASSERT (ort->esp_block_align <= ESP_MAX_BLOCK_SIZE);
+ }
+ ipsec_sa_update_runtime (sa);
+}
+
+void
+ipsec_sa_update_runtime (ipsec_sa_t *sa)
+{
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->is_protect = ipsec_sa_is_set_IS_PROTECT (sa);
+ }
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->drop_no_crypto = sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+ sa->integ_alg == IPSEC_INTEG_ALG_NONE &&
+ !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa);
+ }
+}
+
int
ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
bool is_tun)
{
ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
+ ipsec_sa_outb_rt_t *ort;
u32 sa_index;
uword *p;
int rv;
@@ -206,7 +291,8 @@ ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
return VNET_API_ERROR_NO_SUCH_ENTRY;
sa = ipsec_sa_get (p[0]);
- sa_index = sa - ipsec_sa_pool;
+ ort = ipsec_sa_get_outb_rt (sa);
+ sa_index = sa - im->sa_pool;
if (is_tun && ipsec_sa_is_set_IS_TUNNEL (sa) &&
(ip_address_cmp (&tun->t_src, &sa->tunnel.t_src) != 0 ||
@@ -267,16 +353,16 @@ ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
tunnel_copy (tun, &sa->tunnel);
if (!ipsec_sa_is_set_IS_INBOUND (sa))
{
- dpo_reset (&sa->dpo);
+ dpo_reset (&ort->dpo);
- sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;
+ ort->tunnel_flags = sa->tunnel.t_encap_decap_flags;
rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);
if (rv)
{
hash_unset (im->sa_index_by_sa_id, sa->id);
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return rv;
}
ipsec_sa_stack (sa);
@@ -285,39 +371,42 @@ ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
{
tunnel_build_v6_hdr (&sa->tunnel,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
- IP_PROTOCOL_UDP :
- IP_PROTOCOL_IPSEC_ESP),
- &sa->ip6_hdr);
+ IP_PROTOCOL_UDP :
+ IP_PROTOCOL_IPSEC_ESP),
+ &ort->ip6_hdr);
}
else
{
tunnel_build_v4_hdr (&sa->tunnel,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
- IP_PROTOCOL_UDP :
- IP_PROTOCOL_IPSEC_ESP),
- &sa->ip4_hdr);
+ IP_PROTOCOL_UDP :
+ IP_PROTOCOL_IPSEC_ESP),
+ &ort->ip4_hdr);
}
}
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- if (dst_port != IPSEC_UDP_PORT_NONE &&
- dst_port != clib_net_to_host_u16 (sa->udp_hdr.dst_port))
+ if (dst_port != IPSEC_UDP_PORT_NONE && dst_port != sa->udp_dst_port)
{
if (ipsec_sa_is_set_IS_INBOUND (sa))
{
- ipsec_unregister_udp_port (
- clib_net_to_host_u16 (sa->udp_hdr.dst_port),
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ ipsec_unregister_udp_port (sa->udp_dst_port,
+ !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
ipsec_register_udp_port (dst_port,
!ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
}
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ sa->udp_dst_port = dst_port;
+ if (ort)
+ ort->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ }
+ if (src_port != IPSEC_UDP_PORT_NONE && src_port != (sa->udp_src_port))
+ {
+ sa->udp_src_port = src_port;
+ if (ort)
+ ort->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
}
- if (src_port != IPSEC_UDP_PORT_NONE &&
- src_port != clib_net_to_host_u16 (sa->udp_hdr.src_port))
- sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
}
return (0);
}
@@ -332,6 +421,9 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
{
vlib_main_t *vm = vlib_get_main ();
ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + crypto_alg;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
clib_error_t *err;
ipsec_sa_t *sa;
u32 sa_index;
@@ -346,13 +438,24 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (getrandom (rand, sizeof (rand), 0) != sizeof (rand))
return VNET_API_ERROR_INIT_FAILED;
- pool_get_aligned_zero (ipsec_sa_pool, sa, CLIB_CACHE_LINE_BYTES);
+ pool_get_aligned_zero (im->sa_pool, sa, CLIB_CACHE_LINE_BYTES);
+ sa_index = sa - im->sa_pool;
+ vec_validate (im->inb_sa_runtimes, sa_index);
+ vec_validate (im->outb_sa_runtimes, sa_index);
+
+ irt = clib_mem_alloc_aligned (sizeof (ipsec_sa_inb_rt_t),
+ _Alignof (ipsec_sa_inb_rt_t));
+ ort = clib_mem_alloc_aligned (sizeof (ipsec_sa_outb_rt_t),
+ _Alignof (ipsec_sa_outb_rt_t));
+ im->inb_sa_runtimes[sa_index] = irt;
+ im->outb_sa_runtimes[sa_index] = ort;
+ clib_memset (irt, 0, sizeof (ipsec_sa_inb_rt_t));
+ clib_memset (ort, 0, sizeof (ipsec_sa_outb_rt_t));
- clib_pcg64i_srandom_r (&sa->iv_prng, rand[0], rand[1]);
+ clib_pcg64i_srandom_r (&ort->iv_prng, rand[0], rand[1]);
fib_node_init (&sa->node, FIB_NODE_TYPE_IPSEC_SA);
fib_node_lock (&sa->node);
- sa_index = sa - ipsec_sa_pool;
vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
@@ -369,7 +472,11 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
sa->protocol = proto;
sa->flags = flags;
sa->salt = salt;
- sa->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+ if (irt)
+ irt->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+ if (ort)
+ ort->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+
if (integ_alg != IPSEC_INTEG_ALG_NONE)
{
ipsec_sa_set_integ_alg (sa, integ_alg);
@@ -383,12 +490,15 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
clib_memcpy (&sa->crypto_key, ck, sizeof (sa->crypto_key));
- sa->crypto_sync_key_index = vnet_crypto_key_add (
- vm, im->crypto_algs[crypto_alg].alg, (u8 *) ck->data, ck->len);
- if (~0 == sa->crypto_sync_key_index)
+ if (crypto_alg != IPSEC_CRYPTO_ALG_NONE)
{
- pool_put (ipsec_sa_pool, sa);
- return VNET_API_ERROR_KEY_LENGTH;
+ sa->crypto_sync_key_index = vnet_crypto_key_add (
+ vm, im->crypto_algs[crypto_alg].alg, (u8 *) ck->data, ck->len);
+ if (~0 == sa->crypto_sync_key_index)
+ {
+ pool_put (im->sa_pool, sa);
+ return VNET_API_ERROR_KEY_LENGTH;
+ }
}
if (integ_alg != IPSEC_INTEG_ALG_NONE)
@@ -397,17 +507,17 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
vm, im->integ_algs[integ_alg].alg, (u8 *) ik->data, ik->len);
if (~0 == sa->integ_sync_key_index)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_KEY_LENGTH;
}
}
- if (sa->crypto_async_enc_op_id && !ipsec_sa_is_set_IS_AEAD (sa))
- sa->crypto_async_key_index =
+ if (sa->crypto_async_enc_op_id && alg->is_aead == 0)
+ sa->linked_key_index =
vnet_crypto_key_add_linked (vm, sa->crypto_sync_key_index,
sa->integ_sync_key_index); // AES-CBC & HMAC
else
- sa->crypto_async_key_index = sa->crypto_sync_key_index;
+ sa->linked_key_index = ~0;
if (im->async_mode)
{
@@ -426,14 +536,14 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (err)
{
clib_warning ("%v", err->what);
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_UNIMPLEMENTED;
}
err = ipsec_call_add_del_callbacks (im, sa, sa_index, 1);
if (err)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_SYSCALL_ERROR_1;
}
@@ -443,13 +553,12 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
{
- sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;
rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);
if (rv)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return rv;
}
ipsec_sa_stack (sa);
@@ -461,7 +570,7 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
IP_PROTOCOL_UDP :
IP_PROTOCOL_IPSEC_ESP),
- &sa->ip6_hdr);
+ &ort->ip6_hdr);
}
else
{
@@ -469,37 +578,38 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
IP_PROTOCOL_UDP :
IP_PROTOCOL_IPSEC_ESP),
- &sa->ip4_hdr);
+ &ort->ip4_hdr);
}
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
if (dst_port == IPSEC_UDP_PORT_NONE)
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- else
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
-
+ dst_port = UDP_DST_PORT_ipsec;
if (src_port == IPSEC_UDP_PORT_NONE)
- sa->udp_hdr.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- else
- sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
+ src_port = UDP_DST_PORT_ipsec;
+ sa->udp_dst_port = dst_port;
+ sa->udp_src_port = src_port;
+ if (ort)
+ {
+ ort->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
+ ort->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ }
if (ipsec_sa_is_set_IS_INBOUND (sa))
- ipsec_register_udp_port (clib_host_to_net_u16 (sa->udp_hdr.dst_port),
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ ipsec_register_udp_port (dst_port, !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
}
/* window size rounded up to next power of 2 */
if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
{
anti_replay_window_size = 1 << max_log2 (anti_replay_window_size);
- sa->replay_window_huge =
+ irt->replay_window_huge =
clib_bitmap_set_region (0, 0, 1, anti_replay_window_size);
}
else
{
- sa->replay_window = ~0;
+ irt->replay_window = ~0;
}
hash_set (im->sa_index_by_sa_id, sa->id, sa_index);
@@ -507,6 +617,8 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (sa_out_index)
*sa_out_index = sa_index;
+ ipsec_sa_init_runtime (sa);
+
return (0);
}
@@ -516,32 +628,40 @@ ipsec_sa_del (ipsec_sa_t * sa)
vlib_main_t *vm = vlib_get_main ();
ipsec_main_t *im = &ipsec_main;
u32 sa_index;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
- sa_index = sa - ipsec_sa_pool;
+ sa_index = sa - im->sa_pool;
hash_unset (im->sa_index_by_sa_id, sa->id);
tunnel_unresolve (&sa->tunnel);
/* no recovery possible when deleting an SA */
(void) ipsec_call_add_del_callbacks (im, sa, sa_index, 0);
- if (ipsec_sa_is_set_IS_ASYNC (sa))
- {
- if (!ipsec_sa_is_set_IS_AEAD (sa))
- vnet_crypto_key_del (vm, sa->crypto_async_key_index);
- }
+ if (sa->linked_key_index != ~0)
+ vnet_crypto_key_del (vm, sa->linked_key_index);
if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
- ipsec_unregister_udp_port (clib_net_to_host_u16 (sa->udp_hdr.dst_port),
+ ipsec_unregister_udp_port (sa->udp_dst_port,
!ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
- dpo_reset (&sa->dpo);
- vnet_crypto_key_del (vm, sa->crypto_sync_key_index);
+ dpo_reset (&ort->dpo);
+ if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE)
+ vnet_crypto_key_del (vm, sa->crypto_sync_key_index);
if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
vnet_crypto_key_del (vm, sa->integ_sync_key_index);
if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- clib_bitmap_free (sa->replay_window_huge);
- pool_put (ipsec_sa_pool, sa);
+ if (irt && irt->replay_window_huge)
+ clib_bitmap_free (irt->replay_window_huge);
+ foreach_pointer (p, irt, ort)
+ if (p)
+ clib_mem_free (p);
+
+ im->inb_sa_runtimes[sa_index] = 0;
+ im->outb_sa_runtimes[sa_index] = 0;
+
+ pool_put (im->sa_pool, sa);
}
int
@@ -550,23 +670,33 @@ ipsec_sa_bind (u32 id, u32 worker, bool bind)
ipsec_main_t *im = &ipsec_main;
uword *p;
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
+ u16 thread_index;
p = hash_get (im->sa_index_by_sa_id, id);
if (!p)
return VNET_API_ERROR_INVALID_VALUE;
sa = ipsec_sa_get (p[0]);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
if (!bind)
{
- sa->thread_index = ~0;
- return 0;
+ thread_index = ~0;
+ goto done;
}
if (worker >= vlib_num_workers ())
return VNET_API_ERROR_INVALID_WORKER;
- sa->thread_index = vlib_get_worker_thread_index (worker);
+ thread_index = vlib_get_worker_thread_index (worker);
+done:
+ if (irt)
+ irt->thread_index = thread_index;
+ if (ort)
+ ort->thread_index = thread_index;
return 0;
}
@@ -642,9 +772,10 @@ ipsec_sa_clear (index_t sai)
void
ipsec_sa_walk (ipsec_sa_walk_cb_t cb, void *ctx)
{
+ ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
- pool_foreach (sa, ipsec_sa_pool)
+ pool_foreach (sa, im->sa_pool)
{
if (WALK_CONTINUE != cb (sa, ctx))
break;
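
ipsec_sa_init_runtime() snapshots the SA's static properties into the per-direction runtimes at creation time, and ipsec_sa_update_runtime() is exported (see the ipsec_sa.h hunk below) so control-plane code can refresh the derived bits after it changes SA flags. A minimal caller sketch, under the assumption that a flag is flipped after the SA already exists:

ipsec_sa_t *sa = ipsec_sa_get (sa_index);

ipsec_sa_set_IS_PROTECT (sa);	/* e.g. when a tunnel-protect binding is added */
ipsec_sa_update_runtime (sa);	/* propagates the is_protect / drop_no_crypto bits */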
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 640d9288a42..ce2964a9493 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -52,24 +52,6 @@ typedef enum
IPSEC_CRYPTO_N_ALG,
} __clib_packed ipsec_crypto_alg_t;
-#define IPSEC_CRYPTO_ALG_IS_NULL_GMAC(_alg) \
- ((_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256))
-
-#define IPSEC_CRYPTO_ALG_IS_GCM(_alg) \
- (((_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)))
-
-#define IPSEC_CRYPTO_ALG_IS_CTR(_alg) \
- (((_alg == IPSEC_CRYPTO_ALG_AES_CTR_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_CTR_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_CTR_256)))
-
-#define IPSEC_CRYPTO_ALG_CTR_AEAD_OTHERS(_alg) \
- (_alg == IPSEC_CRYPTO_ALG_CHACHA20_POLY1305)
-
#define foreach_ipsec_integ_alg \
_ (0, NONE, "none") \
_ (1, MD5_96, "md5-96") /* RFC2403 */ \
@@ -117,11 +99,8 @@ typedef struct ipsec_key_t_
_ (16, UDP_ENCAP, "udp-encap") \
_ (32, IS_PROTECT, "Protect") \
_ (64, IS_INBOUND, "inbound") \
- _ (128, IS_AEAD, "aead") \
- _ (256, IS_CTR, "ctr") \
_ (512, IS_ASYNC, "async") \
_ (1024, NO_ALGO_NO_DROP, "no-algo-no-drop") \
- _ (2048, IS_NULL_GMAC, "null-gmac") \
_ (4096, ANTI_REPLAY_HUGE, "anti-replay-huge")
typedef enum ipsec_sad_flags_t_
@@ -165,51 +144,87 @@ typedef enum
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-
- clib_pcg64i_random_t iv_prng;
-
+ u16 is_aead : 1;
+ u16 is_ctr : 1;
+ u16 is_null_gmac : 1;
+ u16 use_esn : 1;
+ u16 use_anti_replay : 1;
+ u16 anti_reply_huge : 1;
+ u16 is_protect : 1;
+ u16 is_tunnel : 1;
+ u16 is_transport : 1;
+ u16 is_async : 1;
+ u16 cipher_op_id;
+ u16 integ_op_id;
+ u8 cipher_iv_size;
+ u8 integ_icv_size;
+ u8 udp_sz;
+ u16 thread_index;
+ u32 salt;
+ u32 seq;
+ u32 seq_hi;
+ u16 async_op_id;
+ vnet_crypto_key_index_t cipher_key_index;
+ vnet_crypto_key_index_t integ_key_index;
union
{
u64 replay_window;
clib_bitmap_t *replay_window_huge;
};
- dpo_id_t dpo;
-
- vnet_crypto_key_index_t crypto_key_index;
- vnet_crypto_key_index_t integ_key_index;
-
- u32 spi;
- u32 seq;
- u32 seq_hi;
+} ipsec_sa_inb_rt_t;
- u16 crypto_enc_op_id;
- u16 crypto_dec_op_id;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u16 is_aead : 1;
+ u16 is_ctr : 1;
+ u16 is_null_gmac : 1;
+ u16 is_tunnel : 1;
+ u16 is_tunnel_v6 : 1;
+ u16 udp_encap : 1;
+ u16 use_esn : 1;
+ u16 use_anti_replay : 1;
+ u16 drop_no_crypto : 1;
+ u16 is_async : 1;
+ clib_pcg64i_random_t iv_prng;
+ u16 cipher_op_id;
u16 integ_op_id;
- ipsec_sa_flags_t flags;
+ u8 cipher_iv_size;
+ u8 esp_block_align;
+ u8 integ_icv_size;
u16 thread_index;
-
- u16 integ_icv_size : 6;
- u16 crypto_iv_size : 5;
- u16 esp_block_align : 5;
-
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
-
+ u32 salt;
+ u64 seq64;
+ u32 spi_be;
+ ip_dscp_t t_dscp;
+ dpo_id_t dpo;
+ tunnel_encap_decap_flags_t tunnel_flags;
+ u16 async_op_id;
+ vnet_crypto_key_index_t cipher_key_index;
+ vnet_crypto_key_index_t integ_key_index;
union
{
ip4_header_t ip4_hdr;
ip6_header_t ip6_hdr;
};
udp_header_t udp_hdr;
+} ipsec_sa_outb_rt_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ u32 spi;
+
+ ipsec_sa_flags_t flags;
+
+ u16 udp_src_port;
+ u16 udp_dst_port;
/* Salt used in CTR modes (incl. GCM) - stored in network byte order */
u32 salt;
ipsec_protocol_t protocol;
- tunnel_encap_decap_flags_t tunnel_flags;
- u8 __pad[2];
-
- /* data accessed by dataplane code should be above this comment */
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
/* Elements with u64 size multiples */
tunnel_t tunnel;
@@ -222,7 +237,7 @@ typedef struct
vnet_crypto_alg_t crypto_calg;
u32 crypto_sync_key_index;
u32 integ_sync_key_index;
- u32 crypto_async_key_index;
+ u32 linked_key_index;
/* elements with u16 size */
u16 crypto_sync_enc_op_id;
@@ -243,13 +258,6 @@ STATIC_ASSERT (VNET_CRYPTO_N_OP_IDS < (1 << 16), "crypto ops overflow");
STATIC_ASSERT (ESP_MAX_ICV_SIZE < (1 << 6), "integer icv overflow");
STATIC_ASSERT (ESP_MAX_IV_SIZE < (1 << 5), "esp iv overflow");
STATIC_ASSERT (ESP_MAX_BLOCK_SIZE < (1 << 5), "esp alignment overflow");
-STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
-STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline2, 2 * CLIB_CACHE_LINE_BYTES);
-
-/**
- * Pool of IPSec SAs
- */
-extern ipsec_sa_t *ipsec_sa_pool;
/*
* Ensure that the IPsec data does not overlap with the IP data in
@@ -291,6 +299,7 @@ extern void ipsec_mk_key (ipsec_key_t *key, const u8 *data, u8 len);
extern int ipsec_sa_update (u32 id, u16 src_port, u16 dst_port,
const tunnel_t *tun, bool is_tun);
+extern void ipsec_sa_update_runtime (ipsec_sa_t *sa);
extern int ipsec_sa_add_and_lock (
u32 id, u32 spi, ipsec_protocol_t proto, ipsec_crypto_alg_t crypto_alg,
const ipsec_key_t *ck, ipsec_integ_alg_t integ_alg, const ipsec_key_t *ik,
@@ -327,29 +336,29 @@ extern uword unformat_ipsec_key (unformat_input_t *input, va_list *args);
* Anti Replay definitions
*/
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE(_sa) \
- (u32) (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (_sa)) ? \
- clib_bitmap_bytes (_sa->replay_window_huge) * 8 : \
- BITS (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE(_irt) \
+ (u32) (PREDICT_FALSE (_irt->anti_reply_huge) ? \
+ clib_bitmap_bytes (_irt->replay_window_huge) * 8 : \
+ BITS (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN(_sa, _is_huge) \
- (u32) (_is_huge ? clib_bitmap_bytes (_sa->replay_window_huge) * 8 : \
- BITS (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN(_irt, _is_huge) \
+ (u32) (_is_huge ? clib_bitmap_bytes (_irt->replay_window_huge) * 8 : \
+ BITS (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN(_sa) \
- (u64) (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (_sa)) ? \
- clib_bitmap_count_set_bits (_sa->replay_window_huge) : \
- count_set_bits (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN(_irt) \
+ (u64) (PREDICT_FALSE (_irt->anti_reply_huge) ? \
+ clib_bitmap_count_set_bits (_irt->replay_window_huge) : \
+ count_set_bits (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN(_sa, _is_huge) \
- (u64) (_is_huge ? clib_bitmap_count_set_bits (_sa->replay_window_huge) : \
- count_set_bits (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN(_irt, _is_huge) \
+ (u64) (_is_huge ? clib_bitmap_count_set_bits (_irt->replay_window_huge) : \
+ count_set_bits (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX(_sa) \
- (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_sa) - 1)
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX(_irt) \
+ (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_irt) - 1)
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX_KNOWN_WIN(_sa, _is_huge) \
- (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_sa, _is_huge) - 1)
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX_KNOWN_WIN(_irt, _is_huge) \
+ (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_irt, _is_huge) - 1)
/*
* sequence number less than the lower bound are outside of the window
@@ -364,23 +373,23 @@ extern uword unformat_ipsec_key (unformat_input_t *input, va_list *args);
IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (_sa, _is_huge) + 1)
always_inline u64
-ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_t *sa)
+ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_inb_rt_t *irt)
{
- if (!ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- return sa->replay_window;
+ if (!irt->anti_reply_huge)
+ return irt->replay_window;
u64 w;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa);
- u32 tl_win_index = sa->seq & (window_size - 1);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt);
+ u32 tl_win_index = irt->seq & (window_size - 1);
if (PREDICT_TRUE (tl_win_index >= 63))
- return clib_bitmap_get_multiple (sa->replay_window_huge, tl_win_index - 63,
- 64);
+ return clib_bitmap_get_multiple (irt->replay_window_huge,
+ tl_win_index - 63, 64);
- w = clib_bitmap_get_multiple_no_check (sa->replay_window_huge, 0,
+ w = clib_bitmap_get_multiple_no_check (irt->replay_window_huge, 0,
tl_win_index + 1)
<< (63 - tl_win_index);
- w |= clib_bitmap_get_multiple_no_check (sa->replay_window_huge,
+ w |= clib_bitmap_get_multiple_no_check (irt->replay_window_huge,
window_size - 63 + tl_win_index,
63 - tl_win_index);
@@ -388,18 +397,19 @@ ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_t *sa)
}
always_inline int
-ipsec_sa_anti_replay_check (const ipsec_sa_t *sa, u32 seq, bool ar_huge)
+ipsec_sa_anti_replay_check (const ipsec_sa_inb_rt_t *irt, u32 seq,
+ bool ar_huge)
{
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
/* we assume that the packet is in the window.
* if the packet falls left (sa->seq - seq >= window size),
* the result is wrong */
if (ar_huge)
- return clib_bitmap_get (sa->replay_window_huge, seq & (window_size - 1));
+ return clib_bitmap_get (irt->replay_window_huge, seq & (window_size - 1));
else
- return (sa->replay_window >> (window_size + seq - sa->seq - 1)) & 1;
+ return (irt->replay_window >> (window_size + seq - irt->seq - 1)) & 1;
return 0;
}
@@ -419,36 +429,36 @@ ipsec_sa_anti_replay_check (const ipsec_sa_t *sa, u32 seq, bool ar_huge)
* the high sequence number is set.
*/
always_inline int
-ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
+ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_inb_rt_t *irt, u32 seq,
u32 hi_seq_used, bool post_decrypt,
u32 *hi_seq_req, bool ar_huge)
{
ASSERT ((post_decrypt == false) == (hi_seq_req != 0));
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
u32 window_lower_bound =
- IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND_KNOWN_WIN (sa, ar_huge);
+ IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND_KNOWN_WIN (irt, ar_huge);
- if (!ipsec_sa_is_set_USE_ESN (sa))
+ if (!irt->use_esn)
{
if (hi_seq_req)
/* no ESN, therefore the hi-seq is always 0 */
*hi_seq_req = 0;
- if (!ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ if (!irt->use_anti_replay)
return 0;
- if (PREDICT_TRUE (seq > sa->seq))
+ if (PREDICT_TRUE (seq > irt->seq))
return 0;
/* does the packet fall out on the left of the window */
- if (sa->seq >= seq + window_size)
+ if (irt->seq >= seq + window_size)
return 1;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
- if (!ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ if (!irt->use_anti_replay)
{
/* there's no AR configured for this SA, but in order
* to know whether a packet has wrapped the hi ESN we need
@@ -463,20 +473,20 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
*/
if (hi_seq_req)
{
- if (seq >= sa->seq)
+ if (seq >= irt->seq)
	  /* The packet's sequence number is larger than the SA's.
	   * That can't be a wrap - unless we lost more than
* 2^32 packets ... how could we know? */
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
else
{
	      /* The packet's SN is less than the SA's, so either the SN has
* wrapped or the SN is just old. */
- if (sa->seq - seq > (1 << 30))
+ if (irt->seq - seq > (1 << 30))
/* It's really really really old => it wrapped */
- *hi_seq_req = sa->seq_hi + 1;
+ *hi_seq_req = irt->seq_hi + 1;
else
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
}
}
/*
@@ -486,7 +496,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
return 0;
}
- if (PREDICT_TRUE (window_size > 0 && sa->seq >= window_size - 1))
+ if (PREDICT_TRUE (window_size > 0 && irt->seq >= window_size - 1))
{
/*
* the last sequence number VPP received is more than one
@@ -503,7 +513,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
*/
if (post_decrypt)
{
- if (hi_seq_used == sa->seq_hi)
+ if (hi_seq_used == irt->seq_hi)
	    /* the high sequence number used to successfully decrypt this
* packet is the same as the last-sequence number of the SA.
* that means this packet did not cause a wrap.
@@ -520,7 +530,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
	  /* pre-decrypt, it might be the packet that causes a wrap; we
	   * need to decrypt it to find out */
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi + 1;
+ *hi_seq_req = irt->seq_hi + 1;
return 0;
}
}
@@ -531,13 +541,13 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
* end of the window.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
- if (seq <= sa->seq)
+ *hi_seq_req = irt->seq_hi;
+ if (seq <= irt->seq)
/*
* The received seq number is within bounds of the window
* check if it's a duplicate
*/
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
else
/*
* The received sequence number is greater than the window
@@ -562,15 +572,15 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
/*
* the sequence number is less than the lower bound.
*/
- if (seq <= sa->seq)
+ if (seq <= irt->seq)
{
/*
* the packet is within the window upper bound.
* check for duplicates.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ *hi_seq_req = irt->seq_hi;
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
else
{
@@ -584,7 +594,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
* we've lost close to 2^32 packets.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
return 0;
}
}
@@ -597,8 +607,8 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
* received packet, the SA has moved on to a higher sequence number.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi - 1;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ *hi_seq_req = irt->seq_hi - 1;
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
}
@@ -608,19 +618,20 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
}
always_inline u32
-ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
+ipsec_sa_anti_replay_window_shift (ipsec_sa_inb_rt_t *irt, u32 inc,
+ bool ar_huge)
{
u32 n_lost = 0;
u32 seen = 0;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
if (inc < window_size)
{
if (ar_huge)
{
/* the number of packets we saw in this section of the window */
- clib_bitmap_t *window = sa->replay_window_huge;
- u32 window_lower_bound = (sa->seq + 1) & (window_size - 1);
+ clib_bitmap_t *window = irt->replay_window_huge;
+ u32 window_lower_bound = (irt->seq + 1) & (window_size - 1);
u32 window_next_lower_bound =
(window_lower_bound + inc) & (window_size - 1);
@@ -706,7 +717,7 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
}
clib_bitmap_set_no_check (window,
- (sa->seq + inc) & (window_size - 1), 1);
+ (irt->seq + inc) & (window_size - 1), 1);
}
else
{
@@ -715,11 +726,11 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
	   * of the window that we will right shift off the end
	   * as a result of this increment
*/
- u64 old = sa->replay_window & pow2_mask (inc);
+ u64 old = irt->replay_window & pow2_mask (inc);
/* the number of packets we saw in this section of the window */
seen = count_set_bits (old);
- sa->replay_window =
- ((sa->replay_window) >> inc) | (1ULL << (window_size - 1));
+ irt->replay_window =
+ ((irt->replay_window) >> inc) | (1ULL << (window_size - 1));
}
/*
@@ -732,7 +743,7 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
{
/* holes in the replay window are lost packets */
n_lost = window_size -
- IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN (sa, ar_huge);
+ IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN (irt, ar_huge);
/* any sequence numbers that now fall outside the window
* are forever lost */
@@ -740,13 +751,13 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
if (PREDICT_FALSE (ar_huge))
{
- clib_bitmap_zero (sa->replay_window_huge);
- clib_bitmap_set_no_check (sa->replay_window_huge,
- (sa->seq + inc) & (window_size - 1), 1);
+ clib_bitmap_zero (irt->replay_window_huge);
+ clib_bitmap_set_no_check (irt->replay_window_huge,
+ (irt->seq + inc) & (window_size - 1), 1);
}
else
{
- sa->replay_window = 1ULL << (window_size - 1);
+ irt->replay_window = 1ULL << (window_size - 1);
}
}
@@ -763,65 +774,65 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
* the branch cost.
*/
always_inline u64
-ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 thread_index, u32 seq,
- u32 hi_seq, bool ar_huge)
+ipsec_sa_anti_replay_advance (ipsec_sa_inb_rt_t *irt, u32 thread_index,
+ u32 seq, u32 hi_seq, bool ar_huge)
{
u64 n_lost = 0;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
u32 pos;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (irt->use_esn)
{
- int wrap = hi_seq - sa->seq_hi;
+ int wrap = hi_seq - irt->seq_hi;
- if (wrap == 0 && seq > sa->seq)
+ if (wrap == 0 && seq > irt->seq)
{
- pos = seq - sa->seq;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
+ pos = seq - irt->seq;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
}
else if (wrap > 0)
{
- pos = seq + ~sa->seq + 1;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
- sa->seq_hi = hi_seq;
+ pos = seq + ~irt->seq + 1;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
+ irt->seq_hi = hi_seq;
}
else if (wrap < 0)
{
- pos = ~seq + sa->seq + 1;
+ pos = ~seq + irt->seq + 1;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
else
{
- pos = sa->seq - seq;
+ pos = irt->seq - seq;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
}
else
{
- if (seq > sa->seq)
+ if (seq > irt->seq)
{
- pos = seq - sa->seq;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
+ pos = seq - irt->seq;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
}
else
{
- pos = sa->seq - seq;
+ pos = irt->seq - seq;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
}
@@ -840,12 +851,6 @@ ipsec_sa_assign_thread (u16 thread_id)
: (unix_time_now_nsec () % vlib_num_workers ()) + 1);
}
-always_inline ipsec_sa_t *
-ipsec_sa_get (u32 sa_index)
-{
- return (pool_elt_at_index (ipsec_sa_pool, sa_index));
-}
-
#endif /* __IPSEC_SPD_SA_H__ */
/*
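
For readers following the anti-replay logic above, the sketch below condenses the non-huge (64-bit window) path into a standalone form: the check mirrors ipsec_sa_anti_replay_check() plus the left/right window guards, and the advance mirrors the non-ESN branch of ipsec_sa_anti_replay_advance(), omitting the lost-packet accounting. All names (ar_win_t, ar_win_check, ar_win_advance) are illustrative only, not part of the patch.

#include <vppinfra/types.h>

typedef struct
{
  u64 window; /* bit (window_size - 1) tracks the highest seq seen */
  u32 seq;    /* highest sequence number seen so far */
} ar_win_t;

/* returns 1 if 'seq' was already seen or fell off the left edge */
static int
ar_win_check (const ar_win_t *w, u32 seq, u32 window_size)
{
  if (seq > w->seq)
    return 0; /* to the right of the window: always new */
  if (w->seq >= seq + window_size)
    return 1; /* too old: outside the window on the left */
  return (w->window >> (window_size + seq - w->seq - 1)) & 1;
}

/* advance after a packet passed the check and integrity verification */
static void
ar_win_advance (ar_win_t *w, u32 seq, u32 window_size)
{
  if (seq > w->seq)
    {
      u32 inc = seq - w->seq;
      if (inc < window_size)
	w->window = (w->window >> inc) | (1ULL << (window_size - 1));
      else
	w->window = 1ULL << (window_size - 1);
      w->seq = seq;
    }
  else
    w->window |= 1ULL << (window_size - 1 - (w->seq - seq));
}
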
diff --git a/src/vnet/ipsec/ipsec_tun.c b/src/vnet/ipsec/ipsec_tun.c
index 5fb07b3ba09..28702bdec47 100644
--- a/src/vnet/ipsec/ipsec_tun.c
+++ b/src/vnet/ipsec/ipsec_tun.c
@@ -470,6 +470,7 @@ ipsec_tun_protect_set_crypto_addr (ipsec_tun_protect_t * itp)
if (!(itp->itp_flags & IPSEC_PROTECT_ITF))
{
ipsec_sa_set_IS_PROTECT (sa);
+ ipsec_sa_update_runtime (sa);
itp->itp_flags |= IPSEC_PROTECT_ENCAPED;
}
}
@@ -497,7 +498,11 @@ ipsec_tun_protect_config (ipsec_main_t * im,
ipsec_sa_lock (itp->itp_out_sa);
if (itp->itp_flags & IPSEC_PROTECT_ITF)
- ipsec_sa_set_NO_ALGO_NO_DROP (ipsec_sa_get (itp->itp_out_sa));
+ {
+ ipsec_sa_t *sa = ipsec_sa_get (itp->itp_out_sa);
+ ipsec_sa_set_NO_ALGO_NO_DROP (sa);
+ ipsec_sa_update_runtime (sa);
+ }
FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
({
@@ -523,12 +528,16 @@ ipsec_tun_protect_unconfig (ipsec_main_t * im, ipsec_tun_protect_t * itp)
FOR_EACH_IPSEC_PROTECT_INPUT_SA(itp, sa,
({
ipsec_sa_unset_IS_PROTECT (sa);
+ ipsec_sa_update_runtime (sa);
}));
ipsec_tun_protect_rx_db_remove (im, itp);
ipsec_tun_protect_tx_db_remove (itp);
- ipsec_sa_unset_NO_ALGO_NO_DROP (ipsec_sa_get (itp->itp_out_sa));
+ sa = ipsec_sa_get (itp->itp_out_sa);
+ ipsec_sa_unset_NO_ALGO_NO_DROP (sa);
+ ipsec_sa_update_runtime (sa);
+
ipsec_sa_unlock(itp->itp_out_sa);
FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
diff --git a/src/vnet/ipsec/main.c b/src/vnet/ipsec/main.c
new file mode 100644
index 00000000000..0a01797e066
--- /dev/null
+++ b/src/vnet/ipsec/main.c
@@ -0,0 +1,201 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright(c) 2025 Cisco Systems, Inc.
+ */
+
+#include <vlib/vlib.h>
+#include <vnet/ipsec/ipsec.h>
+
+ipsec_main_t ipsec_main = {
+ .crypto_algs = {
+ [IPSEC_CRYPTO_ALG_NONE] = {
+ .enc_op_id = VNET_CRYPTO_OP_NONE,
+ .dec_op_id = VNET_CRYPTO_OP_NONE,
+ .alg = VNET_CRYPTO_ALG_NONE,
+ .iv_size = 0,
+ .block_align = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_DES_CBC] = {
+ .enc_op_id = VNET_CRYPTO_OP_DES_CBC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_DES_CBC_DEC,
+ .alg = VNET_CRYPTO_ALG_DES_CBC,
+ .iv_size = 8,
+ .block_align = 8,
+ },
+
+ [IPSEC_CRYPTO_ALG_3DES_CBC] = {
+ .enc_op_id = VNET_CRYPTO_OP_3DES_CBC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_3DES_CBC_DEC,
+ .alg = VNET_CRYPTO_ALG_3DES_CBC,
+ .iv_size = 8,
+ .block_align = 8,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_CBC_128] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_128_CBC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_128_CBC_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_128_CBC,
+ .iv_size = 16,
+ .block_align = 16,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_CBC_192] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_192_CBC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_192_CBC_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_192_CBC,
+ .iv_size = 16,
+ .block_align = 16,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_CBC_256] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_256_CBC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_256_CBC_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_256_CBC,
+ .iv_size = 16,
+ .block_align = 16,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_CTR_128] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_128_CTR_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_128_CTR_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_128_CTR,
+ .iv_size = 8,
+ .block_align = 1,
+ .is_ctr = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_CTR_192] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_192_CTR_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_192_CTR_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_192_CTR,
+ .iv_size = 8,
+ .block_align = 1,
+ .is_ctr = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_CTR_256] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_256_CTR_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_256_CTR_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_256_CTR,
+ .iv_size = 8,
+ .block_align = 1,
+ .is_ctr = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_GCM_128] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_128_GCM,
+ .iv_size = 8,
+ .block_align = 1,
+ .icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_GCM_192] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_192_GCM_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_192_GCM_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_192_GCM,
+ .iv_size = 8,
+ .block_align = 1,
+ .icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_GCM_256] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_256_GCM_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_256_GCM_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_256_GCM,
+ .iv_size = 8,
+ .block_align = 1,
+ .icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_CHACHA20_POLY1305] = {
+ .enc_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_CHACHA20_POLY1305_DEC,
+ .alg = VNET_CRYPTO_ALG_CHACHA20_POLY1305,
+ .iv_size = 8,
+ .icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_128_NULL_GMAC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_128_NULL_GMAC_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_128_GCM,
+ .iv_size = 8,
+ .block_align = 1,
+ .icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_192_NULL_GMAC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_192_NULL_GMAC_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_192_GCM,
+ .iv_size = 8,
+ .block_align = 1,
+ .icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
+ },
+
+ [IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256] = {
+ .enc_op_id = VNET_CRYPTO_OP_AES_256_NULL_GMAC_ENC,
+ .dec_op_id = VNET_CRYPTO_OP_AES_256_NULL_GMAC_DEC,
+ .alg = VNET_CRYPTO_ALG_AES_256_GCM,
+ .iv_size = 8,
+ .block_align = 1,
+ .icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
+ },
+ },
+ .integ_algs = {
+ [IPSEC_INTEG_ALG_MD5_96] = {
+ .op_id = VNET_CRYPTO_OP_MD5_HMAC,
+ .alg = VNET_CRYPTO_ALG_HMAC_MD5,
+ .icv_size = 12,
+ },
+
+ [IPSEC_INTEG_ALG_SHA1_96] = {
+ .op_id = VNET_CRYPTO_OP_SHA1_HMAC,
+ .alg = VNET_CRYPTO_ALG_HMAC_SHA1,
+ .icv_size = 12,
+ },
+
+ [IPSEC_INTEG_ALG_SHA_256_96] = {
+ .op_id = VNET_CRYPTO_OP_SHA1_HMAC,
+ .alg = VNET_CRYPTO_ALG_HMAC_SHA256,
+ .icv_size = 12,
+ },
+
+ [IPSEC_INTEG_ALG_SHA_256_128] = {
+ .op_id = VNET_CRYPTO_OP_SHA256_HMAC,
+ .alg = VNET_CRYPTO_ALG_HMAC_SHA256,
+ .icv_size = 16,
+ },
+
+ [IPSEC_INTEG_ALG_SHA_384_192] = {
+ .op_id = VNET_CRYPTO_OP_SHA384_HMAC,
+ .alg = VNET_CRYPTO_ALG_HMAC_SHA384,
+ .icv_size = 24,
+ },
+
+ [IPSEC_INTEG_ALG_SHA_512_256] = {
+ .op_id = VNET_CRYPTO_OP_SHA512_HMAC,
+ .alg = VNET_CRYPTO_ALG_HMAC_SHA512,
+ .icv_size = 32,
+ },
+ },
+};
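
Since the algorithm descriptors are now statically initialized above, per-SA parameters can be read straight out of ipsec_main. A minimal sketch of such a lookup follows, assuming the ipsec_main_crypto_alg_t and IPSEC_CRYPTO_N_ALG definitions from vnet/ipsec/ipsec.h; the helper name is hypothetical.

#include <vnet/ipsec/ipsec.h>

/* Illustrative only: fetch IV size, block alignment and AEAD-ness for a
 * crypto algorithm from the table initialized above. */
static inline int
example_crypto_alg_params (ipsec_crypto_alg_t alg, u8 *iv_size,
			   u8 *block_align, u8 *is_aead)
{
  ipsec_main_t *im = &ipsec_main;
  const ipsec_main_crypto_alg_t *a;

  if (alg >= IPSEC_CRYPTO_N_ALG)
    return -1;

  a = &im->crypto_algs[alg];
  *iv_size = a->iv_size;
  *block_align = a->block_align;
  *is_aead = a->is_aead;
  return 0;
}
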
diff --git a/src/vnet/session/application_interface.h b/src/vnet/session/application_interface.h
index d5656ff8341..21ed97998f2 100644
--- a/src/vnet/session/application_interface.h
+++ b/src/vnet/session/application_interface.h
@@ -688,8 +688,8 @@ app_send_dgram_raw_gso (svm_fifo_t *f, app_session_transport_t *at,
if (do_evt)
{
if (svm_fifo_set_event (f))
- app_send_io_evt_to_vpp (vpp_evt_q, f->shr->master_session_index,
- evt_type, noblock);
+ app_send_io_evt_to_vpp (vpp_evt_q, f->vpp_session_index, evt_type,
+ noblock);
}
return len;
}
@@ -712,8 +712,8 @@ app_send_stream_raw (svm_fifo_t * f, svm_msg_q_t * vpp_evt_q, u8 * data,
if (do_evt)
{
if (rv > 0 && svm_fifo_set_event (f))
- app_send_io_evt_to_vpp (vpp_evt_q, f->shr->master_session_index,
- evt_type, noblock);
+ app_send_io_evt_to_vpp (vpp_evt_q, f->vpp_session_index, evt_type,
+ noblock);
}
return rv;
}
diff --git a/src/vnet/session/application_local.c b/src/vnet/session/application_local.c
index 0800ce2b041..18ea77dc8a8 100644
--- a/src/vnet/session/application_local.c
+++ b/src/vnet/session/application_local.c
@@ -647,8 +647,8 @@ ct_init_accepted_session (app_worker_t *server_wrk, ct_connection_t *ct,
ls->rx_fifo->shr->master_session_index = ls->session_index;
ls->tx_fifo->shr->master_session_index = ls->session_index;
- ls->rx_fifo->master_thread_index = ls->thread_index;
- ls->tx_fifo->master_thread_index = ls->thread_index;
+ ls->rx_fifo->vpp_sh = ls->handle;
+ ls->tx_fifo->vpp_sh = ls->handle;
seg_handle = segment_manager_segment_handle (sm, fs);
segment_manager_segment_reader_unlock (sm);
diff --git a/src/vnet/session/application_worker.c b/src/vnet/session/application_worker.c
index cae340cd64e..ad0b18e8d75 100644
--- a/src/vnet/session/application_worker.c
+++ b/src/vnet/session/application_worker.c
@@ -175,10 +175,10 @@ app_worker_alloc_session_fifos (segment_manager_t * sm, session_t * s)
return rv;
rx_fifo->shr->master_session_index = s->session_index;
- rx_fifo->master_thread_index = s->thread_index;
+ rx_fifo->vpp_sh = s->handle;
tx_fifo->shr->master_session_index = s->session_index;
- tx_fifo->master_thread_index = s->thread_index;
+ tx_fifo->vpp_sh = s->handle;
s->rx_fifo = rx_fifo;
s->tx_fifo = tx_fifo;
@@ -210,10 +210,10 @@ app_worker_alloc_wrk_cl_session (app_worker_t *app_wrk, session_t *ls)
&tx_fifo);
rx_fifo->shr->master_session_index = s->session_index;
- rx_fifo->master_thread_index = s->thread_index;
+ rx_fifo->vpp_sh = s->handle;
tx_fifo->shr->master_session_index = s->session_index;
- tx_fifo->master_thread_index = s->thread_index;
+ tx_fifo->vpp_sh = s->handle;
s->rx_fifo = rx_fifo;
s->tx_fifo = tx_fifo;
diff --git a/src/vnet/session/segment_manager.c b/src/vnet/session/segment_manager.c
index 2b44d92e0b5..8c8b904c33d 100644
--- a/src/vnet/session/segment_manager.c
+++ b/src/vnet/session/segment_manager.c
@@ -625,7 +625,7 @@ segment_manager_del_sessions (segment_manager_t * sm)
*/
while (f)
{
- session = session_get_if_valid (f->shr->master_session_index,
+ session = session_get_if_valid (f->vpp_session_index,
f->master_thread_index);
if (session)
vec_add1 (handles, session_handle (session));
@@ -672,7 +672,7 @@ segment_manager_del_sessions_filter (segment_manager_t *sm,
f = fifo_segment_get_slice_fifo_list (fs, slice_index);
while (f)
{
- session = session_get_if_valid (f->shr->master_session_index,
+ session = session_get_if_valid (f->vpp_session_index,
f->master_thread_index);
if (session)
{
@@ -920,7 +920,7 @@ segment_manager_attach_fifo (segment_manager_t *sm, svm_fifo_t **f,
segment_manager_segment_reader_unlock (sm);
(*f)->shr->master_session_index = s->session_index;
- (*f)->master_thread_index = s->thread_index;
+ (*f)->vpp_sh = s->handle;
}
u32
@@ -1195,7 +1195,7 @@ segment_manager_format_sessions (segment_manager_t * sm, int verbose)
u32 session_index, thread_index;
session_t *session;
- session_index = f->shr->master_session_index;
+ session_index = f->vpp_session_index;
thread_index = f->master_thread_index;
session = session_get (session_index, thread_index);
diff --git a/src/vnet/session/session.c b/src/vnet/session/session.c
index cc0e89fd1e2..2a6ac283fb9 100644
--- a/src/vnet/session/session.c
+++ b/src/vnet/session/session.c
@@ -83,13 +83,15 @@ session_send_evt_to_thread (void *data, void *args, u32 thread_index,
return 0;
}
+/* Deprecated, use session_program_* functions */
int
session_send_io_evt_to_thread (svm_fifo_t * f, session_evt_type_t evt_type)
{
- return session_send_evt_to_thread (&f->shr->master_session_index, 0,
+ return session_send_evt_to_thread (&f->vpp_session_index, 0,
f->master_thread_index, evt_type);
}
+/* Deprecated, use session_program_* functions */
int
session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
session_evt_type_t evt_type)
@@ -121,6 +123,14 @@ session_program_rx_io_evt (session_handle_tu_t sh)
}
int
+session_program_transport_io_evt (session_handle_tu_t sh,
+ session_evt_type_t evt_type)
+{
+ return session_send_evt_to_thread ((void *) &sh.session_index, 0,
+ (u32) sh.thread_index, evt_type);
+}
+
+int
session_send_ctrl_evt_to_thread (session_t * s, session_evt_type_t evt_type)
{
/* only events supported are disconnect, shutdown and reset */
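
The new session_program_transport_io_evt() gives transports a handle-based way to queue an I/O event of an arbitrary type, complementing session_program_tx_io_evt() and session_program_rx_io_evt(). A hedged usage sketch, modeled on the transport_add_tx_event() change further below; the function and variable names in the sketch are illustrative, not part of the patch.

#include <vnet/session/session.h>

/* Illustrative only: a transport that used to call
 * session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX)
 * can now program the event through the session handle instead of the
 * fifo's master session index. */
static void
example_program_tx_evt (transport_connection_t *tc)
{
  session_t *s = session_get (tc->s_index, tc->thread_index);

  if (svm_fifo_has_event (s->tx_fifo))
    return;
  session_program_transport_io_evt (s->handle, SESSION_IO_EVT_TX);
}
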
diff --git a/src/vnet/session/session.h b/src/vnet/session/session.h
index 823bdcb02af..daa3bf97f56 100644
--- a/src/vnet/session/session.h
+++ b/src/vnet/session/session.h
@@ -484,12 +484,16 @@ void session_transport_cleanup (session_t * s);
int session_enqueue_notify (session_t *s);
int session_dequeue_notify (session_t * s);
int session_enqueue_notify_cl (session_t *s);
+/* Deprecated, use session_program_* functions */
int session_send_io_evt_to_thread (svm_fifo_t *f, session_evt_type_t evt_type);
+/* Deprecated, use session_program_* functions */
int session_send_io_evt_to_thread_custom (void *data, u32 thread_index,
session_evt_type_t evt_type);
int session_program_tx_io_evt (session_handle_tu_t sh,
session_evt_type_t evt_type);
int session_program_rx_io_evt (session_handle_tu_t sh);
+int session_program_transport_io_evt (session_handle_tu_t sh,
+ session_evt_type_t evt_type);
void session_send_rpc_evt_to_thread (u32 thread_index, void *fp,
void *rpc_args);
void session_send_rpc_evt_to_thread_force (u32 thread_index, void *fp,
@@ -659,7 +663,7 @@ transport_add_tx_event (transport_connection_t * tc)
session_t *s = session_get (tc->s_index, tc->thread_index);
if (svm_fifo_has_event (s->tx_fifo))
return;
- session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
+ session_program_tx_io_evt (s->handle, SESSION_IO_EVT_TX);
}
always_inline u32
diff --git a/src/vnet/session/session_api.c b/src/vnet/session/session_api.c
index c6df47b412b..5ac21c4eb85 100644
--- a/src/vnet/session/session_api.c
+++ b/src/vnet/session/session_api.c
@@ -426,9 +426,12 @@ mq_send_session_connected_cb (u32 app_wrk_index, u32 api_context,
}
/* Setup client session index in advance, in case data arrives
- * before the app processes message and updates it */
+   * before the app processes the message and updates it
+   * Maybe this needs to be done via a reply message from the app */
s->rx_fifo->shr->client_session_index = api_context;
s->tx_fifo->shr->client_session_index = api_context;
+ s->rx_fifo->app_session_index = api_context;
+ s->tx_fifo->app_session_index = api_context;
snd_msg:
@@ -637,7 +640,7 @@ mq_send_io_rx_event (session_t *s)
mq_evt = svm_msg_q_msg_data (mq, &mq_msg);
mq_evt->event_type = SESSION_IO_EVT_RX;
- mq_evt->session_index = s->rx_fifo->shr->client_session_index;
+ mq_evt->session_index = s->rx_fifo->app_session_index;
(void) svm_fifo_set_event (s->rx_fifo);
@@ -658,7 +661,7 @@ mq_send_io_tx_event (session_t *s)
mq_evt = svm_msg_q_msg_data (mq, &mq_msg);
mq_evt->event_type = SESSION_IO_EVT_TX;
- mq_evt->session_index = s->tx_fifo->shr->client_session_index;
+ mq_evt->session_index = s->tx_fifo->app_session_index;
svm_msg_q_add_raw (mq, &mq_msg);
diff --git a/src/vnet/session/session_debug.c b/src/vnet/session/session_debug.c
index 2a50adac5dd..158751c4eed 100644
--- a/src/vnet/session/session_debug.c
+++ b/src/vnet/session/session_debug.c
@@ -278,7 +278,7 @@ session_node_cmp_event (session_event_t * e, svm_fifo_t * f)
case SESSION_IO_EVT_BUILTIN_RX:
case SESSION_IO_EVT_TX_MAIN:
case SESSION_IO_EVT_TX_FLUSH:
- if (e->session_index == f->shr->master_session_index)
+ if (e->session_index == f->vpp_session_index)
return 1;
break;
case SESSION_CTRL_EVT_CLOSE:
diff --git a/src/vnet/session/session_node.c b/src/vnet/session/session_node.c
index 4d86d409e98..c0ff1de39bc 100644
--- a/src/vnet/session/session_node.c
+++ b/src/vnet/session/session_node.c
@@ -474,6 +474,10 @@ session_mq_accepted_reply_handler (session_worker_t *wrk,
return;
}
+ /* TODO(fcoras) This needs to be part of the reply message */
+ s->rx_fifo->app_session_index = s->rx_fifo->shr->client_session_index;
+ s->tx_fifo->app_session_index = s->tx_fifo->shr->client_session_index;
+
/* Special handling for cut-through sessions */
if (!session_has_transport (s))
{
@@ -640,6 +644,8 @@ session_mq_worker_update_handler (void *data)
}
owner_app_wrk_map = app_wrk->wrk_map_index;
app_wrk = application_get_worker (app, mp->wrk_index);
+ if (!app_wrk)
+ return;
/* This needs to come from the new owner */
if (mp->req_wrk_index == owner_app_wrk_map)
@@ -684,7 +690,7 @@ session_mq_worker_update_handler (void *data)
* Retransmit messages that may have been lost
*/
if (s->tx_fifo && !svm_fifo_is_empty (s->tx_fifo))
- session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
+ session_program_tx_io_evt (s->handle, SESSION_IO_EVT_TX);
if (s->rx_fifo && !svm_fifo_is_empty (s->rx_fifo))
app_worker_rx_notify (app_wrk, s);
diff --git a/src/vnet/session/transport.c b/src/vnet/session/transport.c
index e8c9490decb..ac9b54f333a 100644
--- a/src/vnet/session/transport.c
+++ b/src/vnet/session/transport.c
@@ -247,15 +247,15 @@ format_transport_state (u8 *s, va_list *args)
}
u32
-transport_endpoint_lookup (transport_endpoint_table_t * ht, u8 proto,
- ip46_address_t * ip, u16 port)
+transport_endpoint_lookup (transport_endpoint_table_t *ht, u8 proto,
+ u32 fib_index, ip46_address_t *ip, u16 port)
{
clib_bihash_kv_24_8_t kv;
int rv;
kv.key[0] = ip->as_u64[0];
kv.key[1] = ip->as_u64[1];
- kv.key[2] = (u64) port << 8 | (u64) proto;
+ kv.key[2] = (u64) fib_index << 32 | (u64) port << 8 | (u64) proto;
rv = clib_bihash_search_inline_24_8 (ht, &kv);
if (rv == 0)
@@ -272,7 +272,7 @@ transport_endpoint_table_add (transport_endpoint_table_t * ht, u8 proto,
kv.key[0] = te->ip.as_u64[0];
kv.key[1] = te->ip.as_u64[1];
- kv.key[2] = (u64) te->port << 8 | (u64) proto;
+ kv.key[2] = (u64) te->fib_index << 32 | (u64) te->port << 8 | (u64) proto;
kv.value = value;
clib_bihash_add_del_24_8 (ht, &kv, 1);
@@ -286,7 +286,7 @@ transport_endpoint_table_del (transport_endpoint_table_t * ht, u8 proto,
kv.key[0] = te->ip.as_u64[0];
kv.key[1] = te->ip.as_u64[1];
- kv.key[2] = (u64) te->port << 8 | (u64) proto;
+ kv.key[2] = (u64) te->fib_index << 32 | (u64) te->port << 8 | (u64) proto;
clib_bihash_add_del_24_8 (ht, &kv, 0);
}
@@ -547,14 +547,15 @@ transport_program_endpoint_cleanup (u32 lepi)
}
int
-transport_release_local_endpoint (u8 proto, ip46_address_t *lcl_ip, u16 port)
+transport_release_local_endpoint (u8 proto, u32 fib_index,
+ ip46_address_t *lcl_ip, u16 port)
{
transport_main_t *tm = &tp_main;
local_endpoint_t *lep;
u32 lepi;
- lepi = transport_endpoint_lookup (&tm->local_endpoints_table, proto, lcl_ip,
- port);
+ lepi = transport_endpoint_lookup (&tm->local_endpoints_table, proto,
+ fib_index, lcl_ip, port);
if (lepi == ENDPOINT_INVALID_INDEX)
return -1;
@@ -574,7 +575,8 @@ transport_release_local_endpoint (u8 proto, ip46_address_t *lcl_ip, u16 port)
}
static int
-transport_endpoint_mark_used (u8 proto, ip46_address_t *ip, u16 port)
+transport_endpoint_mark_used (u8 proto, u32 fib_index, ip46_address_t *ip,
+ u16 port)
{
transport_main_t *tm = &tp_main;
local_endpoint_t *lep;
@@ -582,8 +584,8 @@ transport_endpoint_mark_used (u8 proto, ip46_address_t *ip, u16 port)
ASSERT (vlib_get_thread_index () <= transport_cl_thread ());
- tei =
- transport_endpoint_lookup (&tm->local_endpoints_table, proto, ip, port);
+ tei = transport_endpoint_lookup (&tm->local_endpoints_table, proto,
+ fib_index, ip, port);
if (tei != ENDPOINT_INVALID_INDEX)
return SESSION_E_PORTINUSE;
@@ -601,7 +603,8 @@ transport_endpoint_mark_used (u8 proto, ip46_address_t *ip, u16 port)
}
void
-transport_share_local_endpoint (u8 proto, ip46_address_t * lcl_ip, u16 port)
+transport_share_local_endpoint (u8 proto, u32 fib_index,
+ ip46_address_t *lcl_ip, u16 port)
{
transport_main_t *tm = &tp_main;
local_endpoint_t *lep;
@@ -610,8 +613,8 @@ transport_share_local_endpoint (u8 proto, ip46_address_t * lcl_ip, u16 port)
  /* Active opens should call this only from a control thread, which is also
   * used to allocate and free ports. So, the pool has only one writer and
* potentially many readers. Listeners are allocated with barrier */
- lepi = transport_endpoint_lookup (&tm->local_endpoints_table, proto, lcl_ip,
- port);
+ lepi = transport_endpoint_lookup (&tm->local_endpoints_table, proto,
+ fib_index, lcl_ip, port);
if (lepi != ENDPOINT_INVALID_INDEX)
{
lep = pool_elt_at_index (tm->local_endpoints, lepi);
@@ -653,7 +656,8 @@ transport_alloc_local_port (u8 proto, ip46_address_t *lcl_addr,
}
}
- if (!transport_endpoint_mark_used (proto, lcl_addr, port))
+ if (!transport_endpoint_mark_used (proto, rmt->fib_index, lcl_addr,
+ port))
break;
/* IP:port pair already in use, check if 6-tuple available */
@@ -662,7 +666,7 @@ transport_alloc_local_port (u8 proto, ip46_address_t *lcl_addr,
continue;
/* 6-tuple is available so increment lcl endpoint refcount */
- transport_share_local_endpoint (proto, lcl_addr, port);
+ transport_share_local_endpoint (proto, rmt->fib_index, lcl_addr, port);
break;
}
@@ -783,7 +787,8 @@ transport_alloc_local_endpoint (u8 proto, transport_endpoint_cfg_t * rmt_cfg,
{
*lcl_port = rmt_cfg->peer.port;
- if (!transport_endpoint_mark_used (proto, lcl_addr, rmt_cfg->peer.port))
+ if (!transport_endpoint_mark_used (proto, rmt->fib_index, lcl_addr,
+ rmt_cfg->peer.port))
return 0;
/* IP:port pair already in use, check if 6-tuple available */
@@ -793,7 +798,8 @@ transport_alloc_local_endpoint (u8 proto, transport_endpoint_cfg_t * rmt_cfg,
return SESSION_E_PORTINUSE;
/* 6-tuple is available so increment lcl endpoint refcount */
- transport_share_local_endpoint (proto, lcl_addr, rmt_cfg->peer.port);
+ transport_share_local_endpoint (proto, rmt->fib_index, lcl_addr,
+ rmt_cfg->peer.port);
return 0;
}
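
With the fib index folded into the lookup key, local endpoints are tracked per FIB table, so the same ip:port no longer collides across VRFs in local_endpoints_table. A small sketch of the resulting layout of the third 64-bit key word, assuming vppinfra fixed-width types; the helper name is illustrative only.

#include <vppinfra/types.h>

/* Illustrative only: how transport_endpoint_lookup/add/del now pack the
 * third key word of the 24-byte bihash key (bits 24..31 stay unused). */
static inline u64
example_endpoint_key2 (u8 proto, u32 fib_index, u16 port)
{
  /* bits  0..7  : transport protocol
   * bits  8..23 : port
   * bits 32..63 : fib/table index */
  return (u64) fib_index << 32 | (u64) port << 8 | (u64) proto;
}
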
diff --git a/src/vnet/session/transport.h b/src/vnet/session/transport.h
index 289bf471af0..c864be139f9 100644
--- a/src/vnet/session/transport.h
+++ b/src/vnet/session/transport.h
@@ -248,10 +248,10 @@ int transport_alloc_local_port (u8 proto, ip46_address_t *ip,
transport_endpoint_cfg_t *rmt);
int transport_alloc_local_endpoint (u8 proto, transport_endpoint_cfg_t *rmt,
ip46_address_t *lcl_addr, u16 *lcl_port);
-void transport_share_local_endpoint (u8 proto, ip46_address_t * lcl_ip,
- u16 port);
-int transport_release_local_endpoint (u8 proto, ip46_address_t *lcl_ip,
- u16 port);
+void transport_share_local_endpoint (u8 proto, u32 fib_index,
+ ip46_address_t *lcl_ip, u16 port);
+int transport_release_local_endpoint (u8 proto, u32 fib_index,
+ ip46_address_t *lcl_ip, u16 port);
u16 transport_port_alloc_max_tries ();
void transport_clear_stats ();
void transport_enable_disable (vlib_main_t * vm, u8 is_en);
diff --git a/src/vnet/srv6/sr_localsid.c b/src/vnet/srv6/sr_localsid.c
index 2172fa10ef1..47082e9c96a 100644
--- a/src/vnet/srv6/sr_localsid.c
+++ b/src/vnet/srv6/sr_localsid.c
@@ -2028,6 +2028,7 @@ sr_localsid_un_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
vlib_buffer_t *b0;
ip6_header_t *ip0 = 0;
ip6_ext_header_t *prev0;
+ prev0 = 0;
ip6_sr_header_t *sr0;
u32 next0 = SR_LOCALSID_NEXT_IP6_LOOKUP;
ip6_sr_localsid_t *ls0;
diff --git a/src/vnet/tcp/tcp.c b/src/vnet/tcp/tcp.c
index aea49558882..8851fb9c77e 100644
--- a/src/vnet/tcp/tcp.c
+++ b/src/vnet/tcp/tcp.c
@@ -242,8 +242,8 @@ tcp_connection_cleanup (tcp_connection_t * tc)
/* Cleanup local endpoint if this was an active connect */
if (!(tc->cfg_flags & TCP_CFG_F_NO_ENDPOINT))
- transport_release_local_endpoint (TRANSPORT_PROTO_TCP, &tc->c_lcl_ip,
- tc->c_lcl_port);
+ transport_release_local_endpoint (TRANSPORT_PROTO_TCP, tc->c_fib_index,
+ &tc->c_lcl_ip, tc->c_lcl_port);
/* Check if connection is not yet fully established */
if (tc->state == TCP_STATE_SYN_SENT)
diff --git a/src/vnet/tls/tls.c b/src/vnet/tls/tls.c
index b9ff30ba6a6..08809f70070 100644
--- a/src/vnet/tls/tls.c
+++ b/src/vnet/tls/tls.c
@@ -66,7 +66,7 @@ int
tls_add_vpp_q_rx_evt (session_t * s)
{
if (svm_fifo_set_event (s->rx_fifo))
- session_send_io_evt_to_thread (s->rx_fifo, SESSION_IO_EVT_RX);
+ session_enqueue_notify (s);
return 0;
}
@@ -81,7 +81,7 @@ int
tls_add_vpp_q_tx_evt (session_t * s)
{
if (svm_fifo_set_event (s->tx_fifo))
- session_send_io_evt_to_thread (s->tx_fifo, SESSION_IO_EVT_TX);
+ session_program_tx_io_evt (s->handle, SESSION_IO_EVT_TX);
return 0;
}
@@ -569,7 +569,7 @@ dtls_migrate_ctx (void *arg)
}
if (svm_fifo_max_dequeue (us->tx_fifo))
- session_send_io_evt_to_thread (us->tx_fifo, SESSION_IO_EVT_TX);
+ session_program_tx_io_evt (us->handle, SESSION_IO_EVT_TX);
}
static void
diff --git a/src/vnet/udp/udp.c b/src/vnet/udp/udp.c
index 1fc055f8d50..661a0f6e0f0 100644
--- a/src/vnet/udp/udp.c
+++ b/src/vnet/udp/udp.c
@@ -99,8 +99,8 @@ udp_connection_free (udp_connection_t * uc)
static void
udp_connection_cleanup (udp_connection_t * uc)
{
- transport_release_local_endpoint (TRANSPORT_PROTO_UDP, &uc->c_lcl_ip,
- uc->c_lcl_port);
+ transport_release_local_endpoint (TRANSPORT_PROTO_UDP, uc->c_fib_index,
+ &uc->c_lcl_ip, uc->c_lcl_port);
udp_connection_unregister_port (uc->c_lcl_port, uc->c_is_ip4);
udp_connection_free (uc);
}
@@ -434,8 +434,8 @@ udp_open_connection (transport_endpoint_cfg_t * rmt)
/* If specific source port was requested abort */
if (rmt->peer.port)
{
- transport_release_local_endpoint (TRANSPORT_PROTO_UDP, &lcl_addr,
- lcl_port);
+ transport_release_local_endpoint (
+ TRANSPORT_PROTO_UDP, rmt->fib_index, &lcl_addr, lcl_port);
return SESSION_E_PORTINUSE;
}
@@ -443,8 +443,8 @@ udp_open_connection (transport_endpoint_cfg_t * rmt)
while (udp_connection_port_used_extern (clib_net_to_host_u16 (lcl_port),
rmt->is_ip4))
{
- transport_release_local_endpoint (TRANSPORT_PROTO_UDP, &lcl_addr,
- lcl_port);
+ transport_release_local_endpoint (
+ TRANSPORT_PROTO_UDP, rmt->fib_index, &lcl_addr, lcl_port);
lcl_port =
transport_alloc_local_port (TRANSPORT_PROTO_UDP, &lcl_addr, rmt);
if ((int) lcl_port < 1)