Diffstat (limited to 'src')
-rw-r--r--  src/plugins/cnat/cnat_snat_policy.c | 5
-rw-r--r--  src/plugins/hs_apps/proxy.c | 3
-rw-r--r--  src/plugins/http/http.c | 12
-rw-r--r--  src/plugins/http/http.h | 12
-rw-r--r--  src/plugins/http/http_buffer.c | 2
-rw-r--r--  src/plugins/ikev2/ikev2.c | 9
-rw-r--r--  src/plugins/linux-cp/lcp_interface.c | 16
-rw-r--r--  src/plugins/sflow/CMakeLists.txt | 18
-rw-r--r--  src/plugins/sflow/sflow.c | 55
-rw-r--r--  src/plugins/sflow/sflow.h | 10
-rw-r--r--  src/plugins/sflow/sflow_common.h | 2
-rw-r--r--  src/plugins/sflow/sflow_dlapi.h | 33
-rw-r--r--  src/plugins/sflow/sflow_psample.c | 5
-rw-r--r--  src/plugins/sflow/sflow_vapi.c | 226
-rw-r--r--  src/plugins/sflow/sflow_vapi.h | 55
-rw-r--r--  src/plugins/unittest/ipsec_test.c | 24
-rw-r--r--  src/vnet/CMakeLists.txt | 1
-rw-r--r--  src/vnet/crypto/crypto.c | 91
-rw-r--r--  src/vnet/crypto/crypto.h | 9
-rw-r--r--  src/vnet/ipsec/ah_decrypt.c | 55
-rw-r--r--  src/vnet/ipsec/ah_encrypt.c | 106
-rw-r--r--  src/vnet/ipsec/esp.h | 35
-rw-r--r--  src/vnet/ipsec/esp_decrypt.c | 212
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c | 230
-rw-r--r--  src/vnet/ipsec/ipsec.c | 6
-rw-r--r--  src/vnet/ipsec/ipsec.h | 9
-rw-r--r--  src/vnet/ipsec/ipsec_api.c | 179
-rw-r--r--  src/vnet/ipsec/ipsec_cli.c | 7
-rw-r--r--  src/vnet/ipsec/ipsec_format.c | 33
-rw-r--r--  src/vnet/ipsec/ipsec_funcs.h | 41
-rw-r--r--  src/vnet/ipsec/ipsec_sa.c | 335
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h | 319
-rw-r--r--  src/vnet/ipsec/ipsec_tun.c | 13
-rw-r--r--  src/vnet/ipsec/main.c | 20
-rw-r--r--  src/vnet/l2/l2_input_node.c | 5
-rw-r--r--  src/vnet/qos/qos_store.c | 2
-rw-r--r--  src/vpp/conf/80-vpp.conf | 13
-rw-r--r--  src/vppinfra/clib.h | 1
38 files changed, 1085 insertions(+), 1124 deletions(-)
diff --git a/src/plugins/cnat/cnat_snat_policy.c b/src/plugins/cnat/cnat_snat_policy.c
index cd9bfef492a..5f15b7d26c9 100644
--- a/src/plugins/cnat/cnat_snat_policy.c
+++ b/src/plugins/cnat/cnat_snat_policy.c
@@ -22,7 +22,8 @@ cnat_snat_policy_main_t cnat_snat_policy_main;
uword
unformat_cnat_snat_interface_map_type (unformat_input_t *input, va_list *args)
{
- u8 *a = va_arg (*args, u8 *);
+ cnat_snat_interface_map_type_t *a =
+ va_arg (*args, cnat_snat_interface_map_type_t *);
if (unformat (input, "include-v4"))
*a = CNAT_SNAT_IF_MAP_INCLUDE_V4;
else if (unformat (input, "include-v6"))
@@ -113,7 +114,7 @@ cnat_snat_policy_add_del_if_command_fn (vlib_main_t *vm,
vnet_main_t *vnm = vnet_get_main ();
int is_add = 1;
u32 sw_if_index = ~0;
- u32 table = 0;
+ cnat_snat_interface_map_type_t table = CNAT_SNAT_IF_MAP_INCLUDE_V4;
int rv;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
diff --git a/src/plugins/hs_apps/proxy.c b/src/plugins/hs_apps/proxy.c
index 1bcc1e85a17..ca088f4bc8a 100644
--- a/src/plugins/hs_apps/proxy.c
+++ b/src/plugins/hs_apps/proxy.c
@@ -1137,7 +1137,8 @@ active_open_tx_callback (session_t * ao_s)
if (sc->pair.is_http)
{
/* notify HTTP transport */
- session_program_rx_io_evt (sc->pair.session_handle);
+ session_program_transport_io_evt (sc->pair.session_handle,
+ SESSION_IO_EVT_RX);
}
else
{
diff --git a/src/plugins/http/http.c b/src/plugins/http/http.c
index 69b661d0611..04a4ad3e0a9 100644
--- a/src/plugins/http/http.c
+++ b/src/plugins/http/http.c
@@ -2575,6 +2575,13 @@ http_app_tx_callback (void *session, transport_send_params_t *sp)
hc = http_conn_get_w_thread (as->connection_index, as->thread_index);
+ if (hc->state == HTTP_CONN_STATE_CLOSED)
+ {
+ HTTP_DBG (1, "conn closed");
+ svm_fifo_dequeue_drop_all (as->tx_fifo);
+ return 0;
+ }
+
max_burst_sz = sp->max_burst_size * TRANSPORT_PACER_MIN_MSS;
sp->max_burst_size = max_burst_sz;
@@ -2606,7 +2613,10 @@ http_app_tx_callback (void *session, transport_send_params_t *sp)
if (hc->state == HTTP_CONN_STATE_APP_CLOSED)
{
if (!svm_fifo_max_dequeue_cons (as->tx_fifo))
- http_disconnect_transport (hc);
+ {
+ session_transport_closed_notify (&hc->connection);
+ http_disconnect_transport (hc);
+ }
}
sent = max_burst_sz - sp->max_burst_size;
diff --git a/src/plugins/http/http.h b/src/plugins/http/http.h
index d61ac0b08c7..d1e81ab0617 100644
--- a/src/plugins/http/http.h
+++ b/src/plugins/http/http.h
@@ -72,12 +72,12 @@ typedef struct
#define http_token_lit(s) (s), sizeof (s) - 1
#define foreach_http_conn_state \
- _ (LISTEN, "listen") \
- _ (CONNECTING, "connecting") \
- _ (ESTABLISHED, "established") \
- _ (TRANSPORT_CLOSED, "transport-closed") \
- _ (APP_CLOSED, "app-closed") \
- _ (CLOSED, "closed")
+ _ (LISTEN, "LISTEN") \
+ _ (CONNECTING, "CONNECTING") \
+ _ (ESTABLISHED, "ESTABLISHED") \
+ _ (TRANSPORT_CLOSED, "TRANSPORT-CLOSED") \
+ _ (APP_CLOSED, "APP-CLOSED") \
+ _ (CLOSED, "CLOSED")
typedef enum http_conn_state_
{
diff --git a/src/plugins/http/http_buffer.c b/src/plugins/http/http_buffer.c
index bc1b8c08630..909aa538396 100644
--- a/src/plugins/http/http_buffer.c
+++ b/src/plugins/http/http_buffer.c
@@ -67,7 +67,7 @@ buf_fifo_get_segs (http_buffer_t *hb, u32 max_len, u32 *n_segs)
max_len = clib_min (bf->len - bf->offset, (u64) max_len);
- vec_validate (bf->segs, _n_segs);
+ vec_validate (bf->segs, _n_segs - 1);
len = svm_fifo_segments (bf->src, 0, bf->segs, &_n_segs, max_len);
if (len < 0)
diff --git a/src/plugins/ikev2/ikev2.c b/src/plugins/ikev2/ikev2.c
index 0e6751ce851..94de4f81b0e 100644
--- a/src/plugins/ikev2/ikev2.c
+++ b/src/plugins/ikev2/ikev2.c
@@ -5551,6 +5551,7 @@ static uword
ikev2_mngr_process_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
vlib_frame_t * f)
{
+ ipsec_main_t *im = &ipsec_main;
ikev2_main_t *km = &ikev2_main;
ikev2_profile_t *p;
ikev2_child_sa_t *c;
@@ -5631,10 +5632,10 @@ ikev2_mngr_process_fn (vlib_main_t * vm, vlib_node_runtime_t * rt,
/* process ipsec sas */
ipsec_sa_t *sa;
- pool_foreach (sa, ipsec_sa_pool)
- {
- ikev2_mngr_process_ipsec_sa (sa);
- }
+ pool_foreach (sa, im->sa_pool)
+ {
+ ikev2_mngr_process_ipsec_sa (sa);
+ }
ikev2_process_pending_sa_init (vm, km);
}
diff --git a/src/plugins/linux-cp/lcp_interface.c b/src/plugins/linux-cp/lcp_interface.c
index 61665ad4146..9a6b9b11be5 100644
--- a/src/plugins/linux-cp/lcp_interface.c
+++ b/src/plugins/linux-cp/lcp_interface.c
@@ -162,6 +162,22 @@ lcp_itf_pair_get (u32 index)
return pool_elt_at_index (lcp_itf_pair_pool, index);
}
+/* Binary-direct API: for access from other plugins, bypassing VAPI.
+ * Parameters and return types must be simple C types rather than
+ * structures. See src/plugins/sflow/sflow_dlapi.h for an example.
+ */
+u32
+lcp_itf_pair_get_vif_index_by_phy (u32 phy_sw_if_index)
+{
+ if (phy_sw_if_index < vec_len (lip_db_by_phy))
+ {
+ lcp_itf_pair_t *lip = lcp_itf_pair_get (lip_db_by_phy[phy_sw_if_index]);
+ if (lip)
+ return lip->lip_vif_index;
+ }
+ return INDEX_INVALID;
+}
+
index_t
lcp_itf_pair_find_by_vif (u32 vif_index)
{
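For reference, a consumer plugin resolves and calls this entry point at run time much as the sflow plugin does later in this change. The sketch below assumes <vlib/unix/plugin.h> and vppinfra types (u32, INDEX_INVALID) are available; the wrapper name example_lookup_linux_ifindex and the local typedef are illustrative only.

typedef u32 (*lcp_vif_lookup_fn) (u32 phy_sw_if_index);

static u32
example_lookup_linux_ifindex (u32 sw_if_index)
{
  /* cache the symbol once it has been found; NULL while linux-cp is absent */
  static lcp_vif_lookup_fn fn;

  if (!fn)
    fn = vlib_get_plugin_symbol ("linux_cp_plugin.so",
				 "lcp_itf_pair_get_vif_index_by_phy");

  /* no plugin loaded, or no pair for this interface: report INDEX_INVALID */
  return fn ? fn (sw_if_index) : INDEX_INVALID;
}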
diff --git a/src/plugins/sflow/CMakeLists.txt b/src/plugins/sflow/CMakeLists.txt
index 35433bd24df..c966fcc4480 100644
--- a/src/plugins/sflow/CMakeLists.txt
+++ b/src/plugins/sflow/CMakeLists.txt
@@ -12,39 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-vpp_find_path(NETLINK_INCLUDE_DIR NAMES linux/netlink.h)
-if (NOT NETLINK_INCLUDE_DIR)
- message(WARNING "netlink headers not found - sflow plugin disabled")
- return()
-endif()
-
if ("${CMAKE_SYSTEM_NAME}" STREQUAL "FreeBSD")
message(WARNING "sflow is not supported on FreeBSD - sflow plugin disabled")
return()
endif()
-LIST(FIND excluded_plugins linux-cp exc_index)
-if(${exc_index} EQUAL "-1")
- message(WARNING "sflow plugin - linux-cp plugin included: compiling VAPI calls")
- add_compile_definitions(SFLOW_USE_VAPI)
-else()
- message(WARNING "sflow plugin - linux-cp plugin excluded: not compiling VAPI calls")
-endif()
-
-include_directories(${CMAKE_SOURCE_DIR}/vpp-api ${CMAKE_CURRENT_BINARY_DIR}/../../vpp-api)
add_vpp_plugin(sflow
SOURCES
sflow.c
node.c
sflow_common.h
sflow.h
+ sflow_dlapi.h
sflow_psample.c
sflow_psample.h
sflow_psample_fields.h
sflow_usersock.c
sflow_usersock.h
- sflow_vapi.c
- sflow_vapi.h
MULTIARCH_SOURCES
node.c
diff --git a/src/plugins/sflow/sflow.c b/src/plugins/sflow/sflow.c
index 5aa65062330..02a74d2c7f5 100644
--- a/src/plugins/sflow/sflow.c
+++ b/src/plugins/sflow/sflow.c
@@ -25,6 +25,7 @@
#include <sflow/sflow.api_enum.h>
#include <sflow/sflow.api_types.h>
#include <sflow/sflow_psample.h>
+#include <sflow/sflow_dlapi.h>
#include <vpp-api/client/stat_client.h>
#include <vlib/stats/stats.h>
@@ -181,8 +182,15 @@ retry:
SFLOWUSSpec_setMsgType (&spec, SFLOW_VPP_MSG_IF_COUNTERS);
SFLOWUSSpec_setAttr (&spec, SFLOW_VPP_ATTR_PORTNAME, hw->name,
vec_len (hw->name));
- SFLOWUSSpec_setAttrInt (&spec, SFLOW_VPP_ATTR_IFINDEX, sfif->hw_if_index);
- if (sfif->linux_if_index)
+ SFLOWUSSpec_setAttrInt (&spec, SFLOW_VPP_ATTR_IFINDEX, sfif->sw_if_index);
+
+ if (smp->lcp_itf_pair_get_vif_index_by_phy)
+ {
+ sfif->linux_if_index =
+ (*smp->lcp_itf_pair_get_vif_index_by_phy) (sfif->sw_if_index);
+ }
+
+ if (sfif->linux_if_index != INDEX_INVALID)
{
// We know the corresponding Linux ifIndex for this interface, so include
// that here.
@@ -433,15 +441,6 @@ sflow_process_samples (vlib_main_t *vm, vlib_node_runtime_t *node,
continue;
}
-#ifdef SFLOW_USE_VAPI
-#ifdef SFLOW_TEST_HAMMER_VAPI
- sflow_vapi_check_for_linux_if_index_results (&smp->vac,
- smp->per_interface_data);
- sflow_vapi_read_linux_if_index_numbers (&smp->vac,
- smp->per_interface_data);
-#endif
-#endif
-
// PSAMPLE channel may need extra step (e.g. to learn family_id)
// before it is ready to send
EnumSFLOWPSState psState = SFLOWPS_state (&smp->sflow_psample);
@@ -458,23 +457,6 @@ sflow_process_samples (vlib_main_t *vm, vlib_node_runtime_t *node,
{
// second rollover
smp->now_mono_S = tnow_S;
-#ifdef SFLOW_USE_VAPI
- if (!smp->vac.vapi_unavailable)
- {
- // look up linux if_index numbers
- sflow_vapi_check_for_linux_if_index_results (
- &smp->vac, smp->per_interface_data);
- if (smp->vapi_requests == 0 ||
- (tnow_S % SFLOW_VAPI_POLL_INTERVAL) == 0)
- {
- if (sflow_vapi_read_linux_if_index_numbers (
- &smp->vac, smp->per_interface_data))
- {
- smp->vapi_requests++;
- }
- }
- }
-#endif
// send status info
send_sampling_status_info (smp);
// poll counters for interfaces that are due
@@ -539,11 +521,6 @@ sflow_sampling_start (sflow_main_t *smp)
smp->psample_seq_egress = 0;
smp->psample_send_drops = 0;
-#ifdef SFLOW_USE_VAPI
- // reset vapi request count so that we make a request the first time
- smp->vapi_requests = 0;
-#endif
-
/* open PSAMPLE netlink channel for writing packet samples */
SFLOWPS_open (&smp->sflow_psample);
/* open USERSOCK netlink channel for writing counters */
@@ -1027,6 +1004,18 @@ sflow_init (vlib_main_t *vm)
/* access to counters - TODO: should this only happen on sflow enable? */
sflow_stat_segment_client_init ();
+
+ smp->lcp_itf_pair_get_vif_index_by_phy =
+ vlib_get_plugin_symbol (SFLOW_LCP_LIB, SFLOW_LCP_SYM_GET_VIF_BY_PHY);
+ if (smp->lcp_itf_pair_get_vif_index_by_phy)
+ {
+ SFLOW_NOTICE ("linux-cp found - using LIP vif_index, where available");
+ }
+ else
+ {
+ SFLOW_NOTICE ("linux-cp not found - using VPP sw_if_index");
+ }
+
return error;
}
diff --git a/src/plugins/sflow/sflow.h b/src/plugins/sflow/sflow.h
index 609ff723816..0ec5ac90688 100644
--- a/src/plugins/sflow/sflow.h
+++ b/src/plugins/sflow/sflow.h
@@ -22,7 +22,6 @@
#include <vppinfra/hash.h>
#include <vppinfra/error.h>
#include <sflow/sflow_common.h>
-#include <sflow/sflow_vapi.h>
#include <sflow/sflow_psample.h>
#include <sflow/sflow_usersock.h>
@@ -124,6 +123,8 @@ typedef struct
sflow_fifo_t fifo;
} sflow_per_thread_data_t;
+typedef u32 (*IfIndexLookupFn) (u32);
+
typedef struct
{
/* API message ID base */
@@ -164,12 +165,7 @@ typedef struct
u32 csample_send;
u32 csample_send_drops;
u32 unixsock_seq;
-#ifdef SFLOW_USE_VAPI
- /* vapi query helper thread (transient) */
- CLIB_CACHE_LINE_ALIGN_MARK (_vapi);
- sflow_vapi_client_t vac;
- int vapi_requests;
-#endif
+ IfIndexLookupFn lcp_itf_pair_get_vif_index_by_phy;
} sflow_main_t;
extern sflow_main_t sflow_main;
diff --git a/src/plugins/sflow/sflow_common.h b/src/plugins/sflow/sflow_common.h
index 29784638bb9..26f306b5741 100644
--- a/src/plugins/sflow/sflow_common.h
+++ b/src/plugins/sflow/sflow_common.h
@@ -15,8 +15,6 @@
#ifndef __included_sflow_common_h__
#define __included_sflow_common_h__
-// #define SFLOW_USE_VAPI (set by CMakeLists.txt)
-
extern vlib_log_class_t sflow_logger;
#define SFLOW_DBG(...) vlib_log_debug (sflow_logger, __VA_ARGS__);
#define SFLOW_INFO(...) vlib_log_info (sflow_logger, __VA_ARGS__);
diff --git a/src/plugins/sflow/sflow_dlapi.h b/src/plugins/sflow/sflow_dlapi.h
new file mode 100644
index 00000000000..e983bc8f6fe
--- /dev/null
+++ b/src/plugins/sflow/sflow_dlapi.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright (c) 2025 InMon Corp.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+#ifndef __included_sflow_dlapi_h__
+#define __included_sflow_dlapi_h__
+/* Dynamic-link API
+ * If present, linux-cp plugin will be queried to learn the
+ * Linux if_index for each VPP if_index. If that plugin is not
+ * compiled and loaded, or if the function symbol is not found,
+ * then the interfaces will be reported to NETLINK_USERSOCK
+ * without this extra mapping.
+ */
+#define SFLOW_LCP_LIB "linux_cp_plugin.so"
+#define SFLOW_LCP_SYM_GET_VIF_BY_PHY "lcp_itf_pair_get_vif_index_by_phy"
+#endif /* __included_sflow_dlapi_h__ */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/src/plugins/sflow/sflow_psample.c b/src/plugins/sflow/sflow_psample.c
index 0e4fcfbe790..41df454d999 100644
--- a/src/plugins/sflow/sflow_psample.c
+++ b/src/plugins/sflow/sflow_psample.c
@@ -13,11 +13,6 @@
* limitations under the License.
*/
-#if defined(__cplusplus)
-extern "C"
-{
-#endif
-
#include <vlib/vlib.h>
#include <vnet/vnet.h>
#include <vnet/pg/pg.h>
diff --git a/src/plugins/sflow/sflow_vapi.c b/src/plugins/sflow/sflow_vapi.c
deleted file mode 100644
index cdc89a54c80..00000000000
--- a/src/plugins/sflow/sflow_vapi.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (c) 2024 InMon Corp.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include <sflow/sflow_vapi.h>
-
-#ifdef SFLOW_USE_VAPI
-
-#include <vlibapi/api.h>
-#include <vlibmemory/api.h>
-#include <vpp/app/version.h>
-#include <stdbool.h>
-
-#include <vapi/vapi.h>
-#include <vapi/memclnt.api.vapi.h>
-#include <vapi/vlib.api.vapi.h>
-
-#ifdef included_interface_types_api_types_h
-#define defined_vapi_enum_if_status_flags
-#define defined_vapi_enum_mtu_proto
-#define defined_vapi_enum_link_duplex
-#define defined_vapi_enum_sub_if_flags
-#define defined_vapi_enum_rx_mode
-#define defined_vapi_enum_if_type
-#define defined_vapi_enum_direction
-#endif
-#include <vapi/lcp.api.vapi.h>
-
-DEFINE_VAPI_MSG_IDS_LCP_API_JSON;
-
-static vapi_error_e
-my_pair_get_cb (struct vapi_ctx_s *ctx, void *callback_ctx, vapi_error_e rv,
- bool is_last, vapi_payload_lcp_itf_pair_get_v2_reply *reply)
-{
- // this is a no-op, but it seems like it's presence is still required. For
- // example, it is called if the pair lookup does not find anything.
- return VAPI_OK;
-}
-
-static vapi_error_e
-my_pair_details_cb (struct vapi_ctx_s *ctx, void *callback_ctx,
- vapi_error_e rv, bool is_last,
- vapi_payload_lcp_itf_pair_details *details)
-{
- sflow_per_interface_data_t *sfif =
- (sflow_per_interface_data_t *) callback_ctx;
- // Setting this here will mean it is sent to hsflowd with the interface
- // counters.
- sfif->linux_if_index = details->vif_index;
- return VAPI_OK;
-}
-
-static vapi_error_e
-sflow_vapi_connect (sflow_vapi_client_t *vac)
-{
- vapi_error_e rv = VAPI_OK;
- vapi_ctx_t ctx = vac->vapi_ctx;
- if (ctx == NULL)
- {
- // first time - open and connect.
- if ((rv = vapi_ctx_alloc (&ctx)) != VAPI_OK)
- {
- SFLOW_ERR ("vap_ctx_alloc() returned %d", rv);
- }
- else
- {
- vac->vapi_ctx = ctx;
- if ((rv = vapi_connect_from_vpp (
- ctx, "api_from_sflow_plugin", SFLOW_VAPI_MAX_REQUEST_Q,
- SFLOW_VAPI_MAX_RESPONSE_Q, VAPI_MODE_BLOCKING, true)) !=
- VAPI_OK)
- {
- SFLOW_ERR ("vapi_connect_from_vpp() returned %d", rv);
- }
- else
- {
- // Connected - but is there a handler for the request we want to
- // send?
- if (!vapi_is_msg_available (ctx,
- vapi_msg_id_lcp_itf_pair_add_del_v2))
- {
- SFLOW_WARN ("vapi_is_msg_available() returned false => "
- "linux-cp plugin not loaded");
- rv = VAPI_EUSER;
- }
- }
- }
- }
- return rv;
-}
-
-// in forked thread
-static void *
-get_lcp_itf_pairs (void *magic)
-{
- sflow_vapi_client_t *vac = magic;
- vapi_error_e rv = VAPI_OK;
-
- sflow_per_interface_data_t *intfs = vac->vapi_itfs;
- vlib_set_thread_name (SFLOW_VAPI_THREAD_NAME);
- if ((rv = sflow_vapi_connect (vac)) != VAPI_OK)
- {
- vac->vapi_unavailable = true;
- }
- else
- {
- vapi_ctx_t ctx = vac->vapi_ctx;
-
- for (int ii = 1; ii < vec_len (intfs); ii++)
- {
- sflow_per_interface_data_t *sfif = vec_elt_at_index (intfs, ii);
- if (sfif && sfif->sflow_enabled)
- {
- // TODO: if we try non-blocking we might not be able to just pour
- // all the requests in here. Might be better to do them one at a
- // time - e.g. when we poll for counters.
- vapi_msg_lcp_itf_pair_get_v2 *msg =
- vapi_alloc_lcp_itf_pair_get_v2 (ctx);
- if (msg)
- {
- msg->payload.sw_if_index = sfif->sw_if_index;
- if ((rv = vapi_lcp_itf_pair_get_v2 (ctx, msg, my_pair_get_cb,
- sfif, my_pair_details_cb,
- sfif)) != VAPI_OK)
- {
- SFLOW_ERR ("vapi_lcp_itf_pair_get_v2 returned %d", rv);
- // vapi.h: "message must be freed by vapi_msg_free if not
- // consumed by vapi_send"
- vapi_msg_free (ctx, msg);
- }
- }
- }
- }
- // We no longer disconnect or free the client structures
- // vapi_disconnect_from_vpp (ctx);
- // vapi_ctx_free (ctx);
- }
- // indicate that we are done - more portable that using pthread_tryjoin_np()
- vac->vapi_request_status = (int) rv;
- clib_atomic_store_rel_n (&vac->vapi_request_active, false);
- // TODO: how to tell if heap-allocated data is stored separately per thread?
- // And if so, how to tell the allocator to GC all data for the thread when it
- // exits?
- return (void *) rv;
-}
-
-int
-sflow_vapi_read_linux_if_index_numbers (sflow_vapi_client_t *vac,
- sflow_per_interface_data_t *itfs)
-{
-
-#ifdef SFLOW_VAPI_TEST_PLUGIN_SYMBOL
- // don't even fork the query thread if the symbol is not there
- if (!vlib_get_plugin_symbol ("linux_cp_plugin.so", "lcp_itf_pair_get"))
- {
- return false;
- }
-#endif
- // previous query is done and results extracted?
- int req_active = clib_atomic_load_acq_n (&vac->vapi_request_active);
- if (req_active == false && vac->vapi_itfs == NULL)
- {
- // make a copy of the current interfaces vector for the lookup thread to
- // write into
- vac->vapi_itfs = vec_dup (itfs);
- pthread_attr_t attr;
- pthread_attr_init (&attr);
- pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_DETACHED);
- pthread_attr_setstacksize (&attr, VLIB_THREAD_STACK_SIZE);
- vac->vapi_request_active = true;
- pthread_create (&vac->vapi_thread, &attr, get_lcp_itf_pairs, vac);
- pthread_attr_destroy (&attr);
- return true;
- }
- return false;
-}
-
-int
-sflow_vapi_check_for_linux_if_index_results (sflow_vapi_client_t *vac,
- sflow_per_interface_data_t *itfs)
-{
- // request completed?
- // TODO: if we use non-blocking mode do we have to call something here to
- // receive results?
- int req_active = clib_atomic_load_acq_n (&vac->vapi_request_active);
- if (req_active == false && vac->vapi_itfs != NULL)
- {
- // yes, extract what we learned
- // TODO: would not have to do this if vector were array of pointers
- // to sflow_per_interface_data_t rather than an actual array, but
- // it does mean we have very clear separation between the threads.
- for (int ii = 1; ii < vec_len (vac->vapi_itfs); ii++)
- {
- sflow_per_interface_data_t *sfif1 =
- vec_elt_at_index (vac->vapi_itfs, ii);
- sflow_per_interface_data_t *sfif2 = vec_elt_at_index (itfs, ii);
- if (sfif1 && sfif2 && sfif1->sflow_enabled && sfif2->sflow_enabled)
- sfif2->linux_if_index = sfif1->linux_if_index;
- }
- vec_free (vac->vapi_itfs);
- vac->vapi_itfs = NULL;
- return true;
- }
- return false;
-}
-
-#endif /* SFLOW_USE_VAPI */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/plugins/sflow/sflow_vapi.h b/src/plugins/sflow/sflow_vapi.h
deleted file mode 100644
index 640fe997684..00000000000
--- a/src/plugins/sflow/sflow_vapi.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (c) 2024 InMon Corp.
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at:
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-#ifndef __included_sflow_vapi_h__
-#define __included_sflow_vapi_h__
-
-#include <vnet/vnet.h>
-#include <sflow/sflow_common.h>
-
-#ifdef SFLOW_USE_VAPI
-
-#define SFLOW_VAPI_POLL_INTERVAL 5
-#define SFLOW_VAPI_MAX_REQUEST_Q 8
-#define SFLOW_VAPI_MAX_RESPONSE_Q 16
-#define SFLOW_VAPI_THREAD_NAME "sflow_vapi" // must be <= 15 characters
-
-// #define SFLOW_VAPI_TEST_PLUGIN_SYMBOL
-
-typedef struct
-{
- volatile int vapi_request_active; // to sync main <-> vapi_thread
- pthread_t vapi_thread;
- sflow_per_interface_data_t *vapi_itfs;
- int vapi_unavailable;
- int vapi_request_status; // written by vapi_thread
- void *vapi_ctx;
-} sflow_vapi_client_t;
-
-int sflow_vapi_read_linux_if_index_numbers (sflow_vapi_client_t *vac,
- sflow_per_interface_data_t *itfs);
-int
-sflow_vapi_check_for_linux_if_index_results (sflow_vapi_client_t *vac,
- sflow_per_interface_data_t *itfs);
-
-#endif /* SFLOW_USE_VAPI */
-#endif /* __included_sflow_vapi_h__ */
-
-/*
- * fd.io coding-style-patch-verification: ON
- *
- * Local Variables:
- * eval: (c-set-style "gnu")
- * End:
- */
diff --git a/src/plugins/unittest/ipsec_test.c b/src/plugins/unittest/ipsec_test.c
index 98253eeb12a..b505c58de3f 100644
--- a/src/plugins/unittest/ipsec_test.c
+++ b/src/plugins/unittest/ipsec_test.c
@@ -40,19 +40,29 @@ test_ipsec_command_fn (vlib_main_t *vm, unformat_input_t *input,
if (~0 != sa_id)
{
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
u32 sa_index;
sa_index = ipsec_sa_find_and_lock (sa_id);
sa = ipsec_sa_get (sa_index);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
- sa->seq = seq_num & 0xffffffff;
- sa->seq_hi = seq_num >> 32;
+ if (ort)
+ ort->seq64 = seq_num;
- /* clear the window */
- if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- clib_bitmap_zero (sa->replay_window_huge);
- else
- sa->replay_window = 0;
+ if (irt)
+ {
+ irt->seq = seq_num & 0xffffffff;
+ irt->seq_hi = seq_num >> 32;
+
+ /* clear the window */
+ if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
+ clib_bitmap_zero (irt->replay_window_huge);
+ else
+ irt->replay_window = 0;
+ }
ipsec_sa_unlock (sa_index);
}
diff --git a/src/vnet/CMakeLists.txt b/src/vnet/CMakeLists.txt
index b6227d45a2a..a071709542a 100644
--- a/src/vnet/CMakeLists.txt
+++ b/src/vnet/CMakeLists.txt
@@ -589,6 +589,7 @@ list(APPEND VNET_HEADERS
ipsec/ipsec_tun.h
ipsec/ipsec_types_api.h
ipsec/ipsec_punt.h
+ ipsec/ipsec_funcs.h
ipsec/esp.h
ipsec/ah.h
)
diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c
index 35e7768375d..d1a6a6b12a1 100644
--- a/src/vnet/crypto/crypto.c
+++ b/src/vnet/crypto/crypto.c
@@ -381,17 +381,44 @@ vnet_crypto_register_key_handler (vlib_main_t *vm, u32 engine_index,
return;
}
+static vnet_crypto_key_t *
+vnet_crypoto_key_alloc (u32 length)
+{
+ vnet_crypto_main_t *cm = &crypto_main;
+ u8 expected = 0;
+ vnet_crypto_key_t *k, **kp;
+ u32 alloc_sz = sizeof (vnet_crypto_key_t) + round_pow2 (length, 16);
+
+ while (!__atomic_compare_exchange_n (&cm->keys_lock, &expected, 1, 0,
+ __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
+ {
+ while (__atomic_load_n (&cm->keys_lock, __ATOMIC_RELAXED))
+ CLIB_PAUSE ();
+ expected = 0;
+ }
+
+ pool_get (cm->keys, kp);
+
+ __atomic_store_n (&cm->keys_lock, 0, __ATOMIC_RELEASE);
+
+ k = clib_mem_alloc_aligned (alloc_sz, alignof (vnet_crypto_key_t));
+ kp[0] = k;
+ *k = (vnet_crypto_key_t){
+ .index = kp - cm->keys,
+ .length = length,
+ };
+
+ return k;
+}
+
u32
vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg, u8 * data,
u16 length)
{
- u32 index;
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
- vnet_crypto_key_t *key, **kp;
+ vnet_crypto_key_t *key;
vnet_crypto_alg_data_t *ad = cm->algs + alg;
- u32 alloc_sz = sizeof (vnet_crypto_key_t) + round_pow2 (length, 16);
- u8 need_barrier_sync = 0;
ASSERT (alg != 0);
@@ -407,29 +434,14 @@ vnet_crypto_key_add (vlib_main_t * vm, vnet_crypto_alg_t alg, u8 * data,
return ~0;
}
- need_barrier_sync = pool_get_will_expand (cm->keys);
- /* If the cm->keys will expand, stop the parade. */
- if (need_barrier_sync)
- vlib_worker_thread_barrier_sync (vm);
-
- pool_get (cm->keys, kp);
-
- if (need_barrier_sync)
- vlib_worker_thread_barrier_release (vm);
+ key = vnet_crypoto_key_alloc (length);
+ key->alg = alg;
- key = clib_mem_alloc_aligned (alloc_sz, _Alignof (vnet_crypto_key_t));
- kp[0] = key;
- index = kp - cm->keys;
- *key = (vnet_crypto_key_t){
- .index = index,
- .alg = alg,
- .length = length,
- };
clib_memcpy (key->data, data, length);
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
- engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, index);
- return index;
+ engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, key->index);
+ return key->index;
}
void
@@ -478,10 +490,9 @@ vnet_crypto_key_add_linked (vlib_main_t * vm,
vnet_crypto_key_index_t index_crypto,
vnet_crypto_key_index_t index_integ)
{
- u32 index, need_barrier_sync;
vnet_crypto_main_t *cm = &crypto_main;
vnet_crypto_engine_t *engine;
- vnet_crypto_key_t *key_crypto, *key_integ, *key, **kp;
+ vnet_crypto_key_t *key_crypto, *key_integ, *key;
vnet_crypto_alg_t linked_alg;
key_crypto = cm->keys[index_crypto];
@@ -491,33 +502,17 @@ vnet_crypto_key_add_linked (vlib_main_t * vm,
if (linked_alg == ~0)
return ~0;
- need_barrier_sync = pool_get_will_expand (cm->keys);
- /* If the cm->keys will expand, stop the parade. */
- if (need_barrier_sync)
- vlib_worker_thread_barrier_sync (vm);
-
- pool_get (cm->keys, kp);
-
- if (need_barrier_sync)
- vlib_worker_thread_barrier_release (vm);
-
- key = clib_mem_alloc_aligned (sizeof (vnet_crypto_key_t),
- _Alignof (vnet_crypto_key_t));
- kp[0] = key;
- index = kp - cm->keys;
- *key = (vnet_crypto_key_t){
- .index = index,
- .is_link = 1,
- .index_crypto = index_crypto,
- .index_integ = index_integ,
- .alg = linked_alg,
- };
+ key = vnet_crypoto_key_alloc (0);
+ key->is_link = 1;
+ key->index_crypto = index_crypto;
+ key->index_integ = index_integ;
+ key->alg = linked_alg;
vec_foreach (engine, cm->engines)
if (engine->key_op_handler)
- engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, index);
+ engine->key_op_handler (VNET_CRYPTO_KEY_OP_ADD, key->index);
- return index;
+ return key->index;
}
u32
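The key-allocation helper added above serializes pool_get with a one-byte spinlock built from GCC atomics instead of the worker barrier. Shown in isolation below (the example_* function names are illustrative; CLIB_PAUSE comes from vppinfra), the pattern is: take the lock with a compare-exchange acquire, back off on relaxed loads while it is held, and publish with a release store.

static inline void
example_byte_lock (u8 *lock)
{
  u8 expected = 0;
  /* try to flip 0 -> 1 with acquire semantics */
  while (!__atomic_compare_exchange_n (lock, &expected, 1, 0 /* strong */,
				       __ATOMIC_ACQUIRE, __ATOMIC_RELAXED))
    {
      /* lock is held: wait on plain loads to avoid hammering the cache line */
      while (__atomic_load_n (lock, __ATOMIC_RELAXED))
	CLIB_PAUSE ();
      expected = 0;
    }
}

static inline void
example_byte_unlock (u8 *lock)
{
  /* release store makes the critical section visible to the next owner */
  __atomic_store_n (lock, 0, __ATOMIC_RELEASE);
}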
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index a4b6ab97620..0a021282b5d 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -420,16 +420,17 @@ typedef struct
typedef struct
{
- vnet_crypto_alg_data_t algs[VNET_CRYPTO_N_ALGS];
+ vnet_crypto_key_t **keys;
+ u8 keys_lock;
+ u32 crypto_node_index;
vnet_crypto_thread_t *threads;
vnet_crypto_frame_dequeue_t **dequeue_handlers;
- vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
vnet_crypto_engine_t *engines;
- vnet_crypto_key_t **keys;
uword *engine_index_by_name;
uword *alg_index_by_name;
vnet_crypto_async_next_node_t *next_nodes;
- u32 crypto_node_index;
+ vnet_crypto_alg_data_t algs[VNET_CRYPTO_N_ALGS];
+ vnet_crypto_op_data_t opt_data[VNET_CRYPTO_N_OP_IDS];
} vnet_crypto_main_t;
extern vnet_crypto_main_t crypto_main;
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index ec4db0fed57..6b62ff7f05c 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -127,7 +127,7 @@ ah_decrypt_inline (vlib_main_t * vm,
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
from = vlib_frame_vector_args (from_frame);
n_left = from_frame->n_vectors;
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_inb_rt_t *irt = 0;
bool anti_replay_result;
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
@@ -149,25 +149,25 @@ ah_decrypt_inline (vlib_main_t * vm,
current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = ipsec_sa_get (current_sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
vlib_prefetch_combined_counter (&ipsec_sa_counters,
thread_index, current_sa_index);
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == irt->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&irt->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->thread_index))
+ if (PREDICT_TRUE (thread_index != irt->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = irt->thread_index;
next[0] = AH_DECRYPT_NEXT_HANDOFF;
goto next;
}
@@ -202,15 +202,15 @@ ah_decrypt_inline (vlib_main_t * vm,
pd->seq = clib_host_to_net_u32 (ah0->seq_no);
/* anti-replay check */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+ irt, pd->seq, ~0, false, &pd->seq_hi, true);
}
else
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+ irt, pd->seq, ~0, false, &pd->seq_hi, false);
}
if (anti_replay_result)
{
@@ -223,13 +223,14 @@ ah_decrypt_inline (vlib_main_t * vm,
current_sa_bytes += b[0]->current_length;
current_sa_pkts += 1;
- pd->icv_size = sa0->integ_icv_size;
+ pd->icv_size = irt->integ_icv_size;
pd->nexthdr_cached = ah0->nexthdr;
- if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+ if (PREDICT_TRUE (irt->integ_icv_size))
{
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ESN (sa0) &&
- pd->current_data + b[0]->current_length
- + sizeof (u32) > buffer_data_size))
+ if (PREDICT_FALSE (irt->use_esn && pd->current_data +
+ b[0]->current_length +
+ sizeof (u32) >
+ buffer_data_size))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_NO_TAIL_SPACE,
@@ -239,16 +240,16 @@ ah_decrypt_inline (vlib_main_t * vm,
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, irt->integ_op_id);
op->src = (u8 *) ih4;
op->len = b[0]->current_length;
op->digest = (u8 *) ih4 - pd->icv_size;
op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
op->digest_len = pd->icv_size;
- op->key_index = sa0->integ_key_index;
+ op->key_index = irt->integ_key_index;
op->user_data = b - bufs;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
@@ -311,15 +312,15 @@ ah_decrypt_inline (vlib_main_t * vm,
if (next[0] < AH_DECRYPT_N_NEXT)
goto trace;
- sa0 = ipsec_sa_get (pd->sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (pd->sa_index);
- if (PREDICT_TRUE (sa0->integ_alg != IPSEC_INTEG_ALG_NONE))
+ if (PREDICT_TRUE (irt->integ_icv_size))
{
/* redo the anti-reply check. see esp_decrypt for details */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
if (ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, pd->seq_hi, true, NULL, true))
+ irt, pd->seq, pd->seq_hi, true, NULL, true))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
@@ -327,12 +328,12 @@ ah_decrypt_inline (vlib_main_t * vm,
goto trace;
}
n_lost = ipsec_sa_anti_replay_advance (
- sa0, thread_index, pd->seq, pd->seq_hi, true);
+ irt, thread_index, pd->seq, pd->seq_hi, true);
}
else
{
if (ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, pd->seq_hi, true, NULL, false))
+ irt, pd->seq, pd->seq_hi, true, NULL, false))
{
ah_decrypt_set_next_index (
b[0], node, vm->thread_index, AH_DECRYPT_ERROR_REPLAY, 0,
@@ -340,7 +341,7 @@ ah_decrypt_inline (vlib_main_t * vm,
goto trace;
}
n_lost = ipsec_sa_anti_replay_advance (
- sa0, thread_index, pd->seq, pd->seq_hi, false);
+ irt, thread_index, pd->seq, pd->seq_hi, false);
}
vlib_prefetch_simple_counter (
&ipsec_sa_err_counters[IPSEC_SA_ERROR_LOST], thread_index,
@@ -354,7 +355,7 @@ ah_decrypt_inline (vlib_main_t * vm,
b[0]->flags &= ~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (irt->is_tunnel))
{ /* tunnel mode */
if (PREDICT_TRUE (pd->nexthdr_cached == IP_PROTOCOL_IP_IN_IP))
next[0] = AH_DECRYPT_NEXT_IP4_INPUT;
@@ -424,10 +425,10 @@ ah_decrypt_inline (vlib_main_t * vm,
trace:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- sa0 = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
+ ipsec_sa_t *sa = ipsec_sa_get (vnet_buffer (b[0])->ipsec.sad_index);
ah_decrypt_trace_t *tr =
vlib_add_trace (vm, node, b[0], sizeof (*tr));
- tr->integ_alg = sa0->integ_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq_num = pd->seq;
}
diff --git a/src/vnet/ipsec/ah_encrypt.c b/src/vnet/ipsec/ah_encrypt.c
index 86694660878..1b32b8d2c7c 100644
--- a/src/vnet/ipsec/ah_encrypt.c
+++ b/src/vnet/ipsec/ah_encrypt.c
@@ -43,8 +43,7 @@ typedef struct
{
u32 sa_index;
u32 spi;
- u32 seq_lo;
- u32 seq_hi;
+ u64 seq;
ipsec_integ_alg_t integ_alg;
} ah_encrypt_trace_t;
@@ -56,9 +55,9 @@ format_ah_encrypt_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
ah_encrypt_trace_t *t = va_arg (*args, ah_encrypt_trace_t *);
- s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %u:%u integrity %U",
- t->sa_index, t->spi, t->spi, t->seq_hi, t->seq_lo,
- format_ipsec_integ_alg, t->integ_alg);
+ s = format (s, "ah: sa-index %d spi %u (0x%08x) seq %lu integrity %U",
+ t->sa_index, t->spi, t->spi, t->seq, format_ipsec_integ_alg,
+ t->integ_alg);
return s;
}
@@ -128,7 +127,7 @@ ah_encrypt_inline (vlib_main_t * vm,
vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, thread_index);
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_outb_rt_t *ort = 0;
ip4_and_ah_header_t *ih0, *oh0 = 0;
ip6_and_ah_header_t *ih6_0, *oh6_0 = 0;
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
@@ -158,7 +157,7 @@ ah_encrypt_inline (vlib_main_t * vm,
current_sa_index, current_sa_pkts,
current_sa_bytes);
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
- sa0 = ipsec_sa_get (current_sa_index);
+ ort = ipsec_sa_get_outb_rt_by_index (current_sa_index);
current_sa_bytes = current_sa_pkts = 0;
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
@@ -168,23 +167,23 @@ ah_encrypt_inline (vlib_main_t * vm,
pd->sa_index = current_sa_index;
next[0] = AH_ENCRYPT_NEXT_DROP;
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == ort->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&ort->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_TRUE (thread_index != sa0->thread_index))
+ if (PREDICT_TRUE (thread_index != ort->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = ort->thread_index;
next[0] = AH_ENCRYPT_NEXT_HANDOFF;
goto next;
}
- if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ if (PREDICT_FALSE (esp_seq_advance (ort)))
{
ah_encrypt_set_next_index (b[0], node, vm->thread_index,
AH_ENCRYPT_ERROR_SEQ_CYCLED, 0, next,
@@ -199,7 +198,7 @@ ah_encrypt_inline (vlib_main_t * vm,
ssize_t adv;
ih0 = vlib_buffer_get_current (b[0]);
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
if (is_ip6)
adv = -sizeof (ip6_and_ah_header_t);
@@ -211,11 +210,11 @@ ah_encrypt_inline (vlib_main_t * vm,
adv = -sizeof (ah_header_t);
}
- icv_size = sa0->integ_icv_size;
+ icv_size = ort->integ_icv_size;
const u8 padding_len = ah_calc_icv_padding_len (icv_size, is_ip6);
adv -= padding_len;
/* transport mode save the eth header before it is overwritten */
- if (PREDICT_FALSE (!ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (!ort->is_tunnel))
{
const u32 l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
u8 *l2_hdr_in = (u8 *) vlib_buffer_get_current (b[0]) - l2_len;
@@ -238,16 +237,16 @@ ah_encrypt_inline (vlib_main_t * vm,
oh6_0->ip6.ip_version_traffic_class_and_flow_label =
ih6_0->ip6.ip_version_traffic_class_and_flow_label;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (ort->is_tunnel))
{
- ip6_set_dscp_network_order (&oh6_0->ip6, sa0->tunnel.t_dscp);
- tunnel_encap_fixup_6o6 (sa0->tunnel_flags, &ih6_0->ip6,
+ ip6_set_dscp_network_order (&oh6_0->ip6, ort->t_dscp);
+ tunnel_encap_fixup_6o6 (ort->tunnel_flags, &ih6_0->ip6,
&oh6_0->ip6);
}
pd->ip_version_traffic_class_and_flow_label =
oh6_0->ip6.ip_version_traffic_class_and_flow_label;
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
next_hdr_type = IP_PROTOCOL_IPV6;
}
@@ -260,8 +259,8 @@ ah_encrypt_inline (vlib_main_t * vm,
clib_memcpy_fast (&oh6_0->ip6, &ip6_hdr_template, 8);
oh6_0->ah.reserved = 0;
oh6_0->ah.nexthdr = next_hdr_type;
- oh6_0->ah.spi = clib_net_to_host_u32 (sa0->spi);
- oh6_0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
+ oh6_0->ah.spi = ort->spi_be;
+ oh6_0->ah.seq_no = clib_net_to_host_u32 (ort->seq64);
oh6_0->ip6.payload_length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]) -
sizeof (ip6_header_t));
@@ -274,18 +273,18 @@ ah_encrypt_inline (vlib_main_t * vm,
oh0 = vlib_buffer_get_current (b[0]);
pd->ttl = ih0->ip4.ttl;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_FALSE (ort->is_tunnel))
{
- if (sa0->tunnel.t_dscp)
- pd->tos = sa0->tunnel.t_dscp << 2;
+ if (ort->t_dscp)
+ pd->tos = ort->t_dscp << 2;
else
{
pd->tos = ih0->ip4.tos;
- if (!(sa0->tunnel_flags &
+ if (!(ort->tunnel_flags &
TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_DSCP))
pd->tos &= 0x3;
- if (!(sa0->tunnel_flags &
+ if (!(ort->tunnel_flags &
TUNNEL_ENCAP_DECAP_FLAG_ENCAP_COPY_ECN))
pd->tos &= 0xfc;
}
@@ -298,7 +297,7 @@ ah_encrypt_inline (vlib_main_t * vm,
pd->current_data = b[0]->current_data;
clib_memset (oh0, 0, sizeof (ip4_and_ah_header_t));
- if (PREDICT_TRUE (ipsec_sa_is_set_IS_TUNNEL (sa0)))
+ if (PREDICT_TRUE (ort->is_tunnel))
{
next_hdr_type = IP_PROTOCOL_IP_IN_IP;
}
@@ -314,57 +313,51 @@ ah_encrypt_inline (vlib_main_t * vm,
oh0->ip4.length =
clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, b[0]));
- oh0->ah.spi = clib_net_to_host_u32 (sa0->spi);
- oh0->ah.seq_no = clib_net_to_host_u32 (sa0->seq);
+ oh0->ah.spi = ort->spi_be;
+ oh0->ah.seq_no = clib_net_to_host_u32 (ort->seq64);
oh0->ah.nexthdr = next_hdr_type;
oh0->ah.hdrlen =
(sizeof (ah_header_t) + icv_size + padding_len) / 4 - 2;
}
- if (PREDICT_TRUE (!is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa0)))
+ if (PREDICT_TRUE (!is_ip6 && ort->is_tunnel && !ort->is_tunnel_v6))
{
- clib_memcpy_fast (&oh0->ip4.address_pair,
- &sa0->ip4_hdr.address_pair,
+ clib_memcpy_fast (&oh0->ip4.address_pair, &ort->ip4_hdr.address_pair,
sizeof (ip4_address_pair_t));
- next[0] = sa0->dpo.dpoi_next_node;
- vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
+ next[0] = ort->dpo.dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = ort->dpo.dpoi_index;
}
- else if (is_ip6 && ipsec_sa_is_set_IS_TUNNEL (sa0) &&
- ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ else if (is_ip6 && ort->is_tunnel && ort->is_tunnel_v6)
{
- clib_memcpy_fast (&oh6_0->ip6.src_address,
- &sa0->ip6_hdr.src_address,
+ clib_memcpy_fast (&oh6_0->ip6.src_address, &ort->ip6_hdr.src_address,
sizeof (ip6_address_t) * 2);
- next[0] = sa0->dpo.dpoi_next_node;
- vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = sa0->dpo.dpoi_index;
+ next[0] = ort->dpo.dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = ort->dpo.dpoi_index;
}
- if (PREDICT_TRUE (sa0->integ_op_id))
+ if (PREDICT_TRUE (ort->integ_op_id))
{
vnet_crypto_op_t *op;
vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, ort->integ_op_id);
op->src = vlib_buffer_get_current (b[0]);
op->len = b[0]->current_length;
op->digest = vlib_buffer_get_current (b[0]) + ip_hdr_size +
sizeof (ah_header_t);
clib_memset (op->digest, 0, icv_size);
op->digest_len = icv_size;
- op->key_index = sa0->integ_key_index;
+ op->key_index = ort->integ_key_index;
op->user_data = b - bufs;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (ort->use_esn)
{
- u32 seq_hi = clib_host_to_net_u32 (sa0->seq_hi);
-
- op->len += sizeof (seq_hi);
- clib_memcpy (op->src + b[0]->current_length, &seq_hi,
- sizeof (seq_hi));
+ *(u32u *) (op->src + b[0]->current_length) =
+ clib_host_to_net_u32 (ort->seq64 >> 32);
+ op->len += sizeof (u32);
}
}
- if (!ipsec_sa_is_set_IS_TUNNEL (sa0))
+ if (!ort->is_tunnel)
{
next[0] = AH_ENCRYPT_NEXT_INTERFACE_OUTPUT;
vlib_buffer_advance (b[0], -sizeof (ethernet_header_t));
@@ -373,13 +366,14 @@ ah_encrypt_inline (vlib_main_t * vm,
next:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_t *sa = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_outb_rt_t *ort =
+ ipsec_sa_get_outb_rt_by_index (pd->sa_index);
ah_encrypt_trace_t *tr =
vlib_add_trace (vm, node, b[0], sizeof (*tr));
- tr->spi = sa0->spi;
- tr->seq_lo = sa0->seq;
- tr->seq_hi = sa0->seq_hi;
- tr->integ_alg = sa0->integ_alg;
+ tr->spi = sa->spi;
+ tr->seq = ort->seq64;
+ tr->integ_alg = sa->integ_alg;
tr->sa_index = pd->sa_index;
}
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 1c3ce776ad2..a31e3145429 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -79,46 +79,28 @@ typedef struct esp_aead_t_
u32 data[3];
} __clib_packed esp_aead_t;
-#define ESP_SEQ_MAX (4294967295UL)
-
u8 *format_esp_header (u8 * s, va_list * args);
/* TODO seq increment should be atomic to be accessed by multiple workers */
always_inline int
-esp_seq_advance (ipsec_sa_t * sa)
+esp_seq_advance (ipsec_sa_outb_rt_t *ort)
{
- if (PREDICT_TRUE (ipsec_sa_is_set_USE_ESN (sa)))
- {
- if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
- {
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
- sa->seq_hi == ESP_SEQ_MAX))
- return 1;
- sa->seq_hi++;
- }
- sa->seq++;
- }
- else
- {
- if (PREDICT_FALSE (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) &&
- sa->seq == ESP_SEQ_MAX))
- return 1;
- sa->seq++;
- }
-
+ u64 max = ort->use_esn ? CLIB_U64_MAX : CLIB_U32_MAX;
+ if (ort->seq64 == max)
+ return 1;
+ ort->seq64++;
return 0;
}
always_inline u16
-esp_aad_fill (u8 *data, const esp_header_t *esp, const ipsec_sa_t *sa,
- u32 seq_hi)
+esp_aad_fill (u8 *data, const esp_header_t *esp, int use_esn, u32 seq_hi)
{
esp_aead_t *aad;
aad = (esp_aead_t *) data;
aad->data[0] = esp->spi;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (use_esn)
{
/* SPI, seq-hi, seq-low */
aad->data[1] = (u32) clib_host_to_net_u32 (seq_hi);
@@ -218,7 +200,8 @@ typedef struct
{
u8 icv_sz;
u8 iv_sz;
- ipsec_sa_flags_t flags;
+ u8 udp_sz;
+ u8 is_transport;
u32 sa_index;
};
u64 sa_data;
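With the inbound/outbound runtime split, the outbound sequence number becomes a single 64-bit counter whose cap depends on whether ESN is enabled: the low 32 bits go on the wire and the high 32 bits carry the ESN. A standalone sketch of the cycling check follows; stdint types and the example_* names stand in for the VPP ones, and the logic mirrors the new esp_seq_advance above.

#include <stdint.h>

/* trimmed stand-in for the outbound runtime (ipsec_sa_outb_rt_t) */
typedef struct
{
  uint64_t seq64; /* low 32 bits on the wire; high 32 bits are the ESN */
  int use_esn;
} example_outb_rt_t;

/* returns 1 when the SA must stop sending because the counter would cycle */
static int
example_seq_advance (example_outb_rt_t *ort)
{
  uint64_t max = ort->use_esn ? UINT64_MAX : UINT32_MAX;
  if (ort->seq64 == max)
    return 1;
  ort->seq64++;
  return 0;
}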
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 6384bb927a8..345a60a7fdd 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -251,11 +251,12 @@ esp_move_icv (vlib_main_t * vm, vlib_buffer_t * first,
}
static_always_inline u16
-esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
- esp_decrypt_packet_data2_t *pd2, u32 *data_len, u8 **digest,
- u16 *len, vlib_buffer_t *b, u8 *payload)
+esp_insert_esn (vlib_main_t *vm, ipsec_sa_inb_rt_t *irt,
+ esp_decrypt_packet_data_t *pd, esp_decrypt_packet_data2_t *pd2,
+ u32 *data_len, u8 **digest, u16 *len, vlib_buffer_t *b,
+ u8 *payload)
{
- if (!ipsec_sa_is_set_USE_ESN (sa))
+ if (!irt->use_esn)
return 0;
/* shift ICV by 4 bytes to insert ESN */
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
@@ -288,17 +289,17 @@ esp_insert_esn (vlib_main_t *vm, ipsec_sa_t *sa, esp_decrypt_packet_data_t *pd,
}
static_always_inline u8 *
-esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2, u16 icv_sz,
- ipsec_sa_t * sa, u8 * extra_esn, u32 * len)
+esp_move_icv_esn (vlib_main_t *vm, vlib_buffer_t *first,
+ esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2, u16 icv_sz,
+ ipsec_sa_inb_rt_t *irt, u8 *extra_esn, u32 *len)
{
u16 dif = 0;
u8 *digest = esp_move_icv (vm, first, pd, pd2, icv_sz, &dif);
if (dif)
*len -= dif;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
u16 space_left = vlib_buffer_space_left_at_end (vm, pd2->lb);
@@ -326,9 +327,9 @@ esp_move_icv_esn (vlib_main_t * vm, vlib_buffer_t * first,
static_always_inline int
esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
const esp_decrypt_packet_data_t *pd,
- esp_decrypt_packet_data2_t *pd2, ipsec_sa_t *sa0,
- vlib_buffer_t *b, u8 icv_sz, u8 *start_src,
- u32 start_len, u8 **digest, u16 *n_ch,
+ esp_decrypt_packet_data2_t *pd2,
+ ipsec_sa_inb_rt_t *irt, vlib_buffer_t *b, u8 icv_sz,
+ u8 *start_src, u32 start_len, u8 **digest, u16 *n_ch,
u32 *integ_total_len)
{
vnet_crypto_op_chunk_t *ch;
@@ -350,7 +351,7 @@ esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
ch->len = cb->current_length;
else
ch->len = cb->current_length - icv_sz;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (irt->use_esn)
{
u32 seq_hi = clib_host_to_net_u32 (pd->seq_hi);
u8 tmp[ESP_MAX_ICV_SIZE];
@@ -422,11 +423,11 @@ esp_decrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
static_always_inline u32
-esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- esp_decrypt_packet_data_t * pd,
- esp_decrypt_packet_data2_t * pd2,
- ipsec_sa_t * sa0, vlib_buffer_t * b, u8 icv_sz,
- u8 * start, u32 start_len, u8 ** tag, u16 * n_ch)
+esp_decrypt_chain_crypto (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ esp_decrypt_packet_data_t *pd,
+ esp_decrypt_packet_data2_t *pd2,
+ ipsec_sa_inb_rt_t *irt, vlib_buffer_t *b, u8 icv_sz,
+ u8 *start, u32 start_len, u8 **tag, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
@@ -445,7 +446,7 @@ esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
ch->src = ch->dst = vlib_buffer_get_current (cb);
if (pd2->lb == cb)
{
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
if (pd2->lb->current_length < icv_sz)
{
@@ -496,8 +497,9 @@ esp_decrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
- ipsec_sa_t *sa0, u8 *payload, u16 len, u8 icv_sz,
- u8 iv_sz, esp_decrypt_packet_data_t *pd,
+ ipsec_sa_inb_rt_t *irt, u8 *payload, u16 len,
+ u8 icv_sz, u8 iv_sz,
+ esp_decrypt_packet_data_t *pd,
esp_decrypt_packet_data2_t *pd2, vlib_buffer_t *b,
u32 index)
{
@@ -506,10 +508,10 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t _op, *op = &_op;
const u8 esp_sz = sizeof (esp_header_t);
- if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+ if (PREDICT_TRUE (irt->integ_op_id != VNET_CRYPTO_OP_NONE))
{
- vnet_crypto_op_init (op, sa0->integ_op_id);
- op->key_index = sa0->integ_key_index;
+ vnet_crypto_op_init (op, irt->integ_op_id);
+ op->key_index = irt->integ_key_index;
op->src = payload;
op->flags = VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
op->user_data = index;
@@ -531,9 +533,8 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
if (pd2->lb->current_length < icv_sz)
{
u8 extra_esn = 0;
- op->digest =
- esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
- &extra_esn, &op->len);
+ op->digest = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, irt,
+ &extra_esn, &op->len);
if (extra_esn)
{
@@ -558,7 +559,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
- if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, irt, b, icv_sz,
payload, pd->current_length,
&op->digest, &op->n_chunks, 0) < 0)
return ESP_DECRYPT_ERROR_NO_BUFFERS;
@@ -566,7 +567,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
else
{
integ_ops = &ptd->integ_ops;
- esp_insert_esn (vm, sa0, pd, pd2, &op->len, &op->digest, &len, b,
+ esp_insert_esn (vm, irt, pd, pd2, &op->len, &op->digest, &len, b,
payload);
}
out:
@@ -576,27 +577,28 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
payload += esp_sz;
len -= esp_sz;
- if (sa0->crypto_dec_op_id != VNET_CRYPTO_OP_NONE)
+ if (irt->cipher_op_id != VNET_CRYPTO_OP_NONE)
{
- vnet_crypto_op_init (op, sa0->crypto_dec_op_id);
- op->key_index = sa0->crypto_key_index;
+ vnet_crypto_op_init (op, irt->cipher_op_id);
+ op->key_index = irt->cipher_key_index;
op->iv = payload;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (irt->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz -
sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
/* constuct aad in a scratch space in front of the nonce */
esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp0, sa0, pd->seq_hi);
+ op->aad_len =
+ esp_aad_fill (op->aad, esp0, irt->use_esn, pd->seq_hi);
op->tag = payload + len;
op->tag_len = 16;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (irt->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
payload -= iv_sz;
@@ -607,7 +609,7 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
{
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = irt->salt;
ASSERT (sizeof (u64) == iv_sz);
nonce->iv = *(u64 *) op->iv;
op->iv = (u8 *) nonce;
@@ -621,9 +623,9 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
/* buffer is chained */
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
- esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
- payload, len - pd->iv_sz + pd->icv_sz,
- &op->tag, &op->n_chunks);
+ esp_decrypt_chain_crypto (vm, ptd, pd, pd2, irt, b, icv_sz, payload,
+ len - pd->iv_sz + pd->icv_sz, &op->tag,
+ &op->n_chunks);
crypto_ops = &ptd->chained_crypto_ops;
}
else
@@ -639,8 +641,9 @@ esp_decrypt_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
static_always_inline esp_decrypt_error_t
esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
- vnet_crypto_async_frame_t *f, ipsec_sa_t *sa0,
- u8 *payload, u16 len, u8 icv_sz, u8 iv_sz,
+ vnet_crypto_async_frame_t *f,
+ ipsec_sa_inb_rt_t *irt, u8 *payload, u16 len,
+ u8 icv_sz, u8 iv_sz,
esp_decrypt_packet_data_t *pd,
esp_decrypt_packet_data2_t *pd2, u32 bi,
vlib_buffer_t *b, u16 async_next)
@@ -649,17 +652,17 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
esp_decrypt_packet_data_t *async_pd = &(esp_post_data (b))->decrypt_data;
esp_decrypt_packet_data2_t *async_pd2 = esp_post_data2 (b);
u8 *tag = payload + len, *iv = payload + esp_sz, *aad = 0;
- const u32 key_index = sa0->crypto_key_index;
+ const u32 key_index = irt->cipher_key_index;
u32 crypto_len, integ_len = 0;
i16 crypto_start_offset, integ_start_offset = 0;
u8 flags = 0;
- if (!ipsec_sa_is_set_IS_AEAD (sa0))
+ if (!irt->is_aead)
{
/* linked algs */
integ_start_offset = payload - b->data;
integ_len = len;
- if (PREDICT_TRUE (sa0->integ_op_id != VNET_CRYPTO_OP_NONE))
+ if (PREDICT_TRUE (irt->integ_op_id != VNET_CRYPTO_OP_NONE))
flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;
if (pd->is_chain)
@@ -674,8 +677,8 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
if (pd2->lb->current_length < icv_sz)
{
u8 extra_esn = 0;
- tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, sa0,
- &extra_esn, &integ_len);
+ tag = esp_move_icv_esn (vm, b, pd, pd2, icv_sz, irt, &extra_esn,
+ &integ_len);
if (extra_esn)
{
@@ -698,7 +701,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
tag = vlib_buffer_get_tail (pd2->lb) - icv_sz;
flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, sa0, b, icv_sz,
+ if (esp_decrypt_chain_integ (vm, ptd, pd, pd2, irt, b, icv_sz,
payload, pd->current_length, &tag, 0,
&integ_len) < 0)
{
@@ -707,7 +710,7 @@ esp_decrypt_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
}
else
- esp_insert_esn (vm, sa0, pd, pd2, &integ_len, &tag, &len, b, payload);
+ esp_insert_esn (vm, irt, pd, pd2, &integ_len, &tag, &len, b, payload);
}
out:
@@ -716,19 +719,19 @@ out:
len -= esp_sz;
iv = payload;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (irt->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (payload - esp_sz - pd->hdr_sz - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (irt->is_aead)
{
/* constuct aad in a scratch space in front of the nonce */
esp_header_t *esp0 = (esp_header_t *) (payload - esp_sz);
aad = (u8 *) nonce - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp0, sa0, pd->seq_hi);
+ esp_aad_fill (aad, esp0, irt->use_esn, pd->seq_hi);
tag = payload + len;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (irt->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
payload -= iv_sz;
@@ -739,7 +742,7 @@ out:
{
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = irt->salt;
ASSERT (sizeof (u64) == iv_sz);
nonce->iv = *(u64 *) iv;
iv = (u8 *) nonce;
@@ -753,10 +756,9 @@ out:
/* buffer is chained */
flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
- crypto_len = esp_decrypt_chain_crypto (vm, ptd, pd, pd2, sa0, b, icv_sz,
- payload,
- len - pd->iv_sz + pd->icv_sz,
- &tag, 0);
+ crypto_len =
+ esp_decrypt_chain_crypto (vm, ptd, pd, pd2, irt, b, icv_sz, payload,
+ len - pd->iv_sz + pd->icv_sz, &tag, 0);
}
*async_pd = *pd;
@@ -779,10 +781,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
vlib_buffer_t *b, u16 *next, int is_ip6, int is_tun,
int is_async)
{
- ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt_by_index (pd->sa_index);
vlib_buffer_t *lb = b;
const u8 esp_sz = sizeof (esp_header_t);
- const u8 tun_flags = IPSEC_SA_FLAG_IS_TUNNEL | IPSEC_SA_FLAG_IS_TUNNEL_V6;
u8 pad_length = 0, next_header = 0;
u16 icv_sz;
u64 n_lost;
@@ -809,9 +810,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
* a sequence s, s+1, s+2, s+3, ... s+n and nothing will prevent any
* implementation, sequential or batching, from decrypting these.
*/
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ if (ipsec_sa_anti_replay_and_sn_advance (irt, pd->seq, pd->seq_hi, true,
NULL, true))
{
esp_decrypt_set_next_index (b, node, vm->thread_index,
@@ -819,12 +820,12 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
- n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ n_lost = ipsec_sa_anti_replay_advance (irt, vm->thread_index, pd->seq,
pd->seq_hi, true);
}
else
{
- if (ipsec_sa_anti_replay_and_sn_advance (sa0, pd->seq, pd->seq_hi, true,
+ if (ipsec_sa_anti_replay_and_sn_advance (irt, pd->seq, pd->seq_hi, true,
NULL, false))
{
esp_decrypt_set_next_index (b, node, vm->thread_index,
@@ -832,7 +833,7 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
ESP_DECRYPT_NEXT_DROP, pd->sa_index);
return;
}
- n_lost = ipsec_sa_anti_replay_advance (sa0, vm->thread_index, pd->seq,
+ n_lost = ipsec_sa_anti_replay_advance (irt, vm->thread_index, pd->seq,
pd->seq_hi, false);
}
@@ -899,10 +900,9 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
b->flags &=
~(VNET_BUFFER_F_L4_CHECKSUM_COMPUTED | VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
- if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
+ if (pd->is_transport && !is_tun) /* transport mode */
{
- u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
- sizeof (udp_header_t) : 0;
+ u8 udp_sz = is_ip6 ? 0 : pd->udp_sz;
u16 ip_hdr_sz = pd->hdr_sz - udp_sz;
u8 *old_ip = b->data + pd->current_data - ip_hdr_sz - udp_sz;
u8 *ip = old_ip + adv + udp_sz;
@@ -1012,7 +1012,7 @@ esp_decrypt_post_crypto (vlib_main_t *vm, vlib_node_runtime_t *node,
if (is_tun)
{
- if (ipsec_sa_is_set_IS_PROTECT (sa0))
+ if (irt->is_protect)
{
/*
* There are two encap possibilities
@@ -1101,21 +1101,18 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_decrypt_packet_data_t cpd = { };
u32 current_sa_index = ~0, current_sa_bytes = 0, current_sa_pkts = 0;
const u8 esp_sz = sizeof (esp_header_t);
- ipsec_sa_t *sa0 = 0;
+ ipsec_sa_inb_rt_t *irt = 0;
bool anti_replay_result;
- int is_async = im->async_mode;
+ int is_async = 0;
vnet_crypto_op_id_t async_op = ~0;
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_N_OP_IDS];
esp_decrypt_error_t err;
vlib_get_buffers (vm, from, b, n_left);
- if (!is_async)
- {
- vec_reset_length (ptd->crypto_ops);
- vec_reset_length (ptd->integ_ops);
- vec_reset_length (ptd->chained_crypto_ops);
- vec_reset_length (ptd->chained_integ_ops);
- }
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+ vec_reset_length (ptd->chained_crypto_ops);
+ vec_reset_length (ptd->chained_integ_ops);
vec_reset_length (ptd->async_frames);
vec_reset_length (ptd->chunks);
clib_memset (sync_nexts, -1, sizeof (sync_nexts));
@@ -1157,29 +1154,28 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
current_sa_index = vnet_buffer (b[0])->ipsec.sad_index;
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index);
- sa0 = ipsec_sa_get (current_sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (current_sa_index);
- /* fetch the second cacheline ASAP */
- clib_prefetch_load (sa0->cacheline1);
- cpd.icv_sz = sa0->integ_icv_size;
- cpd.iv_sz = sa0->crypto_iv_size;
- cpd.flags = sa0->flags;
+ cpd.icv_sz = irt->integ_icv_size;
+ cpd.iv_sz = irt->cipher_iv_size;
+ cpd.udp_sz = irt->udp_sz;
+ cpd.is_transport = irt->is_transport;
cpd.sa_index = current_sa_index;
- is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ is_async = irt->is_async;
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == irt->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&irt->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ if (PREDICT_FALSE (thread_index != irt->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = irt->thread_index;
err = ESP_DECRYPT_ERROR_HANDOFF;
esp_decrypt_set_next_index (b[0], node, thread_index, err, n_noop,
noop_nexts, ESP_DECRYPT_NEXT_HANDOFF,
@@ -1209,15 +1205,15 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
pd->current_length = b[0]->current_length;
      /* anti-replay check */
- if (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa0)))
+ if (PREDICT_FALSE (irt->anti_reply_huge))
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, true);
+ irt, pd->seq, ~0, false, &pd->seq_hi, true);
}
else
{
anti_replay_result = ipsec_sa_anti_replay_and_sn_advance (
- sa0, pd->seq, ~0, false, &pd->seq_hi, false);
+ irt, pd->seq, ~0, false, &pd->seq_hi, false);
}
if (anti_replay_result)
@@ -1244,7 +1240,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (is_async)
{
- async_op = sa0->crypto_async_dec_op_id;
+ async_op = irt->async_op_id;
/* get a frame for this op if we don't yet have one or it's full
*/
@@ -1267,7 +1263,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
err = esp_decrypt_prepare_async_frame (
- vm, ptd, async_frames[async_op], sa0, payload, len, cpd.icv_sz,
+ vm, ptd, async_frames[async_op], irt, payload, len, cpd.icv_sz,
cpd.iv_sz, pd, pd2, from[b - bufs], b[0], async_next_node);
if (ESP_DECRYPT_ERROR_RX_PKTS != err)
{
@@ -1278,7 +1274,7 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
else
{
- err = esp_decrypt_prepare_sync_op (vm, ptd, sa0, payload, len,
+ err = esp_decrypt_prepare_sync_op (vm, ptd, irt, payload, len,
cpd.icv_sz, cpd.iv_sz, pd, pd2,
b[0], n_sync);
if (err != ESP_DECRYPT_ERROR_RX_PKTS)
@@ -1391,12 +1387,14 @@ esp_decrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
{
esp_decrypt_trace_t *tr;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = ipsec_sa_get (current_sa_index);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ ipsec_sa_t *sa = ipsec_sa_get (current_sa_index);
+ ipsec_sa_inb_rt_t *irt =
+ ipsec_sa_get_inb_rt_by_index (current_sa_index);
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq = pd->seq;
- tr->sa_seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
+ tr->sa_seq = irt->seq;
+ tr->sa_seq_hi = irt->seq_hi;
tr->pkt_seq_hi = pd->seq_hi;
}
@@ -1456,18 +1454,20 @@ esp_decrypt_post_inline (vlib_main_t * vm,
/*trace: */
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- ipsec_sa_t *sa0 = ipsec_sa_get (pd->sa_index);
+ ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
esp_decrypt_trace_t *tr;
esp_decrypt_packet_data_t *async_pd =
&(esp_post_data (b[0]))->decrypt_data;
tr = vlib_add_trace (vm, node, b[0], sizeof (*tr));
- sa0 = ipsec_sa_get (async_pd->sa_index);
+ sa = ipsec_sa_get (async_pd->sa_index);
+ irt = ipsec_sa_get_inb_rt_by_index (async_pd->sa_index);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
tr->seq = pd->seq;
- tr->sa_seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
+ tr->sa_seq = irt->seq;
+ tr->sa_seq_hi = irt->seq_hi;
}
n_left--;
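
Editorial note on the esp_decrypt.c hunks above: every `ipsec_sa_is_set_*` flag test in the decrypt path is replaced by pre-computed bits on the per-SA inbound runtime (`ipsec_sa_inb_rt_t`). A minimal, hedged sketch of that access pattern follows; it uses only accessor and field names introduced by this patch, while the example function itself and its return value are illustrative.

/* Hedged sketch: fetch the inbound runtime for a buffer's SA index and derive
 * the crypto-op flags from the cached bits, as the decrypt path now does.
 * Only irt field names from this patch are used; the helper is illustrative. */
#include <vnet/ipsec/ipsec.h>

static_always_inline u8
example_inb_op_flags (u32 sa_index)
{
  ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt_by_index (sa_index);
  u8 flags = 0;

  /* linked (non-AEAD) algorithms need a separate HMAC check op */
  if (!irt->is_aead && irt->integ_op_id != VNET_CRYPTO_OP_NONE)
    flags |= VNET_CRYPTO_OP_FLAG_HMAC_CHECK;

  return flags;
}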
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 4338cb01e5d..8916eb135f8 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -49,8 +49,7 @@ typedef struct
{
u32 sa_index;
u32 spi;
- u32 seq;
- u32 sa_seq_hi;
+ u64 seq;
u8 udp_encap;
ipsec_crypto_alg_t crypto_alg;
ipsec_integ_alg_t integ_alg;
@@ -71,13 +70,11 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
- s =
- format (s,
- "esp: sa-index %d spi %u (0x%08x) seq %u sa-seq-hi %u crypto %U integrity %U%s",
- t->sa_index, t->spi, t->spi, t->seq, t->sa_seq_hi,
- format_ipsec_crypto_alg,
- t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
- t->udp_encap ? " udp-encap-enabled" : "");
+ s = format (
+ s, "esp: sa-index %d spi %u (0x%08x) seq %lu crypto %U integrity %U%s",
+ t->sa_index, t->spi, t->spi, t->seq, format_ipsec_crypto_alg,
+ t->crypto_alg, format_ipsec_integ_alg, t->integ_alg,
+ t->udp_encap ? " udp-encap-enabled" : "");
return s;
}
@@ -162,9 +159,9 @@ esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
}
static_always_inline void
-esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
+esp_fill_udp_hdr (ipsec_sa_outb_rt_t *ort, udp_header_t *udp, u16 len)
{
- clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
+ clib_memcpy_fast (udp, &ort->udp_hdr, sizeof (udp_header_t));
udp->length = clib_net_to_host_u16 (len);
}
@@ -223,12 +220,12 @@ esp_get_ip6_hdr_len (ip6_header_t * ip6, ip6_ext_header_t ** ext_hdr)
* message. You can refer to NIST SP800-38a and NIST SP800-38d for more
* details. */
static_always_inline void *
-esp_generate_iv (ipsec_sa_t *sa, void *payload, int iv_sz)
+esp_generate_iv (ipsec_sa_outb_rt_t *ort, void *payload, int iv_sz)
{
ASSERT (iv_sz >= sizeof (u64));
u64 *iv = (u64 *) (payload - iv_sz);
clib_memset_u8 (iv, 0, iv_sz);
- *iv = clib_pcg64i_random_r (&sa->iv_prng);
+ *iv = clib_pcg64i_random_r (&ort->iv_prng);
return iv;
}
@@ -294,10 +291,9 @@ esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
}
static_always_inline u32
-esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- ipsec_sa_t * sa0, vlib_buffer_t * b,
- vlib_buffer_t * lb, u8 icv_sz, u8 * start,
- u32 start_len, u16 * n_ch)
+esp_encrypt_chain_crypto (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ vlib_buffer_t *b, vlib_buffer_t *lb, u8 icv_sz,
+ u8 *start, u32 start_len, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
@@ -331,10 +327,10 @@ esp_encrypt_chain_crypto (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
}
static_always_inline u32
-esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
- ipsec_sa_t * sa0, vlib_buffer_t * b,
- vlib_buffer_t * lb, u8 icv_sz, u8 * start,
- u32 start_len, u8 * digest, u16 * n_ch)
+esp_encrypt_chain_integ (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
+ ipsec_sa_outb_rt_t *ort, vlib_buffer_t *b,
+ vlib_buffer_t *lb, u8 icv_sz, u8 *start,
+ u32 start_len, u8 *digest, u16 *n_ch)
{
vnet_crypto_op_chunk_t *ch;
vlib_buffer_t *cb = b;
@@ -352,12 +348,11 @@ esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
if (lb == cb)
{
total_len += ch->len = cb->current_length - icv_sz;
- if (ipsec_sa_is_set_USE_ESN (sa0))
+ if (ort->use_esn)
{
- u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
- clib_memcpy_fast (digest, &seq_hi, sizeof (seq_hi));
- ch->len += sizeof (seq_hi);
- total_len += sizeof (seq_hi);
+ *(u32u *) digest = clib_net_to_host_u32 (ort->seq64 >> 32);
+ ch->len += sizeof (u32);
+ total_len += sizeof (u32);
}
}
else
@@ -379,16 +374,16 @@ esp_encrypt_chain_integ (vlib_main_t * vm, ipsec_per_thread_data_t * ptd,
always_inline void
esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_op_t **crypto_ops,
- vnet_crypto_op_t **integ_ops, ipsec_sa_t *sa0, u32 seq_hi,
- u8 *payload, u16 payload_len, u8 iv_sz, u8 icv_sz, u32 bi,
- vlib_buffer_t **b, vlib_buffer_t *lb, u32 hdr_len,
- esp_header_t *esp)
+ vnet_crypto_op_t **integ_ops, ipsec_sa_outb_rt_t *ort,
+ u32 seq_hi, u8 *payload, u16 payload_len, u8 iv_sz,
+ u8 icv_sz, u32 bi, vlib_buffer_t **b, vlib_buffer_t *lb,
+ u32 hdr_len, esp_header_t *esp)
{
- if (sa0->crypto_enc_op_id)
+ if (ort->cipher_op_id)
{
vnet_crypto_op_t *op;
vec_add2_aligned (crypto_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->crypto_enc_op_id);
+ vnet_crypto_op_init (op, ort->cipher_op_id);
u8 *crypto_start = payload;
/* esp_add_footer_and_icv() in esp_encrypt_inline() makes sure we always
* have enough space for ESP header and footer which includes ICV */
@@ -396,24 +391,24 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
u16 crypto_len = payload_len - icv_sz;
/* generate the IV in front of the payload */
- void *pkt_iv = esp_generate_iv (sa0, payload, iv_sz);
+ void *pkt_iv = esp_generate_iv (ort, payload, iv_sz);
- op->key_index = sa0->crypto_key_index;
+ op->key_index = ort->cipher_key_index;
op->user_data = bi;
- if (ipsec_sa_is_set_IS_CTR (sa0))
+ if (ort->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa0))
+ if (ort->is_aead)
{
	  /* construct aad in a scratch space in front of the nonce */
op->aad = (u8 *) nonce - sizeof (esp_aead_t);
- op->aad_len = esp_aad_fill (op->aad, esp, sa0, seq_hi);
+ op->aad_len = esp_aad_fill (op->aad, esp, ort->use_esn, seq_hi);
op->tag = payload + crypto_len;
op->tag_len = 16;
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa0)))
+ if (PREDICT_FALSE (ort->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
crypto_start -= iv_sz;
@@ -425,7 +420,7 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa0->salt;
+ nonce->salt = ort->salt;
nonce->iv = *(u64 *) pkt_iv;
op->iv = (u8 *) nonce;
}
@@ -445,9 +440,8 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->flags |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
op->chunk_index = vec_len (ptd->chunks);
op->tag = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_crypto (vm, ptd, sa0, b[0], lb, icv_sz,
- crypto_start, crypto_len + icv_sz,
- &op->n_chunks);
+ esp_encrypt_chain_crypto (vm, ptd, b[0], lb, icv_sz, crypto_start,
+ crypto_len + icv_sz, &op->n_chunks);
}
else
{
@@ -457,14 +451,14 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
}
}
- if (sa0->integ_op_id)
+ if (ort->integ_op_id)
{
vnet_crypto_op_t *op;
vec_add2_aligned (integ_ops[0], op, 1, CLIB_CACHE_LINE_BYTES);
- vnet_crypto_op_init (op, sa0->integ_op_id);
+ vnet_crypto_op_init (op, ort->integ_op_id);
op->src = payload - iv_sz - sizeof (esp_header_t);
op->digest = payload + payload_len - icv_sz;
- op->key_index = sa0->integ_key_index;
+ op->key_index = ort->integ_key_index;
op->digest_len = icv_sz;
op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
op->user_data = bi;
@@ -476,13 +470,12 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
op->chunk_index = vec_len (ptd->chunks);
op->digest = vlib_buffer_get_tail (lb) - icv_sz;
- esp_encrypt_chain_integ (vm, ptd, sa0, b[0], lb, icv_sz,
+ esp_encrypt_chain_integ (vm, ptd, ort, b[0], lb, icv_sz,
payload - iv_sz - sizeof (esp_header_t),
- payload_len + iv_sz +
- sizeof (esp_header_t), op->digest,
- &op->n_chunks);
+ payload_len + iv_sz + sizeof (esp_header_t),
+ op->digest, &op->n_chunks);
}
- else if (ipsec_sa_is_set_USE_ESN (sa0))
+ else if (ort->use_esn)
{
u32 tmp = clib_net_to_host_u32 (seq_hi);
clib_memcpy_fast (op->digest, &tmp, sizeof (seq_hi));
@@ -494,15 +487,15 @@ esp_prepare_sync_op (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
static_always_inline void
esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
vnet_crypto_async_frame_t *async_frame,
- ipsec_sa_t *sa, vlib_buffer_t *b, esp_header_t *esp,
- u8 *payload, u32 payload_len, u8 iv_sz, u8 icv_sz,
- u32 bi, u16 next, u32 hdr_len, u16 async_next,
- vlib_buffer_t *lb)
+ ipsec_sa_outb_rt_t *ort, vlib_buffer_t *b,
+ esp_header_t *esp, u8 *payload, u32 payload_len,
+ u8 iv_sz, u8 icv_sz, u32 bi, u16 next, u32 hdr_len,
+ u16 async_next, vlib_buffer_t *lb)
{
esp_post_data_t *post = esp_post_data (b);
u8 *tag, *iv, *aad = 0;
u8 flag = 0;
- const u32 key_index = sa->crypto_key_index;
+ const u32 key_index = ort->cipher_key_index;
i16 crypto_start_offset, integ_start_offset;
u16 crypto_total_len, integ_total_len;
@@ -514,19 +507,19 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
tag = payload + crypto_total_len;
/* generate the IV in front of the payload */
- void *pkt_iv = esp_generate_iv (sa, payload, iv_sz);
+ void *pkt_iv = esp_generate_iv (ort, payload, iv_sz);
- if (ipsec_sa_is_set_IS_CTR (sa))
+ if (ort->is_ctr)
{
/* construct nonce in a scratch space in front of the IP header */
esp_ctr_nonce_t *nonce =
(esp_ctr_nonce_t *) (pkt_iv - hdr_len - sizeof (*nonce));
- if (ipsec_sa_is_set_IS_AEAD (sa))
+ if (ort->is_aead)
{
	  /* construct aad in a scratch space in front of the nonce */
aad = (u8 *) nonce - sizeof (esp_aead_t);
- esp_aad_fill (aad, esp, sa, sa->seq_hi);
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_NULL_GMAC (sa)))
+ esp_aad_fill (aad, esp, ort->use_esn, ort->seq64 >> 32);
+ if (PREDICT_FALSE (ort->is_null_gmac))
{
/* RFC-4543 ENCR_NULL_AUTH_AES_GMAC: IV is part of AAD */
crypto_start_offset -= iv_sz;
@@ -538,7 +531,7 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
nonce->ctr = clib_host_to_net_u32 (1);
}
- nonce->salt = sa->salt;
+ nonce->salt = ort->salt;
nonce->iv = *(u64 *) pkt_iv;
iv = (u8 *) nonce;
}
@@ -558,11 +551,11 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
flag |= VNET_CRYPTO_OP_FLAG_CHAINED_BUFFERS;
tag = vlib_buffer_get_tail (lb) - icv_sz;
crypto_total_len = esp_encrypt_chain_crypto (
- vm, ptd, sa, b, lb, icv_sz, b->data + crypto_start_offset,
+ vm, ptd, b, lb, icv_sz, b->data + crypto_start_offset,
crypto_total_len + icv_sz, 0);
}
- if (sa->integ_op_id)
+ if (ort->integ_op_id)
{
integ_start_offset -= iv_sz + sizeof (esp_header_t);
integ_total_len += iv_sz + sizeof (esp_header_t);
@@ -570,15 +563,14 @@ esp_prepare_async_frame (vlib_main_t *vm, ipsec_per_thread_data_t *ptd,
if (b != lb)
{
integ_total_len = esp_encrypt_chain_integ (
- vm, ptd, sa, b, lb, icv_sz,
+ vm, ptd, ort, b, lb, icv_sz,
payload - iv_sz - sizeof (esp_header_t),
payload_len + iv_sz + sizeof (esp_header_t), tag, 0);
}
- else if (ipsec_sa_is_set_USE_ESN (sa))
+ else if (ort->use_esn)
{
- u32 seq_hi = clib_net_to_host_u32 (sa->seq_hi);
- clib_memcpy_fast (tag, &seq_hi, sizeof (seq_hi));
- integ_total_len += sizeof (seq_hi);
+ *(u32u *) tag = clib_net_to_host_u32 (ort->seq64 >> 32);
+ integ_total_len += sizeof (u32);
}
}
@@ -620,13 +612,12 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u32 current_sa_index = ~0, current_sa_packets = 0;
u32 current_sa_bytes = 0, spi = 0;
u8 esp_align = 4, iv_sz = 0, icv_sz = 0;
- ipsec_sa_t *sa0 = 0;
- u8 sa_drop_no_crypto = 0;
+ ipsec_sa_outb_rt_t *ort = 0;
vlib_buffer_t *lb;
vnet_crypto_op_t **crypto_ops = &ptd->crypto_ops;
vnet_crypto_op_t **integ_ops = &ptd->integ_ops;
vnet_crypto_async_frame_t *async_frames[VNET_CRYPTO_N_OP_IDS];
- int is_async = im->async_mode;
+ int is_async = 0;
vnet_crypto_op_id_t async_op = ~0;
u16 drop_next =
(lt == VNET_LINK_IP6 ? ESP_ENCRYPT_NEXT_DROP6 :
@@ -708,27 +699,20 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
current_sa_packets, current_sa_bytes);
current_sa_packets = current_sa_bytes = 0;
- sa0 = ipsec_sa_get (sa_index0);
+ ort = ipsec_sa_get_outb_rt_by_index (sa_index0);
current_sa_index = sa_index0;
- sa_drop_no_crypto = ((sa0->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
- sa0->integ_alg == IPSEC_INTEG_ALG_NONE) &&
- !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa0));
-
vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
current_sa_index);
- /* fetch the second cacheline ASAP */
- clib_prefetch_load (sa0->cacheline1);
-
- spi = clib_net_to_host_u32 (sa0->spi);
- esp_align = sa0->esp_block_align;
- icv_sz = sa0->integ_icv_size;
- iv_sz = sa0->crypto_iv_size;
- is_async = im->async_mode | ipsec_sa_is_set_IS_ASYNC (sa0);
+ spi = ort->spi_be;
+ icv_sz = ort->integ_icv_size;
+ esp_align = ort->esp_block_align;
+ iv_sz = ort->cipher_iv_size;
+ is_async = ort->is_async;
}
- if (PREDICT_FALSE (sa_drop_no_crypto != 0))
+ if (PREDICT_FALSE (ort->drop_no_crypto != 0))
{
err = ESP_ENCRYPT_ERROR_NO_ENCRYPTION;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
@@ -736,18 +720,18 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
goto trace;
}
- if (PREDICT_FALSE ((u16) ~0 == sa0->thread_index))
+ if (PREDICT_FALSE ((u16) ~0 == ort->thread_index))
{
/* this is the first packet to use this SA, claim the SA
* for this thread. this could happen simultaneously on
* another thread */
- clib_atomic_cmp_and_swap (&sa0->thread_index, ~0,
+ clib_atomic_cmp_and_swap (&ort->thread_index, ~0,
ipsec_sa_assign_thread (thread_index));
}
- if (PREDICT_FALSE (thread_index != sa0->thread_index))
+ if (PREDICT_FALSE (thread_index != ort->thread_index))
{
- vnet_buffer (b[0])->ipsec.thread_index = sa0->thread_index;
+ vnet_buffer (b[0])->ipsec.thread_index = ort->thread_index;
err = ESP_ENCRYPT_ERROR_HANDOFF;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
noop_nexts, handoff_next,
@@ -772,7 +756,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
lb = vlib_get_buffer (vm, lb->next_buffer);
}
- if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ if (PREDICT_FALSE (esp_seq_advance (ort)))
{
err = ESP_ENCRYPT_ERROR_SEQ_CYCLED;
esp_encrypt_set_next_index (b[0], node, thread_index, err, n_noop,
@@ -783,7 +767,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
/* space for IV */
hdr_len = iv_sz;
- if (ipsec_sa_is_set_IS_TUNNEL (sa0))
+ if (ort->is_tunnel)
{
payload = vlib_buffer_get_current (b[0]);
next_hdr_ptr = esp_add_footer_and_icv (
@@ -806,40 +790,39 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp = (esp_header_t *) (payload - hdr_len);
/* optional UDP header */
- if (ipsec_sa_is_set_UDP_ENCAP (sa0))
+ if (ort->udp_encap)
{
hdr_len += sizeof (udp_header_t);
- esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
+ esp_fill_udp_hdr (ort, (udp_header_t *) (payload - hdr_len),
payload_len_total + hdr_len);
}
/* IP header */
- if (ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ if (ort->is_tunnel_v6)
{
ip6_header_t *ip6;
u16 len = sizeof (ip6_header_t);
hdr_len += len;
ip6 = (ip6_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip6, &sa0->ip6_hdr, sizeof (ip6_header_t));
+ clib_memcpy_fast (ip6, &ort->ip6_hdr, sizeof (ip6_header_t));
if (VNET_LINK_IP6 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IPV6;
- tunnel_encap_fixup_6o6 (sa0->tunnel_flags,
- (const ip6_header_t *) payload,
- ip6);
+ tunnel_encap_fixup_6o6 (ort->tunnel_flags,
+ (const ip6_header_t *) payload, ip6);
}
else if (VNET_LINK_IP4 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
- tunnel_encap_fixup_4o6 (sa0->tunnel_flags, b[0],
+ tunnel_encap_fixup_4o6 (ort->tunnel_flags, b[0],
(const ip4_header_t *) payload, ip6);
}
else if (VNET_LINK_MPLS == lt)
{
*next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
tunnel_encap_fixup_mplso6 (
- sa0->tunnel_flags, b[0],
+ ort->tunnel_flags, b[0],
(const mpls_unicast_header_t *) payload, ip6);
}
else
@@ -855,27 +838,25 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
u16 len = sizeof (ip4_header_t);
hdr_len += len;
ip4 = (ip4_header_t *) (payload - hdr_len);
- clib_memcpy_fast (ip4, &sa0->ip4_hdr, sizeof (ip4_header_t));
+ clib_memcpy_fast (ip4, &ort->ip4_hdr, sizeof (ip4_header_t));
if (VNET_LINK_IP6 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IPV6;
- tunnel_encap_fixup_6o4_w_chksum (sa0->tunnel_flags,
- (const ip6_header_t *)
- payload, ip4);
+ tunnel_encap_fixup_6o4_w_chksum (
+ ort->tunnel_flags, (const ip6_header_t *) payload, ip4);
}
else if (VNET_LINK_IP4 == lt)
{
*next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
- tunnel_encap_fixup_4o4_w_chksum (sa0->tunnel_flags,
- (const ip4_header_t *)
- payload, ip4);
+ tunnel_encap_fixup_4o4_w_chksum (
+ ort->tunnel_flags, (const ip4_header_t *) payload, ip4);
}
else if (VNET_LINK_MPLS == lt)
{
*next_hdr_ptr = IP_PROTOCOL_MPLS_IN_IP;
tunnel_encap_fixup_mplso4_w_chksum (
- sa0->tunnel_flags, (const mpls_unicast_header_t *) payload,
+ ort->tunnel_flags, (const mpls_unicast_header_t *) payload,
ip4);
}
else
@@ -885,8 +866,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
}
- if (ipsec_sa_is_set_UDP_ENCAP (sa0) &&
- ipsec_sa_is_set_IS_TUNNEL_V6 (sa0))
+ if (ort->udp_encap && ort->is_tunnel_v6)
{
i16 l3_off = b[0]->current_data - hdr_len;
i16 l4_off = l3_off + sizeof (ip6_header_t);
@@ -894,7 +874,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
set_ip6_udp_cksum_offload (b[0], l3_off, l4_off);
}
- dpo = &sa0->dpo;
+ dpo = &ort->dpo;
if (!is_tun)
{
sync_next[0] = dpo->dpoi_next_node;
@@ -953,7 +933,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
esp = (esp_header_t *) (payload - hdr_len);
/* optional UDP header */
- if (ipsec_sa_is_set_UDP_ENCAP (sa0))
+ if (ort->udp_encap)
{
hdr_len += sizeof (udp_header_t);
udp = (udp_header_t *) (payload - hdr_len);
@@ -1010,7 +990,7 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
if (udp)
{
udp_len = len - ip_len;
- esp_fill_udp_hdr (sa0, udp, udp_len);
+ esp_fill_udp_hdr (ort, udp, udp_len);
}
if (udp && (VNET_LINK_IP6 == lt))
@@ -1036,11 +1016,11 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
}
esp->spi = spi;
- esp->seq = clib_net_to_host_u32 (sa0->seq);
+ esp->seq = clib_net_to_host_u32 (ort->seq64);
if (is_async)
{
- async_op = sa0->crypto_async_enc_op_id;
+ async_op = ort->async_op_id;
/* get a frame for this op if we don't yet have one or it's full
*/
@@ -1063,15 +1043,15 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
vec_add1 (ptd->async_frames, async_frames[async_op]);
}
- esp_prepare_async_frame (vm, ptd, async_frames[async_op], sa0, b[0],
+ esp_prepare_async_frame (vm, ptd, async_frames[async_op], ort, b[0],
esp, payload, payload_len, iv_sz, icv_sz,
from[b - bufs], sync_next[0], hdr_len,
async_next_node, lb);
}
else
- esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, sa0, sa0->seq_hi,
- payload, payload_len, iv_sz, icv_sz, n_sync, b,
- lb, hdr_len, esp);
+ esp_prepare_sync_op (vm, ptd, crypto_ops, integ_ops, ort,
+ ort->seq64 >> 32, payload, payload_len, iv_sz,
+ icv_sz, n_sync, b, lb, hdr_len, esp);
vlib_buffer_advance (b[0], 0LL - hdr_len);
@@ -1087,13 +1067,13 @@ esp_encrypt_inline (vlib_main_t *vm, vlib_node_runtime_t *node,
clib_memset_u8 (tr, 0xff, sizeof (*tr));
else
{
+ ipsec_sa_t *sa = ipsec_sa_get (sa_index0);
tr->sa_index = sa_index0;
- tr->spi = sa0->spi;
- tr->seq = sa0->seq;
- tr->sa_seq_hi = sa0->seq_hi;
- tr->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa0);
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
+ tr->spi = sa->spi;
+ tr->seq = ort->seq64;
+ tr->udp_encap = ort->udp_encap;
+ tr->crypto_alg = sa->crypto_alg;
+ tr->integ_alg = sa->integ_alg;
}
}
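
Editorial note on the esp_encrypt.c hunks above: the outbound runtime now carries a single 64-bit `seq64`, so both the on-wire ESP sequence and the ESN high word used for the ICV/AAD are derived from it. A hedged sketch of that split follows; the helper name is illustrative, the byte-order calls are existing vppinfra helpers.

#include <vppinfra/clib.h>
#include <vppinfra/byte_order.h>

/* Hedged sketch: split the outbound runtime's 64-bit ESN counter into the
 * 32-bit sequence carried in the ESP header and the high word that is only
 * authenticated (appended to the ICV input or folded into the AAD). */
static inline void
example_esn_split (u64 seq64, u32 *esp_seq_be, u32 *seq_hi_be)
{
  *esp_seq_be = clib_host_to_net_u32 ((u32) seq64);	    /* on the wire */
  *seq_hi_be = clib_host_to_net_u32 ((u32) (seq64 >> 32)); /* ESN high word */
}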
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index a1d4d56768c..b95b65dfeea 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -312,9 +312,9 @@ clib_error_t *
ipsec_rsc_in_use (ipsec_main_t * im)
{
 /* return an error if crypto resources are in use */
- if (pool_elts (ipsec_sa_pool) > 0)
+ if (pool_elts (im->sa_pool) > 0)
return clib_error_return (0, "%d SA entries configured",
- pool_elts (ipsec_sa_pool));
+ pool_elts (im->sa_pool));
if (ipsec_itf_count () > 0)
return clib_error_return (0, "%d IPSec interface configured",
ipsec_itf_count ());
@@ -384,7 +384,7 @@ ipsec_set_async_mode (u32 is_enabled)
im->async_mode = is_enabled;
/* change SA crypto op data */
- pool_foreach (sa, ipsec_sa_pool)
+ pool_foreach (sa, im->sa_pool)
ipsec_sa_set_async_mode (sa, is_enabled);
}
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 3409d0e4fb9..c4977ddb6b9 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -118,6 +118,9 @@ typedef struct
const u8 iv_size;
const u8 block_align;
const u8 icv_size;
+ const u8 is_aead : 1;
+ const u8 is_ctr : 1;
+ const u8 is_null_gmac : 1;
} ipsec_main_crypto_alg_t;
typedef struct
@@ -263,6 +266,10 @@ typedef struct
u8 async_mode;
u16 msg_id_base;
+
+ ipsec_sa_t *sa_pool;
+ ipsec_sa_inb_rt_t **inb_sa_runtimes;
+ ipsec_sa_outb_rt_t **outb_sa_runtimes;
} ipsec_main_t;
typedef enum ipsec_format_flags_t_
@@ -396,6 +403,8 @@ extern clib_error_t *ipsec_register_next_header (vlib_main_t *vm,
u8 next_header,
const char *next_node);
+#include <vnet/ipsec/ipsec_funcs.h>
+
#endif /* __IPSEC_H__ */
/*
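
Editorial note on the ipsec.h hunks above: the new `is_aead` / `is_ctr` / `is_null_gmac` bits on `ipsec_main_crypto_alg_t` let the per-SA runtime initialization in ipsec_sa.c (below) copy algorithm properties from the table instead of re-deriving them from SA flag macros. A hedged illustration of one table entry follows; the vnet/crypto enum names exist in the tree, but the size values shown are illustrative assumptions, not copied from this patch.

#include <vnet/ipsec/ipsec.h>

/* Hedged illustration only: an AES-128-GCM entry expressed with the new
 * per-algorithm bits. Numeric sizes here are assumptions for illustration. */
static const ipsec_main_crypto_alg_t example_aes_gcm_128 = {
  .enc_op_id = VNET_CRYPTO_OP_AES_128_GCM_ENC,
  .dec_op_id = VNET_CRYPTO_OP_AES_128_GCM_DEC,
  .alg = VNET_CRYPTO_ALG_AES_128_GCM,
  .iv_size = 8,	    /* ESP GCM carries an 8-byte per-packet IV */
  .block_align = 1,
  .icv_size = 16,
  .is_aead = 1,
  .is_ctr = 1,
};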
diff --git a/src/vnet/ipsec/ipsec_api.c b/src/vnet/ipsec/ipsec_api.c
index 21216b1a614..262a8cb8c88 100644
--- a/src/vnet/ipsec/ipsec_api.c
+++ b/src/vnet/ipsec/ipsec_api.c
@@ -40,6 +40,28 @@
#define REPLY_MSG_ID_BASE ipsec_main.msg_id_base
#include <vlibapi/api_helper_macros.h>
+static inline u64
+ipsec_sa_get_inb_seq (ipsec_sa_t *sa)
+{
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ u64 seq;
+
+ seq = irt->seq;
+ if (ipsec_sa_is_set_USE_ESN (sa))
+ seq |= (u64) irt->seq_hi << 32;
+ return seq;
+}
+
+static inline u64
+ipsec_sa_get_outb_seq (ipsec_sa_t *sa)
+{
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ u64 seq;
+
+ seq = ort->seq64;
+ return seq;
+}
+
static void
vl_api_ipsec_spd_add_del_t_handler (vl_api_ipsec_spd_add_del_t * mp)
{
@@ -950,6 +972,8 @@ ipsec_sa_dump_match_sa (index_t itpi, void *arg)
static walk_rc_t
send_ipsec_sa_details (ipsec_sa_t * sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_details_t *mp;
@@ -975,7 +999,7 @@ send_ipsec_sa_details (ipsec_sa_t * sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -992,22 +1016,16 @@ send_ipsec_sa_details (ipsec_sa_t * sa, void *arg)
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
@@ -1036,6 +1054,8 @@ vl_api_ipsec_sa_dump_t_handler (vl_api_ipsec_sa_dump_t * mp)
static walk_rc_t
send_ipsec_sa_v2_details (ipsec_sa_t * sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v2_details_t *mp;
@@ -1061,7 +1081,7 @@ send_ipsec_sa_v2_details (ipsec_sa_t * sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1078,26 +1098,20 @@ send_ipsec_sa_v2_details (ipsec_sa_t * sa, void *arg)
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
mp->entry.tunnel_flags =
tunnel_encap_decap_flags_encode (sa->tunnel.t_encap_decap_flags);
mp->entry.dscp = ip_dscp_encode (sa->tunnel.t_dscp);
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
@@ -1126,6 +1140,8 @@ vl_api_ipsec_sa_v2_dump_t_handler (vl_api_ipsec_sa_v2_dump_t *mp)
static walk_rc_t
send_ipsec_sa_v3_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v3_details_t *mp;
@@ -1150,7 +1166,7 @@ send_ipsec_sa_v3_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1165,22 +1181,16 @@ send_ipsec_sa_v3_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
@@ -1209,8 +1219,12 @@ vl_api_ipsec_sa_v3_dump_t_handler (vl_api_ipsec_sa_v3_dump_t *mp)
static walk_rc_t
send_ipsec_sa_v4_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v4_details_t *mp;
+ u32 thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
@@ -1233,7 +1247,7 @@ send_ipsec_sa_v4_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1248,24 +1262,23 @@ send_ipsec_sa_v4_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
- {
- mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
- }
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
+ mp->replay_window =
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
- mp->thread_index = clib_host_to_net_u32 (sa->thread_index);
+ if (ort)
+ thread_index = ort->thread_index;
+ else if (irt)
+ thread_index = irt->thread_index;
+
+ mp->thread_index = clib_host_to_net_u32 (thread_index);
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);
@@ -1293,8 +1306,12 @@ vl_api_ipsec_sa_v4_dump_t_handler (vl_api_ipsec_sa_v4_dump_t *mp)
static walk_rc_t
send_ipsec_sa_v5_details (ipsec_sa_t *sa, void *arg)
{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
ipsec_dump_walk_ctx_t *ctx = arg;
vl_api_ipsec_sa_v5_details_t *mp;
+ u32 thread_index = 0;
mp = vl_msg_api_alloc (sizeof (*mp));
clib_memset (mp, 0, sizeof (*mp));
@@ -1317,7 +1334,7 @@ send_ipsec_sa_v5_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_IS_PROTECT (sa))
{
ipsec_sa_dump_match_ctx_t ctx = {
- .sai = sa - ipsec_sa_pool,
+ .sai = sa - im->sa_pool,
.sw_if_index = ~0,
};
ipsec_tun_protect_walk (ipsec_sa_dump_match_sa, &ctx);
@@ -1332,27 +1349,27 @@ send_ipsec_sa_v5_details (ipsec_sa_t *sa, void *arg)
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- mp->entry.udp_src_port = sa->udp_hdr.src_port;
- mp->entry.udp_dst_port = sa->udp_hdr.dst_port;
+ mp->entry.udp_src_port = clib_host_to_net_u16 (sa->udp_src_port);
+ mp->entry.udp_dst_port = clib_host_to_net_u16 (sa->udp_dst_port);
}
- mp->seq_outbound = clib_host_to_net_u64 (((u64) sa->seq));
- mp->last_seq_inbound = clib_host_to_net_u64 (((u64) sa->seq));
- if (ipsec_sa_is_set_USE_ESN (sa))
- {
- mp->seq_outbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- mp->last_seq_inbound |= (u64) (clib_host_to_net_u32 (sa->seq_hi));
- }
- if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ mp->seq_outbound = clib_host_to_net_u64 (ipsec_sa_get_outb_seq (sa));
+ mp->last_seq_inbound = clib_host_to_net_u64 (ipsec_sa_get_inb_seq (sa));
+
+ if (ipsec_sa_is_set_USE_ANTI_REPLAY (sa) && irt)
{
mp->replay_window =
- clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (sa));
-
+ clib_host_to_net_u64 (ipsec_sa_anti_replay_get_64b_window (irt));
mp->entry.anti_replay_window_size =
- clib_host_to_net_u32 (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa));
+ clib_host_to_net_u32 (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt));
}
- mp->thread_index = clib_host_to_net_u32 (sa->thread_index);
+ if (ort)
+ thread_index = ort->thread_index;
+ else if (irt)
+ thread_index = irt->thread_index;
+
+ mp->thread_index = clib_host_to_net_u32 (thread_index);
mp->stat_index = clib_host_to_net_u32 (sa->stat_index);
vl_api_send_msg (ctx->reg, (u8 *) mp);
@@ -1427,11 +1444,11 @@ vl_api_ipsec_select_backend_t_handler (vl_api_ipsec_select_backend_t * mp)
vl_api_ipsec_select_backend_reply_t *rmp;
ipsec_protocol_t protocol;
int rv = 0;
- if (pool_elts (ipsec_sa_pool) > 0)
- {
- rv = VNET_API_ERROR_INSTANCE_IN_USE;
- goto done;
- }
+ if (pool_elts (im->sa_pool) > 0)
+ {
+ rv = VNET_API_ERROR_INSTANCE_IN_USE;
+ goto done;
+ }
rv = ipsec_proto_decode (mp->protocol, &protocol);
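
Editorial note on the ipsec_api.c hunks above: the dump handlers now always encode full 64-bit counters with `clib_host_to_net_u64`, so a consumer decodes them symmetrically. A hedged client-side sketch against the v5 details message follows; the generated API header path and the output formatting are assumptions, the message field names are the ones shown above.

#include <stdio.h>
#include <vppinfra/format.h>
#include <vnet/ipsec/ipsec.api_types.h> /* assumed path of the generated types */

/* Hedged sketch: decode the counters encoded by send_ipsec_sa_v5_details. */
static void
example_decode_sa_v5_counters (vl_api_ipsec_sa_v5_details_t *mp)
{
  u64 outb_seq = clib_net_to_host_u64 (mp->seq_outbound);
  u64 inb_seq = clib_net_to_host_u64 (mp->last_seq_inbound);
  u64 window = clib_net_to_host_u64 (mp->replay_window);

  fformat (stdout, "outbound seq %llu last inbound seq %llu window 0x%llx\n",
	   outb_seq, inb_seq, window);
}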
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index 07d9df8f204..77a29d263eb 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -473,7 +473,7 @@ ipsec_sa_show_all (vlib_main_t * vm, ipsec_main_t * im, u8 detail)
{
u32 sai;
- pool_foreach_index (sai, ipsec_sa_pool)
+ pool_foreach_index (sai, im->sa_pool)
{
vlib_cli_output (vm, "%U", format_ipsec_sa, sai,
(detail ? IPSEC_FORMAT_DETAIL : IPSEC_FORMAT_BRIEF));
@@ -583,6 +583,7 @@ static clib_error_t *
clear_ipsec_sa_command_fn (vlib_main_t * vm,
unformat_input_t * input, vlib_cli_command_t * cmd)
{
+ ipsec_main_t *im = &ipsec_main;
u32 sai = ~0;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
@@ -595,14 +596,14 @@ clear_ipsec_sa_command_fn (vlib_main_t * vm,
if (~0 == sai)
{
- pool_foreach_index (sai, ipsec_sa_pool)
+ pool_foreach_index (sai, im->sa_pool)
{
ipsec_sa_clear (sai);
}
}
else
{
- if (pool_is_free_index (ipsec_sa_pool, sai))
+ if (pool_is_free_index (im->sa_pool, sai))
return clib_error_return (0, "unknown SA index: %d", sai);
else
ipsec_sa_clear (sai);
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index e421a0d96b4..0bbdc85aaed 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -441,19 +441,24 @@ format_ipsec_sa_flags (u8 * s, va_list * args)
u8 *
format_ipsec_sa (u8 * s, va_list * args)
{
+ ipsec_main_t *im = &ipsec_main;
u32 sai = va_arg (*args, u32);
ipsec_format_flags_t flags = va_arg (*args, ipsec_format_flags_t);
vlib_counter_t counts;
counter_t errors;
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
- if (pool_is_free_index (ipsec_sa_pool, sai))
+ if (pool_is_free_index (im->sa_pool, sai))
{
s = format (s, "No such SA index: %d", sai);
goto done;
}
sa = ipsec_sa_get (sai);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
s = format (s, "[%d] sa %u (0x%x) spi %u (0x%08x) protocol:%s flags:[%U]",
sai, sa->id, sa->id, sa->spi, sa->spi,
@@ -464,12 +469,21 @@ format_ipsec_sa (u8 * s, va_list * args)
s = format (s, "\n locks %d", sa->node.fn_locks);
s = format (s, "\n salt 0x%x", clib_net_to_host_u32 (sa->salt));
- s = format (s, "\n thread-index:%d", sa->thread_index);
- s = format (s, "\n seq %u seq-hi %u", sa->seq, sa->seq_hi);
- s = format (s, "\n window-size: %llu",
- IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa));
- s = format (s, "\n window: Bl <- %U Tl", format_ipsec_replay_window,
- ipsec_sa_anti_replay_get_64b_window (sa));
+ if (irt)
+ s = format (s, "\n inbound thread-index:%d", irt->thread_index);
+ if (ort)
+ s = format (s, "\n outbound thread-index:%d", ort->thread_index);
+ if (irt)
+ s = format (s, "\n inbound seq %u seq-hi %u", irt->seq, irt->seq_hi);
+ if (ort)
+ s = format (s, "\n outbound seq %lu", ort->seq64);
+ if (irt)
+ {
+ s = format (s, "\n window-size: %llu",
+ IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt));
+ s = format (s, "\n window: Bl <- %U Tl", format_ipsec_replay_window,
+ ipsec_sa_anti_replay_get_64b_window (irt));
+ }
s =
format (s, "\n crypto alg %U", format_ipsec_crypto_alg, sa->crypto_alg);
if (sa->crypto_alg && (flags & IPSEC_FORMAT_INSECURE))
@@ -482,9 +496,8 @@ format_ipsec_sa (u8 * s, va_list * args)
s = format (s, " key %U", format_ipsec_key, &sa->integ_key);
else
s = format (s, " key [redacted]");
- s = format (s, "\n UDP:[src:%d dst:%d]",
- clib_host_to_net_u16 (sa->udp_hdr.src_port),
- clib_host_to_net_u16 (sa->udp_hdr.dst_port));
+ s =
+ format (s, "\n UDP:[src:%d dst:%d]", sa->udp_src_port, sa->udp_dst_port);
vlib_get_combined_counter (&ipsec_sa_counters, sai, &counts);
s = format (s, "\n tx/rx:[packets:%Ld bytes:%Ld]", counts.packets,
diff --git a/src/vnet/ipsec/ipsec_funcs.h b/src/vnet/ipsec/ipsec_funcs.h
new file mode 100644
index 00000000000..29788b3d765
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_funcs.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: Apache-2.0
+ * Copyright (c) 2025 Cisco Systems, Inc.
+ */
+
+#ifndef __IPSEC_FUNCS_H__
+#define __IPSEC_FUNCS_H__
+
+#include <vlib/vlib.h>
+#include <vnet/ipsec/ipsec.h>
+
+always_inline ipsec_sa_t *
+ipsec_sa_get (u32 sa_index)
+{
+ return (pool_elt_at_index (ipsec_main.sa_pool, sa_index));
+}
+
+static_always_inline ipsec_sa_outb_rt_t *
+ipsec_sa_get_outb_rt_by_index (u32 sa_index)
+{
+ return ipsec_main.outb_sa_runtimes[sa_index];
+}
+
+static_always_inline ipsec_sa_inb_rt_t *
+ipsec_sa_get_inb_rt_by_index (u32 sa_index)
+{
+ return ipsec_main.inb_sa_runtimes[sa_index];
+}
+
+static_always_inline ipsec_sa_outb_rt_t *
+ipsec_sa_get_outb_rt (ipsec_sa_t *sa)
+{
+ return ipsec_sa_get_outb_rt_by_index (sa - ipsec_main.sa_pool);
+}
+
+static_always_inline ipsec_sa_inb_rt_t *
+ipsec_sa_get_inb_rt (ipsec_sa_t *sa)
+{
+ return ipsec_sa_get_inb_rt_by_index (sa - ipsec_main.sa_pool);
+}
+
+#endif /* __IPSEC_FUNCS_H__ */
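
Editorial note on the new ipsec_funcs.h above: these accessors are now the single way to reach the split inbound/outbound runtimes from an `ipsec_sa_t` or an SA index. A hedged usage sketch follows; everything except the accessor and field names introduced by this patch (the CLI output call, format string and helper name) is illustrative.

#include <vlib/vlib.h>
#include <vnet/ipsec/ipsec.h>

/* Hedged usage sketch: resolve both runtimes for an SA index and report the
 * per-direction thread ownership. */
static void
example_show_sa_threads (vlib_main_t *vm, u32 sa_index)
{
  ipsec_sa_t *sa = ipsec_sa_get (sa_index);
  ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
  ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);

  vlib_cli_output (vm, "sa %u spi %u inbound thread %d outbound thread %d",
		   sa->id, sa->spi, irt ? (int) irt->thread_index : -1,
		   ort ? (int) ort->thread_index : -1);
}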
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index d37d89d5e3e..eb4270ac2b4 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -33,8 +33,6 @@ vlib_combined_counter_main_t ipsec_sa_counters = {
/* Per-SA error counters */
vlib_simple_counter_main_t ipsec_sa_err_counters[IPSEC_SA_N_ERRORS];
-ipsec_sa_t *ipsec_sa_pool;
-
static clib_error_t *
ipsec_call_add_del_callbacks (ipsec_main_t * im, ipsec_sa_t * sa,
u32 sa_index, int is_add)
@@ -77,39 +75,71 @@ static void
ipsec_sa_stack (ipsec_sa_t * sa)
{
ipsec_main_t *im = &ipsec_main;
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
dpo_id_t tmp = DPO_INVALID;
tunnel_contribute_forwarding (&sa->tunnel, &tmp);
if (IPSEC_PROTOCOL_AH == sa->protocol)
dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
- im->ah6_encrypt_node_index :
- im->ah4_encrypt_node_index), &sa->dpo, &tmp);
+ im->ah6_encrypt_node_index :
+ im->ah4_encrypt_node_index),
+ &ort->dpo, &tmp);
else
dpo_stack_from_node ((ipsec_sa_is_set_IS_TUNNEL_V6 (sa) ?
- im->esp6_encrypt_node_index :
- im->esp4_encrypt_node_index), &sa->dpo, &tmp);
+ im->esp6_encrypt_node_index :
+ im->esp4_encrypt_node_index),
+ &ort->dpo, &tmp);
dpo_reset (&tmp);
}
void
ipsec_sa_set_async_mode (ipsec_sa_t *sa, int is_enabled)
{
+ u32 cipher_key_index, integ_key_index;
+ vnet_crypto_op_id_t inb_cipher_op_id, outb_cipher_op_id, integ_op_id;
+ u32 is_async;
if (is_enabled)
{
- sa->crypto_key_index = sa->crypto_async_key_index;
- sa->crypto_enc_op_id = sa->crypto_async_enc_op_id;
- sa->crypto_dec_op_id = sa->crypto_async_dec_op_id;
- sa->integ_key_index = ~0;
- sa->integ_op_id = ~0;
+ if (sa->linked_key_index != ~0)
+ cipher_key_index = sa->linked_key_index;
+ else
+ cipher_key_index = sa->crypto_sync_key_index;
+
+ outb_cipher_op_id = sa->crypto_async_enc_op_id;
+ inb_cipher_op_id = sa->crypto_async_dec_op_id;
+ integ_key_index = ~0;
+ integ_op_id = ~0;
+ is_async = 1;
}
else
{
- sa->crypto_key_index = sa->crypto_sync_key_index;
- sa->crypto_enc_op_id = sa->crypto_sync_enc_op_id;
- sa->crypto_dec_op_id = sa->crypto_sync_dec_op_id;
- sa->integ_key_index = sa->integ_sync_key_index;
- sa->integ_op_id = sa->integ_sync_op_id;
+ cipher_key_index = sa->crypto_sync_key_index;
+ outb_cipher_op_id = sa->crypto_sync_enc_op_id;
+ inb_cipher_op_id = sa->crypto_sync_dec_op_id;
+ integ_key_index = sa->integ_sync_key_index;
+ integ_op_id = sa->integ_sync_op_id;
+ is_async = 0;
+ }
+
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->cipher_key_index = cipher_key_index;
+ irt->integ_key_index = integ_key_index;
+ irt->cipher_op_id = inb_cipher_op_id;
+ irt->integ_op_id = integ_op_id;
+ irt->is_async = is_async;
+ }
+
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->cipher_key_index = cipher_key_index;
+ ort->integ_key_index = integ_key_index;
+ ort->cipher_op_id = outb_cipher_op_id;
+ ort->integ_op_id = integ_op_id;
+ ort->is_async = is_async;
}
}
@@ -117,32 +147,11 @@ void
ipsec_sa_set_crypto_alg (ipsec_sa_t * sa, ipsec_crypto_alg_t crypto_alg)
{
ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + crypto_alg;
sa->crypto_alg = crypto_alg;
- sa->crypto_iv_size = im->crypto_algs[crypto_alg].iv_size;
- sa->esp_block_align = clib_max (4, im->crypto_algs[crypto_alg].block_align);
- sa->crypto_sync_enc_op_id = im->crypto_algs[crypto_alg].enc_op_id;
- sa->crypto_sync_dec_op_id = im->crypto_algs[crypto_alg].dec_op_id;
- sa->crypto_calg = im->crypto_algs[crypto_alg].alg;
- ASSERT (sa->crypto_iv_size <= ESP_MAX_IV_SIZE);
- ASSERT (sa->esp_block_align <= ESP_MAX_BLOCK_SIZE);
- if (IPSEC_CRYPTO_ALG_IS_GCM (crypto_alg) ||
- IPSEC_CRYPTO_ALG_CTR_AEAD_OTHERS (crypto_alg))
- {
- sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
- ipsec_sa_set_IS_CTR (sa);
- ipsec_sa_set_IS_AEAD (sa);
- }
- else if (IPSEC_CRYPTO_ALG_IS_CTR (crypto_alg))
- {
- ipsec_sa_set_IS_CTR (sa);
- }
- else if (IPSEC_CRYPTO_ALG_IS_NULL_GMAC (crypto_alg))
- {
- sa->integ_icv_size = im->crypto_algs[crypto_alg].icv_size;
- ipsec_sa_set_IS_CTR (sa);
- ipsec_sa_set_IS_AEAD (sa);
- ipsec_sa_set_IS_NULL_GMAC (sa);
- }
+ sa->crypto_sync_enc_op_id = alg->enc_op_id;
+ sa->crypto_sync_dec_op_id = alg->dec_op_id;
+ sa->crypto_calg = alg->alg;
}
void
@@ -150,14 +159,12 @@ ipsec_sa_set_integ_alg (ipsec_sa_t * sa, ipsec_integ_alg_t integ_alg)
{
ipsec_main_t *im = &ipsec_main;
sa->integ_alg = integ_alg;
- sa->integ_icv_size = im->integ_algs[integ_alg].icv_size;
sa->integ_sync_op_id = im->integ_algs[integ_alg].op_id;
sa->integ_calg = im->integ_algs[integ_alg].alg;
- ASSERT (sa->integ_icv_size <= ESP_MAX_ICV_SIZE);
}
-void
-ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
+static void
+ipsec_sa_set_async_op_ids (ipsec_sa_t *sa)
{
if (ipsec_sa_is_set_USE_ESN (sa))
{
@@ -191,12 +198,90 @@ ipsec_sa_set_async_op_ids (ipsec_sa_t * sa)
#undef _
}
+static void
+ipsec_sa_init_runtime (ipsec_sa_t *sa)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + sa->crypto_alg;
+ u8 integ_icv_size;
+
+ if (alg->is_aead)
+ integ_icv_size = im->crypto_algs[sa->crypto_alg].icv_size;
+ else
+ integ_icv_size = im->integ_algs[sa->integ_alg].icv_size;
+ ASSERT (integ_icv_size <= ESP_MAX_ICV_SIZE);
+
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->anti_reply_huge = ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa);
+ irt->use_anti_replay = ipsec_sa_is_set_USE_ANTI_REPLAY (sa);
+ irt->use_esn = ipsec_sa_is_set_USE_ESN (sa);
+ irt->is_tunnel = ipsec_sa_is_set_IS_TUNNEL (sa);
+ irt->is_transport =
+ !(ipsec_sa_is_set_IS_TUNNEL (sa) || ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ irt->udp_sz = ipsec_sa_is_set_UDP_ENCAP (sa) ? sizeof (udp_header_t) : 0;
+ irt->is_ctr = alg->is_ctr;
+ irt->is_aead = alg->is_aead;
+ irt->is_null_gmac = alg->is_null_gmac;
+ irt->cipher_iv_size = im->crypto_algs[sa->crypto_alg].iv_size;
+ irt->integ_icv_size = integ_icv_size;
+ irt->salt = sa->salt;
+ irt->async_op_id = sa->crypto_async_dec_op_id;
+ ASSERT (irt->cipher_iv_size <= ESP_MAX_IV_SIZE);
+ }
+
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->use_anti_replay = ipsec_sa_is_set_USE_ANTI_REPLAY (sa);
+ ort->use_esn = ipsec_sa_is_set_USE_ESN (sa);
+ ort->is_ctr = alg->is_ctr;
+ ort->is_aead = alg->is_aead;
+ ort->is_null_gmac = alg->is_null_gmac;
+ ort->is_tunnel = ipsec_sa_is_set_IS_TUNNEL (sa);
+ ort->is_tunnel_v6 = ipsec_sa_is_set_IS_TUNNEL_V6 (sa);
+ ort->udp_encap = ipsec_sa_is_set_UDP_ENCAP (sa);
+ ort->esp_block_align =
+ clib_max (4, im->crypto_algs[sa->crypto_alg].block_align);
+ ort->cipher_iv_size = im->crypto_algs[sa->crypto_alg].iv_size;
+ ort->integ_icv_size = integ_icv_size;
+ ort->salt = sa->salt;
+ ort->spi_be = clib_host_to_net_u32 (sa->spi);
+ ort->tunnel_flags = sa->tunnel.t_encap_decap_flags;
+ ort->async_op_id = sa->crypto_async_enc_op_id;
+ ort->t_dscp = sa->tunnel.t_dscp;
+
+ ASSERT (ort->cipher_iv_size <= ESP_MAX_IV_SIZE);
+ ASSERT (ort->esp_block_align <= ESP_MAX_BLOCK_SIZE);
+ }
+ ipsec_sa_update_runtime (sa);
+}
+
+void
+ipsec_sa_update_runtime (ipsec_sa_t *sa)
+{
+ if (ipsec_sa_get_inb_rt (sa))
+ {
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ irt->is_protect = ipsec_sa_is_set_IS_PROTECT (sa);
+ }
+ if (ipsec_sa_get_outb_rt (sa))
+ {
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
+ ort->drop_no_crypto = sa->crypto_alg == IPSEC_CRYPTO_ALG_NONE &&
+ sa->integ_alg == IPSEC_INTEG_ALG_NONE &&
+ !ipsec_sa_is_set_NO_ALGO_NO_DROP (sa);
+ }
+}
+
int
ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
bool is_tun)
{
ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
+ ipsec_sa_outb_rt_t *ort;
u32 sa_index;
uword *p;
int rv;
@@ -206,7 +291,8 @@ ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
return VNET_API_ERROR_NO_SUCH_ENTRY;
sa = ipsec_sa_get (p[0]);
- sa_index = sa - ipsec_sa_pool;
+ ort = ipsec_sa_get_outb_rt (sa);
+ sa_index = sa - im->sa_pool;
if (is_tun && ipsec_sa_is_set_IS_TUNNEL (sa) &&
(ip_address_cmp (&tun->t_src, &sa->tunnel.t_src) != 0 ||
@@ -267,16 +353,16 @@ ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
tunnel_copy (tun, &sa->tunnel);
if (!ipsec_sa_is_set_IS_INBOUND (sa))
{
- dpo_reset (&sa->dpo);
+ dpo_reset (&ort->dpo);
- sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;
+ ort->tunnel_flags = sa->tunnel.t_encap_decap_flags;
rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);
if (rv)
{
hash_unset (im->sa_index_by_sa_id, sa->id);
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return rv;
}
ipsec_sa_stack (sa);
@@ -285,39 +371,42 @@ ipsec_sa_update (u32 id, u16 src_port, u16 dst_port, const tunnel_t *tun,
{
tunnel_build_v6_hdr (&sa->tunnel,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
- IP_PROTOCOL_UDP :
- IP_PROTOCOL_IPSEC_ESP),
- &sa->ip6_hdr);
+ IP_PROTOCOL_UDP :
+ IP_PROTOCOL_IPSEC_ESP),
+ &ort->ip6_hdr);
}
else
{
tunnel_build_v4_hdr (&sa->tunnel,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
- IP_PROTOCOL_UDP :
- IP_PROTOCOL_IPSEC_ESP),
- &sa->ip4_hdr);
+ IP_PROTOCOL_UDP :
+ IP_PROTOCOL_IPSEC_ESP),
+ &ort->ip4_hdr);
}
}
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
- if (dst_port != IPSEC_UDP_PORT_NONE &&
- dst_port != clib_net_to_host_u16 (sa->udp_hdr.dst_port))
+ if (dst_port != IPSEC_UDP_PORT_NONE && dst_port != sa->udp_dst_port)
{
if (ipsec_sa_is_set_IS_INBOUND (sa))
{
- ipsec_unregister_udp_port (
- clib_net_to_host_u16 (sa->udp_hdr.dst_port),
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ ipsec_unregister_udp_port (sa->udp_dst_port,
+ !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
ipsec_register_udp_port (dst_port,
!ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
}
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ sa->udp_dst_port = dst_port;
+ if (ort)
+ ort->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ }
+ if (src_port != IPSEC_UDP_PORT_NONE && src_port != (sa->udp_src_port))
+ {
+ sa->udp_src_port = src_port;
+ if (ort)
+ ort->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
}
- if (src_port != IPSEC_UDP_PORT_NONE &&
- src_port != clib_net_to_host_u16 (sa->udp_hdr.src_port))
- sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
}
return (0);
}
@@ -332,6 +421,9 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
{
vlib_main_t *vm = vlib_get_main ();
ipsec_main_t *im = &ipsec_main;
+ ipsec_main_crypto_alg_t *alg = im->crypto_algs + crypto_alg;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
clib_error_t *err;
ipsec_sa_t *sa;
u32 sa_index;
@@ -346,13 +438,24 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (getrandom (rand, sizeof (rand), 0) != sizeof (rand))
return VNET_API_ERROR_INIT_FAILED;
- pool_get_aligned_zero (ipsec_sa_pool, sa, CLIB_CACHE_LINE_BYTES);
+ pool_get_aligned_zero (im->sa_pool, sa, CLIB_CACHE_LINE_BYTES);
+ sa_index = sa - im->sa_pool;
+ vec_validate (im->inb_sa_runtimes, sa_index);
+ vec_validate (im->outb_sa_runtimes, sa_index);
- clib_pcg64i_srandom_r (&sa->iv_prng, rand[0], rand[1]);
+ irt = clib_mem_alloc_aligned (sizeof (ipsec_sa_inb_rt_t),
+ _Alignof (ipsec_sa_inb_rt_t));
+ ort = clib_mem_alloc_aligned (sizeof (ipsec_sa_outb_rt_t),
+ _Alignof (ipsec_sa_outb_rt_t));
+ im->inb_sa_runtimes[sa_index] = irt;
+ im->outb_sa_runtimes[sa_index] = ort;
+ clib_memset (irt, 0, sizeof (ipsec_sa_inb_rt_t));
+ clib_memset (ort, 0, sizeof (ipsec_sa_outb_rt_t));
+
+ clib_pcg64i_srandom_r (&ort->iv_prng, rand[0], rand[1]);
fib_node_init (&sa->node, FIB_NODE_TYPE_IPSEC_SA);
fib_node_lock (&sa->node);
- sa_index = sa - ipsec_sa_pool;
vlib_validate_combined_counter (&ipsec_sa_counters, sa_index);
vlib_zero_combined_counter (&ipsec_sa_counters, sa_index);
@@ -369,7 +472,11 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
sa->protocol = proto;
sa->flags = flags;
sa->salt = salt;
- sa->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+ if (irt)
+ irt->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+ if (ort)
+ ort->thread_index = (vlib_num_workers ()) ? ~0 : 0;
+
if (integ_alg != IPSEC_INTEG_ALG_NONE)
{
ipsec_sa_set_integ_alg (sa, integ_alg);
@@ -389,7 +496,7 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
vm, im->crypto_algs[crypto_alg].alg, (u8 *) ck->data, ck->len);
if (~0 == sa->crypto_sync_key_index)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_KEY_LENGTH;
}
}
@@ -400,17 +507,17 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
vm, im->integ_algs[integ_alg].alg, (u8 *) ik->data, ik->len);
if (~0 == sa->integ_sync_key_index)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_KEY_LENGTH;
}
}
- if (sa->crypto_async_enc_op_id && !ipsec_sa_is_set_IS_AEAD (sa))
- sa->crypto_async_key_index =
+ if (sa->crypto_async_enc_op_id && alg->is_aead == 0)
+ sa->linked_key_index =
vnet_crypto_key_add_linked (vm, sa->crypto_sync_key_index,
sa->integ_sync_key_index); // AES-CBC & HMAC
else
- sa->crypto_async_key_index = sa->crypto_sync_key_index;
+ sa->linked_key_index = ~0;
if (im->async_mode)
{
@@ -429,14 +536,14 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (err)
{
clib_warning ("%v", err->what);
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_UNIMPLEMENTED;
}
err = ipsec_call_add_del_callbacks (im, sa, sa_index, 1);
if (err)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return VNET_API_ERROR_SYSCALL_ERROR_1;
}
@@ -446,13 +553,12 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
{
- sa->tunnel_flags = sa->tunnel.t_encap_decap_flags;
rv = tunnel_resolve (&sa->tunnel, FIB_NODE_TYPE_IPSEC_SA, sa_index);
if (rv)
{
- pool_put (ipsec_sa_pool, sa);
+ pool_put (im->sa_pool, sa);
return rv;
}
ipsec_sa_stack (sa);
@@ -464,7 +570,7 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
IP_PROTOCOL_UDP :
IP_PROTOCOL_IPSEC_ESP),
- &sa->ip6_hdr);
+ &ort->ip6_hdr);
}
else
{
@@ -472,37 +578,38 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
(ipsec_sa_is_set_UDP_ENCAP (sa) ?
IP_PROTOCOL_UDP :
IP_PROTOCOL_IPSEC_ESP),
- &sa->ip4_hdr);
+ &ort->ip4_hdr);
}
}
if (ipsec_sa_is_set_UDP_ENCAP (sa))
{
if (dst_port == IPSEC_UDP_PORT_NONE)
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- else
- sa->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
-
+ dst_port = UDP_DST_PORT_ipsec;
if (src_port == IPSEC_UDP_PORT_NONE)
- sa->udp_hdr.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- else
- sa->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
+ src_port = UDP_DST_PORT_ipsec;
+ sa->udp_dst_port = dst_port;
+ sa->udp_src_port = src_port;
+ if (ort)
+ {
+ ort->udp_hdr.src_port = clib_host_to_net_u16 (src_port);
+ ort->udp_hdr.dst_port = clib_host_to_net_u16 (dst_port);
+ }
if (ipsec_sa_is_set_IS_INBOUND (sa))
- ipsec_register_udp_port (clib_host_to_net_u16 (sa->udp_hdr.dst_port),
- !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
+ ipsec_register_udp_port (dst_port, !ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
}
/* window size rounded up to next power of 2 */
if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
{
anti_replay_window_size = 1 << max_log2 (anti_replay_window_size);
- sa->replay_window_huge =
+ irt->replay_window_huge =
clib_bitmap_set_region (0, 0, 1, anti_replay_window_size);
}
else
{
- sa->replay_window = ~0;
+ irt->replay_window = ~0;
}
hash_set (im->sa_index_by_sa_id, sa->id, sa_index);
@@ -510,6 +617,8 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
if (sa_out_index)
*sa_out_index = sa_index;
+ ipsec_sa_init_runtime (sa);
+
return (0);
}
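/* Hedged sketch: ipsec_sa_get_inb_rt()/ipsec_sa_get_outb_rt() used below are
 * assumed to resolve a runtime through the parallel vectors populated above;
 * their real definitions live elsewhere in this patch, but presumably read
 * along these lines (the example_* names are hypothetical): */
static_always_inline ipsec_sa_inb_rt_t *
example_get_inb_rt (ipsec_sa_t *sa)
{
  ipsec_main_t *im = &ipsec_main;
  return im->inb_sa_runtimes[sa - im->sa_pool];
}

static_always_inline ipsec_sa_outb_rt_t *
example_get_outb_rt (ipsec_sa_t *sa)
{
  ipsec_main_t *im = &ipsec_main;
  return im->outb_sa_runtimes[sa - im->sa_pool];
}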
@@ -519,33 +628,40 @@ ipsec_sa_del (ipsec_sa_t * sa)
vlib_main_t *vm = vlib_get_main ();
ipsec_main_t *im = &ipsec_main;
u32 sa_index;
+ ipsec_sa_inb_rt_t *irt = ipsec_sa_get_inb_rt (sa);
+ ipsec_sa_outb_rt_t *ort = ipsec_sa_get_outb_rt (sa);
- sa_index = sa - ipsec_sa_pool;
+ sa_index = sa - im->sa_pool;
hash_unset (im->sa_index_by_sa_id, sa->id);
tunnel_unresolve (&sa->tunnel);
/* no recovery possible when deleting an SA */
(void) ipsec_call_add_del_callbacks (im, sa, sa_index, 0);
- if (ipsec_sa_is_set_IS_ASYNC (sa))
- {
- if (!ipsec_sa_is_set_IS_AEAD (sa))
- vnet_crypto_key_del (vm, sa->crypto_async_key_index);
- }
+ if (sa->linked_key_index != ~0)
+ vnet_crypto_key_del (vm, sa->linked_key_index);
if (ipsec_sa_is_set_UDP_ENCAP (sa) && ipsec_sa_is_set_IS_INBOUND (sa))
- ipsec_unregister_udp_port (clib_net_to_host_u16 (sa->udp_hdr.dst_port),
+ ipsec_unregister_udp_port (sa->udp_dst_port,
!ipsec_sa_is_set_IS_TUNNEL_V6 (sa));
if (ipsec_sa_is_set_IS_TUNNEL (sa) && !ipsec_sa_is_set_IS_INBOUND (sa))
- dpo_reset (&sa->dpo);
+ dpo_reset (&ort->dpo);
if (sa->crypto_alg != IPSEC_CRYPTO_ALG_NONE)
vnet_crypto_key_del (vm, sa->crypto_sync_key_index);
if (sa->integ_alg != IPSEC_INTEG_ALG_NONE)
vnet_crypto_key_del (vm, sa->integ_sync_key_index);
if (ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- clib_bitmap_free (sa->replay_window_huge);
- pool_put (ipsec_sa_pool, sa);
+ if (irt && irt->replay_window_huge)
+ clib_bitmap_free (irt->replay_window_huge);
+ foreach_pointer (p, irt, ort)
+ if (p)
+ clib_mem_free (p);
+
+ im->inb_sa_runtimes[sa_index] = 0;
+ im->outb_sa_runtimes[sa_index] = 0;
+
+ pool_put (im->sa_pool, sa);
}
int
@@ -554,23 +670,33 @@ ipsec_sa_bind (u32 id, u32 worker, bool bind)
ipsec_main_t *im = &ipsec_main;
uword *p;
ipsec_sa_t *sa;
+ ipsec_sa_inb_rt_t *irt;
+ ipsec_sa_outb_rt_t *ort;
+ u16 thread_index;
p = hash_get (im->sa_index_by_sa_id, id);
if (!p)
return VNET_API_ERROR_INVALID_VALUE;
sa = ipsec_sa_get (p[0]);
+ irt = ipsec_sa_get_inb_rt (sa);
+ ort = ipsec_sa_get_outb_rt (sa);
if (!bind)
{
- sa->thread_index = ~0;
- return 0;
+ thread_index = ~0;
+ goto done;
}
if (worker >= vlib_num_workers ())
return VNET_API_ERROR_INVALID_WORKER;
- sa->thread_index = vlib_get_worker_thread_index (worker);
+ thread_index = vlib_get_worker_thread_index (worker);
+done:
+ if (irt)
+ irt->thread_index = thread_index;
+ if (ort)
+ ort->thread_index = thread_index;
return 0;
}
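/* Hedged usage sketch (hypothetical wrapper and values): binding or
 * unbinding an SA now updates the thread_index of both the inbound and
 * outbound runtimes in a single call: */
static int
example_pin_sa_to_worker (u32 sa_id, u32 worker)
{
  int rv = ipsec_sa_bind (sa_id, worker, true /* bind */);
  if (rv)
    return rv; /* e.g. VNET_API_ERROR_INVALID_WORKER */
  return 0;
}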
@@ -646,9 +772,10 @@ ipsec_sa_clear (index_t sai)
void
ipsec_sa_walk (ipsec_sa_walk_cb_t cb, void *ctx)
{
+ ipsec_main_t *im = &ipsec_main;
ipsec_sa_t *sa;
- pool_foreach (sa, ipsec_sa_pool)
+ pool_foreach (sa, im->sa_pool)
{
if (WALK_CONTINUE != cb (sa, ctx))
break;
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 640d9288a42..ce2964a9493 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -52,24 +52,6 @@ typedef enum
IPSEC_CRYPTO_N_ALG,
} __clib_packed ipsec_crypto_alg_t;
-#define IPSEC_CRYPTO_ALG_IS_NULL_GMAC(_alg) \
- ((_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256))
-
-#define IPSEC_CRYPTO_ALG_IS_GCM(_alg) \
- (((_alg == IPSEC_CRYPTO_ALG_AES_GCM_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_GCM_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_GCM_256)))
-
-#define IPSEC_CRYPTO_ALG_IS_CTR(_alg) \
- (((_alg == IPSEC_CRYPTO_ALG_AES_CTR_128) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_CTR_192) || \
- (_alg == IPSEC_CRYPTO_ALG_AES_CTR_256)))
-
-#define IPSEC_CRYPTO_ALG_CTR_AEAD_OTHERS(_alg) \
- (_alg == IPSEC_CRYPTO_ALG_CHACHA20_POLY1305)
-
#define foreach_ipsec_integ_alg \
_ (0, NONE, "none") \
_ (1, MD5_96, "md5-96") /* RFC2403 */ \
@@ -117,11 +99,8 @@ typedef struct ipsec_key_t_
_ (16, UDP_ENCAP, "udp-encap") \
_ (32, IS_PROTECT, "Protect") \
_ (64, IS_INBOUND, "inbound") \
- _ (128, IS_AEAD, "aead") \
- _ (256, IS_CTR, "ctr") \
_ (512, IS_ASYNC, "async") \
_ (1024, NO_ALGO_NO_DROP, "no-algo-no-drop") \
- _ (2048, IS_NULL_GMAC, "null-gmac") \
_ (4096, ANTI_REPLAY_HUGE, "anti-replay-huge")
typedef enum ipsec_sad_flags_t_
@@ -165,51 +144,87 @@ typedef enum
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
-
- clib_pcg64i_random_t iv_prng;
-
+ u16 is_aead : 1;
+ u16 is_ctr : 1;
+ u16 is_null_gmac : 1;
+ u16 use_esn : 1;
+ u16 use_anti_replay : 1;
+ u16 anti_reply_huge : 1;
+ u16 is_protect : 1;
+ u16 is_tunnel : 1;
+ u16 is_transport : 1;
+ u16 is_async : 1;
+ u16 cipher_op_id;
+ u16 integ_op_id;
+ u8 cipher_iv_size;
+ u8 integ_icv_size;
+ u8 udp_sz;
+ u16 thread_index;
+ u32 salt;
+ u32 seq;
+ u32 seq_hi;
+ u16 async_op_id;
+ vnet_crypto_key_index_t cipher_key_index;
+ vnet_crypto_key_index_t integ_key_index;
union
{
u64 replay_window;
clib_bitmap_t *replay_window_huge;
};
- dpo_id_t dpo;
-
- vnet_crypto_key_index_t crypto_key_index;
- vnet_crypto_key_index_t integ_key_index;
-
- u32 spi;
- u32 seq;
- u32 seq_hi;
+} ipsec_sa_inb_rt_t;
- u16 crypto_enc_op_id;
- u16 crypto_dec_op_id;
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u16 is_aead : 1;
+ u16 is_ctr : 1;
+ u16 is_null_gmac : 1;
+ u16 is_tunnel : 1;
+ u16 is_tunnel_v6 : 1;
+ u16 udp_encap : 1;
+ u16 use_esn : 1;
+ u16 use_anti_replay : 1;
+ u16 drop_no_crypto : 1;
+ u16 is_async : 1;
+ clib_pcg64i_random_t iv_prng;
+ u16 cipher_op_id;
u16 integ_op_id;
- ipsec_sa_flags_t flags;
+ u8 cipher_iv_size;
+ u8 esp_block_align;
+ u8 integ_icv_size;
u16 thread_index;
-
- u16 integ_icv_size : 6;
- u16 crypto_iv_size : 5;
- u16 esp_block_align : 5;
-
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
-
+ u32 salt;
+ u64 seq64;
+ u32 spi_be;
+ ip_dscp_t t_dscp;
+ dpo_id_t dpo;
+ tunnel_encap_decap_flags_t tunnel_flags;
+ u16 async_op_id;
+ vnet_crypto_key_index_t cipher_key_index;
+ vnet_crypto_key_index_t integ_key_index;
union
{
ip4_header_t ip4_hdr;
ip6_header_t ip6_hdr;
};
udp_header_t udp_hdr;
+} ipsec_sa_outb_rt_t;
+
+typedef struct
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+
+ u32 spi;
+
+ ipsec_sa_flags_t flags;
+
+ u16 udp_src_port;
+ u16 udp_dst_port;
/* Salt used in CTR modes (incl. GCM) - stored in network byte order */
u32 salt;
ipsec_protocol_t protocol;
- tunnel_encap_decap_flags_t tunnel_flags;
- u8 __pad[2];
-
- /* data accessed by dataplane code should be above this comment */
- CLIB_CACHE_LINE_ALIGN_MARK (cacheline2);
/* Elements with u64 size multiples */
tunnel_t tunnel;
@@ -222,7 +237,7 @@ typedef struct
vnet_crypto_alg_t crypto_calg;
u32 crypto_sync_key_index;
u32 integ_sync_key_index;
- u32 crypto_async_key_index;
+ u32 linked_key_index;
/* elements with u16 size */
u16 crypto_sync_enc_op_id;
@@ -243,13 +258,6 @@ STATIC_ASSERT (VNET_CRYPTO_N_OP_IDS < (1 << 16), "crypto ops overflow");
STATIC_ASSERT (ESP_MAX_ICV_SIZE < (1 << 6), "integer icv overflow");
STATIC_ASSERT (ESP_MAX_IV_SIZE < (1 << 5), "esp iv overflow");
STATIC_ASSERT (ESP_MAX_BLOCK_SIZE < (1 << 5), "esp alignment overflow");
-STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline1, CLIB_CACHE_LINE_BYTES);
-STATIC_ASSERT_OFFSET_OF (ipsec_sa_t, cacheline2, 2 * CLIB_CACHE_LINE_BYTES);
-
-/**
- * Pool of IPSec SAs
- */
-extern ipsec_sa_t *ipsec_sa_pool;
/*
* Ensure that the IPsec data does not overlap with the IP data in
@@ -291,6 +299,7 @@ extern void ipsec_mk_key (ipsec_key_t *key, const u8 *data, u8 len);
extern int ipsec_sa_update (u32 id, u16 src_port, u16 dst_port,
const tunnel_t *tun, bool is_tun);
+extern void ipsec_sa_update_runtime (ipsec_sa_t *sa);
extern int ipsec_sa_add_and_lock (
u32 id, u32 spi, ipsec_protocol_t proto, ipsec_crypto_alg_t crypto_alg,
const ipsec_key_t *ck, ipsec_integ_alg_t integ_alg, const ipsec_key_t *ik,
@@ -327,29 +336,29 @@ extern uword unformat_ipsec_key (unformat_input_t *input, va_list *args);
* Anti Replay definitions
*/
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE(_sa) \
- (u32) (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (_sa)) ? \
- clib_bitmap_bytes (_sa->replay_window_huge) * 8 : \
- BITS (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE(_irt) \
+ (u32) (PREDICT_FALSE (_irt->anti_reply_huge) ? \
+ clib_bitmap_bytes (_irt->replay_window_huge) * 8 : \
+ BITS (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN(_sa, _is_huge) \
- (u32) (_is_huge ? clib_bitmap_bytes (_sa->replay_window_huge) * 8 : \
- BITS (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN(_irt, _is_huge) \
+ (u32) (_is_huge ? clib_bitmap_bytes (_irt->replay_window_huge) * 8 : \
+ BITS (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN(_sa) \
- (u64) (PREDICT_FALSE (ipsec_sa_is_set_ANTI_REPLAY_HUGE (_sa)) ? \
- clib_bitmap_count_set_bits (_sa->replay_window_huge) : \
- count_set_bits (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN(_irt) \
+ (u64) (PREDICT_FALSE (_irt->anti_reply_huge) ? \
+ clib_bitmap_count_set_bits (_irt->replay_window_huge) : \
+ count_set_bits (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN(_sa, _is_huge) \
- (u64) (_is_huge ? clib_bitmap_count_set_bits (_sa->replay_window_huge) : \
- count_set_bits (_sa->replay_window))
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN(_irt, _is_huge) \
+ (u64) (_is_huge ? clib_bitmap_count_set_bits (_irt->replay_window_huge) : \
+ count_set_bits (_irt->replay_window))
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX(_sa) \
- (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_sa) - 1)
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX(_irt) \
+ (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_irt) - 1)
-#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX_KNOWN_WIN(_sa, _is_huge) \
- (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_sa, _is_huge) - 1)
+#define IPSEC_SA_ANTI_REPLAY_WINDOW_MAX_INDEX_KNOWN_WIN(_irt, _is_huge) \
+ (u32) (IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (_irt, _is_huge) - 1)
/*
* sequence number less than the lower bound are outside of the window
@@ -364,23 +373,23 @@ extern uword unformat_ipsec_key (unformat_input_t *input, va_list *args);
IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (_sa, _is_huge) + 1)
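/* Worked example (illustrative only): for the default window the size macro
 * evaluates to BITS (replay_window) == 64, so the highest index is 63 and
 * the lower bound for head sequence S is S - 63; a huge window configured
 * with 1024 bits yields 1024 and S - 1023 respectively. */
always_inline u32
example_window_lower_bound (ipsec_sa_inb_rt_t *irt, u32 head_seq,
			    bool is_huge)
{
  u32 sz = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, is_huge);
  /* sequence numbers strictly below this value are outside the window */
  return head_seq - sz + 1;
}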
always_inline u64
-ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_t *sa)
+ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_inb_rt_t *irt)
{
- if (!ipsec_sa_is_set_ANTI_REPLAY_HUGE (sa))
- return sa->replay_window;
+ if (!irt->anti_reply_huge)
+ return irt->replay_window;
u64 w;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (sa);
- u32 tl_win_index = sa->seq & (window_size - 1);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE (irt);
+ u32 tl_win_index = irt->seq & (window_size - 1);
if (PREDICT_TRUE (tl_win_index >= 63))
- return clib_bitmap_get_multiple (sa->replay_window_huge, tl_win_index - 63,
- 64);
+ return clib_bitmap_get_multiple (irt->replay_window_huge,
+ tl_win_index - 63, 64);
- w = clib_bitmap_get_multiple_no_check (sa->replay_window_huge, 0,
+ w = clib_bitmap_get_multiple_no_check (irt->replay_window_huge, 0,
tl_win_index + 1)
<< (63 - tl_win_index);
- w |= clib_bitmap_get_multiple_no_check (sa->replay_window_huge,
+ w |= clib_bitmap_get_multiple_no_check (irt->replay_window_huge,
window_size - 63 + tl_win_index,
63 - tl_win_index);
@@ -388,18 +397,19 @@ ipsec_sa_anti_replay_get_64b_window (const ipsec_sa_t *sa)
}
always_inline int
-ipsec_sa_anti_replay_check (const ipsec_sa_t *sa, u32 seq, bool ar_huge)
+ipsec_sa_anti_replay_check (const ipsec_sa_inb_rt_t *irt, u32 seq,
+ bool ar_huge)
{
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
/* we assume that the packet is in the window.
* if the packet falls left (sa->seq - seq >= window size),
* the result is wrong */
if (ar_huge)
- return clib_bitmap_get (sa->replay_window_huge, seq & (window_size - 1));
+ return clib_bitmap_get (irt->replay_window_huge, seq & (window_size - 1));
else
- return (sa->replay_window >> (window_size + seq - sa->seq - 1)) & 1;
+ return (irt->replay_window >> (window_size + seq - irt->seq - 1)) & 1;
return 0;
}
@@ -419,36 +429,36 @@ ipsec_sa_anti_replay_check (const ipsec_sa_t *sa, u32 seq, bool ar_huge)
* the high sequence number is set.
*/
always_inline int
-ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
+ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_inb_rt_t *irt, u32 seq,
u32 hi_seq_used, bool post_decrypt,
u32 *hi_seq_req, bool ar_huge)
{
ASSERT ((post_decrypt == false) == (hi_seq_req != 0));
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
u32 window_lower_bound =
- IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND_KNOWN_WIN (sa, ar_huge);
+ IPSEC_SA_ANTI_REPLAY_WINDOW_LOWER_BOUND_KNOWN_WIN (irt, ar_huge);
- if (!ipsec_sa_is_set_USE_ESN (sa))
+ if (!irt->use_esn)
{
if (hi_seq_req)
/* no ESN, therefore the hi-seq is always 0 */
*hi_seq_req = 0;
- if (!ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ if (!irt->use_anti_replay)
return 0;
- if (PREDICT_TRUE (seq > sa->seq))
+ if (PREDICT_TRUE (seq > irt->seq))
return 0;
/* does the packet fall out on the left of the window */
- if (sa->seq >= seq + window_size)
+ if (irt->seq >= seq + window_size)
return 1;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
- if (!ipsec_sa_is_set_USE_ANTI_REPLAY (sa))
+ if (!irt->use_anti_replay)
{
/* there's no AR configured for this SA, but in order
* to know whether a packet has wrapped the hi ESN we need
@@ -463,20 +473,20 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
*/
if (hi_seq_req)
{
- if (seq >= sa->seq)
+ if (seq >= irt->seq)
/* The packet's sequence number is larger than the SA's.
* that can't be a wrap - unless we lost more than
* 2^32 packets ... how could we know? */
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
else
{
/* The packet's SN is less than the SA's, so either the SN has
* wrapped or the SN is just old. */
- if (sa->seq - seq > (1 << 30))
+ if (irt->seq - seq > (1 << 30))
/* It's really really really old => it wrapped */
- *hi_seq_req = sa->seq_hi + 1;
+ *hi_seq_req = irt->seq_hi + 1;
else
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
}
}
/*
@@ -486,7 +496,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
return 0;
}
- if (PREDICT_TRUE (window_size > 0 && sa->seq >= window_size - 1))
+ if (PREDICT_TRUE (window_size > 0 && irt->seq >= window_size - 1))
{
/*
* the last sequence number VPP received is more than one
@@ -503,7 +513,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
*/
if (post_decrypt)
{
- if (hi_seq_used == sa->seq_hi)
+ if (hi_seq_used == irt->seq_hi)
/* the high sequence number used to successfully decrypt this
* packet is the same as the last-sequence number of the SA.
* that means this packet did not cause a wrap.
@@ -520,7 +530,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
/* pre-decrypt it might be the packet that causes a wrap; we
* need to decrypt it to find out */
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi + 1;
+ *hi_seq_req = irt->seq_hi + 1;
return 0;
}
}
@@ -531,13 +541,13 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
* end of the window.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
- if (seq <= sa->seq)
+ *hi_seq_req = irt->seq_hi;
+ if (seq <= irt->seq)
/*
* The received seq number is within bounds of the window
* check if it's a duplicate
*/
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
else
/*
* The received sequence number is greater than the window
@@ -562,15 +572,15 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
/*
* the sequence number is less than the lower bound.
*/
- if (seq <= sa->seq)
+ if (seq <= irt->seq)
{
/*
* the packet is within the window upper bound.
* check for duplicates.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ *hi_seq_req = irt->seq_hi;
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
else
{
@@ -584,7 +594,7 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
* we've lost close to 2^32 packets.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi;
+ *hi_seq_req = irt->seq_hi;
return 0;
}
}
@@ -597,8 +607,8 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
* received packet, the SA has moved on to a higher sequence number.
*/
if (hi_seq_req)
- *hi_seq_req = sa->seq_hi - 1;
- return ipsec_sa_anti_replay_check (sa, seq, ar_huge);
+ *hi_seq_req = irt->seq_hi - 1;
+ return ipsec_sa_anti_replay_check (irt, seq, ar_huge);
}
}
@@ -608,19 +618,20 @@ ipsec_sa_anti_replay_and_sn_advance (const ipsec_sa_t *sa, u32 seq,
}
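/* Illustrative restatement (hypothetical helper, not part of the patch) of
 * the pre-decrypt ESN guess above for the case where anti-replay is
 * disabled: if the low sequence number went backwards by more than 2^30 the
 * packet is assumed to have wrapped the high sequence number. */
always_inline u32
example_guess_hi_seq (const ipsec_sa_inb_rt_t *irt, u32 seq)
{
  if (seq >= irt->seq)
    return irt->seq_hi;
  return (irt->seq - seq > (1 << 30)) ? irt->seq_hi + 1 : irt->seq_hi;
}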
always_inline u32
-ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
+ipsec_sa_anti_replay_window_shift (ipsec_sa_inb_rt_t *irt, u32 inc,
+ bool ar_huge)
{
u32 n_lost = 0;
u32 seen = 0;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
if (inc < window_size)
{
if (ar_huge)
{
/* the number of packets we saw in this section of the window */
- clib_bitmap_t *window = sa->replay_window_huge;
- u32 window_lower_bound = (sa->seq + 1) & (window_size - 1);
+ clib_bitmap_t *window = irt->replay_window_huge;
+ u32 window_lower_bound = (irt->seq + 1) & (window_size - 1);
u32 window_next_lower_bound =
(window_lower_bound + inc) & (window_size - 1);
@@ -706,7 +717,7 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
}
clib_bitmap_set_no_check (window,
- (sa->seq + inc) & (window_size - 1), 1);
+ (irt->seq + inc) & (window_size - 1), 1);
}
else
{
@@ -715,11 +726,11 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
* of the window that we will right shift off the end
* as a result of this increment
*/
- u64 old = sa->replay_window & pow2_mask (inc);
+ u64 old = irt->replay_window & pow2_mask (inc);
/* the number of packets we saw in this section of the window */
seen = count_set_bits (old);
- sa->replay_window =
- ((sa->replay_window) >> inc) | (1ULL << (window_size - 1));
+ irt->replay_window =
+ ((irt->replay_window) >> inc) | (1ULL << (window_size - 1));
}
/*
@@ -732,7 +743,7 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
{
/* holes in the replay window are lost packets */
n_lost = window_size -
- IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN (sa, ar_huge);
+ IPSEC_SA_ANTI_REPLAY_WINDOW_N_SEEN_KNOWN_WIN (irt, ar_huge);
/* any sequence numbers that now fall outside the window
* are forever lost */
@@ -740,13 +751,13 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
if (PREDICT_FALSE (ar_huge))
{
- clib_bitmap_zero (sa->replay_window_huge);
- clib_bitmap_set_no_check (sa->replay_window_huge,
- (sa->seq + inc) & (window_size - 1), 1);
+ clib_bitmap_zero (irt->replay_window_huge);
+ clib_bitmap_set_no_check (irt->replay_window_huge,
+ (irt->seq + inc) & (window_size - 1), 1);
}
else
{
- sa->replay_window = 1ULL << (window_size - 1);
+ irt->replay_window = 1ULL << (window_size - 1);
}
}
@@ -763,65 +774,65 @@ ipsec_sa_anti_replay_window_shift (ipsec_sa_t *sa, u32 inc, bool ar_huge)
* the branch cost.
*/
always_inline u64
-ipsec_sa_anti_replay_advance (ipsec_sa_t *sa, u32 thread_index, u32 seq,
- u32 hi_seq, bool ar_huge)
+ipsec_sa_anti_replay_advance (ipsec_sa_inb_rt_t *irt, u32 thread_index,
+ u32 seq, u32 hi_seq, bool ar_huge)
{
u64 n_lost = 0;
- u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (sa, ar_huge);
+ u32 window_size = IPSEC_SA_ANTI_REPLAY_WINDOW_SIZE_KNOWN_WIN (irt, ar_huge);
u32 pos;
- if (ipsec_sa_is_set_USE_ESN (sa))
+ if (irt->use_esn)
{
- int wrap = hi_seq - sa->seq_hi;
+ int wrap = hi_seq - irt->seq_hi;
- if (wrap == 0 && seq > sa->seq)
+ if (wrap == 0 && seq > irt->seq)
{
- pos = seq - sa->seq;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
+ pos = seq - irt->seq;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
}
else if (wrap > 0)
{
- pos = seq + ~sa->seq + 1;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
- sa->seq_hi = hi_seq;
+ pos = seq + ~irt->seq + 1;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
+ irt->seq_hi = hi_seq;
}
else if (wrap < 0)
{
- pos = ~seq + sa->seq + 1;
+ pos = ~seq + irt->seq + 1;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
else
{
- pos = sa->seq - seq;
+ pos = irt->seq - seq;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
}
else
{
- if (seq > sa->seq)
+ if (seq > irt->seq)
{
- pos = seq - sa->seq;
- n_lost = ipsec_sa_anti_replay_window_shift (sa, pos, ar_huge);
- sa->seq = seq;
+ pos = seq - irt->seq;
+ n_lost = ipsec_sa_anti_replay_window_shift (irt, pos, ar_huge);
+ irt->seq = seq;
}
else
{
- pos = sa->seq - seq;
+ pos = irt->seq - seq;
if (ar_huge)
- clib_bitmap_set_no_check (sa->replay_window_huge,
+ clib_bitmap_set_no_check (irt->replay_window_huge,
seq & (window_size - 1), 1);
else
- sa->replay_window |= (1ULL << (window_size - 1 - pos));
+ irt->replay_window |= (1ULL << (window_size - 1 - pos));
}
}
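/* Hedged usage sketch (hypothetical helper): once integrity has been
 * verified, a decrypt path is expected to re-check the window post-decrypt
 * and only then slide it, roughly as follows: */
always_inline u64
example_post_decrypt_update (ipsec_sa_inb_rt_t *irt, u32 thread_index,
			     u32 seq, u32 hi_seq)
{
  bool huge = irt->anti_reply_huge;
  /* non-zero means duplicate or outside the window: drop, don't advance */
  if (ipsec_sa_anti_replay_and_sn_advance (irt, seq, hi_seq, true, NULL,
					   huge))
    return ~0ULL; /* hypothetical "replay detected" marker */
  /* otherwise slide the window and account for sequence numbers now lost */
  return ipsec_sa_anti_replay_advance (irt, thread_index, seq, hi_seq, huge);
}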
@@ -840,12 +851,6 @@ ipsec_sa_assign_thread (u16 thread_id)
: (unix_time_now_nsec () % vlib_num_workers ()) + 1);
}
-always_inline ipsec_sa_t *
-ipsec_sa_get (u32 sa_index)
-{
- return (pool_elt_at_index (ipsec_sa_pool, sa_index));
-}
-
#endif /* __IPSEC_SPD_SA_H__ */
/*
diff --git a/src/vnet/ipsec/ipsec_tun.c b/src/vnet/ipsec/ipsec_tun.c
index 5fb07b3ba09..28702bdec47 100644
--- a/src/vnet/ipsec/ipsec_tun.c
+++ b/src/vnet/ipsec/ipsec_tun.c
@@ -470,6 +470,7 @@ ipsec_tun_protect_set_crypto_addr (ipsec_tun_protect_t * itp)
if (!(itp->itp_flags & IPSEC_PROTECT_ITF))
{
ipsec_sa_set_IS_PROTECT (sa);
+ ipsec_sa_update_runtime (sa);
itp->itp_flags |= IPSEC_PROTECT_ENCAPED;
}
}
@@ -497,7 +498,11 @@ ipsec_tun_protect_config (ipsec_main_t * im,
ipsec_sa_lock (itp->itp_out_sa);
if (itp->itp_flags & IPSEC_PROTECT_ITF)
- ipsec_sa_set_NO_ALGO_NO_DROP (ipsec_sa_get (itp->itp_out_sa));
+ {
+ ipsec_sa_t *sa = ipsec_sa_get (itp->itp_out_sa);
+ ipsec_sa_set_NO_ALGO_NO_DROP (sa);
+ ipsec_sa_update_runtime (sa);
+ }
FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
({
@@ -523,12 +528,16 @@ ipsec_tun_protect_unconfig (ipsec_main_t * im, ipsec_tun_protect_t * itp)
FOR_EACH_IPSEC_PROTECT_INPUT_SA(itp, sa,
({
ipsec_sa_unset_IS_PROTECT (sa);
+ ipsec_sa_update_runtime (sa);
}));
ipsec_tun_protect_rx_db_remove (im, itp);
ipsec_tun_protect_tx_db_remove (itp);
- ipsec_sa_unset_NO_ALGO_NO_DROP (ipsec_sa_get (itp->itp_out_sa));
+ sa = ipsec_sa_get (itp->itp_out_sa);
+ ipsec_sa_unset_NO_ALGO_NO_DROP (sa);
+ ipsec_sa_update_runtime (sa);
+
ipsec_sa_unlock(itp->itp_out_sa);
FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
diff --git a/src/vnet/ipsec/main.c b/src/vnet/ipsec/main.c
index e17d1dc5cfe..0a01797e066 100644
--- a/src/vnet/ipsec/main.c
+++ b/src/vnet/ipsec/main.c
@@ -61,6 +61,7 @@ ipsec_main_t ipsec_main = {
.alg = VNET_CRYPTO_ALG_AES_128_CTR,
.iv_size = 8,
.block_align = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_CTR_192] = {
@@ -69,6 +70,7 @@ ipsec_main_t ipsec_main = {
.alg = VNET_CRYPTO_ALG_AES_192_CTR,
.iv_size = 8,
.block_align = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_CTR_256] = {
@@ -77,6 +79,7 @@ ipsec_main_t ipsec_main = {
.alg = VNET_CRYPTO_ALG_AES_256_CTR,
.iv_size = 8,
.block_align = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_GCM_128] = {
@@ -86,6 +89,8 @@ ipsec_main_t ipsec_main = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_GCM_192] = {
@@ -95,6 +100,8 @@ ipsec_main_t ipsec_main = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_AES_GCM_256] = {
@@ -104,6 +111,8 @@ ipsec_main_t ipsec_main = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_aead = 1,
+ .is_ctr = 1,
},
[IPSEC_CRYPTO_ALG_CHACHA20_POLY1305] = {
@@ -112,6 +121,8 @@ ipsec_main_t ipsec_main = {
.alg = VNET_CRYPTO_ALG_CHACHA20_POLY1305,
.iv_size = 8,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
},
[IPSEC_CRYPTO_ALG_AES_NULL_GMAC_128] = {
@@ -121,6 +132,9 @@ ipsec_main_t ipsec_main = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
},
[IPSEC_CRYPTO_ALG_AES_NULL_GMAC_192] = {
@@ -130,6 +144,9 @@ ipsec_main_t ipsec_main = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
},
[IPSEC_CRYPTO_ALG_AES_NULL_GMAC_256] = {
@@ -139,6 +156,9 @@ ipsec_main_t ipsec_main = {
.iv_size = 8,
.block_align = 1,
.icv_size = 16,
+ .is_ctr = 1,
+ .is_aead = 1,
+ .is_null_gmac = 1,
},
},
.integ_algs = {
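/* Hedged sketch (assumption, not shown in this hunk): the per-algorithm
 * flags added above are presumably what ipsec_sa_init_runtime() /
 * ipsec_sa_update_runtime() copy into the per-SA runtimes, roughly: */
static void
example_copy_alg_flags (const ipsec_main_crypto_alg_t *alg,
			ipsec_sa_inb_rt_t *irt, ipsec_sa_outb_rt_t *ort)
{
  irt->is_aead = alg->is_aead;
  irt->is_ctr = alg->is_ctr;
  irt->is_null_gmac = alg->is_null_gmac;
  irt->cipher_iv_size = alg->iv_size;
  ort->is_aead = alg->is_aead;
  ort->is_ctr = alg->is_ctr;
  ort->is_null_gmac = alg->is_null_gmac;
  ort->cipher_iv_size = alg->iv_size;
  ort->esp_block_align = alg->block_align;
}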
diff --git a/src/vnet/l2/l2_input_node.c b/src/vnet/l2/l2_input_node.c
index 76b94809eb3..58a541756da 100644
--- a/src/vnet/l2/l2_input_node.c
+++ b/src/vnet/l2/l2_input_node.c
@@ -215,7 +215,10 @@ classify_and_dispatch (l2input_main_t * msm, vlib_buffer_t * b0, u16 * next0)
vnet_buffer (b0)->sw_if_index[VLIB_TX] = config->output_sw_if_index;
}
else
- feat_mask = L2INPUT_FEAT_DROP;
+ {
+ *next0 = L2INPUT_NEXT_DROP;
+ return;
+ }
/* mask out features from bitmap using packet type and bd config */
u32 feature_bitmap = config->feature_bitmap & feat_mask;
diff --git a/src/vnet/qos/qos_store.c b/src/vnet/qos/qos_store.c
index 3424a914e35..8875585f199 100644
--- a/src/vnet/qos/qos_store.c
+++ b/src/vnet/qos/qos_store.c
@@ -181,7 +181,7 @@ qos_store_cli (vlib_main_t * vm,
enable = 1;
else if (unformat (input, "disable"))
enable = 0;
- else if (unformat (input, "value &d", &value))
+ else if (unformat (input, "value %d", &value))
;
else
break;
diff --git a/src/vpp/conf/80-vpp.conf b/src/vpp/conf/80-vpp.conf
index 2207e2e3824..33230236eb4 100644
--- a/src/vpp/conf/80-vpp.conf
+++ b/src/vpp/conf/80-vpp.conf
@@ -1,8 +1,13 @@
# Number of 2MB hugepages desired
vm.nr_hugepages=1024
-# Must be greater than or equal to (2 * vm.nr_hugepages).
-vm.max_map_count=3096
+# vm.max_map_count must be greater than or equal to (2 * vm.nr_hugepages).
+
+# The system default is often an order of magnitude greater than the
+# value below. If you uncomment this stanza and reboot as-is, watch
+# out for seemingly "random" severe application failures, known to
+# occur in Brave, Firefox, and VirtualBox, to name but a few.
+# vm.max_map_count=3096
# All groups allowed to access hugepages
vm.hugetlb_shm_group=0
@@ -12,4 +17,6 @@ vm.hugetlb_shm_group=0
# If the existing kernel.shmmax setting (cat /proc/sys/kernel/shmmax)
# is greater than the calculated TotalHugepageSize then set this parameter
# to current shmmax value.
-kernel.shmmax=2147483648
+# The Linux default is 4278190079; you don't need to change it unless you
+# configure more than 2039 2MB hugepages.
+# kernel.shmmax=2147483648
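# Worked example (illustrative): with vm.nr_hugepages=1024 the total
# hugepage size is 1024 * 2MB = 2147483648 bytes (the commented shmmax value
# above), vm.max_map_count only needs to exceed 2 * 1024 = 2048, and the
# kernel default shmmax of 4278190079 bytes already covers up to 2039 2MB
# hugepages.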
diff --git a/src/vppinfra/clib.h b/src/vppinfra/clib.h
index 5348738ec6a..cb90da5c1e0 100644
--- a/src/vppinfra/clib.h
+++ b/src/vppinfra/clib.h
@@ -39,6 +39,7 @@
#define included_clib_h
#include <stddef.h>
+#include <stdalign.h>
#if __has_include(<vppinfra/config.h>)
#include <vppinfra/config.h>