author    Neale Ranns <nranns@cisco.com>    2019-02-07 07:26:12 -0800
committer Damjan Marion <dmarion@me.com>    2019-06-18 13:54:35 +0000
commit    c87b66c86201458c0475d50c6e93f1497f9eec2e (patch)
tree      57bf69c2adb85a93b26a86b5a1110e4290e7f391 /src/vnet/ipsec
parent    097fa66b986f06281f603767d321ab13ab6c88c3 (diff)
ipsec: ipsec-tun protect
please consult the new tunnel proposal at:
https://wiki.fd.io/view/VPP/IPSec

Type: feature
Change-Id: I52857fc92ae068b85f59be08bdbea1bd5932e291
Signed-off-by: Neale Ranns <nranns@cisco.com>
Diffstat (limited to 'src/vnet/ipsec')
 src/vnet/ipsec/ah_decrypt.c   |   7
 src/vnet/ipsec/esp_decrypt.c  | 120
 src/vnet/ipsec/esp_encrypt.c  |  26
 src/vnet/ipsec/ipsec.api      |  76
 src/vnet/ipsec/ipsec.h        |   2
 src/vnet/ipsec/ipsec_api.c    | 139
 src/vnet/ipsec/ipsec_cli.c    | 125
 src/vnet/ipsec/ipsec_format.c |  35
 src/vnet/ipsec/ipsec_if.c     |  84
 src/vnet/ipsec/ipsec_if.h     |  13
 src/vnet/ipsec/ipsec_io.h     |   2
 src/vnet/ipsec/ipsec_sa.c     |  25
 src/vnet/ipsec/ipsec_sa.h     |  10
 src/vnet/ipsec/ipsec_tun.c    | 398
 src/vnet/ipsec/ipsec_tun.h    | 114
 src/vnet/ipsec/ipsec_tun_in.c | 436
 16 files changed, 1481 insertions(+), 131 deletions(-)
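
For orientation before the per-file hunks: the debug CLI added in ipsec_cli.c below drives the new feature. A minimal sketch of a session (the interface name ipip0 and the SA ids 10/20 are illustrative only; the SAs must already exist and the interface must belong to a tunnel type that supplies an ip_tun_desc callback):

    ipsec tunnel protect ipip0 sa-in 20 sa-out 10
    show ipsec protect
    clear ipsec sa

The sa-in/sa-out arguments are SA ids, which ipsec_tun_protect_update() resolves to pool indices internally; "clear ipsec sa" resets the per-SA counters introduced alongside this change.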
diff --git a/src/vnet/ipsec/ah_decrypt.c b/src/vnet/ipsec/ah_decrypt.c
index d27d42384d5..741fa91b95c 100644
--- a/src/vnet/ipsec/ah_decrypt.c
+++ b/src/vnet/ipsec/ah_decrypt.c
@@ -27,8 +27,7 @@
#define foreach_ah_decrypt_next \
_ (DROP, "error-drop") \
_ (IP4_INPUT, "ip4-input") \
- _ (IP6_INPUT, "ip6-input") \
- _ (IPSEC_GRE_INPUT, "ipsec-gre-input")
+ _ (IP6_INPUT, "ip6-input")
#define _(v, s) AH_DECRYPT_NEXT_##v,
typedef enum
@@ -371,10 +370,6 @@ ah_decrypt_inline (vlib_main_t * vm,
}
}
- /* for IPSec-GRE tunnel next node is ipsec-gre-input */
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_GRE (sa0)))
- next[0] = AH_DECRYPT_NEXT_IPSEC_GRE_INPUT;
-
vnet_buffer (b[0])->sw_if_index[VLIB_TX] = (u32) ~ 0;
trace:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
diff --git a/src/vnet/ipsec/esp_decrypt.c b/src/vnet/ipsec/esp_decrypt.c
index 710c8f17762..48f08f42e33 100644
--- a/src/vnet/ipsec/esp_decrypt.c
+++ b/src/vnet/ipsec/esp_decrypt.c
@@ -22,12 +22,12 @@
#include <vnet/ipsec/ipsec.h>
#include <vnet/ipsec/esp.h>
#include <vnet/ipsec/ipsec_io.h>
+#include <vnet/ipsec/ipsec_tun.h>
#define foreach_esp_decrypt_next \
_(DROP, "error-drop") \
_(IP4_INPUT, "ip4-input-no-checksum") \
-_(IP6_INPUT, "ip6-input") \
-_(IPSEC_GRE_INPUT, "ipsec-gre-input")
+_(IP6_INPUT, "ip6-input")
#define _(v, s) ESP_DECRYPT_NEXT_##v,
typedef enum
@@ -93,7 +93,7 @@ typedef struct
{
u8 icv_sz;
u8 iv_sz;
- ipsec_sa_flags_t flags:8;
+ ipsec_sa_flags_t flags;
u32 sa_index;
};
u64 sa_data;
@@ -111,7 +111,7 @@ STATIC_ASSERT_SIZEOF (esp_decrypt_packet_data_t, 2 * sizeof (u64));
always_inline uword
esp_decrypt_inline (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * from_frame,
- int is_ip6)
+ int is_ip6, int is_tun)
{
ipsec_main_t *im = &ipsec_main;
u32 thread_index = vm->thread_index;
@@ -378,7 +378,7 @@ esp_decrypt_inline (vlib_main_t * vm,
u16 adv = pd->iv_sz + esp_sz;
u16 tail = sizeof (esp_footer_t) + f->pad_length + pd->icv_sz;
- if ((pd->flags & tun_flags) == 0) /* transport mode */
+ if ((pd->flags & tun_flags) == 0 && !is_tun) /* transport mode */
{
u8 udp_sz = (is_ip6 == 0 && pd->flags & IPSEC_SA_FLAG_UDP_ENCAP) ?
sizeof (udp_header_t) : 0;
@@ -437,12 +437,50 @@ esp_decrypt_inline (vlib_main_t * vm,
{
next[0] = ESP_DECRYPT_NEXT_DROP;
b[0]->error = node->errors[ESP_DECRYPT_ERROR_DECRYPTION_FAILED];
+ goto trace;
+ }
+ if (is_tun)
+ {
+ if (ipsec_sa_is_set_IS_PROTECT (sa0))
+ {
+ /*
+               * Check that the revealed IP header matches that
+ * of the tunnel we are protecting
+ */
+ const ipsec_tun_protect_t *itp;
+
+ itp =
+ ipsec_tun_protect_get (vnet_buffer (b[0])->
+ ipsec.protect_index);
+ if (PREDICT_TRUE (f->next_header == IP_PROTOCOL_IP_IN_IP))
+ {
+ const ip4_header_t *ip4;
+
+ ip4 = vlib_buffer_get_current (b[0]);
+
+ if (!ip46_address_is_equal_v4 (&itp->itp_tun.src,
+ &ip4->dst_address) ||
+ !ip46_address_is_equal_v4 (&itp->itp_tun.dst,
+ &ip4->src_address))
+ next[0] = ESP_DECRYPT_NEXT_DROP;
+
+ }
+ else if (f->next_header == IP_PROTOCOL_IPV6)
+ {
+ const ip6_header_t *ip6;
+
+ ip6 = vlib_buffer_get_current (b[0]);
+
+ if (!ip46_address_is_equal_v6 (&itp->itp_tun.src,
+ &ip6->dst_address) ||
+ !ip46_address_is_equal_v6 (&itp->itp_tun.dst,
+ &ip6->src_address))
+ next[0] = ESP_DECRYPT_NEXT_DROP;
+ }
+ }
}
}
- if (PREDICT_FALSE (ipsec_sa_is_set_IS_GRE (sa0)))
- next[0] = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;
-
trace:
if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
@@ -477,7 +515,28 @@ VLIB_NODE_FN (esp4_decrypt_node) (vlib_main_t * vm,
vlib_node_runtime_t * node,
vlib_frame_t * from_frame)
{
- return esp_decrypt_inline (vm, node, from_frame, 0 /* is_ip6 */ );
+ return esp_decrypt_inline (vm, node, from_frame, 0, 0);
+}
+
+VLIB_NODE_FN (esp4_decrypt_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_decrypt_inline (vm, node, from_frame, 0, 1);
+}
+
+VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_decrypt_inline (vm, node, from_frame, 1, 0);
+}
+
+VLIB_NODE_FN (esp6_decrypt_tun_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return esp_decrypt_inline (vm, node, from_frame, 1, 1);
}
/* *INDENT-OFF* */
@@ -497,16 +556,7 @@ VLIB_REGISTER_NODE (esp4_decrypt_node) = {
#undef _
},
};
-/* *INDENT-ON* */
-VLIB_NODE_FN (esp6_decrypt_node) (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
-{
- return esp_decrypt_inline (vm, node, from_frame, 1 /* is_ip6 */ );
-}
-
-/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp6_decrypt_node) = {
.name = "esp6-decrypt",
.vector_size = sizeof (u32),
@@ -523,6 +573,40 @@ VLIB_REGISTER_NODE (esp6_decrypt_node) = {
#undef _
},
};
+
+VLIB_REGISTER_NODE (esp4_decrypt_tun_node) = {
+ .name = "esp4-decrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
+
+VLIB_REGISTER_NODE (esp6_decrypt_tun_node) = {
+ .name = "esp6-decrypt-tun",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
/* *INDENT-ON* */
/*
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index d7cda052c92..cf485482c0e 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -408,12 +408,18 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
ip_hdr = payload - hdr_len;
/* L2 header */
- l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
- hdr_len += l2_len;
- l2_hdr = payload - hdr_len;
+ if (!is_tun)
+ {
+ l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
+ hdr_len += l2_len;
+ l2_hdr = payload - hdr_len;
+
+ /* copy l2 and ip header */
+ clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
+ }
+ else
+ l2_len = 0;
- /* copy l2 and ip header */
- clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
if (is_ip6)
@@ -440,7 +446,8 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
}
- next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ if (!is_tun)
+ next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
}
esp->spi = spi;
@@ -618,6 +625,13 @@ VNET_FEATURE_INIT (esp4_encrypt_tun_feat_node, static) =
.node_name = "esp4-encrypt-tun",
.runs_before = VNET_FEATURES ("adj-midchain-tx"),
};
+
+VNET_FEATURE_INIT (esp4_ethernet_encrypt_tun_feat_node, static) =
+{
+ .arc_name = "ethernet-output",
+ .node_name = "esp4-encrypt-tun",
+ .runs_before = VNET_FEATURES ("adj-midchain-tx", "adj-midchain-tx-no-count"),
+};
/* *INDENT-ON* */
VLIB_NODE_FN (esp6_encrypt_tun_node) (vlib_main_t * vm,
diff --git a/src/vnet/ipsec/ipsec.api b/src/vnet/ipsec/ipsec.api
index bb9e8056251..12bdad0f9c3 100644
--- a/src/vnet/ipsec/ipsec.api
+++ b/src/vnet/ipsec/ipsec.api
@@ -17,6 +17,7 @@
option version = "3.0.0";
import "vnet/ip/ip_types.api";
+import "vnet/interface_types.api";
/** \brief IPsec: Add/delete Security Policy Database
@param client_index - opaque cookie to identify the sender
@@ -305,6 +306,81 @@ define ipsec_sad_entry_add_del_reply
u32 stat_index;
};
+/** \brief Add or Update Protection for a tunnel with IPSEC
+
+ Tunnel protection directly associates an SA with all packets
+ ingress and egress on the tunnel. This could also be achieved by
+ assigning an SPD to the tunnel, but that would incur an unnecessary
+ SPD entry lookup.
+
+ For tunnels the ESP acts on the post-encapsulated packet. So if this
+ packet:
+ +---------+------+
+ | Payload | O-IP |
+ +---------+------+
+ where O-IP is the overlay IP address that was routed into the tunnel,
+ the resulting encapsulated packet will be:
+ +---------+------+------+
+ | Payload | O-IP | T-IP |
+ +---------+------+------+
+ where T-IP are the tunnel's src,dst IP addresses.
+ If the SAs used for protection are in transport mode then the ESP is
+ inserted before T-IP, i.e.:
+ +---------+------+-----+------+
+ | Payload | O-IP | ESP | T-IP |
+ +---------+------+-----+------+
+ If the SAs used for protection are in tunnel mode then another
+ encapsulation occurs, i.e.:
+ +---------+------+------+-----+------+
+ | Payload | O-IP | T-IP | ESP | C-IP |
+ +---------+------+------+-----+------+
+ where C-IP are the crypto endpoint IP addresses defined as the tunnel
+ endpoints in the SA.
+ The mode for the inbound and outbound SA must be the same.
+
+ @param client_index - opaque cookie to identify the sender
+ @param context - sender context, to match reply w/ request
+ @param sw_if_index - Tunnel interface to protect
+ @param sa_in - The set of IDs of the inbound SAs
+ @param sa_out - The ID of the outbound SA
+*/
+typedef ipsec_tunnel_protect
+{
+ vl_api_interface_index_t sw_if_index;
+ u32 sa_out;
+ u8 n_sa_in;
+ u32 sa_in[n_sa_in];
+};
+
+autoreply define ipsec_tunnel_protect_update
+{
+ u32 client_index;
+ u32 context;
+
+ vl_api_ipsec_tunnel_protect_t tunnel;
+};
+
+autoreply define ipsec_tunnel_protect_del
+{
+ u32 client_index;
+ u32 context;
+
+ vl_api_interface_index_t sw_if_index;
+};
+
+define ipsec_tunnel_protect_dump
+{
+ u32 client_index;
+ u32 context;
+ vl_api_interface_index_t sw_if_index;
+};
+
+define ipsec_tunnel_protect_details
+{
+ u32 context;
+ vl_api_ipsec_tunnel_protect_t tun;
+};
+
/** \brief IPsec: Get SPD interfaces
@param client_index - opaque cookie to identify the sender
@param context - sender context, to match reply w/ request
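
The messages above map onto ipsec_tun_protect_update()/ipsec_tun_protect_del(), added in ipsec_tun.c further down. A minimal in-VPP sketch of programming protection directly from C (the helper name and the SA ids 10/20 are illustrative, not part of this patch):

    #include <vnet/ipsec/ipsec_tun.h>

    static clib_error_t *
    protect_example (u32 sw_if_index)
    {
      u32 *sa_ins = NULL;
      int rv;

      /* arguments are SA *ids* (not pool indices); the update resolves
       * them via ipsec_get_sa_index_by_sa_id() and, on success, takes
       * ownership of (frees) the inbound-SA vector */
      vec_add1 (sa_ins, 20);                        /* inbound SA id */
      rv = ipsec_tun_protect_update (sw_if_index, 10 /* outbound SA id */,
                                     sa_ins);
      if (rv)
        return clib_error_return (0, "tunnel protect failed: %d", rv);
      return NULL;
    }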
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 45576b3c779..c77d0fe7dd8 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -115,6 +115,8 @@ typedef struct
uword *ipsec6_if_pool_index_by_key;
uword *ipsec_if_real_dev_by_show_dev;
uword *ipsec_if_by_sw_if_index;
+ uword *tun4_protect_by_key;
+ uword *tun6_protect_by_key;
/* node indices */
u32 error_drop_node_index;
diff --git a/src/vnet/ipsec/ipsec_api.c b/src/vnet/ipsec/ipsec_api.c
index 2c7c0d9626d..99e25f1b17a 100644
--- a/src/vnet/ipsec/ipsec_api.c
+++ b/src/vnet/ipsec/ipsec_api.c
@@ -30,6 +30,7 @@
#if WITH_LIBSSL > 0
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec_tun.h>
#endif /* IPSEC */
#define vl_typedefs /* define message structures */
@@ -60,7 +61,10 @@ _(IPSEC_SPD_INTERFACE_DUMP, ipsec_spd_interface_dump) \
_(IPSEC_TUNNEL_IF_ADD_DEL, ipsec_tunnel_if_add_del) \
_(IPSEC_TUNNEL_IF_SET_SA, ipsec_tunnel_if_set_sa) \
_(IPSEC_SELECT_BACKEND, ipsec_select_backend) \
-_(IPSEC_BACKEND_DUMP, ipsec_backend_dump)
+_(IPSEC_BACKEND_DUMP, ipsec_backend_dump) \
+_(IPSEC_TUNNEL_PROTECT_UPDATE, ipsec_tunnel_protect_update) \
+_(IPSEC_TUNNEL_PROTECT_DEL, ipsec_tunnel_protect_del) \
+_(IPSEC_TUNNEL_PROTECT_DUMP, ipsec_tunnel_protect_dump)
static void
vl_api_ipsec_spd_add_del_t_handler (vl_api_ipsec_spd_add_del_t * mp)
@@ -104,6 +108,132 @@ static void vl_api_ipsec_interface_add_del_spd_t_handler
REPLY_MACRO (VL_API_IPSEC_INTERFACE_ADD_DEL_SPD_REPLY);
}
+static void vl_api_ipsec_tunnel_protect_update_t_handler
+ (vl_api_ipsec_tunnel_protect_update_t * mp)
+{
+ vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main ();
+ vl_api_ipsec_tunnel_protect_update_reply_t *rmp;
+ u32 sw_if_index, ii, *sa_ins = NULL;
+ int rv;
+
+ sw_if_index = ntohl (mp->tunnel.sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (&(mp->tunnel));
+
+#if WITH_LIBSSL > 0
+
+ for (ii = 0; ii < mp->tunnel.n_sa_in; ii++)
+ vec_add1 (sa_ins, ntohl (mp->tunnel.sa_in[ii]));
+
+ rv = ipsec_tun_protect_update (sw_if_index,
+ ntohl (mp->tunnel.sa_out), sa_ins);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_IPSEC_TUNNEL_PROTECT_UPDATE_REPLY);
+}
+
+static void vl_api_ipsec_tunnel_protect_del_t_handler
+ (vl_api_ipsec_tunnel_protect_del_t * mp)
+{
+ vlib_main_t *vm __attribute__ ((unused)) = vlib_get_main ();
+ vl_api_ipsec_tunnel_protect_del_reply_t *rmp;
+ int rv;
+ u32 sw_if_index;
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ VALIDATE_SW_IF_INDEX (mp);
+
+#if WITH_LIBSSL > 0
+ rv = ipsec_tun_protect_del (sw_if_index);
+#else
+ rv = VNET_API_ERROR_UNIMPLEMENTED;
+#endif
+
+ BAD_SW_IF_INDEX_LABEL;
+
+ REPLY_MACRO (VL_API_IPSEC_TUNNEL_PROTECT_DEL_REPLY);
+}
+
+typedef struct ipsec_tunnel_protect_walk_ctx_t_
+{
+ vl_api_registration_t *reg;
+ u32 context;
+} ipsec_tunnel_protect_walk_ctx_t;
+
+static walk_rc_t
+send_ipsec_tunnel_protect_details (index_t itpi, void *arg)
+{
+ ipsec_tunnel_protect_walk_ctx_t *ctx = arg;
+ vl_api_ipsec_tunnel_protect_details_t *mp;
+ ipsec_tun_protect_t *itp;
+ u32 sai, ii = 0;
+
+ itp = ipsec_tun_protect_get (itpi);
+
+
+ mp = vl_msg_api_alloc (sizeof (*mp) + (sizeof (u32) * itp->itp_n_sa_in));
+ clib_memset (mp, 0, sizeof (*mp));
+ mp->_vl_msg_id = ntohs (VL_API_IPSEC_TUNNEL_PROTECT_DETAILS);
+ mp->context = ctx->context;
+
+ mp->tun.sw_if_index = htonl (itp->itp_sw_if_index);
+
+ mp->tun.sa_out = htonl (itp->itp_out_sa);
+ mp->tun.n_sa_in = itp->itp_n_sa_in;
+ /* *INDENT-OFF* */
+ FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
+ ({
+ mp->tun.sa_in[ii++] = htonl (sai);
+ }));
+ /* *INDENT-ON* */
+
+ vl_api_send_msg (ctx->reg, (u8 *) mp);
+
+ return (WALK_CONTINUE);
+}
+
+static void
+vl_api_ipsec_tunnel_protect_dump_t_handler (vl_api_ipsec_tunnel_protect_dump_t
+ * mp)
+{
+ vl_api_registration_t *reg;
+ u32 sw_if_index;
+
+#if WITH_LIBSSL > 0
+ reg = vl_api_client_index_to_registration (mp->client_index);
+ if (!reg)
+ return;
+
+ ipsec_tunnel_protect_walk_ctx_t ctx = {
+ .reg = reg,
+ .context = mp->context,
+ };
+
+ sw_if_index = ntohl (mp->sw_if_index);
+
+ if (~0 == sw_if_index)
+ {
+ ipsec_tun_protect_walk (send_ipsec_tunnel_protect_details, &ctx);
+ }
+ else
+ {
+ index_t itpi;
+
+ itpi = ipsec_tun_protect_find (sw_if_index);
+
+ if (INDEX_INVALID != itpi)
+ send_ipsec_tunnel_protect_details (itpi, &ctx);
+ }
+#else
+ clib_warning ("unimplemented");
+#endif
+}
+
static int
ipsec_spd_action_decode (vl_api_ipsec_spd_action_t in,
ipsec_policy_action_t * out)
@@ -879,6 +1009,13 @@ ipsec_api_hookup (vlib_main_t * vm)
#undef _
/*
+   * Adding and deleting SAs is MP safe since when they are added/deleted
+ * no traffic is using them
+ */
+ am->is_mp_safe[VL_API_IPSEC_SAD_ENTRY_ADD_DEL] = 1;
+ am->is_mp_safe[VL_API_IPSEC_SAD_ENTRY_ADD_DEL_REPLY] = 1;
+
+ /*
* Set up the (msg_name, crc, message-id) table
*/
setup_message_id_table (am);
diff --git a/src/vnet/ipsec/ipsec_cli.c b/src/vnet/ipsec/ipsec_cli.c
index 4172e104a98..1648179bc20 100644
--- a/src/vnet/ipsec/ipsec_cli.c
+++ b/src/vnet/ipsec/ipsec_cli.c
@@ -22,6 +22,7 @@
#include <vnet/fib/fib.h>
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec_tun.h>
static clib_error_t *
set_interface_spd_command_fn (vlib_main_t * vm,
@@ -105,7 +106,7 @@ ipsec_sa_add_del_command_fn (vlib_main_t * vm,
is_add = 0;
else if (unformat (line_input, "spi %u", &spi))
;
- else if (unformat (line_input, "salt %u", &salt))
+ else if (unformat (line_input, "salt 0x%x", &salt))
;
else if (unformat (line_input, "esp"))
proto = IPSEC_PROTOCOL_ESP;
@@ -446,12 +447,52 @@ show_ipsec_sa_command_fn (vlib_main_t * vm,
return 0;
}
+static clib_error_t *
+clear_ipsec_sa_command_fn (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ipsec_main_t *im = &ipsec_main;
+ u32 sai = ~0;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "%u", &sai))
+ ;
+ else
+ break;
+ }
+
+ if (~0 == sai)
+ {
+ /* *INDENT-OFF* */
+ pool_foreach_index (sai, im->sad, ({
+ ipsec_sa_clear(sai);
+ }));
+ /* *INDENT-ON* */
+ }
+ else
+ {
+ if (pool_is_free_index (im->sad, sai))
+ return clib_error_return (0, "unknown SA index: %d", sai);
+ else
+ ipsec_sa_clear (sai);
+ }
+
+ return 0;
+}
+
/* *INDENT-OFF* */
VLIB_CLI_COMMAND (show_ipsec_sa_command, static) = {
.path = "show ipsec sa",
.short_help = "show ipsec sa [index]",
.function = show_ipsec_sa_command_fn,
};
+
+VLIB_CLI_COMMAND (clear_ipsec_sa_command, static) = {
+ .path = "clear ipsec sa",
+ .short_help = "clear ipsec sa [index]",
+ .function = clear_ipsec_sa_command_fn,
+};
/* *INDENT-ON* */
static clib_error_t *
@@ -823,6 +864,88 @@ VLIB_CLI_COMMAND (create_ipsec_tunnel_command, static) = {
};
/* *INDENT-ON* */
+static clib_error_t *
+ipsec_tun_protect_cmd (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ unformat_input_t _line_input, *line_input = &_line_input;
+ u32 sw_if_index, is_del, sa_in, sa_out, *sa_ins = NULL;
+ vnet_main_t *vnm;
+
+ is_del = 0;
+ sw_if_index = ~0;
+ vnm = vnet_get_main ();
+
+ if (!unformat_user (input, unformat_line_input, line_input))
+ return 0;
+
+ while (unformat_check_input (line_input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (line_input, "del"))
+ is_del = 1;
+ else if (unformat (line_input, "add"))
+ is_del = 0;
+ else if (unformat (line_input, "sa-in %d", &sa_in))
+ vec_add1 (sa_ins, sa_in);
+ else if (unformat (line_input, "sa-out %d", &sa_out))
+ ;
+ else if (unformat (line_input, "%U",
+ unformat_vnet_sw_interface, vnm, &sw_if_index))
+ ;
+ else
+ return (clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, line_input));
+ }
+
+ if (!is_del)
+ ipsec_tun_protect_update (sw_if_index, sa_out, sa_ins);
+
+ unformat_free (line_input);
+ return NULL;
+}
+
+/**
+ * Protect tunnel with IPSEC
+ */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ipsec_tun_protect_cmd_node, static) =
+{
+ .path = "ipsec tunnel protect",
+ .function = ipsec_tun_protect_cmd,
+  .short_help = "ipsec tunnel protect <interface> sa-in <SA> [sa-in <SA> ...] sa-out <SA> [add|del]",
+ // this is not MP safe
+};
+/* *INDENT-ON* */
+
+static walk_rc_t
+ipsec_tun_protect_show_one (index_t itpi, void *ctx)
+{
+ vlib_cli_output (ctx, "%U", format_ipsec_tun_protect, itpi);
+
+ return (WALK_CONTINUE);
+}
+
+static clib_error_t *
+ipsec_tun_protect_show (vlib_main_t * vm,
+ unformat_input_t * input, vlib_cli_command_t * cmd)
+{
+ ipsec_tun_protect_walk (ipsec_tun_protect_show_one, vm);
+
+ return NULL;
+}
+
+/**
+ * show IPSEC tunnel protection
+ */
+/* *INDENT-OFF* */
+VLIB_CLI_COMMAND (ipsec_tun_protect_show_node, static) =
+{
+ .path = "show ipsec protect",
+ .function = ipsec_tun_protect_show,
+ .short_help = "show ipsec protect",
+};
+/* *INDENT-ON* */
+
clib_error_t *
ipsec_cli_init (vlib_main_t * vm)
{
diff --git a/src/vnet/ipsec/ipsec_format.c b/src/vnet/ipsec/ipsec_format.c
index d0d073bd2bb..1e6e2d5cb01 100644
--- a/src/vnet/ipsec/ipsec_format.c
+++ b/src/vnet/ipsec/ipsec_format.c
@@ -22,6 +22,7 @@
#include <vnet/fib/fib_table.h>
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/ipsec_tun.h>
u8 *
format_ipsec_policy_action (u8 * s, va_list * args)
@@ -368,6 +369,40 @@ done:
return (s);
}
+u8 *
+format_ipsec_tun_protect (u8 * s, va_list * args)
+{
+ u32 itpi = va_arg (*args, u32);
+ ipsec_tun_protect_t *itp;
+ u32 sai;
+
+ if (pool_is_free_index (ipsec_protect_pool, itpi))
+ {
+ s = format (s, "No such tunnel index: %d", itpi);
+ goto done;
+ }
+
+ itp = pool_elt_at_index (ipsec_protect_pool, itpi);
+
+ s = format (s, "%U", format_vnet_sw_if_index_name,
+ vnet_get_main (), itp->itp_sw_if_index);
+ s = format (s, "\n output-sa:");
+ s =
+ format (s, "\n %U", format_ipsec_sa, itp->itp_out_sa,
+ IPSEC_FORMAT_BRIEF);
+
+ s = format (s, "\n input-sa:");
+ /* *INDENT-OFF* */
+ FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
+ ({
+ s = format (s, "\n %U", format_ipsec_sa, sai, IPSEC_FORMAT_BRIEF);
+ }));
+ /* *INDENT-ON* */
+
+done:
+ return (s);
+}
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/vnet/ipsec/ipsec_if.c b/src/vnet/ipsec/ipsec_if.c
index 6627d295f1e..8e4f3f1359d 100644
--- a/src/vnet/ipsec/ipsec_if.c
+++ b/src/vnet/ipsec/ipsec_if.c
@@ -84,7 +84,7 @@ ipsec_if_tunnel_stack (adj_index_t ai)
}
/**
- * @brief Call back when restacking all adjacencies on a GRE interface
+ * @brief Call back when restacking all adjacencies on an IPSec interface
*/
static adj_walk_rc_t
ipsec_if_adj_walk_cb (adj_index_t ai, void *ctx)
@@ -100,7 +100,7 @@ ipsec_if_tunnel_restack (ipsec_tunnel_if_t * it)
fib_protocol_t proto;
/*
- * walk all the adjacencies on th GRE interface and restack them
+ * walk all the adjacencies on the IPSec interface and restack them
*/
FOR_EACH_FIB_IP_PROTOCOL (proto)
{
@@ -434,86 +434,6 @@ ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm,
}
int
-ipsec_add_del_ipsec_gre_tunnel (vnet_main_t * vnm,
- const ipsec_gre_tunnel_add_del_args_t * args)
-{
- ipsec_tunnel_if_t *t = 0;
- ipsec_main_t *im = &ipsec_main;
- uword *p;
- ipsec_sa_t *sa;
- ipsec4_tunnel_key_t key;
- u32 isa, osa;
-
- p = hash_get (im->sa_index_by_sa_id, args->local_sa_id);
- if (!p)
- return VNET_API_ERROR_INVALID_VALUE;
- osa = p[0];
- sa = pool_elt_at_index (im->sad, p[0]);
- ipsec_sa_set_IS_GRE (sa);
-
- p = hash_get (im->sa_index_by_sa_id, args->remote_sa_id);
- if (!p)
- return VNET_API_ERROR_INVALID_VALUE;
- isa = p[0];
- sa = pool_elt_at_index (im->sad, p[0]);
- ipsec_sa_set_IS_GRE (sa);
-
- /* we form the key from the input/remote SA whose tunnel is srouce
- * at the remote end */
- if (ipsec_sa_is_set_IS_TUNNEL (sa))
- {
- key.remote_ip = sa->tunnel_src_addr.ip4.as_u32;
- key.spi = clib_host_to_net_u32 (sa->spi);
- }
- else
- {
- key.remote_ip = args->src.as_u32;
- key.spi = clib_host_to_net_u32 (sa->spi);
- }
-
- p = hash_get (im->ipsec4_if_pool_index_by_key, key.as_u64);
-
- if (args->is_add)
- {
- /* check if same src/dst pair exists */
- if (p)
- return VNET_API_ERROR_INVALID_VALUE;
-
- pool_get_aligned (im->tunnel_interfaces, t, CLIB_CACHE_LINE_BYTES);
- clib_memset (t, 0, sizeof (*t));
-
- t->input_sa_index = isa;
- t->output_sa_index = osa;
- t->hw_if_index = ~0;
- hash_set (im->ipsec4_if_pool_index_by_key, key.as_u64,
- t - im->tunnel_interfaces);
-
- /*1st interface, register protocol */
- if (pool_elts (im->tunnel_interfaces) == 1)
- {
- ip4_register_protocol (IP_PROTOCOL_IPSEC_ESP,
- ipsec4_if_input_node.index);
- /* TBD, GRE IPSec6
- *
- ip6_register_protocol (IP_PROTOCOL_IPSEC_ESP,
- ipsec6_if_input_node.index);
- */
- }
- }
- else
- {
- /* check if exists */
- if (!p)
- return VNET_API_ERROR_INVALID_VALUE;
-
- t = pool_elt_at_index (im->tunnel_interfaces, p[0]);
- hash_unset (im->ipsec4_if_pool_index_by_key, key.as_u64);
- pool_put (im->tunnel_interfaces, t);
- }
- return 0;
-}
-
-int
ipsec_set_interface_sa (vnet_main_t * vnm, u32 hw_if_index, u32 sa_id,
u8 is_outbound)
{
diff --git a/src/vnet/ipsec/ipsec_if.h b/src/vnet/ipsec/ipsec_if.h
index 40867108293..042dddee880 100644
--- a/src/vnet/ipsec/ipsec_if.h
+++ b/src/vnet/ipsec/ipsec_if.h
@@ -84,23 +84,10 @@ typedef CLIB_PACKED
}) ipsec6_tunnel_key_t;
/* *INDENT-ON* */
-typedef struct
-{
- u8 is_add;
- u32 local_sa_id;
- u32 remote_sa_id;
- ip4_address_t src;
- ip4_address_t dst;
-} ipsec_gre_tunnel_add_del_args_t;
-
extern int ipsec_add_del_tunnel_if_internal (vnet_main_t * vnm,
ipsec_add_del_tunnel_args_t *
args, u32 * sw_if_index);
extern int ipsec_add_del_tunnel_if (ipsec_add_del_tunnel_args_t * args);
-extern int ipsec_add_del_ipsec_gre_tunnel (vnet_main_t * vnm,
- const
- ipsec_gre_tunnel_add_del_args_t *
- args);
extern int ipsec_set_interface_sa (vnet_main_t * vnm, u32 hw_if_index,
u32 sa_id, u8 is_outbound);
diff --git a/src/vnet/ipsec/ipsec_io.h b/src/vnet/ipsec/ipsec_io.h
index 32668a0474f..b17d3d7ecf0 100644
--- a/src/vnet/ipsec/ipsec_io.h
+++ b/src/vnet/ipsec/ipsec_io.h
@@ -30,9 +30,9 @@ typedef enum
_ (PUNT, "punt-dispatch") \
_ (DROP, "error-drop")
-#define _(v, s) IPSEC_INPUT_NEXT_##v,
typedef enum
{
+#define _(v, s) IPSEC_INPUT_NEXT_##v,
foreach_ipsec_input_next
#undef _
IPSEC_INPUT_N_NEXT,
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index e8a015957ce..afdecfee10d 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -17,6 +17,7 @@
#include <vnet/ipsec/esp.h>
#include <vnet/udp/udp.h>
#include <vnet/fib/fib_table.h>
+#include <vnet/ipsec/ipsec_tun.h>
/**
* @brief
@@ -292,7 +293,7 @@ ipsec_sa_del (u32 id)
{
clib_warning ("sa_id %u used in policy", sa->id);
/* sa used in policy */
- return VNET_API_ERROR_SYSCALL_ERROR_1;
+ return VNET_API_ERROR_RSRC_IN_USE;
}
hash_unset (im->sa_index_by_sa_id, sa->id);
err = ipsec_call_add_del_callbacks (im, sa, sa_index, 0);
@@ -313,12 +314,20 @@ ipsec_sa_del (u32 id)
return 0;
}
+void
+ipsec_sa_clear (index_t sai)
+{
+ vlib_zero_combined_counter (&ipsec_sa_counters, sai);
+}
+
u8
ipsec_is_sa_used (u32 sa_index)
{
ipsec_main_t *im = &ipsec_main;
+ ipsec_tun_protect_t *itp;
ipsec_tunnel_if_t *t;
ipsec_policy_t *p;
+ u32 sai;
/* *INDENT-OFF* */
pool_foreach(p, im->policies, ({
@@ -335,8 +344,20 @@ ipsec_is_sa_used (u32 sa_index)
if (t->output_sa_index == sa_index)
return 1;
}));
+
+ /* *INDENT-OFF* */
+ pool_foreach(itp, ipsec_protect_pool, ({
+ FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
+ ({
+ if (sai == sa_index)
+ return 1;
+ }));
+ if (itp->itp_out_sa == sa_index)
+ return 1;
+ }));
/* *INDENT-ON* */
+
return 0;
}
@@ -415,7 +436,7 @@ ipsec_sa_back_walk (fib_node_t * node, fib_node_back_walk_ctx_t * ctx)
}
/*
- * Virtual function table registered by MPLS GRE tunnels
+ * Virtual function table registered by SAs
* for participation in the FIB object graph.
*/
const static fib_node_vft_t ipsec_sa_vft = {
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index 53035aa2a20..284826772a0 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -93,7 +93,7 @@ typedef struct ipsec_key_t_
_ (4, IS_TUNNEL, "tunnel") \
_ (8, IS_TUNNEL_V6, "tunnel-v6") \
_ (16, UDP_ENCAP, "udp-encap") \
- _ (32, IS_GRE, "GRE") \
+ _ (32, IS_PROTECT, "Protect") \
_ (64, IS_INBOUND, "inboud") \
_ (128, IS_AEAD, "aead") \
@@ -183,6 +183,13 @@ foreach_ipsec_sa_flags
}
foreach_ipsec_sa_flags
#undef _
+#define _(a,v,s) \
+ always_inline int \
+ ipsec_sa_unset_##v (ipsec_sa_t *sa) { \
+ return (sa->flags &= ~IPSEC_SA_FLAG_##v); \
+ }
+ foreach_ipsec_sa_flags
+#undef _
/**
* @brief
* SA packet & bytes counters
@@ -205,6 +212,7 @@ extern int ipsec_sa_add (u32 id,
const ip46_address_t * tunnel_dst_addr,
u32 * sa_index);
extern u32 ipsec_sa_del (u32 id);
+extern void ipsec_sa_clear (index_t sai);
extern void ipsec_sa_set_crypto_alg (ipsec_sa_t * sa,
ipsec_crypto_alg_t crypto_alg);
extern void ipsec_sa_set_integ_alg (ipsec_sa_t * sa,
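
For reference, the _ block added above generates one unset accessor per flag, matching the pre-existing set/is_set generators; for the renamed IS_PROTECT flag it expands to roughly the following (a sketch of the preprocessor output, not new source):

    always_inline int
    ipsec_sa_unset_IS_PROTECT (ipsec_sa_t * sa)
    {
      return (sa->flags &= ~IPSEC_SA_FLAG_IS_PROTECT);
    }

ipsec_tun.c below uses ipsec_sa_set_IS_PROTECT()/ipsec_sa_unset_IS_PROTECT() as SAs are bound to and released from a protected tunnel.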
diff --git a/src/vnet/ipsec/ipsec_tun.c b/src/vnet/ipsec/ipsec_tun.c
new file mode 100644
index 00000000000..a389cefc991
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_tun.c
@@ -0,0 +1,398 @@
+/*
+ * ipsec_tun.c : IPSEC tunnel protection
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ipsec/esp.h>
+#include <vnet/udp/udp.h>
+
+/**
+ * Pool of tunnel protection objects
+ */
+ipsec_tun_protect_t *ipsec_protect_pool;
+
+/**
+ * DB of protected tunnels
+ */
+typedef struct ipsec_protect_db_t_
+{
+ u32 *tunnels;
+ u32 count;
+} ipsec_protect_db_t;
+
+static ipsec_protect_db_t ipsec_protect_db;
+
+static int
+ipsec_tun_protect_feature_set (ipsec_tun_protect_t * itp, u8 enable)
+{
+ u32 sai = itp->itp_out_sa;
+ int is_ip4, is_l2, rv;
+
+ is_ip4 = ip46_address_is_ip4 (&itp->itp_tun.src);
+ is_l2 = itp->itp_flags & IPSEC_PROTECT_L2;
+
+ if (is_ip4)
+ {
+ if (is_l2)
+ rv = vnet_feature_enable_disable ("ethernet-output",
+ "esp4-encrypt-tun",
+ itp->itp_sw_if_index, enable,
+ &sai, sizeof (sai));
+ else
+ rv = vnet_feature_enable_disable ("ip4-output",
+ "esp4-encrypt-tun",
+ itp->itp_sw_if_index, enable,
+ &sai, sizeof (sai));
+ }
+ else
+ {
+ if (is_l2)
+ rv = vnet_feature_enable_disable ("ethernet-output",
+ "esp6-encrypt-tun",
+ itp->itp_sw_if_index, enable,
+ &sai, sizeof (sai));
+ else
+ rv = vnet_feature_enable_disable ("ip6-output",
+ "esp6-encrypt-tun",
+ itp->itp_sw_if_index, enable,
+ &sai, sizeof (sai));
+ }
+
+ ASSERT (!rv);
+ return (rv);
+}
+
+static void
+ipsec_tun_protect_db_add (ipsec_main_t * im, const ipsec_tun_protect_t * itp)
+{
+ const ipsec_sa_t *sa;
+ u32 sai;
+
+ /* *INDENT-OFF* */
+ FOR_EACH_IPSEC_PROTECT_INPUT_SAI(itp, sai,
+ ({
+ sa = ipsec_sa_get (sai);
+
+ ipsec_tun_lkup_result_t res = {
+ .tun_index = itp - ipsec_protect_pool,
+ .sa_index = sai,
+ };
+
+ /*
+ * The key is formed from the tunnel's destination
+ * as the packet lookup is done from the packet's source
+ */
+ if (ip46_address_is_ip4 (&itp->itp_crypto.dst))
+ {
+ ipsec4_tunnel_key_t key = {
+ .remote_ip = itp->itp_crypto.dst.ip4.as_u32,
+ .spi = clib_host_to_net_u32 (sa->spi),
+ };
+ hash_set (im->tun4_protect_by_key, key.as_u64, res.as_u64);
+ }
+ else
+ {
+ ipsec6_tunnel_key_t key = {
+ .remote_ip = itp->itp_crypto.dst.ip6,
+ .spi = clib_host_to_net_u32 (sa->spi),
+ };
+ hash_set_mem_alloc (&im->tun6_protect_by_key, &key, res.as_u64);
+ }
+ }))
+ /* *INDENT-ON* */
+}
+
+static void
+ipsec_tun_protect_db_remove (ipsec_main_t * im,
+ const ipsec_tun_protect_t * itp)
+{
+ const ipsec_sa_t *sa;
+
+ /* *INDENT-OFF* */
+ FOR_EACH_IPSEC_PROTECT_INPUT_SA(itp, sa,
+ ({
+ if (ip46_address_is_ip4 (&itp->itp_crypto.dst))
+ {
+ ipsec4_tunnel_key_t key = {
+ .remote_ip = itp->itp_crypto.dst.ip4.as_u32,
+ .spi = clib_host_to_net_u32 (sa->spi),
+ };
+          hash_unset (im->tun4_protect_by_key, key.as_u64);
+ }
+ else
+ {
+ ipsec6_tunnel_key_t key = {
+ .remote_ip = itp->itp_crypto.dst.ip6,
+ .spi = clib_host_to_net_u32 (sa->spi),
+ };
+ hash_unset_mem_free (&im->tun6_protect_by_key, &key);
+ }
+ }))
+ /* *INDENT-ON* */
+}
+
+static void
+ipsec_tun_protect_config (ipsec_main_t * im,
+ ipsec_tun_protect_t * itp, u32 sa_out, u32 * sas_in)
+{
+ ipsec_sa_t *sa;
+ u32 ii;
+
+ itp->itp_n_sa_in = vec_len (sas_in);
+ for (ii = 0; ii < itp->itp_n_sa_in; ii++)
+ itp->itp_in_sas[ii] = sas_in[ii];
+ itp->itp_out_sa = sa_out;
+
+ /* *INDENT-OFF* */
+ FOR_EACH_IPSEC_PROTECT_INPUT_SA(itp, sa,
+ ({
+ if (ipsec_sa_is_set_IS_TUNNEL (sa))
+ {
+ itp->itp_crypto.src = sa->tunnel_dst_addr;
+ itp->itp_crypto.dst = sa->tunnel_src_addr;
+ ipsec_sa_set_IS_PROTECT (sa);
+ itp->itp_flags |= IPSEC_PROTECT_ENCAPED;
+ }
+ else
+ {
+ itp->itp_crypto.src = itp->itp_tun.src;
+ itp->itp_crypto.dst = itp->itp_tun.dst;
+ itp->itp_flags &= ~IPSEC_PROTECT_ENCAPED;
+ }
+ }));
+ /* *INDENT-ON* */
+
+ /*
+ * add to the DB against each SA
+ */
+ ipsec_tun_protect_db_add (im, itp);
+
+ /*
+ * enable the encrypt feature for egress.
+ */
+ ipsec_tun_protect_feature_set (itp, 1);
+
+}
+
+static void
+ipsec_tun_protect_unconfig (ipsec_main_t * im, ipsec_tun_protect_t * itp)
+{
+ ipsec_sa_t *sa;
+
+ ipsec_tun_protect_feature_set (itp, 0);
+
+ /* *INDENT-OFF* */
+ FOR_EACH_IPSEC_PROTECT_INPUT_SA(itp, sa,
+ ({
+ ipsec_sa_unset_IS_PROTECT (sa);
+ }));
+ /* *INDENT-ON* */
+
+ ipsec_tun_protect_db_remove (im, itp);
+}
+
+index_t
+ipsec_tun_protect_find (u32 sw_if_index)
+{
+  if (vec_len (ipsec_protect_db.tunnels) <= sw_if_index)
+ return (INDEX_INVALID);
+
+ return (ipsec_protect_db.tunnels[sw_if_index]);
+}
+
+int
+ipsec_tun_protect_update (u32 sw_if_index, u32 sa_out, u32 * sas_in)
+{
+ u32 itpi, ii;
+ ipsec_tun_protect_t *itp;
+ ipsec_main_t *im;
+ int rv;
+
+ rv = 0;
+ im = &ipsec_main;
+ vec_validate_init_empty (ipsec_protect_db.tunnels, sw_if_index,
+ INDEX_INVALID);
+ itpi = ipsec_protect_db.tunnels[sw_if_index];
+
+ vec_foreach_index (ii, sas_in)
+ {
+ sas_in[ii] = ipsec_get_sa_index_by_sa_id (sas_in[ii]);
+ if (~0 == sas_in[ii])
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+ }
+
+ sa_out = ipsec_get_sa_index_by_sa_id (sa_out);
+
+ if (~0 == sa_out)
+ {
+ rv = VNET_API_ERROR_INVALID_VALUE;
+ goto out;
+ }
+
+ if (INDEX_INVALID == itpi)
+ {
+ vnet_device_class_t *dev_class;
+ vnet_hw_interface_t *hi;
+ vnet_main_t *vnm;
+ u8 is_l2;
+
+ vnm = vnet_get_main ();
+ hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
+ dev_class = vnet_get_device_class (vnm, hi->dev_class_index);
+
+ if (NULL == dev_class->ip_tun_desc)
+ {
+ rv = VNET_API_ERROR_INVALID_SW_IF_INDEX;
+ goto out;
+ }
+
+ pool_get_zero (ipsec_protect_pool, itp);
+
+ itp->itp_sw_if_index = sw_if_index;
+ ipsec_protect_db.tunnels[sw_if_index] = itp - ipsec_protect_pool;
+ ipsec_protect_db.count++;
+
+ itp->itp_n_sa_in = vec_len (sas_in);
+ for (ii = 0; ii < itp->itp_n_sa_in; ii++)
+ itp->itp_in_sas[ii] = sas_in[ii];
+ itp->itp_out_sa = sa_out;
+
+ rv = dev_class->ip_tun_desc (sw_if_index,
+ &itp->itp_tun.src,
+ &itp->itp_tun.dst, &is_l2);
+
+ if (rv)
+ goto out;
+
+ if (is_l2)
+ itp->itp_flags |= IPSEC_PROTECT_L2;
+
+ /*
+ * add to the tunnel DB for ingress
+       * - if the SA is in transport mode, then the packets will arrive
+ * with the IP src,dst of the protected tunnel, in which case we can
+ * simply strip the IP header and hand the payload to the protocol
+ * appropriate input handler
+ * - if the SA is in tunnel mode then there are two IP headers present
+       * one for the crypto tunnel endpoints (described in the SA) and one
+       * for the tunnel endpoints. The outer IP headers in the arriving
+       * packets will have the crypto endpoints. So the DB needs to contain
+       * the crypto endpoint. Once the crypto header is stripped, revealing
+       * the tunnel IP, we have 2 choices:
+ * 1) do a tunnel lookup based on the revealed header
+ * 2) skip the tunnel lookup and assume that the packet matches the
+ * one that is protected here.
+ * If we did 1) then we would allow our peer to use the SA for tunnel
+ * X to inject traffic onto tunnel Y, this is not good. If we do 2)
+       * then we don't verify that the peer is indeed using the SA for tunnel
+       * X and addressing tunnel X. So we take a compromise: once the SA
+       * matches to tunnel X we verify that the inner IP matches the value
+ * of the tunnel we are protecting, else it's dropped.
+ */
+ ipsec_tun_protect_config (im, itp, sa_out, sas_in);
+
+ if (1 == hash_elts (im->tun4_protect_by_key))
+ ip4_register_protocol (IP_PROTOCOL_IPSEC_ESP,
+ ipsec4_tun_input_node.index);
+ if (1 == hash_elts (im->tun6_protect_by_key))
+ ip6_register_protocol (IP_PROTOCOL_IPSEC_ESP,
+ ipsec6_tun_input_node.index);
+ }
+ else
+ {
+ /* updating SAs only */
+ itp = pool_elt_at_index (ipsec_protect_pool, itpi);
+
+ ipsec_tun_protect_unconfig (im, itp);
+ ipsec_tun_protect_config (im, itp, sa_out, sas_in);
+ }
+
+ vec_free (sas_in);
+out:
+ return (rv);
+}
+
+int
+ipsec_tun_protect_del (u32 sw_if_index)
+{
+ ipsec_tun_protect_t *itp;
+ ipsec_main_t *im;
+ index_t itpi;
+
+ im = &ipsec_main;
+
+ vec_validate_init_empty (ipsec_protect_db.tunnels, sw_if_index,
+ INDEX_INVALID);
+ itpi = ipsec_protect_db.tunnels[sw_if_index];
+
+ if (INDEX_INVALID == itpi)
+ return (VNET_API_ERROR_NO_SUCH_ENTRY);
+
+ itp = ipsec_tun_protect_get (itpi);
+ ipsec_tun_protect_unconfig (im, itp);
+
+ ipsec_protect_db.tunnels[itp->itp_sw_if_index] = INDEX_INVALID;
+
+ pool_put (ipsec_protect_pool, itp);
+
+ /* if (0 == hash_elts (im->tun4_protect_by_key)) */
+ /* ip4_unregister_protocol (IP_PROTOCOL_IPSEC_ESP); */
+ /* if (0 == hash_elts (im->tun6_protect_by_key)) */
+ /* ip6_unregister_protocol (IP_PROTOCOL_IPSEC_ESP); */
+
+ return (0);
+}
+
+void
+ipsec_tun_protect_walk (ipsec_tun_protect_walk_cb_t fn, void *ctx)
+{
+ index_t itpi;
+
+ /* *INDENT-OFF* */
+ pool_foreach_index(itpi, ipsec_protect_pool,
+ ({
+ fn (itpi, ctx);
+ }));
+ /* *INDENT-ON* */
+}
+
+clib_error_t *
+ipsec_tunnel_protect_init (vlib_main_t * vm)
+{
+ ipsec_main_t *im;
+
+ im = &ipsec_main;
+ im->tun6_protect_by_key = hash_create_mem (0,
+ sizeof (ipsec6_tunnel_key_t),
+ sizeof (u64));
+ im->tun4_protect_by_key = hash_create (0, sizeof (u64));
+
+ return 0;
+}
+
+VLIB_INIT_FUNCTION (ipsec_tunnel_protect_init);
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
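
The hash populated by ipsec_tun_protect_db_add() above is keyed on the peer's (crypto destination) address plus SPI; the input node in ipsec_tun_in.c below rebuilds the same key from each received packet. A self-contained sketch of that IPv4 lookup, written here only to make the pairing explicit (the helper name is illustrative, not part of the patch):

    #include <vnet/ipsec/ipsec_tun.h>
    #include <vnet/ipsec/esp.h>

    /* Resolve the (tunnel, SA) pair for a received IPv4 ESP packet.
     * The DB stores the tunnel's crypto *destination*, i.e. the peer's
     * address, which is the source address of the packet on receipt. */
    static_always_inline int
    example_tun4_lookup (ipsec_main_t * im, const ip4_header_t * ip4,
                         const esp_header_t * esp,
                         ipsec_tun_lkup_result_t * res)
    {
      ipsec4_tunnel_key_t key = {
        .remote_ip = ip4->src_address.as_u32,
        .spi = esp->spi,        /* kept in network order, as in db_add */
      };
      uword *p = hash_get (im->tun4_protect_by_key, key.as_u64);

      if (!p)
        return 0;
      res->as_u64 = p[0];
      return 1;
    }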
diff --git a/src/vnet/ipsec/ipsec_tun.h b/src/vnet/ipsec/ipsec_tun.h
new file mode 100644
index 00000000000..be5cef9a8fc
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_tun.h
@@ -0,0 +1,114 @@
+/*
+ * ipsec_tun.h : IPSEC tunnel protection
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/ipsec/ipsec.h>
+
+typedef enum ipsec_protect_flags_t_
+{
+ IPSEC_PROTECT_L2 = (1 << 0),
+ IPSEC_PROTECT_ENCAPED = (1 << 1),
+} __clib_packed ipsec_protect_flags_t;
+
+typedef struct ipsec_ep_t_
+{
+ ip46_address_t src;
+ ip46_address_t dst;
+} ipsec_ep_t;
+
+typedef struct ipsec_tun_protect_t_
+{
+ CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
+ u32 itp_out_sa;
+
+ /* not using a vector since we want the memory inline
+ * with this struct */
+ u32 itp_n_sa_in;
+ u32 itp_in_sas[4];
+
+ u32 itp_sw_if_index;
+
+ ipsec_ep_t itp_crypto;
+
+ ipsec_protect_flags_t itp_flags;
+
+ ipsec_ep_t itp_tun;
+
+} ipsec_tun_protect_t;
+
+#define FOR_EACH_IPSEC_PROTECT_INPUT_SAI(_itp, _sai, body) \
+{ \
+ u32 __ii; \
+ for (__ii = 0; __ii < _itp->itp_n_sa_in; __ii++) { \
+    _sai = _itp->itp_in_sas[__ii];                       \
+ body; \
+ } \
+}
+#define FOR_EACH_IPSEC_PROTECT_INPUT_SA(_itp, _sa, body) \
+{ \
+ u32 __ii; \
+ for (__ii = 0; __ii < _itp->itp_n_sa_in; __ii++) { \
+    _sa = ipsec_sa_get(_itp->itp_in_sas[__ii]);          \
+ body; \
+ } \
+}
+
+extern int ipsec_tun_protect_update (u32 sw_if_index, u32 sa_out,
+ u32 sa_ins[2]);
+extern int ipsec_tun_protect_del (u32 sw_if_index);
+
+typedef walk_rc_t (*ipsec_tun_protect_walk_cb_t) (index_t itpi, void *arg);
+extern void ipsec_tun_protect_walk (ipsec_tun_protect_walk_cb_t fn,
+                                    void *ctx);
+extern index_t ipsec_tun_protect_find (u32 sw_if_index);
+
+extern u8 *format_ipsec_tun_protect (u8 * s, va_list * args);
+
+// FIXME
+extern vlib_node_registration_t ipsec4_tun_input_node;
+extern vlib_node_registration_t ipsec6_tun_input_node;
+
+/*
+ * DP API
+ */
+extern ipsec_tun_protect_t *ipsec_protect_pool;
+
+typedef struct ipsec_tun_lkup_result_t_
+{
+ union
+ {
+ struct
+ {
+ u32 tun_index;
+ u32 sa_index;
+ };
+ u64 as_u64;
+ };
+} ipsec_tun_lkup_result_t;
+
+always_inline ipsec_tun_protect_t *
+ipsec_tun_protect_get (u32 index)
+{
+ return (pool_elt_at_index (ipsec_protect_pool, index));
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
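
A short usage sketch of the DP API and iteration macro declared above (the helper name is illustrative; the real consumers in this patch are ipsec_api.c, ipsec_cli.c and ipsec_format.c):

    #include <vnet/ipsec/ipsec_tun.h>

    /* Count the inbound SAs protecting a tunnel interface,
     * or return ~0 if no protection is configured on it. */
    static u32
    example_n_sa_in (u32 sw_if_index)
    {
      index_t itpi = ipsec_tun_protect_find (sw_if_index);
      ipsec_tun_protect_t *itp;
      u32 sai, n = 0;

      if (INDEX_INVALID == itpi)
        return ~0;

      itp = ipsec_tun_protect_get (itpi);
      FOR_EACH_IPSEC_PROTECT_INPUT_SAI (itp, sai,
      ({
        (void) sai;   /* pool index of the SA; resolve with ipsec_sa_get() */
        n++;
      }));
      return n;
    }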
diff --git a/src/vnet/ipsec/ipsec_tun_in.c b/src/vnet/ipsec/ipsec_tun_in.c
new file mode 100644
index 00000000000..2ce1691b242
--- /dev/null
+++ b/src/vnet/ipsec/ipsec_tun_in.c
@@ -0,0 +1,436 @@
+/*
+ * ipsec_tun_in.c : IPSec tunnel protection input node
+ *
+ * Copyright (c) 2015 Cisco and/or its affiliates.
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at:
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include <vnet/vnet.h>
+#include <vnet/api_errno.h>
+#include <vnet/ip/ip.h>
+
+#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/esp.h>
+#include <vnet/ipsec/ipsec_io.h>
+#include <vnet/ipsec/ipsec_punt.h>
+#include <vnet/ipsec/ipsec_tun.h>
+#include <vnet/ip/ip4_input.h>
+
+/* Statistics (not really errors) */
+#define foreach_ipsec_tun_protect_input_error \
+ _(RX, "good packets received") \
+ _(DISABLED, "ipsec packets received on disabled interface") \
+ _(NO_TUNNEL, "no matching tunnel") \
+ _(TUNNEL_MISMATCH, "SPI-tunnel mismatch") \
+ _(SPI_0, "SPI 0")
+
+static char *ipsec_tun_protect_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_ipsec_tun_protect_input_error
+#undef _
+};
+
+typedef enum
+{
+#define _(sym,str) IPSEC_TUN_PROTECT_INPUT_ERROR_##sym,
+ foreach_ipsec_tun_protect_input_error
+#undef _
+ IPSEC_TUN_PROTECT_INPUT_N_ERROR,
+} ipsec_tun_protect_input_error_t;
+
+typedef enum ipsec_tun_next_t_
+{
+#define _(v, s) IPSEC_TUN_PROTECT_NEXT_##v,
+ foreach_ipsec_input_next
+#undef _
+ IPSEC_TUN_PROTECT_NEXT_DECRYPT,
+ IPSEC_TUN_PROTECT_N_NEXT,
+} ipsec_tun_next_t;
+
+typedef struct
+{
+ u32 spi;
+ u32 seq;
+} ipsec_tun_protect_input_trace_t;
+
+static u8 *
+format_ipsec_tun_protect_input_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+ ipsec_tun_protect_input_trace_t *t =
+ va_arg (*args, ipsec_tun_protect_input_trace_t *);
+
+ s = format (s, "IPSec: spi %u seq %u", t->spi, t->seq);
+ return s;
+}
+
+always_inline u16
+ipsec_ip4_if_no_tunnel (vlib_node_runtime_t * node,
+ vlib_buffer_t * b,
+ const esp_header_t * esp, const ip4_header_t * ip4)
+{
+ if (PREDICT_FALSE (0 == esp->spi))
+ {
+ b->error = node->errors[IPSEC_TUN_PROTECT_INPUT_ERROR_SPI_0];
+ b->punt_reason = ipsec_punt_reason[(ip4->protocol == IP_PROTOCOL_UDP ?
+ IPSEC_PUNT_IP4_SPI_UDP_0 :
+ IPSEC_PUNT_IP4_SPI_0)];
+ }
+ else
+ {
+ b->error = node->errors[IPSEC_TUN_PROTECT_INPUT_ERROR_NO_TUNNEL];
+ b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP4_NO_SUCH_TUNNEL];
+ }
+ return IPSEC_INPUT_NEXT_PUNT;
+}
+
+always_inline u16
+ipsec_ip6_if_no_tunnel (vlib_node_runtime_t * node,
+ vlib_buffer_t * b, const esp_header_t * esp)
+{
+ if (PREDICT_FALSE (0 == esp->spi))
+ {
+      b->error = node->errors[IPSEC_TUN_PROTECT_INPUT_ERROR_SPI_0];
+ b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP6_SPI_0];
+ }
+ else
+ {
+ b->error = node->errors[IPSEC_TUN_PROTECT_INPUT_ERROR_NO_TUNNEL];
+ b->punt_reason = ipsec_punt_reason[IPSEC_PUNT_IP6_NO_SUCH_TUNNEL];
+ }
+ return (IPSEC_INPUT_NEXT_PUNT);
+}
+
+always_inline uword
+ipsec_tun_protect_input_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame, int is_ip6)
+{
+ ipsec_main_t *im = &ipsec_main;
+ vnet_main_t *vnm = im->vnet_main;
+ vnet_interface_main_t *vim = &vnm->interface_main;
+
+ int is_trace = node->flags & VLIB_NODE_FLAG_TRACE;
+ u32 thread_index = vm->thread_index;
+
+ u32 n_left_from, *from;
+ u16 nexts[VLIB_FRAME_SIZE], *next;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b;
+
+ from = vlib_frame_vector_args (from_frame);
+ n_left_from = from_frame->n_vectors;
+
+ vlib_get_buffers (vm, from, bufs, n_left_from);
+ b = bufs;
+ next = nexts;
+
+ clib_memset_u16 (nexts, im->esp4_decrypt_next_index, n_left_from);
+
+ u64 n_bytes = 0, n_packets = 0;
+ u32 n_disabled = 0, n_no_tunnel = 0;
+
+ u32 last_sw_if_index = ~0;
+ ipsec_tun_lkup_result_t last_result = {
+ .tun_index = ~0
+ };
+ ipsec4_tunnel_key_t last_key4;
+ ipsec6_tunnel_key_t last_key6;
+
+ vlib_combined_counter_main_t *rx_counter;
+ vlib_combined_counter_main_t *drop_counter;
+ ipsec_tun_protect_t *itp0;
+
+ if (is_ip6)
+ clib_memset (&last_key6, 0xff, sizeof (last_key6));
+ else
+ last_key4.as_u64 = ~0;
+
+ rx_counter = vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_RX;
+ drop_counter = vim->combined_sw_if_counters + VNET_INTERFACE_COUNTER_DROP;
+
+ while (n_left_from > 0)
+ {
+ u32 sw_if_index0, len0, hdr_sz0;
+ ipsec_tun_lkup_result_t itr0;
+ ipsec4_tunnel_key_t key40;
+ ipsec6_tunnel_key_t key60;
+ ip4_header_t *ip40;
+ ip6_header_t *ip60;
+ esp_header_t *esp0;
+
+ ip40 = vlib_buffer_get_current (b[0]);
+
+ if (is_ip6)
+ {
+ ip60 = (ip6_header_t *) ip40;
+ esp0 = (esp_header_t *) (ip60 + 1);
+ hdr_sz0 = sizeof (ip6_header_t);
+ }
+ else
+ {
+ /* NAT UDP port 4500 case, don't advance any more */
+ if (ip40->protocol == IP_PROTOCOL_UDP)
+ {
+ esp0 =
+ (esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40) +
+ sizeof (udp_header_t));
+ hdr_sz0 = 0;
+ }
+ else
+ {
+ esp0 = (esp_header_t *) ((u8 *) ip40 + ip4_header_bytes (ip40));
+ hdr_sz0 = ip4_header_bytes (ip40);
+ }
+ }
+
+      /* stats for the tunnel include all the data after the IP header,
+         just like a normal IP-IP tunnel */
+ vlib_buffer_advance (b[0], hdr_sz0);
+ len0 = vlib_buffer_length_in_chain (vm, b[0]);
+
+ if (is_ip6)
+ {
+ key60.remote_ip = ip60->src_address;
+ key60.spi = esp0->spi;
+
+ if (memcmp (&key60, &last_key6, sizeof (last_key6)) == 0)
+ {
+ itr0 = last_result;
+ }
+ else
+ {
+ uword *p = hash_get_mem (im->tun6_protect_by_key, &key60);
+ if (p)
+ {
+ itr0.as_u64 = p[0];
+ last_result = itr0;
+ clib_memcpy_fast (&last_key6, &key60, sizeof (key60));
+ }
+ else
+ {
+ next[0] = ipsec_ip6_if_no_tunnel (node, b[0], esp0);
+ n_no_tunnel++;
+ goto trace00;
+ }
+ }
+ }
+ else
+ {
+ key40.remote_ip = ip40->src_address.as_u32;
+ key40.spi = esp0->spi;
+
+ if (key40.as_u64 == last_key4.as_u64)
+ {
+ itr0 = last_result;
+ }
+ else
+ {
+ uword *p = hash_get (im->tun4_protect_by_key, key40.as_u64);
+ if (p)
+ {
+ itr0.as_u64 = p[0];
+ last_result = itr0;
+ last_key4.as_u64 = key40.as_u64;
+ }
+ else
+ {
+ next[0] = ipsec_ip4_if_no_tunnel (node, b[0], esp0, ip40);
+ n_no_tunnel++;
+ goto trace00;
+ }
+ }
+ }
+
+ itp0 = pool_elt_at_index (ipsec_protect_pool, itr0.tun_index);
+ vnet_buffer (b[0])->ipsec.sad_index = itr0.sa_index;
+ vnet_buffer (b[0])->ipsec.protect_index = itr0.tun_index;
+
+ sw_if_index0 = itp0->itp_sw_if_index;
+ vnet_buffer (b[0])->sw_if_index[VLIB_RX] = sw_if_index0;
+
+ if (PREDICT_FALSE (!vnet_sw_interface_is_admin_up (vnm, sw_if_index0)))
+ {
+ vlib_increment_combined_counter
+ (drop_counter, thread_index, sw_if_index0, 1, len0);
+ n_disabled++;
+ b[0]->error = node->errors[IPSEC_TUN_PROTECT_INPUT_ERROR_DISABLED];
+ next[0] = IPSEC_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+ else
+ {
+ if (PREDICT_TRUE (sw_if_index0 == last_sw_if_index))
+ {
+ n_packets++;
+ n_bytes += len0;
+ }
+ else
+ {
+ if (n_packets && !(itp0->itp_flags & IPSEC_PROTECT_ENCAPED))
+ {
+ vlib_increment_combined_counter
+ (rx_counter, thread_index, last_sw_if_index,
+ n_packets, n_bytes);
+ }
+
+ last_sw_if_index = sw_if_index0;
+ n_packets = 1;
+ n_bytes = len0;
+ }
+
+ /*
+ * compare the packet's outer IP headers to that of the tunnels
+ */
+ if (is_ip6)
+ {
+ if (PREDICT_FALSE
+ (!ip46_address_is_equal_v6
+ (&itp0->itp_crypto.dst, &ip60->src_address)
+ || !ip46_address_is_equal_v6 (&itp0->itp_crypto.src,
+ &ip60->dst_address)))
+ {
+ b[0]->error =
+ node->errors
+ [IPSEC_TUN_PROTECT_INPUT_ERROR_TUNNEL_MISMATCH];
+ next[0] = IPSEC_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+ }
+ else
+ {
+ if (PREDICT_FALSE
+ (!ip46_address_is_equal_v4
+ (&itp0->itp_crypto.dst, &ip40->src_address)
+ || !ip46_address_is_equal_v4 (&itp0->itp_crypto.src,
+ &ip40->dst_address)))
+ {
+ b[0]->error =
+ node->errors
+ [IPSEC_TUN_PROTECT_INPUT_ERROR_TUNNEL_MISMATCH];
+ next[0] = IPSEC_INPUT_NEXT_DROP;
+ goto trace00;
+ }
+ }
+
+ /*
+ * There are two encap possibilities
+           * 1) the tunnel and the SA are providing encap, i.e. it's
+ * MAC | SA-IP | TUN-IP | ESP | PAYLOAD
+ * implying the SA is in tunnel mode (on a tunnel interface)
+ * 2) only the tunnel provides encap
+ * MAC | TUN-IP | ESP | PAYLOAD
+ * implying the SA is in transport mode.
+ *
+ * For 2) we need only strip the tunnel encap and we're good.
+           * since the tunnel and crypto encap (in the tun-protect
+ * object) are the same and we verified above that these match
+ * for 1) we need to strip the SA-IP outer headers, to
+ * reveal the tunnel IP and then check that this matches
+           * the configured tunnel. This we can't do here since it
+           * involves a lookup in the per-tunnel-type DB - so ship
+           * the packet to the node provided by the tunnel type to do that
+ */
+ next[0] = IPSEC_TUN_PROTECT_NEXT_DECRYPT;
+ }
+ trace00:
+ if (PREDICT_FALSE (is_trace))
+ {
+ if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
+ {
+ ipsec_tun_protect_input_trace_t *tr =
+ vlib_add_trace (vm, node, b[0], sizeof (*tr));
+ tr->spi = clib_host_to_net_u32 (esp0->spi);
+ tr->seq = clib_host_to_net_u32 (esp0->seq);
+ }
+ }
+
+ /* next */
+ b += 1;
+ next += 1;
+ n_left_from -= 1;
+ }
+
+ if (n_packets && !(itp0->itp_flags & IPSEC_PROTECT_ENCAPED))
+ {
+ vlib_increment_combined_counter (rx_counter,
+ thread_index,
+ last_sw_if_index, n_packets, n_bytes);
+ }
+
+ vlib_node_increment_counter (vm, node->node_index,
+ IPSEC_TUN_PROTECT_INPUT_ERROR_RX,
+ from_frame->n_vectors - (n_disabled +
+ n_no_tunnel));
+
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, from_frame->n_vectors);
+
+ return from_frame->n_vectors;
+}
+
+VLIB_NODE_FN (ipsec4_tun_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return ipsec_tun_protect_input_inline (vm, node, from_frame,
+ 0 /* is_ip6 */ );
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec4_tun_input_node) = {
+ .name = "ipsec4-tun-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_tun_protect_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(ipsec_tun_protect_input_error_strings),
+ .error_strings = ipsec_tun_protect_input_error_strings,
+ .n_next_nodes = IPSEC_TUN_PROTECT_N_NEXT,
+ .next_nodes = {
+ [IPSEC_TUN_PROTECT_NEXT_DROP] = "ip4-drop",
+ [IPSEC_TUN_PROTECT_NEXT_PUNT] = "punt-dispatch",
+ [IPSEC_TUN_PROTECT_NEXT_DECRYPT] = "esp4-decrypt-tun",
+ }
+};
+/* *INDENT-ON* */
+
+VLIB_NODE_FN (ipsec6_tun_input_node) (vlib_main_t * vm,
+ vlib_node_runtime_t * node,
+ vlib_frame_t * from_frame)
+{
+ return ipsec_tun_protect_input_inline (vm, node, from_frame,
+ 1 /* is_ip6 */ );
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (ipsec6_tun_input_node) = {
+ .name = "ipsec6-tun-input",
+ .vector_size = sizeof (u32),
+ .format_trace = format_ipsec_tun_protect_input_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+ .n_errors = ARRAY_LEN(ipsec_tun_protect_input_error_strings),
+ .error_strings = ipsec_tun_protect_input_error_strings,
+ .n_next_nodes = IPSEC_TUN_PROTECT_N_NEXT,
+ .next_nodes = {
+ [IPSEC_TUN_PROTECT_NEXT_DROP] = "ip6-drop",
+ [IPSEC_TUN_PROTECT_NEXT_PUNT] = "punt-dispatch",
+ [IPSEC_TUN_PROTECT_NEXT_DECRYPT] = "esp6-decrypt-tun",
+ }
+};
+/* *INDENT-ON* */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */