author     Damjan Marion <damarion@cisco.com>   2019-03-19 15:38:40 +0100
committer  Neale Ranns <nranns@cisco.com>       2019-03-26 10:31:01 +0000
commit     c59b9a26ed9a6bc083db2868b6993add6fd2ba5b (patch)
tree       70496bdc4ad01ab9e11cd07913f2aec681fac324
parent     8e22054209ae9c4f08dae16f1aff910d8c8d0b76 (diff)
ipsec: esp-encrypt rework
Change-Id: Ibe7f806b9d600994e83c9f1be526fdb0a1ef1833
Signed-off-by: Damjan Marion <damarion@cisco.com>
-rw-r--r--  src/vnet/crypto/crypto.h      |   1
-rw-r--r--  src/vnet/ipsec/esp.h          |   4
-rw-r--r--  src/vnet/ipsec/esp_encrypt.c  | 578
-rw-r--r--  src/vnet/ipsec/ipsec.c        |   2
-rw-r--r--  src/vnet/ipsec/ipsec.h        |   9
-rw-r--r--  src/vnet/ipsec/ipsec_sa.c     |  43
-rw-r--r--  src/vnet/ipsec/ipsec_sa.h     |   3
-rw-r--r--  src/vppinfra/string.h         |  76
-rw-r--r--  src/vppinfra/vector_avx2.h    |  13
-rw-r--r--  src/vppinfra/vector_sse42.h   |  12
10 files changed, 499 insertions(+), 242 deletions(-)
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 7b65aa5c43d..4ce4d254725 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -85,6 +85,7 @@ typedef struct
u8 *iv;
u8 *src;
u8 *dst;
+ uword user_data;
} vnet_crypto_op_t;
typedef struct
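
The new user_data field gives the caller a slot for per-operation context; the encrypt node below uses it to remember which buffer an op belongs to (op->user_data = b - bufs) so failures reported by the crypto engine can be mapped back to individual packets in esp_process_ops. A minimal standalone sketch of that pattern, with plain stdint types standing in for the VPP structures (all names here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* hypothetical stand-ins for the two vnet_crypto_op_t fields this pattern uses */
typedef struct { int status; uintptr_t user_data; } op_t;
enum { OP_COMPLETED = 0, OP_FAILED = 1 };

int
main (void)
{
  uint16_t nexts[4] = { 1, 1, 1, 1 };   /* per-packet next-node index, 1 = forward */
  op_t ops[4];

  /* enqueue: remember which packet each op belongs to */
  for (int i = 0; i < 4; i++)
    {
      ops[i].user_data = (uintptr_t) i;            /* as in op->user_data = b - bufs */
      ops[i].status = (i == 2) ? OP_FAILED : OP_COMPLETED;
    }

  /* completion: map failures back to their packets and drop only those */
  for (int i = 0; i < 4; i++)
    if (ops[i].status != OP_COMPLETED)
      nexts[ops[i].user_data] = 0;                 /* 0 stands in for "drop" */

  for (int i = 0; i < 4; i++)
    printf ("packet %d -> next %u\n", i, (unsigned) nexts[i]);
  return 0;
}
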
diff --git a/src/vnet/ipsec/esp.h b/src/vnet/ipsec/esp.h
index 8f900da428c..063b74b7c5c 100644
--- a/src/vnet/ipsec/esp.h
+++ b/src/vnet/ipsec/esp.h
@@ -55,7 +55,9 @@ typedef CLIB_PACKED (struct {
/* *INDENT-ON* */
#define ESP_WINDOW_SIZE (64)
-#define ESP_SEQ_MAX (4294967295UL)
+#define ESP_SEQ_MAX (4294967295UL)
+#define ESP_MAX_BLOCK_SIZE (16)
+#define ESP_MAX_ICV_SIZE (16)
u8 *format_esp_header (u8 * s, va_list * args);
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index 1e29ee34f3b..c792a149f12 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -39,12 +39,12 @@ typedef enum
ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
-#define foreach_esp_encrypt_error \
- _(RX_PKTS, "ESP pkts received") \
- _(NO_BUFFER, "No buffer (packet dropped)") \
- _(DECRYPTION_FAILED, "ESP encryption failed") \
- _(SEQ_CYCLED, "sequence number cycled")
-
+#define foreach_esp_encrypt_error \
+ _(RX_PKTS, "ESP pkts received") \
+ _(SEQ_CYCLED, "sequence number cycled (packet dropped)") \
+ _(CRYPTO_ENGINE_ERROR, "crypto engine error (packet dropped)") \
+ _(CHAINED_BUFFER, "chained buffers (packet dropped)") \
+ _(NO_TRAILER_SPACE, "no trailer space (packet dropped)")
typedef enum
{
@@ -86,301 +86,397 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
return s;
}
-always_inline void
-esp_encrypt_cbc (vlib_main_t * vm, ipsec_sa_t * sa,
- u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
+/* pad packet in input buffer */
+static_always_inline u8 *
+esp_add_footer_and_icv (vlib_buffer_t * b, u8 block_size, u8 icv_sz)
{
- vnet_crypto_op_t _op, *op = &_op;
+ static const u8 pad_data[ESP_MAX_BLOCK_SIZE] = {
+ 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
+ 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x00, 0x00,
+ };
+
+ u16 min_length = b->current_length + sizeof (esp_footer_t);
+ u16 new_length = round_pow2 (min_length, block_size);
+ u8 pad_bytes = new_length - min_length;
+ esp_footer_t *f = (esp_footer_t *) (vlib_buffer_get_current (b) +
+ new_length - sizeof (esp_footer_t));
+
+ if (pad_bytes)
+ clib_memcpy_fast ((u8 *) f - pad_bytes, pad_data, ESP_MAX_BLOCK_SIZE);
+
+ f->pad_length = pad_bytes;
+ b->current_length = new_length + icv_sz;
+ return &f->next_header;
+}
+static_always_inline void
+esp_update_ip4_hdr (ip4_header_t * ip4, u16 len, int is_transport, int is_udp)
+{
+ ip_csum_t sum = ip4->checksum;
+ u16 old_len = 0;
- if (PREDICT_FALSE (sa->crypto_enc_op_type == VNET_CRYPTO_OP_NONE))
- return;
+ if (is_transport)
+ {
+ u8 prot = is_udp ? IP_PROTOCOL_UDP : IP_PROTOCOL_IPSEC_ESP;
+ old_len = ip4->length;
+ sum = ip_csum_update (sum, ip4->protocol, prot, ip4_header_t, protocol);
+ ip4->protocol = prot;
+ }
- op->op = sa->crypto_enc_op_type;
- op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
- op->iv = iv;
- op->src = in;
- op->dst = out;
- op->len = in_len;
- op->key = key;
+ ip4->length = len = clib_net_to_host_u16 (len);
+ sum = ip_csum_update (ip4->checksum, old_len, len, ip4_header_t, length);
+ ip4->checksum = ip_csum_fold (sum);
+}
- vnet_crypto_process_ops (vm, op, 1);
+static_always_inline void
+esp_fill_udp_hdr (ipsec_sa_t * sa, udp_header_t * udp, u16 len)
+{
+ clib_memcpy_fast (udp, &sa->udp_hdr, sizeof (udp_header_t));
+ udp->length = clib_net_to_host_u16 (len);
}
-always_inline uword
-esp_encrypt_inline (vlib_main_t * vm,
- vlib_node_runtime_t * node, vlib_frame_t * from_frame,
- int is_ip6)
+static_always_inline u8
+ext_hdr_is_pre_esp (u8 nexthdr)
{
- u32 *from = vlib_frame_vector_args (from_frame);
- u32 n_left_from = from_frame->n_vectors;
- ipsec_main_t *im = &ipsec_main;
- u32 new_bufs[VLIB_FRAME_SIZE];
- vlib_buffer_t *i_bufs[VLIB_FRAME_SIZE], **ib = i_bufs;
- vlib_buffer_t *o_bufs[VLIB_FRAME_SIZE], **ob = o_bufs;
- u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
- u32 n_alloc, thread_index = vm->thread_index;
+#ifdef CLIB_HAVE_VEC128
+ static const u8x16 ext_hdr_types = {
+ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS,
+ IP_PROTOCOL_IPV6_ROUTE,
+ IP_PROTOCOL_IPV6_FRAGMENTATION,
+ };
+
+ return !u8x16_is_all_zero (ext_hdr_types == u8x16_splat (nexthdr));
+#else
+ return ((nexthdr ^ IP_PROTOCOL_IP6_HOP_BY_HOP_OPTIONS) |
+ (nexthdr ^ IP_PROTOCOL_IPV6_ROUTE) |
+ (nexthdr ^ IP_PROTOCOL_IPV6_FRAGMENTATION) != 0);
+#endif
+}
+
+static_always_inline u8
+esp_get_ip6_hdr_len (ip6_header_t * ip6)
+{
+ /* this code assumes that HbH, route and frag headers will be before
+ others, if that is not the case, they will end up encrypted */
+
+ u8 len = sizeof (ip6_header_t);
+ ip6_ext_header_t *p;
+
+ /* if next packet doesn't have ext header */
+ if (ext_hdr_is_pre_esp (ip6->protocol) == 0)
+ return len;
- n_alloc = vlib_buffer_alloc (vm, new_bufs, n_left_from);
- if (n_alloc != n_left_from)
+ p = (void *) (ip6 + 1);
+ len += ip6_ext_header_len (p);
+
+ while (ext_hdr_is_pre_esp (p->next_hdr))
{
- vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_NO_BUFFER,
- n_left_from - n_alloc);
- if (n_alloc == 0)
- goto done;
- n_left_from = n_alloc;
+ len += ip6_ext_header_len (p);
+ p = ip6_ext_next_header (p);
}
- vlib_get_buffers (vm, from, ib, n_left_from);
- vlib_get_buffers (vm, new_bufs, ob, n_left_from);
+ return len;
+}
- while (n_left_from > 0)
+static_always_inline int
+esp_trailer_icv_overflow (vlib_node_runtime_t * node, vlib_buffer_t * b,
+ u16 * next, u16 buffer_data_size)
+{
+ if (b->current_data + b->current_length <= buffer_data_size)
+ return 0;
+
+ b->current_length -= buffer_data_size - b->current_data;
+ b->error = node->errors[ESP_ENCRYPT_ERROR_NO_TRAILER_SPACE];
+ next[0] = ESP_ENCRYPT_NEXT_DROP;
+ return 1;
+}
+
+static_always_inline void
+esp_process_ops (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vnet_crypto_op_t * ops, vlib_buffer_t * b[], u16 * nexts)
+{
+ u32 n_fail, n_ops = vec_len (ops);
+ vnet_crypto_op_t *op = ops;
+
+ if (n_ops == 0)
+ return;
+
+ n_fail = n_ops - vnet_crypto_process_ops (vm, op, n_ops);
+
+ while (n_fail)
{
- u32 sa_index0;
- ipsec_sa_t *sa0;
- ip4_and_esp_header_t *oh0 = 0;
- ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
- ip4_and_udp_and_esp_header_t *iuh0, *ouh0 = 0;
- esp_header_t *o_esp0;
- esp_footer_t *f0;
- u8 ip_udp_hdr_size;
- u8 next_hdr_type;
- u32 ip_proto = 0;
- u8 transport_mode = 0;
- u32 esp_seq_err;
-
- next[0] = ESP_ENCRYPT_NEXT_DROP;
-
- sa_index0 = vnet_buffer (ib[0])->ipsec.sad_index;
- sa0 = pool_elt_at_index (im->sad, sa_index0);
-
- vlib_prefetch_combined_counter (&ipsec_sa_counters, thread_index,
- sa_index0);
-
- esp_seq_err = esp_seq_advance (sa0);
-
- /* grab free buffer */
- ob[0]->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
- ob[0]->current_data = sizeof (ethernet_header_t);
- iuh0 = vlib_buffer_get_current (ib[0]);
-
- if (is_ip6)
+ ASSERT (op - ops < n_ops);
+
+ if (op->status != VNET_CRYPTO_OP_STATUS_COMPLETED)
{
- ih6_0 = vlib_buffer_get_current (ib[0]);
- next_hdr_type = IP_PROTOCOL_IPV6;
- oh6_0 = vlib_buffer_get_current (ob[0]);
-
- oh6_0->ip6.ip_version_traffic_class_and_flow_label =
- ih6_0->ip6.ip_version_traffic_class_and_flow_label;
- oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
- ip_udp_hdr_size = sizeof (ip6_header_t);
- o_esp0 = vlib_buffer_get_current (ob[0]) + ip_udp_hdr_size;
- oh6_0->ip6.hop_limit = 254;
- oh6_0->ip6.src_address.as_u64[0] = ih6_0->ip6.src_address.as_u64[0];
- oh6_0->ip6.src_address.as_u64[1] = ih6_0->ip6.src_address.as_u64[1];
- oh6_0->ip6.dst_address.as_u64[0] = ih6_0->ip6.dst_address.as_u64[0];
- oh6_0->ip6.dst_address.as_u64[1] = ih6_0->ip6.dst_address.as_u64[1];
- o_esp0->spi = clib_net_to_host_u32 (sa0->spi);
- o_esp0->seq = clib_net_to_host_u32 (sa0->seq);
- ip_proto = ih6_0->ip6.protocol;
-
- next[0] = ESP_ENCRYPT_NEXT_IP6_LOOKUP;
+ u32 bi = op->user_data;
+ b[bi]->error = node->errors[ESP_ENCRYPT_ERROR_CRYPTO_ENGINE_ERROR];
+ nexts[bi] = ESP_ENCRYPT_NEXT_DROP;
+ n_fail--;
}
- else
+ op++;
+ }
+}
+
+always_inline uword
+esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
+ vlib_frame_t * frame, int is_ip6)
+{
+ ipsec_main_t *im = &ipsec_main;
+ ipsec_per_thread_data_t *ptd = vec_elt_at_index (im->ptd, vm->thread_index);
+ u32 *from = vlib_frame_vector_args (frame);
+ u32 n_left = frame->n_vectors;
+ vlib_buffer_t *bufs[VLIB_FRAME_SIZE], **b = bufs;
+ u16 nexts[VLIB_FRAME_SIZE], *next = nexts;
+ u32 thread_index = vm->thread_index;
+ u16 buffer_data_size = vlib_buffer_get_default_data_size (vm);
+ u32 current_sa_index = ~0, current_sa_packets = 0;
+ u32 current_sa_bytes = 0, spi = 0;
+ u8 block_sz = 0, iv_sz = 0, icv_sz = 0;
+ ipsec_sa_t *sa0 = 0;
+
+ vlib_get_buffers (vm, from, b, n_left);
+ vec_reset_length (ptd->crypto_ops);
+ vec_reset_length (ptd->integ_ops);
+
+ while (n_left > 0)
+ {
+ u32 sa_index0 = vnet_buffer (b[0])->ipsec.sad_index;
+ dpo_id_t *dpo;
+ esp_header_t *esp;
+ u8 *payload, *next_hdr_ptr;
+ u16 payload_len;
+ u32 hdr_len;
+
+ if (n_left > 2)
{
- next_hdr_type = IP_PROTOCOL_IP_IN_IP;
- oh0 = vlib_buffer_get_current (ob[0]);
- ouh0 = vlib_buffer_get_current (ob[0]);
-
- oh0->ip4.ip_version_and_header_length = 0x45;
- oh0->ip4.tos = iuh0->ip4.tos;
- oh0->ip4.fragment_id = 0;
- oh0->ip4.flags_and_fragment_offset = 0;
- oh0->ip4.ttl = 254;
- if (sa0->udp_encap)
- {
- ouh0->udp.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- ouh0->udp.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
- ouh0->udp.checksum = 0;
- ouh0->ip4.protocol = IP_PROTOCOL_UDP;
- ip_udp_hdr_size = sizeof (udp_header_t) + sizeof (ip4_header_t);
- }
- else
- {
- oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
- ip_udp_hdr_size = sizeof (ip4_header_t);
- }
- o_esp0 = vlib_buffer_get_current (ob[0]) + ip_udp_hdr_size;
- oh0->ip4.src_address.as_u32 = iuh0->ip4.src_address.as_u32;
- oh0->ip4.dst_address.as_u32 = iuh0->ip4.dst_address.as_u32;
- o_esp0->spi = clib_net_to_host_u32 (sa0->spi);
- o_esp0->seq = clib_net_to_host_u32 (sa0->seq);
- ip_proto = iuh0->ip4.protocol;
-
- next[0] = ESP_ENCRYPT_NEXT_IP4_LOOKUP;
+ u8 *p;
+ vlib_prefetch_buffer_header (b[2], LOAD);
+ p = vlib_buffer_get_current (b[1]);
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
+ p -= CLIB_CACHE_LINE_BYTES;
+ CLIB_PREFETCH (p, CLIB_CACHE_LINE_BYTES, LOAD);
}
- if (PREDICT_TRUE (!is_ip6 && sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ if (vnet_buffer (b[0])->ipsec.sad_index != current_sa_index)
{
- oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
- oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
+ sa0 = pool_elt_at_index (im->sad, sa_index0);
+ current_sa_index = sa_index0;
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ sa_index0, current_sa_packets,
+ current_sa_bytes);
+ current_sa_packets = current_sa_bytes = 0;
+ spi = clib_net_to_host_u32 (sa0->spi);
+ block_sz = sa0->crypto_block_size;
+ icv_sz = sa0->integ_trunc_size;
+ iv_sz = sa0->crypto_iv_size;
+ }
- next[0] = sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_next_node;
- vnet_buffer (ob[0])->ip.adj_index[VLIB_TX] =
- sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_index;
+ if (vlib_buffer_chain_linearize (vm, b[0]) != 1)
+ {
+ b[0]->error = node->errors[ESP_ENCRYPT_ERROR_CHAINED_BUFFER];
+ next[0] = ESP_ENCRYPT_NEXT_DROP;
+ goto trace;
}
- else if (is_ip6 && sa0->is_tunnel && sa0->is_tunnel_ip6)
+
+ if (PREDICT_FALSE (esp_seq_advance (sa0)))
{
- oh6_0->ip6.src_address.as_u64[0] =
- sa0->tunnel_src_addr.ip6.as_u64[0];
- oh6_0->ip6.src_address.as_u64[1] =
- sa0->tunnel_src_addr.ip6.as_u64[1];
- oh6_0->ip6.dst_address.as_u64[0] =
- sa0->tunnel_dst_addr.ip6.as_u64[0];
- oh6_0->ip6.dst_address.as_u64[1] =
- sa0->tunnel_dst_addr.ip6.as_u64[1];
-
- next[0] = sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_next_node;
- vnet_buffer (ob[0])->ip.adj_index[VLIB_TX] =
- sa0->dpo[IPSEC_PROTOCOL_ESP].dpoi_index;
+ b[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
+ next[0] = ESP_ENCRYPT_NEXT_DROP;
+ goto trace;
}
- else
+
+ /* space for IV */
+ hdr_len = iv_sz;
+
+ if (sa0->is_tunnel)
{
- next_hdr_type = ip_proto;
- if (vnet_buffer (ib[0])->sw_if_index[VLIB_TX] != ~0)
+ payload = vlib_buffer_get_current (b[0]);
+ next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz);
+ payload_len = b[0]->current_length;
+
+ if (esp_trailer_icv_overflow (node, b[0], next, buffer_data_size))
+ goto trace;
+
+ /* ESP header */
+ hdr_len += sizeof (*esp);
+ esp = (esp_header_t *) (payload - hdr_len);
+
+ /* optional UDP header */
+ if (sa0->udp_encap)
{
- transport_mode = 1;
- ethernet_header_t *ieh0, *oeh0;
- ieh0 =
- (ethernet_header_t *) ((u8 *)
- vlib_buffer_get_current (ib[0]) -
- sizeof (ethernet_header_t));
- oeh0 = (ethernet_header_t *) ob[0]->data;
- clib_memcpy_fast (oeh0, ieh0, sizeof (ethernet_header_t));
- next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
- vnet_buffer (ob[0])->sw_if_index[VLIB_TX] =
- vnet_buffer (ib[0])->sw_if_index[VLIB_TX];
+ hdr_len += sizeof (udp_header_t);
+ esp_fill_udp_hdr (sa0, (udp_header_t *) (payload - hdr_len),
+ payload_len + hdr_len);
}
- if (is_ip6)
- vlib_buffer_advance (ib[0], sizeof (ip6_header_t));
+ /* IP header */
+ if (sa0->is_tunnel_ip6)
+ {
+ ip6_header_t *ip6;
+ u16 len = sizeof (ip6_header_t);
+ hdr_len += len;
+ ip6 = (ip6_header_t *) (payload - hdr_len);
+ clib_memcpy_fast (ip6, &sa0->ip6_hdr, len);
+ *next_hdr_ptr = IP_PROTOCOL_IPV6;
+ len = payload_len + hdr_len - len;
+ ip6->payload_length = clib_net_to_host_u16 (len);
+ }
else
- vlib_buffer_advance (ib[0], sizeof (ip4_header_t));
+ {
+ ip4_header_t *ip4;
+ u16 len = sizeof (ip4_header_t);
+ hdr_len += len;
+ ip4 = (ip4_header_t *) (payload - hdr_len);
+ clib_memcpy_fast (ip4, &sa0->ip4_hdr, len);
+ *next_hdr_ptr = IP_PROTOCOL_IP_IN_IP;
+ len = payload_len + hdr_len;
+ esp_update_ip4_hdr (ip4, len, /* is_transport */ 0, 0);
+ }
+
+ dpo = sa0->dpo + IPSEC_PROTOCOL_ESP;
+ next[0] = dpo->dpoi_next_node;
+ vnet_buffer (b[0])->ip.adj_index[VLIB_TX] = dpo->dpoi_index;
}
+ else /* transport mode */
+ {
+ u8 *l2_hdr, l2_len, *ip_hdr, ip_len;
+ udp_header_t *udp = 0;
+ u8 *old_ip_hdr = vlib_buffer_get_current (b[0]);
- ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
- vlib_increment_combined_counter
- (&ipsec_sa_counters, thread_index, sa_index0,
- 1, ib[0]->current_length);
+ ip_len = is_ip6 ?
+ esp_get_ip6_hdr_len ((ip6_header_t *) old_ip_hdr) :
+ ip4_header_bytes ((ip4_header_t *) old_ip_hdr);
- if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
- {
+ vlib_buffer_advance (b[0], ip_len);
+ payload = vlib_buffer_get_current (b[0]);
+ next_hdr_ptr = esp_add_footer_and_icv (b[0], block_sz, icv_sz);
+ payload_len = b[0]->current_length;
- const int BLOCK_SIZE = sa0->crypto_block_size;
- const int IV_SIZE = sa0->crypto_iv_size;
- int blocks = 1 + (ib[0]->current_length + 1) / BLOCK_SIZE;
-
- /* pad packet in input buffer */
- u8 pad_bytes = BLOCK_SIZE * blocks - 2 - ib[0]->current_length;
- u8 i;
- u8 *padding =
- vlib_buffer_get_current (ib[0]) + ib[0]->current_length;
- ib[0]->current_length = BLOCK_SIZE * blocks;
- for (i = 0; i < pad_bytes; ++i)
+ if (esp_trailer_icv_overflow (node, b[0], next, buffer_data_size))
+ goto trace;
+
+ /* ESP header */
+ hdr_len += sizeof (*esp);
+ esp = (esp_header_t *) (payload - hdr_len);
+
+ /* optional UDP header */
+ if (sa0->udp_encap)
{
- padding[i] = i + 1;
+ hdr_len += sizeof (udp_header_t);
+ udp = (udp_header_t *) (payload - hdr_len);
}
- f0 = vlib_buffer_get_current (ib[0]) + ib[0]->current_length - 2;
- f0->pad_length = pad_bytes;
- f0->next_header = next_hdr_type;
- ob[0]->current_length = ip_udp_hdr_size + sizeof (esp_header_t) +
- BLOCK_SIZE * blocks + IV_SIZE;
+ /* IP header */
+ hdr_len += ip_len;
+ ip_hdr = payload - hdr_len;
- vnet_buffer (ob[0])->sw_if_index[VLIB_RX] =
- vnet_buffer (ib[0])->sw_if_index[VLIB_RX];
+ /* L2 header */
+ l2_len = vnet_buffer (b[0])->ip.save_rewrite_length;
+ hdr_len += l2_len;
+ l2_hdr = payload - hdr_len;
- u8 *iv = vlib_buffer_get_current (ob[0]) + ip_udp_hdr_size +
- sizeof (esp_header_t);
+ /* copy l2 and ip header */
+ clib_memcpy_le32 (l2_hdr, old_ip_hdr - l2_len, l2_len);
+ clib_memcpy_le64 (ip_hdr, old_ip_hdr, ip_len);
- clib_memcpy_fast ((u8 *) vlib_buffer_get_current (ob[0]) +
- ip_udp_hdr_size + sizeof (esp_header_t), iv,
- IV_SIZE);
+ if (is_ip6)
+ {
+ ip6_header_t *ip6 = (ip6_header_t *) (ip_hdr);
+ *next_hdr_ptr = ip6->protocol;
+ ip6->protocol = IP_PROTOCOL_IPSEC_ESP;
+ ip6->payload_length = payload_len + hdr_len - l2_len - ip_len;
+ }
+ else
+ {
+ u16 len;
+ ip4_header_t *ip4 = (ip4_header_t *) (ip_hdr);
+ *next_hdr_ptr = ip4->protocol;
+ len = payload_len + hdr_len + l2_len;
+ if (udp)
+ {
+ esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 1);
+ esp_fill_udp_hdr (sa0, udp, len - ip_len);
+ }
+ else
+ esp_update_ip4_hdr (ip4, len, /* is_transport */ 1, 0);
+ }
- esp_encrypt_cbc (vm, sa0, (u8 *) vlib_buffer_get_current (ib[0]),
- (u8 *) vlib_buffer_get_current (ob[0]) +
- ip_udp_hdr_size + sizeof (esp_header_t) +
- IV_SIZE, BLOCK_SIZE * blocks,
- sa0->crypto_key.data, iv);
+ next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
}
- ob[0]->current_length +=
- hmac_calc (vm, sa0, (u8 *) o_esp0,
- ob[0]->current_length - ip_udp_hdr_size,
- vlib_buffer_get_current (ob[0]) + ob[0]->current_length);
-
+ esp->spi = spi;
+ esp->seq = clib_net_to_host_u32 (sa0->seq);
- if (is_ip6)
+ if (sa0->crypto_enc_op_type)
{
- oh6_0->ip6.payload_length =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, ob[0]) -
- sizeof (ip6_header_t));
+ vnet_crypto_op_t *op;
+ vec_add2_aligned (ptd->crypto_ops, op, 1, CLIB_CACHE_LINE_BYTES);
+ op->op = sa0->crypto_enc_op_type;
+ op->iv = payload - iv_sz;
+ op->src = op->dst = payload;
+ op->key = sa0->crypto_key.data;
+ op->len = payload_len - icv_sz;
+ op->flags = VNET_CRYPTO_OP_FLAG_INIT_IV;
+ op->user_data = b - bufs;
}
- else
+
+ if (sa0->integ_op_type)
{
- oh0->ip4.length =
- clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, ob[0]));
- oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
- if (sa0->udp_encap)
+ vnet_crypto_op_t *op;
+ vec_add2_aligned (ptd->integ_ops, op, 1, CLIB_CACHE_LINE_BYTES);
+ op->op = sa0->integ_op_type;
+ op->src = payload - iv_sz - sizeof (esp_header_t);
+ op->dst = payload + payload_len - icv_sz;
+ op->key = sa0->integ_key.data;
+ op->key_len = sa0->integ_key.len;
+ op->hmac_trunc_len = icv_sz;
+ op->len = payload_len - icv_sz + iv_sz + sizeof (esp_header_t);
+ op->flags = 0;
+ op->user_data = b - bufs;
+ if (sa0->use_esn)
{
- ouh0->udp.length =
- clib_host_to_net_u16 (clib_net_to_host_u16
- (oh0->ip4.length) -
- ip4_header_bytes (&oh0->ip4));
+ u32 seq_hi = clib_net_to_host_u32 (sa0->seq_hi);
+ clib_memcpy_fast (op->dst, &seq_hi, sizeof (seq_hi));
+ op->len += sizeof (seq_hi);
}
}
- if (transport_mode)
- vlib_buffer_reset (ob[0]);
+ vlib_buffer_advance (b[0], 0LL - hdr_len);
- if (PREDICT_FALSE (esp_seq_err))
- {
- ob[0]->error = node->errors[ESP_ENCRYPT_ERROR_SEQ_CYCLED];
- next[0] = ESP_ENCRYPT_NEXT_DROP;
- }
+ current_sa_packets += 1;
+ current_sa_bytes += payload_len;
- if (PREDICT_FALSE (ib[0]->flags & VLIB_BUFFER_IS_TRACED))
+ trace:
+ if (PREDICT_FALSE (b[0]->flags & VLIB_BUFFER_IS_TRACED))
{
- if (ob[0])
- {
- ob[0]->flags |= VLIB_BUFFER_IS_TRACED;
- ob[0]->trace_index = ib[0]->trace_index;
- esp_encrypt_trace_t *tr =
- vlib_add_trace (vm, node, ob[0], sizeof (*tr));
- tr->sa_index = sa_index0;
- tr->spi = sa0->spi;
- tr->seq = sa0->seq - 1;
- tr->udp_encap = sa0->udp_encap;
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
- }
+ esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, b[0],
+ sizeof (*tr));
+ tr->sa_index = sa_index0;
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq - 1;
+ tr->udp_encap = sa0->udp_encap;
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
}
-
/* next */
- n_left_from -= 1;
- ib += 1;
- ob += 1;
+ n_left -= 1;
next += 1;
+ b += 1;
}
+ vlib_increment_combined_counter (&ipsec_sa_counters, thread_index,
+ current_sa_index, current_sa_packets,
+ current_sa_bytes);
+
+ esp_process_ops (vm, node, ptd->crypto_ops, bufs, nexts);
+ esp_process_ops (vm, node, ptd->integ_ops, bufs, nexts);
+
vlib_node_increment_counter (vm, node->node_index,
- ESP_ENCRYPT_ERROR_RX_PKTS, n_alloc);
+ ESP_ENCRYPT_ERROR_RX_PKTS, frame->n_vectors);
- vlib_buffer_enqueue_to_next (vm, node, new_bufs, nexts, n_alloc);
-done:
- vlib_buffer_free (vm, from, from_frame->n_vectors);
- return n_alloc;
+ vlib_buffer_enqueue_to_next (vm, node, from, nexts, frame->n_vectors);
+ return frame->n_vectors;
}
VLIB_NODE_FN (esp4_encrypt_node) (vlib_main_t * vm,
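
For reference, the in-place padding done by esp_add_footer_and_icv reduces to rounding the payload plus the two-byte ESP footer (pad_length + next_header) up to the cipher block size, then reserving icv_sz bytes after it. A standalone sketch of that arithmetic with illustrative sizes; round_pow2_u16 mirrors vppinfra's round_pow2:

#include <stdint.h>
#include <stdio.h>

/* round x up to the next multiple of pow2 (a power of two) */
static uint16_t
round_pow2_u16 (uint16_t x, uint16_t pow2)
{
  return (x + pow2 - 1) & ~(pow2 - 1);
}

int
main (void)
{
  uint16_t payload = 61;               /* bytes in the buffer before padding */
  uint16_t block_sz = 16, icv_sz = 16; /* illustrative cipher block / ICV sizes */
  uint16_t footer = 2;                 /* esp_footer_t: pad_length + next_header */

  uint16_t min_len = payload + footer;
  uint16_t new_len = round_pow2_u16 (min_len, block_sz);
  uint16_t pad_bytes = new_len - min_len;

  printf ("pad_bytes=%u, padded length=%u, new buffer length=%u\n",
          (unsigned) pad_bytes, (unsigned) new_len, (unsigned) (new_len + icv_sz));
  return 0;
}
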
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index ac9e139fd85..92210493e78 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -316,6 +316,8 @@ ipsec_init (vlib_main_t * vm)
i->op_type = VNET_CRYPTO_OP_SHA512_HMAC;
i->trunc_size = 32;
+ vec_validate_aligned (im->ptd, vec_len (vlib_mains), CLIB_CACHE_LINE_BYTES);
+
return 0;
}
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index cfb096f69ba..9a5dbb3e8b2 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -80,6 +80,12 @@ typedef struct
typedef struct
{
+ vnet_crypto_op_t *crypto_ops;
+ vnet_crypto_op_t *integ_ops;
+} ipsec_per_thread_data_t;
+
+typedef struct
+{
/* pool of tunnel instances */
ipsec_spd_t *spds;
/* Pool of security associations */
@@ -142,6 +148,9 @@ typedef struct
/* crypto integ data */
ipsec_main_integ_alg_t *integ_algs;
+
+ /* per-thread data */
+ ipsec_per_thread_data_t *ptd;
} ipsec_main_t;
extern ipsec_main_t ipsec_main;
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index 337ca34eae9..3e29c0406e3 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -14,6 +14,8 @@
*/
#include <vnet/ipsec/ipsec.h>
+#include <vnet/ipsec/esp.h>
+#include <vnet/udp/udp.h>
#include <vnet/fib/fib_table.h>
/**
@@ -97,6 +99,7 @@ ipsec_sa_set_crypto_alg (ipsec_sa_t * sa, ipsec_crypto_alg_t crypto_alg)
sa->crypto_block_size = im->crypto_algs[crypto_alg].block_size;
sa->crypto_enc_op_type = im->crypto_algs[crypto_alg].enc_op_type;
sa->crypto_dec_op_type = im->crypto_algs[crypto_alg].dec_op_type;
+ ASSERT (sa->crypto_block_size <= ESP_MAX_BLOCK_SIZE);
}
void
@@ -106,6 +109,7 @@ ipsec_sa_set_integ_alg (ipsec_sa_t * sa, ipsec_integ_alg_t integ_alg)
sa->integ_alg = integ_alg;
sa->integ_trunc_size = im->integ_algs[integ_alg].trunc_size;
sa->integ_op_type = im->integ_algs[integ_alg].op_type;
+ ASSERT (sa->integ_trunc_size <= ESP_MAX_ICV_SIZE);
}
int
@@ -199,7 +203,46 @@ ipsec_sa_add (u32 id,
sa->sibling = fib_entry_child_add (sa->fib_entry_index,
FIB_NODE_TYPE_IPSEC_SA, sa_index);
ipsec_sa_stack (sa);
+
+ /* generate header templates */
+ if (sa->is_tunnel_ip6)
+ {
+ sa->ip6_hdr.ip_version_traffic_class_and_flow_label = 0x60;
+ sa->ip6_hdr.hop_limit = 254;
+ sa->ip6_hdr.src_address.as_u64[0] =
+ sa->tunnel_src_addr.ip6.as_u64[0];
+ sa->ip6_hdr.src_address.as_u64[1] =
+ sa->tunnel_src_addr.ip6.as_u64[1];
+ sa->ip6_hdr.dst_address.as_u64[0] =
+ sa->tunnel_dst_addr.ip6.as_u64[0];
+ sa->ip6_hdr.dst_address.as_u64[1] =
+ sa->tunnel_dst_addr.ip6.as_u64[1];
+ if (sa->udp_encap)
+ sa->ip6_hdr.protocol = IP_PROTOCOL_UDP;
+ else
+ sa->ip6_hdr.protocol = IP_PROTOCOL_IPSEC_ESP;
+ }
+ else
+ {
+ sa->ip4_hdr.ip_version_and_header_length = 0x45;
+ sa->ip4_hdr.ttl = 254;
+ sa->ip4_hdr.src_address.as_u32 = sa->tunnel_src_addr.ip4.as_u32;
+ sa->ip4_hdr.dst_address.as_u32 = sa->tunnel_dst_addr.ip4.as_u32;
+
+ if (sa->udp_encap)
+ sa->ip4_hdr.protocol = IP_PROTOCOL_UDP;
+ else
+ sa->ip4_hdr.protocol = IP_PROTOCOL_IPSEC_ESP;
+ sa->ip4_hdr.checksum = ip4_header_checksum (&sa->ip4_hdr);
+ }
}
+
+ if (sa->udp_encap)
+ {
+ sa->udp_hdr.src_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
+ sa->udp_hdr.dst_port = clib_host_to_net_u16 (UDP_DST_PORT_ipsec);
+ }
+
hash_set (im->sa_index_by_sa_id, sa->id, sa_index);
if (sa_out_index)
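
With the header templates built at SA-creation time, the data path only copies the template in front of the payload and patches the per-packet fields: total length, and for IPv4 the checksum (which esp_update_ip4_hdr patches incrementally with ip_csum_update). A simplified model of that idea — template once, patch per packet — using a hypothetical header struct and a full checksum recompute instead of the incremental update:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* hypothetical, minimal IPv4 header (no options) for illustration */
typedef struct
{
  uint8_t  ver_ihl, tos;
  uint16_t length;
  uint16_t id, frag;
  uint8_t  ttl, protocol;
  uint16_t checksum;
  uint32_t src, dst;
} ip4_t;

static uint16_t
ip4_cksum (const void *p, int len)
{
  const uint16_t *w = p;
  uint32_t sum = 0;
  for (; len > 1; len -= 2)
    sum += *w++;
  while (sum >> 16)
    sum = (sum & 0xffff) + (sum >> 16);
  return (uint16_t) ~sum;
}

int
main (void)
{
  /* SA creation: build the template once (cf. the ip4_hdr block above) */
  ip4_t tmpl = { 0 };
  tmpl.ver_ihl = 0x45;
  tmpl.ttl = 254;
  tmpl.protocol = 50;                  /* IPPROTO_ESP */
  tmpl.src = htonl (0x0a000001);       /* 10.0.0.1, example address */
  tmpl.dst = htonl (0x0a000002);       /* 10.0.0.2, example address */
  tmpl.checksum = ip4_cksum (&tmpl, sizeof (tmpl));

  /* per packet: copy the template, patch the length, redo the checksum */
  ip4_t hdr = tmpl;
  hdr.length = htons (20 + 120);       /* header + ESP payload for this packet */
  hdr.checksum = 0;
  hdr.checksum = ip4_cksum (&hdr, sizeof (hdr));

  printf ("per-packet checksum 0x%04x\n", (unsigned) hdr.checksum);
  return 0;
}
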
diff --git a/src/vnet/ipsec/ipsec_sa.h b/src/vnet/ipsec/ipsec_sa.h
index a6ade604b20..d3335a2d956 100644
--- a/src/vnet/ipsec/ipsec_sa.h
+++ b/src/vnet/ipsec/ipsec_sa.h
@@ -124,6 +124,9 @@ typedef struct
u8 udp_encap;
ip46_address_t tunnel_src_addr;
ip46_address_t tunnel_dst_addr;
+ ip4_header_t ip4_hdr;
+ ip6_header_t ip6_hdr;
+ udp_header_t udp_hdr;
fib_node_index_t fib_entry_index;
u32 sibling;
diff --git a/src/vppinfra/string.h b/src/vppinfra/string.h
index d9cd8fe1af9..4755a9868d6 100644
--- a/src/vppinfra/string.h
+++ b/src/vppinfra/string.h
@@ -214,6 +214,82 @@ memset_s_inline (void *s, rsize_t smax, int c, rsize_t n)
#define clib_memset(s,c,n) memset_s_inline(s,n,c,n)
static_always_inline void
+clib_memcpy_le (u8 * dst, u8 * src, u8 len, u8 max_len)
+{
+#if defined (CLIB_HAVE_VEC256)
+ u8x32 s, d;
+ u8x32 mask = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17,
+ 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31
+ };
+ u8x32 lv = u8x32_splat (len);
+ u8x32 add = u8x32_splat (32);
+
+ s = u8x32_load_unaligned (src);
+ d = u8x32_load_unaligned (dst);
+ d = u8x32_blend (d, s, u8x32_is_greater (lv, mask));
+ u8x32_store_unaligned (d, dst);
+
+ if (max_len <= 32)
+ return;
+
+ mask += add;
+ s = u8x32_load_unaligned (src + 32);
+ d = u8x32_load_unaligned (dst + 32);
+ d = u8x32_blend (d, s, u8x32_is_greater (lv, mask));
+ u8x32_store_unaligned (d, dst + 32);
+
+#elif defined (CLIB_HAVE_VEC128) && !defined (__aarch64__)
+ u8x16 s, d;
+ u8x16 mask = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
+ u8x16 lv = u8x16_splat (len);
+ u8x16 add = u8x16_splat (16);
+
+ s = u8x16_load_unaligned (src);
+ d = u8x16_load_unaligned (dst);
+ d = u8x16_blend (d, s, u8x16_is_greater (lv, mask));
+ u8x16_store_unaligned (d, dst);
+
+ if (max_len <= 16)
+ return;
+
+ mask += add;
+ s = u8x16_load_unaligned (src + 16);
+ d = u8x16_load_unaligned (dst + 16);
+ d = u8x16_blend (d, s, u8x16_is_greater (lv, mask));
+ u8x16_store_unaligned (d, dst + 16);
+
+ if (max_len <= 32)
+ return;
+
+ mask += add;
+ s = u8x16_load_unaligned (src + 32);
+ d = u8x16_load_unaligned (dst + 32);
+ d = u8x16_blend (d, s, u8x16_is_greater (lv, mask));
+ u8x16_store_unaligned (d, dst + 32);
+
+ mask += add;
+ s = u8x16_load_unaligned (src + 48);
+ d = u8x16_load_unaligned (dst + 48);
+ d = u8x16_blend (d, s, u8x16_is_greater (lv, mask));
+ u8x16_store_unaligned (d, dst + 48);
+#else
+ clib_memcpy_fast (dst, src, len);
+#endif
+}
+
+static_always_inline void
+clib_memcpy_le64 (u8 * dst, u8 * src, u8 len)
+{
+ clib_memcpy_le (dst, src, len, 64);
+}
+
+static_always_inline void
+clib_memcpy_le32 (u8 * dst, u8 * src, u8 len)
+{
+ clib_memcpy_le (dst, src, len, 32);
+}
+
+static_always_inline void
clib_memset_u64 (void *p, u64 val, uword count)
{
u64 *ptr = p;
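
clib_memcpy_le copies the first len bytes from src to dst using per-byte vector blends; it may load and store up to max_len bytes (64 for clib_memcpy_le64, 32 for clib_memcpy_le32), writing the bytes past len back unchanged, so both buffers need max_len accessible bytes. A scalar reference model of the blend semantics, for illustration only:

#include <stdint.h>
#include <stdio.h>

/* scalar model: for each index i < max_len, take src[i] when i < len,
   otherwise keep dst[i] -- the same select the (len > index) blend mask encodes */
static void
memcpy_le_ref (uint8_t *dst, const uint8_t *src, uint8_t len, uint8_t max_len)
{
  for (uint8_t i = 0; i < max_len; i++)
    dst[i] = (i < len) ? src[i] : dst[i];
}

int
main (void)
{
  uint8_t src[64], dst[64];
  for (int i = 0; i < 64; i++)
    {
      src[i] = (uint8_t) i;
      dst[i] = 0xff;
    }
  memcpy_le_ref (dst, src, 14, 64);    /* e.g. a 14-byte L2 header */
  printf ("dst[13]=%u dst[14]=0x%02x\n",
          (unsigned) dst[13], (unsigned) dst[14]);   /* dst[13]=13 dst[14]=0xff */
  return 0;
}
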
diff --git a/src/vppinfra/vector_avx2.h b/src/vppinfra/vector_avx2.h
index 51625618823..b9d6549da99 100644
--- a/src/vppinfra/vector_avx2.h
+++ b/src/vppinfra/vector_avx2.h
@@ -247,6 +247,19 @@ u32x8_scatter_one (u32x8 r, int index, void *p)
*(u32 *) p = r[index];
}
+static_always_inline u8x32
+u8x32_is_greater (u8x32 v1, u8x32 v2)
+{
+ return (u8x32) _mm256_cmpgt_epi8 ((__m256i) v1, (__m256i) v2);
+}
+
+static_always_inline u8x32
+u8x32_blend (u8x32 v1, u8x32 v2, u8x32 mask)
+{
+ return (u8x32) _mm256_blendv_epi8 ((__m256i) v1, (__m256i) v2,
+ (__m256i) mask);
+}
+
#endif /* included_vector_avx2_h */
/*
diff --git a/src/vppinfra/vector_sse42.h b/src/vppinfra/vector_sse42.h
index 5d6a47d3915..ee5b4dcc7df 100644
--- a/src/vppinfra/vector_sse42.h
+++ b/src/vppinfra/vector_sse42.h
@@ -691,6 +691,18 @@ u32x4_scatter_one (u32x4 r, int index, void *p)
*(u32 *) p = r[index];
}
+static_always_inline u8x16
+u8x16_is_greater (u8x16 v1, u8x16 v2)
+{
+ return (u8x16) _mm_cmpgt_epi8 ((__m128i) v1, (__m128i) v2);
+}
+
+static_always_inline u8x16
+u8x16_blend (u8x16 v1, u8x16 v2, u8x16 mask)
+{
+ return (u8x16) _mm_blendv_epi8 ((__m128i) v1, (__m128i) v2, (__m128i) mask);
+}
+
#endif /* included_vector_sse2_h */
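
u8x16_is_greater and u8x16_blend wrap _mm_cmpgt_epi8 and _mm_blendv_epi8: the per-byte compare builds a 0x00/0xff mask and blendv picks the second operand wherever the mask's top bit is set. The compare is signed, which is safe here because the index and length values stay below 128. A self-contained illustration using the raw intrinsics directly (compile with SSE4.1 support, e.g. -msse4.1):

/* _mm_blendv_epi8 requires SSE4.1 */
#include <smmintrin.h>
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint8_t dst[16], src[16], out[16];
  for (int i = 0; i < 16; i++)
    {
      src[i] = 0xaa;
      dst[i] = 0x55;
    }

  __m128i index = _mm_setr_epi8 (0, 1, 2, 3, 4, 5, 6, 7,
                                 8, 9, 10, 11, 12, 13, 14, 15);
  __m128i lv = _mm_set1_epi8 (5);                   /* len = 5 */
  /* signed per-byte compare: mask byte = 0xff where len > index */
  __m128i mask = _mm_cmpgt_epi8 (lv, index);
  /* blendv takes the second operand where the mask's top bit is set */
  __m128i d = _mm_blendv_epi8 (_mm_loadu_si128 ((__m128i *) dst),
                               _mm_loadu_si128 ((__m128i *) src), mask);
  _mm_storeu_si128 ((__m128i *) out, d);

  for (int i = 0; i < 16; i++)
    printf ("%02x ", (unsigned) out[i]);   /* aa aa aa aa aa 55 55 ... */
  printf ("\n");
  return 0;
}
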