path: root/vnet/vnet/ipsec/esp_encrypt.c
author    Keith Burns (alagalah) <alagalah@gmail.com>  2016-08-06 11:00:56 -0700
committer Dave Barach <openvpp@barachs.net>  2016-08-09 13:58:49 +0000
commit    166a9d4c21821bdbc9a2d5a818ec5ae0e8bdc036 (patch)
tree      1098ca0b2f63fc0f99c40ffb51fa0c85b8024a55 /vnet/vnet/ipsec/esp_encrypt.c
parent    e2c987e21fa9d2d2be93176429679dd8eaca9576 (diff)
VPP-256 - Coding style cleanup vnet/vnet/ipsec
Change-Id: Ie9e05e2c74e7843a937870e2c64db0499f03a6a4
Signed-off-by: Keith Burns (alagalah) <alagalah@gmail.com>
Diffstat (limited to 'vnet/vnet/ipsec/esp_encrypt.c')
-rw-r--r--  vnet/vnet/ipsec/esp_encrypt.c | 572
1 file changed, 300 insertions(+), 272 deletions(-)
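
The change is mechanical: the file is reflowed into the GNU C style that fd.io enforces
(opening brace on its own line, a space before the parenthesis in calls and declarations,
the return type of a function definition on its own line, and the * of a pointer declarator
bound to the name). A minimal before/after sketch of the pattern — the example_next_t enum
and format_example declarations are hypothetical stand-ins, not code from this file:

  #include <stdarg.h>

  typedef unsigned char u8;     /* stand-in for vppinfra's u8 */

  /* Before the cleanup: brace on the typedef line, no space before the paren. */
  typedef enum {
    EXAMPLE_NEXT_DROP,
    EXAMPLE_N_NEXT,
  } example_next_old_t;

  static u8 * format_example_old(u8 * s, va_list * args);

  /* After the cleanup: GNU style, as applied throughout the diff below.
     The brace moves to its own line, the return type gets its own line,
     and a space precedes the argument list. */
  typedef enum
  {
    EXAMPLE_NEXT_DROP,
    EXAMPLE_N_NEXT,
  } example_next_new_t;

  static u8 *
  format_example_new (u8 * s, va_list * args);
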
diff --git a/vnet/vnet/ipsec/esp_encrypt.c b/vnet/vnet/ipsec/esp_encrypt.c
index 596c3f41734..44999bd5beb 100644
--- a/vnet/vnet/ipsec/esp_encrypt.c
+++ b/vnet/vnet/ipsec/esp_encrypt.c
@@ -31,10 +31,11 @@ _(IP6_INPUT, "ip6-input") \
_(INTERFACE_OUTPUT, "interface-output")
#define _(v, s) ESP_ENCRYPT_NEXT_##v,
-typedef enum {
+typedef enum
+{
foreach_esp_encrypt_next
#undef _
- ESP_ENCRYPT_N_NEXT,
+ ESP_ENCRYPT_N_NEXT,
} esp_encrypt_next_t;
#define foreach_esp_encrypt_error \
@@ -44,14 +45,15 @@ typedef enum {
_(SEQ_CYCLED, "sequence number cycled")
-typedef enum {
+typedef enum
+{
#define _(sym,str) ESP_ENCRYPT_ERROR_##sym,
foreach_esp_encrypt_error
#undef _
- ESP_ENCRYPT_N_ERROR,
+ ESP_ENCRYPT_N_ERROR,
} esp_encrypt_error_t;
-static char * esp_encrypt_error_strings[] = {
+static char *esp_encrypt_error_strings[] = {
#define _(sym,string) string,
foreach_esp_encrypt_error
#undef _
@@ -59,7 +61,8 @@ static char * esp_encrypt_error_strings[] = {
vlib_node_registration_t esp_encrypt_node;
-typedef struct {
+typedef struct
+{
u32 spi;
u32 seq;
ipsec_crypto_alg_t crypto_alg;
@@ -67,66 +70,65 @@ typedef struct {
} esp_encrypt_trace_t;
/* packet trace format function */
-static u8 * format_esp_encrypt_trace (u8 * s, va_list * args)
+static u8 *
+format_esp_encrypt_trace (u8 * s, va_list * args)
{
CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
- esp_encrypt_trace_t * t = va_arg (*args, esp_encrypt_trace_t *);
+ esp_encrypt_trace_t *t = va_arg (*args, esp_encrypt_trace_t *);
s = format (s, "esp: spi %u seq %u crypto %U integrity %U",
- t->spi, t->seq,
- format_ipsec_crypto_alg, t->crypto_alg,
- format_ipsec_integ_alg, t->integ_alg);
+ t->spi, t->seq,
+ format_ipsec_crypto_alg, t->crypto_alg,
+ format_ipsec_integ_alg, t->integ_alg);
return s;
}
always_inline void
-esp_encrypt_aes_cbc(ipsec_crypto_alg_t alg,
- u8 * in,
- u8 * out,
- size_t in_len,
- u8 * key,
- u8 * iv)
+esp_encrypt_aes_cbc (ipsec_crypto_alg_t alg,
+ u8 * in, u8 * out, size_t in_len, u8 * key, u8 * iv)
{
- esp_main_t * em = &esp_main;
- u32 cpu_index = os_get_cpu_number();
- EVP_CIPHER_CTX * ctx = &(em->per_thread_data[cpu_index].encrypt_ctx);
- const EVP_CIPHER * cipher = NULL;
+ esp_main_t *em = &esp_main;
+ u32 cpu_index = os_get_cpu_number ();
+ EVP_CIPHER_CTX *ctx = &(em->per_thread_data[cpu_index].encrypt_ctx);
+ const EVP_CIPHER *cipher = NULL;
int out_len;
- ASSERT(alg < IPSEC_CRYPTO_N_ALG);
+ ASSERT (alg < IPSEC_CRYPTO_N_ALG);
- if (PREDICT_FALSE(em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
+ if (PREDICT_FALSE (em->esp_crypto_algs[alg].type == IPSEC_CRYPTO_ALG_NONE))
return;
- if (PREDICT_FALSE(alg != em->per_thread_data[cpu_index].last_encrypt_alg)) {
- cipher = em->esp_crypto_algs[alg].type;
- em->per_thread_data[cpu_index].last_encrypt_alg = alg;
- }
+ if (PREDICT_FALSE (alg != em->per_thread_data[cpu_index].last_encrypt_alg))
+ {
+ cipher = em->esp_crypto_algs[alg].type;
+ em->per_thread_data[cpu_index].last_encrypt_alg = alg;
+ }
- EVP_EncryptInit_ex(ctx, cipher, NULL, key, iv);
+ EVP_EncryptInit_ex (ctx, cipher, NULL, key, iv);
- EVP_EncryptUpdate(ctx, out, &out_len, in, in_len);
- EVP_EncryptFinal_ex(ctx, out + out_len, &out_len);
+ EVP_EncryptUpdate (ctx, out, &out_len, in, in_len);
+ EVP_EncryptFinal_ex (ctx, out + out_len, &out_len);
}
always_inline int
esp_seq_advance (ipsec_sa_t * sa)
{
- if (PREDICT_TRUE(sa->use_esn))
+ if (PREDICT_TRUE (sa->use_esn))
{
- if (PREDICT_FALSE(sa->seq == ESP_SEQ_MAX))
- {
- if (PREDICT_FALSE(sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
- return 1;
- sa->seq_hi++;
- }
+ if (PREDICT_FALSE (sa->seq == ESP_SEQ_MAX))
+ {
+ if (PREDICT_FALSE
+ (sa->use_anti_replay && sa->seq_hi == ESP_SEQ_MAX))
+ return 1;
+ sa->seq_hi++;
+ }
sa->seq++;
}
else
{
- if (PREDICT_FALSE(sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
- return 1;
+ if (PREDICT_FALSE (sa->use_anti_replay && sa->seq == ESP_SEQ_MAX))
+ return 1;
sa->seq++;
}
@@ -135,26 +137,26 @@ esp_seq_advance (ipsec_sa_t * sa)
static uword
esp_encrypt_node_fn (vlib_main_t * vm,
- vlib_node_runtime_t * node,
- vlib_frame_t * from_frame)
+ vlib_node_runtime_t * node, vlib_frame_t * from_frame)
{
- u32 n_left_from, *from, * to_next = 0, next_index;
+ u32 n_left_from, *from, *to_next = 0, next_index;
from = vlib_frame_vector_args (from_frame);
n_left_from = from_frame->n_vectors;
ipsec_main_t *im = &ipsec_main;
- u32 * recycle = 0;
- u32 cpu_index = os_get_cpu_number();
+ u32 *recycle = 0;
+ u32 cpu_index = os_get_cpu_number ();
- ipsec_alloc_empty_buffers(vm, im);
+ ipsec_alloc_empty_buffers (vm, im);
- u32 * empty_buffers = im->empty_buffers[cpu_index];
+ u32 *empty_buffers = im->empty_buffers[cpu_index];
- if (PREDICT_FALSE(vec_len (empty_buffers) < n_left_from)){
- vlib_node_increment_counter (vm, esp_encrypt_node.index,
- ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
- clib_warning("no enough empty buffers. discarding frame");
- goto free_buffers_and_exit;
- }
+ if (PREDICT_FALSE (vec_len (empty_buffers) < n_left_from))
+ {
+ vlib_node_increment_counter (vm, esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_NO_BUFFER, n_left_from);
+ clib_warning ("no enough empty buffers. discarding frame");
+ goto free_buffers_and_exit;
+ }
next_index = node->cached_next_index;
@@ -165,234 +167,253 @@ esp_encrypt_node_fn (vlib_main_t * vm,
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
while (n_left_from > 0 && n_left_to_next > 0)
- {
- u32 i_bi0, o_bi0, next0;
- vlib_buffer_t * i_b0, *o_b0 = 0;
- u32 sa_index0;
- ipsec_sa_t * sa0;
- ip4_and_esp_header_t * ih0, * oh0 = 0;
- ip6_and_esp_header_t * ih6_0, * oh6_0 = 0;
- uword last_empty_buffer;
- esp_header_t * o_esp0;
- esp_footer_t *f0;
- u8 is_ipv6;
- u8 ip_hdr_size;
- u8 next_hdr_type;
- u32 ip_proto = 0;
- u8 transport_mode = 0;
-
- i_bi0 = from[0];
- from += 1;
- n_left_from -= 1;
- n_left_to_next -= 1;
-
- next0 = ESP_ENCRYPT_NEXT_DROP;
-
- i_b0 = vlib_get_buffer (vm, i_bi0);
- sa_index0 = vnet_buffer(i_b0)->output_features.ipsec_sad_index;
- sa0 = pool_elt_at_index(im->sad, sa_index0);
-
- if (PREDICT_FALSE(esp_seq_advance(sa0)))
- {
- clib_warning("sequence number counter has cycled SPI %u", sa0->spi);
- vlib_node_increment_counter (vm, esp_encrypt_node.index,
- ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
- //TODO: rekey SA
- o_bi0 = i_bi0;
- goto trace;
- }
-
- /* grab free buffer */
- last_empty_buffer = vec_len (empty_buffers) - 1;
- o_bi0 = empty_buffers[last_empty_buffer];
- o_b0 = vlib_get_buffer (vm, o_bi0);
- o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
- o_b0->current_data = sizeof(ethernet_header_t);
- ih0 = vlib_buffer_get_current (i_b0);
- vlib_prefetch_buffer_with_index (vm, empty_buffers[last_empty_buffer-1], STORE);
- _vec_len (empty_buffers) = last_empty_buffer;
- to_next[0] = o_bi0;
- to_next += 1;
-
- /* add old buffer to the recycle list */
- vec_add1(recycle, i_bi0);
-
- /* is ipv6 */
- if (PREDICT_FALSE((ih0->ip4.ip_version_and_header_length & 0xF0 ) == 0x60))
- {
- is_ipv6 = 1;
- ih6_0 = vlib_buffer_get_current (i_b0);
- ip_hdr_size = sizeof(ip6_header_t);
- next_hdr_type = IP_PROTOCOL_IPV6;
- oh6_0 = vlib_buffer_get_current (o_b0);
- o_esp0 = vlib_buffer_get_current (o_b0) + sizeof(ip6_header_t);
-
- oh6_0->ip6.ip_version_traffic_class_and_flow_label =
- ih6_0->ip6.ip_version_traffic_class_and_flow_label;
- oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
- oh6_0->ip6.hop_limit = 254;
- oh6_0->ip6.src_address.as_u64[0] = ih6_0->ip6.src_address.as_u64[0];
- oh6_0->ip6.src_address.as_u64[1] = ih6_0->ip6.src_address.as_u64[1];
- oh6_0->ip6.dst_address.as_u64[0] = ih6_0->ip6.dst_address.as_u64[0];
- oh6_0->ip6.dst_address.as_u64[1] = ih6_0->ip6.dst_address.as_u64[1];
- oh6_0->esp.spi = clib_net_to_host_u32(sa0->spi);
- oh6_0->esp.seq = clib_net_to_host_u32(sa0->seq);
- ip_proto = ih6_0->ip6.protocol;
- }
- else
- {
- is_ipv6 = 0;
- ip_hdr_size = sizeof(ip4_header_t);
- next_hdr_type = IP_PROTOCOL_IP_IN_IP;
- oh0 = vlib_buffer_get_current (o_b0);
- o_esp0 = vlib_buffer_get_current (o_b0) + sizeof(ip4_header_t);
-
- oh0->ip4.ip_version_and_header_length = 0x45;
- oh0->ip4.tos = ih0->ip4.tos;
- oh0->ip4.fragment_id = 0;
- oh0->ip4.flags_and_fragment_offset = 0;
- oh0->ip4.ttl = 254;
- oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
- oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
- oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
- oh0->esp.spi = clib_net_to_host_u32(sa0->spi);
- oh0->esp.seq = clib_net_to_host_u32(sa0->seq);
- ip_proto = ih0->ip4.protocol;
- }
-
- if (PREDICT_TRUE(sa0->is_tunnel && !sa0->is_tunnel_ip6))
- {
- oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
- oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
-
- /* in tunnel mode send it back to FIB */
- next0 = ESP_ENCRYPT_NEXT_IP4_INPUT;
- vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32)~0;
- }
- else if(sa0->is_tunnel && sa0->is_tunnel_ip6)
- {
- oh6_0->ip6.src_address.as_u64[0] = sa0->tunnel_src_addr.ip6.as_u64[0];
- oh6_0->ip6.src_address.as_u64[1] = sa0->tunnel_src_addr.ip6.as_u64[1];
- oh6_0->ip6.dst_address.as_u64[0] = sa0->tunnel_dst_addr.ip6.as_u64[0];
- oh6_0->ip6.dst_address.as_u64[1] = sa0->tunnel_dst_addr.ip6.as_u64[1];
-
- /* in tunnel mode send it back to FIB */
- next0 = ESP_ENCRYPT_NEXT_IP6_INPUT;
- vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32)~0;
- }
- else
- {
- transport_mode = 1;
- ethernet_header_t *ieh0, *oeh0;
- ieh0 = (ethernet_header_t *) i_b0->data;
- oeh0 = (ethernet_header_t *) o_b0->data;
- clib_memcpy (oeh0, ieh0, sizeof(ethernet_header_t));
- vlib_buffer_advance(i_b0, ip_hdr_size);
- next_hdr_type = ip_proto;
- next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
- o_b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
- vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
- vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
- vnet_buffer(o_b0)->output_features.bitmap =
- vnet_buffer(i_b0)->output_features.bitmap;
- }
-
- ASSERT(sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
-
- if (PREDICT_TRUE(sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE)) {
-
- const int BLOCK_SIZE = 16;
- const int IV_SIZE = 16;
- int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;
-
- /* pad packet in input buffer */
- u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length;
- u8 i;
- u8 * padding = vlib_buffer_get_current (i_b0) + i_b0->current_length;
- i_b0->current_length = BLOCK_SIZE * blocks;
- for (i = 0; i < pad_bytes; ++i)
- {
- padding[i] = i + 1;
- }
- f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2;
- f0->pad_length = pad_bytes;
- f0->next_header = next_hdr_type;
-
- o_b0->current_length = ip_hdr_size + sizeof(esp_header_t) +
- BLOCK_SIZE * blocks + IV_SIZE;
-
- vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
- vnet_buffer (i_b0)->sw_if_index[VLIB_RX];
-
- u8 iv[16];
- RAND_bytes(iv, sizeof(iv));
-
- clib_memcpy((u8 *) vlib_buffer_get_current (o_b0) + ip_hdr_size +
- sizeof(esp_header_t), iv, 16 );
-
- esp_encrypt_aes_cbc(sa0->crypto_alg,
- (u8 *) vlib_buffer_get_current (i_b0),
- (u8 *) vlib_buffer_get_current (o_b0) +
- ip_hdr_size + sizeof(esp_header_t) + IV_SIZE,
- BLOCK_SIZE * blocks,
- sa0->crypto_key,
- iv);
- }
-
- o_b0->current_length += hmac_calc(sa0->integ_alg, sa0->integ_key,
- sa0->integ_key_len,
- (u8 *) o_esp0,
- o_b0->current_length - ip_hdr_size,
- vlib_buffer_get_current (o_b0) +
- o_b0->current_length,
- sa0->use_esn,
- sa0->seq_hi);
-
-
- if (PREDICT_FALSE(is_ipv6))
- {
- oh6_0->ip6.payload_length = clib_host_to_net_u16 (
- vlib_buffer_length_in_chain (vm, o_b0) - sizeof(ip6_header_t));
- }
- else
- {
- oh0->ip4.length = clib_host_to_net_u16 (
- vlib_buffer_length_in_chain (vm, o_b0));
- oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
- }
-
- if (transport_mode)
- vlib_buffer_reset (o_b0);
-
-trace:
- if (PREDICT_FALSE(i_b0->flags & VLIB_BUFFER_IS_TRACED)) {
- if (o_b0) {
- o_b0->flags |= VLIB_BUFFER_IS_TRACED;
- o_b0->trace_index = i_b0->trace_index;
- }
- esp_encrypt_trace_t *tr = vlib_add_trace (vm, node, o_b0, sizeof (*tr));
- tr->spi = sa0->spi;
- tr->seq = sa0->seq - 1;
- tr->crypto_alg = sa0->crypto_alg;
- tr->integ_alg = sa0->integ_alg;
- }
-
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next, o_bi0, next0);
- }
+ {
+ u32 i_bi0, o_bi0, next0;
+ vlib_buffer_t *i_b0, *o_b0 = 0;
+ u32 sa_index0;
+ ipsec_sa_t *sa0;
+ ip4_and_esp_header_t *ih0, *oh0 = 0;
+ ip6_and_esp_header_t *ih6_0, *oh6_0 = 0;
+ uword last_empty_buffer;
+ esp_header_t *o_esp0;
+ esp_footer_t *f0;
+ u8 is_ipv6;
+ u8 ip_hdr_size;
+ u8 next_hdr_type;
+ u32 ip_proto = 0;
+ u8 transport_mode = 0;
+
+ i_bi0 = from[0];
+ from += 1;
+ n_left_from -= 1;
+ n_left_to_next -= 1;
+
+ next0 = ESP_ENCRYPT_NEXT_DROP;
+
+ i_b0 = vlib_get_buffer (vm, i_bi0);
+ sa_index0 = vnet_buffer (i_b0)->output_features.ipsec_sad_index;
+ sa0 = pool_elt_at_index (im->sad, sa_index0);
+
+ if (PREDICT_FALSE (esp_seq_advance (sa0)))
+ {
+ clib_warning ("sequence number counter has cycled SPI %u",
+ sa0->spi);
+ vlib_node_increment_counter (vm, esp_encrypt_node.index,
+ ESP_ENCRYPT_ERROR_SEQ_CYCLED, 1);
+ //TODO: rekey SA
+ o_bi0 = i_bi0;
+ goto trace;
+ }
+
+ /* grab free buffer */
+ last_empty_buffer = vec_len (empty_buffers) - 1;
+ o_bi0 = empty_buffers[last_empty_buffer];
+ o_b0 = vlib_get_buffer (vm, o_bi0);
+ o_b0->flags = VLIB_BUFFER_TOTAL_LENGTH_VALID;
+ o_b0->current_data = sizeof (ethernet_header_t);
+ ih0 = vlib_buffer_get_current (i_b0);
+ vlib_prefetch_buffer_with_index (vm,
+ empty_buffers[last_empty_buffer -
+ 1], STORE);
+ _vec_len (empty_buffers) = last_empty_buffer;
+ to_next[0] = o_bi0;
+ to_next += 1;
+
+ /* add old buffer to the recycle list */
+ vec_add1 (recycle, i_bi0);
+
+ /* is ipv6 */
+ if (PREDICT_FALSE
+ ((ih0->ip4.ip_version_and_header_length & 0xF0) == 0x60))
+ {
+ is_ipv6 = 1;
+ ih6_0 = vlib_buffer_get_current (i_b0);
+ ip_hdr_size = sizeof (ip6_header_t);
+ next_hdr_type = IP_PROTOCOL_IPV6;
+ oh6_0 = vlib_buffer_get_current (o_b0);
+ o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip6_header_t);
+
+ oh6_0->ip6.ip_version_traffic_class_and_flow_label =
+ ih6_0->ip6.ip_version_traffic_class_and_flow_label;
+ oh6_0->ip6.protocol = IP_PROTOCOL_IPSEC_ESP;
+ oh6_0->ip6.hop_limit = 254;
+ oh6_0->ip6.src_address.as_u64[0] =
+ ih6_0->ip6.src_address.as_u64[0];
+ oh6_0->ip6.src_address.as_u64[1] =
+ ih6_0->ip6.src_address.as_u64[1];
+ oh6_0->ip6.dst_address.as_u64[0] =
+ ih6_0->ip6.dst_address.as_u64[0];
+ oh6_0->ip6.dst_address.as_u64[1] =
+ ih6_0->ip6.dst_address.as_u64[1];
+ oh6_0->esp.spi = clib_net_to_host_u32 (sa0->spi);
+ oh6_0->esp.seq = clib_net_to_host_u32 (sa0->seq);
+ ip_proto = ih6_0->ip6.protocol;
+ }
+ else
+ {
+ is_ipv6 = 0;
+ ip_hdr_size = sizeof (ip4_header_t);
+ next_hdr_type = IP_PROTOCOL_IP_IN_IP;
+ oh0 = vlib_buffer_get_current (o_b0);
+ o_esp0 = vlib_buffer_get_current (o_b0) + sizeof (ip4_header_t);
+
+ oh0->ip4.ip_version_and_header_length = 0x45;
+ oh0->ip4.tos = ih0->ip4.tos;
+ oh0->ip4.fragment_id = 0;
+ oh0->ip4.flags_and_fragment_offset = 0;
+ oh0->ip4.ttl = 254;
+ oh0->ip4.protocol = IP_PROTOCOL_IPSEC_ESP;
+ oh0->ip4.src_address.as_u32 = ih0->ip4.src_address.as_u32;
+ oh0->ip4.dst_address.as_u32 = ih0->ip4.dst_address.as_u32;
+ oh0->esp.spi = clib_net_to_host_u32 (sa0->spi);
+ oh0->esp.seq = clib_net_to_host_u32 (sa0->seq);
+ ip_proto = ih0->ip4.protocol;
+ }
+
+ if (PREDICT_TRUE (sa0->is_tunnel && !sa0->is_tunnel_ip6))
+ {
+ oh0->ip4.src_address.as_u32 = sa0->tunnel_src_addr.ip4.as_u32;
+ oh0->ip4.dst_address.as_u32 = sa0->tunnel_dst_addr.ip4.as_u32;
+
+ /* in tunnel mode send it back to FIB */
+ next0 = ESP_ENCRYPT_NEXT_IP4_INPUT;
+ vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ }
+ else if (sa0->is_tunnel && sa0->is_tunnel_ip6)
+ {
+ oh6_0->ip6.src_address.as_u64[0] =
+ sa0->tunnel_src_addr.ip6.as_u64[0];
+ oh6_0->ip6.src_address.as_u64[1] =
+ sa0->tunnel_src_addr.ip6.as_u64[1];
+ oh6_0->ip6.dst_address.as_u64[0] =
+ sa0->tunnel_dst_addr.ip6.as_u64[0];
+ oh6_0->ip6.dst_address.as_u64[1] =
+ sa0->tunnel_dst_addr.ip6.as_u64[1];
+
+ /* in tunnel mode send it back to FIB */
+ next0 = ESP_ENCRYPT_NEXT_IP6_INPUT;
+ vnet_buffer (o_b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+ }
+ else
+ {
+ transport_mode = 1;
+ ethernet_header_t *ieh0, *oeh0;
+ ieh0 = (ethernet_header_t *) i_b0->data;
+ oeh0 = (ethernet_header_t *) o_b0->data;
+ clib_memcpy (oeh0, ieh0, sizeof (ethernet_header_t));
+ vlib_buffer_advance (i_b0, ip_hdr_size);
+ next_hdr_type = ip_proto;
+ next0 = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
+ o_b0->flags |= BUFFER_OUTPUT_FEAT_DONE;
+ vnet_buffer (o_b0)->sw_if_index[VLIB_TX] =
+ vnet_buffer (i_b0)->sw_if_index[VLIB_TX];
+ vnet_buffer (o_b0)->output_features.bitmap =
+ vnet_buffer (i_b0)->output_features.bitmap;
+ }
+
+ ASSERT (sa0->crypto_alg < IPSEC_CRYPTO_N_ALG);
+
+ if (PREDICT_TRUE (sa0->crypto_alg != IPSEC_CRYPTO_ALG_NONE))
+ {
+
+ const int BLOCK_SIZE = 16;
+ const int IV_SIZE = 16;
+ int blocks = 1 + (i_b0->current_length + 1) / BLOCK_SIZE;
+
+ /* pad packet in input buffer */
+ u8 pad_bytes = BLOCK_SIZE * blocks - 2 - i_b0->current_length;
+ u8 i;
+ u8 *padding =
+ vlib_buffer_get_current (i_b0) + i_b0->current_length;
+ i_b0->current_length = BLOCK_SIZE * blocks;
+ for (i = 0; i < pad_bytes; ++i)
+ {
+ padding[i] = i + 1;
+ }
+ f0 = vlib_buffer_get_current (i_b0) + i_b0->current_length - 2;
+ f0->pad_length = pad_bytes;
+ f0->next_header = next_hdr_type;
+
+ o_b0->current_length = ip_hdr_size + sizeof (esp_header_t) +
+ BLOCK_SIZE * blocks + IV_SIZE;
+
+ vnet_buffer (o_b0)->sw_if_index[VLIB_RX] =
+ vnet_buffer (i_b0)->sw_if_index[VLIB_RX];
+
+ u8 iv[16];
+ RAND_bytes (iv, sizeof (iv));
+
+ clib_memcpy ((u8 *) vlib_buffer_get_current (o_b0) +
+ ip_hdr_size + sizeof (esp_header_t), iv, 16);
+
+ esp_encrypt_aes_cbc (sa0->crypto_alg,
+ (u8 *) vlib_buffer_get_current (i_b0),
+ (u8 *) vlib_buffer_get_current (o_b0) +
+ ip_hdr_size + sizeof (esp_header_t) +
+ IV_SIZE, BLOCK_SIZE * blocks,
+ sa0->crypto_key, iv);
+ }
+
+ o_b0->current_length += hmac_calc (sa0->integ_alg, sa0->integ_key,
+ sa0->integ_key_len,
+ (u8 *) o_esp0,
+ o_b0->current_length -
+ ip_hdr_size,
+ vlib_buffer_get_current (o_b0) +
+ o_b0->current_length,
+ sa0->use_esn, sa0->seq_hi);
+
+
+ if (PREDICT_FALSE (is_ipv6))
+ {
+ oh6_0->ip6.payload_length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0) -
+ sizeof (ip6_header_t));
+ }
+ else
+ {
+ oh0->ip4.length =
+ clib_host_to_net_u16 (vlib_buffer_length_in_chain (vm, o_b0));
+ oh0->ip4.checksum = ip4_header_checksum (&oh0->ip4);
+ }
+
+ if (transport_mode)
+ vlib_buffer_reset (o_b0);
+
+ trace:
+ if (PREDICT_FALSE (i_b0->flags & VLIB_BUFFER_IS_TRACED))
+ {
+ if (o_b0)
+ {
+ o_b0->flags |= VLIB_BUFFER_IS_TRACED;
+ o_b0->trace_index = i_b0->trace_index;
+ }
+ esp_encrypt_trace_t *tr =
+ vlib_add_trace (vm, node, o_b0, sizeof (*tr));
+ tr->spi = sa0->spi;
+ tr->seq = sa0->seq - 1;
+ tr->crypto_alg = sa0->crypto_alg;
+ tr->integ_alg = sa0->integ_alg;
+ }
+
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, o_bi0,
+ next0);
+ }
vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
vlib_node_increment_counter (vm, esp_encrypt_node.index,
- ESP_ENCRYPT_ERROR_RX_PKTS,
- from_frame->n_vectors);
+ ESP_ENCRYPT_ERROR_RX_PKTS,
+ from_frame->n_vectors);
free_buffers_and_exit:
- vlib_buffer_free (vm, recycle, vec_len(recycle));
- vec_free(recycle);
+ vlib_buffer_free (vm, recycle, vec_len (recycle));
+ vec_free (recycle);
return from_frame->n_vectors;
}
+/* *INDENT-OFF* */
VLIB_REGISTER_NODE (esp_encrypt_node) = {
.function = esp_encrypt_node_fn,
.name = "esp-encrypt",
@@ -410,6 +431,13 @@ VLIB_REGISTER_NODE (esp_encrypt_node) = {
#undef _
},
};
+/* *INDENT-ON* */
VLIB_NODE_FUNCTION_MULTIARCH (esp_encrypt_node, esp_encrypt_node_fn)
-
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */