aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSzymon Sliwa <szs@semihalf.com>2018-01-26 12:30:41 +0100
committerSzymon Sliwa <szs@semihalf.com>2018-02-06 14:48:32 +0100
commitafb5b006e4451a9dcb3270d4a5755413e65f5497 (patch)
treee5a8de9f1c1bda994b9a9248cab18d8506b052c4
parent9775521a9ca1466e9a02fe4d69afafc55dcbc09c (diff)
plugins: odp: Add support for async crypto mode
By default ipsec picks asynchronous crypto. After the operation it may turn out that the operation was performed synchronously anyway; in such a case the packet is sent further by the esp_* node because no notification event will be sent about the crypto completion. To use asynchronous mode put async in the odp section of the startup.conf file, like this: odp { async } Falls back to synchronous mode. Change-Id: I5301df5f1c93a5ccd53a9c0ed2c4cacb9ca5fdd4 Signed-off-by: Szymon Sliwa <szs@semihalf.com>
-rw-r--r--src/plugins/odp.am3
-rw-r--r--src/plugins/odp/ipsec/crypto_input.c155
-rw-r--r--src/plugins/odp/ipsec/esp_decrypt.c106
-rw-r--r--src/plugins/odp/ipsec/esp_encrypt.c104
-rw-r--r--src/plugins/odp/ipsec/ipsec.c40
-rwxr-xr-xsrc/plugins/odp/odp_packet.c1
-rw-r--r--src/vpp/conf/startup.conf3
7 files changed, 391 insertions, 21 deletions
diff --git a/src/plugins/odp.am b/src/plugins/odp.am
index fb874f71..88c5c166 100644
--- a/src/plugins/odp.am
+++ b/src/plugins/odp.am
@@ -23,7 +23,8 @@ odp_plugin_la_SOURCES = odp/cli.c \
odp/thread.c \
odp/ipsec/ipsec.c \
odp/ipsec/esp_encrypt.c \
- odp/ipsec/esp_decrypt.c
+ odp/ipsec/esp_decrypt.c \
+ odp/ipsec/crypto_input.c
noinst_HEADERS += odp/odp_packet.h \
odp/ipsec/ipsec.h \
diff --git a/src/plugins/odp/ipsec/crypto_input.c b/src/plugins/odp/ipsec/crypto_input.c
new file mode 100644
index 00000000..03d2ccd8
--- /dev/null
+++ b/src/plugins/odp/ipsec/crypto_input.c
@@ -0,0 +1,155 @@
+#include <vlib/vlib.h>
+#include <vnet/ip/ip.h>
+
+#include <odp/ipsec/ipsec.h>
+#include <odp/ipsec/esp.h>
+#include <odp/odp_packet.h>
+
+#include <assert.h>
+
+#define foreach_odp_crypto_input_next \
+ _(DROP, "error-drop") \
+ _(ENCRYPT_POST, "odp-crypto-esp-encrypt-post") \
+ _(DECRYPT_POST, "odp-crypto-esp-decrypt-post")
+
+typedef enum
+{
+#define _(f, s) ODP_CRYPTO_INPUT_NEXT_##f,
+ foreach_odp_crypto_input_next
+#undef _
+ODP_CRYPTO_INPUT_N_NEXT,
+} odp_crypto_input_next_t;
+
+#define foreach_crypto_input_error \
+_(DEQUE_COP, "Dequed crypto operations")
+
+typedef enum
+{
+#define _(sym,str) CRYPTO_INPUT_ERROR_##sym,
+ foreach_crypto_input_error
+#undef _
+} crypto_input_error_t;
+
+static char *crypto_input_error_strings[] = {
+#define _(sym,string) string,
+ foreach_crypto_input_error
+#undef _
+};
+
+typedef struct
+{
+ u32 next_index;
+ odp_packet_t pkt;
+} odp_packet_crypto_trace_t;
+
+/* Trace formatter for odp-crypto-input.
+   Consumes the per-packet trace record (odp_packet_crypto_trace_t)
+   added by odp_dequeue_cops and prints the captured next_index;
+   the original ignored the third va_arg and printed nothing useful. */
+static u8 *
+format_odp_crypto_input_trace (u8 * s, va_list * args)
+{
+  CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+  CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+  odp_packet_crypto_trace_t *tr = va_arg (*args, odp_packet_crypto_trace_t *);
+
+  s = format (s, " odp-crypto-input: next_index %u", tr->next_index);
+
+  return s;
+}
+
+/* Drain completed crypto operations from 'queue' and enqueue the
+   corresponding vlib buffers to 'next_node_index' (the matching
+   esp-*-post node).  Returns the number of dequeued completion events. */
+static uword
+odp_dequeue_cops (vlib_main_t * vm, vlib_node_runtime_t * node,
+		  vlib_frame_t * frame, odp_queue_t queue, u32 next_node_index)
+{
+  u32 next_index = next_node_index, n_deq, n_cops, *to_next = 0;
+  const int MAX_EVENTS = (1 << 8);
+  odp_event_t events[MAX_EVENTS];
+
+  n_deq = odp_queue_deq_multi (queue, events, MAX_EVENTS);
+
+  n_cops = n_deq;
+
+  int index = 0;
+  while (n_cops > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_cops > 0 && n_left_to_next > 0)
+	{
+	  odp_event_t event = events[index++];
+
+	  ASSERT (ODP_EVENT_CRYPTO_COMPL == odp_event_type (event));
+
+	  odp_crypto_compl_t compl;
+	  odp_crypto_op_result_t result;
+	  odp_packet_t pkt;
+	  vlib_buffer_t *b0;
+	  u32 bi0;
+
+	  compl = odp_crypto_compl_from_event (event);
+	  odp_crypto_compl_result (compl, &result);
+	  /* Completion events must be released back to ODP after the
+	     result is extracted, otherwise they leak (ODP crypto API). */
+	  odp_crypto_compl_free (compl);
+	  pkt = result.pkt;
+
+	  b0 = vlib_buffer_from_odp_packet (pkt);
+	  bi0 = vlib_get_buffer_index (vm, b0);
+
+	  to_next[0] = bi0;
+	  to_next += 1;
+
+	  n_cops -= 1;
+	  n_left_to_next -= 1;
+
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    {
+	      odp_packet_crypto_trace_t *tr;
+	      /* fix: was sizeof (*tR) — undeclared identifier */
+	      tr = vlib_add_trace (vm, node, b0, sizeof (*tr));
+	      tr->next_index = next_index;
+	      tr->pkt = pkt;	/* was left uninitialized */
+	    }
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next, bi0,
+					   next_node_index);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  vlib_node_increment_counter (vm, odp_crypto_input_node.index,
+			       CRYPTO_INPUT_ERROR_DEQUE_COP, n_deq);
+
+  return n_deq;
+}
+
+/* Input node function: polls this worker's post-encrypt and
+   post-decrypt completion queues and dispatches finished packets
+   to the corresponding esp-*-post nodes.  Returns the total number
+   of completion events dequeued this dispatch. */
+static uword
+odp_crypto_input_node_fn (vlib_main_t * vm, vlib_node_runtime_t * node,
+			  vlib_frame_t * frame)
+{
+  odp_crypto_main_t *ocm = &odp_crypto_main;
+  u32 thread_index = vlib_get_thread_index ();
+  /* per-worker completion queues, created in ipsec_init () */
+  odp_crypto_worker_main_t *cwm =
+    vec_elt_at_index (ocm->workers, thread_index);
+  u32 n_cops_dequeued = 0;
+  n_cops_dequeued += odp_dequeue_cops(vm, node, frame, cwm->post_encrypt, ODP_CRYPTO_INPUT_NEXT_ENCRYPT_POST);
+  n_cops_dequeued += odp_dequeue_cops(vm, node, frame, cwm->post_decrypt, ODP_CRYPTO_INPUT_NEXT_DECRYPT_POST);
+  return n_cops_dequeued;
+}
+
+
+VLIB_REGISTER_NODE (odp_crypto_input_node) =
+{
+ .function = odp_crypto_input_node_fn,
+ .name = "odp-crypto-input",
+ .format_trace = format_odp_crypto_input_trace,
+ .type = VLIB_NODE_TYPE_INPUT,
+ .state = VLIB_NODE_STATE_DISABLED,
+
+ .n_errors = ARRAY_LEN(crypto_input_error_strings),
+ .error_strings = crypto_input_error_strings,
+
+ .n_next_nodes = ODP_CRYPTO_INPUT_N_NEXT,
+ .next_nodes =
+ {
+#define _(s, n) [ODP_CRYPTO_INPUT_NEXT_##s] = n,
+ foreach_odp_crypto_input_next
+#undef _
+ },
+};
+
+VLIB_NODE_FUNCTION_MULTIARCH (odp_crypto_input_node, odp_crypto_input_node_fn);
+
+
+
diff --git a/src/plugins/odp/ipsec/esp_decrypt.c b/src/plugins/odp/ipsec/esp_decrypt.c
index 9086f33a..1ef72fe8 100644
--- a/src/plugins/odp/ipsec/esp_decrypt.c
+++ b/src/plugins/odp/ipsec/esp_decrypt.c
@@ -70,6 +70,7 @@ typedef struct
} esp_decrypt_trace_t;
vlib_node_registration_t odp_crypto_esp_decrypt_node;
+vlib_node_registration_t odp_crypto_esp_decrypt_post_node;
/* packet trace format function */
static u8 *
@@ -85,6 +86,17 @@ format_esp_decrypt_trace (u8 * s, va_list * args)
return s;
}
+/* packet trace format function */
+static u8 *
+format_esp_decrypt_post_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+
+ s = format (s, "POST DECRYPT CRYPTO (ODP)");
+ return s;
+}
+
static uword
esp_decrypt_node_fn (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * from_frame)
@@ -117,6 +129,7 @@ esp_decrypt_node_fn (vlib_main_t * vm,
while (n_left_from > 0)
{
u32 n_left_to_next;
+ u32 buffers_passed = 0;
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
@@ -134,7 +147,9 @@ esp_decrypt_node_fn (vlib_main_t * vm,
u8 tunnel_mode = 1;
u8 transport_ip6 = 0;
sa_data_t *sa_sess_data;
-
+ odp_crypto_op_param_t crypto_op_params;
+ odp_crypto_op_result_t result;
+ odp_bool_t posted = 0;
bi0 = from[0];
from += 1;
@@ -188,10 +203,6 @@ esp_decrypt_node_fn (vlib_main_t * vm,
}
}
- odp_crypto_op_param_t crypto_op_params;
- odp_bool_t posted = 0;
- odp_crypto_op_result_t result;
-
crypto_op_params.session = sa_sess_data->sess;
crypto_op_params.ctx = NULL;
crypto_op_params.aad.ptr = NULL;
@@ -399,23 +410,28 @@ esp_decrypt_node_fn (vlib_main_t * vm,
next0 = ESP_DECRYPT_NEXT_IPSEC_GRE_INPUT;
vnet_buffer (b0)->sw_if_index[VLIB_TX] = (u32) ~ 0;
+
+ vnet_buffer (b0)->post_crypto.next_index = next0;
}
trace:
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- b0->flags |= VLIB_BUFFER_IS_TRACED;
- b0->trace_index = b0->trace_index;
esp_decrypt_trace_t *tr =
vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->crypto_alg = sa0->crypto_alg;
tr->integ_alg = sa0->integ_alg;
}
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
- n_left_to_next, bi0, next0);
+ if (!posted)
+ {
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index, to_next,
+ n_left_to_next, bi0, next0);
+ buffers_passed += 1;
+ }
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ if (buffers_passed > 0)
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
vlib_node_increment_counter (vm, odp_crypto_esp_decrypt_node.index,
ESP_DECRYPT_ERROR_RX_PKTS,
@@ -450,6 +466,76 @@ VLIB_REGISTER_NODE (odp_crypto_esp_decrypt_node) = {
/* *INDENT-ON* */
VLIB_NODE_FUNCTION_MULTIARCH (odp_crypto_esp_decrypt_node, esp_decrypt_node_fn)
+ static uword
+ esp_decrypt_post_node_fn (vlib_main_t * vm,
+			   vlib_node_runtime_t * node,
+			   vlib_frame_t * from_frame)
+{
+  /* Second half of asynchronous ESP decrypt: runs once the ODP crypto
+     completion has been dequeued by odp-crypto-input; forwards each
+     buffer to the next index recorded before the crypto op was posted. */
+  u32 n_left_from, *from, *to_next = 0, next_index;
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0, next0;
+	  vlib_buffer_t *b0 = 0;
+
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  to_next[0] = bi0;
+	  to_next += 1;
+
+	  /* next node chosen by esp_decrypt_node_fn before posting */
+	  next0 = vnet_buffer (b0)->post_crypto.next_index;
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next, bi0,
+					   next0);
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    vlib_add_trace (vm, node, b0, 0);
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+
+    }
+  vlib_node_increment_counter (vm, odp_crypto_esp_decrypt_post_node.index,
+			       ESP_DECRYPT_ERROR_RX_PKTS,
+			       from_frame->n_vectors);
+
+  return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (odp_crypto_esp_decrypt_post_node) = {
+ .function = esp_decrypt_post_node_fn,
+ .name = "odp-crypto-esp-decrypt-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_decrypt_post_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_decrypt_error_strings),
+ .error_strings = esp_decrypt_error_strings,
+
+ .n_next_nodes = ESP_DECRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_DECRYPT_NEXT_##s] = n,
+ foreach_esp_decrypt_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/odp/ipsec/esp_encrypt.c b/src/plugins/odp/ipsec/esp_encrypt.c
index 65c4c60f..3e4517f1 100644
--- a/src/plugins/odp/ipsec/esp_encrypt.c
+++ b/src/plugins/odp/ipsec/esp_encrypt.c
@@ -69,6 +69,7 @@ typedef struct
} esp_encrypt_trace_t;
vlib_node_registration_t odp_crypto_esp_encrypt_node;
+vlib_node_registration_t odp_crypto_esp_encrypt_post_node;
/* packet trace format function */
static u8 *
@@ -85,6 +86,18 @@ format_esp_encrypt_trace (u8 * s, va_list * args)
return s;
}
+/* packet trace format function */
+static u8 *
+format_esp_encrypt_post_trace (u8 * s, va_list * args)
+{
+ CLIB_UNUSED (vlib_main_t * vm) = va_arg (*args, vlib_main_t *);
+ CLIB_UNUSED (vlib_node_t * node) = va_arg (*args, vlib_node_t *);
+
+ s = format (s, "POST ENCRYPT CRYPTO (ODP) esp");
+ return s;
+}
+
+
static uword
esp_encrypt_node_fn (vlib_main_t * vm,
vlib_node_runtime_t * node, vlib_frame_t * from_frame)
@@ -118,6 +131,7 @@ esp_encrypt_node_fn (vlib_main_t * vm,
while (n_left_from > 0)
{
u32 n_left_to_next;
+ u32 buffers_passed = 0;
vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
@@ -138,6 +152,7 @@ esp_encrypt_node_fn (vlib_main_t * vm,
u32 ip_proto = 0;
u8 transport_mode = 0;
sa_data_t *sa_sess_data;
+ odp_bool_t posted = 0;
bi0 = from[0];
from += 1;
@@ -331,7 +346,6 @@ esp_encrypt_node_fn (vlib_main_t * vm,
BLOCK_SIZE * blocks + sizeof (esp_header_t) + IV_SIZE;
odp_crypto_op_param_t crypto_op_params;
- odp_bool_t posted = 0;
odp_crypto_op_result_t result;
crypto_op_params.session = sa_sess_data->sess;
@@ -370,6 +384,8 @@ esp_encrypt_node_fn (vlib_main_t * vm,
b0->current_length +=
em->esp_integ_algs[sa0->integ_alg].trunc_size;
+ vnet_buffer (b0)->post_crypto.next_index = next0;
+
int ret =
odp_crypto_operation (&crypto_op_params, &posted, &result);
if (ret != 0)
@@ -406,8 +422,6 @@ esp_encrypt_node_fn (vlib_main_t * vm,
trace:
if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
{
- b0->flags |= VLIB_BUFFER_IS_TRACED;
- b0->trace_index = b0->trace_index;
esp_encrypt_trace_t *tr =
vlib_add_trace (vm, node, b0, sizeof (*tr));
tr->spi = sa0->spi;
@@ -416,11 +430,17 @@ esp_encrypt_node_fn (vlib_main_t * vm,
tr->integ_alg = sa0->integ_alg;
}
- vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
- to_next, n_left_to_next, bi0,
- next0);
+ if (!posted)
+ {
+ vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+ to_next, n_left_to_next, bi0,
+ next0);
+ buffers_passed += 1;
+ }
+
}
- vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+ if (buffers_passed > 0)
+ vlib_put_next_frame (vm, node, next_index, n_left_to_next);
}
vlib_node_increment_counter (vm, odp_crypto_esp_encrypt_node.index,
ESP_ENCRYPT_ERROR_RX_PKTS,
@@ -455,6 +475,76 @@ VLIB_REGISTER_NODE (odp_crypto_esp_encrypt_node) = {
/* *INDENT-ON* */
VLIB_NODE_FUNCTION_MULTIARCH (odp_crypto_esp_encrypt_node, esp_encrypt_node_fn)
+ static uword
+ esp_encrypt_post_node_fn (vlib_main_t * vm,
+			   vlib_node_runtime_t * node,
+			   vlib_frame_t * from_frame)
+{
+  /* Second half of asynchronous ESP encrypt: runs once the ODP crypto
+     completion has been dequeued by odp-crypto-input; forwards each
+     buffer to the next index recorded before the crypto op was posted. */
+  u32 n_left_from, *from, *to_next = 0, next_index;
+  from = vlib_frame_vector_args (from_frame);
+  n_left_from = from_frame->n_vectors;
+
+  next_index = node->cached_next_index;
+
+  while (n_left_from > 0)
+    {
+      u32 n_left_to_next;
+
+      vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
+
+      while (n_left_from > 0 && n_left_to_next > 0)
+	{
+	  u32 bi0, next0;
+	  vlib_buffer_t *b0 = 0;
+
+	  bi0 = from[0];
+	  from += 1;
+	  n_left_from -= 1;
+	  n_left_to_next -= 1;
+
+	  b0 = vlib_get_buffer (vm, bi0);
+
+	  to_next[0] = bi0;
+	  to_next += 1;
+
+	  /* next node chosen by esp_encrypt_node_fn before posting */
+	  next0 = vnet_buffer (b0)->post_crypto.next_index;
+
+	  vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
+					   to_next, n_left_to_next, bi0,
+					   next0);
+	  if (PREDICT_FALSE (b0->flags & VLIB_BUFFER_IS_TRACED))
+	    vlib_add_trace (vm, node, b0, 0);
+
+	}
+      vlib_put_next_frame (vm, node, next_index, n_left_to_next);
+    }
+  vlib_node_increment_counter (vm, odp_crypto_esp_encrypt_post_node.index,
+			       ESP_ENCRYPT_ERROR_RX_PKTS,
+			       from_frame->n_vectors);
+
+  return from_frame->n_vectors;
+}
+
+/* *INDENT-OFF* */
+VLIB_REGISTER_NODE (odp_crypto_esp_encrypt_post_node) = {
+ .function = esp_encrypt_post_node_fn,
+ .name = "odp-crypto-esp-encrypt-post",
+ .vector_size = sizeof (u32),
+ .format_trace = format_esp_encrypt_post_trace,
+ .type = VLIB_NODE_TYPE_INTERNAL,
+
+ .n_errors = ARRAY_LEN(esp_encrypt_error_strings),
+ .error_strings = esp_encrypt_error_strings,
+
+ .n_next_nodes = ESP_ENCRYPT_N_NEXT,
+ .next_nodes = {
+#define _(s,n) [ESP_ENCRYPT_NEXT_##s] = n,
+ foreach_esp_encrypt_next
+#undef _
+ },
+};
+/* *INDENT-ON* */
+
/*
* fd.io coding-style-patch-verification: ON
*
diff --git a/src/plugins/odp/ipsec/ipsec.c b/src/plugins/odp/ipsec/ipsec.c
index 339240f6..40146b20 100644
--- a/src/plugins/odp/ipsec/ipsec.c
+++ b/src/plugins/odp/ipsec/ipsec.c
@@ -71,6 +71,11 @@ create_sess (ipsec_sa_t * sa, sa_data_t * sa_sess_data, int is_outbound)
odp_crypto_session_param_t crypto_params;
odp_crypto_session_param_init (&crypto_params);
+ odp_crypto_main_t *ocm = &odp_crypto_main;
+ u32 thread_index = vlib_get_thread_index ();
+ odp_crypto_worker_main_t *cwm =
+ vec_elt_at_index (ocm->workers, thread_index);
+
esp_main_t *em = &odp_esp_main;
int trunc_size = em->esp_integ_algs[sa->integ_alg].trunc_size;
@@ -80,9 +85,9 @@ create_sess (ipsec_sa_t * sa, sa_data_t * sa_sess_data, int is_outbound)
crypto_params.auth_cipher_text = 1;
- /* Synchronous mode */
- crypto_params.pref_mode = ODP_CRYPTO_SYNC;
- crypto_params.compl_queue = ODP_QUEUE_INVALID;
+ crypto_params.pref_mode = ODP_CRYPTO_ASYNC;
+ crypto_params.compl_queue =
+ (is_outbound ? cwm->post_encrypt : cwm->post_decrypt);
crypto_params.output_pool = ODP_POOL_INVALID;
if (sa->crypto_alg == IPSEC_CRYPTO_ALG_AES_CBC_128)
@@ -209,6 +214,7 @@ ipsec_init (vlib_main_t * vm)
odp_crypto_main_t *ocm = &odp_crypto_main;
vlib_thread_main_t *tm = vlib_get_thread_main ();
vlib_node_t *ipsec_node, *crypto_node, *error_node;
+ odp_crypto_worker_main_t *cwm;
memset (im, 0, sizeof (im[0]));
@@ -249,8 +255,36 @@ ipsec_init (vlib_main_t * vm)
vec_alloc (ocm->workers, tm->n_vlib_mains);
_vec_len (ocm->workers) = tm->n_vlib_mains;
+ for (cwm = ocm->workers + 1; cwm < vec_end (ocm->workers); cwm++)
+ {
+ cwm->post_encrypt = odp_queue_create (NULL, NULL);
+ cwm->post_decrypt = odp_queue_create (NULL, NULL);
+ }
+
esp_init ();
+ int i;
+ for (i = 1; i < tm->n_vlib_mains; i++)
+ vlib_node_set_state (vlib_mains[i], odp_crypto_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+
+ /* If there are no worker threads, enable polling
+ crypto devices on the main thread, else
+ assign the post crypt queues of the second
+ thread to the main thread crypto sessions */
+ if (tm->n_vlib_mains == 1)
+ {
+ ocm->workers[0].post_encrypt = odp_queue_create (NULL, NULL);
+ ocm->workers[0].post_decrypt = odp_queue_create (NULL, NULL);
+ vlib_node_set_state (vlib_mains[0], odp_crypto_input_node.index,
+ VLIB_NODE_STATE_POLLING);
+ }
+ else
+ {
+ ocm->workers[0].post_encrypt = ocm->workers[1].post_encrypt;
+ ocm->workers[0].post_decrypt = ocm->workers[1].post_decrypt;
+ }
+
return 0;
}
diff --git a/src/plugins/odp/odp_packet.c b/src/plugins/odp/odp_packet.c
index fb9ab192..43ad6772 100755
--- a/src/plugins/odp/odp_packet.c
+++ b/src/plugins/odp/odp_packet.c
@@ -10,6 +10,7 @@
#include <vlib/unix/unix.h>
#include <vnet/ip/ip.h>
#include <vnet/ethernet/ethernet.h>
+#include <odp/ipsec/ipsec.h>
#include <vnet/plugin/plugin.h>
#include <vpp/app/version.h>
#include <odp/odp_packet.h>
diff --git a/src/vpp/conf/startup.conf b/src/vpp/conf/startup.conf
index 6947d378..1aee5ca0 100644
--- a/src/vpp/conf/startup.conf
+++ b/src/vpp/conf/startup.conf
@@ -132,6 +132,9 @@ cpu {
## Make use of ODP crypto API to accelerate IPsec
# enable-odp-crypto
+
+ ## To use asynchronous mode of the crypto operations
+ # async
# }
# Adjusting the plugin path depending on where the VPP plugins are: