From 9a9604b09f15691d7c4ddf29afd99a31e7e31eed Mon Sep 17 00:00:00 2001
From: Xiaoming Jiang
Date: Thu, 9 Mar 2023 02:03:50 +0000
Subject: crypto: make crypto-dispatch node work in adaptive mode

This patch makes the crypto-dispatch node switch adaptively between
polling and interrupt mode, improving overall VPP performance.

Type: improvement

Signed-off-by: Xiaoming Jiang
Change-Id: I845ed1d29ba9f3c507ea95a337f6dca7f8d6e24e
---
 src/plugins/crypto_sw_scheduler/main.c        |  14 +--
 .../dpdk/cryptodev/cryptodev_raw_data_path.c  |   3 +-
 src/plugins/wireguard/wireguard.c             |   2 -
 src/vnet/crypto/cli.c                         |  50 +--------
 src/vnet/crypto/crypto.api                    |   1 +
 src/vnet/crypto/crypto.c                      | 118 +--------------------
 src/vnet/crypto/crypto.h                      |  20 +---
 src/vnet/crypto/crypto_api.c                  |   2 -
 src/vnet/crypto/node.c                        |  23 +++-
 src/vnet/ipsec/ipsec.c                        |  25 +----
 src/vnet/ipsec/ipsec.h                        |   5 +-
 src/vnet/ipsec/ipsec_sa.c                     |   2 -
 test/test_ipsec_esp.py                        |   5 -
 13 files changed, 33 insertions(+), 237 deletions(-)

diff --git a/src/plugins/crypto_sw_scheduler/main.c b/src/plugins/crypto_sw_scheduler/main.c
index abdffab2b9c..1cc7a08ca11 100644
--- a/src/plugins/crypto_sw_scheduler/main.c
+++ b/src/plugins/crypto_sw_scheduler/main.c
@@ -25,14 +25,14 @@ crypto_sw_scheduler_set_worker_crypto (u32 worker_idx, u8 enabled)
   crypto_sw_scheduler_main_t *cm = &crypto_sw_scheduler_main;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   crypto_sw_scheduler_per_thread_data_t *ptd = 0;
-  u32 count = 0, i = vlib_num_workers () > 0;
+  u32 count = 0, i;
 
   if (worker_idx >= vlib_num_workers ())
     {
      return VNET_API_ERROR_INVALID_VALUE;
    }
 
-  for (; i < tm->n_vlib_mains; i++)
+  for (i = 0; i < tm->n_vlib_mains; i++)
    {
      ptd = cm->per_thread_data + i;
      count += ptd->self_crypto_enabled;
@@ -458,11 +458,6 @@ crypto_sw_scheduler_process_aead (vlib_main_t *vm,
   u32 tail, head;
   u8 found = 0;
 
-  u8 recheck_queues =
-    crypto_main.dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT;
-
-run_half_queues:
-
   /* get a pending frame to process */
   if (ptd->self_crypto_enabled)
     {
@@ -573,11 +568,6 @@ crypto_sw_scheduler_process_aead (vlib_main_t *vm,
 	  return f;
 	}
 
-  if (!found && recheck_queues)
-    {
-      recheck_queues = 0;
-      goto run_half_queues;
-    }
   return 0;
 }
 
diff --git a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
index a8265b82c82..3a2f46e6739 100644
--- a/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
+++ b/src/plugins/dpdk/cryptodev/cryptodev_raw_data_path.c
@@ -516,8 +516,7 @@ cryptodev_raw_dequeue (vlib_main_t *vm, u32 *nb_elts_processed,
 	}
     }
 
-  if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT &&
-      inflight > 0)
+  if (inflight > 0)
     vlib_node_set_interrupt_pending (vlib_get_main_by_index (vm->thread_index),
 				     cm->crypto_node_index);
 
diff --git a/src/plugins/wireguard/wireguard.c b/src/plugins/wireguard/wireguard.c
index 5d73638f8f9..a87f73dc6f2 100644
--- a/src/plugins/wireguard/wireguard.c
+++ b/src/plugins/wireguard/wireguard.c
@@ -30,8 +30,6 @@ wg_async_post_next_t wg_decrypt_async_next;
 void
 wg_set_async_mode (u32 is_enabled)
 {
-  vnet_crypto_request_async_mode (is_enabled);
-
   if (is_enabled)
     wg_op_mode_set_ASYNC ();
   else
diff --git a/src/vnet/crypto/cli.c b/src/vnet/crypto/cli.c
index 4ee14ac1100..4851217d9f3 100644
--- a/src/vnet/crypto/cli.c
+++ b/src/vnet/crypto/cli.c
@@ -316,7 +316,6 @@ show_crypto_async_status_command_fn (vlib_main_t * vm,
 				     vlib_cli_command_t * cmd)
 {
   vnet_crypto_main_t *cm = &crypto_main;
-  u32 skip_master = vlib_num_workers () > 0;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   unformat_input_t _line_input, *line_input = &_line_input;
   int i;
@@ -324,12 +323,7 @@ show_crypto_async_status_command_fn (vlib_main_t * vm,
   if (unformat_user (input, unformat_line_input, line_input))
     unformat_free (line_input);
 
-  vlib_cli_output (vm, "Crypto async dispatch mode: %s",
-		   cm->dispatch_mode ==
-		   VNET_CRYPTO_ASYNC_DISPATCH_POLLING ? "POLLING" :
-		   "INTERRUPT");
-
-  for (i = skip_master; i < tm->n_vlib_mains; i++)
+  for (i = 0; i < tm->n_vlib_mains; i++)
     {
       vlib_node_state_t state = vlib_node_get_state (
 	vlib_get_main_by_index (i), cm->crypto_node_index);
@@ -435,48 +429,6 @@ VLIB_CLI_COMMAND (set_crypto_async_handler_command, static) =
 };
 /* *INDENT-ON* */
 
-static inline void
-print_crypto_async_dispatch_warning ()
-{
-  clib_warning ("Switching dispatch mode might not work is some situations.");
-  clib_warning
-    ("Use 'show crypto async status' to verify that the nodes' states were set");
-  clib_warning ("and if not, set 'crypto async dispatch' mode again.");
-}
-
-static clib_error_t *
-set_crypto_async_dispatch_polling_command_fn (vlib_main_t * vm,
-					      unformat_input_t * input,
-					      vlib_cli_command_t * cmd)
-{
-  print_crypto_async_dispatch_warning ();
-  vnet_crypto_set_async_dispatch_mode (VNET_CRYPTO_ASYNC_DISPATCH_POLLING);
-  return 0;
-}
-
-static clib_error_t *
-set_crypto_async_dispatch_interrupt_command_fn (vlib_main_t * vm,
-						unformat_input_t * input,
-						vlib_cli_command_t * cmd)
-{
-  print_crypto_async_dispatch_warning ();
-  vnet_crypto_set_async_dispatch_mode (VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT);
-  return 0;
-}
-
-/* *INDENT-OFF* */
-VLIB_CLI_COMMAND (set_crypto_async_dispatch_polling_command, static) =
-{
-  .path = "set crypto async dispatch polling",
-  .short_help = "set crypto async dispatch polling|interrupt",
-  .function = set_crypto_async_dispatch_polling_command_fn,
-};
-VLIB_CLI_COMMAND (set_crypto_async_dispatch_interrupt_command, static) =
-{
-  .path = "set crypto async dispatch interrupt",
-  .short_help = "set crypto async dispatch polling|interrupt",
-  .function = set_crypto_async_dispatch_interrupt_command_fn,
-};
 
 /*
  * fd.io coding-style-patch-verification: ON
  *
diff --git a/src/vnet/crypto/crypto.api b/src/vnet/crypto/crypto.api
index 6eccd8524ba..61553e82fe3 100644
--- a/src/vnet/crypto/crypto.api
+++ b/src/vnet/crypto/crypto.api
@@ -36,6 +36,7 @@ enum crypto_op_class_type:u8
 
 autoreply define crypto_set_async_dispatch {
+  option deprecated;
   u32 client_index;
   u32 context;
   vl_api_crypto_dispatch_mode_t mode;
diff --git a/src/vnet/crypto/crypto.c b/src/vnet/crypto/crypto.c
index 156dab46517..5d951a118b8 100644
--- a/src/vnet/crypto/crypto.c
+++ b/src/vnet/crypto/crypto.c
@@ -284,8 +284,6 @@ vnet_crypto_register_enqueue_handler (vlib_main_t *vm, u32 engine_index,
   vnet_crypto_async_op_data_t *otd = cm->async_opt_data + opt;
   vec_validate_aligned (cm->enqueue_handlers, VNET_CRYPTO_ASYNC_OP_N_IDS,
 			CLIB_CACHE_LINE_BYTES);
-  vec_validate_aligned (cm->dequeue_handlers, VNET_CRYPTO_ASYNC_OP_N_IDS,
-			CLIB_CACHE_LINE_BYTES);
 
   if (!enqueue_hdl)
     return;
@@ -370,6 +368,8 @@ vnet_crypto_register_dequeue_handler (vlib_main_t *vm, u32 engine_index,
 
   e->dequeue_handler = deq_fn;
 
+  vnet_crypto_update_cm_dequeue_handlers ();
+
   return;
 }
 
@@ -527,41 +527,6 @@ vnet_crypto_key_add_linked (vlib_main_t * vm,
   return index;
 }
 
-clib_error_t *
-crypto_dispatch_enable_disable (int is_enable)
-{
-  vnet_crypto_main_t *cm = &crypto_main;
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
-  u32 skip_master = vlib_num_workers () > 0, i;
-  vlib_node_state_t state = VLIB_NODE_STATE_DISABLED;
-  u8 state_change = 0;
-
-  CLIB_MEMORY_STORE_BARRIER ();
-  if (is_enable && cm->async_refcnt > 0)
-    {
-      state_change = 1;
-      state =
-	cm->dispatch_mode ==
-	VNET_CRYPTO_ASYNC_DISPATCH_POLLING ? VLIB_NODE_STATE_POLLING :
-	VLIB_NODE_STATE_INTERRUPT;
-    }
-
-  if (!is_enable && cm->async_refcnt == 0)
-    {
-      state_change = 1;
-      state = VLIB_NODE_STATE_DISABLED;
-    }
-
-  if (state_change)
-    for (i = skip_master; i < tm->n_vlib_mains; i++)
-      {
-	vlib_main_t *ovm = vlib_get_main_by_index (i);
-	if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
-	  vlib_node_set_state (ovm, cm->crypto_node_index, state);
-      }
-  return 0;
-}
-
 static_always_inline void
 crypto_set_active_async_engine (vnet_crypto_async_op_data_t * od,
 				vnet_crypto_async_op_id_t id, u32 ei)
@@ -573,7 +538,6 @@ crypto_set_active_async_engine (vnet_crypto_async_op_data_t * od,
     {
       od->active_engine_index_async = ei;
       cm->enqueue_handlers[id] = ce->enqueue_handlers[id];
-      cm->dequeue_handlers[id] = ce->dequeue_handler;
     }
 }
 
@@ -585,9 +549,6 @@ vnet_crypto_set_async_handler2 (char *alg_name, char *engine)
   vnet_crypto_async_alg_data_t *ad;
   int i;
 
-  if (cm->async_refcnt)
-    return -EBUSY;
-
   p = hash_get_mem (cm->async_alg_index_by_name, alg_name);
   if (!p)
     return -1;
@@ -644,80 +605,6 @@ vnet_crypto_register_post_node (vlib_main_t * vm, char *post_node_name)
   return nn->next_idx;
 }
 
-void
-vnet_crypto_request_async_mode (int is_enable)
-{
-  vnet_crypto_main_t *cm = &crypto_main;
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
-  u32 skip_master = vlib_num_workers () > 0, i;
-  vlib_node_state_t state = VLIB_NODE_STATE_DISABLED;
-  u8 state_change = 0;
-
-  CLIB_MEMORY_STORE_BARRIER ();
-  if (is_enable && cm->async_refcnt == 0)
-    {
-      state_change = 1;
-      state =
-	cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_POLLING ?
-	  VLIB_NODE_STATE_POLLING : VLIB_NODE_STATE_INTERRUPT;
-    }
-  if (!is_enable && cm->async_refcnt == 1)
-    {
-      state_change = 1;
-      state = VLIB_NODE_STATE_DISABLED;
-    }
-
-  if (state_change)
-    {
-
-      for (i = skip_master; i < tm->n_vlib_mains; i++)
-	{
-	  vlib_main_t *ovm = vlib_get_main_by_index (i);
-	  if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
-	    vlib_node_set_state (ovm, cm->crypto_node_index, state);
-	}
-
-      if (is_enable)
-	vnet_crypto_update_cm_dequeue_handlers ();
-    }
-
-  if (is_enable)
-    cm->async_refcnt += 1;
-  else if (cm->async_refcnt > 0)
-    cm->async_refcnt -= 1;
-}
-
-void
-vnet_crypto_set_async_dispatch_mode (u8 mode)
-{
-  vnet_crypto_main_t *cm = &crypto_main;
-  u32 skip_master = vlib_num_workers () > 0, i;
-  vlib_thread_main_t *tm = vlib_get_thread_main ();
-  vlib_node_state_t state = VLIB_NODE_STATE_DISABLED;
-
-  CLIB_MEMORY_STORE_BARRIER ();
-  cm->dispatch_mode = mode;
-  if (mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
-    {
-      state =
-	cm->async_refcnt == 0 ?
-	  VLIB_NODE_STATE_DISABLED : VLIB_NODE_STATE_INTERRUPT;
-    }
-  else if (mode == VNET_CRYPTO_ASYNC_DISPATCH_POLLING)
-    {
-      state =
-	cm->async_refcnt == 0 ?
-	  VLIB_NODE_STATE_DISABLED : VLIB_NODE_STATE_POLLING;
-    }
-
-  for (i = skip_master; i < tm->n_vlib_mains; i++)
-    {
-      vlib_main_t *ovm = vlib_get_main_by_index (i);
-      if (state != vlib_node_get_state (ovm, cm->crypto_node_index))
-	vlib_node_set_state (ovm, cm->crypto_node_index, state);
-    }
-}
-
 int
 vnet_crypto_is_set_async_handler (vnet_crypto_async_op_id_t op)
 {
@@ -813,7 +700,6 @@ vnet_crypto_init (vlib_main_t * vm)
   vlib_thread_main_t *tm = vlib_get_thread_main ();
   vnet_crypto_thread_t *ct = 0;
 
-  cm->dispatch_mode = VNET_CRYPTO_ASYNC_DISPATCH_POLLING;
   cm->engine_index_by_name = hash_create_string ( /* size */ 0,
 						  sizeof (uword));
   cm->alg_index_by_name = hash_create_string (0, sizeof (uword));
diff --git a/src/vnet/crypto/crypto.h b/src/vnet/crypto/crypto.h
index 0bb5e367f21..36fde2a548f 100644
--- a/src/vnet/crypto/crypto.h
+++ b/src/vnet/crypto/crypto.h
@@ -467,12 +467,8 @@ typedef struct
   uword *alg_index_by_name;
   uword *async_alg_index_by_name;
   vnet_crypto_async_alg_data_t *async_algs;
-  u32 async_refcnt;
   vnet_crypto_async_next_node_t *next_nodes;
   u32 crypto_node_index;
-#define VNET_CRYPTO_ASYNC_DISPATCH_POLLING 0
-#define VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT 1
-  u8 dispatch_mode;
 } vnet_crypto_main_t;
 
 extern vnet_crypto_main_t crypto_main;
@@ -500,21 +496,13 @@ u32 vnet_crypto_key_add_linked (vlib_main_t * vm,
 				vnet_crypto_key_index_t index_crypto,
 				vnet_crypto_key_index_t index_integ);
 
-clib_error_t *crypto_dispatch_enable_disable (int is_enable);
-
 int vnet_crypto_set_async_handler2 (char *alg_name, char *engine);
 
 int vnet_crypto_is_set_async_handler (vnet_crypto_async_op_id_t opt);
 
-void vnet_crypto_request_async_mode (int is_enable);
-
-void vnet_crypto_set_async_dispatch_mode (u8 mode);
-
 vnet_crypto_async_alg_t vnet_crypto_link_algs (vnet_crypto_alg_t crypto_alg,
 					       vnet_crypto_alg_t integ_alg);
 
-clib_error_t *crypto_dispatch_enable_disable (int is_enable);
-
 format_function_t format_vnet_crypto_alg;
 format_function_t format_vnet_crypto_engine;
 format_function_t format_vnet_crypto_op;
@@ -593,7 +581,8 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
 {
   vnet_crypto_main_t *cm = &crypto_main;
   vlib_thread_main_t *tm = vlib_get_thread_main ();
-  u32 i = vlib_num_workers () > 0;
+  u32 i;
+  vlib_node_t *n;
 
   frame->state = VNET_CRYPTO_FRAME_STATE_PENDING;
   frame->enqueue_thread_index = vm->thread_index;
@@ -608,9 +597,10 @@ vnet_crypto_async_submit_open_frame (vlib_main_t * vm,
 
   if (PREDICT_TRUE (ret == 0))
     {
-      if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT)
+      n = vlib_get_node (vm, cm->crypto_node_index);
+      if (n->state == VLIB_NODE_STATE_INTERRUPT)
 	{
-	  for (; i < tm->n_vlib_mains; i++)
+	  for (i = 0; i < tm->n_vlib_mains; i++)
 	    vlib_node_set_interrupt_pending (vlib_get_main_by_index (i),
 					     cm->crypto_node_index);
 	}
diff --git a/src/vnet/crypto/crypto_api.c b/src/vnet/crypto/crypto_api.c
index 49b12a3d377..482a9b698cb 100644
--- a/src/vnet/crypto/crypto_api.c
+++ b/src/vnet/crypto/crypto_api.c
@@ -46,8 +46,6 @@ vl_api_crypto_set_async_dispatch_t_handler (vl_api_crypto_set_async_dispatch_t
   vl_api_crypto_set_async_dispatch_reply_t *rmp;
   int rv = 0;
 
-  vnet_crypto_set_async_dispatch_mode ((u8) mp->mode);
-
   REPLY_MACRO (VL_API_CRYPTO_SET_ASYNC_DISPATCH_REPLY);
 }
 
diff --git a/src/vnet/crypto/node.c b/src/vnet/crypto/node.c
index 216b924f96e..cabbfb08d57 100644
--- a/src/vnet/crypto/node.c
+++ b/src/vnet/crypto/node.c
@@ -135,8 +135,11 @@ crypto_dequeue_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
 	  vnet_crypto_async_free_frame (vm, cf);
 	}
       /* signal enqueue-thread to dequeue the processed frame (n_elts>0) */
-      if (cm->dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT
-	  && n_elts > 0)
+      if (n_elts > 0 &&
+	  ((node->state == VLIB_NODE_STATE_POLLING &&
+	    (node->flags &
+	     VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)) ||
+	   node->state == VLIB_NODE_STATE_INTERRUPT))
 	{
 	  vlib_node_set_interrupt_pending (
 	    vlib_get_main_by_index (enqueue_thread_idx),
@@ -161,8 +164,6 @@ VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
   u32 n_dispatched = 0, n_cache = 0, index;
   vec_foreach_index (index, cm->dequeue_handlers)
     {
-      if (PREDICT_FALSE (cm->dequeue_handlers[index] == 0))
-	continue;
       n_cache = crypto_dequeue_frame (
 	vm, node, ct, cm->dequeue_handlers[index], n_cache, &n_dispatched);
     }
@@ -171,6 +172,17 @@ VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
     vlib_buffer_enqueue_to_next_vec (vm, node, &ct->buffer_indices, &ct->nexts,
 				     n_cache);
 
+  /* if there are still pending tasks and node in interrupt mode,
+     sending current thread signal to dequeue next loop */
+  if (pool_elts (ct->frame_pool) > 0 &&
+      ((node->state == VLIB_NODE_STATE_POLLING &&
+	(node->flags &
+	 VLIB_NODE_FLAG_SWITCH_FROM_POLLING_TO_INTERRUPT_MODE)) ||
+       node->state == VLIB_NODE_STATE_INTERRUPT))
+    {
+      vlib_node_set_interrupt_pending (vm, node->node_index);
+    }
+
   return n_dispatched;
 }
 
@@ -178,7 +190,8 @@ VLIB_NODE_FN (crypto_dispatch_node) (vlib_main_t * vm,
 VLIB_REGISTER_NODE (crypto_dispatch_node) = {
   .name = "crypto-dispatch",
   .type = VLIB_NODE_TYPE_INPUT,
-  .state = VLIB_NODE_STATE_DISABLED,
+  .flags = VLIB_NODE_FLAG_ADAPTIVE_MODE,
+  .state = VLIB_NODE_STATE_INTERRUPT,
   .format_trace = format_crypto_dispatch_trace,
 
   .n_errors = ARRAY_LEN(vnet_crypto_async_error_strings),
diff --git a/src/vnet/ipsec/ipsec.c b/src/vnet/ipsec/ipsec.c
index 86cb8982d07..14fc697e2eb 100644
--- a/src/vnet/ipsec/ipsec.c
+++ b/src/vnet/ipsec/ipsec.c
@@ -275,8 +275,7 @@ ipsec_register_esp_backend (
   const char *esp6_decrypt_node_name, const char *esp6_decrypt_tun_node_name,
   const char *esp_mpls_encrypt_node_tun_name,
   check_support_cb_t esp_check_support_cb,
-  add_del_sa_sess_cb_t esp_add_del_sa_sess_cb,
-  enable_disable_cb_t enable_disable_cb)
+  add_del_sa_sess_cb_t esp_add_del_sa_sess_cb)
 {
   ipsec_esp_backend_t *b;
 
@@ -307,7 +306,6 @@ ipsec_register_esp_backend (
 
   b->check_support_cb = esp_check_support_cb;
   b->add_del_sa_sess_cb = esp_add_del_sa_sess_cb;
-  b->enable_disable_cb = enable_disable_cb;
 
   return b - im->esp_backends;
 }
@@ -358,18 +356,6 @@ ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
   if (pool_is_free_index (im->esp_backends, backend_idx))
     return VNET_API_ERROR_INVALID_VALUE;
 
-  /* disable current backend */
-  if (im->esp_current_backend != ~0)
-    {
-      ipsec_esp_backend_t *cb = pool_elt_at_index (im->esp_backends,
-						   im->esp_current_backend);
-      if (cb->enable_disable_cb)
-	{
-	  if ((cb->enable_disable_cb) (0) != 0)
-	    return -1;
-	}
-    }
-
   ipsec_esp_backend_t *b = pool_elt_at_index (im->esp_backends, backend_idx);
   im->esp_current_backend = backend_idx;
   im->esp4_encrypt_node_index = b->esp4_encrypt_node_index;
@@ -388,11 +374,6 @@ ipsec_select_esp_backend (ipsec_main_t * im, u32 backend_idx)
   im->esp6_encrypt_tun_node_index = b->esp6_encrypt_tun_node_index;
   im->esp_mpls_encrypt_tun_node_index = b->esp_mpls_encrypt_tun_node_index;
 
-  if (b->enable_disable_cb)
-    {
-      if ((b->enable_disable_cb) (1) != 0)
-	return -1;
-    }
   return 0;
 }
 
@@ -402,8 +383,6 @@ ipsec_set_async_mode (u32 is_enabled)
   ipsec_main_t *im = &ipsec_main;
   ipsec_sa_t *sa;
 
-  vnet_crypto_request_async_mode (is_enabled);
-
   im->async_mode = is_enabled;
 
   /* change SA crypto op data */
@@ -482,7 +461,7 @@ ipsec_init (vlib_main_t * vm)
     vm, im, "crypto engine backend", "esp4-encrypt", "esp4-encrypt-tun",
     "esp4-decrypt", "esp4-decrypt-tun", "esp6-encrypt", "esp6-encrypt-tun",
     "esp6-decrypt", "esp6-decrypt-tun", "esp-mpls-encrypt-tun",
-    ipsec_check_esp_support, NULL, crypto_dispatch_enable_disable);
+    ipsec_check_esp_support, NULL);
 
   im->esp_default_backend = idx;
   rv = ipsec_select_esp_backend (im, idx);
diff --git a/src/vnet/ipsec/ipsec.h b/src/vnet/ipsec/ipsec.h
index 5b515295a0a..4aa09d7560e 100644
--- a/src/vnet/ipsec/ipsec.h
+++ b/src/vnet/ipsec/ipsec.h
@@ -93,8 +93,6 @@ typedef struct
   add_del_sa_sess_cb_t add_del_sa_sess_cb;
   /* check support function */
   check_support_cb_t check_support_cb;
-  /* enable or disable function */
-  enable_disable_cb_t enable_disable_cb;
   u32 esp4_encrypt_node_index;
   u32 esp4_decrypt_node_index;
   u32 esp4_encrypt_next_index;
@@ -381,8 +379,7 @@ u32 ipsec_register_esp_backend (
   const char *esp6_decrypt_node_name, const char *esp6_decrypt_tun_node_name,
   const char *esp_mpls_encrypt_tun_node_name,
   check_support_cb_t esp_check_support_cb,
-  add_del_sa_sess_cb_t esp_add_del_sa_sess_cb,
-  enable_disable_cb_t enable_disable_cb);
+  add_del_sa_sess_cb_t esp_add_del_sa_sess_cb);
 
 int ipsec_select_ah_backend (ipsec_main_t * im, u32 ah_backend_idx);
 int ipsec_select_esp_backend (ipsec_main_t * im, u32 esp_backend_idx);
diff --git a/src/vnet/ipsec/ipsec_sa.c b/src/vnet/ipsec/ipsec_sa.c
index 95db17e7fb1..c842caf8f16 100644
--- a/src/vnet/ipsec/ipsec_sa.c
+++ b/src/vnet/ipsec/ipsec_sa.c
@@ -406,7 +406,6 @@ ipsec_sa_add_and_lock (u32 id, u32 spi, ipsec_protocol_t proto,
     }
   else if (ipsec_sa_is_set_IS_ASYNC (sa))
     {
-      vnet_crypto_request_async_mode (1);
       ipsec_sa_set_async_mode (sa, 1 /* is_enabled */);
     }
   else
@@ -506,7 +505,6 @@ ipsec_sa_del (ipsec_sa_t * sa)
 
   if (ipsec_sa_is_set_IS_ASYNC (sa))
     {
-      vnet_crypto_request_async_mode (0);
       if (!ipsec_sa_is_set_IS_AEAD (sa))
 	vnet_crypto_key_del (vm, sa->crypto_async_key_index);
     }
diff --git a/test/test_ipsec_esp.py b/test/test_ipsec_esp.py
index c1bed91759e..4dc74bcb219 100644
--- a/test/test_ipsec_esp.py
+++ b/test/test_ipsec_esp.py
@@ -822,11 +822,6 @@ class TestIpsecEspAsync(TemplateIpsecEsp):
         self.p_async.spd.remove_vpp_config()
         self.p_async.sa.remove_vpp_config()
 
-        # async mode should have been disabled now that there are
-        # no async SAs. there's no API for this, so a reluctant
-        # screen scrape.
-        self.assertTrue("DISABLED" in self.vapi.cli("sh crypto async status"))
-
 
 class TestIpsecEspHandoff(
     TemplateIpsecEsp, IpsecTun6HandoffTests, IpsecTun4HandoffTests
--
cgit 1.2.3-korg
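
Note on the pattern this change relies on (not part of the patch itself): with
VLIB_NODE_FLAG_ADAPTIVE_MODE the crypto-dispatch input node starts in interrupt
mode and vlib may move it to polling under sustained load, so anything that
queues async crypto work must wake the node explicitly, as cryptodev_raw_dequeue
and vnet_crypto_async_submit_open_frame do above. A minimal sketch of that
enqueue-side wakeup follows; example_engine_enqueue and example_hw_enqueue are
hypothetical names used only for illustration, not symbols from this patch.

    #include <vnet/crypto/crypto.h>

    /* placeholder for the engine's real hardware/software queueing step */
    static int example_hw_enqueue (vnet_crypto_async_frame_t *frame);

    static int
    example_engine_enqueue (vlib_main_t *vm, vnet_crypto_async_frame_t *frame)
    {
      vnet_crypto_main_t *cm = &crypto_main;

      if (example_hw_enqueue (frame) < 0)
        return -1;

      /* wake this thread's crypto-dispatch node; harmless if the node is
         currently running in polling mode */
      vlib_node_set_interrupt_pending (
        vlib_get_main_by_index (vm->thread_index), cm->crypto_node_index);
      return 0;
    }

The same wakeup is issued from the dequeue side in node.c when frames are still
pending, so an interrupt-mode node keeps rescheduling itself until its queues
drain.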