author     Alexander Skorichenko <askorichenko@netgate.com>  2023-04-24 09:34:23 +0200
committer  Matthew Smith <mgsmith@netgate.com>                2023-04-25 18:02:43 +0000
commit     61cdc0981084f049067626b0123db700035120df (patch)
tree       2aea52ebb2d553921b2c14443c3b576f96e18f62 /src/plugins/crypto_sw_scheduler
parent     980f3fb2d3a3f62b296341cb423df6e1d93a193d (diff)
crypto-sw-scheduler: fix interrupt mode
Type: fix

Currently sw_scheduler runs interchangeably over queues of one selected
type, either ENCRYPT or DECRYPT, then switches the type for the next run.
This works fine in polling mode, as missed frames get processed on the
next run. In interrupt mode, if all of the workers miss a frame on the
first run, the interrupt flag is lowered, so the frame remains pending in
the queues, waiting for another crypto event to raise the interrupt.

With this fix, sw_scheduler in interrupt mode is forced to check the
second half of the queues if the first pass returned no results. This
guarantees that a pending frame gets processed before the interrupt is
reset.

Change-Id: I7e91d125702336eba72c6a3abaeabcae010d396a
Signed-off-by: Alexander Skorichenko <askorichenko@netgate.com>
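The retry pattern the commit message describes can be illustrated outside of VPP.
Below is a minimal standalone C sketch; the names (toy_queue_t, toy_dequeue,
TOY_MODE_INTERRUPT, and the two-element queue array) are hypothetical stand-ins,
not the VPP types or scheduler helpers. The actual change is in the diff that follows.

/*
 * Minimal sketch of the "recheck the other half of the queues" idea.
 * All identifiers here are illustrative only.
 */
#include <stdio.h>
#include <stdbool.h>

typedef enum { TOY_MODE_POLLING, TOY_MODE_INTERRUPT } toy_mode_t;

typedef struct
{
  int pending; /* number of frames waiting in this queue */
} toy_queue_t;

/* One queue per operation type, mirroring the ENCRYPT/DECRYPT split. */
static toy_queue_t queues[2] = { { .pending = 0 }, { .pending = 1 } };

/*
 * Process one queue half per call; in interrupt mode, fall back to the
 * other half when the first pass finds nothing, so a pending frame is
 * handled before the interrupt flag is cleared.
 */
static bool
toy_dequeue (toy_mode_t mode, int first_half)
{
  bool found = false;
  /* Retrying only matters in interrupt mode; polling comes back anyway. */
  bool recheck_queues = (mode == TOY_MODE_INTERRUPT);
  int half = first_half;

run_half_queues:
  if (queues[half].pending > 0)
    {
      queues[half].pending--;
      printf ("processed a frame from queue %d\n", half);
      found = true;
    }

  if (!found && recheck_queues)
    {
      recheck_queues = false;
      half ^= 1; /* switch to the other half of the queues */
      goto run_half_queues;
    }

  return found;
}

int
main (void)
{
  /* First half (queue 0) is empty; interrupt mode still drains queue 1. */
  bool got = toy_dequeue (TOY_MODE_INTERRUPT, 0);
  printf ("frame found: %s\n", got ? "yes" : "no");
  return 0;
}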
Diffstat (limited to 'src/plugins/crypto_sw_scheduler')
-rw-r--r--  src/plugins/crypto_sw_scheduler/main.c  | 10
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/src/plugins/crypto_sw_scheduler/main.c b/src/plugins/crypto_sw_scheduler/main.c
index 563e3591191..abdffab2b9c 100644
--- a/src/plugins/crypto_sw_scheduler/main.c
+++ b/src/plugins/crypto_sw_scheduler/main.c
@@ -458,6 +458,11 @@ crypto_sw_scheduler_process_aead (vlib_main_t *vm,
u32 tail, head;
u8 found = 0;
+ u8 recheck_queues =
+ crypto_main.dispatch_mode == VNET_CRYPTO_ASYNC_DISPATCH_INTERRUPT;
+
+ run_half_queues:
+
/* get a pending frame to process */
if (ptd->self_crypto_enabled)
{
@@ -568,6 +573,11 @@ crypto_sw_scheduler_process_aead (vlib_main_t *vm,
return f;
}
+ if (!found && recheck_queues)
+ {
+ recheck_queues = 0;
+ goto run_half_queues;
+ }
return 0;
}