author     PiotrX Kleski <piotrx.kleski@intel.com>     2020-05-05 14:14:22 +0200
committer  Neale Ranns <nranns@cisco.com>              2020-05-24 07:31:49 +0000
commit     fdca4dd1a1a817e65bf44e435261d893fc0c51d6 (patch)
tree       4f2c012bb3be8360cbf7c59d5bda3bd3451c233c /src/vnet
parent     cbe053e14f2852b42e4e3218af8756a6d7e730c8 (diff)
ipsec: fixed chaining ops after add footer and icv
In case there is no free space in the first buffer for the ICV and footer,
an additional buffer will be added, but esp_encrypt will stay in
single-buffer mode.
The issue occurs for the following payload sizes:
- TCP packets with a 1992-byte payload
- ICMP packets with a 2004-byte payload
This fix moves the single/chained buffer ops selection to after the
esp_add_footer_and_icv call.
Type: fix
Signed-off-by: Fan Zhang <roy.fan.zhang@intel.com>
Signed-off-by: PiotrX Kleski <piotrx.kleski@intel.com>
Change-Id: Ic5ceba418f738933f96edb3e489ca2d149033b79
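To make the ordering concrete, here is a minimal, self-contained sketch of the logic the fix establishes. It is not VPP code: the buffer type, the add_footer_and_icv helper, and the byte counts below are simplified stand-ins for illustration only. The point is that the footer/ICV step runs first and may chain an extra buffer, and only afterwards is the single-buffer or chained op queue chosen by comparing the last buffer in the chain against the first.

```c
/* Toy model (not VPP code): ordering of footer/ICV insertion and
 * single-vs-chained op selection.  All types and sizes are assumed. */
#include <stdio.h>

typedef struct buffer
{
  int free_tail_space;		/* bytes left at the end of the data area */
  struct buffer *next;		/* chained buffer, if one was added */
} buffer_t;

typedef enum { OPS_SINGLE, OPS_CHAINED } ops_kind_t;

/* Stand-in for esp_add_footer_and_icv (): if the footer + ICV do not fit
 * in the current buffer, chain a spare buffer and return the new last
 * buffer of the chain. */
static buffer_t *
add_footer_and_icv (buffer_t * b, buffer_t * spare, int tail_bytes)
{
  if (b->free_tail_space >= tail_bytes)
    return b;			/* everything still fits in one buffer */
  b->next = spare;		/* overflow: chain a buffer for the tail */
  return spare;
}

int
main (void)
{
  buffer_t first = { .free_tail_space = 16, .next = 0 };
  buffer_t spare = { 0 };

  /* The fix: add the footer and ICV first ... */
  buffer_t *lb = add_footer_and_icv (&first, &spare, 20);

  /* ... and only then pick the op queue, based on the chain as it
   * actually exists, not on an earlier estimate. */
  ops_kind_t ops = (lb != &first) ? OPS_CHAINED : OPS_SINGLE;

  printf ("using %s crypto/integ ops\n",
	  ops == OPS_CHAINED ? "chained" : "single-buffer");
  return 0;
}
```

In the real patch the same idea appears as the lb != b[0] check below: lb is the last buffer of the chain after esp_add_footer_and_icv has run, so the comparison reflects any buffer that call may have appended.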
Diffstat (limited to 'src/vnet')
-rw-r--r--   src/vnet/ipsec/esp_encrypt.c   19
1 file changed, 11 insertions(+), 8 deletions(-)
diff --git a/src/vnet/ipsec/esp_encrypt.c b/src/vnet/ipsec/esp_encrypt.c
index e9feb8b40a1..e80f98624b9 100644
--- a/src/vnet/ipsec/esp_encrypt.c
+++ b/src/vnet/ipsec/esp_encrypt.c
@@ -695,18 +695,10 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 
       if (n_bufs > 1)
 	{
-	  crypto_ops = &ptd->chained_crypto_ops;
-	  integ_ops = &ptd->chained_integ_ops;
-
 	  /* find last buffer in the chain */
 	  while (lb->flags & VLIB_BUFFER_NEXT_PRESENT)
 	    lb = vlib_get_buffer (vm, lb->next_buffer);
 	}
-      else
-	{
-	  crypto_ops = &ptd->crypto_ops;
-	  integ_ops = &ptd->integ_ops;
-	}
 
       if (PREDICT_FALSE (esp_seq_advance (sa0)))
 	{
@@ -879,6 +871,17 @@ esp_encrypt_inline (vlib_main_t * vm, vlib_node_runtime_t * node,
 	  next[0] = ESP_ENCRYPT_NEXT_INTERFACE_OUTPUT;
 	}
 
+      if (lb != b[0])
+	{
+	  crypto_ops = &ptd->chained_crypto_ops;
+	  integ_ops = &ptd->chained_integ_ops;
+	}
+      else
+	{
+	  crypto_ops = &ptd->crypto_ops;
+	  integ_ops = &ptd->integ_ops;
+	}
+
       esp->spi = spi;
       esp->seq = clib_net_to_host_u32 (sa0->seq);
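As a rough back-of-the-envelope illustration of why exactly those payload sizes hit the problem (all constants here are assumptions for illustration, not taken from the commit): a 1992-byte TCP payload with minimal 20-byte TCP and 20-byte IPv4 headers, or a 2004-byte ICMP payload with an 8-byte ICMP header and a 20-byte IPv4 header, both give a 2032-byte inner packet. Assuming a 2048-byte buffer data area, 4-byte pad alignment, a 2-byte ESP trailer and a 16-byte ICV, the footer and ICV push the packet past the end of the first buffer, so a second buffer has to be chained:

```c
/* Hypothetical size check, for illustration only -- none of these
 * constants come from the commit and the real accounting in VPP is
 * more involved (headroom, transport vs tunnel mode, cipher choice). */
#include <stdio.h>

int
main (void)
{
  const int buffer_data_size = 2048;		/* assumed buffer data area */
  const int inner_packet = 20 + 20 + 1992;	/* IPv4 + TCP + payload = 2032 */
  const int esp_align = 4;			/* assumed pad alignment */
  const int esp_trailer = 2;			/* pad length + next header */
  const int icv_len = 16;			/* assumed ICV size */

  /* Round the padded payload up to the alignment, then add the ICV. */
  int padded =
    ((inner_packet + esp_trailer + esp_align - 1) / esp_align) * esp_align;
  int total = padded + icv_len;

  printf ("%d bytes after footer and ICV vs %d-byte buffer -> %s\n",
	  total, buffer_data_size,
	  total > buffer_data_size ? "second buffer must be chained" : "fits");
  return 0;
}
```

Under these assumptions the packet grows to 2052 bytes, 4 bytes more than the buffer can hold; before the fix this is precisely the case that was still queued on the single-buffer crypto/integrity ops, because the queue was selected before esp_add_footer_and_icv chained the extra buffer.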