| author | Mohammed Hawari <mohammed@hawari.fr> | 2020-11-10 10:26:45 +0100 |
| --- | --- | --- |
| committer | Damjan Marion <dmarion@me.com> | 2020-12-01 08:00:27 +0000 |
| commit | bb7991a037a23eb966aee2d6f8069353c6a64c94 | |
| tree | 4bf032fc1cb66240dcc2b393e02b5675e8bcee4b (/src/plugins/avf) | |
| parent | 8f5c6107c98b0b0c9a5110858dc403b706e05de2 | |

avf: add TSO support
Change-Id: Ica663e1d07225acf53fd74b0051a5a2a34174960
Signed-off-by: Mohammed Hawari <mohammed@hawari.fr>
Type: improvement
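
TSO requires the driver to enqueue one extra "context" descriptor (carrying the MSS and the total L4 payload length) ahead of a packet's data descriptors, which would break the queue's one-buffer-per-descriptor bookkeeping. The patch below keeps `txq->bufs` and the descriptor ring in step by parking a shared, reference-counted placeholder buffer index in the context descriptor's slot. A minimal standalone sketch of that refcount scheme follows; `buffer_get ()` and `buffer_alloc ()` are hypothetical stand-ins for VPP's `vlib_get_buffer ()` and `vlib_buffer_alloc_from_pool ()`:

```c
/* Sketch of the shared-placeholder refcount scheme. buffer_get () and
 * buffer_alloc () are hypothetical stand-ins for VPP's vlib_get_buffer ()
 * and vlib_buffer_alloc_from_pool (); ref_count is a u8, as in
 * vlib_buffer_t. */
#include <stdint.h>

typedef struct
{
  uint8_t ref_count;
} buffer_t;

static buffer_t pool[8];	/* toy buffer pool */
static uint32_t pool_next;

buffer_t *
buffer_get (uint32_t bi)
{
  return &pool[bi];
}

int
buffer_alloc (uint32_t *bi)	/* returns 1 on success, like the VPP call */
{
  if (pool_next >= 8)
    return 0;
  *bi = pool_next++;
  pool[*bi].ref_count = 1;	/* allocation hands out one reference */
  return 1;
}

/* Take one more reference on the placeholder for a new context
 * descriptor; when the u8 counter would saturate at 255, swap in a
 * fresh placeholder instead of failing. Returns 0 on success. */
int
placeholder_ref (uint32_t *placeholder_bi)
{
  buffer_t *ph = buffer_get (*placeholder_bi);
  if (ph->ref_count == 255)
    {
      uint32_t new_bi;
      if (buffer_alloc (&new_bi) != 1)
	return 1;		/* no context descriptor can be enqueued */
      ph->ref_count--;		/* drop the queue's own reference */
      *placeholder_bi = new_bi;
      ph = buffer_get (new_bi);
    }
  ph->ref_count++;		/* one ref per in-flight ctx descriptor */
  return 0;
}
```

Swapping in a fresh placeholder just before the u8 counter saturates keeps the common path to a single increment; `avf_tx_fill_ctx_desc` in the output.c hunk below does exactly this.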
Diffstat (limited to 'src/plugins/avf')

| -rw-r--r-- | src/plugins/avf/avf.h | 15 |
| -rw-r--r-- | src/plugins/avf/device.c | 15 |
| -rw-r--r-- | src/plugins/avf/output.c | 111 |

3 files changed, 120 insertions, 21 deletions
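
The avf.h hunk below introduces the Tx context-descriptor fields used for TSO: descriptor type 0x1 in bits 3:0 of qword 1, the TSO command bit, the total L4 payload length (TLEN) at bit 30, and the MSS at bit 50. A standalone illustration of how these compose qword 1 is sketched here; note that `AVF_TXD_CMD ()` is not part of this diff and is restated on the assumption, taken from the existing header, that it sets command bit `x + 4`:

```c
/* Standalone illustration of the TSO context-descriptor composition
 * added to avf.h below. AVF_TXD_CMD () is not part of this diff; it is
 * assumed here to set command bit x + 4, as in the existing header. */
#include <inttypes.h>
#include <stdio.h>

typedef uint64_t u64;

#define AVF_TXD_CMD(x)             (1ULL << ((x) + 4))	/* assumed */
#define AVF_TXD_DTYP_CTX           0x1ULL
#define AVF_TXD_CTX_CMD_TSO        AVF_TXD_CMD (0)
#define AVF_TXD_CTX_SEG(val,x)     (((u64) (val)) << (30 + (x)))
#define AVF_TXD_CTX_SEG_TLEN(val)  AVF_TXD_CTX_SEG (val, 0)
#define AVF_TXD_CTX_SEG_MSS(val)   AVF_TXD_CTX_SEG (val, 20)

int
main (void)
{
  u64 mss = 1448;		/* gso_size: payload bytes per segment */
  u64 tlen = 8946;		/* total L4 payload length in the chain */
  u64 qw1 = AVF_TXD_DTYP_CTX | AVF_TXD_CTX_CMD_TSO
    | AVF_TXD_CTX_SEG_MSS (mss) | AVF_TXD_CTX_SEG_TLEN (tlen);
  printf ("ctx qword1 = 0x%016" PRIx64 "\n", qw1);
  return 0;
}
```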
```diff
diff --git a/src/plugins/avf/avf.h b/src/plugins/avf/avf.h
index 66a791ec9b2..025fa6ea4e9 100644
--- a/src/plugins/avf/avf.h
+++ b/src/plugins/avf/avf.h
@@ -20,7 +20,14 @@
 #include <avf/virtchnl.h>
 
+#include <vppinfra/types.h>
+#include <vppinfra/error_bootstrap.h>
+#include <vppinfra/lock.h>
+
 #include <vlib/log.h>
+#include <vlib/pci/pci.h>
+
+#include <vnet/interface.h>
 
 #define AVF_QUEUE_SZ_MAX 4096
 #define AVF_QUEUE_SZ_MIN 64
@@ -66,6 +73,13 @@
 #define AVF_TXD_OFFSET_IPLEN(val)  AVF_TXD_OFFSET( 7, 4, val)
 #define AVF_TXD_OFFSET_L4LEN(val)  AVF_TXD_OFFSET(14, 4, val)
 
+#define AVF_TXD_DTYP_CTX           0x1ULL
+#define AVF_TXD_CTX_CMD_TSO        AVF_TXD_CMD(0)
+#define AVF_TXD_CTX_SEG(val,x)     (((u64)val) << (30 + x))
+#define AVF_TXD_CTX_SEG_TLEN(val)  AVF_TXD_CTX_SEG(val,0)
+#define AVF_TXD_CTX_SEG_MSS(val)   AVF_TXD_CTX_SEG(val,20)
+
+
 extern vlib_log_class_registration_t avf_log;
 
 #define avf_log_err(dev, f, ...) \
@@ -158,6 +172,7 @@ typedef struct
   volatile u32 *qtx_tail;
   u16 next;
   u16 size;
+  u32 ctx_desc_placeholder_bi;
   clib_spinlock_t lock;
   avf_tx_desc_t *descs;
   u32 *bufs;
diff --git a/src/plugins/avf/device.c b/src/plugins/avf/device.c
index bb2dcf74e07..c6bf2df4443 100644
--- a/src/plugins/avf/device.c
+++ b/src/plugins/avf/device.c
@@ -298,6 +298,8 @@ avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
 {
   clib_error_t *err;
   avf_txq_t *txq;
+  u8 bpi = vlib_buffer_pool_get_default_for_numa (vm,
+                                                  ad->numa_node);
 
   if (qid >= ad->num_queue_pairs)
     {
@@ -313,6 +315,14 @@ avf_txq_init (vlib_main_t * vm, avf_device_t * ad, u16 qid, u16 txq_size)
   txq = vec_elt_at_index (ad->txqs, qid);
   txq->size = txq_size;
   txq->next = 0;
+
+  /* Prepare a placeholder buffer to maintain a 1-1
+     relationship between bufs and descs when a context
+     descriptor is added in descs */
+  if (!vlib_buffer_alloc_from_pool
+      (vm, &txq->ctx_desc_placeholder_bi, 1, bpi))
+    return clib_error_return (0, "buffer allocation error");
+
   txq->descs = vlib_physmem_alloc_aligned_on_numa (vm, txq->size *
                                                    sizeof (avf_tx_desc_t),
                                                    2 * CLIB_CACHE_LINE_BYTES,
@@ -1442,6 +1452,8 @@ avf_delete_if (vlib_main_t * vm, avf_device_t * ad, int with_barrier)
           vlib_buffer_free_from_ring (vm, txq->bufs, first, txq->size,
                                       txq->n_enqueued);
         }
+      /* Free the placeholder buffer */
+      vlib_buffer_free_one (vm, txq->ctx_desc_placeholder_bi);
       vec_free (txq->bufs);
       clib_ring_free (txq->rs_slots);
     }
@@ -1638,7 +1650,8 @@ avf_create_if (vlib_main_t * vm, avf_create_if_args_t * args)
   vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ad->hw_if_index);
 
   hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_MAC_FILTER |
-    VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
+    VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD |
+    VNET_HW_INTERFACE_FLAG_SUPPORTS_GSO;
 
   ethernet_set_flags (vnm, ad->hw_if_index,
                       ETHERNET_INTERFACE_FLAG_DEFAULT_L3);
diff --git a/src/plugins/avf/output.c b/src/plugins/avf/output.c
index b1b5bd005ff..90cc56fc7d8 100644
--- a/src/plugins/avf/output.c
+++ b/src/plugins/avf/output.c
@@ -88,6 +88,14 @@ avf_tx_prepare_cksum (vlib_buffer_t * b, u8 is_tso)
   if (is_ip4)
     ip4->checksum = 0;
 
+  if (is_tso)
+    {
+      if (is_ip4)
+        ip4->length = 0;
+      else
+        ip6->payload_length = 0;
+    }
+
   if (is_tcp || is_udp)
     {
       if (is_ip4)
@@ -120,13 +128,56 @@ avf_tx_prepare_cksum (vlib_buffer_t * b, u8 is_tso)
   return flags;
 }
 
+static_always_inline int
+avf_tx_fill_ctx_desc (vlib_main_t * vm, avf_txq_t * txq, avf_tx_desc_t * d,
+                      vlib_buffer_t * b)
+{
+  vlib_buffer_t *ctx_ph = vlib_get_buffer (vm, txq->ctx_desc_placeholder_bi);
+  if (PREDICT_FALSE (ctx_ph->ref_count == 255))
+    {
+      /* We need a new placeholder buffer */
+      u32 new_bi;
+      u8 bpi = vlib_buffer_pool_get_default_for_numa (vm, vm->numa_node);
+      if (PREDICT_TRUE
+          (vlib_buffer_alloc_from_pool (vm, &new_bi, 1, bpi) == 1))
+        {
+          /* Remove our own reference on the current placeholder buffer */
+          ctx_ph->ref_count--;
+          /* Replace with the new placeholder buffer */
+          txq->ctx_desc_placeholder_bi = new_bi;
+          ctx_ph = vlib_get_buffer (vm, new_bi);
+        }
+      else
+        /* Impossible to enqueue a ctx descriptor, fail */
+        return 1;
+    }
+
+  /* Acquire a reference on the placeholder buffer */
+  ctx_ph->ref_count++;
+
+  u16 l234hdr_sz =
+    vnet_buffer (b)->l4_hdr_offset -
+    vnet_buffer (b)->l2_hdr_offset + vnet_buffer2 (b)->gso_l4_hdr_sz;
+  u16 tlen = vlib_buffer_length_in_chain (vm, b) - l234hdr_sz;
+  d[0].qword[0] = 0;
+  d[0].qword[1] = AVF_TXD_DTYP_CTX | AVF_TXD_CTX_CMD_TSO
+    | AVF_TXD_CTX_SEG_MSS (vnet_buffer2 (b)->gso_size) |
+    AVF_TXD_CTX_SEG_TLEN (tlen);
+  return 0;
+}
+
+
 static_always_inline u16
 avf_tx_enqueue (vlib_main_t * vm, vlib_node_runtime_t * node, avf_txq_t * txq,
                 u32 * buffers, u32 n_packets, int use_va_dma)
 {
   u16 next = txq->next;
   u64 bits = AVF_TXD_CMD_EOP | AVF_TXD_CMD_RSV;
+  const u32 offload_mask = VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
+    VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM |
+    VNET_BUFFER_F_GSO;
   u64 one_by_one_offload_flags = 0;
+  int is_tso;
   u16 n_desc = 0;
   u16 *slot, n_desc_left, n_packets_left = n_packets;
   u16 mask = txq->size - 1;
@@ -160,9 +211,7 @@ avf_tx_enqueue (vlib_main_t * vm, vlib_node_runtime_t * node, avf_txq_t * txq,
 
       or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
 
-      if (or_flags &
-          (VLIB_BUFFER_NEXT_PRESENT | VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
-           VNET_BUFFER_F_OFFLOAD_TCP_CKSUM | VNET_BUFFER_F_OFFLOAD_UDP_CKSUM))
+      if (or_flags & (VLIB_BUFFER_NEXT_PRESENT | offload_mask))
         goto one_by_one;
 
       vlib_buffer_copy_indices (txq->bufs + next, buffers, 4);
@@ -199,17 +248,14 @@ avf_tx_enqueue (vlib_main_t * vm, vlib_node_runtime_t * node, avf_txq_t * txq,
       one_by_one_offload_flags = 0;
       txq->bufs[next] = buffers[0];
       b[0] = vlib_get_buffer (vm, buffers[0]);
-
-      if (PREDICT_FALSE (b[0]->flags & (VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
-                                        VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
-                                        VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
-        one_by_one_offload_flags |=
-          avf_tx_prepare_cksum (b[0], 0 /* no TSO */ );
+      is_tso = ! !(b[0]->flags & VNET_BUFFER_F_GSO);
+      if (PREDICT_FALSE (is_tso || b[0]->flags & offload_mask))
+        one_by_one_offload_flags |= avf_tx_prepare_cksum (b[0], is_tso);
 
       /* Deal with chain buffer if present */
-      if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+      if (is_tso || b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
         {
-          n_desc_needed = 1;
+          n_desc_needed = 1 + is_tso;
           b0 = b[0];
 
           /* Wish there were a buffer count for chain buffer */
@@ -220,7 +266,7 @@
             }
 
           /* spec says data descriptor is limited to 8 segments */
-          if (PREDICT_FALSE (n_desc_needed > 8))
+          if (PREDICT_FALSE (!is_tso && n_desc_needed > 8))
             {
               vlib_buffer_free_one (vm, buffers[0]);
               vlib_error_count (vm, node->node_index,
@@ -237,6 +283,19 @@
              */
            break;
 
+          /* Enqueue a context descriptor if needed */
+          if (PREDICT_FALSE (is_tso))
+            {
+              if (avf_tx_fill_ctx_desc (vm, txq, d, b[0]))
+                /* Failure to acquire ref on ctx placeholder */
+                break;
+              txq->bufs[next + 1] = txq->bufs[next];
+              txq->bufs[next] = txq->ctx_desc_placeholder_bi;
+              next += 1;
+              n_desc += 1;
+              n_desc_left -= 1;
+              d += 1;
+            }
           while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
             {
               if (use_va_dma)
@@ -292,16 +351,14 @@
       b[0] = vlib_get_buffer (vm, buffers[0]);
       one_by_one_offload_flags = 0;
 
-      if (PREDICT_FALSE (b[0]->flags & (VNET_BUFFER_F_OFFLOAD_IP_CKSUM |
-                                        VNET_BUFFER_F_OFFLOAD_TCP_CKSUM |
-                                        VNET_BUFFER_F_OFFLOAD_UDP_CKSUM)))
-        one_by_one_offload_flags |=
-          avf_tx_prepare_cksum (b[0], 0 /* no TSO */ );
+      is_tso = ! !(b[0]->flags & VNET_BUFFER_F_GSO);
+      if (PREDICT_FALSE (is_tso || b[0]->flags & offload_mask))
+        one_by_one_offload_flags |= avf_tx_prepare_cksum (b[0], is_tso);
 
       /* Deal with chain buffer if present */
-      if (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
+      if (is_tso || b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
         {
-          n_desc_needed = 1;
+          n_desc_needed = 1 + is_tso;
           b0 = b[0];
 
           while (b0->flags & VLIB_BUFFER_NEXT_PRESENT)
@@ -311,7 +368,7 @@
             }
 
           /* Spec says data descriptor is limited to 8 segments */
-          if (PREDICT_FALSE (n_desc_needed > 8))
+          if (PREDICT_FALSE (!is_tso && n_desc_needed > 8))
             {
               vlib_buffer_free_one (vm, buffers[0]);
               vlib_error_count (vm, node->node_index,
@@ -324,6 +381,20 @@
           if (PREDICT_FALSE (n_desc_left < n_desc_needed))
             break;
 
+          /* Enqueue a context descriptor if needed */
+          if (PREDICT_FALSE (is_tso))
+            {
+              if (avf_tx_fill_ctx_desc (vm, txq, d, b[0]))
+                /* Failure to acquire ref on ctx placeholder */
+                break;
+
+              txq->bufs[(next + 1) & mask] = txq->bufs[next & mask];
+              txq->bufs[next & mask] = txq->ctx_desc_placeholder_bi;
+              next += 1;
+              n_desc += 1;
+              n_desc_left -= 1;
+              d = txq->descs + (next & mask);
+            }
           while (b[0]->flags & VLIB_BUFFER_NEXT_PRESENT)
             {
               if (use_va_dma)
```
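
A closing note on the arithmetic in `avf_tx_fill_ctx_desc`: the TLEN programmed into the context descriptor is the chain's total length minus the L2/L3/L4 headers, since the hardware replicates those headers onto every MSS-sized segment it emits. A worked standalone sketch, with the `vnet_buffer ()` metadata replaced by plain variables and illustrative header sizes:

```c
/* Worked example of the l234hdr_sz / tlen arithmetic in
 * avf_tx_fill_ctx_desc above, with the vnet_buffer () metadata replaced
 * by plain variables (values are illustrative). */
#include <stdint.h>
#include <stdio.h>

int
main (void)
{
  uint16_t l2_hdr_offset = 0;	/* Ethernet header starts the packet */
  uint16_t l4_hdr_offset = 34;	/* 14 B Ethernet + 20 B IPv4 */
  uint16_t gso_l4_hdr_sz = 20;	/* TCP header without options */
  uint32_t chain_len = 9000;	/* vlib_buffer_length_in_chain () result */

  /* L2 + L3 + L4 header bytes preceding the TSO payload */
  uint16_t l234hdr_sz = l4_hdr_offset - l2_hdr_offset + gso_l4_hdr_sz;
  /* TLEN written into the context descriptor: payload bytes only */
  uint32_t tlen = chain_len - l234hdr_sz;

  /* prints: l234hdr_sz = 54, tlen = 8946 */
  printf ("l234hdr_sz = %u, tlen = %u\n", (unsigned) l234hdr_sz,
	  (unsigned) tlen);
  return 0;
}
```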