| author | Damjan Marion <damarion@cisco.com> | 2016-09-19 13:44:37 +0200 |
| --- | --- | --- |
| committer | Damjan Marion <dmarion.lists@gmail.com> | 2016-09-21 07:54:59 +0000 |
| commit | b58598b73107eb314b1f6dde7c86a7b75e3497df (patch) | |
| tree | 0b317bdc4f2e0d681797b1984bad9ef0e2213df4 | |
| parent | a7cc4479db4b3be0eb2b3ebf7cb569a0a4ed0c17 (diff) | |
dpdk: fix wrong tx ring size calculations
At many places the code was using the constant ring size of 4096
defined in the DPDK_TX_RING_SIZE macro.
As we support different ring sizes and the default value is now
1024, we need to remove DPDK_TX_RING_SIZE and use the
value stored in the device structure.
For that reason dpdk_device_t.nb_tx_desc is moved to the
first cacheline.
Change-Id: I2c2ac11f0f5e8ae779d34f9a9104eaf2921ec34c
Signed-off-by: Damjan Marion <damarion@cisco.com>
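
The bug class fixed above can be sketched in a few lines. The snippet below is an illustrative, stand-alone example and not VPP code: the names demo_device_t, demo_tx_ring_t and demo_tx_slot are invented for the example. It shows why the ring index must be wrapped with the device's actual descriptor count rather than a compile-time constant.

```c
#include <stdint.h>

/* Illustrative only -- these types are invented for the example and are
 * not part of VPP or DPDK. */
typedef struct
{
  uint16_t nb_tx_desc;   /* ring size configured at runtime, e.g. 1024 */
} demo_device_t;

typedef struct
{
  uint64_t tx_head;      /* monotonically increasing enqueue counter */
  uint64_t tx_tail;      /* monotonically increasing transmit counter */
} demo_tx_ring_t;

/* Map the monotonic head counter onto a slot in the tx vector.  Wrapping
 * with a hard-coded 4096 (the old DPDK_TX_RING_SIZE) silently breaks once
 * the real ring holds only nb_tx_desc entries (now 1024 by default), so
 * the per-device value has to be used as the modulus. */
static inline uint16_t
demo_tx_slot (const demo_device_t *xd, const demo_tx_ring_t *ring)
{
  return (uint16_t) (ring->tx_head % xd->nb_tx_desc);
}
```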
| -rw-r--r-- | vnet/vnet/devices/dpdk/device.c | 36 |
| -rw-r--r-- | vnet/vnet/devices/dpdk/dpdk.h | 5 |
| -rw-r--r-- | vnet/vnet/devices/dpdk/init.c | 4 |
| -rw-r--r-- | vnet/vnet/devices/dpdk/vhost_user.c | 4 |
4 files changed, 22 insertions, 27 deletions
```diff
diff --git a/vnet/vnet/devices/dpdk/device.c b/vnet/vnet/devices/dpdk/device.c
index 119d6039f74..3649178f388 100644
--- a/vnet/vnet/devices/dpdk/device.c
+++ b/vnet/vnet/devices/dpdk/device.c
@@ -281,7 +281,7 @@ static_always_inline
 
   n_packets = ring->tx_head - ring->tx_tail;
 
-  tx_head = ring->tx_head % DPDK_TX_RING_SIZE;
+  tx_head = ring->tx_head % xd->nb_tx_desc;
 
   /*
    * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
@@ -296,7 +296,7 @@ static_always_inline
    * a bit because it decreases the probability of having to issue two tx_burst
    * calls due to a ring wrap.
    */
-  ASSERT (n_packets < DPDK_TX_RING_SIZE);
+  ASSERT (n_packets < xd->nb_tx_desc);
 
   /*
    * If there is no flowcontrol callback, there is only temporary buffering
@@ -317,7 +317,7 @@ static_always_inline
   do
     {
       /* start the burst at the tail */
-      tx_tail = ring->tx_tail % DPDK_TX_RING_SIZE;
+      tx_tail = ring->tx_tail % xd->nb_tx_desc;
 
       /*
        * This device only supports one TX queue,
@@ -354,15 +354,14 @@ static_always_inline
           rv = rte_eth_tx_burst (xd->device_index,
                                  (uint16_t) queue_id,
                                  &tx_vector[tx_tail],
-                                 (uint16_t) (DPDK_TX_RING_SIZE -
-                                             tx_tail));
+                                 (uint16_t) (xd->nb_tx_desc - tx_tail));
 
           /*
            * If we transmitted everything we wanted, then allow 1 retry
            * so we can try to transmit the rest. If we didn't transmit
            * everything, stop now.
            */
-          n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
+          n_retry = (rv == xd->nb_tx_desc - tx_tail) ? 1 : 0;
         }
     }
 #if DPDK_VHOST_USER
@@ -438,7 +437,7 @@ static_always_inline
               int i;
               u32 bytes = 0;
               struct rte_mbuf **pkts = &tx_vector[tx_tail];
-              for (i = 0; i < (DPDK_TX_RING_SIZE - tx_tail); i++)
+              for (i = 0; i < (xd->nb_tx_desc - tx_tail); i++)
                 {
                   struct rte_mbuf *buff = pkts[i];
                   bytes += rte_pktmbuf_data_len (buff);
@@ -447,7 +446,7 @@ static_always_inline
                 rte_vhost_enqueue_burst (&xd->vu_vhost_dev, offset + VIRTIO_RXQ,
                                          &tx_vector[tx_tail],
-                                         (uint16_t) (DPDK_TX_RING_SIZE -
+                                         (uint16_t) (xd->nb_tx_desc -
                                                      tx_tail));
 
               if (PREDICT_TRUE (rv > 0))
@@ -476,7 +475,7 @@ static_always_inline
                     rte_pktmbuf_free (tx_vector[tx_tail + c]);
                 }
 
-              n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
+              n_retry = (rv == xd->nb_tx_desc - tx_tail) ? 1 : 0;
             }
 
           if (xd->need_txlock)
@@ -504,15 +503,14 @@ static_always_inline
            */
           rv = rte_kni_tx_burst (xd->kni,
                                  &tx_vector[tx_tail],
-                                 (uint16_t) (DPDK_TX_RING_SIZE -
-                                             tx_tail));
+                                 (uint16_t) (xd->nb_tx_desc - tx_tail));
 
           /*
            * If we transmitted everything we wanted, then allow 1 retry
            * so we can try to transmit the rest. If we didn't transmit
            * everything, stop now.
            */
-          n_retry = (rv == DPDK_TX_RING_SIZE - tx_tail) ? 1 : 0;
+          n_retry = (rv == xd->nb_tx_desc - tx_tail) ? 1 : 0;
         }
     }
 #endif
@@ -632,7 +630,7 @@ dpdk_interface_tx (vlib_main_t * vm,
 
   ASSERT (n_packets <= VLIB_FRAME_SIZE);
 
-  if (PREDICT_FALSE (n_on_ring + n_packets > DPDK_TX_RING_SIZE))
+  if (PREDICT_FALSE (n_on_ring + n_packets > xd->nb_tx_desc))
     {
       /*
        * Overflowing the ring should never happen.
@@ -668,7 +666,7 @@ dpdk_interface_tx (vlib_main_t * vm,
 
   from = vlib_frame_vector_args (f);
   n_left = n_packets;
-  i = ring->tx_head % DPDK_TX_RING_SIZE;
+  i = ring->tx_head % xd->nb_tx_desc;
 
   while (n_left >= 4)
     {
@@ -770,9 +768,9 @@ dpdk_interface_tx (vlib_main_t * vm,
 
       if (PREDICT_TRUE (any_clone == 0))
         {
-          tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+          tx_vector[i % xd->nb_tx_desc] = mb0;
           i++;
-          tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
+          tx_vector[i % xd->nb_tx_desc] = mb1;
           i++;
         }
       else
@@ -780,12 +778,12 @@ dpdk_interface_tx (vlib_main_t * vm,
           /* cloning was done, need to check for failure */
           if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
             {
-              tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+              tx_vector[i % xd->nb_tx_desc] = mb0;
               i++;
             }
           if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
             {
-              tx_vector[i % DPDK_TX_RING_SIZE] = mb1;
+              tx_vector[i % xd->nb_tx_desc] = mb1;
               i++;
             }
         }
@@ -839,7 +837,7 @@ dpdk_interface_tx (vlib_main_t * vm,
 
       if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
         {
-          tx_vector[i % DPDK_TX_RING_SIZE] = mb0;
+          tx_vector[i % xd->nb_tx_desc] = mb0;
           i++;
         }
       n_left--;
diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h
index 2cb301ce291..48072560eed 100644
--- a/vnet/vnet/devices/dpdk/dpdk.h
+++ b/vnet/vnet/devices/dpdk/dpdk.h
@@ -214,6 +214,7 @@ typedef struct
 #define DPDK_DEVICE_FLAG_VHOST_USER (1 << 4)
 #define DPDK_DEVICE_FLAG_HAVE_SUBIF (1 << 5)
 
+  u16 nb_tx_desc;
   CLIB_CACHE_LINE_ALIGN_MARK (cacheline1);
 
   u8 *interface_name_suffix;
@@ -225,7 +226,6 @@ typedef struct
   u16 tx_q_used;
   u16 rx_q_used;
   u16 nb_rx_desc;
-  u16 nb_tx_desc;
   u16 *cpu_socket_id_by_queue;
   struct rte_eth_conf port_conf;
   struct rte_eth_txconf tx_conf;
@@ -265,9 +265,6 @@ typedef struct
 
   u8 need_txlock;		/* Used by VNET_DPDK_DEV_VHOST_USER */
 } dpdk_device_t;
 
-
-#define DPDK_TX_RING_SIZE (4 * 1024)
-
 #define DPDK_STATS_POLL_INTERVAL (10.0)
 #define DPDK_MIN_STATS_POLL_INTERVAL (0.001)	/* 1msec */
diff --git a/vnet/vnet/devices/dpdk/init.c b/vnet/vnet/devices/dpdk/init.c
index a2cc8849f41..44b4dc31097 100644
--- a/vnet/vnet/devices/dpdk/init.c
+++ b/vnet/vnet/devices/dpdk/init.c
@@ -666,7 +666,7 @@ dpdk_lib_init (dpdk_main_t * dm)
                             CLIB_CACHE_LINE_BYTES);
       for (j = 0; j < tm->n_vlib_mains; j++)
         {
-          vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
+          vec_validate_ha (xd->tx_vectors[j], xd->nb_tx_desc,
                            sizeof (tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
           vec_reset_length (xd->tx_vectors[j]);
         }
@@ -775,7 +775,7 @@ dpdk_lib_init (dpdk_main_t * dm)
                             CLIB_CACHE_LINE_BYTES);
       for (j = 0; j < tm->n_vlib_mains; j++)
         {
-          vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
+          vec_validate_ha (xd->tx_vectors[j], xd->nb_tx_desc,
                            sizeof (tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
           vec_reset_length (xd->tx_vectors[j]);
         }
diff --git a/vnet/vnet/devices/dpdk/vhost_user.c b/vnet/vnet/devices/dpdk/vhost_user.c
index f8910ad65d7..946c6e1f1db 100644
--- a/vnet/vnet/devices/dpdk/vhost_user.c
+++ b/vnet/vnet/devices/dpdk/vhost_user.c
@@ -266,7 +266,7 @@ dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id, u8 * hwaddr)
   // reset tx vectors
   for (j = 0; j < tm->n_vlib_mains; j++)
     {
-      vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
+      vec_validate_ha (xd->tx_vectors[j], xd->nb_tx_desc,
                        sizeof (tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
       vec_reset_length (xd->tx_vectors[j]);
     }
@@ -338,7 +338,7 @@ dpdk_create_vhost_user_if_internal (u32 * hw_if_index, u32 if_id, u8 * hwaddr)
 
   for (j = 0; j < tm->n_vlib_mains; j++)
     {
-      vec_validate_ha (xd->tx_vectors[j], DPDK_TX_RING_SIZE,
+      vec_validate_ha (xd->tx_vectors[j], xd->nb_tx_desc,
                        sizeof (tx_ring_hdr_t), CLIB_CACHE_LINE_BYTES);
       vec_reset_length (xd->tx_vectors[j]);
     }
```
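
The second part of the change, moving nb_tx_desc ahead of the cacheline1 marker in dpdk.h, can be pictured with a layout sketch like the one below. This is a hypothetical struct, not the real dpdk_device_t, and the DEMO_CACHE_LINE_ALIGN_MARK macro only approximates VPP's CLIB_CACHE_LINE_ALIGN_MARK; it is included to show why a field read on every TX burst belongs in the first cache line, away from setup-time configuration.

```c
#include <stdint.h>

#define DEMO_CACHE_LINE_BYTES 64

/* Rough stand-in for a cacheline marker: a zero-size member (GCC/Clang
 * extension) that forces the following fields onto a new cache line.
 * The exact VPP definition may differ; this only illustrates the idea. */
#define DEMO_CACHE_LINE_ALIGN_MARK(name) \
  uint8_t name[0] __attribute__ ((aligned (DEMO_CACHE_LINE_BYTES)))

/* Illustrative layout only.  Fields read in the per-packet TX path --
 * including the ring size now used as a modulus on every burst -- sit in
 * cache line 0; configuration touched only at init time follows the
 * cacheline1 marker, so the hot path stays within a single cache line. */
typedef struct
{
  /* cache line 0: hot, touched on every TX burst */
  uint32_t device_index;
  uint32_t flags;
  uint16_t nb_tx_desc;

  DEMO_CACHE_LINE_ALIGN_MARK (cacheline1);

  /* cache line 1: cold, touched at init / reconfiguration time */
  uint16_t nb_rx_desc;
  uint16_t tx_q_used;
  uint16_t rx_q_used;
} demo_device_layout_t;
```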