| author | David Hotham <david.hotham@metaswitch.com> | 2016-11-01 10:51:24 +0000 |
|---|---|---|
| committer | Damjan Marion <dmarion.lists@gmail.com> | 2016-11-01 23:50:36 +0000 |
| commit | 63f70d213166c181b2ceff496069590070212168 (patch) | |
| tree | a7d470455ed2fd2cb5981e33d3de73b78bcd3473 /vnet | |
| parent | 2231150b52b58c4114f8520cde8b26df2761e064 (diff) | |
dpdk-hqos: don't hold up packets indefinitely under low load
Change-Id: If884637a6db0cb813a40920194795da2e98c8b23
Signed-off-by: David Hotham <david.hotham@metaswitch.com>
Diffstat (limited to 'vnet')
| -rw-r--r-- | vnet/vnet/devices/dpdk/dpdk.h | 5 |
|---|---|---|
| -rw-r--r-- | vnet/vnet/devices/dpdk/hqos.c | 29 |
2 files changed, 34 insertions, 0 deletions
```diff
diff --git a/vnet/vnet/devices/dpdk/dpdk.h b/vnet/vnet/devices/dpdk/dpdk.h
index e34d4b97bdd..dfbfce5066b 100644
--- a/vnet/vnet/devices/dpdk/dpdk.h
+++ b/vnet/vnet/devices/dpdk/dpdk.h
@@ -184,6 +184,7 @@ typedef struct
   u32 hqos_burst_deq;
   u32 pkts_enq_len;
   u32 swq_pos;
+  u32 flush_count;
 } dpdk_device_hqos_per_hqos_thread_t;
 
 typedef struct
@@ -304,6 +305,10 @@ typedef struct dpdk_efd_t
 #define DPDK_HQOS_DBG_BYPASS 0
 #endif
 
+#ifndef HQOS_FLUSH_COUNT_THRESHOLD
+#define HQOS_FLUSH_COUNT_THRESHOLD 100000
+#endif
+
 typedef struct dpdk_device_config_hqos_t
 {
   u32 hqos_thread;
diff --git a/vnet/vnet/devices/dpdk/hqos.c b/vnet/vnet/devices/dpdk/hqos.c
index d05ae09ac2b..12bf3fa6388 100644
--- a/vnet/vnet/devices/dpdk/hqos.c
+++ b/vnet/vnet/devices/dpdk/hqos.c
@@ -351,6 +351,7 @@ dpdk_port_setup_hqos (dpdk_device_t * xd, dpdk_device_config_hqos_t * hqos)
   vec_validate (xd->hqos_ht->pkts_deq, hqos->burst_deq - 1);
   xd->hqos_ht->pkts_enq_len = 0;
   xd->hqos_ht->swq_pos = 0;
+  xd->hqos_ht->flush_count = 0;
 
   /* Set up per-thread device data for each worker thread */
   for (i = 0; i < worker_thread_count; i++)
@@ -416,6 +417,7 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
       u32 pkts_enq_len = hqos->pkts_enq_len;
       u32 swq_pos = hqos->swq_pos;
       u32 n_swq = vec_len (hqos->swq), i;
+      u32 flush_count = hqos->flush_count;
 
       for (i = 0; i < n_swq; i++)
         {
@@ -446,10 +448,23 @@ dpdk_hqos_thread_internal_hqos_dbg_bypass (vlib_main_t * vm)
                 rte_pktmbuf_free (pkts_enq[n_pkts]);
 
               pkts_enq_len = 0;
+              flush_count = 0;
               break;
             }
         }
+      if (pkts_enq_len)
+        {
+          flush_count++;
+          if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+            {
+              rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+              pkts_enq_len = 0;
+              flush_count = 0;
+            }
+        }
       hqos->pkts_enq_len = pkts_enq_len;
+      hqos->flush_count = flush_count;
 
       /* Advance to next device */
       dev_pos++;
@@ -490,6 +505,7 @@ dpdk_hqos_thread_internal (vlib_main_t * vm)
       u32 pkts_enq_len = hqos->pkts_enq_len;
       u32 swq_pos = hqos->swq_pos;
       u32 n_swq = vec_len (hqos->swq), i;
+      u32 flush_count = hqos->flush_count;
       /*
        * SWQ dequeue and HQoS enqueue for current device
        */
@@ -517,10 +533,23 @@ dpdk_hqos_thread_internal (vlib_main_t * vm)
               rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
 
               pkts_enq_len = 0;
+              flush_count = 0;
               break;
             }
         }
+      if (pkts_enq_len)
+        {
+          flush_count++;
+          if (PREDICT_FALSE (flush_count == HQOS_FLUSH_COUNT_THRESHOLD))
+            {
+              rte_sched_port_enqueue (hqos->hqos, pkts_enq, pkts_enq_len);
+
+              pkts_enq_len = 0;
+              flush_count = 0;
+            }
+        }
       hqos->pkts_enq_len = pkts_enq_len;
+      hqos->flush_count = flush_count;
 
       /*
        * HQoS dequeue and HWQ TX enqueue for current device
```
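The change is small: each HQoS thread already batches packets into `pkts_enq` and hands them to `rte_sched_port_enqueue` only once a full burst accumulates, so under low load a partial burst could sit in the buffer indefinitely. The new per-device `flush_count` counts consecutive polls in which the buffer stayed non-empty without filling, and once it reaches `HQOS_FLUSH_COUNT_THRESHOLD` (100000) the partial burst is enqueued anyway. Below is a minimal sketch of the same batch-with-forced-flush pattern detached from VPP/DPDK; the `batcher_t`, `process_burst`, and `batcher_poll` names are illustrative, not from the patch:

```c
#include <stddef.h>

#define BURST_SIZE 32
#define FLUSH_COUNT_THRESHOLD 100000	/* mirrors HQOS_FLUSH_COUNT_THRESHOLD */

/* Hypothetical batcher: accumulates items into full bursts, but
 * force-flushes a partial burst after too many polls without filling,
 * so low-rate traffic is never held back indefinitely. */
typedef struct
{
  void *buf[BURST_SIZE];
  size_t len;			/* items currently buffered */
  unsigned flush_count;		/* polls since the buffer last drained */
} batcher_t;

/* Supplied by the caller; stands in for rte_sched_port_enqueue. */
extern void process_burst (void **items, size_t n);

static void
batcher_poll (batcher_t * b, void **items, size_t n)
{
  for (size_t i = 0; i < n && b->len < BURST_SIZE; i++)
    b->buf[b->len++] = items[i];

  if (b->len == BURST_SIZE)
    {
      /* Fast path: a full burst, exactly as before the patch. */
      process_burst (b->buf, b->len);
      b->len = 0;
      b->flush_count = 0;
    }
  else if (b->len > 0 && ++b->flush_count == FLUSH_COUNT_THRESHOLD)
    {
      /* Low-load path added by the patch: flush the partial burst. */
      process_burst (b->buf, b->len);
      b->len = 0;
      b->flush_count = 0;
    }
}
```

Counting polls rather than reading a timestamp keeps the per-iteration cost of the hot loop at a single increment and compare, and the 100000-poll threshold effectively bounds how long a partial burst can wait, trading a little batching efficiency for a cap on latency under low load.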