author     Steven Luong <sluong@cisco.com>    2019-02-26 16:45:54 -0800
committer  Steven Luong <sluong@cisco.com>    2019-02-26 16:47:11 -0800
commit     854559d154288945e03af6b6b7ce91c383667180 (patch)
tree       9e4a1d3c03674d3337d39def3f55ea087506aebb /src/plugins/vmxnet3
parent     1bb981d89880a1d54dccd94ca6216b927740af4a (diff)
vmxnet3: RSS support
Configurable up to 16 RX queues per interface. Default is 1.

Change-Id: If9e2beffeb7e7dc8c2264b4db902132b2fea02c1
Signed-off-by: Steven Luong <sluong@cisco.com>
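Usage note: the new queue count is driven from the existing create CLI, extended in cli.c below. A minimal example (PCI address and queue counts are illustrative, not taken from this change):

    vpp# create interface vmxnet3 0000:0b:00.0 num-rx-queues 4 num-tx-queues 2

RSS is only programmed on the device when more than one RX queue is requested; with the default of a single queue the behavior is unchanged (see vmxnet3_provision_driver_shared() in the vmxnet3.c hunks).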
Diffstat (limited to 'src/plugins/vmxnet3')
-rw-r--r--  src/plugins/vmxnet3/README.md         1
-rw-r--r--  src/plugins/vmxnet3/cli.c             4
-rw-r--r--  src/plugins/vmxnet3/vmxnet3.api      35
-rw-r--r--  src/plugins/vmxnet3/vmxnet3.c       160
-rw-r--r--  src/plugins/vmxnet3/vmxnet3.h        43
-rw-r--r--  src/plugins/vmxnet3/vmxnet3_api.c    35
-rw-r--r--  src/plugins/vmxnet3/vmxnet3_test.c   32
7 files changed, 229 insertions, 81 deletions
diff --git a/src/plugins/vmxnet3/README.md b/src/plugins/vmxnet3/README.md
index d1082aa11e4..b5829939506 100644
--- a/src/plugins/vmxnet3/README.md
+++ b/src/plugins/vmxnet3/README.md
@@ -15,7 +15,6 @@ vfio driver can still be used with recent kernels which support no-iommu mode.
##Known issues
-* RSS
* VLAN filter
## Usage
diff --git a/src/plugins/vmxnet3/cli.c b/src/plugins/vmxnet3/cli.c
index 571c3dbd042..0f0cf60fa40 100644
--- a/src/plugins/vmxnet3/cli.c
+++ b/src/plugins/vmxnet3/cli.c
@@ -50,6 +50,8 @@ vmxnet3_create_command_fn (vlib_main_t * vm, unformat_input_t * input,
;
else if (unformat (line_input, "num-tx-queues %u", &args.txq_num))
;
+ else if (unformat (line_input, "num-rx-queues %u", &args.rxq_num))
+ ;
else
return clib_error_return (0, "unknown input `%U'",
format_unformat_error, input);
@@ -67,7 +69,7 @@ VLIB_CLI_COMMAND (vmxnet3_create_command, static) = {
.path = "create interface vmxnet3",
.short_help = "create interface vmxnet3 <pci-address>"
"[rx-queue-size <size>] [tx-queue-size <size>]"
- "[num-tx-queues <number>]",
+ "[num-tx-queues <number>] [num-rx-queues <number>]",
.function = vmxnet3_create_command_fn,
};
/* *INDENT-ON* */
diff --git a/src/plugins/vmxnet3/vmxnet3.api b/src/plugins/vmxnet3/vmxnet3.api
index 8666820db69..804ba538be5 100644
--- a/src/plugins/vmxnet3/vmxnet3.api
+++ b/src/plugins/vmxnet3/vmxnet3.api
@@ -25,6 +25,7 @@ option version = "1.0.0";
ddddddddddddddddbbbbbbbbsssssfff
@param enable_elog - turn on elog (optional - default is off)
@param rxq_size - receive queue size (optional - default is 1024)
+ @param rxq_num - number of receive queues (optional - default is 1)
@param txq_size - transmit queue size (optional - default is 1024)
@param txq_num - number of transmit queues (optional - default is 1)
*/
@@ -37,6 +38,7 @@ define vmxnet3_create
u32 pci_addr;
i32 enable_elog;
u16 rxq_size;
+ u16 rxq_num;
u16 txq_size;
u16 txq_num;
};
@@ -83,6 +85,23 @@ typeonly define vmxnet3_tx_list
u16 tx_consume;
};
+/** \brief vmxnet3_rx_list structure
+ @param rx_qsize - rx queue size
+ @param rx_fill - rx fill count
+ @param rx_next - rx next index
+ @param rx_produce - rx produce index
+ @param rx_consume - rx consume index
+*/
+
+typeonly define vmxnet3_rx_list
+{
+ u16 rx_qsize;
+ u16 rx_fill[2];
+ u16 rx_next;
+ u16 rx_produce[2];
+ u16 rx_consume[2];
+};
+
/** \brief Memory interface details structure
@param context - sender context, to match reply w/ request (memif_dump)
@param sw_if_index - index of the interface
@@ -91,12 +110,9 @@ typeonly define vmxnet3_tx_list
@param pci_addr - pci address of the interface
@param version - vmxnet3 hardware version
@param admin_up_down - interface administrative status
- @param rx_qsize - rx queue size
- @param rx_fill - rx fill count
- @param rx_next - rx next index
- @param rx_produce - rx produce index
- @param rx_consume - rx consume index
- @param tx_count - number of of elements in tx_list
+ @param rx_count - number of elements in rx_list
+ @param rx_list - list of vmxnet3_rx_list
+ @param tx_count - number of elements in tx_list
@param tx_list - list of vmnxnet3_tx_list
*/
define vmxnet3_details
@@ -110,11 +126,8 @@ define vmxnet3_details
u8 version;
u8 admin_up_down;
- u16 rx_qsize;
- u16 rx_fill[2];
- u16 rx_next;
- u16 rx_produce[2];
- u16 rx_consume[2];
+ u8 rx_count;
+ vl_api_vmxnet3_rx_list_t rx_list[16];
u8 tx_count;
vl_api_vmxnet3_tx_list_t tx_list[8];
diff --git a/src/plugins/vmxnet3/vmxnet3.c b/src/plugins/vmxnet3/vmxnet3.c
index 6995905a181..43f9cbed793 100644
--- a/src/plugins/vmxnet3/vmxnet3.c
+++ b/src/plugins/vmxnet3/vmxnet3.c
@@ -206,6 +206,7 @@ vmxnet3_provision_driver_shared (vlib_main_t * vm, vmxnet3_device_t * vd)
}
rx->cfg.comp_address = vmxnet3_dma_addr (vm, vd, rxq->rx_comp);
rx->cfg.num_comp = rxq->size;
+ rx->cfg.intr_index = qid;
rx++;
}
@@ -221,6 +222,13 @@ vmxnet3_provision_driver_shared (vlib_main_t * vm, vmxnet3_device_t * vd)
shared->misc.upt_features = VMXNET3_F_RXCSUM;
if (vd->lro_enable)
shared->misc.upt_features |= VMXNET3_F_LRO;
+ if (vd->num_rx_queues > 1)
+ {
+ shared->misc.upt_features |= VMXNET3_F_RSS;
+ shared->rss.version = 1;
+ shared->rss.address = vmxnet3_dma_addr (vm, vd, vd->rss);
+ shared->rss.length = sizeof (*vd->rss);
+ }
shared->misc.max_num_rx_sg = 0;
shared->misc.upt_version_support = VMXNET3_UPT_VERSION_SELECT;
shared->misc.queue_desc_address = vmxnet3_dma_addr (vm, vd, vd->queues);
@@ -230,7 +238,7 @@ vmxnet3_provision_driver_shared (vlib_main_t * vm, vmxnet3_device_t * vd)
shared->misc.num_tx_queues = vd->num_tx_queues;
shared->misc.num_rx_queues = vd->num_rx_queues;
shared->interrupt.num_intrs = vd->num_intrs;
- shared->interrupt.event_intr_index = 1;
+ shared->interrupt.event_intr_index = vd->num_rx_queues;
shared->interrupt.control = VMXNET3_IC_DISABLE_ALL;
shared->rx_filter.mode = VMXNET3_RXMODE_UCAST | VMXNET3_RXMODE_BCAST |
VMXNET3_RXMODE_ALL_MULTI | VMXNET3_RXMODE_PROMISC;
@@ -359,6 +367,40 @@ vmxnet3_txq_init (vlib_main_t * vm, vmxnet3_device_t * vd, u16 qid, u16 qsz)
return 0;
}
+static const u8 vmxnet3_rss_key[VMXNET3_RSS_MAX_KEY_SZ] = {
+ 0x3b, 0x56, 0xd1, 0x56, 0x13, 0x4a, 0xe7, 0xac,
+ 0xe8, 0x79, 0x09, 0x75, 0xe8, 0x65, 0x79, 0x28,
+ 0x35, 0x12, 0xb9, 0x56, 0x7c, 0x76, 0x4b, 0x70,
+ 0xd8, 0x56, 0xa3, 0x18, 0x9b, 0x0a, 0xee, 0xf3,
+ 0x96, 0xa6, 0x9f, 0x8f, 0x9e, 0x8c, 0x90, 0xc9,
+};
+
+static clib_error_t *
+vmxnet3_rss_init (vlib_main_t * vm, vmxnet3_device_t * vd)
+{
+ vmxnet3_rss_shared *rss;
+ size_t size = sizeof (*rss);
+ u8 i;
+
+ vd->rss = vlib_physmem_alloc_aligned_on_numa (vm, size, 512, vd->numa_node);
+ if (vd->rss == 0)
+ return vlib_physmem_last_error (vm);
+
+ clib_memset (vd->rss, 0, size);
+ rss = vd->rss;
+ rss->hash_type =
+ VMXNET3_RSS_HASH_TYPE_IPV4 | VMXNET3_RSS_HASH_TYPE_TCP_IPV4 |
+ VMXNET3_RSS_HASH_TYPE_IPV6 | VMXNET3_RSS_HASH_TYPE_TCP_IPV6;
+ rss->hash_func = VMXNET3_RSS_HASH_FUNC_TOEPLITZ;
+ rss->hash_key_sz = VMXNET3_RSS_MAX_KEY_SZ;
+ rss->ind_table_sz = VMXNET3_RSS_MAX_IND_TABLE_SZ;
+ clib_memcpy (rss->hash_key, vmxnet3_rss_key, VMXNET3_RSS_MAX_KEY_SZ);
+ for (i = 0; i < rss->ind_table_sz; i++)
+ rss->ind_table[i] = i % vd->num_rx_queues;
+
+ return 0;
+}
+
static clib_error_t *
vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
vmxnet3_create_if_args_t * args)
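For context on the table built in vmxnet3_rss_init() above: with, say, 4 RX queues the 128 indirection entries are filled 0,1,2,3,0,1,... and the device resolves each packet's Toeplitz hash through that table. A hedged sketch of that device-side lookup (illustrative helper, not part of this patch):

    /* Illustrative only: conceptual RSS queue selection using the
     * indirection table programmed in vmxnet3_rss_init (). */
    static inline u8
    vmxnet3_rss_queue_for_hash (const vmxnet3_rss_shared * rss, u32 hash)
    {
      /* ind_table_sz is VMXNET3_RSS_MAX_IND_TABLE_SZ (128); each entry holds
       * a queue id in [0, num_rx_queues), assigned round-robin above. */
      return rss->ind_table[hash % rss->ind_table_sz];
    }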
@@ -444,9 +486,19 @@ vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
clib_memset (vd->queues, 0, size);
- error = vmxnet3_rxq_init (vm, vd, 0, args->rxq_size);
- if (error)
- return error;
+ if (vd->num_rx_queues > 1)
+ {
+ error = vmxnet3_rss_init (vm, vd);
+ if (error)
+ return error;
+ }
+
+ for (i = 0; i < vd->num_rx_queues; i++)
+ {
+ error = vmxnet3_rxq_init (vm, vd, i, args->rxq_size);
+ if (error)
+ return error;
+ }
for (i = 0; i < tm->n_vlib_mains; i++)
{
@@ -471,25 +523,11 @@ vmxnet3_device_init (vlib_main_t * vm, vmxnet3_device_t * vd,
return error;
}
- /* Disable interrupts */
- vmxnet3_disable_interrupt (vd);
-
- vec_foreach_index (i, vd->rxqs)
- {
- vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, i);
-
- vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
- vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
- }
- vd->flags |= VMXNET3_DEVICE_F_INITIALIZED;
-
- vmxnet3_enable_interrupt (vd);
-
return error;
}
static void
-vmxnet3_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
+vmxnet3_rxq_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
{
vnet_main_t *vnm = vnet_get_main ();
vmxnet3_main_t *vmxm = &vmxnet3_main;
@@ -502,7 +540,8 @@ vmxnet3_irq_0_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
}
static void
-vmxnet3_irq_1_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h, u16 line)
+vmxnet3_event_irq_handler (vlib_main_t * vm, vlib_pci_dev_handle_t h,
+ u16 line)
{
vnet_main_t *vnm = vnet_get_main ();
vmxnet3_main_t *vmxm = &vmxnet3_main;
@@ -539,7 +578,7 @@ vmxnet3_queue_size_valid (u16 qsz)
}
static u8
-vmxnet3_queue_num_valid (u16 num)
+vmxnet3_tx_queue_num_valid (u16 num)
{
vlib_thread_main_t *tm = vlib_get_thread_main ();
@@ -548,6 +587,14 @@ vmxnet3_queue_num_valid (u16 num)
return 1;
}
+static u8
+vmxnet3_rx_queue_num_valid (u16 num)
+{
+ if (num > VMXNET3_RXQ_MAX)
+ return 0;
+ return 1;
+}
+
void
vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
{
@@ -556,19 +603,35 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
vmxnet3_device_t *vd;
vlib_pci_dev_handle_t h;
clib_error_t *error = 0;
+ u16 qid;
+ u32 num_intr;
if (args->txq_num == 0)
args->txq_num = 1;
- if (!vmxnet3_queue_num_valid (args->txq_num))
+ if (args->rxq_num == 0)
+ args->rxq_num = 1;
+ if (!vmxnet3_rx_queue_num_valid (args->rxq_num))
+ {
+ args->rv = VNET_API_ERROR_INVALID_VALUE;
+ args->error =
+ clib_error_return (error, "number of rx queues must be <= %u",
+ VMXNET3_RXQ_MAX);
+ vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
+ format_vlib_pci_addr, &args->addr,
+ "number of rx queues must be <= %u", VMXNET3_RXQ_MAX);
+ return;
+ }
+
+ if (!vmxnet3_tx_queue_num_valid (args->txq_num))
{
args->rv = VNET_API_ERROR_INVALID_VALUE;
args->error =
clib_error_return (error,
- "number of queues must be <= %u and <= number of "
+ "number of tx queues must be <= %u and <= number of "
"CPU's assigned to VPP", VMXNET3_TXQ_MAX);
vlib_log (VLIB_LOG_LEVEL_ERR, vmxm->log_default, "%U: %s",
format_vlib_pci_addr, &args->addr,
- "number of queues must be <= %u and <= number of "
+ "number of tx queues must be <= %u and <= number of "
"CPU's assigned to VPP", VMXNET3_TXQ_MAX);
return;
}
@@ -607,6 +670,7 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
pool_get (vmxm->devices, vd);
vd->num_tx_queues = args->txq_num;
+ vd->num_rx_queues = args->rxq_num;
vd->dev_instance = vd - vmxm->devices;
vd->per_interface_next_index = ~0;
vd->pci_addr = args->addr;
@@ -634,8 +698,7 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
*/
vd->pci_dev_handle = h;
vd->numa_node = vlib_pci_get_numa_node (vm, h);
- vd->num_rx_queues = 1;
- vd->num_intrs = 2;
+ vd->num_intrs = vd->num_rx_queues + 1; // +1 for the event interrupt
vlib_pci_set_private_data (vm, h, vd->dev_instance);
@@ -657,23 +720,31 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
goto error;
}
- if ((error = vlib_pci_register_msix_handler (vm, h, 0, 1,
- &vmxnet3_irq_0_handler)))
+ num_intr = vlib_pci_get_num_msix_interrupts (vm, h);
+ if (num_intr < vd->num_rx_queues + 1)
+ {
+ vmxnet3_log_error (vd,
+ "No sufficient interrupt lines (%u) for rx queues",
+ num_intr);
+ goto error;
+ }
+ if ((error = vlib_pci_register_msix_handler (vm, h, 0, vd->num_rx_queues,
+ &vmxnet3_rxq_irq_handler)))
{
vmxnet3_log_error (vd,
"error encountered on pci register msix handler 0");
goto error;
}
- if ((error = vlib_pci_register_msix_handler (vm, h, 1, 1,
- &vmxnet3_irq_1_handler)))
+ if ((error = vlib_pci_register_msix_handler (vm, h, vd->num_rx_queues, 1,
+ &vmxnet3_event_irq_handler)))
{
vmxnet3_log_error (vd,
"error encountered on pci register msix handler 1");
goto error;
}
- if ((error = vlib_pci_enable_msix_irq (vm, h, 0, 2)))
+ if ((error = vlib_pci_enable_msix_irq (vm, h, 0, vd->num_rx_queues + 1)))
{
vmxnet3_log_error (vd, "error encountered on pci enable msix irq");
goto error;
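The two registrations above imply a fixed MSI-X layout: vectors 0 .. num_rx_queues-1 go to vmxnet3_rxq_irq_handler (one per RX queue) and vector num_rx_queues goes to vmxnet3_event_irq_handler, which is also the value written to shared->interrupt.event_intr_index earlier in this file. A hedged sketch of that mapping (helper names are illustrative):

    /* Illustrative only: MSI-X vector layout after this change. */
    static inline u16
    vmxnet3_rxq_irq_line (u16 qid)
    {
      return qid;			/* vectors 0 .. num_rx_queues - 1 */
    }

    static inline u16
    vmxnet3_event_irq_line (vmxnet3_device_t * vd)
    {
      return vd->num_rx_queues;	/* single event vector after the RX queue vectors */
    }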
@@ -714,7 +785,26 @@ vmxnet3_create_if (vlib_main_t * vm, vmxnet3_create_if_args_t * args)
vnet_hw_interface_set_input_node (vnm, vd->hw_if_index,
vmxnet3_input_node.index);
- vnet_hw_interface_assign_rx_thread (vnm, vd->hw_if_index, 0, ~0);
+ /* Disable interrupts */
+ vmxnet3_disable_interrupt (vd);
+ vec_foreach_index (qid, vd->rxqs)
+ {
+ vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
+ u32 thread_index;
+ u32 numa_node;
+
+ vnet_hw_interface_assign_rx_thread (vnm, vd->hw_if_index, qid, ~0);
+ thread_index = vnet_get_device_input_thread_index (vnm, vd->hw_if_index,
+ qid);
+ numa_node = vlib_mains[thread_index]->numa_node;
+ rxq->buffer_pool_index =
+ vlib_buffer_pool_get_default_for_numa (vm, numa_node);
+ vmxnet3_rxq_refill_ring0 (vm, vd, rxq);
+ vmxnet3_rxq_refill_ring1 (vm, vd, rxq);
+ }
+ vd->flags |= VMXNET3_DEVICE_F_INITIALIZED;
+ vmxnet3_enable_interrupt (vd);
+
vnet_hw_interface_set_link_speed (vnm, vd->hw_if_index,
vd->link_speed * 1000);
if (vd->flags & VMXNET3_DEVICE_F_LINK_UP)
@@ -736,7 +826,7 @@ vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
vnet_main_t *vnm = vnet_get_main ();
vmxnet3_main_t *vmxm = &vmxnet3_main;
u32 i, bi;
- u16 desc_idx;
+ u16 desc_idx, qid;
/* Quiesce the device */
vmxnet3_reg_write (vd, 1, VMXNET3_REG_CMD, VMXNET3_CMD_QUIESCE_DEV);
@@ -747,7 +837,8 @@ vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
if (vd->hw_if_index)
{
vnet_hw_interface_set_flags (vnm, vd->hw_if_index, 0);
- vnet_hw_interface_unassign_rx_thread (vnm, vd->hw_if_index, 0);
+ vec_foreach_index (qid, vd->rxqs)
+ vnet_hw_interface_unassign_rx_thread (vnm, vd->hw_if_index, qid);
ethernet_delete_interface (vnm, vd->hw_if_index);
}
@@ -804,6 +895,7 @@ vmxnet3_delete_if (vlib_main_t * vm, vmxnet3_device_t * vd)
vlib_physmem_free (vm, vd->driver_shared);
vlib_physmem_free (vm, vd->queues);
+ vlib_physmem_free (vm, vd->rss);
clib_error_free (vd->error);
clib_memset (vd, 0, sizeof (*vd));
diff --git a/src/plugins/vmxnet3/vmxnet3.h b/src/plugins/vmxnet3/vmxnet3.h
index 084fbc5d3b0..7bc05ec4267 100644
--- a/src/plugins/vmxnet3/vmxnet3.h
+++ b/src/plugins/vmxnet3/vmxnet3.h
@@ -70,7 +70,25 @@ enum
#undef _
};
+#define foreach_vmxnet3_rss_hash_type \
+ _(0, IPV4, "ipv4") \
+ _(1, TCP_IPV4, "tcp ipv4") \
+ _(2, IPV6, "ipv6") \
+ _(3, TCP_IPV6, "tcp ipv6")
+
+enum
+{
+#define _(a, b, c) VMXNET3_RSS_HASH_TYPE_##b = (1 << a),
+ foreach_vmxnet3_rss_hash_type
+#undef _
+};
+
+#define VMXNET3_RSS_HASH_FUNC_TOEPLITZ 1
+#define VMXNET3_RSS_MAX_KEY_SZ 40
+#define VMXNET3_RSS_MAX_IND_TABLE_SZ 128
+
#define VMXNET3_TXQ_MAX 8
+#define VMXNET3_RXQ_MAX 16
#define VMXNET3_TX_START(vd) ((vd)->queues)
#define VMXNET3_RX_START(vd) \
((vd)->queues + (vd)->num_tx_queues * sizeof (vmxnet3_tx_queue))
@@ -464,6 +482,16 @@ typedef CLIB_PACKED (struct
u32 flags[2];
}) vmxnet3_tx_desc;
+typedef CLIB_PACKED (struct
+ {
+ u16 hash_type;
+ u16 hash_func;
+ u16 hash_key_sz;
+ u16 ind_table_sz;
+ u8 hash_key[VMXNET3_RSS_MAX_KEY_SZ];
+ u8 ind_table[VMXNET3_RSS_MAX_IND_TABLE_SZ];
+ }) vmxnet3_rss_shared;
+
typedef struct
{
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
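Since the new structure is CLIB_PACKED, its size is exactly 4 * 2 + 40 + 128 = 176 bytes (four u16 fields, the 40-byte key, the 128-entry table), which is the value handed to the device as shared->rss.length in vmxnet3.c. A hedged compile-time check, assuming vppinfra's STATIC_ASSERT_SIZEOF macro:

    /* Illustrative only: 4 u16 fields + 40-byte key + 128-entry table. */
    STATIC_ASSERT_SIZEOF (vmxnet3_rss_shared, 176);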
@@ -487,6 +515,7 @@ typedef struct
CLIB_CACHE_LINE_ALIGN_MARK (cacheline0);
u16 size;
u8 int_mode;
+ u8 buffer_pool_index;
vmxnet3_rx_ring rx_ring[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_desc *rx_desc[VMXNET3_RX_RING_SIZE];
vmxnet3_rx_comp *rx_comp;
@@ -547,12 +576,11 @@ typedef struct
u8 version;
u8 mac_addr[6];
- /* error */
clib_error_t *error;
vmxnet3_shared *driver_shared;
void *queues;
-
+ vmxnet3_rss_shared *rss;
u32 link_speed;
u8 lro_enable;
vmxnet3_tx_stats *tx_stats;
@@ -574,6 +602,7 @@ typedef struct
vlib_pci_addr_t addr;
u32 enable_elog;
u16 rxq_size;
+ u16 rxq_num;
u16 txq_size;
u16 txq_num;
/* return */
@@ -670,8 +699,9 @@ vmxnet3_rxq_refill_ring0 (vlib_main_t * vm, vmxnet3_device_t * vd,
return 0;
n_alloc =
- vlib_buffer_alloc_to_ring (vm, ring->bufs, ring->produce, rxq->size,
- n_refill);
+ vlib_buffer_alloc_to_ring_from_pool (vm, ring->bufs, ring->produce,
+ rxq->size, n_refill,
+ rxq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_refill))
{
if (n_alloc)
@@ -715,8 +745,9 @@ vmxnet3_rxq_refill_ring1 (vlib_main_t * vm, vmxnet3_device_t * vd,
return 0;
n_alloc =
- vlib_buffer_alloc_to_ring (vm, ring->bufs, ring->produce, rxq->size,
- n_refill);
+ vlib_buffer_alloc_to_ring_from_pool (vm, ring->bufs, ring->produce,
+ rxq->size, n_refill,
+ rxq->buffer_pool_index);
if (PREDICT_FALSE (n_alloc != n_refill))
{
if (n_alloc)
diff --git a/src/plugins/vmxnet3/vmxnet3_api.c b/src/plugins/vmxnet3/vmxnet3_api.c
index 635657c2bb6..0a66ffe0778 100644
--- a/src/plugins/vmxnet3/vmxnet3_api.c
+++ b/src/plugins/vmxnet3/vmxnet3_api.c
@@ -112,8 +112,8 @@ reply:
static void
send_vmxnet3_details (vl_api_registration_t * reg, vmxnet3_device_t * vd,
- vmxnet3_rxq_t * rxq, vnet_sw_interface_t * swif,
- u8 * interface_name, u32 context)
+ vnet_sw_interface_t * swif, u8 * interface_name,
+ u32 context)
{
vl_api_vmxnet3_details_t *mp;
vnet_main_t *vnm = vnet_get_main ();
@@ -141,15 +141,23 @@ send_vmxnet3_details (vl_api_registration_t * reg, vmxnet3_device_t * vd,
mp->pci_addr = ntohl (vd->pci_addr.as_u32);
mp->admin_up_down = (swif->flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? 1 : 0;
- mp->rx_qsize = htons (rxq->size);
- mp->rx_next = htons (rxq->rx_comp_ring.next);
- for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
- {
- ring = &rxq->rx_ring[rid];
- mp->rx_fill[rid] = htons (ring->fill);
- mp->rx_produce[rid] = htons (ring->produce);
- mp->rx_consume[rid] = htons (ring->consume);
- }
+ mp->rx_count = clib_min (vec_len (vd->rxqs), VMXNET3_RXQ_MAX);
+ vec_foreach_index (qid, vd->rxqs)
+ {
+ vmxnet3_rxq_t *rxq = vec_elt_at_index (vd->rxqs, qid);
+ vl_api_vmxnet3_rx_list_t *rx_list = &mp->rx_list[qid];
+
+ ASSERT (qid < VMXNET3_RXQ_MAX);
+ rx_list->rx_qsize = htons (rxq->size);
+ rx_list->rx_next = htons (rxq->rx_comp_ring.next);
+ for (rid = 0; rid < VMXNET3_RX_RING_SIZE; rid++)
+ {
+ ring = &rxq->rx_ring[rid];
+ rx_list->rx_fill[rid] = htons (ring->fill);
+ rx_list->rx_produce[rid] = htons (ring->produce);
+ rx_list->rx_consume[rid] = htons (ring->consume);
+ }
+ }
mp->tx_count = clib_min (vec_len (vd->txqs), VMXNET3_TXQ_MAX);
vec_foreach_index (qid, vd->txqs)
@@ -180,8 +188,6 @@ vl_api_vmxnet3_dump_t_handler (vl_api_vmxnet3_dump_t * mp)
vmxnet3_device_t *vd;
u8 *if_name = 0;
vl_api_registration_t *reg;
- vmxnet3_rxq_t *rxq;
- u16 qid = 0;
reg = vl_api_client_index_to_registration (mp->client_index);
if (!reg)
@@ -193,8 +199,7 @@ vl_api_vmxnet3_dump_t_handler (vl_api_vmxnet3_dump_t * mp)
swif = vnet_get_sw_interface (vnm, vd->sw_if_index);
if_name = format (if_name, "%U%c", format_vnet_sw_interface_name, vnm,
swif, 0);
- rxq = vec_elt_at_index (vd->rxqs, qid);
- send_vmxnet3_details (reg, vd, rxq, swif, if_name, mp->context);
+ send_vmxnet3_details (reg, vd, swif, if_name, mp->context);
_vec_len (if_name) = 0;
}));
/* *INDENT-ON* */
diff --git a/src/plugins/vmxnet3/vmxnet3_test.c b/src/plugins/vmxnet3/vmxnet3_test.c
index 848b1698797..bb9f93e95cc 100644
--- a/src/plugins/vmxnet3/vmxnet3_test.c
+++ b/src/plugins/vmxnet3/vmxnet3_test.c
@@ -248,21 +248,27 @@ vl_api_vmxnet3_details_t_handler (vl_api_vmxnet3_details_t * mp)
fformat (vam->ofp, "%s: sw_if_index %u mac %U\n"
" version: %u\n"
" PCI Address: %U\n"
- " state %s\n"
- " RX Queue 0\n"
- " RX completion next index %u\n"
- " ring 0 size %u fill %u consume %u produce %u\n"
- " ring 1 size %u fill %u consume %u produce %u\n",
+ " state %s\n",
mp->if_name, ntohl (mp->sw_if_index), format_ethernet_address,
mp->hw_addr, mp->version,
- format_pci_addr, &pci_addr,
- mp->admin_up_down ? "up" : "down",
- ntohs (mp->rx_next),
- ntohs (mp->rx_qsize), ntohs (mp->rx_fill[0]),
- ntohs (mp->rx_consume[0]),
- ntohs (mp->rx_produce[0]),
- ntohs (mp->rx_qsize), ntohs (mp->rx_fill[1]),
- ntohs (mp->rx_consume[1]), ntohs (mp->rx_produce[1]));
+ format_pci_addr, &pci_addr, mp->admin_up_down ? "up" : "down");
+ for (qid = 0; qid < mp->rx_count; qid++)
+ {
+ vl_api_vmxnet3_rx_list_t *rx_list = &mp->rx_list[qid];
+ fformat (vam->ofp,
+ " RX Queue %u\n"
+ " RX completion next index %u\n"
+ " ring 0 size %u fill %u consume %u produce %u\n"
+ " ring 1 size %u fill %u consume %u produce %u\n",
+ qid,
+ ntohs (rx_list->rx_next),
+ ntohs (rx_list->rx_qsize), ntohs (rx_list->rx_fill[0]),
+ ntohs (rx_list->rx_consume[0]),
+ ntohs (rx_list->rx_produce[0]),
+ ntohs (rx_list->rx_qsize), ntohs (rx_list->rx_fill[1]),
+ ntohs (rx_list->rx_consume[1]),
+ ntohs (rx_list->rx_produce[1]));
+ }
for (qid = 0; qid < mp->tx_count; qid++)
{
vl_api_vmxnet3_tx_list_t *tx_list = &mp->tx_list[qid];