author     Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 14:51:32 +0200
committer  Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 16:20:45 +0200
commit     7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree       4bfeadc905c977e45e54a90c42330553b8942e4e /doc/guides/sample_app_ug/quota_watermark.rst
parent     ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)

Imported Upstream version 17.05

Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'doc/guides/sample_app_ug/quota_watermark.rst')
-rw-r--r--  doc/guides/sample_app_ug/quota_watermark.rst  182
1 files changed, 99 insertions, 83 deletions
diff --git a/doc/guides/sample_app_ug/quota_watermark.rst b/doc/guides/sample_app_ug/quota_watermark.rst
index c56683aa..2c3a4320 100644
--- a/doc/guides/sample_app_ug/quota_watermark.rst
+++ b/doc/guides/sample_app_ug/quota_watermark.rst
@@ -1,5 +1,5 @@
.. BSD LICENSE
- Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
+ Copyright(c) 2010-2017 Intel Corporation. All rights reserved.
All rights reserved.
Redistribution and use in source and binary forms, with or without
@@ -31,11 +31,13 @@
Quota and Watermark Sample Application
======================================
-The Quota and Watermark sample application is a simple example of packet processing using Data Plane Development Kit (DPDK) that
-showcases the use of a quota as the maximum number of packets enqueue/dequeue at a time and low and high watermarks
-to signal low and high ring usage respectively.
+The Quota and Watermark sample application is a simple example of packet
+processing using the Data Plane Development Kit (DPDK) that showcases the use
+of a quota as the maximum number of packets enqueued/dequeued at a time and
+low and high thresholds, or watermarks, to signal low and high ring usage
+respectively.
-Additionally, it shows how ring watermarks can be used to feedback congestion notifications to data producers by
+Additionally, it shows how the thresholds can be used to feed back congestion notifications to data producers by
temporarily stopping processing overloaded rings and sending Ethernet flow control frames.
This sample application is split in two parts:
@@ -64,7 +66,7 @@ each stage of which being connected by rings, as shown in :numref:`figure_pipeli
An adjustable quota value controls how many packets are being moved through the pipeline per enqueue and dequeue.
-Adjustable watermark values associated with the rings control a back-off mechanism that
+Adjustable threshold values associated with the rings control a back-off mechanism that
tries to prevent the pipeline from being overloaded by:
* Stopping enqueuing on rings for which the usage has crossed the high watermark threshold
@@ -136,7 +138,7 @@ issue the following command:
.. code-block:: console
- ./qw/build/qw -c f -n 4 -- -p 5
+ ./qw/build/qw -l 0-3 -n 4 -- -p 5
Refer to the *DPDK Getting Started Guide* for general information on running applications and
the Environment Abstraction Layer (EAL) options.
@@ -157,7 +159,7 @@ To run the application in a linuxapp environment on logical core 0, issue the fo
.. code-block:: console
- ./qwctl/build/qwctl -c 1 -n 4 --proc-type=secondary
+ ./qwctl/build/qwctl -l 0 -n 4 --proc-type=secondary
Refer to the *DPDK Getting Started* Guide for general information on running applications and
the Environment Abstraction Layer (EAL) options.
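The --proc-type=secondary option is what allows qwctl to modify the quota and watermark
variables living in memory owned by the qw primary process. As a purely illustrative
sketch (this guard is an assumption, not code from the application), the requirement
could be asserted right after rte_eal_init():

.. code-block:: c

    /* Hypothetical guard in qwctl's main(): the control program only makes
     * sense when the EAL has attached to an existing primary process. */
    if (rte_eal_process_type() != RTE_PROC_SECONDARY)
            rte_exit(EXIT_FAILURE, "qwctl must be run as a secondary DPDK process\n");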
@@ -202,9 +204,9 @@ Then, a call to init_dpdk(), defined in init.c, is made to initialize the poll m
/* Bind the drivers to usable devices */
- ret = rte_eal_pci_probe();
+ ret = rte_pci_probe();
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eal_pci_probe(): error %d\n", ret);
+ rte_exit(EXIT_FAILURE, "rte_pci_probe(): error %d\n", ret);
if (rte_eth_dev_count() < 2)
rte_exit(EXIT_FAILURE, "Not enough Ethernet port available\n");
@@ -216,25 +218,26 @@ in the *DPDK Getting Started Guide* and the *DPDK API Reference*.
Shared Variables Setup
^^^^^^^^^^^^^^^^^^^^^^
-The quota and low_watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():
+The quota and high and low watermark shared variables are put into an rte_memzone using a call to setup_shared_variables():
.. code-block:: c
void
setup_shared_variables(void)
{
-        const struct rte_memzone *qw_memzone;
-
-        qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME, 2 * sizeof(int), rte_socket_id(), RTE_MEMZONE_2MB);
+        const struct rte_memzone *qw_memzone;
-        if (qw_memzone == NULL)
-                rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
+        qw_memzone = rte_memzone_reserve(QUOTA_WATERMARK_MEMZONE_NAME,
+                        3 * sizeof(int), rte_socket_id(), 0);
+        if (qw_memzone == NULL)
+                rte_exit(EXIT_FAILURE, "%s\n", rte_strerror(rte_errno));
-        quota = qw_memzone->addr;
-        low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
- }
+        quota = qw_memzone->addr;
+        low_watermark = (unsigned int *) qw_memzone->addr + 1;
+        high_watermark = (unsigned int *) qw_memzone->addr + 2;
+ }
-These two variables are initialized to a default value in main() and
+These three variables are initialized to a default value in main() and
can be changed while qw is running using the qwctl control program.
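Because the variables live in the shared memzone, main() initializes them simply by
writing through the pointers set up above. A minimal sketch with purely illustrative
default values (the application's actual defaults may differ; RING_SIZE is the
application's ring size constant):

.. code-block:: c

    /* Illustrative defaults only: quota in packets per enqueue/dequeue,
     * watermarks in ring entries, expressed as a fraction of the ring size. */
    *quota = 32;
    *low_watermark = 60 * RING_SIZE / 100;
    *high_watermark = 85 * RING_SIZE / 100;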
Application Arguments
@@ -254,32 +257,24 @@ It contains a set of mbuf objects that are used by the driver and the applicatio
.. code-block:: c
/* Create a pool of mbuf to store packets */
-
- mbuf_pool = rte_mempool_create("mbuf_pool", MBUF_PER_POOL, MBUF_SIZE, 32, sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL, rte_socket_id(), 0);
+ mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", MBUF_PER_POOL, 32, 0,
+ MBUF_DATA_SIZE, rte_socket_id());
if (mbuf_pool == NULL)
rte_panic("%s\n", rte_strerror(rte_errno));
The rte_mempool is a generic structure used to handle pools of objects.
-In this case, it is necessary to create a pool that will be used by the driver,
-which expects to have some reserved space in the mempool structure, sizeof(struct rte_pktmbuf_pool_private) bytes.
+In this case, it is necessary to create a pool that will be used by the driver.
-The number of allocated pkt mbufs is MBUF_PER_POOL, with a size of MBUF_SIZE each.
+The number of allocated pkt mbufs is MBUF_PER_POOL, with a data room size
+of MBUF_DATA_SIZE each.
A per-lcore cache of 32 mbufs is kept.
The memory is allocated on the master lcore's socket, but it is possible to extend this code to allocate one mbuf pool per socket.
-Two callback pointers are also given to the rte_mempool_create() function:
-
-* The first callback pointer is to rte_pktmbuf_pool_init() and is used to initialize the private data of the mempool,
- which is needed by the driver.
- This function is provided by the mbuf API, but can be copied and extended by the developer.
-
-* The second callback pointer given to rte_mempool_create() is the mbuf initializer.
-
-The default is used, that is, rte_pktmbuf_init(), which is provided in the rte_mbuf library.
-If a more complex application wants to extend the rte_pktmbuf structure for its own needs,
-a new function derived from rte_pktmbuf_init() can be created.
+The rte_pktmbuf_pool_create() function uses the default mbuf pool and mbuf
+initializers, respectively rte_pktmbuf_pool_init() and rte_pktmbuf_init().
+An advanced application may want to use the mempool API to create the
+mbuf pool with more control.
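For comparison, a pool with non-default behaviour could still be built through the
generic mempool API, along the lines of the code removed above (MBUF_PER_POOL and
MBUF_SIZE follow the application's former defines):

.. code-block:: c

    /* Sketch: equivalent pool creation via rte_mempool_create(), passing the
     * pktmbuf pool and object initializers explicitly. */
    mbuf_pool = rte_mempool_create("mbuf_pool", MBUF_PER_POOL, MBUF_SIZE, 32,
                    sizeof(struct rte_pktmbuf_pool_private),
                    rte_pktmbuf_pool_init, NULL,
                    rte_pktmbuf_init, NULL,
                    rte_socket_id(), 0);
    if (mbuf_pool == NULL)
        rte_panic("%s\n", rte_strerror(rte_errno));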
Ports Configuration and Pairing
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
@@ -357,27 +352,37 @@ This is done using the following code:
/* Process each port round robin style */
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
-        if (!is_bit_set(port_id, portmask))
-                continue;
-
-        ring = rings[lcore_id][port_id];
-
-        if (ring_state[port_id] != RING_READY) {
-                if (rte_ring_count(ring) > *low_watermark)
-                        continue;
-                else
-                        ring_state[port_id] = RING_READY;
-        }
-
-        /* Enqueue received packets on the RX ring */
-
-        nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts, *quota);
-
-        ret = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts);
-        if (ret == -EDQUOT) {
-                ring_state[port_id] = RING_OVERLOADED;
-                send_pause_frame(port_id, 1337);
-        }
+        if (!is_bit_set(port_id, portmask))
+                continue;
+
+        ring = rings[lcore_id][port_id];
+
+        if (ring_state[port_id] != RING_READY) {
+                if (rte_ring_count(ring) > *low_watermark)
+                        continue;
+                else
+                        ring_state[port_id] = RING_READY;
+        }
+
+        /* Enqueue received packets on the RX ring */
+        nb_rx_pkts = rte_eth_rx_burst(port_id, 0, pkts,
+                        (uint16_t) *quota);
+        ret = rte_ring_enqueue_bulk(ring, (void *) pkts,
+                        nb_rx_pkts, &free);
+        if (RING_SIZE - free > *high_watermark) {
+                ring_state[port_id] = RING_OVERLOADED;
+                send_pause_frame(port_id, 1337);
+        }
+
+        if (ret == 0) {
+
+                /*
+                 * Return mbufs to the pool,
+                 * effectively dropping packets
+                 */
+                for (i = 0; i < nb_rx_pkts; i++)
+                        rte_pktmbuf_free(pkts[i]);
+        }
}
For each port in the port mask, the corresponding ring's pointer is fetched into ring and that ring's state is checked:
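As a reference for the ring calls in this loop: rte_ring_enqueue_bulk() is
all-or-nothing, returning the number of objects enqueued (nb_rx_pkts or 0), and it
reports the remaining free space through its last argument, from which the usage
checked against the high watermark is derived. A condensed sketch of that pattern
(ring, pkts, RING_SIZE and the watermark pointers are the application's variables):

.. code-block:: c

    unsigned int free_space;
    unsigned int nb_enq;

    /* Enqueue the whole burst or nothing at all */
    nb_enq = rte_ring_enqueue_bulk(ring, (void *) pkts, nb_rx_pkts, &free_space);

    /* Ring usage for the watermark test, as in the code above */
    if (RING_SIZE - free_space > *high_watermark)
            ring_state[port_id] = RING_OVERLOADED;

    if (nb_enq == 0) {
            /* Nothing was enqueued: free the mbufs, i.e. drop the burst */
            for (i = 0; i < nb_rx_pkts; i++)
                    rte_pktmbuf_free(pkts[i]);
    }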
@@ -398,30 +403,40 @@ This thread is running on most of the logical cores to create and arbitrarily lo
previous_lcore_id = get_previous_lcore_id(lcore_id);
for (port_id = 0; port_id < RTE_MAX_ETHPORTS; port_id++) {
-        if (!is_bit_set(port_id, portmask))
-                continue;
-
-        tx = rings[lcore_id][port_id];
-        rx = rings[previous_lcore_id][port_id];
-        if (ring_state[port_id] != RING_READY) {
-                if (rte_ring_count(tx) > *low_watermark)
-                        continue;
-                else
-                        ring_state[port_id] = RING_READY;
-        }
-
-        /* Dequeue up to quota mbuf from rx */
-
-        nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts, *quota);
-
-        if (unlikely(nb_dq_pkts < 0))
-                continue;
-
-        /* Enqueue them on tx */
-
-        ret = rte_ring_enqueue_bulk(tx, pkts, nb_dq_pkts);
-        if (ret == -EDQUOT)
-                ring_state[port_id] = RING_OVERLOADED;
+        if (!is_bit_set(port_id, portmask))
+                continue;
+
+        tx = rings[lcore_id][port_id];
+        rx = rings[previous_lcore_id][port_id];
+
+        if (ring_state[port_id] != RING_READY) {
+                if (rte_ring_count(tx) > *low_watermark)
+                        continue;
+                else
+                        ring_state[port_id] = RING_READY;
+        }
+
+        /* Dequeue up to quota mbuf from rx */
+        nb_dq_pkts = rte_ring_dequeue_burst(rx, pkts,
+                        *quota, NULL);
+        if (unlikely(nb_dq_pkts < 0))
+                continue;
+
+        /* Enqueue them on tx */
+        ret = rte_ring_enqueue_bulk(tx, pkts,
+                        nb_dq_pkts, &free);
+        if (RING_SIZE - free > *high_watermark)
+                ring_state[port_id] = RING_OVERLOADED;
+
+        if (ret == 0) {
+
+                /*
+                 * Return mbufs to the pool,
+                 * effectively dropping packets
+                 */
+                for (i = 0; i < nb_dq_pkts; i++)
+                        rte_pktmbuf_free(pkts[i]);
+        }
}
The thread's logic works mostly like receive_stage(),
@@ -490,5 +505,6 @@ low_watermark from the rte_memzone previously created by qw.
quota = qw_memzone->addr;
- low_watermark = (unsigned int *) qw_memzone->addr + sizeof(int);
+ low_watermark = (unsigned int *) qw_memzone->addr + 1;
+ high_watermark = (unsigned int *) qw_memzone->addr + 2;
}
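A sketch of how the corresponding setup_shared_variables() in qwctl may look as a
whole, assuming it shares the QUOTA_WATERMARK_MEMZONE_NAME macro with qw and simply
looks the zone up instead of reserving it:

.. code-block:: c

    void
    setup_shared_variables(void)
    {
            const struct rte_memzone *qw_memzone;

            /* Find the memzone reserved by the qw primary process */
            qw_memzone = rte_memzone_lookup(QUOTA_WATERMARK_MEMZONE_NAME);
            if (qw_memzone == NULL)
                    rte_exit(EXIT_FAILURE, "Couldn't find memzone\n");

            quota = qw_memzone->addr;
            low_watermark = (unsigned int *) qw_memzone->addr + 1;
            high_watermark = (unsigned int *) qw_memzone->addr + 2;
    }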