aboutsummaryrefslogtreecommitdiffstats
path: root/examples
diff options
context:
space:
mode:
authorLuca Boccassi <luca.boccassi@gmail.com>2017-11-08 14:15:11 +0000
committerLuca Boccassi <luca.boccassi@gmail.com>2017-11-08 14:45:54 +0000
commit055c52583a2794da8ba1e85a48cce3832372b12f (patch)
tree8ceb1cb78fbb46a0f341f8ee24feb3c6b5540013 /examples
parentf239aed5e674965691846e8ce3f187dd47523689 (diff)
New upstream version 17.11-rc3
Change-Id: I6a5baa40612fe0c20f30b5fa773a6cbbac63a685 Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'examples')
-rw-r--r--examples/Makefile6
-rw-r--r--examples/bond/main.c18
-rw-r--r--examples/cmdline/main.c1
-rw-r--r--examples/distributor/main.c28
-rw-r--r--examples/ethtool/Makefile1
-rw-r--r--examples/ethtool/lib/Makefile1
-rw-r--r--examples/ethtool/lib/rte_ethtool.c44
-rw-r--r--examples/ethtool/lib/rte_ethtool.h42
-rw-r--r--examples/eventdev_pipeline_sw_pmd/main.c40
-rw-r--r--examples/exception_path/main.c39
-rw-r--r--examples/flow_classify/Makefile57
-rw-r--r--examples/flow_classify/flow_classify.c852
-rw-r--r--examples/flow_classify/ipv4_rules_file.txt14
-rw-r--r--examples/flow_filtering/Makefile49
-rw-r--r--examples/flow_filtering/flow_blocks.c150
-rw-r--r--examples/flow_filtering/main.c244
-rw-r--r--examples/helloworld/main.c1
-rw-r--r--examples/ip_fragmentation/main.c35
-rw-r--r--examples/ip_pipeline/Makefile2
-rw-r--r--examples/ip_pipeline/app.h4
-rw-r--r--examples/ip_pipeline/config_parse.c19
-rw-r--r--examples/ip_pipeline/init.c10
-rw-r--r--examples/ip_pipeline/pipeline/hash_func.h178
-rw-r--r--examples/ip_pipeline/pipeline/hash_func_arm64.h261
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification.c10
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c53
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_passthrough_be.c10
-rw-r--r--examples/ip_pipeline/pipeline/pipeline_routing_be.c15
-rw-r--r--examples/ip_reassembly/main.c50
-rw-r--r--examples/ipsec-secgw/Makefile6
-rw-r--r--examples/ipsec-secgw/esp.c144
-rw-r--r--examples/ipsec-secgw/esp.h10
-rw-r--r--examples/ipsec-secgw/ipip.h3
-rw-r--r--examples/ipsec-secgw/ipsec-secgw.c105
-rw-r--r--examples/ipsec-secgw/ipsec.c306
-rw-r--r--examples/ipsec-secgw/ipsec.h33
-rw-r--r--examples/ipsec-secgw/sa.c154
-rw-r--r--examples/ipv4_multicast/main.c32
-rw-r--r--examples/kni/main.c59
-rw-r--r--examples/l2fwd-cat/Makefile5
-rw-r--r--examples/l2fwd-cat/cat.c85
-rw-r--r--examples/l2fwd-cat/l2fwd-cat.c12
-rw-r--r--examples/l2fwd-crypto/main.c115
-rw-r--r--examples/l2fwd-jobstats/main.c62
-rw-r--r--examples/l2fwd-keepalive/main.c52
-rw-r--r--examples/l2fwd/main.c49
-rw-r--r--examples/l3fwd-acl/main.c41
-rw-r--r--examples/l3fwd-power/main.c76
-rw-r--r--examples/l3fwd-vf/main.c52
-rw-r--r--examples/l3fwd/l3fwd.h10
-rw-r--r--examples/l3fwd/l3fwd_altivec.h284
-rw-r--r--examples/l3fwd/l3fwd_common.h2
-rw-r--r--examples/l3fwd/l3fwd_em.c19
-rw-r--r--examples/l3fwd/l3fwd_em.h6
-rw-r--r--examples/l3fwd/l3fwd_em_hlm.h14
-rw-r--r--examples/l3fwd/l3fwd_em_sequential.h4
-rw-r--r--examples/l3fwd/l3fwd_lpm.c20
-rw-r--r--examples/l3fwd/l3fwd_lpm.h4
-rw-r--r--examples/l3fwd/l3fwd_lpm_altivec.h164
-rw-r--r--examples/l3fwd/l3fwd_lpm_neon.h4
-rw-r--r--examples/l3fwd/l3fwd_lpm_sse.h4
-rw-r--r--examples/l3fwd/l3fwd_neon.h3
-rw-r--r--examples/l3fwd/main.c32
-rw-r--r--examples/link_status_interrupt/main.c26
-rw-r--r--examples/load_balancer/config.c16
-rw-r--r--examples/load_balancer/init.c36
-rw-r--r--examples/load_balancer/main.c2
-rw-r--r--examples/load_balancer/main.h11
-rw-r--r--examples/load_balancer/runtime.c14
-rw-r--r--examples/multi_process/client_server_mp/mp_client/client.c24
-rw-r--r--examples/multi_process/client_server_mp/mp_server/Makefile2
-rw-r--r--examples/multi_process/client_server_mp/mp_server/args.c4
-rw-r--r--examples/multi_process/client_server_mp/mp_server/args.h2
-rw-r--r--examples/multi_process/client_server_mp/mp_server/init.c12
-rw-r--r--examples/multi_process/client_server_mp/mp_server/main.c4
-rw-r--r--examples/multi_process/client_server_mp/shared/common.h4
-rw-r--r--examples/multi_process/l2fwd_fork/flib.c2
-rw-r--r--examples/multi_process/l2fwd_fork/main.c30
-rw-r--r--examples/multi_process/simple_mp/main.c5
-rw-r--r--examples/multi_process/simple_mp/mp_commands.c3
-rw-r--r--examples/multi_process/simple_mp/mp_commands.h1
-rw-r--r--examples/multi_process/symmetric_mp/main.c23
-rw-r--r--examples/netmap_compat/lib/compat_netmap.c53
-rw-r--r--examples/netmap_compat/lib/compat_netmap.h2
-rw-r--r--examples/packet_ordering/main.c19
-rw-r--r--examples/performance-thread/common/lthread.h2
-rw-r--r--examples/performance-thread/common/lthread_diag.c3
-rw-r--r--examples/performance-thread/common/lthread_sched.c17
-rw-r--r--examples/performance-thread/common/lthread_tls.c13
-rw-r--r--examples/performance-thread/l3fwd-thread/main.c151
-rw-r--r--examples/performance-thread/pthread_shim/main.c6
-rw-r--r--examples/performance-thread/pthread_shim/pthread_shim.c5
-rw-r--r--examples/ptpclient/ptpclient.c12
-rw-r--r--examples/qos_meter/main.c4
-rw-r--r--examples/qos_sched/args.c12
-rw-r--r--examples/qos_sched/cmdline.c30
-rw-r--r--examples/qos_sched/init.c33
-rw-r--r--examples/qos_sched/main.c14
-rw-r--r--examples/qos_sched/main.h24
-rw-r--r--examples/qos_sched/stats.c16
-rw-r--r--examples/quota_watermark/qw/init.c8
-rw-r--r--examples/quota_watermark/qw/init.h4
-rw-r--r--examples/quota_watermark/qw/main.c14
-rw-r--r--examples/quota_watermark/qw/main.h2
-rw-r--r--examples/rxtx_callbacks/main.c10
-rw-r--r--examples/server_node_efd/node/node.c22
-rw-r--r--examples/server_node_efd/server/Makefile2
-rw-r--r--examples/server_node_efd/server/init.c19
-rw-r--r--examples/server_node_efd/server/main.c4
-rw-r--r--examples/server_node_efd/shared/common.h4
-rw-r--r--examples/service_cores/Makefile (renamed from examples/vhost_xen/Makefile)16
-rw-r--r--examples/service_cores/main.c246
-rw-r--r--examples/skeleton/basicfwd.c12
-rw-r--r--examples/tep_termination/main.c6
-rw-r--r--examples/tep_termination/vxlan_setup.c6
-rw-r--r--examples/tep_termination/vxlan_setup.h10
-rw-r--r--examples/timer/main.c1
-rw-r--r--examples/vhost/main.c10
-rw-r--r--examples/vhost_scsi/scsi.c4
-rw-r--r--examples/vhost_xen/main.c1522
-rw-r--r--examples/vhost_xen/main.h66
-rw-r--r--examples/vhost_xen/vhost_monitor.c595
-rw-r--r--examples/vhost_xen/virtio-net.h113
-rw-r--r--examples/vhost_xen/xen_vhost.h148
-rw-r--r--examples/vhost_xen/xenstore_parse.c775
-rw-r--r--examples/vm_power_manager/Makefile16
-rw-r--r--examples/vm_power_manager/channel_manager.c67
-rw-r--r--examples/vm_power_manager/channel_manager.h25
-rw-r--r--examples/vm_power_manager/channel_monitor.c336
-rw-r--r--examples/vm_power_manager/channel_monitor.h18
-rw-r--r--examples/vm_power_manager/guest_cli/vm_power_cli_guest.c108
-rw-r--r--examples/vm_power_manager/guest_cli/vm_power_cli_guest.h6
-rw-r--r--examples/vm_power_manager/main.c261
-rw-r--r--examples/vm_power_manager/power_manager.c68
-rw-r--r--examples/vm_power_manager/power_manager.h65
-rw-r--r--examples/vm_power_manager/vm_power_cli.c21
-rw-r--r--examples/vmdq/main.c8
-rw-r--r--examples/vmdq_dcb/main.c8
138 files changed, 4988 insertions, 4480 deletions
diff --git a/examples/Makefile b/examples/Makefile
index 28354ff8..9f7974a1 100644
--- a/examples/Makefile
+++ b/examples/Makefile
@@ -43,13 +43,15 @@ DIRS-$(CONFIG_RTE_LIBRTE_DISTRIBUTOR) += distributor
DIRS-y += ethtool
DIRS-y += exception_path
DIRS-$(CONFIG_RTE_LIBRTE_EFD) += server_node_efd
+DIRS-$(CONFIG_RTE_LIBRTE_FLOW_CLASSIFY) += flow_classify
+DIRS-y += flow_filtering
DIRS-y += helloworld
DIRS-$(CONFIG_RTE_LIBRTE_PIPELINE) += ip_pipeline
ifeq ($(CONFIG_RTE_LIBRTE_LPM),y)
DIRS-$(CONFIG_RTE_IP_FRAG) += ip_reassembly
DIRS-$(CONFIG_RTE_IP_FRAG) += ip_fragmentation
endif
-ifeq ($(CONFIG_RTE_LIBRTE_ACL)$(CONFIG_RTE_LIBRTE_HASH)$(CONFIG_RTE_LIBRTE_LPM),yyy)
+ifeq ($(CONFIG_RTE_LIBRTE_ACL)$(CONFIG_RTE_LIBRTE_HASH)$(CONFIG_RTE_LIBRTE_LPM)$(CONFIG_RTE_LIBRTE_SECURITY),yyyy)
DIRS-$(CONFIG_RTE_LIBRTE_CRYPTODEV) += ipsec-secgw
endif
DIRS-$(CONFIG_RTE_LIBRTE_HASH) += ipv4_multicast
@@ -83,13 +85,13 @@ DIRS-$(CONFIG_RTE_LIBRTE_METER) += qos_meter
DIRS-$(CONFIG_RTE_LIBRTE_SCHED) += qos_sched
DIRS-y += quota_watermark
DIRS-$(CONFIG_RTE_ETHDEV_RXTX_CALLBACKS) += rxtx_callbacks
+DIRS-y += service_cores
DIRS-y += skeleton
ifeq ($(CONFIG_RTE_LIBRTE_HASH),y)
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += tep_termination
endif
DIRS-$(CONFIG_RTE_LIBRTE_TIMER) += timer
DIRS-$(CONFIG_RTE_LIBRTE_VHOST) += vhost vhost_scsi
-DIRS-$(CONFIG_RTE_LIBRTE_XEN_DOM0) += vhost_xen
DIRS-y += vmdq
DIRS-y += vmdq_dcb
ifeq ($(CONFIG_RTE_LIBRTE_POWER), y)
diff --git a/examples/bond/main.c b/examples/bond/main.c
index 2d019d43..8e3b1f34 100644
--- a/examples/bond/main.c
+++ b/examples/bond/main.c
@@ -51,7 +51,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -61,7 +60,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -141,10 +139,10 @@
addr.addr_bytes[0], addr.addr_bytes[1], addr.addr_bytes[2], \
addr.addr_bytes[3], addr.addr_bytes[4], addr.addr_bytes[5])
-uint8_t slaves[RTE_MAX_ETHPORTS];
-uint8_t slaves_count;
+uint16_t slaves[RTE_MAX_ETHPORTS];
+uint16_t slaves_count;
-static uint8_t BOND_PORT = 0xff;
+static uint16_t BOND_PORT = 0xffff;
static struct rte_mempool *mbuf_pool;
@@ -171,7 +169,7 @@ static struct rte_eth_conf port_conf = {
};
static void
-slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
+slave_port_init(uint16_t portid, struct rte_mempool *mbuf_pool)
{
int retval;
uint16_t nb_rxd = RTE_RX_DESC_DEFAULT;
@@ -215,7 +213,7 @@ slave_port_init(uint8_t portid, struct rte_mempool *mbuf_pool)
struct ether_addr addr;
rte_eth_macaddr_get(portid, &addr);
- printf("Port %u MAC: ", (unsigned)portid);
+ printf("Port %u MAC: ", portid);
PRINT_MAC(addr);
printf("\n");
}
@@ -234,7 +232,7 @@ bond_port_init(struct rte_mempool *mbuf_pool)
rte_exit(EXIT_FAILURE,
"Faled to create bond port\n");
- BOND_PORT = (uint8_t)retval;
+ BOND_PORT = retval;
retval = rte_eth_dev_configure(BOND_PORT, 1, 1, &port_conf);
if (retval != 0)
@@ -675,10 +673,10 @@ static void cmd_show_parsed(__attribute__((unused)) void *parsed_result,
struct cmdline *cl,
__attribute__((unused)) void *data)
{
- uint8_t slaves[16] = {0};
+ uint16_t slaves[16] = {0};
uint8_t len = 16;
struct ether_addr addr;
- uint8_t i = 0;
+ uint16_t i = 0;
while (i < slaves_count) {
rte_eth_macaddr_get(i, &addr);
diff --git a/examples/cmdline/main.c b/examples/cmdline/main.c
index c966df03..c6de944e 100644
--- a/examples/cmdline/main.c
+++ b/examples/cmdline/main.c
@@ -71,7 +71,6 @@
#include <cmdline.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_debug.h>
diff --git a/examples/distributor/main.c b/examples/distributor/main.c
index 87603d03..61e6e6b9 100644
--- a/examples/distributor/main.c
+++ b/examples/distributor/main.c
@@ -132,7 +132,7 @@ static void print_stats(void);
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rxRings = 1, txRings = rte_lcore_count() - 1;
@@ -175,13 +175,13 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
struct rte_eth_link link;
rte_eth_link_get_nowait(port, &link);
while (!link.link_status) {
- printf("Waiting for Link up on port %"PRIu8"\n", port);
+ printf("Waiting for Link up on port %"PRIu16"\n", port);
sleep(1);
rte_eth_link_get_nowait(port, &link);
}
if (!link.link_status) {
- printf("Link down on port %"PRIu8"\n", port);
+ printf("Link down on port %"PRIu16"\n", port);
return 0;
}
@@ -189,7 +189,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -210,9 +210,9 @@ struct lcore_params {
static int
lcore_rx(struct lcore_params *p)
{
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
- uint8_t port;
+ uint16_t port;
struct rte_mbuf *bufs[BURST_SIZE*2];
for (port = 0; port < nb_ports; port++) {
@@ -312,9 +312,9 @@ flush_one_port(struct output_buffer *outbuf, uint8_t outp)
}
static inline void
-flush_all_ports(struct output_buffer *tx_buffers, uint8_t nb_ports)
+flush_all_ports(struct output_buffer *tx_buffers, uint16_t nb_ports)
{
- uint8_t outp;
+ uint16_t outp;
for (outp = 0; outp < nb_ports; outp++) {
/* skip ports that are not enabled */
@@ -384,9 +384,9 @@ static int
lcore_tx(struct rte_ring *in_r)
{
static struct output_buffer tx_buffers[RTE_MAX_ETHPORTS];
- const uint8_t nb_ports = rte_eth_dev_count();
+ const uint16_t nb_ports = rte_eth_dev_count();
const int socket_id = rte_socket_id();
- uint8_t port;
+ uint16_t port;
for (port = 0; port < nb_ports; port++) {
/* skip ports that are not enabled */
@@ -668,8 +668,8 @@ main(int argc, char *argv[])
struct rte_ring *rx_dist_ring;
unsigned lcore_id, worker_id = 0;
unsigned nb_ports;
- uint8_t portid;
- uint8_t nb_ports_available;
+ uint16_t portid;
+ uint16_t nb_ports_available;
uint64_t t, freq;
/* catch ctrl-c so we can print on exit */
@@ -719,10 +719,10 @@ main(int argc, char *argv[])
continue;
}
/* init port */
- printf("Initializing port %u... done\n", (unsigned) portid);
+ printf("Initializing port %u... done\n", portid);
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
+ rte_exit(EXIT_FAILURE, "Cannot initialize port %u\n",
portid);
}
diff --git a/examples/ethtool/Makefile b/examples/ethtool/Makefile
index 30b42b70..e86d68ac 100644
--- a/examples/ethtool/Makefile
+++ b/examples/ethtool/Makefile
@@ -47,6 +47,5 @@ DIRS-y += lib ethtool-app
endif
DEPDIRS-ethtool-app := lib
-DEPDIRS-lib := librte_eal librte_ether
include $(RTE_SDK)/mk/rte.extsubdir.mk
diff --git a/examples/ethtool/lib/Makefile b/examples/ethtool/lib/Makefile
index 266babad..cabd82a0 100644
--- a/examples/ethtool/lib/Makefile
+++ b/examples/ethtool/lib/Makefile
@@ -59,5 +59,6 @@ ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
LDLIBS += -lrte_pmd_ixgbe
endif
endif
+LDLIBS += -lrte_eal -lrte_ethdev
include $(RTE_SDK)/mk/rte.extlib.mk
diff --git a/examples/ethtool/lib/rte_ethtool.c b/examples/ethtool/lib/rte_ethtool.c
index 252382cb..c70c5478 100644
--- a/examples/ethtool/lib/rte_ethtool.c
+++ b/examples/ethtool/lib/rte_ethtool.c
@@ -36,7 +36,7 @@
#include <rte_version.h>
#include <rte_ethdev.h>
#include <rte_ether.h>
-#include <rte_pci.h>
+#include <rte_bus_pci.h>
#ifdef RTE_LIBRTE_IXGBE_PMD
#include <rte_pmd_ixgbe.h>
#endif
@@ -47,7 +47,7 @@
int
-rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
+rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo)
{
struct rte_eth_dev_info dev_info;
struct rte_dev_reg_info reg_info;
@@ -106,7 +106,7 @@ rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo)
}
int
-rte_ethtool_get_regs_len(uint8_t port_id)
+rte_ethtool_get_regs_len(uint16_t port_id)
{
struct rte_dev_reg_info reg_info;
int ret;
@@ -121,7 +121,7 @@ rte_ethtool_get_regs_len(uint8_t port_id)
}
int
-rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs, void *data)
+rte_ethtool_get_regs(uint16_t port_id, struct ethtool_regs *regs, void *data)
{
struct rte_dev_reg_info reg_info;
int status;
@@ -141,7 +141,7 @@ rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs, void *data)
}
int
-rte_ethtool_get_link(uint8_t port_id)
+rte_ethtool_get_link(uint16_t port_id)
{
struct rte_eth_link link;
@@ -151,13 +151,13 @@ rte_ethtool_get_link(uint8_t port_id)
}
int
-rte_ethtool_get_eeprom_len(uint8_t port_id)
+rte_ethtool_get_eeprom_len(uint16_t port_id)
{
return rte_eth_dev_get_eeprom_length(port_id);
}
int
-rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+rte_ethtool_get_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words)
{
struct rte_dev_eeprom_info eeprom_info;
@@ -180,7 +180,7 @@ rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
}
int
-rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+rte_ethtool_set_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words)
{
struct rte_dev_eeprom_info eeprom_info;
@@ -203,7 +203,7 @@ rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
}
int
-rte_ethtool_get_pauseparam(uint8_t port_id,
+rte_ethtool_get_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *pause_param)
{
struct rte_eth_fc_conf fc_conf;
@@ -238,7 +238,7 @@ rte_ethtool_get_pauseparam(uint8_t port_id,
}
int
-rte_ethtool_set_pauseparam(uint8_t port_id,
+rte_ethtool_set_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *pause_param)
{
struct rte_eth_fc_conf fc_conf;
@@ -281,7 +281,7 @@ rte_ethtool_set_pauseparam(uint8_t port_id,
}
int
-rte_ethtool_net_open(uint8_t port_id)
+rte_ethtool_net_open(uint16_t port_id)
{
rte_eth_dev_stop(port_id);
@@ -289,7 +289,7 @@ rte_ethtool_net_open(uint8_t port_id)
}
int
-rte_ethtool_net_stop(uint8_t port_id)
+rte_ethtool_net_stop(uint16_t port_id)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
rte_eth_dev_stop(port_id);
@@ -298,7 +298,7 @@ rte_ethtool_net_stop(uint8_t port_id)
}
int
-rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr)
+rte_ethtool_net_get_mac_addr(uint16_t port_id, struct ether_addr *addr)
{
RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
if (addr == NULL)
@@ -309,7 +309,7 @@ rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr)
}
int
-rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr)
+rte_ethtool_net_set_mac_addr(uint16_t port_id, struct ether_addr *addr)
{
if (addr == NULL)
return -EINVAL;
@@ -317,7 +317,7 @@ rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr)
}
int
-rte_ethtool_net_validate_addr(uint8_t port_id __rte_unused,
+rte_ethtool_net_validate_addr(uint16_t port_id __rte_unused,
struct ether_addr *addr)
{
if (addr == NULL)
@@ -326,7 +326,7 @@ rte_ethtool_net_validate_addr(uint8_t port_id __rte_unused,
}
int
-rte_ethtool_net_change_mtu(uint8_t port_id, int mtu)
+rte_ethtool_net_change_mtu(uint16_t port_id, int mtu)
{
if (mtu < 0 || mtu > UINT16_MAX)
return -EINVAL;
@@ -334,7 +334,7 @@ rte_ethtool_net_change_mtu(uint8_t port_id, int mtu)
}
int
-rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats)
+rte_ethtool_net_get_stats64(uint16_t port_id, struct rte_eth_stats *stats)
{
if (stats == NULL)
return -EINVAL;
@@ -342,13 +342,13 @@ rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats)
}
int
-rte_ethtool_net_vlan_rx_add_vid(uint8_t port_id, uint16_t vid)
+rte_ethtool_net_vlan_rx_add_vid(uint16_t port_id, uint16_t vid)
{
return rte_eth_dev_vlan_filter(port_id, vid, 1);
}
int
-rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid)
+rte_ethtool_net_vlan_rx_kill_vid(uint16_t port_id, uint16_t vid)
{
return rte_eth_dev_vlan_filter(port_id, vid, 0);
}
@@ -361,7 +361,7 @@ rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid)
* driver can register device-specific implementation
*/
int
-rte_ethtool_net_set_rx_mode(uint8_t port_id)
+rte_ethtool_net_set_rx_mode(uint16_t port_id)
{
uint16_t num_vfs;
struct rte_eth_dev_info dev_info;
@@ -387,7 +387,7 @@ rte_ethtool_net_set_rx_mode(uint8_t port_id)
int
-rte_ethtool_get_ringparam(uint8_t port_id,
+rte_ethtool_get_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param)
{
struct rte_eth_dev_info dev_info;
@@ -419,7 +419,7 @@ rte_ethtool_get_ringparam(uint8_t port_id,
int
-rte_ethtool_set_ringparam(uint8_t port_id,
+rte_ethtool_set_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param)
{
struct rte_eth_rxq_info rx_qinfo;
diff --git a/examples/ethtool/lib/rte_ethtool.h b/examples/ethtool/lib/rte_ethtool.h
index 18f44404..1cd8f4d2 100644
--- a/examples/ethtool/lib/rte_ethtool.h
+++ b/examples/ethtool/lib/rte_ethtool.h
@@ -79,7 +79,7 @@ extern "C" {
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo);
+int rte_ethtool_get_drvinfo(uint16_t port_id, struct ethtool_drvinfo *drvinfo);
/**
* Retrieve the Ethernet device register length in bytes.
@@ -93,7 +93,7 @@ int rte_ethtool_get_drvinfo(uint8_t port_id, struct ethtool_drvinfo *drvinfo);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_regs_len(uint8_t port_id);
+int rte_ethtool_get_regs_len(uint16_t port_id);
/**
* Retrieve the Ethernet device register information according to
@@ -111,7 +111,7 @@ int rte_ethtool_get_regs_len(uint8_t port_id);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs,
+int rte_ethtool_get_regs(uint16_t port_id, struct ethtool_regs *regs,
void *data);
/**
@@ -127,7 +127,7 @@ int rte_ethtool_get_regs(uint8_t port_id, struct ethtool_regs *regs,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_link(uint8_t port_id);
+int rte_ethtool_get_link(uint16_t port_id);
/**
* Retrieve the Ethernet device EEPROM size
@@ -141,7 +141,7 @@ int rte_ethtool_get_link(uint8_t port_id);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_eeprom_len(uint8_t port_id);
+int rte_ethtool_get_eeprom_len(uint16_t port_id);
/**
* Retrieve EEPROM content based upon eeprom range described in ethtool
@@ -159,7 +159,7 @@ int rte_ethtool_get_eeprom_len(uint8_t port_id);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+int rte_ethtool_get_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words);
/**
@@ -179,7 +179,7 @@ int rte_ethtool_get_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
+int rte_ethtool_set_eeprom(uint16_t port_id, struct ethtool_eeprom *eeprom,
void *words);
/**
@@ -199,7 +199,7 @@ int rte_ethtool_set_eeprom(uint8_t port_id, struct ethtool_eeprom *eeprom,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_get_pauseparam(uint8_t port_id,
+int rte_ethtool_get_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *pause_param);
/**
@@ -217,7 +217,7 @@ int rte_ethtool_get_pauseparam(uint8_t port_id,
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_set_pauseparam(uint8_t port_id,
+int rte_ethtool_set_pauseparam(uint16_t port_id,
struct ethtool_pauseparam *param);
/**
@@ -231,7 +231,7 @@ int rte_ethtool_set_pauseparam(uint8_t port_id,
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_open(uint8_t port_id);
+int rte_ethtool_net_open(uint16_t port_id);
/**
* Stop the Ethernet device.
@@ -242,7 +242,7 @@ int rte_ethtool_net_open(uint8_t port_id);
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_ethtool_net_stop(uint8_t port_id);
+int rte_ethtool_net_stop(uint16_t port_id);
/**
* Get the Ethernet device MAC address.
@@ -255,7 +255,7 @@ int rte_ethtool_net_stop(uint8_t port_id);
* - (0) if successful.
* - (-ENODEV) if *port_id* invalid.
*/
-int rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr);
+int rte_ethtool_net_get_mac_addr(uint16_t port_id, struct ether_addr *addr);
/**
* Setting the Ethernet device MAC address.
@@ -271,7 +271,7 @@ int rte_ethtool_net_get_mac_addr(uint8_t port_id, struct ether_addr *addr);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr);
+int rte_ethtool_net_set_mac_addr(uint16_t port_id, struct ether_addr *addr);
/**
* Validate if the provided MAC address is valid unicast address
@@ -287,7 +287,7 @@ int rte_ethtool_net_set_mac_addr(uint8_t port_id, struct ether_addr *addr);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_validate_addr(uint8_t port_id, struct ether_addr *addr);
+int rte_ethtool_net_validate_addr(uint16_t port_id, struct ether_addr *addr);
/**
* Setting the Ethernet device maximum Tx unit.
@@ -303,7 +303,7 @@ int rte_ethtool_net_validate_addr(uint8_t port_id, struct ether_addr *addr);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_change_mtu(uint8_t port_id, int mtu);
+int rte_ethtool_net_change_mtu(uint16_t port_id, int mtu);
/**
* Retrieve the Ethernet device traffic statistics
@@ -319,7 +319,7 @@ int rte_ethtool_net_change_mtu(uint8_t port_id, int mtu);
* - (-EINVAL) if parameters invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats);
+int rte_ethtool_net_get_stats64(uint16_t port_id, struct rte_eth_stats *stats);
/**
* Update the Ethernet device VLAN filter with new vid
@@ -334,7 +334,7 @@ int rte_ethtool_net_get_stats64(uint8_t port_id, struct rte_eth_stats *stats);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_vlan_rx_add_vid(uint8_t port_id, uint16_t vid);
+int rte_ethtool_net_vlan_rx_add_vid(uint16_t port_id, uint16_t vid);
/**
* Remove VLAN id from Ethernet device.
@@ -349,7 +349,7 @@ int rte_ethtool_net_vlan_rx_add_vid(uint8_t port_id, uint16_t vid);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid);
+int rte_ethtool_net_vlan_rx_kill_vid(uint16_t port_id, uint16_t vid);
/**
* Setting the Ethernet device rx mode.
@@ -362,7 +362,7 @@ int rte_ethtool_net_vlan_rx_kill_vid(uint8_t port_id, uint16_t vid);
* - (-ENODEV) if *port_id* invalid.
* - others depends on the specific operations implementation.
*/
-int rte_ethtool_net_set_rx_mode(uint8_t port_id);
+int rte_ethtool_net_set_rx_mode(uint16_t port_id);
/**
* Getting ring parameters for Ethernet device.
@@ -380,7 +380,7 @@ int rte_ethtool_net_set_rx_mode(uint8_t port_id);
* Only the tx_pending and rx_pending fields of struct ethtool_ringparam
* are used, and the function only gets parameters for queue 0.
*/
-int rte_ethtool_get_ringparam(uint8_t port_id,
+int rte_ethtool_get_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param);
/**
@@ -399,7 +399,7 @@ int rte_ethtool_get_ringparam(uint8_t port_id,
* Only the tx_pending and rx_pending fields of struct ethtool_ringparam
* are used, and the function only sets parameters for queue 0.
*/
-int rte_ethtool_set_ringparam(uint8_t port_id,
+int rte_ethtool_set_ringparam(uint16_t port_id,
struct ethtool_ringparam *ring_param);
diff --git a/examples/eventdev_pipeline_sw_pmd/main.c b/examples/eventdev_pipeline_sw_pmd/main.c
index dd75cb7a..5f431d87 100644
--- a/examples/eventdev_pipeline_sw_pmd/main.c
+++ b/examples/eventdev_pipeline_sw_pmd/main.c
@@ -46,6 +46,7 @@
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
+#include <rte_service.h>
#define MAX_NUM_STAGES 8
#define BATCH_SIZE 16
@@ -76,6 +77,7 @@ struct fastpath_data {
uint32_t rx_lock;
uint32_t tx_lock;
uint32_t sched_lock;
+ uint32_t evdev_service_id;
bool rx_single;
bool tx_single;
bool sched_single;
@@ -108,7 +110,7 @@ struct config_data {
static struct config_data cdata = {
.num_packets = (1L << 25), /* do ~32M packets */
.num_fids = 512,
- .queue_type = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
+ .queue_type = RTE_SCHED_TYPE_ATOMIC,
.next_qid = {-1},
.qid = {-1},
.num_stages = 1,
@@ -233,7 +235,7 @@ producer(void)
}
static inline void
-schedule_devices(uint8_t dev_id, unsigned int lcore_id)
+schedule_devices(unsigned int lcore_id)
{
if (fdata->rx_core[lcore_id] && (fdata->rx_single ||
rte_atomic32_cmpset(&(fdata->rx_lock), 0, 1))) {
@@ -243,7 +245,7 @@ schedule_devices(uint8_t dev_id, unsigned int lcore_id)
if (fdata->sched_core[lcore_id] && (fdata->sched_single ||
rte_atomic32_cmpset(&(fdata->sched_lock), 0, 1))) {
- rte_event_schedule(dev_id);
+ rte_service_run_iter_on_app_lcore(fdata->evdev_service_id, 1);
if (cdata.dump_dev_signal) {
rte_event_dev_dump(0, stdout);
cdata.dump_dev_signal = 0;
@@ -294,7 +296,7 @@ worker(void *arg)
while (!fdata->done) {
uint16_t i;
- schedule_devices(dev_id, lcore_id);
+ schedule_devices(lcore_id);
if (!fdata->worker_core[lcore_id]) {
rte_pause();
@@ -490,10 +492,10 @@ parse_app_args(int argc, char **argv)
cdata.enable_queue_priorities = 1;
break;
case 'o':
- cdata.queue_type = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY;
+ cdata.queue_type = RTE_SCHED_TYPE_ORDERED;
break;
case 'p':
- cdata.queue_type = RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY;
+ cdata.queue_type = RTE_SCHED_TYPE_PARALLEL;
break;
case 'q':
cdata.quiet = 1;
@@ -684,7 +686,7 @@ setup_eventdev(struct prod_data *prod_data,
.new_event_threshold = 4096,
};
struct rte_event_queue_conf wkr_q_conf = {
- .event_queue_cfg = cdata.queue_type,
+ .schedule_type = cdata.queue_type,
.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
.nb_atomic_flows = 1024,
.nb_atomic_order_sequences = 1024,
@@ -696,11 +698,7 @@ setup_eventdev(struct prod_data *prod_data,
};
const struct rte_event_queue_conf tx_q_conf = {
.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST,
- .event_queue_cfg =
- RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY |
- RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
- .nb_atomic_flows = 1024,
- .nb_atomic_order_sequences = 1024,
+ .event_queue_cfg = RTE_EVENT_QUEUE_CFG_SINGLE_LINK,
};
struct port_link worker_queues[MAX_NUM_STAGES];
@@ -755,11 +753,11 @@ setup_eventdev(struct prod_data *prod_data,
}
const char *type_str = "Atomic";
- switch (wkr_q_conf.event_queue_cfg) {
- case RTE_EVENT_QUEUE_CFG_ORDERED_ONLY:
+ switch (wkr_q_conf.schedule_type) {
+ case RTE_SCHED_TYPE_ORDERED:
type_str = "Ordered";
break;
- case RTE_EVENT_QUEUE_CFG_PARALLEL_ONLY:
+ case RTE_SCHED_TYPE_PARALLEL:
type_str = "Parallel";
break;
}
@@ -843,6 +841,14 @@ setup_eventdev(struct prod_data *prod_data,
*cons_data = (struct cons_data){.dev_id = dev_id,
.port_id = i };
+ ret = rte_event_dev_service_id_get(dev_id,
+ &fdata->evdev_service_id);
+ if (ret != -ESRCH && ret != 0) {
+ printf("Error getting the service ID for sw eventdev\n");
+ return -1;
+ }
+ rte_service_runstate_set(fdata->evdev_service_id, 1);
+ rte_service_set_runstate_mapped_check(fdata->evdev_service_id, 0);
if (rte_event_dev_start(dev_id) < 0) {
printf("Error starting eventdev\n");
return -1;
@@ -911,9 +917,9 @@ main(int argc, char **argv)
printf("\tworkers: %u\n", cdata.num_workers);
printf("\tpackets: %"PRIi64"\n", cdata.num_packets);
printf("\tQueue-prio: %u\n", cdata.enable_queue_priorities);
- if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ORDERED_ONLY)
+ if (cdata.queue_type == RTE_SCHED_TYPE_ORDERED)
printf("\tqid0 type: ordered\n");
- if (cdata.queue_type == RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY)
+ if (cdata.queue_type == RTE_SCHED_TYPE_ATOMIC)
printf("\tqid0 type: atomic\n");
printf("\tCores available: %u\n", rte_lcore_count());
printf("\tCores used: %u\n", cores_needed);
diff --git a/examples/exception_path/main.c b/examples/exception_path/main.c
index e551e6d1..f8f5bbdf 100644
--- a/examples/exception_path/main.c
+++ b/examples/exception_path/main.c
@@ -55,7 +55,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
@@ -63,7 +62,6 @@
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
@@ -135,7 +133,7 @@ static uint64_t input_cores_mask = 0;
static uint64_t output_cores_mask = 0;
/* Array storing port_id that is associated with each lcore */
-static uint8_t port_ids[RTE_MAX_LCORE];
+static uint16_t port_ids[RTE_MAX_LCORE];
/* Structure type for recording lcore-specific stats */
struct stats {
@@ -360,8 +358,8 @@ static void
setup_port_lcore_affinities(void)
{
unsigned long i;
- uint8_t tx_port = 0;
- uint8_t rx_port = 0;
+ uint16_t tx_port = 0;
+ uint16_t rx_port = 0;
/* Setup port_ids[] array, and check masks were ok */
RTE_LCORE_FOREACH(i) {
@@ -444,24 +442,23 @@ parse_args(int argc, char **argv)
/* Initialise a single port on an Ethernet device */
static void
-init_port(uint8_t port)
+init_port(uint16_t port)
{
int ret;
uint16_t nb_rxd = NB_RXD;
uint16_t nb_txd = NB_TXD;
/* Initialise device and RX/TX queues */
- PRINT_INFO("Initialising port %u ...", (unsigned)port);
+ PRINT_INFO("Initialising port %u ...", port);
fflush(stdout);
ret = rte_eth_dev_configure(port, 1, 1, &port_conf);
if (ret < 0)
- FATAL_ERROR("Could not configure port%u (%d)",
- (unsigned)port, ret);
+ FATAL_ERROR("Could not configure port%u (%d)", port, ret);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(port, &nb_rxd, &nb_txd);
if (ret < 0)
FATAL_ERROR("Could not adjust number of descriptors for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_rx_queue_setup(port, 0, nb_rxd,
rte_eth_dev_socket_id(port),
@@ -469,29 +466,30 @@ init_port(uint8_t port)
pktmbuf_pool);
if (ret < 0)
FATAL_ERROR("Could not setup up RX queue for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_tx_queue_setup(port, 0, nb_txd,
rte_eth_dev_socket_id(port),
NULL);
if (ret < 0)
FATAL_ERROR("Could not setup up TX queue for port%u (%d)",
- (unsigned)port, ret);
+ port, ret);
ret = rte_eth_dev_start(port);
if (ret < 0)
- FATAL_ERROR("Could not start port%u (%d)", (unsigned)port, ret);
+ FATAL_ERROR("Could not start port%u (%d)", port, ret);
rte_eth_promiscuous_enable(port);
}
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -506,14 +504,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -546,7 +543,7 @@ main(int argc, char** argv)
{
int ret;
unsigned i,high_port;
- uint8_t nb_sys_ports, port;
+ uint16_t nb_sys_ports, port;
/* Associate signal_hanlder function with USR signals */
signal(SIGUSR1, signal_handler);
diff --git a/examples/flow_classify/Makefile b/examples/flow_classify/Makefile
new file mode 100644
index 00000000..eecdde14
--- /dev/null
+++ b/examples/flow_classify/Makefile
@@ -0,0 +1,57 @@
+# BSD LICENSE
+#
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Intel Corporation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+# binary name
+APP = flow_classify
+
+
+# all source are stored in SRCS-y
+SRCS-y := flow_classify.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/flow_classify/flow_classify.c b/examples/flow_classify/flow_classify.c
new file mode 100644
index 00000000..766f1dd0
--- /dev/null
+++ b/examples/flow_classify/flow_classify.c
@@ -0,0 +1,852 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdint.h>
+#include <inttypes.h>
+#include <getopt.h>
+
+#include <rte_eal.h>
+#include <rte_ethdev.h>
+#include <rte_cycles.h>
+#include <rte_lcore.h>
+#include <rte_mbuf.h>
+#include <rte_flow.h>
+#include <rte_flow_classify.h>
+#include <rte_table_acl.h>
+
+#define RX_RING_SIZE 128
+#define TX_RING_SIZE 512
+
+#define NUM_MBUFS 8191
+#define MBUF_CACHE_SIZE 250
+#define BURST_SIZE 32
+
+#define MAX_NUM_CLASSIFY 30
+#define FLOW_CLASSIFY_MAX_RULE_NUM 91
+#define FLOW_CLASSIFY_MAX_PRIORITY 8
+#define FLOW_CLASSIFIER_NAME_SIZE 64
+
+#define COMMENT_LEAD_CHAR ('#')
+#define OPTION_RULE_IPV4 "rule_ipv4"
+#define RTE_LOGTYPE_FLOW_CLASSIFY RTE_LOGTYPE_USER3
+#define flow_classify_log(format, ...) \
+ RTE_LOG(ERR, FLOW_CLASSIFY, format, ##__VA_ARGS__)
+
+#define uint32_t_to_char(ip, a, b, c, d) do {\
+ *a = (unsigned char)(ip >> 24 & 0xff);\
+ *b = (unsigned char)(ip >> 16 & 0xff);\
+ *c = (unsigned char)(ip >> 8 & 0xff);\
+ *d = (unsigned char)(ip & 0xff);\
+ } while (0)
+
+enum {
+ CB_FLD_SRC_ADDR,
+ CB_FLD_DST_ADDR,
+ CB_FLD_SRC_PORT,
+ CB_FLD_SRC_PORT_DLM,
+ CB_FLD_SRC_PORT_MASK,
+ CB_FLD_DST_PORT,
+ CB_FLD_DST_PORT_DLM,
+ CB_FLD_DST_PORT_MASK,
+ CB_FLD_PROTO,
+ CB_FLD_PRIORITY,
+ CB_FLD_NUM,
+};
+
+static struct{
+ const char *rule_ipv4_name;
+} parm_config;
+const char cb_port_delim[] = ":";
+
+static const struct rte_eth_conf port_conf_default = {
+ .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+};
+
+struct flow_classifier {
+ struct rte_flow_classifier *cls;
+ uint32_t table_id[RTE_FLOW_CLASSIFY_TABLE_MAX];
+};
+
+struct flow_classifier_acl {
+ struct flow_classifier cls;
+} __rte_cache_aligned;
+
+/* ACL field definitions for IPv4 5 tuple rule */
+
+enum {
+ PROTO_FIELD_IPV4,
+ SRC_FIELD_IPV4,
+ DST_FIELD_IPV4,
+ SRCP_FIELD_IPV4,
+ DSTP_FIELD_IPV4,
+ NUM_FIELDS_IPV4
+};
+
+enum {
+ PROTO_INPUT_IPV4,
+ SRC_INPUT_IPV4,
+ DST_INPUT_IPV4,
+ SRCP_DESTP_INPUT_IPV4
+};
+
+static struct rte_acl_field_def ipv4_defs[NUM_FIELDS_IPV4] = {
+ /* first input field - always one byte long. */
+ {
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint8_t),
+ .field_index = PROTO_FIELD_IPV4,
+ .input_index = PROTO_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, next_proto_id),
+ },
+ /* next input field (IPv4 source address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = SRC_FIELD_IPV4,
+ .input_index = SRC_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, src_addr),
+ },
+ /* next input field (IPv4 destination address) - 4 consecutive bytes. */
+ {
+ /* rte_flow uses a bit mask for IPv4 addresses */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint32_t),
+ .field_index = DST_FIELD_IPV4,
+ .input_index = DST_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ offsetof(struct ipv4_hdr, dst_addr),
+ },
+ /*
+ * Next 2 fields (src & dst ports) form 4 consecutive bytes.
+ * They share the same input index.
+ */
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = SRCP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, src_port),
+ },
+ {
+ /* rte_flow uses a bit mask for protocol ports */
+ .type = RTE_ACL_FIELD_TYPE_BITMASK,
+ .size = sizeof(uint16_t),
+ .field_index = DSTP_FIELD_IPV4,
+ .input_index = SRCP_DESTP_INPUT_IPV4,
+ .offset = sizeof(struct ether_hdr) +
+ sizeof(struct ipv4_hdr) +
+ offsetof(struct tcp_hdr, dst_port),
+ },
+};
+
+/* flow classify data */
+static int num_classify_rules;
+static struct rte_flow_classify_rule *rules[MAX_NUM_CLASSIFY];
+static struct rte_flow_classify_ipv4_5tuple_stats ntuple_stats;
+static struct rte_flow_classify_stats classify_stats = {
+ .stats = (void **)&ntuple_stats
+};
+
+/* parameters for rte_flow_classify_validate and
+ * rte_flow_classify_table_entry_add functions
+ */
+
+static struct rte_flow_item eth_item = { RTE_FLOW_ITEM_TYPE_ETH,
+ 0, 0, 0 };
+static struct rte_flow_item end_item = { RTE_FLOW_ITEM_TYPE_END,
+ 0, 0, 0 };
+
+/* sample actions:
+ * "actions count / end"
+ */
+static struct rte_flow_action count_action = { RTE_FLOW_ACTION_TYPE_COUNT, 0};
+static struct rte_flow_action end_action = { RTE_FLOW_ACTION_TYPE_END, 0};
+static struct rte_flow_action actions[2];
+
+/* sample attributes */
+static struct rte_flow_attr attr;
+
+/* flow_classify.c: * Based on DPDK skeleton forwarding example. */
+
+/*
+ * Initializes a given port using global settings and with the RX buffers
+ * coming from the mbuf_pool passed as a parameter.
+ */
+static inline int
+port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+{
+ struct rte_eth_conf port_conf = port_conf_default;
+ struct ether_addr addr;
+ const uint16_t rx_rings = 1, tx_rings = 1;
+ int retval;
+ uint16_t q;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ /* Allocate and set up 1 RX queue per Ethernet port. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ rte_eth_macaddr_get(port, &addr);
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ rte_eth_promiscuous_enable(port);
+
+ return 0;
+}
+
+/*
+ * The lcore main. This is the main thread that does the work, reading from
+ * an input port classifying the packets and writing to an output port.
+ */
+static __attribute__((noreturn)) void
+lcore_main(struct flow_classifier *cls_app)
+{
+ const uint8_t nb_ports = rte_eth_dev_count();
+ uint8_t port;
+ int ret;
+ int i = 0;
+
+ ret = rte_flow_classify_table_entry_delete(cls_app->cls,
+ cls_app->table_id[0], rules[7]);
+ if (ret)
+ printf("table_entry_delete failed [7] %d\n\n", ret);
+ else
+ printf("table_entry_delete succeeded [7]\n\n");
+
+ /*
+ * Check that the port is on the same NUMA node as the polling thread
+ * for best performance.
+ */
+ for (port = 0; port < nb_ports; port++)
+ if (rte_eth_dev_socket_id(port) > 0 &&
+ rte_eth_dev_socket_id(port) != (int)rte_socket_id()) {
+ printf("\n\n");
+ printf("WARNING: port %u is on remote NUMA node\n",
+ port);
+ printf("to polling thread.\n");
+ printf("Performance will not be optimal.\n");
+
+ printf("\nCore %u forwarding packets. ",
+ rte_lcore_id());
+ printf("[Ctrl+C to quit]\n");
+ }
+ /* Run until the application is quit or killed. */
+ for (;;) {
+ /*
+ * Receive packets on a port, classify them and forward them
+ * on the paired port.
+ * The mapping is 0 -> 1, 1 -> 0, 2 -> 3, 3 -> 2, etc.
+ */
+ for (port = 0; port < nb_ports; port++) {
+ /* Get burst of RX packets, from first port of pair. */
+ struct rte_mbuf *bufs[BURST_SIZE];
+ const uint16_t nb_rx = rte_eth_rx_burst(port, 0,
+ bufs, BURST_SIZE);
+
+ if (unlikely(nb_rx == 0))
+ continue;
+
+ for (i = 0; i < MAX_NUM_CLASSIFY; i++) {
+ if (rules[i]) {
+ ret = rte_flow_classifier_query(
+ cls_app->cls,
+ cls_app->table_id[0],
+ bufs, nb_rx, rules[i],
+ &classify_stats);
+ if (ret)
+ printf(
+ "rule [%d] query failed ret [%d]\n\n",
+ i, ret);
+ else {
+ printf(
+ "rule[%d] count=%"PRIu64"\n",
+ i, ntuple_stats.counter1);
+
+ printf("proto = %d\n",
+ ntuple_stats.ipv4_5tuple.proto);
+ }
+ }
+ }
+
+ /* Send burst of TX packets, to second port of pair. */
+ const uint16_t nb_tx = rte_eth_tx_burst(port ^ 1, 0,
+ bufs, nb_rx);
+
+ /* Free any unsent packets. */
+ if (unlikely(nb_tx < nb_rx)) {
+ uint16_t buf;
+
+ for (buf = nb_tx; buf < nb_rx; buf++)
+ rte_pktmbuf_free(bufs[buf]);
+ }
+ }
+ }
+}
+
+/*
+ * Parse IPv4 5 tuple rules file, ipv4_rules_file.txt.
+ * Expected format:
+ * <src_ipv4_addr>'/'<masklen> <space> \
+ * <dst_ipv4_addr>'/'<masklen> <space> \
+ * <src_port> <space> ":" <src_port_mask> <space> \
+ * <dst_port> <space> ":" <dst_port_mask> <space> \
+ * <proto>'/'<proto_mask> <space> \
+ * <priority>
+ */
+
+static int
+get_cb_field(char **in, uint32_t *fd, int base, unsigned long lim,
+ char dlm)
+{
+ unsigned long val;
+ char *end;
+
+ errno = 0;
+ val = strtoul(*in, &end, base);
+ if (errno != 0 || end[0] != dlm || val > lim)
+ return -EINVAL;
+ *fd = (uint32_t)val;
+ *in = end + 1;
+ return 0;
+}
+
+static int
+parse_ipv4_net(char *in, uint32_t *addr, uint32_t *mask_len)
+{
+ uint32_t a, b, c, d, m;
+
+ if (get_cb_field(&in, &a, 0, UINT8_MAX, '.'))
+ return -EINVAL;
+ if (get_cb_field(&in, &b, 0, UINT8_MAX, '.'))
+ return -EINVAL;
+ if (get_cb_field(&in, &c, 0, UINT8_MAX, '.'))
+ return -EINVAL;
+ if (get_cb_field(&in, &d, 0, UINT8_MAX, '/'))
+ return -EINVAL;
+ if (get_cb_field(&in, &m, 0, sizeof(uint32_t) * CHAR_BIT, 0))
+ return -EINVAL;
+
+ addr[0] = IPv4(a, b, c, d);
+ mask_len[0] = m;
+ return 0;
+}
+
+static int
+parse_ipv4_5tuple_rule(char *str, struct rte_eth_ntuple_filter *ntuple_filter)
+{
+ int i, ret;
+ char *s, *sp, *in[CB_FLD_NUM];
+ static const char *dlm = " \t\n";
+ int dim = CB_FLD_NUM;
+ uint32_t temp;
+
+ s = str;
+ for (i = 0; i != dim; i++, s = NULL) {
+ in[i] = strtok_r(s, dlm, &sp);
+ if (in[i] == NULL)
+ return -EINVAL;
+ }
+
+ ret = parse_ipv4_net(in[CB_FLD_SRC_ADDR],
+ &ntuple_filter->src_ip,
+ &ntuple_filter->src_ip_mask);
+ if (ret != 0) {
+ flow_classify_log("failed to read source address/mask: %s\n",
+ in[CB_FLD_SRC_ADDR]);
+ return ret;
+ }
+
+ ret = parse_ipv4_net(in[CB_FLD_DST_ADDR],
+ &ntuple_filter->dst_ip,
+ &ntuple_filter->dst_ip_mask);
+ if (ret != 0) {
+ flow_classify_log("failed to read destination address/mask: %s\n",
+ in[CB_FLD_DST_ADDR]);
+ return ret;
+ }
+
+ if (get_cb_field(&in[CB_FLD_SRC_PORT], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->src_port = (uint16_t)temp;
+
+ if (strncmp(in[CB_FLD_SRC_PORT_DLM], cb_port_delim,
+ sizeof(cb_port_delim)) != 0)
+ return -EINVAL;
+
+ if (get_cb_field(&in[CB_FLD_SRC_PORT_MASK], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->src_port_mask = (uint16_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_DST_PORT], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->dst_port = (uint16_t)temp;
+
+ if (strncmp(in[CB_FLD_DST_PORT_DLM], cb_port_delim,
+ sizeof(cb_port_delim)) != 0)
+ return -EINVAL;
+
+ if (get_cb_field(&in[CB_FLD_DST_PORT_MASK], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->dst_port_mask = (uint16_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, '/'))
+ return -EINVAL;
+ ntuple_filter->proto = (uint8_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_PROTO], &temp, 0, UINT8_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->proto_mask = (uint8_t)temp;
+
+ if (get_cb_field(&in[CB_FLD_PRIORITY], &temp, 0, UINT16_MAX, 0))
+ return -EINVAL;
+ ntuple_filter->priority = (uint16_t)temp;
+ if (ntuple_filter->priority > FLOW_CLASSIFY_MAX_PRIORITY)
+ ret = -EINVAL;
+
+ return ret;
+}
+
+/* Bypass comment and empty lines */
+static inline int
+is_bypass_line(char *buff)
+{
+ int i = 0;
+
+ /* comment line */
+ if (buff[0] == COMMENT_LEAD_CHAR)
+ return 1;
+ /* empty line */
+ while (buff[i] != '\0') {
+ if (!isspace((unsigned char)buff[i]))
+ return 0;
+ i++;
+ }
+ return 1;
+}
+
+static uint32_t
+convert_depth_to_bitmask(uint32_t depth_val)
+{
+ uint32_t bitmask = 0;
+ int i, j;
+
+ for (i = depth_val, j = 0; i > 0; i--, j++)
+ bitmask |= (1U << (31 - j));
+ return bitmask;
+}
+
+static int
+add_classify_rule(struct rte_eth_ntuple_filter *ntuple_filter,
+ struct flow_classifier *cls_app)
+{
+ int ret = -1;
+ int key_found;
+ struct rte_flow_error error;
+ struct rte_flow_item_ipv4 ipv4_spec;
+ struct rte_flow_item_ipv4 ipv4_mask;
+ struct rte_flow_item ipv4_udp_item;
+ struct rte_flow_item ipv4_tcp_item;
+ struct rte_flow_item ipv4_sctp_item;
+ struct rte_flow_item_udp udp_spec;
+ struct rte_flow_item_udp udp_mask;
+ struct rte_flow_item udp_item;
+ struct rte_flow_item_tcp tcp_spec;
+ struct rte_flow_item_tcp tcp_mask;
+ struct rte_flow_item tcp_item;
+ struct rte_flow_item_sctp sctp_spec;
+ struct rte_flow_item_sctp sctp_mask;
+ struct rte_flow_item sctp_item;
+ struct rte_flow_item pattern_ipv4_5tuple[4];
+ struct rte_flow_classify_rule *rule;
+ uint8_t ipv4_proto;
+
+ if (num_classify_rules >= MAX_NUM_CLASSIFY) {
+ printf(
+ "\nINFO: classify rule capacity %d reached\n",
+ num_classify_rules);
+ return ret;
+ }
+
+ /* set up parameters for validate and add */
+ memset(&ipv4_spec, 0, sizeof(ipv4_spec));
+ ipv4_spec.hdr.next_proto_id = ntuple_filter->proto;
+ ipv4_spec.hdr.src_addr = ntuple_filter->src_ip;
+ ipv4_spec.hdr.dst_addr = ntuple_filter->dst_ip;
+ ipv4_proto = ipv4_spec.hdr.next_proto_id;
+
+ memset(&ipv4_mask, 0, sizeof(ipv4_mask));
+ ipv4_mask.hdr.next_proto_id = ntuple_filter->proto_mask;
+ ipv4_mask.hdr.src_addr = ntuple_filter->src_ip_mask;
+ ipv4_mask.hdr.src_addr =
+ convert_depth_to_bitmask(ipv4_mask.hdr.src_addr);
+ ipv4_mask.hdr.dst_addr = ntuple_filter->dst_ip_mask;
+ ipv4_mask.hdr.dst_addr =
+ convert_depth_to_bitmask(ipv4_mask.hdr.dst_addr);
+
+ switch (ipv4_proto) {
+ case IPPROTO_UDP:
+ ipv4_udp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_udp_item.spec = &ipv4_spec;
+ ipv4_udp_item.mask = &ipv4_mask;
+ ipv4_udp_item.last = NULL;
+
+ udp_spec.hdr.src_port = ntuple_filter->src_port;
+ udp_spec.hdr.dst_port = ntuple_filter->dst_port;
+ udp_spec.hdr.dgram_len = 0;
+ udp_spec.hdr.dgram_cksum = 0;
+
+ udp_mask.hdr.src_port = ntuple_filter->src_port_mask;
+ udp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
+ udp_mask.hdr.dgram_len = 0;
+ udp_mask.hdr.dgram_cksum = 0;
+
+ udp_item.type = RTE_FLOW_ITEM_TYPE_UDP;
+ udp_item.spec = &udp_spec;
+ udp_item.mask = &udp_mask;
+ udp_item.last = NULL;
+
+ attr.priority = ntuple_filter->priority;
+ pattern_ipv4_5tuple[1] = ipv4_udp_item;
+ pattern_ipv4_5tuple[2] = udp_item;
+ break;
+ case IPPROTO_TCP:
+ ipv4_tcp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_tcp_item.spec = &ipv4_spec;
+ ipv4_tcp_item.mask = &ipv4_mask;
+ ipv4_tcp_item.last = NULL;
+
+ memset(&tcp_spec, 0, sizeof(tcp_spec));
+ tcp_spec.hdr.src_port = ntuple_filter->src_port;
+ tcp_spec.hdr.dst_port = ntuple_filter->dst_port;
+
+ memset(&tcp_mask, 0, sizeof(tcp_mask));
+ tcp_mask.hdr.src_port = ntuple_filter->src_port_mask;
+ tcp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
+
+ tcp_item.type = RTE_FLOW_ITEM_TYPE_TCP;
+ tcp_item.spec = &tcp_spec;
+ tcp_item.mask = &tcp_mask;
+ tcp_item.last = NULL;
+
+ attr.priority = ntuple_filter->priority;
+ pattern_ipv4_5tuple[1] = ipv4_tcp_item;
+ pattern_ipv4_5tuple[2] = tcp_item;
+ break;
+ case IPPROTO_SCTP:
+ ipv4_sctp_item.type = RTE_FLOW_ITEM_TYPE_IPV4;
+ ipv4_sctp_item.spec = &ipv4_spec;
+ ipv4_sctp_item.mask = &ipv4_mask;
+ ipv4_sctp_item.last = NULL;
+
+ sctp_spec.hdr.src_port = ntuple_filter->src_port;
+ sctp_spec.hdr.dst_port = ntuple_filter->dst_port;
+ sctp_spec.hdr.cksum = 0;
+ sctp_spec.hdr.tag = 0;
+
+ sctp_mask.hdr.src_port = ntuple_filter->src_port_mask;
+ sctp_mask.hdr.dst_port = ntuple_filter->dst_port_mask;
+ sctp_mask.hdr.cksum = 0;
+ sctp_mask.hdr.tag = 0;
+
+ sctp_item.type = RTE_FLOW_ITEM_TYPE_SCTP;
+ sctp_item.spec = &sctp_spec;
+ sctp_item.mask = &sctp_mask;
+ sctp_item.last = NULL;
+
+ attr.priority = ntuple_filter->priority;
+ pattern_ipv4_5tuple[1] = ipv4_sctp_item;
+ pattern_ipv4_5tuple[2] = sctp_item;
+ break;
+ default:
+ return ret;
+ }
+
+ attr.ingress = 1;
+ pattern_ipv4_5tuple[0] = eth_item;
+ pattern_ipv4_5tuple[3] = end_item;
+ actions[0] = count_action;
+ actions[1] = end_action;
+
+ rule = rte_flow_classify_table_entry_add(
+ cls_app->cls, cls_app->table_id[0], &key_found,
+ &attr, pattern_ipv4_5tuple, actions, &error);
+ if (rule == NULL) {
+ printf("table entry add failed ipv4_proto = %u\n",
+ ipv4_proto);
+ ret = -1;
+ return ret;
+ }
+
+ rules[num_classify_rules] = rule;
+ num_classify_rules++;
+ return 0;
+}
+
+static int
+add_rules(const char *rule_path, struct flow_classifier *cls_app)
+{
+ FILE *fh;
+ char buff[LINE_MAX];
+ unsigned int i = 0;
+ unsigned int total_num = 0;
+ struct rte_eth_ntuple_filter ntuple_filter;
+ int ret;
+
+ fh = fopen(rule_path, "rb");
+ if (fh == NULL)
+ rte_exit(EXIT_FAILURE, "%s: fopen %s failed\n", __func__,
+ rule_path);
+
+ ret = fseek(fh, 0, SEEK_SET);
+ if (ret)
+ rte_exit(EXIT_FAILURE, "%s: fseek %d failed\n", __func__,
+ ret);
+
+ i = 0;
+ while (fgets(buff, LINE_MAX, fh) != NULL) {
+ i++;
+
+ if (is_bypass_line(buff))
+ continue;
+
+ if (total_num >= FLOW_CLASSIFY_MAX_RULE_NUM - 1) {
+ printf("\nINFO: classify rule capacity %d reached\n",
+ total_num);
+ break;
+ }
+
+ if (parse_ipv4_5tuple_rule(buff, &ntuple_filter) != 0)
+ rte_exit(EXIT_FAILURE,
+ "%s Line %u: parse rules error\n",
+ rule_path, i);
+
+ if (add_classify_rule(&ntuple_filter, cls_app) != 0)
+ rte_exit(EXIT_FAILURE, "add rule error\n");
+
+ total_num++;
+ }
+
+ fclose(fh);
+ return 0;
+}
+
+/* display usage */
+static void
+print_usage(const char *prgname)
+{
+ printf("%s usage:\n", prgname);
+ printf("[EAL options] -- --"OPTION_RULE_IPV4"=FILE: ");
+ printf("specify the ipv4 rules file.\n");
+ printf("Each rule occupies one line in the file.\n");
+}
+
+/* Parse the argument given in the command line of the application */
+static int
+parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ {OPTION_RULE_IPV4, 1, 0, 0},
+ {NULL, 0, 0, 0}
+ };
+
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* long options */
+ case 0:
+ if (!strncmp(lgopts[option_index].name,
+ OPTION_RULE_IPV4,
+ sizeof(OPTION_RULE_IPV4)))
+ parm_config.rule_ipv4_name = optarg;
+ break;
+ default:
+ print_usage(prgname);
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 1; /* reset getopt lib */
+ return ret;
+}
+
+/*
+ * The main function, which does initialization and calls the lcore_main
+ * function.
+ */
+int
+main(int argc, char *argv[])
+{
+ struct rte_mempool *mbuf_pool;
+ uint8_t nb_ports;
+ uint8_t portid;
+ int ret;
+ int socket_id;
+ struct rte_table_acl_params table_acl_params;
+ struct rte_flow_classify_table_params cls_table_params;
+ struct flow_classifier *cls_app;
+ struct rte_flow_classifier_params cls_params;
+ uint32_t size;
+
+ /* Initialize the Environment Abstraction Layer (EAL). */
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
+
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid flow_classify parameters\n");
+
+ /* Check that there is an even number of ports to send/receive on. */
+ nb_ports = rte_eth_dev_count();
+ if (nb_ports < 2 || (nb_ports & 1))
+ rte_exit(EXIT_FAILURE, "Error: number of ports must be even\n");
+
+ /* Creates a new mempool in memory to hold the mbufs. */
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+ /* Initialize all ports. */
+ for (portid = 0; portid < nb_ports; portid++)
+ if (port_init(portid, mbuf_pool) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ portid);
+
+ if (rte_lcore_count() > 1)
+ printf("\nWARNING: Too many lcores enabled. Only 1 used.\n");
+
+ socket_id = rte_eth_dev_socket_id(0);
+
+ /* Memory allocation */
+ size = RTE_CACHE_LINE_ROUNDUP(sizeof(struct flow_classifier_acl));
+ cls_app = rte_zmalloc(NULL, size, RTE_CACHE_LINE_SIZE);
+ if (cls_app == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot allocate classifier memory\n");
+
+ cls_params.name = "flow_classifier";
+ cls_params.socket_id = socket_id;
+ cls_params.type = RTE_FLOW_CLASSIFY_TABLE_TYPE_ACL;
+
+ cls_app->cls = rte_flow_classifier_create(&cls_params);
+ if (cls_app->cls == NULL) {
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Cannot create classifier\n");
+ }
+
+ /* initialise ACL table params */
+ table_acl_params.name = "table_acl_ipv4_5tuple";
+ table_acl_params.n_rules = FLOW_CLASSIFY_MAX_RULE_NUM;
+ table_acl_params.n_rule_fields = RTE_DIM(ipv4_defs);
+ memcpy(table_acl_params.field_format, ipv4_defs, sizeof(ipv4_defs));
+
+ /* initialise table create params */
+ cls_table_params.ops = &rte_table_acl_ops,
+ cls_table_params.arg_create = &table_acl_params,
+
+ ret = rte_flow_classify_table_create(cls_app->cls, &cls_table_params,
+ &cls_app->table_id[0]);
+ if (ret) {
+ rte_flow_classifier_free(cls_app->cls);
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Failed to create classifier table\n");
+ }
+
+ /* read file of IPv4 5 tuple rules and initialize parameters
+ * for rte_flow_classify_validate and rte_flow_classify_table_entry_add
+ * API's.
+ */
+ if (add_rules(parm_config.rule_ipv4_name, cls_app)) {
+ rte_flow_classifier_free(cls_app->cls);
+ rte_free(cls_app);
+ rte_exit(EXIT_FAILURE, "Failed to add rules\n");
+ }
+
+ /* Call lcore_main on the master core only. */
+ lcore_main(cls_app);
+
+ return 0;
+}
diff --git a/examples/flow_classify/ipv4_rules_file.txt b/examples/flow_classify/ipv4_rules_file.txt
new file mode 100644
index 00000000..dfa0631f
--- /dev/null
+++ b/examples/flow_classify/ipv4_rules_file.txt
@@ -0,0 +1,14 @@
+#file format:
+#src_ip/masklen dst_ip/masklen src_port : mask dst_port : mask proto/mask priority
+#
+2.2.2.3/24 2.2.2.7/24 32 : 0xffff 33 : 0xffff 17/0xff 0
+9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 17/0xff 1
+9.9.9.3/24 9.9.9.7/24 32 : 0xffff 33 : 0xffff 6/0xff 2
+9.9.8.3/24 9.9.8.7/24 32 : 0xffff 33 : 0xffff 6/0xff 3
+6.7.8.9/24 2.3.4.5/24 32 : 0x0000 33 : 0x0000 132/0xff 4
+6.7.8.9/32 192.168.0.36/32 10 : 0xffff 11 : 0xffff 6/0xfe 5
+6.7.8.9/24 192.168.0.36/24 10 : 0xffff 11 : 0xffff 6/0xfe 6
+6.7.8.9/16 192.168.0.36/16 10 : 0xffff 11 : 0xffff 6/0xfe 7
+6.7.8.9/8 192.168.0.36/8 10 : 0xffff 11 : 0xffff 6/0xfe 8
+#error rules
+#9.8.7.6/8 192.168.0.36/8 10 : 0xffff 11 : 0xffff 6/0xfe 9 \ No newline at end of file
diff --git a/examples/flow_filtering/Makefile b/examples/flow_filtering/Makefile
new file mode 100644
index 00000000..70b82fe3
--- /dev/null
+++ b/examples/flow_filtering/Makefile
@@ -0,0 +1,49 @@
+#
+# BSD LICENSE
+#
+# Copyright 2017 Mellanox.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in
+# the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Mellanox nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+
+ifeq ($(RTE_SDK),)
+$(error "Please define RTE_SDK environment variable")
+endif
+
+# Default target, can be overridden by command line or environment
+RTE_TARGET ?= x86_64-native-linuxapp-gcc
+
+include $(RTE_SDK)/mk/rte.vars.mk
+
+APP = flow
+
+SRCS-y := main.c
+
+CFLAGS += -O3
+CFLAGS += $(WERROR_FLAGS)
+
+include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/flow_filtering/flow_blocks.c b/examples/flow_filtering/flow_blocks.c
new file mode 100644
index 00000000..f92df102
--- /dev/null
+++ b/examples/flow_filtering/flow_blocks.c
@@ -0,0 +1,150 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Mellanox nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define MAX_PATTERN_NUM 4
+
+struct rte_flow *
+generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
+ uint32_t src_ip, uint32_t src_mask,
+ uint32_t dest_ip, uint32_t dest_mask,
+ struct rte_flow_error *error);
+
+
+/**
+ * create a flow rule that sends packets with matching src and dest ip
+ * to selected queue.
+ *
+ * @param port_id
+ * The selected port.
+ * @param rx_q
+ * The selected target queue.
+ * @param src_ip
+ * The src ip value to match the input packet.
+ * @param src_mask
+ * The mask to apply to the src ip.
+ * @param dest_ip
+ * The dest ip value to match the input packet.
+ * @param dest_mask
+ * The mask to apply to the dest ip.
+ * @param[out] error
+ * Perform verbose error reporting if not NULL.
+ *
+ * @return
+ * A flow if the rule could be created else return NULL.
+ */
+struct rte_flow *
+generate_ipv4_flow(uint8_t port_id, uint16_t rx_q,
+		uint32_t src_ip, uint32_t src_mask,
+		uint32_t dest_ip, uint32_t dest_mask,
+		struct rte_flow_error *error)
+{
+	struct rte_flow_attr attr;
+	struct rte_flow_item pattern[MAX_PATTERN_NUM];
+	struct rte_flow_action action[MAX_PATTERN_NUM];
+	struct rte_flow *flow = NULL;
+	struct rte_flow_action_queue queue = { .index = rx_q }; /* target Rx queue for the QUEUE action */
+	struct rte_flow_item_eth eth_spec;
+	struct rte_flow_item_eth eth_mask;
+	struct rte_flow_item_vlan vlan_spec;
+	struct rte_flow_item_vlan vlan_mask;
+	struct rte_flow_item_ipv4 ip_spec;
+	struct rte_flow_item_ipv4 ip_mask;
+	int res;
+
+	memset(pattern, 0, sizeof(pattern));
+	memset(action, 0, sizeof(action));
+
+	/*
+	 * set the rule attribute.
+	 * in this case only ingress packets will be checked.
+	 */
+	memset(&attr, 0, sizeof(struct rte_flow_attr));
+	attr.ingress = 1;
+
+	/*
+	 * create the action sequence.
+	 * one action only, move packet to queue
+	 */
+
+	action[0].type = RTE_FLOW_ACTION_TYPE_QUEUE;
+	action[0].conf = &queue;
+	action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+	/*
+	 * set the first level of the pattern (eth).
+	 * since in this example we just want to get the
+	 * ipv4 we set this level to allow all.
+	 */
+	memset(&eth_spec, 0, sizeof(struct rte_flow_item_eth));
+	memset(&eth_mask, 0, sizeof(struct rte_flow_item_eth));
+	eth_spec.type = 0;
+	eth_mask.type = 0; /* zero mask: any EtherType matches */
+	pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+	pattern[0].spec = &eth_spec;
+	pattern[0].mask = &eth_mask;
+
+	/*
+	 * setting the second level of the pattern (vlan).
+	 * since in this example we just want to get the
+	 * ipv4 we also set this level to allow all.
+	 */
+	memset(&vlan_spec, 0, sizeof(struct rte_flow_item_vlan));
+	memset(&vlan_mask, 0, sizeof(struct rte_flow_item_vlan));
+	pattern[1].type = RTE_FLOW_ITEM_TYPE_VLAN;
+	pattern[1].spec = &vlan_spec;
+	pattern[1].mask = &vlan_mask;
+
+	/*
+	 * setting the third level of the pattern (ip).
+	 * in this example this is the level we care about
+	 * so we set it according to the parameters.
+	 */
+	memset(&ip_spec, 0, sizeof(struct rte_flow_item_ipv4));
+	memset(&ip_mask, 0, sizeof(struct rte_flow_item_ipv4));
+	ip_spec.hdr.dst_addr = htonl(dest_ip);
+	ip_mask.hdr.dst_addr = dest_mask; /* NOTE(review): spec is byte-swapped with htonl() but the mask is used as-is — harmless for the all-ones/all-zero masks this example passes; confirm before using partial masks */
+	ip_spec.hdr.src_addr = htonl(src_ip);
+	ip_mask.hdr.src_addr = src_mask;
+	pattern[2].type = RTE_FLOW_ITEM_TYPE_IPV4;
+	pattern[2].spec = &ip_spec;
+	pattern[2].mask = &ip_mask;
+
+	/* the final pattern item must always be RTE_FLOW_ITEM_TYPE_END */
+	pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+	res = rte_flow_validate(port_id, &attr, pattern, action, error);
+	if (!res) /* create the rule only if the PMD reports it as supported */
+		flow = rte_flow_create(port_id, &attr, pattern, action, error);
+
+	return flow;
+}
+
diff --git a/examples/flow_filtering/main.c b/examples/flow_filtering/main.c
new file mode 100644
index 00000000..7d739b4a
--- /dev/null
+++ b/examples/flow_filtering/main.c
@@ -0,0 +1,244 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright 2017 Mellanox.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Mellanox. nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <sys/queue.h>
+#include <netinet/in.h>
+#include <setjmp.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <errno.h>
+#include <getopt.h>
+#include <signal.h>
+#include <stdbool.h>
+
+#include <rte_eal.h>
+#include <rte_common.h>
+#include <rte_malloc.h>
+#include <rte_ether.h>
+#include <rte_ethdev.h>
+#include <rte_mempool.h>
+#include <rte_mbuf.h>
+#include <rte_net.h>
+#include <rte_flow.h>
+
+static volatile bool force_quit;
+
+static uint8_t port_id;
+static uint16_t nr_queues = 5;
+static uint8_t selected_queue = 1;
+struct rte_mempool *mbuf_pool;
+struct rte_flow *flow;
+
+#define SRC_IP ((0<<24) + (0<<16) + (0<<8) + 0) /* src ip = 0.0.0.0 */
+#define DEST_IP ((192<<24) + (168<<16) + (1<<8) + 1) /* dest ip = 192.168.1.1 */
+#define FULL_MASK 0xffffffff /* full mask */
+#define EMPTY_MASK 0x0 /* empty mask */
+
+#include "flow_blocks.c"
+
+static inline void
+print_ether_addr(const char *what, struct ether_addr *eth_addr)
+{
+	char buf[ETHER_ADDR_FMT_SIZE]; /* holds the formatted "XX:XX:XX:XX:XX:XX" string + NUL */
+	ether_format_addr(buf, ETHER_ADDR_FMT_SIZE, eth_addr);
+	printf("%s%s", what, buf); /* prefix + MAC; no newline — caller terminates the line */
+}
+
+static void
+main_loop(void)
+{
+	struct rte_mbuf *mbufs[32];
+	struct ether_hdr *eth_hdr;
+	struct rte_flow_error error;
+	uint16_t nb_rx;
+	uint16_t i;
+	uint16_t j;
+
+	while (!force_quit) { /* run until the signal handler sets force_quit */
+		for (i = 0; i < nr_queues; i++) { /* poll every Rx queue of the port */
+			nb_rx = rte_eth_rx_burst(port_id,
+						i, mbufs, 32);
+			if (nb_rx) {
+				for (j = 0; j < nb_rx; j++) {
+					struct rte_mbuf *m = mbufs[j];
+
+					eth_hdr = rte_pktmbuf_mtod(m,
+							struct ether_hdr *);
+					print_ether_addr("src=",
+							&eth_hdr->s_addr);
+					print_ether_addr(" - dst=",
+							&eth_hdr->d_addr);
+					printf(" - queue=0x%x",
+							(unsigned int)i);
+					printf("\n");
+
+					rte_pktmbuf_free(m); /* done with this packet; return mbuf to the pool */
+				}
+			}
+		}
+	}
+
+	/* closing and releasing resources */
+	rte_flow_flush(port_id, &error); /* destroy all flow rules installed on the port */
+	rte_eth_dev_stop(port_id);
+	rte_eth_dev_close(port_id);
+}
+
+static void
+assert_link_status(void)
+{
+	struct rte_eth_link link;
+
+	memset(&link, 0, sizeof(link));
+	rte_eth_link_get(port_id, &link); /* NOTE(review): single-shot check right after start — assumes rte_eth_link_get() waits for the link to resolve; confirm against the ethdev docs */
+	if (link.link_status == ETH_LINK_DOWN)
+		rte_exit(EXIT_FAILURE, ":: error: link is still down\n");
+}
+
+static void
+init_port(void)
+{
+	int ret;
+	uint16_t i;
+	struct rte_eth_conf port_conf = {
+		.rxmode = {
+			.split_hdr_size = 0,
+			/**< Header Split disabled */
+			.header_split = 0,
+			/**< IP checksum offload disabled */
+			.hw_ip_checksum = 0,
+			/**< VLAN filtering disabled */
+			.hw_vlan_filter = 0,
+			/**< Jumbo Frame Support disabled */
+			.jumbo_frame = 0,
+			/**< CRC stripped by hardware */
+			.hw_strip_crc = 1,
+		},
+	};
+
+	printf(":: initializing port: %d\n", port_id);
+	ret = rte_eth_dev_configure(port_id,
+				nr_queues, nr_queues, &port_conf);
+	if (ret < 0) {
+		rte_exit(EXIT_FAILURE,
+			":: cannot configure device: err=%d, port=%u\n",
+			ret, port_id);
+	}
+
+	/* set up Rx queues only: receiving is all this example needs so far */
+	for (i = 0; i < nr_queues; i++) {
+		ret = rte_eth_rx_queue_setup(port_id, i, 512,
+				rte_eth_dev_socket_id(port_id),
+				NULL, /* NULL = default Rx queue configuration */
+				mbuf_pool);
+		if (ret < 0) {
+			rte_exit(EXIT_FAILURE,
+				":: Rx queue setup failed: err=%d, port=%u\n",
+				ret, port_id);
+		}
+	}
+
+	rte_eth_promiscuous_enable(port_id); /* accept frames regardless of destination MAC */
+	ret = rte_eth_dev_start(port_id);
+	if (ret < 0) {
+		rte_exit(EXIT_FAILURE,
+			"rte_eth_dev_start:err=%d, port=%u\n",
+			ret, port_id);
+	}
+
+	assert_link_status();
+
+	printf(":: initializing port: %d done\n", port_id);
+}
+
+static void
+signal_handler(int signum)
+{
+	if (signum == SIGINT || signum == SIGTERM) {
+		printf("\n\nSignal %d received, preparing to exit...\n",
+				signum);
+		force_quit = true; /* ask main_loop to finish its current pass and clean up */
+	}
+}
+
+int
+main(int argc, char **argv)
+{
+	int ret;
+	uint8_t nr_ports;
+	struct rte_flow_error error;
+
+	ret = rte_eal_init(argc, argv);
+	if (ret < 0)
+		rte_exit(EXIT_FAILURE, ":: invalid EAL arguments\n");
+
+	force_quit = false;
+	signal(SIGINT, signal_handler);
+	signal(SIGTERM, signal_handler);
+
+	nr_ports = rte_eth_dev_count();
+	if (nr_ports == 0)
+		rte_exit(EXIT_FAILURE, ":: no Ethernet ports found\n");
+	port_id = 0; /* this example always drives the first detected port */
+	if (nr_ports != 1) {
+		printf(":: warn: %d ports detected, but we use only one: port %u\n",
+			nr_ports, port_id);
+	}
+	mbuf_pool = rte_pktmbuf_pool_create("mbuf_pool", 4096, 128, 0,
+					    RTE_MBUF_DEFAULT_BUF_SIZE,
+					    rte_socket_id());
+	if (mbuf_pool == NULL)
+		rte_exit(EXIT_FAILURE, "Cannot init mbuf pool\n");
+
+	init_port();
+
+	/* create a flow rule steering packets with dest ip DEST_IP (any src ip) to selected_queue */
+	flow = generate_ipv4_flow(port_id, selected_queue,
+				SRC_IP, EMPTY_MASK,
+				DEST_IP, FULL_MASK, &error);
+	if (!flow) {
+		printf("Flow can't be created %d message: %s\n",
+			error.type,
+			error.message ? error.message : "(no stated reason)");
+		rte_exit(EXIT_FAILURE, "error in creating flow");
+	}
+
+	main_loop(); /* polls until a signal arrives; stops and closes the port before returning */
+
+	return 0;
+}
diff --git a/examples/helloworld/main.c b/examples/helloworld/main.c
index 8b7a2de0..916b6ad0 100644
--- a/examples/helloworld/main.c
+++ b/examples/helloworld/main.c
@@ -38,7 +38,6 @@
#include <sys/queue.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/examples/ip_fragmentation/main.c b/examples/ip_fragmentation/main.c
index 8c0e1791..5aefe098 100644
--- a/examples/ip_fragmentation/main.c
+++ b/examples/ip_fragmentation/main.c
@@ -48,7 +48,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -58,7 +57,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -154,7 +152,7 @@ struct rx_queue {
struct rte_mempool *indirect_pool;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
- uint8_t portid;
+ uint16_t portid;
};
#define MAX_RX_QUEUE_PER_LCORE 16
@@ -240,7 +238,7 @@ static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -261,11 +259,12 @@ send_burst(struct lcore_queue_conf *qconf, uint16_t n, uint8_t port)
static inline void
l3fwd_simple_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf,
- uint8_t queueid, uint8_t port_in)
+ uint8_t queueid, uint16_t port_in)
{
struct rx_queue *rxq;
uint32_t i, len, next_hop;
- uint8_t port_out, ipv6;
+ uint8_t ipv6;
+ uint16_t port_out;
int32_t len2;
ipv6 = 0;
@@ -403,7 +402,7 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
@@ -423,7 +422,7 @@ main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].portid;
RTE_LOG(INFO, IP_FRAG, " -- lcoreid=%u portid=%d\n", lcore_id,
- (int) portid);
+ portid);
}
while (1) {
@@ -600,11 +599,12 @@ print_ethaddr(const char *name, struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -619,14 +619,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up .Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -708,7 +707,7 @@ parse_ptype(struct rte_mbuf *m)
/* callback function to detect packet type for a queue of a port */
static uint16_t
-cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
@@ -876,7 +875,7 @@ main(int argc, char **argv)
uint16_t queueid = 0;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1035,7 +1034,7 @@ main(int argc, char **argv)
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
diff --git a/examples/ip_pipeline/Makefile b/examples/ip_pipeline/Makefile
index dc7e0ddd..12ce0a1d 100644
--- a/examples/ip_pipeline/Makefile
+++ b/examples/ip_pipeline/Makefile
@@ -43,7 +43,7 @@ APP = ip_pipeline
VPATH += $(SRCDIR)/pipeline
-INC += $(wildcard *.h) $(wildcard pipeline/*.h)
+INC += $(sort $(wildcard *.h)) $(sort $(wildcard pipeline/*.h))
# all source are stored in SRCS-y
SRCS-$(CONFIG_RTE_LIBRTE_PIPELINE) := main.c
diff --git a/examples/ip_pipeline/app.h b/examples/ip_pipeline/app.h
index e41290e7..94e7a6df 100644
--- a/examples/ip_pipeline/app.h
+++ b/examples/ip_pipeline/app.h
@@ -428,10 +428,6 @@ struct app_eal_params {
/* Interrupt mode for VFIO (legacy|msi|msix) */
char *vfio_intr;
- /* Support running on Xen dom0 without hugetlbfs */
- uint32_t xen_dom0_present;
- int xen_dom0;
-
uint32_t parsed;
};
diff --git a/examples/ip_pipeline/config_parse.c b/examples/ip_pipeline/config_parse.c
index 0b761346..3211c6ab 100644
--- a/examples/ip_pipeline/config_parse.c
+++ b/examples/ip_pipeline/config_parse.c
@@ -809,21 +809,6 @@ parse_eal(struct app_params *app,
continue;
}
- /* xen_dom0 */
- if (strcmp(entry->name, "xen_dom0") == 0) {
- int val;
-
- PARSE_ERROR_DUPLICATE((p->xen_dom0_present == 0),
- section_name,
- entry->name);
- p->xen_dom0_present = 1;
-
- val = parser_read_arg_bool(entry->value);
- PARSE_ERROR((val >= 0), section_name, entry->name);
- p->xen_dom0 = val;
- continue;
- }
-
/* unrecognized */
PARSE_ERROR_INVALID(0, section_name, entry->name);
}
@@ -2643,10 +2628,6 @@ save_eal_params(struct app_params *app, FILE *f)
if (p->vfio_intr)
fprintf(f, "%s = %s\n", "vfio_intr", p->vfio_intr);
- if (p->xen_dom0_present)
- fprintf(f, "%s = %s\n", "xen_dom0",
- (p->xen_dom0) ? "yes" : "no");
-
fputc('\n', f);
}
diff --git a/examples/ip_pipeline/init.c b/examples/ip_pipeline/init.c
index 7cde49a4..e56e4048 100644
--- a/examples/ip_pipeline/init.c
+++ b/examples/ip_pipeline/init.c
@@ -49,6 +49,7 @@
#include <rte_ip.h>
#include <rte_eal.h>
#include <rte_malloc.h>
+#include <rte_bus_pci.h>
#include "app.h"
#include "pipeline.h"
@@ -296,11 +297,6 @@ app_init_eal(struct app_params *app)
app->eal_argv[n_args++] = strdup(buffer);
}
- if ((p->xen_dom0_present) && (p->xen_dom0)) {
- snprintf(buffer, sizeof(buffer), "--xen-dom0");
- app->eal_argv[n_args++] = strdup(buffer);
- }
-
snprintf(buffer, sizeof(buffer), "--");
app->eal_argv[n_args++] = strdup(buffer);
@@ -1236,7 +1232,7 @@ app_init_tap(struct app_params *app)
#ifdef RTE_LIBRTE_KNI
static int
-kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
+kni_config_network_interface(uint16_t port_id, uint8_t if_up) {
int ret = 0;
if (port_id >= rte_eth_dev_count())
@@ -1250,7 +1246,7 @@ kni_config_network_interface(uint8_t port_id, uint8_t if_up) {
}
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu) {
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu) {
int ret;
if (port_id >= rte_eth_dev_count())
diff --git a/examples/ip_pipeline/pipeline/hash_func.h b/examples/ip_pipeline/pipeline/hash_func.h
index b112369c..42128c1f 100644
--- a/examples/ip_pipeline/pipeline/hash_func.h
+++ b/examples/ip_pipeline/pipeline/hash_func.h
@@ -34,48 +34,56 @@
#define __INCLUDE_HASH_FUNC_H__
static inline uint64_t
-hash_xor_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0;
- xor0 = seed ^ k[0];
+ xor0 = seed ^ (k[0] & m[0]);
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0;
- xor0 = (k[0] ^ seed) ^ k[1];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0;
- xor0 = (k[0] ^ seed) ^ k[1];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
- xor0 ^= k[2];
+ xor0 ^= k[2] & m[2];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
xor0 ^= xor1;
@@ -83,30 +91,34 @@ hash_xor_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_xor_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
xor0 ^= xor1;
- xor0 ^= k[4];
+ xor0 ^= k[4] & m[4];
return (xor0 >> 32) ^ xor0;
}
static inline uint64_t
-hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1, xor2;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
- xor2 = k[4] ^ k[5];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
xor0 ^= xor1;
@@ -116,17 +128,19 @@ hash_xor_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1, xor2;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
- xor2 = k[4] ^ k[5];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
xor0 ^= xor1;
- xor2 ^= k[6];
+ xor2 ^= k[6] & m[6];
xor0 ^= xor2;
@@ -134,15 +148,17 @@ hash_xor_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_xor_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t xor0, xor1, xor2, xor3;
- xor0 = (k[0] ^ seed) ^ k[1];
- xor1 = k[2] ^ k[3];
- xor2 = k[4] ^ k[5];
- xor3 = k[6] ^ k[7];
+ xor0 = ((k[0] & m[0]) ^ seed) ^ (k[1] & m[1]);
+ xor1 = (k[2] & m[2]) ^ (k[3] & m[3]);
+ xor2 = (k[4] & m[4]) ^ (k[5] & m[5]);
+ xor3 = (k[6] & m[6]) ^ (k[7] & m[7]);
xor0 ^= xor1;
xor2 ^= xor3;
@@ -157,26 +173,30 @@ hash_xor_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
#include <x86intrin.h>
static inline uint64_t
-hash_crc_key8(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t crc0;
- crc0 = _mm_crc32_u64(seed, k[0]);
+ crc0 = _mm_crc32_u64(seed, k[0] & m[0]);
return crc0;
}
static inline uint64_t
-hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, crc0, crc1;
- k0 = k[0];
+ k0 = k[0] & m[0];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
crc0 ^= crc1;
@@ -184,16 +204,18 @@ hash_crc_key16(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, crc0, crc1;
- k0 = k[0];
- k2 = k[2];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
crc0 = _mm_crc32_u64(crc0, k2);
@@ -203,18 +225,20 @@ hash_crc_key24(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, crc0, crc1, crc2, crc3;
- k0 = k[0];
- k2 = k[2];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
crc3 = k2 >> 32;
crc0 = _mm_crc32_u64(crc0, crc1);
@@ -226,19 +250,21 @@ hash_crc_key32(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, crc0, crc1, crc2, crc3;
- k0 = k[0];
- k2 = k[2];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
crc0 = _mm_crc32_u64(crc0, crc1);
crc1 = _mm_crc32_u64(crc2, crc3);
@@ -249,20 +275,22 @@ hash_crc_key40(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3;
- k0 = k[0];
- k2 = k[2];
- k5 = k[5];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
crc1 = _mm_crc32_u64(crc3, k5);
@@ -273,22 +301,24 @@ hash_crc_key48(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
- k0 = k[0];
- k2 = k[2];
- k5 = k[5];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
- crc4 = _mm_crc32_u64(k5, k[6]);
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
crc5 = k5 >> 32;
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
@@ -300,23 +330,25 @@ hash_crc_key56(void *key, __rte_unused uint32_t key_size, uint64_t seed)
}
static inline uint64_t
-hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
{
uint64_t *k = key;
+ uint64_t *m = mask;
uint64_t k0, k2, k5, crc0, crc1, crc2, crc3, crc4, crc5;
- k0 = k[0];
- k2 = k[2];
- k5 = k[5];
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
crc0 = _mm_crc32_u64(k0, seed);
- crc1 = _mm_crc32_u64(k0 >> 32, k[1]);
+ crc1 = _mm_crc32_u64(k0 >> 32, k[1] & m[1]);
- crc2 = _mm_crc32_u64(k2, k[3]);
- crc3 = _mm_crc32_u64(k2 >> 32, k[4]);
+ crc2 = _mm_crc32_u64(k2, k[3] & m[3]);
+ crc3 = _mm_crc32_u64(k2 >> 32, k[4] & m[4]);
- crc4 = _mm_crc32_u64(k5, k[6]);
- crc5 = _mm_crc32_u64(k5 >> 32, k[7]);
+ crc4 = _mm_crc32_u64(k5, k[6] & m[6]);
+ crc5 = _mm_crc32_u64(k5 >> 32, k[7] & m[7]);
crc0 = _mm_crc32_u64(crc0, (crc1 << 32) ^ crc2);
crc1 = _mm_crc32_u64(crc3, (crc4 << 32) ^ crc5);
@@ -335,6 +367,8 @@ hash_crc_key64(void *key, __rte_unused uint32_t key_size, uint64_t seed)
#define hash_default_key56 hash_crc_key56
#define hash_default_key64 hash_crc_key64
+#elif defined(RTE_ARCH_ARM64)
+#include "hash_func_arm64.h"
#else
#define hash_default_key8 hash_xor_key8
diff --git a/examples/ip_pipeline/pipeline/hash_func_arm64.h b/examples/ip_pipeline/pipeline/hash_func_arm64.h
new file mode 100644
index 00000000..ae6c0f41
--- /dev/null
+++ b/examples/ip_pipeline/pipeline/hash_func_arm64.h
@@ -0,0 +1,261 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Linaro Limited. All rights reserved.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef __HASH_FUNC_ARM64_H__
+#define __HASH_FUNC_ARM64_H__
+
+#define _CRC32CX(crc, val) \
+ __asm__("crc32cx %w[c], %w[c], %x[v]":[c] "+r" (crc):[v] "r" (val))
+
+static inline uint64_t
+hash_crc_key8(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key;
+ uint64_t *m = mask;
+ uint32_t crc0;
+
+ crc0 = seed;
+ _CRC32CX(crc0, k[0] & m[0]);
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key16(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key24(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ _CRC32CX(crc0, k2);
+
+ crc0 ^= crc1;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key32(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key40(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, crc1);
+ _CRC32CX(crc2, crc3);
+
+ crc0 ^= crc2;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key48(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, k5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key56(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+static inline uint64_t
+hash_crc_key64(void *key, void *mask, __rte_unused uint32_t key_size,
+ uint64_t seed)
+{
+ uint64_t *k = key, k0, k2, k5;
+ uint64_t *m = mask;
+ uint32_t crc0, crc1, crc2, crc3, crc4, crc5;
+
+ k0 = k[0] & m[0];
+ k2 = k[2] & m[2];
+ k5 = k[5] & m[5];
+
+ crc0 = k0;
+ _CRC32CX(crc0, seed);
+ crc1 = k0 >> 32;
+ _CRC32CX(crc1, k[1] & m[1]);
+
+ crc2 = k2;
+ _CRC32CX(crc2, k[3] & m[3]);
+ crc3 = k2 >> 32;
+ _CRC32CX(crc3, k[4] & m[4]);
+
+ crc4 = k5;
+ _CRC32CX(crc4, k[6] & m[6]);
+ crc5 = k5 >> 32;
+ _CRC32CX(crc5, k[7] & m[7]);
+
+ _CRC32CX(crc0, ((uint64_t)crc1 << 32) ^ crc2);
+ _CRC32CX(crc3, ((uint64_t)crc4 << 32) ^ crc5);
+
+ crc0 ^= crc3;
+
+ return crc0;
+}
+
+#define hash_default_key8 hash_crc_key8
+#define hash_default_key16 hash_crc_key16
+#define hash_default_key24 hash_crc_key24
+#define hash_default_key32 hash_crc_key32
+#define hash_default_key40 hash_crc_key40
+#define hash_default_key48 hash_crc_key48
+#define hash_default_key56 hash_crc_key56
+#define hash_default_key64 hash_crc_key64
+
+#endif
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
index 9ef50cc9..70b19381 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification.c
@@ -88,8 +88,10 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
uint32_t *signature)
{
uint8_t buffer[PIPELINE_FC_FLOW_KEY_MAX_SIZE];
+ uint8_t m[PIPELINE_FC_FLOW_KEY_MAX_SIZE]; /* key mask */
void *key_buffer = (key_out) ? key_out : buffer;
+ memset(m, 0xFF, sizeof(m));
switch (key_in->type) {
case FLOW_KEY_QINQ:
{
@@ -101,7 +103,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
qinq->cvlan = rte_cpu_to_be_16(key_in->key.qinq.cvlan);
if (signature)
- *signature = (uint32_t) hash_default_key8(qinq, 8, 0);
+ *signature = (uint32_t) hash_default_key8(qinq, m, 8, 0);
return 0;
}
@@ -118,7 +120,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv4->port_dst = rte_cpu_to_be_16(key_in->key.ipv4_5tuple.port_dst);
if (signature)
- *signature = (uint32_t) hash_default_key16(ipv4, 16, 0);
+ *signature = (uint32_t) hash_default_key16(ipv4, m, 16, 0);
return 0;
}
@@ -136,7 +138,7 @@ app_pipeline_fc_key_convert(struct pipeline_fc_key *key_in,
ipv6->port_dst = rte_cpu_to_be_16(key_in->key.ipv6_5tuple.port_dst);
if (signature)
- *signature = (uint32_t) hash_default_key64(ipv6, 64, 0);
+ *signature = (uint32_t) hash_default_key64(ipv6, m, 64, 0);
return 0;
}
@@ -832,12 +834,12 @@ app_pipeline_fc_add_bulk(struct app_params *app,
}
/* Free resources */
- app_msg_free(app, rsp);
for (i = rsp->n_keys; i < n_keys; i++)
if (new_flow[i])
rte_free(flow[i]);
+ app_msg_free(app, rsp);
rte_free(flow_rsp);
rte_free(flow_req);
rte_free(new_flow);
diff --git a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
index 026f00cd..929d81cb 100644
--- a/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_flow_classification_be.c
@@ -492,40 +492,16 @@ static void *pipeline_fc_init(struct pipeline_params *params,
/* Tables */
p->n_tables = 1;
{
- struct rte_table_hash_key8_ext_params
- table_hash_key8_params = {
- .n_entries = p_fc->n_flows,
- .n_entries_ext = p_fc->n_flows,
- .signature_offset = p_fc->hash_offset,
+ struct rte_table_hash_params table_hash_params = {
+ .name = p->name,
+ .key_size = p_fc->key_size,
.key_offset = p_fc->key_offset,
- .f_hash = hash_func[(p_fc->key_size / 8) - 1],
.key_mask = (p_fc->key_mask_present) ?
p_fc->key_mask : NULL,
- .seed = 0,
- };
-
- struct rte_table_hash_key16_ext_params
- table_hash_key16_params = {
- .n_entries = p_fc->n_flows,
- .n_entries_ext = p_fc->n_flows,
- .signature_offset = p_fc->hash_offset,
- .key_offset = p_fc->key_offset,
- .f_hash = hash_func[(p_fc->key_size / 8) - 1],
- .key_mask = (p_fc->key_mask_present) ?
- p_fc->key_mask : NULL,
- .seed = 0,
- };
-
- struct rte_table_hash_ext_params
- table_hash_params = {
- .key_size = p_fc->key_size,
.n_keys = p_fc->n_flows,
- .n_buckets = p_fc->n_flows / 4,
- .n_buckets_ext = p_fc->n_flows / 4,
+ .n_buckets = rte_align32pow2(p_fc->n_flows / 4),
.f_hash = hash_func[(p_fc->key_size / 8) - 1],
.seed = 0,
- .signature_offset = p_fc->hash_offset,
- .key_offset = p_fc->key_offset,
};
struct rte_pipeline_table_params table_params = {
@@ -542,32 +518,19 @@ static void *pipeline_fc_init(struct pipeline_params *params,
switch (p_fc->key_size) {
case 8:
- if (p_fc->hash_offset != 0) {
- table_params.ops =
- &rte_table_hash_key8_ext_ops;
- } else {
- table_params.ops =
- &rte_table_hash_key8_ext_dosig_ops;
- }
- table_params.arg_create = &table_hash_key8_params;
+ table_params.ops = &rte_table_hash_key8_ext_ops;
break;
case 16:
- if (p_fc->hash_offset != 0) {
- table_params.ops =
- &rte_table_hash_key16_ext_ops;
- } else {
- table_params.ops =
- &rte_table_hash_key16_ext_dosig_ops;
- }
- table_params.arg_create = &table_hash_key16_params;
+ table_params.ops = &rte_table_hash_key16_ext_ops;
break;
default:
table_params.ops = &rte_table_hash_ext_ops;
- table_params.arg_create = &table_hash_params;
}
+ table_params.arg_create = &table_hash_params;
+
status = rte_pipeline_table_create(p->p,
&table_params,
&p->table_id[0]);
diff --git a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
index 8cb2f0c7..93eedbe8 100644
--- a/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_passthrough_be.c
@@ -102,7 +102,7 @@ pkt_work_dma(
/* Read (dma_dst), compute (hash), write (hash) */
if (hash_enabled) {
- uint32_t hash = p->f_hash(dma_dst, dma_size, 0);
+ uint32_t hash = p->f_hash(dma_src, dma_mask, dma_size, 0);
*dma_hash = hash;
if (lb_hash) {
@@ -173,10 +173,10 @@ pkt4_work_dma(
/* Read (dma_dst), compute (hash), write (hash) */
if (hash_enabled) {
- uint32_t hash0 = p->f_hash(dma_dst0, dma_size, 0);
- uint32_t hash1 = p->f_hash(dma_dst1, dma_size, 0);
- uint32_t hash2 = p->f_hash(dma_dst2, dma_size, 0);
- uint32_t hash3 = p->f_hash(dma_dst3, dma_size, 0);
+ uint32_t hash0 = p->f_hash(dma_src0, dma_mask, dma_size, 0);
+ uint32_t hash1 = p->f_hash(dma_src1, dma_mask, dma_size, 0);
+ uint32_t hash2 = p->f_hash(dma_src2, dma_mask, dma_size, 0);
+ uint32_t hash3 = p->f_hash(dma_src3, dma_mask, dma_size, 0);
*dma_hash0 = hash0;
*dma_hash1 = hash1;
diff --git a/examples/ip_pipeline/pipeline/pipeline_routing_be.c b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
index 78317165..0414f248 100644
--- a/examples/ip_pipeline/pipeline/pipeline_routing_be.c
+++ b/examples/ip_pipeline/pipeline/pipeline_routing_be.c
@@ -1349,17 +1349,20 @@ pipeline_routing_init(struct pipeline_params *params,
/* ARP table configuration */
if (p_rt->params.n_arp_entries) {
- struct rte_table_hash_key8_ext_params table_arp_params = {
- .n_entries = p_rt->params.n_arp_entries,
- .n_entries_ext = p_rt->params.n_arp_entries,
+ struct rte_table_hash_params table_arp_params = {
+ .name = p->name,
+ .key_size = 8,
+ .key_offset = p_rt->params.arp_key_offset,
+ .key_mask = NULL,
+ .n_keys = p_rt->params.n_arp_entries,
+ .n_buckets =
+ rte_align32pow2(p_rt->params.n_arp_entries / 4),
.f_hash = hash_default_key8,
.seed = 0,
- .signature_offset = 0, /* Unused */
- .key_offset = p_rt->params.arp_key_offset,
};
struct rte_pipeline_table_params table_params = {
- .ops = &rte_table_hash_key8_ext_dosig_ops,
+ .ops = &rte_table_hash_key8_ext_ops,
.arg_create = &table_arp_params,
.f_action_hit = get_arp_table_ah_hit(p_rt),
.f_action_miss = NULL,
diff --git a/examples/ip_reassembly/main.c b/examples/ip_reassembly/main.c
index e62636cb..756f90ef 100644
--- a/examples/ip_reassembly/main.c
+++ b/examples/ip_reassembly/main.c
@@ -49,7 +49,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -59,7 +58,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -84,10 +82,10 @@
#define MAX_JUMBO_PKT_LEN 9600
#define BUF_SIZE RTE_MBUF_DEFAULT_DATAROOM
-#define MBUF_SIZE \
- (BUF_SIZE + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)
+#define MBUF_DATA_SIZE RTE_MBUF_DEFAULT_BUF_SIZE
#define NB_MBUF 8192
+#define MEMPOOL_CACHE_SIZE 256
/* allow max jumbo frame 9.5 KB */
#define JUMBO_FRAME_MAX_SIZE 0x2600
@@ -166,7 +164,7 @@ struct rx_queue {
struct rte_mempool *pool;
struct rte_lpm *lpm;
struct rte_lpm6 *lpm6;
- uint8_t portid;
+ uint16_t portid;
};
struct tx_lcore_stat {
@@ -277,7 +275,7 @@ static struct rte_lpm6 *socket_lpm6[RTE_MAX_NUMA_NODES];
* send burst of packets on an output interface.
*/
static inline uint32_t
-send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint16_t port)
{
uint32_t fill, len, k, n;
struct mbuf_table *txmb;
@@ -307,7 +305,7 @@ send_burst(struct lcore_queue_conf *qconf, uint32_t thresh, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t fill, lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -337,7 +335,7 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
}
static inline void
-reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
+reassemble(struct rte_mbuf *m, uint16_t portid, uint32_t queue,
struct lcore_queue_conf *qconf, uint64_t tms)
{
struct ether_hdr *eth_hdr;
@@ -346,7 +344,7 @@ reassemble(struct rte_mbuf *m, uint8_t portid, uint32_t queue,
struct rx_queue *rxq;
void *d_addr_bytes;
uint32_t next_hop;
- uint8_t dst_port;
+ uint16_t dst_port;
rxq = &qconf->rx_queue_list[queue];
@@ -454,7 +452,7 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t diff_tsc, cur_tsc, prev_tsc;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
@@ -473,7 +471,7 @@ main_loop(__attribute__((unused)) void *dummy)
for (i = 0; i < qconf->n_rx_queue; i++) {
portid = qconf->rx_queue_list[i].portid;
- RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%hhu\n", lcore_id,
+ RTE_LOG(INFO, IP_RSMBL, " -- lcoreid=%u portid=%u\n", lcore_id,
portid);
}
@@ -732,11 +730,12 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -751,14 +750,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -909,11 +907,11 @@ setup_queue_tbl(struct rx_queue *rxq, uint32_t lcore, uint32_t queue)
snprintf(buf, sizeof(buf), "mbuf_pool_%u_%u", lcore, queue);
- if ((rxq->pool = rte_mempool_create(buf, nb_mbuf, MBUF_SIZE, 0,
- sizeof(struct rte_pktmbuf_pool_private),
- rte_pktmbuf_pool_init, NULL, rte_pktmbuf_init, NULL,
- socket, MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET)) == NULL) {
- RTE_LOG(ERR, IP_RSMBL, "mempool_create(%s) failed", buf);
+ rxq->pool = rte_pktmbuf_pool_create(buf, nb_mbuf, MEMPOOL_CACHE_SIZE, 0,
+ MBUF_DATA_SIZE, socket);
+ if (rxq->pool == NULL) {
+ RTE_LOG(ERR, IP_RSMBL,
+ "rte_pktmbuf_pool_create(%s) failed", buf);
return -1;
}
@@ -987,7 +985,7 @@ queue_dump_stat(void)
qconf = &lcore_queue_conf[lcore];
for (i = 0; i < qconf->n_rx_queue; i++) {
- fprintf(stdout, " -- lcoreid=%u portid=%hhu "
+ fprintf(stdout, " -- lcoreid=%u portid=%u "
"frag tbl stat:\n",
lcore, qconf->rx_queue_list[i].portid);
rte_ip_frag_table_statistics_dump(stdout,
@@ -1024,7 +1022,7 @@ main(int argc, char **argv)
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1177,7 +1175,7 @@ main(int argc, char **argv)
if (init_routing_table() < 0)
rte_exit(EXIT_FAILURE, "Cannot init routing table\n");
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
signal(SIGUSR1, signal_handler);
signal(SIGTERM, signal_handler);
diff --git a/examples/ipsec-secgw/Makefile b/examples/ipsec-secgw/Makefile
index 17e91551..9fd33cb7 100644
--- a/examples/ipsec-secgw/Makefile
+++ b/examples/ipsec-secgw/Makefile
@@ -38,6 +38,12 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
+ifneq ($(MAKECMDGOALS),clean)
+ifneq ($(CONFIG_RTE_LIBRTE_SECURITY),y)
+$(error "RTE_LIBRTE_SECURITY is required to build ipsec-secgw")
+endif
+endif
+
APP = ipsec-secgw
CFLAGS += -O3 -gdwarf-2
diff --git a/examples/ipsec-secgw/esp.c b/examples/ipsec-secgw/esp.c
index 70bb81f7..c3efe52b 100644
--- a/examples/ipsec-secgw/esp.c
+++ b/examples/ipsec-secgw/esp.c
@@ -58,8 +58,11 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct rte_crypto_sym_op *sym_cop;
int32_t payload_len, ip_hdr_len;
- RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO)
+ return 0;
+
+ RTE_ASSERT(m != NULL);
RTE_ASSERT(cop != NULL);
ip4 = rte_pktmbuf_mtod(m, struct ip *);
@@ -103,12 +106,12 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
aad = get_aad(m);
memcpy(aad, iv - sizeof(struct esp_hdr), 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
sym_cop->cipher.data.offset = ip_hdr_len + sizeof(struct esp_hdr) +
@@ -154,7 +157,7 @@ esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, void*,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
@@ -175,29 +178,44 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
RTE_ASSERT(sa != NULL);
RTE_ASSERT(cop != NULL);
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (m->ol_flags & PKT_RX_SEC_OFFLOAD) {
+ if (m->ol_flags & PKT_RX_SEC_OFFLOAD_FAILED)
+ cop->status = RTE_CRYPTO_OP_STATUS_ERROR;
+ else
+ cop->status = RTE_CRYPTO_OP_STATUS_SUCCESS;
+ } else
+ cop->status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+ }
+
if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
RTE_LOG(ERR, IPSEC_ESP, "failed crypto op\n");
return -1;
}
- nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
- rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
- pad_len = nexthdr - 1;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+ sa->ol_flags & RTE_SECURITY_RX_HW_TRAILER_OFFLOAD) {
+ nexthdr = &m->inner_esp_next_proto;
+ } else {
+ nexthdr = rte_pktmbuf_mtod_offset(m, uint8_t*,
+ rte_pktmbuf_pkt_len(m) - sa->digest_len - 1);
+ pad_len = nexthdr - 1;
+
+ padding = pad_len - *pad_len;
+ for (i = 0; i < *pad_len; i++) {
+ if (padding[i] != i + 1) {
+ RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+ return -EINVAL;
+ }
+ }
- padding = pad_len - *pad_len;
- for (i = 0; i < *pad_len; i++) {
- if (padding[i] != i + 1) {
- RTE_LOG(ERR, IPSEC_ESP, "invalid padding\n");
+ if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "failed to remove pad_len + digest\n");
return -EINVAL;
}
}
- if (rte_pktmbuf_trim(m, *pad_len + 2 + sa->digest_len)) {
- RTE_LOG(ERR, IPSEC_ESP,
- "failed to remove pad_len + digest\n");
- return -EINVAL;
- }
-
if (unlikely(sa->flags == TRANSPORT)) {
ip = rte_pktmbuf_mtod(m, struct ip *);
ip4 = (struct ip *)rte_pktmbuf_adj(m,
@@ -211,7 +229,8 @@ esp_inbound_post(struct rte_mbuf *m, struct ipsec_sa *sa,
/* XXX No option headers supported */
memmove(ip6, ip, sizeof(struct ip6_hdr));
ip6->ip6_nxt = *nexthdr;
- ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+ sizeof(struct ip6_hdr));
}
} else
ipip_inbound(m, sizeof(struct esp_hdr) + sa->iv_len);
@@ -226,14 +245,13 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct ip *ip4;
struct ip6_hdr *ip6;
struct esp_hdr *esp = NULL;
- uint8_t *padding, *new_ip, nlp;
+ uint8_t *padding = NULL, *new_ip, nlp;
struct rte_crypto_sym_op *sym_cop;
int32_t i;
uint16_t pad_payload_len, pad_len, ip_hdr_len;
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
- RTE_ASSERT(cop != NULL);
ip_hdr_len = 0;
@@ -283,12 +301,19 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
return -EINVAL;
}
- padding = (uint8_t *)rte_pktmbuf_append(m, pad_len + sa->digest_len);
- if (unlikely(padding == NULL)) {
- RTE_LOG(ERR, IPSEC_ESP, "not enough mbuf trailing space\n");
- return -ENOSPC;
+ /* Add trailer padding if it is not constructed by HW */
+ if (sa->type != RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO ||
+ (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO &&
+ !(sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD))) {
+ padding = (uint8_t *)rte_pktmbuf_append(m, pad_len +
+ sa->digest_len);
+ if (unlikely(padding == NULL)) {
+ RTE_LOG(ERR, IPSEC_ESP,
+ "not enough mbuf trailing space\n");
+ return -ENOSPC;
+ }
+ rte_prefetch0(padding);
}
- rte_prefetch0(padding);
switch (sa->flags) {
case IP4_TUNNEL:
@@ -306,14 +331,15 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sizeof(struct esp_hdr) + sa->iv_len);
memmove(new_ip, ip4, ip_hdr_len);
esp = (struct esp_hdr *)(new_ip + ip_hdr_len);
+ ip4 = (struct ip *)new_ip;
if (likely(ip4->ip_v == IPVERSION)) {
- ip4 = (struct ip *)new_ip;
ip4->ip_p = IPPROTO_ESP;
ip4->ip_len = htons(rte_pktmbuf_data_len(m));
} else {
ip6 = (struct ip6_hdr *)new_ip;
ip6->ip6_nxt = IPPROTO_ESP;
- ip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ ip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+ sizeof(struct ip6_hdr));
}
}
@@ -321,15 +347,46 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
esp->spi = rte_cpu_to_be_32(sa->spi);
esp->seq = rte_cpu_to_be_32((uint32_t)sa->seq);
+ /* set iv */
uint64_t *iv = (uint64_t *)(esp + 1);
+ if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
+ *iv = rte_cpu_to_be_64(sa->seq);
+ } else {
+ switch (sa->cipher_algo) {
+ case RTE_CRYPTO_CIPHER_NULL:
+ case RTE_CRYPTO_CIPHER_AES_CBC:
+ memset(iv, 0, sa->iv_len);
+ break;
+ case RTE_CRYPTO_CIPHER_AES_CTR:
+ *iv = rte_cpu_to_be_64(sa->seq);
+ break;
+ default:
+ RTE_LOG(ERR, IPSEC_ESP,
+ "unsupported cipher algorithm %u\n",
+ sa->cipher_algo);
+ return -EINVAL;
+ }
+ }
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (sa->ol_flags & RTE_SECURITY_TX_HW_TRAILER_OFFLOAD) {
+ /* Set the inner esp next protocol for HW trailer */
+ m->inner_esp_next_proto = nlp;
+ m->packet_type |= RTE_PTYPE_TUNNEL_ESP;
+ } else {
+ padding[pad_len - 2] = pad_len - 2;
+ padding[pad_len - 1] = nlp;
+ }
+ goto done;
+ }
+
+ RTE_ASSERT(cop != NULL);
sym_cop = get_sym_cop(cop);
sym_cop->m_src = m;
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
uint8_t *aad;
- *iv = sa->seq;
sym_cop->aead.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
sym_cop->aead.data.length = pad_payload_len;
@@ -342,30 +399,28 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct cnt_blk *icb = get_cnt_blk(m);
icb->salt = sa->salt;
- icb->iv = sa->seq;
+ icb->iv = rte_cpu_to_be_64(sa->seq);
icb->cnt = rte_cpu_to_be_32(1);
aad = get_aad(m);
memcpy(aad, esp, 8);
sym_cop->aead.aad.data = aad;
- sym_cop->aead.aad.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.aad.phys_addr = rte_pktmbuf_iova_offset(m,
aad - rte_pktmbuf_mtod(m, uint8_t *));
sym_cop->aead.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->aead.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
} else {
switch (sa->cipher_algo) {
case RTE_CRYPTO_CIPHER_NULL:
case RTE_CRYPTO_CIPHER_AES_CBC:
- memset(iv, 0, sa->iv_len);
sym_cop->cipher.data.offset = ip_hdr_len +
sizeof(struct esp_hdr);
sym_cop->cipher.data.length = pad_payload_len + sa->iv_len;
break;
case RTE_CRYPTO_CIPHER_AES_CTR:
- *iv = sa->seq;
sym_cop->cipher.data.offset = ip_hdr_len +
sizeof(struct esp_hdr) + sa->iv_len;
sym_cop->cipher.data.length = pad_payload_len;
@@ -384,7 +439,7 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
struct cnt_blk *icb = get_cnt_blk(m);
icb->salt = sa->salt;
- icb->iv = sa->seq;
+ icb->iv = rte_cpu_to_be_64(sa->seq);
icb->cnt = rte_cpu_to_be_32(1);
switch (sa->auth_algo) {
@@ -403,25 +458,30 @@ esp_outbound(struct rte_mbuf *m, struct ipsec_sa *sa,
sym_cop->auth.digest.data = rte_pktmbuf_mtod_offset(m, uint8_t *,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
- sym_cop->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ sym_cop->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - sa->digest_len);
}
+done:
return 0;
}
int
-esp_outbound_post(struct rte_mbuf *m __rte_unused,
- struct ipsec_sa *sa __rte_unused,
- struct rte_crypto_op *cop)
+esp_outbound_post(struct rte_mbuf *m,
+ struct ipsec_sa *sa,
+ struct rte_crypto_op *cop)
{
RTE_ASSERT(m != NULL);
RTE_ASSERT(sa != NULL);
- RTE_ASSERT(cop != NULL);
- if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
- RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
- return -1;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ m->ol_flags |= PKT_TX_SEC_OFFLOAD;
+ } else {
+ RTE_ASSERT(cop != NULL);
+ if (cop->status != RTE_CRYPTO_OP_STATUS_SUCCESS) {
+ RTE_LOG(ERR, IPSEC_ESP, "Failed crypto op\n");
+ return -1;
+ }
}
return 0;
diff --git a/examples/ipsec-secgw/esp.h b/examples/ipsec-secgw/esp.h
index fa5cc8af..23601e37 100644
--- a/examples/ipsec-secgw/esp.h
+++ b/examples/ipsec-secgw/esp.h
@@ -35,16 +35,6 @@
struct mbuf;
-/* RFC4303 */
-struct esp_hdr {
- uint32_t spi;
- uint32_t seq;
- /* Payload */
- /* Padding */
- /* Pad Length */
- /* Next Header */
- /* Integrity Check Value - ICV */
-};
int
esp_inbound(struct rte_mbuf *m, struct ipsec_sa *sa,
diff --git a/examples/ipsec-secgw/ipip.h b/examples/ipsec-secgw/ipip.h
index ff1dccdb..93393d52 100644
--- a/examples/ipsec-secgw/ipip.h
+++ b/examples/ipsec-secgw/ipip.h
@@ -72,7 +72,8 @@ ipip_outbound(struct rte_mbuf *m, uint32_t offset, uint32_t is_ipv6,
/* Per RFC4301 5.1.2.1 */
outip6->ip6_flow = htonl(IP6_VERSION << 28 | ds_ecn << 20);
- outip6->ip6_plen = htons(rte_pktmbuf_data_len(m));
+ outip6->ip6_plen = htons(rte_pktmbuf_data_len(m) -
+ sizeof(struct ip6_hdr));
outip6->ip6_nxt = IPPROTO_ESP;
outip6->ip6_hops = IPDEFTTL;
diff --git a/examples/ipsec-secgw/ipsec-secgw.c b/examples/ipsec-secgw/ipsec-secgw.c
index 99dc270c..c98454a9 100644
--- a/examples/ipsec-secgw/ipsec-secgw.c
+++ b/examples/ipsec-secgw/ipsec-secgw.c
@@ -57,7 +57,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -161,14 +160,15 @@ static int32_t numa_on = 1; /**< NUMA is enabled by default. */
static uint32_t nb_lcores;
static uint32_t single_sa;
static uint32_t single_sa_idx;
+static uint32_t frame_size;
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -204,11 +204,9 @@ static struct rte_eth_conf port_conf = {
.mq_mode = ETH_MQ_RX_RSS,
.max_rx_pkt_len = ETHER_MAX_LEN,
.split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 1, /**< IP checksum offload enabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
+ .offloads = DEV_RX_OFFLOAD_CHECKSUM |
+ DEV_RX_OFFLOAD_CRC_STRIP,
+ .ignore_offload_bitfield = 1,
},
.rx_adv_conf = {
.rss_conf = {
@@ -290,7 +288,7 @@ prepare_traffic(struct rte_mbuf **pkts, struct ipsec_traffic *t,
}
static inline void
-prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
+prepare_tx_pkt(struct rte_mbuf *pkt, uint16_t port)
{
struct ip *ip;
struct ether_hdr *ethhdr;
@@ -320,7 +318,7 @@ prepare_tx_pkt(struct rte_mbuf *pkt, uint8_t port)
}
static inline void
-prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
+prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint16_t port)
{
int32_t i;
const int32_t prefetch_offset = 2;
@@ -336,7 +334,7 @@ prepare_tx_burst(struct rte_mbuf *pkts[], uint16_t nb_pkts, uint8_t port)
/* Send burst of packets on an output interface */
static inline int32_t
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int32_t ret;
@@ -359,7 +357,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int32_t
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
uint16_t len;
@@ -646,7 +644,7 @@ route6_pkts(struct rt_ctx *rt_ctx, struct rte_mbuf *pkts[], uint8_t nb_pkts)
static inline void
process_pkts(struct lcore_conf *qconf, struct rte_mbuf **pkts,
- uint8_t nb_pkts, uint8_t portid)
+ uint8_t nb_pkts, uint16_t portid)
{
struct ipsec_traffic traffic;
@@ -691,7 +689,8 @@ main_loop(__attribute__((unused)) void *dummy)
uint32_t lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int32_t i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
int32_t socket_id;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -728,7 +727,7 @@ main_loop(__attribute__((unused)) void *dummy)
portid = rxql[i].port_id;
queueid = rxql[i].queue_id;
RTE_LOG(INFO, IPSEC,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -759,7 +758,8 @@ main_loop(__attribute__((unused)) void *dummy)
static int32_t
check_params(void)
{
- uint8_t lcore, portid, nb_ports;
+ uint8_t lcore;
+ uint16_t portid, nb_ports;
uint16_t i;
int32_t socket_id;
@@ -797,7 +797,7 @@ check_params(void)
}
static uint8_t
-get_port_nb_rx_queues(const uint8_t port)
+get_port_nb_rx_queues(const uint16_t port)
{
int32_t queue = -1;
uint16_t i;
@@ -843,6 +843,7 @@ print_usage(const char *prgname)
" -p PORTMASK: hexadecimal bitmask of ports to configure\n"
" -P : enable promiscuous mode\n"
" -u PORTMASK: hexadecimal bitmask of unprotected ports\n"
+ " -j FRAMESIZE: jumbo frame maximum size\n"
" --"OPTION_CONFIG": (port,queue,lcore): "
"rx queues configuration\n"
" --single-sa SAIDX: use single SA index for outbound, "
@@ -981,7 +982,7 @@ parse_args(int32_t argc, char **argv)
argvopt = argv;
- while ((opt = getopt_long(argc, argvopt, "p:Pu:f:",
+ while ((opt = getopt_long(argc, argvopt, "p:Pu:f:j:",
lgopts, &option_index)) != EOF) {
switch (opt) {
@@ -1020,6 +1021,23 @@ parse_args(int32_t argc, char **argv)
}
f_present = 1;
break;
+ case 'j':
+ {
+ int32_t size = parse_decimal(optarg);
+ if (size <= 1518) {
+ printf("Invalid jumbo frame size\n");
+ if (size < 0) {
+ print_usage(prgname);
+ return -1;
+ }
+ printf("Using default value 9000\n");
+ frame_size = 9000;
+ } else {
+ frame_size = size;
+ }
+ }
+ printf("Enabled jumbo frames size %u\n", frame_size);
+ break;
case 0:
if (parse_args_long_options(lgopts, option_index)) {
print_usage(prgname);
@@ -1055,11 +1073,12 @@ print_ethaddr(const char *name, const struct ether_addr *eth_addr)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1074,14 +1093,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (uint32_t)link.link_speed,
+ printf(
+ "Port%d Link Up - speed %u Mbps -%s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -1113,7 +1131,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
uint16_t qp, struct lcore_params *params,
struct ipsec_ctx *ipsec_ctx,
const struct rte_cryptodev_capabilities *cipher,
- const struct rte_cryptodev_capabilities *auth)
+ const struct rte_cryptodev_capabilities *auth,
+ const struct rte_cryptodev_capabilities *aead)
{
int32_t ret = 0;
unsigned long i;
@@ -1124,6 +1143,8 @@ add_mapping(struct rte_hash *map, const char *str, uint16_t cdev_id,
key.cipher_algo = cipher->sym.cipher.algo;
if (auth)
key.auth_algo = auth->sym.auth.algo;
+ if (aead)
+ key.aead_algo = aead->sym.aead.algo;
ret = rte_hash_lookup(map, &key);
if (ret != -ENOENT)
@@ -1192,6 +1213,12 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
if (i->op != RTE_CRYPTO_OP_TYPE_SYMMETRIC)
continue;
+ if (i->sym.xform_type == RTE_CRYPTO_SYM_XFORM_AEAD) {
+ ret |= add_mapping(map, str, cdev_id, qp, params,
+ ipsec_ctx, NULL, NULL, i);
+ continue;
+ }
+
if (i->sym.xform_type != RTE_CRYPTO_SYM_XFORM_CIPHER)
continue;
@@ -1204,7 +1231,7 @@ add_cdev_mapping(struct rte_cryptodev_info *dev_info, uint16_t cdev_id,
continue;
ret |= add_mapping(map, str, cdev_id, qp, params,
- ipsec_ctx, i, j);
+ ipsec_ctx, i, j, NULL);
}
}
@@ -1322,7 +1349,7 @@ cryptodevs_init(void)
}
static void
-port_init(uint8_t portid)
+port_init(uint16_t portid)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
@@ -1357,6 +1384,16 @@ port_init(uint8_t portid)
printf("Creating queues: nb_rx_queue=%d nb_tx_queue=%u...\n",
nb_rx_queue, nb_tx_queue);
+ if (frame_size) {
+ port_conf.rxmode.max_rx_pkt_len = frame_size;
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;
+ }
+
+ if (dev_info.rx_offload_capa & DEV_RX_OFFLOAD_SECURITY)
+ port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_SECURITY;
+ if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_SECURITY)
+ port_conf.txmode.offloads |= DEV_TX_OFFLOAD_SECURITY;
+
ret = rte_eth_dev_configure(portid, nb_rx_queue, nb_tx_queue,
&port_conf);
if (ret < 0)
@@ -1421,11 +1458,14 @@ static void
pool_init(struct socket_ctx *ctx, int32_t socket_id, uint32_t nb_mbuf)
{
char s[64];
+ uint32_t buff_size = frame_size ? (frame_size + RTE_PKTMBUF_HEADROOM) :
+ RTE_MBUF_DEFAULT_BUF_SIZE;
+
snprintf(s, sizeof(s), "mbuf_pool_%d", socket_id);
ctx->mbuf_pool = rte_pktmbuf_pool_create(s, nb_mbuf,
MEMPOOL_CACHE_SIZE, ipsec_metadata_size(),
- RTE_MBUF_DEFAULT_BUF_SIZE,
+ buff_size,
socket_id);
if (ctx->mbuf_pool == NULL)
rte_exit(EXIT_FAILURE, "Cannot init mbuf pool on socket %d\n",
@@ -1438,8 +1478,9 @@ int32_t
main(int32_t argc, char **argv)
{
int32_t ret;
- uint32_t lcore_id, nb_ports;
- uint8_t portid, socket_id;
+ uint32_t lcore_id;
+ uint8_t socket_id;
+ uint16_t portid, nb_ports;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1522,7 +1563,7 @@ main(int32_t argc, char **argv)
rte_eth_promiscuous_enable(portid);
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
diff --git a/examples/ipsec-secgw/ipsec.c b/examples/ipsec-secgw/ipsec.c
index 0afb9d67..c24284d6 100644
--- a/examples/ipsec-secgw/ipsec.c
+++ b/examples/ipsec-secgw/ipsec.c
@@ -37,7 +37,9 @@
#include <rte_branch_prediction.h>
#include <rte_log.h>
#include <rte_crypto.h>
+#include <rte_security.h>
#include <rte_cryptodev.h>
+#include <rte_ethdev.h>
#include <rte_mbuf.h>
#include <rte_hash.h>
@@ -49,21 +51,28 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
{
struct rte_cryptodev_info cdev_info;
unsigned long cdev_id_qp = 0;
- int32_t ret;
+ int32_t ret = 0;
struct cdev_key key = { 0 };
key.lcore_id = (uint8_t)rte_lcore_id();
key.cipher_algo = (uint8_t)sa->cipher_algo;
key.auth_algo = (uint8_t)sa->auth_algo;
+ key.aead_algo = (uint8_t)sa->aead_algo;
- ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
- (void **)&cdev_id_qp);
- if (ret < 0) {
- RTE_LOG(ERR, IPSEC, "No cryptodev: core %u, cipher_algo %u, "
- "auth_algo %u\n", key.lcore_id, key.cipher_algo,
- key.auth_algo);
- return -1;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+ ret = rte_hash_lookup_data(ipsec_ctx->cdev_map, &key,
+ (void **)&cdev_id_qp);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
+ "No cryptodev: core %u, cipher_algo %u, "
+ "auth_algo %u, aead_algo %u\n",
+ key.lcore_id,
+ key.cipher_algo,
+ key.auth_algo,
+ key.aead_algo);
+ return -1;
+ }
}
RTE_LOG_DP(DEBUG, IPSEC, "Create session for SA spi %u on cryptodev "
@@ -71,23 +80,153 @@ create_session(struct ipsec_ctx *ipsec_ctx, struct ipsec_sa *sa)
ipsec_ctx->tbl[cdev_id_qp].id,
ipsec_ctx->tbl[cdev_id_qp].qp);
- sa->crypto_session = rte_cryptodev_sym_session_create(
- ipsec_ctx->session_pool);
- rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
- sa->crypto_session, sa->xforms,
- ipsec_ctx->session_pool);
-
- rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id, &cdev_info);
- if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
- ret = rte_cryptodev_queue_pair_attach_sym_session(
- ipsec_ctx->tbl[cdev_id_qp].id,
- ipsec_ctx->tbl[cdev_id_qp].qp,
- sa->crypto_session);
- if (ret < 0) {
- RTE_LOG(ERR, IPSEC,
- "Session cannot be attached to qp %u ",
- ipsec_ctx->tbl[cdev_id_qp].qp);
- return -1;
+ if (sa->type != RTE_SECURITY_ACTION_TYPE_NONE) {
+ struct rte_security_session_conf sess_conf = {
+ .action_type = sa->type,
+ .protocol = RTE_SECURITY_PROTOCOL_IPSEC,
+ .ipsec = {
+ .spi = sa->spi,
+ .salt = sa->salt,
+ .options = { 0 },
+ .direction = sa->direction,
+ .proto = RTE_SECURITY_IPSEC_SA_PROTO_ESP,
+ .mode = (sa->flags == IP4_TUNNEL ||
+ sa->flags == IP6_TUNNEL) ?
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL :
+ RTE_SECURITY_IPSEC_SA_MODE_TRANSPORT,
+ },
+ .crypto_xform = sa->xforms
+
+ };
+
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL) {
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_cryptodev_get_sec_ctx(
+ ipsec_ctx->tbl[cdev_id_qp].id);
+
+ if (sess_conf.ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL) {
+ struct rte_security_ipsec_tunnel_param *tunnel =
+ &sess_conf.ipsec.tunnel;
+ if (sa->flags == IP4_TUNNEL) {
+ tunnel->type =
+ RTE_SECURITY_IPSEC_TUNNEL_IPV4;
+ tunnel->ipv4.ttl = IPDEFTTL;
+
+ memcpy((uint8_t *)&tunnel->ipv4.src_ip,
+ (uint8_t *)&sa->src.ip.ip4, 4);
+
+ memcpy((uint8_t *)&tunnel->ipv4.dst_ip,
+ (uint8_t *)&sa->dst.ip.ip4, 4);
+ }
+ /* TODO support for Transport and IPV6 tunnel */
+ }
+
+ sa->sec_session = rte_security_session_create(ctx,
+ &sess_conf, ipsec_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "SEC Session init failed: err: %d\n", ret);
+ return -1;
+ }
+ } else if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ struct rte_flow_error err;
+ struct rte_security_ctx *ctx = (struct rte_security_ctx *)
+ rte_eth_dev_get_sec_ctx(
+ sa->portid);
+ const struct rte_security_capability *sec_cap;
+
+ sa->sec_session = rte_security_session_create(ctx,
+ &sess_conf, ipsec_ctx->session_pool);
+ if (sa->sec_session == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "SEC Session init failed: err: %d\n", ret);
+ return -1;
+ }
+
+ sec_cap = rte_security_capabilities_get(ctx);
+
+ /* iterate until ESP tunnel*/
+ while (sec_cap->action !=
+ RTE_SECURITY_ACTION_TYPE_NONE) {
+
+ if (sec_cap->action == sa->type &&
+ sec_cap->protocol ==
+ RTE_SECURITY_PROTOCOL_IPSEC &&
+ sec_cap->ipsec.mode ==
+ RTE_SECURITY_IPSEC_SA_MODE_TUNNEL &&
+ sec_cap->ipsec.direction == sa->direction)
+ break;
+ sec_cap++;
+ }
+
+ if (sec_cap->action == RTE_SECURITY_ACTION_TYPE_NONE) {
+ RTE_LOG(ERR, IPSEC,
+ "No suitable security capability found\n");
+ return -1;
+ }
+
+ sa->ol_flags = sec_cap->ol_flags;
+ sa->security_ctx = ctx;
+ sa->pattern[0].type = RTE_FLOW_ITEM_TYPE_ETH;
+
+ sa->pattern[1].type = RTE_FLOW_ITEM_TYPE_IPV4;
+ sa->pattern[1].mask = &rte_flow_item_ipv4_mask;
+ if (sa->flags & IP6_TUNNEL) {
+ sa->pattern[1].spec = &sa->ipv6_spec;
+ memcpy(sa->ipv6_spec.hdr.dst_addr,
+ sa->dst.ip.ip6.ip6_b, 16);
+ memcpy(sa->ipv6_spec.hdr.src_addr,
+ sa->src.ip.ip6.ip6_b, 16);
+ } else {
+ sa->pattern[1].spec = &sa->ipv4_spec;
+ sa->ipv4_spec.hdr.dst_addr = sa->dst.ip.ip4;
+ sa->ipv4_spec.hdr.src_addr = sa->src.ip.ip4;
+ }
+
+ sa->pattern[2].type = RTE_FLOW_ITEM_TYPE_ESP;
+ sa->pattern[2].spec = &sa->esp_spec;
+ sa->pattern[2].mask = &rte_flow_item_esp_mask;
+ sa->esp_spec.hdr.spi = sa->spi;
+
+ sa->pattern[3].type = RTE_FLOW_ITEM_TYPE_END;
+
+ sa->action[0].type = RTE_FLOW_ACTION_TYPE_SECURITY;
+ sa->action[0].conf = sa->sec_session;
+
+ sa->action[1].type = RTE_FLOW_ACTION_TYPE_END;
+
+ sa->attr.egress = (sa->direction ==
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS);
+ sa->flow = rte_flow_create(sa->portid,
+ &sa->attr, sa->pattern, sa->action, &err);
+ if (sa->flow == NULL) {
+ RTE_LOG(ERR, IPSEC,
+ "Failed to create ipsec flow msg: %s\n",
+ err.message);
+ return -1;
+ }
+ }
+ } else {
+ sa->crypto_session = rte_cryptodev_sym_session_create(
+ ipsec_ctx->session_pool);
+ rte_cryptodev_sym_session_init(ipsec_ctx->tbl[cdev_id_qp].id,
+ sa->crypto_session, sa->xforms,
+ ipsec_ctx->session_pool);
+
+ rte_cryptodev_info_get(ipsec_ctx->tbl[cdev_id_qp].id,
+ &cdev_info);
+ if (cdev_info.sym.max_nb_sessions_per_qp > 0) {
+ ret = rte_cryptodev_queue_pair_attach_sym_session(
+ ipsec_ctx->tbl[cdev_id_qp].id,
+ ipsec_ctx->tbl[cdev_id_qp].qp,
+ sa->crypto_session);
+ if (ret < 0) {
+ RTE_LOG(ERR, IPSEC,
+ "Session cannot be attached to qp %u\n",
+ ipsec_ctx->tbl[cdev_id_qp].qp);
+ return -1;
+ }
}
}
sa->cdev_id_qp = cdev_id_qp;
@@ -125,7 +264,9 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
{
int32_t ret = 0, i;
struct ipsec_mbuf_metadata *priv;
+ struct rte_crypto_sym_op *sym_cop;
struct ipsec_sa *sa;
+ struct cdev_qp *cqp;
for (i = 0; i < nb_pkts; i++) {
if (unlikely(sas[i] == NULL)) {
@@ -140,23 +281,76 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
sa = sas[i];
priv->sa = sa;
- priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
- priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
-
- rte_prefetch0(&priv->sym_cop);
-
- if ((unlikely(sa->crypto_session == NULL)) &&
- create_session(ipsec_ctx, sa)) {
- rte_pktmbuf_free(pkts[i]);
- continue;
- }
-
- rte_crypto_op_attach_sym_session(&priv->cop,
- sa->crypto_session);
-
- ret = xform_func(pkts[i], sa, &priv->cop);
- if (unlikely(ret)) {
- rte_pktmbuf_free(pkts[i]);
+ switch (sa->type) {
+ case RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL:
+ priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ rte_prefetch0(&priv->sym_cop);
+
+ if ((unlikely(sa->sec_session == NULL)) &&
+ create_session(ipsec_ctx, sa)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ sym_cop = get_sym_cop(&priv->cop);
+ sym_cop->m_src = pkts[i];
+
+ rte_security_attach_session(&priv->cop,
+ sa->sec_session);
+ break;
+ case RTE_SECURITY_ACTION_TYPE_NONE:
+
+ priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ rte_prefetch0(&priv->sym_cop);
+
+ if ((unlikely(sa->crypto_session == NULL)) &&
+ create_session(ipsec_ctx, sa)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ rte_crypto_op_attach_sym_session(&priv->cop,
+ sa->crypto_session);
+
+ ret = xform_func(pkts[i], sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL:
+ break;
+ case RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO:
+ priv->cop.type = RTE_CRYPTO_OP_TYPE_SYMMETRIC;
+ priv->cop.status = RTE_CRYPTO_OP_STATUS_NOT_PROCESSED;
+
+ rte_prefetch0(&priv->sym_cop);
+
+ if ((unlikely(sa->sec_session == NULL)) &&
+ create_session(ipsec_ctx, sa)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ rte_security_attach_session(&priv->cop,
+ sa->sec_session);
+
+ ret = xform_func(pkts[i], sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkts[i]);
+ continue;
+ }
+
+ cqp = &ipsec_ctx->tbl[sa->cdev_id_qp];
+ cqp->ol_pkts[cqp->ol_pkts_cnt++] = pkts[i];
+ if (sa->ol_flags & RTE_SECURITY_TX_OLOAD_NEED_MDATA)
+ rte_security_set_pkt_metadata(
+ sa->security_ctx,
+ sa->sec_session, pkts[i], NULL);
continue;
}
@@ -167,7 +361,7 @@ ipsec_enqueue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
static inline int
ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
- struct rte_mbuf *pkts[], uint16_t max_pkts)
+ struct rte_mbuf *pkts[], uint16_t max_pkts)
{
int32_t nb_pkts = 0, ret = 0, i, j, nb_cops;
struct ipsec_mbuf_metadata *priv;
@@ -182,6 +376,19 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
if (ipsec_ctx->last_qp == ipsec_ctx->nb_qps)
ipsec_ctx->last_qp %= ipsec_ctx->nb_qps;
+ while (cqp->ol_pkts_cnt > 0 && nb_pkts < max_pkts) {
+ pkt = cqp->ol_pkts[--cqp->ol_pkts_cnt];
+ rte_prefetch0(pkt);
+ priv = get_priv(pkt);
+ sa = priv->sa;
+ ret = xform_func(pkt, sa, &priv->cop);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+ pkts[nb_pkts++] = pkt;
+ }
+
if (cqp->in_flight == 0)
continue;
@@ -199,11 +406,14 @@ ipsec_dequeue(ipsec_xform_fn xform_func, struct ipsec_ctx *ipsec_ctx,
RTE_ASSERT(sa != NULL);
- ret = xform_func(pkt, sa, cops[j]);
- if (unlikely(ret))
- rte_pktmbuf_free(pkt);
- else
- pkts[nb_pkts++] = pkt;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_NONE) {
+ ret = xform_func(pkt, sa, cops[j]);
+ if (unlikely(ret)) {
+ rte_pktmbuf_free(pkt);
+ continue;
+ }
+ }
+ pkts[nb_pkts++] = pkt;
}
}
diff --git a/examples/ipsec-secgw/ipsec.h b/examples/ipsec-secgw/ipsec.h
index da1fb1b2..775b316f 100644
--- a/examples/ipsec-secgw/ipsec.h
+++ b/examples/ipsec-secgw/ipsec.h
@@ -38,6 +38,8 @@
#include <rte_byteorder.h>
#include <rte_crypto.h>
+#include <rte_security.h>
+#include <rte_flow.h>
#define RTE_LOGTYPE_IPSEC RTE_LOGTYPE_USER1
#define RTE_LOGTYPE_IPSEC_ESP RTE_LOGTYPE_USER2
@@ -99,7 +101,10 @@ struct ipsec_sa {
uint32_t cdev_id_qp;
uint64_t seq;
uint32_t salt;
- struct rte_cryptodev_sym_session *crypto_session;
+ union {
+ struct rte_cryptodev_sym_session *crypto_session;
+ struct rte_security_session *sec_session;
+ };
enum rte_crypto_cipher_algorithm cipher_algo;
enum rte_crypto_auth_algorithm auth_algo;
enum rte_crypto_aead_algorithm aead_algo;
@@ -117,7 +122,28 @@ struct ipsec_sa {
uint8_t auth_key[MAX_KEY_SIZE];
uint16_t auth_key_len;
uint16_t aad_len;
- struct rte_crypto_sym_xform *xforms;
+ union {
+ struct rte_crypto_sym_xform *xforms;
+ struct rte_security_ipsec_xform *sec_xform;
+ };
+ enum rte_security_session_action_type type;
+ enum rte_security_ipsec_sa_direction direction;
+ uint16_t portid;
+ struct rte_security_ctx *security_ctx;
+ uint32_t ol_flags;
+
+#define MAX_RTE_FLOW_PATTERN (4)
+#define MAX_RTE_FLOW_ACTIONS (2)
+ struct rte_flow_item pattern[MAX_RTE_FLOW_PATTERN];
+ struct rte_flow_action action[MAX_RTE_FLOW_ACTIONS];
+ struct rte_flow_attr attr;
+ union {
+ struct rte_flow_item_ipv4 ipv4_spec;
+ struct rte_flow_item_ipv6 ipv6_spec;
+ };
+ struct rte_flow_item_esp esp_spec;
+ struct rte_flow *flow;
+ struct rte_security_session_conf sess_conf;
} __rte_cache_aligned;
struct ipsec_mbuf_metadata {
@@ -133,6 +159,8 @@ struct cdev_qp {
uint16_t in_flight;
uint16_t len;
struct rte_crypto_op *buf[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+ struct rte_mbuf *ol_pkts[MAX_PKT_BURST] __rte_aligned(sizeof(void *));
+ uint16_t ol_pkts_cnt;
};
struct ipsec_ctx {
@@ -150,6 +178,7 @@ struct cdev_key {
uint16_t lcore_id;
uint8_t cipher_algo;
uint8_t auth_algo;
+ uint8_t aead_algo;
};
struct socket_ctx {
diff --git a/examples/ipsec-secgw/sa.c b/examples/ipsec-secgw/sa.c
index 7be0e628..4c448e5c 100644
--- a/examples/ipsec-secgw/sa.c
+++ b/examples/ipsec-secgw/sa.c
@@ -41,16 +41,20 @@
#include <rte_memzone.h>
#include <rte_crypto.h>
+#include <rte_security.h>
#include <rte_cryptodev.h>
#include <rte_byteorder.h>
#include <rte_errno.h>
#include <rte_ip.h>
#include <rte_random.h>
+#include <rte_ethdev.h>
#include "ipsec.h"
#include "esp.h"
#include "parser.h"
+#define IPDEFTTL 64
+
struct supported_cipher_algo {
const char *keyword;
enum rte_crypto_cipher_algorithm algo;
@@ -238,6 +242,8 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
uint32_t src_p = 0;
uint32_t dst_p = 0;
uint32_t mode_p = 0;
+ uint32_t type_p = 0;
+ uint32_t portid_p = 0;
if (strcmp(tokens[0], "in") == 0) {
ri = &nb_sa_in;
@@ -375,7 +381,6 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
rule->auth_algo = algo->algo;
rule->auth_key_len = algo->key_len;
rule->digest_len = algo->digest_len;
- rule->aad_len = algo->key_len;
/* NULL algorithm and combined algos do not
* require auth key
@@ -431,7 +436,7 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
rule->aead_algo = algo->algo;
rule->cipher_key_len = algo->key_len;
rule->digest_len = algo->digest_len;
- rule->aad_len = algo->key_len;
+ rule->aad_len = algo->aad_len;
rule->block_size = algo->block_size;
rule->iv_len = algo->iv_len;
@@ -550,6 +555,52 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
continue;
}
+ if (strcmp(tokens[ti], "type") == 0) {
+ APP_CHECK_PRESENCE(type_p, tokens[ti], status);
+ if (status->status < 0)
+ return;
+
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+
+ if (strcmp(tokens[ti], "inline-crypto-offload") == 0)
+ rule->type =
+ RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO;
+ else if (strcmp(tokens[ti],
+ "inline-protocol-offload") == 0)
+ rule->type =
+ RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL;
+ else if (strcmp(tokens[ti],
+ "lookaside-protocol-offload") == 0)
+ rule->type =
+ RTE_SECURITY_ACTION_TYPE_LOOKASIDE_PROTOCOL;
+ else if (strcmp(tokens[ti], "no-offload") == 0)
+ rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ else {
+ APP_CHECK(0, status, "Invalid input \"%s\"",
+ tokens[ti]);
+ return;
+ }
+
+ type_p = 1;
+ continue;
+ }
+
+ if (strcmp(tokens[ti], "port_id") == 0) {
+ APP_CHECK_PRESENCE(portid_p, tokens[ti], status);
+ if (status->status < 0)
+ return;
+ INCREMENT_TOKEN_INDEX(ti, n_tokens, status);
+ if (status->status < 0)
+ return;
+ rule->portid = atoi(tokens[ti]);
+ if (status->status < 0)
+ return;
+ portid_p = 1;
+ continue;
+ }
+
/* unrecognizeable input */
APP_CHECK(0, status, "unrecognized input \"%s\"",
tokens[ti]);
@@ -580,6 +631,14 @@ parse_sa_tokens(char **tokens, uint32_t n_tokens,
if (status->status < 0)
return;
+ if ((rule->type != RTE_SECURITY_ACTION_TYPE_NONE) && (portid_p == 0))
+ printf("Missing portid option, falling back to non-offload\n");
+
+ if (!type_p || !portid_p) {
+ rule->type = RTE_SECURITY_ACTION_TYPE_NONE;
+ rule->portid = -1;
+ }
+
*ri = *ri + 1;
}
@@ -647,9 +706,11 @@ print_one_sa_rule(const struct ipsec_sa *sa, int inbound)
struct sa_ctx {
struct ipsec_sa sa[IPSEC_SA_MAX_ENTRIES];
- struct {
- struct rte_crypto_sym_xform a;
- struct rte_crypto_sym_xform b;
+ union {
+ struct {
+ struct rte_crypto_sym_xform a;
+ struct rte_crypto_sym_xform b;
+ };
} xf[IPSEC_SA_MAX_ENTRIES];
};
@@ -682,6 +743,33 @@ sa_create(const char *name, int32_t socket_id)
}
static int
+check_eth_dev_caps(uint16_t portid, uint32_t inbound)
+{
+ struct rte_eth_dev_info dev_info;
+
+ rte_eth_dev_info_get(portid, &dev_info);
+
+ if (inbound) {
+ if ((dev_info.rx_offload_capa &
+ DEV_RX_OFFLOAD_SECURITY) == 0) {
+ RTE_LOG(WARNING, PORT,
+ "hardware RX IPSec offload is not supported\n");
+ return -EINVAL;
+ }
+
+ } else { /* outbound */
+ if ((dev_info.tx_offload_capa &
+ DEV_TX_OFFLOAD_SECURITY) == 0) {
+ RTE_LOG(WARNING, PORT,
+ "hardware TX IPSec offload is not supported\n");
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+
+static int
sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
uint32_t nb_entries, uint32_t inbound)
{
@@ -700,6 +788,16 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
*sa = entries[i];
sa->seq = 0;
+ if (sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_PROTOCOL ||
+ sa->type == RTE_SECURITY_ACTION_TYPE_INLINE_CRYPTO) {
+ if (check_eth_dev_caps(sa->portid, inbound))
+ return -EINVAL;
+ }
+
+ sa->direction = (inbound == 1) ?
+ RTE_SECURITY_IPSEC_SA_DIR_INGRESS :
+ RTE_SECURITY_IPSEC_SA_DIR_EGRESS;
+
switch (sa->flags) {
case IP4_TUNNEL:
sa->src.ip.ip4 = rte_cpu_to_be_32(sa->src.ip.ip4);
@@ -709,37 +807,21 @@ sa_add_rules(struct sa_ctx *sa_ctx, const struct ipsec_sa entries[],
if (sa->aead_algo == RTE_CRYPTO_AEAD_AES_GCM) {
iv_length = 16;
- if (inbound) {
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
- sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
- sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
- sa_ctx->xf[idx].a.aead.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].a.aead.op =
- RTE_CRYPTO_AEAD_OP_DECRYPT;
- sa_ctx->xf[idx].a.next = NULL;
- sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
- sa_ctx->xf[idx].a.aead.iv.length = iv_length;
- sa_ctx->xf[idx].a.aead.aad_length =
- sa->aad_len;
- sa_ctx->xf[idx].a.aead.digest_length =
- sa->digest_len;
- } else { /* outbound */
- sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
- sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
- sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
- sa_ctx->xf[idx].a.aead.key.length =
- sa->cipher_key_len;
- sa_ctx->xf[idx].a.aead.op =
- RTE_CRYPTO_AEAD_OP_ENCRYPT;
- sa_ctx->xf[idx].a.next = NULL;
- sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
- sa_ctx->xf[idx].a.aead.iv.length = iv_length;
- sa_ctx->xf[idx].a.aead.aad_length =
- sa->aad_len;
- sa_ctx->xf[idx].a.aead.digest_length =
- sa->digest_len;
- }
+ sa_ctx->xf[idx].a.type = RTE_CRYPTO_SYM_XFORM_AEAD;
+ sa_ctx->xf[idx].a.aead.algo = sa->aead_algo;
+ sa_ctx->xf[idx].a.aead.key.data = sa->cipher_key;
+ sa_ctx->xf[idx].a.aead.key.length =
+ sa->cipher_key_len;
+ sa_ctx->xf[idx].a.aead.op = (inbound == 1) ?
+ RTE_CRYPTO_AEAD_OP_DECRYPT :
+ RTE_CRYPTO_AEAD_OP_ENCRYPT;
+ sa_ctx->xf[idx].a.next = NULL;
+ sa_ctx->xf[idx].a.aead.iv.offset = IV_OFFSET;
+ sa_ctx->xf[idx].a.aead.iv.length = iv_length;
+ sa_ctx->xf[idx].a.aead.aad_length =
+ sa->aad_len;
+ sa_ctx->xf[idx].a.aead.digest_length =
+ sa->digest_len;
sa->xforms = &sa_ctx->xf[idx].a;
diff --git a/examples/ipv4_multicast/main.c b/examples/ipv4_multicast/main.c
index 9a13d353..83ac0d80 100644
--- a/examples/ipv4_multicast/main.c
+++ b/examples/ipv4_multicast/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -116,7 +114,7 @@ static struct ether_addr ports_eth_addr[MAX_PORTS];
/* mask of enabled ports */
static uint32_t enabled_port_mask = 0;
-static uint8_t nb_ports = 0;
+static uint16_t nb_ports;
static int rx_queue_per_lcore = 1;
@@ -195,7 +193,7 @@ static struct mcast_group_params mcast_group_table[] = {
/* Send burst of packets on an output interface */
static void
-send_burst(struct lcore_queue_conf *qconf, uint8_t port)
+send_burst(struct lcore_queue_conf *qconf, uint16_t port)
{
struct rte_mbuf **m_table;
uint16_t n, queueid;
@@ -312,7 +310,7 @@ mcast_out_pkt(struct rte_mbuf *pkt, int use_clone)
*/
static inline void
mcast_send_pkt(struct rte_mbuf *pkt, struct ether_addr *dest_addr,
- struct lcore_queue_conf *qconf, uint8_t port)
+ struct lcore_queue_conf *qconf, uint16_t port)
{
struct ether_hdr *ethdr;
uint16_t len;
@@ -343,7 +341,7 @@ mcast_forward(struct rte_mbuf *m, struct lcore_queue_conf *qconf)
struct ipv4_hdr *iphdr;
uint32_t dest_addr, port_mask, port_num, use_clone;
int32_t hash;
- uint8_t port;
+ uint16_t port;
union {
uint64_t as_int;
struct ether_addr as_addr;
@@ -407,7 +405,7 @@ static inline void
send_timeout_burst(struct lcore_queue_conf *qconf)
{
uint64_t cur_tsc;
- uint8_t portid;
+ uint16_t portid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
cur_tsc = rte_rdtsc();
@@ -428,7 +426,7 @@ main_loop(__rte_unused void *dummy)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
unsigned lcore_id;
int i, j, nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct lcore_queue_conf *qconf;
lcore_id = rte_lcore_id();
@@ -448,7 +446,7 @@ main_loop(__rte_unused void *dummy)
portid = qconf->rx_queue_list[i];
RTE_LOG(INFO, IPv4_MULTICAST, " -- lcoreid=%u portid=%d\n",
- lcore_id, (int) portid);
+ lcore_id, portid);
}
while (1) {
@@ -610,11 +608,12 @@ init_mcast_hash(void)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -629,14 +628,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -673,7 +671,7 @@ main(int argc, char **argv)
uint16_t queueid;
unsigned lcore_id = 0, rx_lcore_id = 0;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
ret = rte_eal_init(argc, argv);
diff --git a/examples/kni/main.c b/examples/kni/main.c
index e3bc2fb7..3f173854 100644
--- a/examples/kni/main.c
+++ b/examples/kni/main.c
@@ -53,7 +53,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
#include <rte_launch.h>
@@ -61,7 +60,7 @@
#include <rte_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
+#include <rte_bus_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
@@ -110,7 +109,7 @@
* Structure of port parameters
*/
struct kni_port_params {
- uint8_t port_id;/* Port ID */
+ uint16_t port_id;/* Port ID */
unsigned lcore_rx; /* lcore ID for RX */
unsigned lcore_tx; /* lcore ID for TX */
uint32_t nb_lcore_k; /* Number of lcores for KNI multi kernel threads */
@@ -162,8 +161,8 @@ struct kni_interface_stats {
/* kni device statistics array */
static struct kni_interface_stats kni_stats[RTE_MAX_ETHPORTS];
-static int kni_change_mtu(uint8_t port_id, unsigned new_mtu);
-static int kni_config_network_interface(uint8_t port_id, uint8_t if_up);
+static int kni_change_mtu(uint16_t port_id, unsigned int new_mtu);
+static int kni_config_network_interface(uint16_t port_id, uint8_t if_up);
static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
@@ -171,7 +170,7 @@ static rte_atomic32_t kni_stop = RTE_ATOMIC32_INIT(0);
static void
print_stats(void)
{
- uint8_t i;
+ uint16_t i;
printf("\n**KNI example application statistics**\n"
"====== ============== ============ ============ ============ ============\n"
@@ -238,7 +237,8 @@ kni_burst_free_mbufs(struct rte_mbuf **pkts, unsigned num)
static void
kni_ingress(struct kni_port_params *p)
{
- uint8_t i, port_id;
+ uint8_t i;
+ uint16_t port_id;
unsigned nb_rx, num;
uint32_t nb_kni;
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
@@ -274,7 +274,8 @@ kni_ingress(struct kni_port_params *p)
static void
kni_egress(struct kni_port_params *p)
{
- uint8_t i, port_id;
+ uint8_t i;
+ uint16_t port_id;
unsigned nb_tx, num;
uint32_t nb_kni;
struct rte_mbuf *pkts_burst[PKT_BURST_SZ];
@@ -416,7 +417,7 @@ parse_config(const char *arg)
int i, j, nb_token;
char *str_fld[_NUM_FLD];
unsigned long int_fld[_NUM_FLD];
- uint8_t port_id, nb_kni_port_params = 0;
+ uint16_t port_id, nb_kni_port_params = 0;
memset(&kni_port_params_array, 0, sizeof(kni_port_params_array));
while (((p = strchr(p0, '(')) != NULL) &&
@@ -445,7 +446,7 @@ parse_config(const char *arg)
}
i = 0;
- port_id = (uint8_t)int_fld[i++];
+ port_id = int_fld[i++];
if (port_id >= RTE_MAX_ETHPORTS) {
printf("Port ID %d could not exceed the maximum %d\n",
port_id, RTE_MAX_ETHPORTS);
@@ -601,7 +602,7 @@ init_kni(void)
/* Initialise a single port on an Ethernet device */
static void
-init_port(uint8_t port)
+init_port(uint16_t port)
{
int ret;
uint16_t nb_rxd = NB_RXD;
@@ -643,11 +644,12 @@ init_port(uint8_t port)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status\n");
@@ -662,14 +664,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up - speed %uMbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -698,7 +699,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* Callback for request of changing MTU */
static int
-kni_change_mtu(uint8_t port_id, unsigned new_mtu)
+kni_change_mtu(uint16_t port_id, unsigned int new_mtu)
{
int ret;
struct rte_eth_conf conf;
@@ -741,7 +742,7 @@ kni_change_mtu(uint8_t port_id, unsigned new_mtu)
/* Callback for request of configuring network interface up/down */
static int
-kni_config_network_interface(uint8_t port_id, uint8_t if_up)
+kni_config_network_interface(uint16_t port_id, uint8_t if_up)
{
int ret = 0;
@@ -766,7 +767,7 @@ kni_config_network_interface(uint8_t port_id, uint8_t if_up)
}
static int
-kni_alloc(uint8_t port_id)
+kni_alloc(uint16_t port_id)
{
uint8_t i;
struct rte_kni *kni;
@@ -790,7 +791,7 @@ kni_alloc(uint8_t port_id)
} else
snprintf(conf.name, RTE_KNI_NAMESIZE,
"vEth%u", port_id);
- conf.group_id = (uint16_t)port_id;
+ conf.group_id = port_id;
conf.mbuf_size = MAX_PACKET_SZ;
/*
* The first KNI device associated to a port
@@ -803,8 +804,11 @@ kni_alloc(uint8_t port_id)
memset(&dev_info, 0, sizeof(dev_info));
rte_eth_dev_info_get(port_id, &dev_info);
- conf.addr = dev_info.pci_dev->addr;
- conf.id = dev_info.pci_dev->id;
+
+ if (dev_info.pci_dev) {
+ conf.addr = dev_info.pci_dev->addr;
+ conf.id = dev_info.pci_dev->id;
+ }
memset(&ops, 0, sizeof(ops));
ops.port_id = port_id;
@@ -825,7 +829,7 @@ kni_alloc(uint8_t port_id)
}
static int
-kni_free_kni(uint8_t port_id)
+kni_free_kni(uint16_t port_id)
{
uint8_t i;
struct kni_port_params **p = kni_port_params_array;
@@ -848,7 +852,7 @@ int
main(int argc, char** argv)
{
int ret;
- uint8_t nb_sys_ports, port;
+ uint16_t nb_sys_ports, port;
unsigned i;
/* Associate signal_hanlder function with USR signals */
@@ -919,9 +923,6 @@ main(int argc, char** argv)
continue;
kni_free_kni(port);
}
-#ifdef RTE_LIBRTE_XEN_DOM0
- rte_kni_close();
-#endif
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
if (kni_port_params_array[i]) {
rte_free(kni_port_params_array[i]);
diff --git a/examples/l2fwd-cat/Makefile b/examples/l2fwd-cat/Makefile
index ae921ade..a7fe6d68 100644
--- a/examples/l2fwd-cat/Makefile
+++ b/examples/l2fwd-cat/Makefile
@@ -40,9 +40,6 @@ endif
# Default target, can be overridden by command line or environment
RTE_TARGET ?= x86_64-native-linuxapp-gcc
-# Location of PQoS library and includes,
-PQOS_LIBRARY_PATH = $(PQOS_INSTALL_PATH)/libpqos.a
-
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
@@ -65,6 +62,6 @@ CFLAGS += -I$(PQOS_INSTALL_PATH)/../include
CFLAGS_cat.o := -D_GNU_SOURCE
LDLIBS += -L$(PQOS_INSTALL_PATH)
-LDLIBS += $(PQOS_LIBRARY_PATH)
+LDLIBS += -lpqos
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/l2fwd-cat/cat.c b/examples/l2fwd-cat/cat.c
index 6133bf5b..689dcb11 100644
--- a/examples/l2fwd-cat/cat.c
+++ b/examples/l2fwd-cat/cat.c
@@ -53,7 +53,11 @@
static const struct pqos_cap *m_cap;
static const struct pqos_cpuinfo *m_cpu;
static const struct pqos_capability *m_cap_l3ca;
+#if PQOS_VERSION <= 103
static unsigned m_sockets[PQOS_MAX_SOCKETS];
+#else
+static unsigned int *m_sockets;
+#endif
static unsigned m_sock_count;
static struct cat_config m_config[PQOS_MAX_CORES];
static unsigned m_config_count;
@@ -271,16 +275,16 @@ parse_l3ca(const char *l3ca)
/* scan the separator '@', ','(next) or '\0'(finish) */
l3ca += strcspn(l3ca, "@,");
- if (*l3ca == '@') {
- /* explicit assign cpu_set */
- offset = parse_set(l3ca + 1, &cpuset);
- if (offset < 0 || CPU_COUNT(&cpuset) == 0)
- goto err;
+ if (*l3ca != '@')
+ goto err;
- end = l3ca + 1 + offset;
- } else
+ /* explicit assign cpu_set */
+ offset = parse_set(l3ca + 1, &cpuset);
+ if (offset < 0 || CPU_COUNT(&cpuset) == 0)
goto err;
+ end = l3ca + 1 + offset;
+
if (*end != ',' && *end != '\0')
goto err;
@@ -353,9 +357,6 @@ parse_l3ca(const char *l3ca)
idx++;
} while (*end != '\0' && idx < PQOS_MAX_CORES);
- if (m_config_count == 0)
- goto err;
-
return 0;
err:
@@ -408,7 +409,11 @@ check_cpus(void)
goto exit;
}
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+ ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to read COS "
"associated to cpu %u.\n",
@@ -512,7 +517,11 @@ check_and_select_classes(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
for (j = 0; j < m_cpu->num_cores; j++) {
cpu_id = m_cpu->cores[j].lcore;
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_get(cpu_id, &cos_id);
+#else
+ ret = pqos_alloc_assoc_get(cpu_id, &cos_id);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to read COS associated to "
"cpu %u on phy_pkg %u.\n", cpu_id, phy_pkg_id);
@@ -598,10 +607,19 @@ configure_cat(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
l3ca.cdp = m_config[i].cdp;
if (m_config[i].cdp == 1) {
+#if PQOS_VERSION <= 103
l3ca.code_mask = m_config[i].code_mask;
l3ca.data_mask = m_config[i].data_mask;
+#else
+ l3ca.u.s.code_mask = m_config[i].code_mask;
+ l3ca.u.s.data_mask = m_config[i].data_mask;
+#endif
} else
+#if PQOS_VERSION <= 103
l3ca.ways_mask = m_config[i].mask;
+#else
+ l3ca.u.ways_mask = m_config[i].mask;
+#endif
for (j = 0; j < m_sock_count; j++) {
phy_pkg_id = m_sockets[j];
@@ -637,7 +655,11 @@ configure_cat(unsigned cos_id_map[][PQOS_MAX_SOCKETS])
cos_id = cos_id_map[i][phy_pkg_id];
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_set(cpu_id, cos_id);
+#else
+ ret = pqos_alloc_assoc_set(cpu_id, cos_id);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to associate COS %u to "
"cpu %u\n", cos_id, cpu_id);
@@ -754,24 +776,43 @@ print_cat_config(void)
if (tab[n].cdp == 1) {
printf("PQOS: COS: %u, cMASK: 0x%llx, "
"dMASK: 0x%llx\n", tab[n].class_id,
+#if PQOS_VERSION <= 103
(unsigned long long)tab[n].code_mask,
(unsigned long long)tab[n].data_mask);
+#else
+ (unsigned long long)tab[n].u.s.code_mask,
+ (unsigned long long)tab[n].u.s.data_mask);
+#endif
} else {
printf("PQOS: COS: %u, MASK: 0x%llx\n",
tab[n].class_id,
+#if PQOS_VERSION <= 103
(unsigned long long)tab[n].ways_mask);
+#else
+ (unsigned long long)tab[n].u.ways_mask);
+#endif
}
}
}
for (i = 0; i < m_sock_count; i++) {
+#if PQOS_VERSION <= 103
unsigned lcores[PQOS_MAX_SOCKET_CORES] = {0};
+#else
+ unsigned int *lcores = NULL;
+#endif
unsigned lcount = 0;
unsigned n = 0;
+#if PQOS_VERSION <= 103
ret = pqos_cpu_get_cores(m_cpu, m_sockets[i],
PQOS_MAX_SOCKET_CORES, &lcount, &lcores[0]);
if (ret != PQOS_RETVAL_OK) {
+#else
+ lcores = pqos_cpu_get_cores(m_cpu, m_sockets[i],
+ &lcount);
+ if (lcores == NULL || lcount == 0) {
+#endif
printf("PQOS: Error retrieving core information!\n");
return;
}
@@ -780,13 +821,21 @@ print_cat_config(void)
for (n = 0; n < lcount; n++) {
unsigned class_id = 0;
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_get(lcores[n], &class_id);
+#else
+ ret = pqos_alloc_assoc_get(lcores[n], &class_id);
+#endif
if (ret == PQOS_RETVAL_OK)
printf("PQOS: CPU: %u, COS: %u\n", lcores[n],
class_id);
else
printf("PQOS: CPU: %u, ERROR\n", lcores[n]);
}
+
+#if PQOS_VERSION > 103
+ free(lcores);
+#endif
}
}
@@ -849,7 +898,12 @@ cat_fini(void)
m_cap = NULL;
m_cpu = NULL;
m_cap_l3ca = NULL;
+#if PQOS_VERSION <= 103
memset(m_sockets, 0, sizeof(m_sockets));
+#else
+ if (m_sockets != NULL)
+ free(m_sockets);
+#endif
m_sock_count = 0;
memset(m_config, 0, sizeof(m_config));
m_config_count = 0;
@@ -875,7 +929,11 @@ cat_exit(void)
if (CPU_ISSET(cpu_id, &m_config[i].cpumask) == 0)
continue;
+#if PQOS_VERSION <= 103
ret = pqos_l3ca_assoc_set(cpu_id, 0);
+#else
+ ret = pqos_alloc_assoc_set(cpu_id, 0);
+#endif
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Failed to associate COS 0 to "
"cpu %u\n", cpu_id);
@@ -927,7 +985,9 @@ cat_init(int argc, char **argv)
/* PQoS Initialization - Check and initialize CAT capability */
cfg.fd_log = STDOUT_FILENO;
cfg.verbose = 0;
+#if PQOS_VERSION <= 103
cfg.cdp_cfg = PQOS_REQUIRE_CDP_ANY;
+#endif
ret = pqos_init(&cfg);
if (ret != PQOS_RETVAL_OK) {
printf("PQOS: Error initializing PQoS library!\n");
@@ -953,9 +1013,14 @@ cat_init(int argc, char **argv)
}
/* Get CPU socket information */
+#if PQOS_VERSION <= 103
ret = pqos_cpu_get_sockets(m_cpu, PQOS_MAX_SOCKETS, &m_sock_count,
m_sockets);
if (ret != PQOS_RETVAL_OK) {
+#else
+ m_sockets = pqos_cpu_get_sockets(m_cpu, &m_sock_count);
+ if (m_sockets == NULL) {
+#endif
printf("PQOS: Error retrieving CPU socket information!\n");
ret = -EFAULT;
goto err;
diff --git a/examples/l2fwd-cat/l2fwd-cat.c b/examples/l2fwd-cat/l2fwd-cat.c
index c293bd9c..b10ac896 100644
--- a/examples/l2fwd-cat/l2fwd-cat.c
+++ b/examples/l2fwd-cat/l2fwd-cat.c
@@ -59,7 +59,7 @@ static const struct rte_eth_conf port_conf_default = {
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -106,7 +106,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -124,8 +124,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ const uint16_t nb_ports = rte_eth_dev_count();
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
@@ -181,7 +181,7 @@ main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
@@ -217,7 +217,7 @@ main(int argc, char *argv[])
/* Initialize all ports. */
for (portid = 0; portid < nb_ports; portid++)
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
if (rte_lcore_count() > 1)
diff --git a/examples/l2fwd-crypto/main.c b/examples/l2fwd-crypto/main.c
index f020be32..d4e1682c 100644
--- a/examples/l2fwd-crypto/main.c
+++ b/examples/l2fwd-crypto/main.c
@@ -67,8 +67,6 @@
#include <rte_memcpy.h>
#include <rte_memory.h>
#include <rte_mempool.h>
-#include <rte_memzone.h>
-#include <rte_pci.h>
#include <rte_per_lcore.h>
#include <rte_prefetch.h>
#include <rte_random.h>
@@ -86,6 +84,8 @@ enum cdev_type {
#define MAX_STR_LEN 32
#define MAX_KEY_SIZE 128
+#define MAX_IV_SIZE 16
+#define MAX_AAD_SIZE 65535
#define MAX_PKT_BURST 32
#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
#define MAX_SESSIONS 32
@@ -112,7 +112,7 @@ static uint64_t l2fwd_enabled_port_mask;
static uint64_t l2fwd_enabled_crypto_mask;
/* list of enabled ports */
-static uint32_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
+static uint16_t l2fwd_dst_ports[RTE_MAX_ETHPORTS];
struct pkt_buffer {
@@ -139,7 +139,7 @@ enum l2fwd_crypto_xform_chain {
struct l2fwd_key {
uint8_t *data;
uint32_t length;
- phys_addr_t phys_addr;
+ rte_iova_t phys_addr;
};
struct l2fwd_iv {
@@ -224,7 +224,7 @@ struct l2fwd_crypto_params {
/** lcore configuration */
struct lcore_queue_conf {
unsigned nb_rx_ports;
- unsigned rx_port_list[MAX_RX_QUEUE_PER_LCORE];
+ uint16_t rx_port_list[MAX_RX_QUEUE_PER_LCORE];
unsigned nb_crypto_devs;
unsigned cryptodev_list[MAX_RX_QUEUE_PER_LCORE];
@@ -290,7 +290,7 @@ print_stats(void)
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
uint64_t total_packets_enqueued, total_packets_dequeued,
total_packets_errors;
- unsigned portid;
+ uint16_t portid;
uint64_t cdevid;
total_packets_dropped = 0;
@@ -496,7 +496,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->auth.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
/* For wireless algorithms, offset/length must be in bits */
@@ -534,7 +534,16 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *iv_ptr = rte_crypto_op_ctod_offset(op, uint8_t *,
IV_OFFSET);
/* Copy IV at the end of the crypto operation */
- rte_memcpy(iv_ptr, cparams->aead_iv.data, cparams->aead_iv.length);
+ /*
+ * If doing AES-CCM, nonce is copied one byte
+ * after the start of IV field
+ */
+ if (cparams->aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ rte_memcpy(iv_ptr + 1, cparams->aead_iv.data,
+ cparams->aead_iv.length);
+ else
+ rte_memcpy(iv_ptr, cparams->aead_iv.data,
+ cparams->aead_iv.length);
op->sym->aead.data.offset = ipdata_offset;
op->sym->aead.data.length = data_len;
@@ -548,7 +557,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
uint8_t *) + ipdata_offset + data_len;
}
- op->sym->auth.digest.phys_addr = rte_pktmbuf_mtophys_offset(m,
+ op->sym->aead.digest.phys_addr = rte_pktmbuf_iova_offset(m,
rte_pktmbuf_pkt_len(m) - cparams->digest_length);
if (cparams->aad.length) {
@@ -566,7 +575,7 @@ l2fwd_simple_crypto_enqueue(struct rte_mbuf *m,
/* Send the burst of packets on an output interface */
static int
l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
- uint8_t port)
+ uint16_t port)
{
struct rte_mbuf **pkt_buffer;
unsigned ret;
@@ -587,7 +596,7 @@ l2fwd_send_burst(struct lcore_queue_conf *qconf, unsigned n,
/* Enqueue packets for TX and prepare them to be sent */
static int
-l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
+l2fwd_send_packet(struct rte_mbuf *m, uint16_t port)
{
unsigned lcore_id, len;
struct lcore_queue_conf *qconf;
@@ -610,7 +619,7 @@ l2fwd_send_packet(struct rte_mbuf *m, uint8_t port)
}
static void
-l2fwd_mac_updating(struct rte_mbuf *m, unsigned int dest_portid)
+l2fwd_mac_updating(struct rte_mbuf *m, uint16_t dest_portid)
{
struct ether_hdr *eth;
void *tmp;
@@ -626,17 +635,17 @@ l2fwd_mac_updating(struct rte_mbuf *m, unsigned int dest_portid)
}
static void
-l2fwd_simple_forward(struct rte_mbuf *m, unsigned int portid,
+l2fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct l2fwd_crypto_options *options)
{
- unsigned int dst_port;
+ uint16_t dst_port;
dst_port = l2fwd_dst_ports[portid];
if (options->mac_updating)
l2fwd_mac_updating(m, dst_port);
- l2fwd_send_packet(m, (uint8_t) dst_port);
+ l2fwd_send_packet(m, dst_port);
}
/** Generate random key */
@@ -708,7 +717,8 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
unsigned lcore_id = rte_lcore_id();
uint64_t prev_tsc = 0, diff_tsc, cur_tsc, timer_tsc = 0;
- unsigned i, j, portid, nb_rx, len;
+ unsigned int i, j, nb_rx, len;
+ uint16_t portid;
struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -796,6 +806,14 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
if (!options->aad_param)
generate_random_key(port_cparams[i].aad.data,
port_cparams[i].aad.length);
+ /*
+ * If doing AES-CCM, first 18 bytes has to be reserved,
+ * and actual AAD should start from byte 18
+ */
+ if (port_cparams[i].aead_algo == RTE_CRYPTO_AEAD_AES_CCM)
+ memmove(port_cparams[i].aad.data + 18,
+ port_cparams[i].aad.data,
+ port_cparams[i].aad.length);
} else
port_cparams[i].aad.length = 0;
@@ -865,7 +883,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
continue;
l2fwd_send_burst(&lcore_queue_conf[lcore_id],
qconf->pkt_buf[portid].len,
- (uint8_t) portid);
+ portid);
qconf->pkt_buf[portid].len = 0;
}
@@ -899,7 +917,7 @@ l2fwd_main_loop(struct l2fwd_crypto_options *options)
cparams = &port_cparams[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
@@ -1081,15 +1099,16 @@ parse_cipher_op(enum rte_crypto_cipher_operation *op, char *optarg)
return -1;
}
-/** Parse crypto key command line argument */
+/** Parse bytes from command line argument */
static int
-parse_key(uint8_t *data, char *input_arg)
+parse_bytes(uint8_t *data, char *input_arg, uint16_t max_size)
{
unsigned byte_count;
char *token;
+ errno = 0;
for (byte_count = 0, token = strtok(input_arg, ":");
- (byte_count < MAX_KEY_SIZE) && (token != NULL);
+ (byte_count < max_size) && (token != NULL);
token = strtok(NULL, ":")) {
int number = (int)strtol(token, NULL, 16);
@@ -1229,7 +1248,8 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "cipher_key") == 0) {
options->ckey_param = 1;
options->cipher_xform.cipher.key.length =
- parse_key(options->cipher_xform.cipher.key.data, optarg);
+ parse_bytes(options->cipher_xform.cipher.key.data, optarg,
+ MAX_KEY_SIZE);
if (options->cipher_xform.cipher.key.length > 0)
return 0;
else
@@ -1242,7 +1262,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "cipher_iv") == 0) {
options->cipher_iv_param = 1;
options->cipher_iv.length =
- parse_key(options->cipher_iv.data, optarg);
+ parse_bytes(options->cipher_iv.data, optarg, MAX_IV_SIZE);
if (options->cipher_iv.length > 0)
return 0;
else
@@ -1265,7 +1285,8 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "auth_key") == 0) {
options->akey_param = 1;
options->auth_xform.auth.key.length =
- parse_key(options->auth_xform.auth.key.data, optarg);
+ parse_bytes(options->auth_xform.auth.key.data, optarg,
+ MAX_KEY_SIZE);
if (options->auth_xform.auth.key.length > 0)
return 0;
else
@@ -1279,7 +1300,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "auth_iv") == 0) {
options->auth_iv_param = 1;
options->auth_iv.length =
- parse_key(options->auth_iv.data, optarg);
+ parse_bytes(options->auth_iv.data, optarg, MAX_IV_SIZE);
if (options->auth_iv.length > 0)
return 0;
else
@@ -1302,7 +1323,8 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "aead_key") == 0) {
options->aead_key_param = 1;
options->aead_xform.aead.key.length =
- parse_key(options->aead_xform.aead.key.data, optarg);
+ parse_bytes(options->aead_xform.aead.key.data, optarg,
+ MAX_KEY_SIZE);
if (options->aead_xform.aead.key.length > 0)
return 0;
else
@@ -1316,7 +1338,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "aead_iv") == 0) {
options->aead_iv_param = 1;
options->aead_iv.length =
- parse_key(options->aead_iv.data, optarg);
+ parse_bytes(options->aead_iv.data, optarg, MAX_IV_SIZE);
if (options->aead_iv.length > 0)
return 0;
else
@@ -1329,7 +1351,7 @@ l2fwd_crypto_parse_args_long_options(struct l2fwd_crypto_options *options,
else if (strcmp(lgopts[option_index].name, "aad") == 0) {
options->aad_param = 1;
options->aad.length =
- parse_key(options->aad.data, optarg);
+ parse_bytes(options->aad.data, optarg, MAX_AAD_SIZE);
if (options->aad.length > 0)
return 0;
else
@@ -1731,11 +1753,12 @@ l2fwd_crypto_parse_args(struct l2fwd_crypto_options *options,
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1750,14 +1773,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -2319,7 +2341,7 @@ initialize_cryptodevs(struct l2fwd_crypto_options *options, unsigned nb_ports,
static int
initialize_ports(struct l2fwd_crypto_options *options)
{
- uint8_t last_portid, portid;
+ uint16_t last_portid, portid;
unsigned enabled_portcount = 0;
unsigned nb_ports = rte_eth_dev_count();
@@ -2340,12 +2362,12 @@ initialize_ports(struct l2fwd_crypto_options *options)
continue;
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
retval = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (retval < 0) {
printf("Cannot configure device: err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2353,7 +2375,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
&nb_txd);
if (retval < 0) {
printf("Cannot adjust number of descriptors: err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2364,7 +2386,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
NULL, l2fwd_pktmbuf_pool);
if (retval < 0) {
printf("rte_eth_rx_queue_setup:err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2375,7 +2397,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
NULL);
if (retval < 0) {
printf("rte_eth_tx_queue_setup:err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2384,7 +2406,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
retval = rte_eth_dev_start(portid);
if (retval < 0) {
printf("rte_eth_dev_start:err=%d, port=%u\n",
- retval, (unsigned) portid);
+ retval, portid);
return -1;
}
@@ -2393,7 +2415,7 @@ initialize_ports(struct l2fwd_crypto_options *options)
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
@@ -2461,7 +2483,7 @@ reserve_key_memory(struct l2fwd_crypto_options *options)
options->aad.data = rte_malloc("aad", MAX_KEY_SIZE, 0);
if (options->aad.data == NULL)
rte_exit(EXIT_FAILURE, "Failed to allocate memory for AAD");
- options->aad.phys_addr = rte_malloc_virt2phy(options->aad.data);
+ options->aad.phys_addr = rte_malloc_virt2iova(options->aad.data);
}
int
@@ -2470,7 +2492,8 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct l2fwd_crypto_options options;
- uint8_t nb_ports, nb_cryptodevs, portid, cdev_id;
+ uint8_t nb_cryptodevs, cdev_id;
+ uint16_t nb_ports, portid;
unsigned lcore_id, rx_lcore_id;
int ret, enabled_cdevcount, enabled_portcount;
uint8_t enabled_cdevs[RTE_CRYPTO_MAX_DEVS] = {0};
@@ -2547,7 +2570,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->nb_rx_ports] = portid;
qconf->nb_rx_ports++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned)portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
/* Enable Crypto devices */
diff --git a/examples/l2fwd-jobstats/main.c b/examples/l2fwd-jobstats/main.c
index 98936206..485370de 100644
--- a/examples/l2fwd-jobstats/main.c
+++ b/examples/l2fwd-jobstats/main.c
@@ -43,7 +43,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -53,7 +52,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_debug.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
@@ -176,7 +174,7 @@ show_lcore_stats(unsigned lcore_id)
uint64_t busy, busy_min, busy_max;
/* Jobs statistics. */
- const uint8_t port_cnt = qconf->n_rx_port;
+ const uint16_t port_cnt = qconf->n_rx_port;
uint64_t jobs_exec_cnt[port_cnt], jobs_period[port_cnt];
uint64_t jobs_exec[port_cnt], jobs_exec_min[port_cnt],
jobs_exec_max[port_cnt];
@@ -414,11 +412,11 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct rte_mbuf *m;
- const uint8_t port_idx = (uintptr_t) arg;
+ const uint16_t port_idx = (uintptr_t) arg;
const unsigned lcore_id = rte_lcore_id();
struct lcore_queue_conf *qconf = &lcore_queue_conf[lcore_id];
struct rte_jobstats *job = &qconf->port_fwd_jobs[port_idx];
- const uint8_t portid = qconf->rx_port_list[port_idx];
+ const uint16_t portid = qconf->rx_port_list[port_idx];
uint8_t j;
uint16_t total_nb_rx;
@@ -428,7 +426,7 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
/* Call rx burst 2 times. This allow rte_jobstats logic to see if this
* function must be called more frequently. */
- total_nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
+ total_nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
MAX_PKT_BURST);
for (j = 0; j < total_nb_rx; j++) {
@@ -438,7 +436,7 @@ l2fwd_fwd_job(__rte_unused struct rte_timer *timer, void *arg)
}
if (total_nb_rx == MAX_PKT_BURST) {
- const uint16_t nb_rx = rte_eth_rx_burst((uint8_t) portid, 0, pkts_burst,
+ const uint16_t nb_rx = rte_eth_rx_burst(portid, 0, pkts_burst,
MAX_PKT_BURST);
total_nb_rx += nb_rx;
@@ -464,7 +462,7 @@ l2fwd_flush_job(__rte_unused struct rte_timer *timer, __rte_unused void *arg)
uint64_t now;
unsigned lcore_id;
struct lcore_queue_conf *qconf;
- uint8_t portid;
+ uint16_t portid;
unsigned i;
uint32_t sent;
struct rte_eth_dev_tx_buffer *buffer;
@@ -714,11 +712,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -733,14 +732,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -776,9 +774,9 @@ main(int argc, char **argv)
unsigned nb_ports_in_mask = 0;
int ret;
char name[RTE_JOBSTATS_NAMESIZE];
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
uint8_t i;
/* init EAL */
@@ -861,7 +859,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
@@ -870,24 +868,24 @@ main(int argc, char **argv)
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n", (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
@@ -899,7 +897,7 @@ main(int argc, char **argv)
l2fwd_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
@@ -907,8 +905,9 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid),
NULL);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup:err=%d, port=%u\n",
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -916,7 +915,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
@@ -924,21 +923,22 @@ main(int argc, char **argv)
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
printf("done:\n");
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
diff --git a/examples/l2fwd-keepalive/main.c b/examples/l2fwd-keepalive/main.c
index 83bc542c..358ca5ec 100644
--- a/examples/l2fwd-keepalive/main.c
+++ b/examples/l2fwd-keepalive/main.c
@@ -51,7 +51,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -61,7 +60,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -157,7 +155,7 @@ print_stats(__attribute__((unused)) struct rte_timer *ptr_timer,
__attribute__((unused)) void *ptr_data)
{
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
- unsigned portid;
+ uint16_t portid;
total_packets_dropped = 0;
total_packets_tx = 0;
@@ -299,7 +297,7 @@ l2fwd_main_loop(void)
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
@@ -479,11 +477,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -498,14 +497,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -560,9 +558,9 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
struct sigaction signal_handler;
@@ -653,7 +651,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
printf("Lcore %u: RX port %u\n",
- rx_lcore_id, (unsigned) portid);
+ rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
@@ -662,26 +660,25 @@ main(int argc, char **argv)
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n",
- (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid, &l2fwd_ports_eth_addr[portid]);
@@ -694,7 +691,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
@@ -704,7 +701,7 @@ main(int argc, char **argv)
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -712,7 +709,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
@@ -720,21 +717,22 @@ main(int argc, char **argv)
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: "
"%02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
diff --git a/examples/l2fwd/main.c b/examples/l2fwd/main.c
index 14263358..e89e2e1b 100644
--- a/examples/l2fwd/main.c
+++ b/examples/l2fwd/main.c
@@ -52,7 +52,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -62,7 +61,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -304,7 +302,7 @@ l2fwd_main_loop(void)
for (i = 0; i < qconf->n_rx_port; i++) {
portid = qconf->rx_port_list[i];
- nb_rx = rte_eth_rx_burst((uint8_t) portid, 0,
+ nb_rx = rte_eth_rx_burst(portid, 0,
pkts_burst, MAX_PKT_BURST);
port_statistics[portid].rx += nb_rx;
@@ -480,11 +478,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -503,14 +502,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -553,9 +551,9 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
@@ -644,7 +642,7 @@ main(int argc, char **argv)
qconf->rx_port_list[qconf->n_rx_port] = portid;
qconf->n_rx_port++;
- printf("Lcore %u: RX port %u\n", rx_lcore_id, (unsigned) portid);
+ printf("Lcore %u: RX port %u\n", rx_lcore_id, portid);
}
nb_ports_available = nb_ports;
@@ -653,24 +651,24 @@ main(int argc, char **argv)
for (portid = 0; portid < nb_ports; portid++) {
/* skip ports that are not enabled */
if ((l2fwd_enabled_port_mask & (1 << portid)) == 0) {
- printf("Skipping disabled port %u\n", (unsigned) portid);
+ printf("Skipping disabled port %u\n", portid);
nb_ports_available--;
continue;
}
/* init port */
- printf("Initializing port %u... ", (unsigned) portid);
+ printf("Initializing port %u... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
rte_exit(EXIT_FAILURE, "Cannot configure device: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &nb_rxd,
&nb_txd);
if (ret < 0)
rte_exit(EXIT_FAILURE,
"Cannot adjust number of descriptors: err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
rte_eth_macaddr_get(portid,&l2fwd_ports_eth_addr[portid]);
@@ -682,7 +680,7 @@ main(int argc, char **argv)
l2fwd_pktmbuf_pool);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_rx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* init one TX queue on each port */
fflush(stdout);
@@ -691,7 +689,7 @@ main(int argc, char **argv)
NULL);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
/* Initialize TX buffers */
tx_buffer[portid] = rte_zmalloc_socket("tx_buffer",
@@ -699,7 +697,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(tx_buffer[portid], MAX_PKT_BURST);
@@ -707,21 +705,22 @@ main(int argc, char **argv)
rte_eth_tx_buffer_count_callback,
&port_statistics[portid].dropped);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ portid);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
rte_exit(EXIT_FAILURE, "rte_eth_dev_start:err=%d, port=%u\n",
- ret, (unsigned) portid);
+ ret, portid);
printf("done: \n");
rte_eth_promiscuous_enable(portid);
printf("Port %u, MAC address: %02X:%02X:%02X:%02X:%02X:%02X\n\n",
- (unsigned) portid,
+ portid,
l2fwd_ports_eth_addr[portid].addr_bytes[0],
l2fwd_ports_eth_addr[portid].addr_bytes[1],
l2fwd_ports_eth_addr[portid].addr_bytes[2],
diff --git a/examples/l3fwd-acl/main.c b/examples/l3fwd-acl/main.c
index 8eff4de4..e50b1a1a 100644
--- a/examples/l3fwd-acl/main.c
+++ b/examples/l3fwd-acl/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -121,7 +119,7 @@ static int promiscuous_on; /**< Ports set in promiscuous mode off by default. */
static int numa_on = 1; /**< NUMA is enabled by default. */
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -131,7 +129,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -184,7 +182,7 @@ static inline int
is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len);
#endif
static inline void
-send_single_packet(struct rte_mbuf *m, uint8_t port);
+send_single_packet(struct rte_mbuf *m, uint16_t port);
#define MAX_ACL_RULE_NUM 100000
#define DEFAULT_MAX_CATEGORIES 1
@@ -1026,6 +1024,7 @@ add_rules(const char *rule_path,
char buff[LINE_MAX];
FILE *fh = fopen(rule_path, "rb");
unsigned int i = 0;
+ int val;
if (fh == NULL)
rte_exit(EXIT_FAILURE, "%s: Open %s failed\n", __func__,
@@ -1042,7 +1041,11 @@ add_rules(const char *rule_path,
rte_exit(EXIT_FAILURE, "Not find any route entries in %s!\n",
rule_path);
- fseek(fh, 0, SEEK_SET);
+ val = fseek(fh, 0, SEEK_SET);
+ if (val < 0) {
+ rte_exit(EXIT_FAILURE, "%s: File seek operation failed\n",
+ __func__);
+ }
acl_rules = calloc(acl_num, rule_size);
@@ -1297,7 +1300,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/* Enqueue a single packet, and send burst if queue is filled */
static inline void
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
struct lcore_conf *qconf;
@@ -1358,7 +1361,8 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
int socketid;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1)
@@ -1381,7 +1385,7 @@ main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -1500,7 +1504,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -1829,11 +1833,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1848,14 +1853,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -1893,7 +1897,8 @@ main(int argc, char **argv)
uint16_t queueid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint16_t portid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
diff --git a/examples/l3fwd-power/main.c b/examples/l3fwd-power/main.c
index fd442f5e..0a4ed145 100644
--- a/examples/l3fwd-power/main.c
+++ b/examples/l3fwd-power/main.c
@@ -50,7 +50,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -174,7 +172,7 @@ enum freq_scale_hint_t
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
enum freq_scale_hint_t freq_up_hint;
uint32_t zero_rx_packet_count;
@@ -190,7 +188,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -308,8 +306,8 @@ static lookup_struct_t *ipv6_l3fwd_lookup_struct[NB_SOCKETS];
#define IPV6_L3FWD_NUM_ROUTES \
(sizeof(ipv6_l3fwd_route_array) / sizeof(ipv6_l3fwd_route_array[0]))
-static uint8_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
-static uint8_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv4_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
+static uint16_t ipv6_l3fwd_out_if[L3FWD_HASH_ENTRIES] __rte_cache_aligned;
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
@@ -370,7 +368,7 @@ static struct rte_timer power_timers[RTE_MAX_LCORE];
static inline uint32_t power_idle_heuristic(uint32_t zero_rx_packet_count);
static inline enum freq_scale_hint_t power_freq_scaleup_heuristic( \
- unsigned lcore_id, uint8_t port_id, uint16_t queue_id);
+ unsigned int lcore_id, uint16_t port_id, uint16_t queue_id);
/* exit signal handler */
static void
@@ -451,7 +449,7 @@ power_timer_cb(__attribute__((unused)) struct rte_timer *tim,
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
struct lcore_conf *qconf;
@@ -523,8 +521,8 @@ print_ipv6_key(struct ipv6_5tuple key)
key.port_dst, key.port_src, key.proto);
}
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t * ipv4_l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
@@ -559,11 +557,11 @@ get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv4_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
-static inline uint8_t
-get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
struct ipv6_5tuple key;
@@ -599,18 +597,18 @@ get_ipv6_dst_port(struct ipv6_hdr *ipv6_hdr, uint8_t portid,
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : ipv6_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
next_hop : portid);
}
@@ -634,7 +632,7 @@ parse_ptype_one(struct rte_mbuf *m)
}
static uint16_t
-cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
@@ -648,7 +646,7 @@ cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
}
static int
-add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
+add_cb_parse_ptype(uint16_t portid, uint16_t queueid)
{
printf("Port %d: softly parse packet type info\n", portid);
if (rte_eth_add_rx_callback(portid, queueid, cb_parse_ptype, NULL))
@@ -659,13 +657,13 @@ add_cb_parse_ptype(uint8_t portid, uint16_t queueid)
}
static inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
void *d_addr_bytes;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -752,13 +750,11 @@ power_idle_heuristic(uint32_t zero_rx_packet_count)
*/
else
return SUSPEND_THRESHOLD;
-
- return 0;
}
static inline enum freq_scale_hint_t
power_freq_scaleup_heuristic(unsigned lcore_id,
- uint8_t port_id,
+ uint16_t port_id,
uint16_t queue_id)
{
/**
@@ -805,7 +801,8 @@ sleep_until_rx_interrupt(int num)
{
struct rte_epoll_event event[num];
int n, i;
- uint8_t port_id, queue_id;
+ uint16_t port_id;
+ uint8_t queue_id;
void *data;
RTE_LOG(INFO, L3FWD_POWER,
@@ -832,7 +829,8 @@ static void turn_on_intr(struct lcore_conf *qconf)
{
int i;
struct lcore_rx_queue *rx_queue;
- uint8_t port_id, queue_id;
+ uint8_t queue_id;
+ uint16_t port_id;
for (i = 0; i < qconf->n_rx_queue; ++i) {
rx_queue = &(qconf->rx_queue_list[i]);
@@ -848,7 +846,8 @@ static void turn_on_intr(struct lcore_conf *qconf)
static int event_register(struct lcore_conf *qconf)
{
struct lcore_rx_queue *rx_queue;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
uint32_t data;
int ret;
int i;
@@ -879,7 +878,8 @@ main_loop(__attribute__((unused)) void *dummy)
uint64_t prev_tsc, diff_tsc, cur_tsc;
uint64_t prev_tsc_power = 0, cur_tsc_power, diff_tsc_power;
int i, j, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
struct lcore_rx_queue *rx_queue;
enum freq_scale_hint_t lcore_scaleup_hint;
@@ -904,7 +904,7 @@ main_loop(__attribute__((unused)) void *dummy)
for (i = 0; i < qconf->n_rx_queue; i++) {
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%hhu "
+ RTE_LOG(INFO, L3FWD_POWER, " -- lcoreid=%u portid=%u "
"rxqueueid=%hhu\n", lcore_id, portid, queueid);
}
@@ -1111,7 +1111,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -1541,11 +1541,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -1594,7 +1595,7 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
}
}
-static int check_ptype(uint8_t portid)
+static int check_ptype(uint16_t portid)
{
int i, ret;
int ptype_l3_ipv4 = 0;
@@ -1645,13 +1646,14 @@ main(int argc, char **argv)
struct rte_eth_dev_info dev_info;
struct rte_eth_txconf *txconf;
int ret;
- unsigned nb_ports;
+ uint16_t nb_ports;
uint16_t queueid;
unsigned lcore_id;
uint64_t hz;
uint32_t n_tx_queue, nb_lcores;
uint32_t dev_rxq_num, dev_txq_num;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
+ uint16_t portid;
uint16_t org_rxq_intr = port_conf.intr_conf.rxq;
/* catch SIGINT and restore cpufreq governor to ondemand */
@@ -1751,7 +1753,7 @@ main(int argc, char **argv)
rte_eth_dev_socket_id(portid));
if (qconf->tx_buffer[portid] == NULL)
rte_exit(EXIT_FAILURE, "Can't allocate tx buffer for port %u\n",
- (unsigned) portid);
+ portid);
rte_eth_tx_buffer_init(qconf->tx_buffer[portid], MAX_PKT_BURST);
}
@@ -1871,7 +1873,7 @@ main(int argc, char **argv)
rte_spinlock_init(&(locks[portid]));
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
/* launch per-lcore init on every lcore */
rte_eal_mp_remote_launch(main_loop, NULL, CALL_MASTER);
diff --git a/examples/l3fwd-vf/main.c b/examples/l3fwd-vf/main.c
index 34e4a6be..6ef89fc8 100644
--- a/examples/l3fwd-vf/main.c
+++ b/examples/l3fwd-vf/main.c
@@ -48,7 +48,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -59,7 +58,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -155,7 +153,7 @@ struct mbuf_table {
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -165,7 +163,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -300,7 +298,7 @@ static struct lcore_conf lcore_conf[RTE_MAX_LCORE];
static rte_spinlock_t spinlock_conf[RTE_MAX_ETHPORTS] = {RTE_SPINLOCK_INITIALIZER};
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -324,7 +322,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint32_t lcore_id;
uint16_t len;
@@ -396,8 +394,9 @@ print_key(struct ipv4_5tuple key)
(unsigned)key.ip_dst, (unsigned)key.ip_src, key.port_dst, key.port_src, key.proto);
}
-static inline uint8_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+static inline uint16_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
struct ipv4_5tuple key;
struct tcp_hdr *tcp;
@@ -430,29 +429,31 @@ get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd
/* Find destination port */
ret = rte_hash_lookup(l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0)? portid : l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_dst_port(struct ipv4_hdr *ipv4_hdr, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+static inline uint32_t
+get_dst_port(struct ipv4_hdr *ipv4_hdr, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm_lookup(l3fwd_lookup_struct,
- rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0)?
- next_hop : portid);
+ return ((rte_lpm_lookup(l3fwd_lookup_struct,
+ rte_be_to_cpu_32(ipv4_hdr->dst_addr), &next_hop) == 0) ?
+ next_hop : portid);
}
#endif
static inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid, lookup_struct_t * l3fwd_lookup_struct)
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid,
+ lookup_struct_t *l3fwd_lookup_struct)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
void *tmp;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -496,7 +497,8 @@ main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, j, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
@@ -516,8 +518,8 @@ main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n", lcore_id,
- portid, queueid);
+ RTE_LOG(INFO, L3FWD, " --lcoreid=%u portid=%u rxqueueid=%hhu\n",
+ lcore_id, portid, queueid);
}
while (1) {
@@ -624,7 +626,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -676,8 +678,8 @@ print_usage(const char *prgname)
static void
signal_handler(int signum)
{
- uint8_t portid;
- uint8_t nb_ports = rte_eth_dev_count();
+ uint16_t portid;
+ uint16_t nb_ports = rte_eth_dev_count();
/* When we receive a SIGINT signal */
if (signum == SIGINT) {
@@ -749,7 +751,7 @@ parse_config(const char *q_arg)
nb_lcore_params);
return -1;
}
- lcore_params_array[nb_lcore_params].port_id = (uint8_t)int_fld[FLD_PORT];
+ lcore_params_array[nb_lcore_params].port_id = int_fld[FLD_PORT];
lcore_params_array[nb_lcore_params].queue_id = (uint8_t)int_fld[FLD_QUEUE];
lcore_params_array[nb_lcore_params].lcore_id = (uint8_t)int_fld[FLD_LCORE];
++nb_lcore_params;
@@ -953,11 +955,11 @@ main(int argc, char **argv)
struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t nb_lcores;
uint16_t n_tx_queue;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
signal(SIGINT, signal_handler);
/* init EAL */
diff --git a/examples/l3fwd/l3fwd.h b/examples/l3fwd/l3fwd.h
index 011ba148..4bb15943 100644
--- a/examples/l3fwd/l3fwd.h
+++ b/examples/l3fwd/l3fwd.h
@@ -83,7 +83,7 @@ struct mbuf_table {
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -117,7 +117,7 @@ extern struct lcore_conf lcore_conf[RTE_MAX_LCORE];
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct lcore_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -139,7 +139,7 @@ send_burst(struct lcore_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
send_single_packet(struct lcore_conf *qconf,
- struct rte_mbuf *m, uint8_t port)
+ struct rte_mbuf *m, uint16_t port)
{
uint16_t len;
@@ -212,11 +212,11 @@ int
lpm_check_ptype(int portid);
uint16_t
-em_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
+em_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t max_pkts, void *user_param);
uint16_t
-lpm_cb_parse_ptype(uint8_t port, uint16_t queue, struct rte_mbuf *pkts[],
+lpm_cb_parse_ptype(uint16_t port, uint16_t queue, struct rte_mbuf *pkts[],
uint16_t nb_pkts, uint16_t max_pkts, void *user_param);
int
diff --git a/examples/l3fwd/l3fwd_altivec.h b/examples/l3fwd/l3fwd_altivec.h
new file mode 100644
index 00000000..a1d25eaa
--- /dev/null
+++ b/examples/l3fwd/l3fwd_altivec.h
@@ -0,0 +1,284 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _L3FWD_ALTIVEC_H_
+#define _L3FWD_ALTIVEC_H_
+
+#include "l3fwd.h"
+#include "l3fwd_common.h"
+
+/*
+ * Update source and destination MAC addresses in the ethernet header.
+ * Perform RFC1812 checks and updates for IPV4 packets.
+ */
+static inline void
+processx4_step3(struct rte_mbuf *pkt[FWDSTEP], uint16_t dst_port[FWDSTEP])
+{
+ vector unsigned int te[FWDSTEP]; /* current leading 16B of each frame */
+ vector unsigned int ve[FWDSTEP]; /* replacement MACs from val_eth[] */
+ vector unsigned int *p[FWDSTEP];
+
+ /* Point at the start of each frame (the ethernet header). */
+ p[0] = rte_pktmbuf_mtod(pkt[0], vector unsigned int *);
+ p[1] = rte_pktmbuf_mtod(pkt[1], vector unsigned int *);
+ p[2] = rte_pktmbuf_mtod(pkt[2], vector unsigned int *);
+ p[3] = rte_pktmbuf_mtod(pkt[3], vector unsigned int *);
+
+ /*
+ * Load the per-destination-port MAC template and the current
+ * header for each packet.
+ */
+ ve[0] = (vector unsigned int)val_eth[dst_port[0]];
+ te[0] = *p[0];
+
+ ve[1] = (vector unsigned int)val_eth[dst_port[1]];
+ te[1] = *p[1];
+
+ ve[2] = (vector unsigned int)val_eth[dst_port[2]];
+ te[2] = *p[2];
+
+ ve[3] = (vector unsigned int)val_eth[dst_port[3]];
+ te[3] = *p[3];
+
+ /* Update first 12 bytes, keep rest bytes intact. */
+ /*
+ * The select mask keeps the last two 16-bit lanes (bytes 12-15:
+ * EtherType plus the first payload bytes) from te and takes the
+ * first 12 bytes (dst + src MAC) from ve.
+ */
+ te[0] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[0],
+ (vector unsigned short)te[0],
+ (vector unsigned short) {0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ te[1] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[1],
+ (vector unsigned short)te[1],
+ (vector unsigned short) {0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ te[2] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[2],
+ (vector unsigned short)te[2],
+ (vector unsigned short) {0, 0, 0, 0, 0,
+ 0, 0xffff, 0xffff});
+
+ te[3] = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve[3],
+ (vector unsigned short)te[3],
+ (vector unsigned short) {0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ /* Write the patched headers back. */
+ *p[0] = te[0];
+ *p[1] = te[1];
+ *p[2] = te[2];
+ *p[3] = te[3];
+
+ /*
+ * RFC1812 validation/TTL update on the IPv4 header that follows
+ * each ethernet header; may rewrite dst_port[i] (e.g. to BAD_PORT).
+ */
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[0] + 1),
+ &dst_port[0], pkt[0]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[1] + 1),
+ &dst_port[1], pkt[1]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[2] + 1),
+ &dst_port[2], pkt[2]->packet_type);
+ rfc1812_process((struct ipv4_hdr *)((struct ether_hdr *)p[3] + 1),
+ &dst_port[3], pkt[3]->packet_type);
+}
+
+/*
+ * Group consecutive packets with the same destination port in bursts of 4.
+ * Suppose we have array of destination ports:
+ * dst_port[] = {a, b, c, d, e, ... }
+ * dp1 should contain: <a, b, c, d>, dp2: <b, c, d, e>.
+ * We do 4 comparisons at once and the result is a 4-bit mask.
+ * This mask is used as an index into prebuild array of pnum values.
+ */
+static inline uint16_t *
+port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, vector unsigned short dp1,
+ vector unsigned short dp2)
+{
+ /* Alias pn as a u64 so a whole pnum group can be stored at once. */
+ union {
+ uint16_t u16[FWDSTEP + 1];
+ uint64_t u64;
+ } *pnum = (void *)pn;
+
+ int32_t v;
+
+ /* 4-bit equality mask between adjacent destination ports. */
+ v = vec_any_eq(dp1, dp2);
+
+
+ /* update last port counter. */
+ lp[0] += gptbl[v].lpv;
+
+ /*
+ * NOTE(review): the NEON variant of this function inserts an
+ * rte_compiler_barrier() here (see l3fwd_neon.h in this patch);
+ * confirm whether the AltiVec build needs the same barrier.
+ */
+ /* if dest port value has changed. */
+ if (v != GRPMSK) {
+ pnum->u64 = gptbl[v].pnum;
+ pnum->u16[FWDSTEP] = 1;
+ lp = pnum->u16 + gptbl[v].idx;
+ }
+
+ return lp;
+}
+
+/**
+ * Process one packet:
+ * Update source and destination MAC addresses in the ethernet header.
+ * Perform RFC1812 checks and updates for IPV4 packets.
+ */
+static inline void
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port)
+{
+ struct ether_hdr *eth_hdr;
+ vector unsigned int te, ve;
+
+ eth_hdr = rte_pktmbuf_mtod(pkt, struct ether_hdr *);
+
+ /* te = current first 16B of the frame, ve = new MACs for the port. */
+ te = *(vector unsigned int *)eth_hdr;
+ ve = (vector unsigned int)val_eth[dst_port[0]];
+
+ /* May rewrite *dst_port (e.g. to BAD_PORT) on RFC1812 failure. */
+ rfc1812_process((struct ipv4_hdr *)(eth_hdr + 1), dst_port,
+ pkt->packet_type);
+
+ /* dynamically vec_sel te and ve for MASK_ETH (0x3f) */
+ /* Keep the last 4 bytes from te; take the 12 MAC bytes from ve. */
+ te = (vector unsigned int)vec_sel(
+ (vector unsigned short)ve,
+ (vector unsigned short)te,
+ (vector unsigned short){0, 0, 0, 0,
+ 0, 0, 0xffff, 0xffff});
+
+ *(vector unsigned int *)eth_hdr = te;
+}
+
+/**
+ * Send packets burst from pkts_burst to the ports in dst_port array
+ */
+static __rte_always_inline void
+send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
+ uint16_t dst_port[MAX_PKT_BURST], int nb_rx)
+{
+ int32_t k;
+ int j = 0;
+ uint16_t dlp; /* last destination port seen by the grouping step */
+ uint16_t *lp; /* counter of the current run of equal ports */
+ uint16_t pnum[MAX_PKT_BURST + 1]; /* run lengths, indexed by start pos */
+
+ /*
+ * Finish packet processing and group consecutive
+ * packets with the same destination port.
+ */
+ k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+ if (k != 0) {
+ vector unsigned short dp1, dp2;
+
+ lp = pnum;
+ lp[0] = 1;
+
+ processx4_step3(pkts_burst, dst_port);
+
+ /* dp1: <d[0], d[1], d[2], d[3], ... > */
+ dp1 = *(vector unsigned short *)dst_port;
+
+ for (j = FWDSTEP; j != k; j += FWDSTEP) {
+ processx4_step3(&pkts_burst[j], &dst_port[j]);
+
+ /*
+ * dp2:
+ * <d[j-3], d[j-2], d[j-1], d[j], ... >
+ */
+ dp2 = *((vector unsigned short *)
+ &dst_port[j - FWDSTEP + 1]);
+ lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+
+ /*
+ * dp1:
+ * <d[j], d[j+1], d[j+2], d[j+3], ... >
+ */
+ dp1 = vec_sro(dp2, (vector unsigned char) {
+ 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, (FWDSTEP - 1) * sizeof(dst_port[0])});
+ }
+
+ /*
+ * dp2: <d[j-3], d[j-2], d[j-1], d[j-1], ... >
+ */
+ dp2 = vec_perm(dp1, (vector unsigned short){},
+ (vector unsigned char){0xf9});
+ lp = port_groupx4(&pnum[j - FWDSTEP], lp, dp1, dp2);
+
+ /*
+ * remove values added by the last repeated
+ * dst port.
+ */
+ lp[0]--;
+ dlp = dst_port[j - 1];
+ } else {
+ /* set dlp and lp to the never used values. */
+ dlp = BAD_PORT - 1;
+ lp = pnum + MAX_PKT_BURST;
+ }
+
+ /* Process up to last 3 packets one by one. */
+ /* nb_rx % FWDSTEP is 0..3; case 0 simply skips this switch. */
+ switch (nb_rx % FWDSTEP) {
+ case 3:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ /* fall-through */
+ case 2:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ /* fall-through */
+ case 1:
+ process_packet(pkts_burst[j], dst_port + j);
+ GROUP_PORT_STEP(dlp, dst_port, lp, pnum, j);
+ j++;
+ }
+
+ /*
+ * Send packets out, through destination port.
+ * Consecutive packets with the same destination port
+ * are already grouped together.
+ * If destination port for the packet equals BAD_PORT,
+ * then free the packet without sending it out.
+ */
+ for (j = 0; j < nb_rx; j += k) {
+
+ int32_t m;
+ uint16_t pn;
+
+ pn = dst_port[j];
+ k = pnum[j]; /* length of the run starting at j */
+
+ if (likely(pn != BAD_PORT))
+ send_packetsx4(qconf, pn, pkts_burst + j, k);
+ else
+ for (m = j; m != j + k; m++)
+ rte_pktmbuf_free(pkts_burst[m]);
+
+ }
+}
+
+#endif /* _L3FWD_ALTIVEC_H_ */
diff --git a/examples/l3fwd/l3fwd_common.h b/examples/l3fwd/l3fwd_common.h
index 2867365d..7002a43a 100644
--- a/examples/l3fwd/l3fwd_common.h
+++ b/examples/l3fwd/l3fwd_common.h
@@ -207,7 +207,7 @@ static const struct {
};
static __rte_always_inline void
-send_packetsx4(struct lcore_conf *qconf, uint8_t port, struct rte_mbuf *m[],
+send_packetsx4(struct lcore_conf *qconf, uint16_t port, struct rte_mbuf *m[],
uint32_t num)
{
uint32_t len, j, n;
diff --git a/examples/l3fwd/l3fwd_em.c b/examples/l3fwd/l3fwd_em.c
index 53d081bd..2b7c173b 100644
--- a/examples/l3fwd/l3fwd_em.c
+++ b/examples/l3fwd/l3fwd_em.c
@@ -274,8 +274,8 @@ em_mask_key(void *key, xmm_t mask)
#error No vector engine (SSE, NEON, ALTIVEC) available, check your toolchain
#endif
-static inline uint8_t
-em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
+static inline uint16_t
+em_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
int ret = 0;
union ipv4_5tuple_host key;
@@ -292,11 +292,11 @@ em_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
+ return (ret < 0) ? portid : ipv4_l3fwd_out_if[ret];
}
-static inline uint8_t
-em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
+static inline uint16_t
+em_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
{
int ret = 0;
union ipv6_5tuple_host key;
@@ -325,7 +325,7 @@ em_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
+ return (ret < 0) ? portid : ipv6_l3fwd_out_if[ret];
}
#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
@@ -628,7 +628,7 @@ em_parse_ptype(struct rte_mbuf *m)
}
uint16_t
-em_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+em_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
@@ -649,7 +649,8 @@ em_main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -671,7 +672,7 @@ em_main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
diff --git a/examples/l3fwd/l3fwd_em.h b/examples/l3fwd/l3fwd_em.h
index d509a1fc..302291d7 100644
--- a/examples/l3fwd/l3fwd_em.h
+++ b/examples/l3fwd/l3fwd_em.h
@@ -35,12 +35,12 @@
#define __L3FWD_EM_H__
static __rte_always_inline void
-l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_em_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- uint8_t dst_port;
+ uint16_t dst_port;
uint32_t tcp_or_udp;
uint32_t l3_ptypes;
@@ -112,7 +112,7 @@ l3fwd_em_simple_forward(struct rte_mbuf *m, uint8_t portid,
*/
static inline void
l3fwd_em_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
diff --git a/examples/l3fwd/l3fwd_em_hlm.h b/examples/l3fwd/l3fwd_em_hlm.h
index 520672d5..9d7afe05 100644
--- a/examples/l3fwd/l3fwd_em_hlm.h
+++ b/examples/l3fwd/l3fwd_em_hlm.h
@@ -52,7 +52,7 @@
static __rte_always_inline void
em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
- uint8_t portid, uint16_t dst_port[])
+ uint16_t portid, uint16_t dst_port[])
{
int i;
int32_t ret[EM_HASH_LOOKUP_COUNT];
@@ -68,7 +68,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
EM_HASH_LOOKUP_COUNT, ret);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
- dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ dst_port[i] = ((ret[i] < 0) ?
portid : ipv4_l3fwd_out_if[ret[i]]);
if (dst_port[i] >= RTE_MAX_ETHPORTS ||
@@ -79,7 +79,7 @@ em_get_dst_port_ipv4xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
static __rte_always_inline void
em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
- uint8_t portid, uint16_t dst_port[])
+ uint16_t portid, uint16_t dst_port[])
{
int i;
int32_t ret[EM_HASH_LOOKUP_COUNT];
@@ -95,7 +95,7 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
EM_HASH_LOOKUP_COUNT, ret);
for (i = 0; i < EM_HASH_LOOKUP_COUNT; i++) {
- dst_port[i] = (uint8_t) ((ret[i] < 0) ?
+ dst_port[i] = ((ret[i] < 0) ?
portid : ipv6_l3fwd_out_if[ret[i]]);
if (dst_port[i] >= RTE_MAX_ETHPORTS ||
@@ -106,9 +106,9 @@ em_get_dst_port_ipv6xN(struct lcore_conf *qconf, struct rte_mbuf *m[],
static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
- uint8_t next_hop;
+ uint16_t next_hop;
struct ipv4_hdr *ipv4_hdr;
struct ipv6_hdr *ipv6_hdr;
uint32_t tcp_or_udp;
@@ -158,7 +158,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
*/
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i, j, pos;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_em_sequential.h b/examples/l3fwd/l3fwd_em_sequential.h
index cb7c2abb..fa89f0f3 100644
--- a/examples/l3fwd/l3fwd_em_sequential.h
+++ b/examples/l3fwd/l3fwd_em_sequential.h
@@ -51,7 +51,7 @@
static __rte_always_inline uint16_t
em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
uint8_t next_hop;
struct ipv4_hdr *ipv4_hdr;
@@ -103,7 +103,7 @@ em_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
*/
static inline void
l3fwd_em_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i, j;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_lpm.c b/examples/l3fwd/l3fwd_lpm.c
index ff1e4035..2d0e1724 100644
--- a/examples/l3fwd/l3fwd_lpm.c
+++ b/examples/l3fwd/l3fwd_lpm.c
@@ -105,7 +105,7 @@ struct rte_lpm *ipv4_l3fwd_lpm_lookup_struct[NB_SOCKETS];
struct rte_lpm6 *ipv6_l3fwd_lpm_lookup_struct[NB_SOCKETS];
static inline uint16_t
-lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
+lpm_get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid, void *lookup_struct)
{
uint32_t next_hop;
struct rte_lpm *ipv4_l3fwd_lookup_struct =
@@ -117,7 +117,7 @@ lpm_get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid, void *lookup_struct)
}
static inline uint16_t
-lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
+lpm_get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid, void *lookup_struct)
{
uint32_t next_hop;
struct rte_lpm6 *ipv6_l3fwd_lookup_struct =
@@ -130,7 +130,7 @@ lpm_get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid, void *lookup_struct)
static __rte_always_inline uint16_t
lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint8_t portid)
+ uint16_t portid)
{
struct ipv6_hdr *ipv6_hdr;
struct ipv4_hdr *ipv4_hdr;
@@ -162,7 +162,7 @@ lpm_get_dst_port(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
*/
static __rte_always_inline uint16_t
lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
- uint32_t dst_ipv4, uint8_t portid)
+ uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
@@ -191,6 +191,8 @@ lpm_get_dst_port_with_ipv4(const struct lcore_conf *qconf, struct rte_mbuf *pkt,
#include "l3fwd_lpm_sse.h"
#elif defined RTE_MACHINE_CPUFLAG_NEON
#include "l3fwd_lpm_neon.h"
+#elif defined(RTE_ARCH_PPC_64)
+#include "l3fwd_lpm_altivec.h"
#else
#include "l3fwd_lpm.h"
#endif
@@ -203,7 +205,8 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
unsigned lcore_id;
uint64_t prev_tsc, diff_tsc, cur_tsc;
int i, nb_rx;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
struct lcore_conf *qconf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
US_PER_S * BURST_TX_DRAIN_US;
@@ -225,7 +228,7 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
portid = qconf->rx_queue_list[i].port_id;
queueid = qconf->rx_queue_list[i].queue_id;
RTE_LOG(INFO, L3FWD,
- " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -263,7 +266,8 @@ lpm_main_loop(__attribute__((unused)) void *dummy)
if (nb_rx == 0)
continue;
-#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON
+#if defined RTE_ARCH_X86 || defined RTE_MACHINE_CPUFLAG_NEON \
+ || defined RTE_ARCH_PPC_64
l3fwd_lpm_send_packets(nb_rx, pkts_burst,
portid, qconf);
#else
@@ -413,7 +417,7 @@ lpm_parse_ptype(struct rte_mbuf *m)
}
uint16_t
-lpm_cb_parse_ptype(uint8_t port __rte_unused, uint16_t queue __rte_unused,
+lpm_cb_parse_ptype(uint16_t port __rte_unused, uint16_t queue __rte_unused,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
uint16_t max_pkts __rte_unused,
void *user_param __rte_unused)
diff --git a/examples/l3fwd/l3fwd_lpm.h b/examples/l3fwd/l3fwd_lpm.h
index 55c3e832..53b7fc80 100644
--- a/examples/l3fwd/l3fwd_lpm.h
+++ b/examples/l3fwd/l3fwd_lpm.h
@@ -35,7 +35,7 @@
#define __L3FWD_LPM_H__
static __rte_always_inline void
-l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid,
+l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint16_t portid,
struct lcore_conf *qconf)
{
struct ether_hdr *eth_hdr;
@@ -104,7 +104,7 @@ l3fwd_lpm_simple_forward(struct rte_mbuf *m, uint8_t portid,
static inline void
l3fwd_lpm_no_opt_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
diff --git a/examples/l3fwd/l3fwd_lpm_altivec.h b/examples/l3fwd/l3fwd_lpm_altivec.h
new file mode 100644
index 00000000..36ca983f
--- /dev/null
+++ b/examples/l3fwd/l3fwd_lpm_altivec.h
@@ -0,0 +1,164 @@
+/*-
+ * BSD LICENSE
+ *
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
+ * Copyright(c) 2017 IBM Corporation.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __L3FWD_LPM_ALTIVEC_H__
+#define __L3FWD_LPM_ALTIVEC_H__
+
+#include "l3fwd_altivec.h"
+
+/*
+ * Read packet_type and destination IPV4 addresses from 4 mbufs.
+ */
+static inline void
+processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
+ vector unsigned int *dip,
+ uint32_t *ipv4_flag)
+{
+ struct ipv4_hdr *ipv4_hdr;
+ struct ether_hdr *eth_hdr;
+ uint32_t x0, x1, x2, x3; /* dst addresses, network byte order */
+
+ eth_hdr = rte_pktmbuf_mtod(pkt[0], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x0 = ipv4_hdr->dst_addr;
+ /* Start the flag from packet 0; AND in the rest below so it is
+ * non-zero only when all four packets carry RTE_PTYPE_L3_IPV4. */
+ ipv4_flag[0] = pkt[0]->packet_type & RTE_PTYPE_L3_IPV4;
+
+ rte_compiler_barrier();
+ eth_hdr = rte_pktmbuf_mtod(pkt[1], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x1 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[1]->packet_type;
+
+ rte_compiler_barrier();
+ eth_hdr = rte_pktmbuf_mtod(pkt[2], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x2 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[2]->packet_type;
+
+ rte_compiler_barrier();
+ eth_hdr = rte_pktmbuf_mtod(pkt[3], struct ether_hdr *);
+ ipv4_hdr = (struct ipv4_hdr *)(eth_hdr + 1);
+ x3 = ipv4_hdr->dst_addr;
+ ipv4_flag[0] &= pkt[3]->packet_type;
+
+ rte_compiler_barrier();
+ /* Pack the 4 destination addresses into one vector. */
+ dip[0] = (vector unsigned int){x0, x1, x2, x3};
+}
+
+/*
+ * Lookup into LPM for destination port.
+ * If lookup fails, use incoming port (portid) as destination port.
+ */
+static inline void
+processx4_step2(const struct lcore_conf *qconf,
+ vector unsigned int dip,
+ uint32_t ipv4_flag,
+ uint16_t portid,
+ struct rte_mbuf *pkt[FWDSTEP],
+ uint16_t dprt[FWDSTEP])
+{
+ /*
+ * portid is uint16_t to match the port-id widening done across
+ * l3fwd in this release (see lcore_rx_queue.port_id and the
+ * SSE/NEON processx4_step2); a uint8_t here would truncate
+ * port ids >= 256.
+ */
+ rte_xmm_t dst;
+ const vector unsigned char bswap_mask = (vector unsigned char){
+ 3, 2, 1, 0,
+ 7, 6, 5, 4,
+ 11, 10, 9, 8,
+ 15, 14, 13, 12};
+
+ /* Byte swap 4 IPV4 addresses. */
+ dip = (vector unsigned int)vec_perm(*(vector unsigned char *)&dip,
+ (vector unsigned char){}, bswap_mask);
+
+ /* if all 4 packets are IPV4. */
+ if (likely(ipv4_flag)) {
+ rte_lpm_lookupx4(qconf->ipv4_lookup_struct, (xmm_t)dip,
+ (uint32_t *)&dst, portid);
+ /* get rid of unused upper 16 bit for each dport. */
+ dst.x = (xmm_t)vec_packs(dst.x, dst.x);
+ *(uint64_t *)dprt = dst.u64[0];
+ } else {
+ /* Mixed burst: resolve each packet individually. */
+ dst.x = (xmm_t)dip;
+ dprt[0] = lpm_get_dst_port_with_ipv4(qconf, pkt[0],
+ dst.u32[0], portid);
+ dprt[1] = lpm_get_dst_port_with_ipv4(qconf, pkt[1],
+ dst.u32[1], portid);
+ dprt[2] = lpm_get_dst_port_with_ipv4(qconf, pkt[2],
+ dst.u32[2], portid);
+ dprt[3] = lpm_get_dst_port_with_ipv4(qconf, pkt[3],
+ dst.u32[3], portid);
+ }
+}
+
+/*
+ * Buffer optimized handling of packets, invoked
+ * from main_loop.
+ */
+static inline void
+l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
+ uint16_t portid, struct lcore_conf *qconf)
+{
+ /*
+ * portid widened to uint16_t: lpm_main_loop stores port_id as
+ * uint16_t and the SSE/NEON variants of this function already take
+ * uint16_t, so uint8_t here would truncate port ids >= 256.
+ */
+ int32_t j;
+ uint16_t dst_port[MAX_PKT_BURST];
+ vector unsigned int dip[MAX_PKT_BURST / FWDSTEP];
+ uint32_t ipv4_flag[MAX_PKT_BURST / FWDSTEP];
+ const int32_t k = RTE_ALIGN_FLOOR(nb_rx, FWDSTEP);
+
+ /* Gather dst addresses and the all-IPv4 flag, 4 packets at a time. */
+ for (j = 0; j != k; j += FWDSTEP)
+ processx4_step1(&pkts_burst[j], &dip[j / FWDSTEP],
+ &ipv4_flag[j / FWDSTEP]);
+
+ /* Resolve destination ports via LPM, 4 packets at a time. */
+ for (j = 0; j != k; j += FWDSTEP)
+ processx4_step2(qconf, dip[j / FWDSTEP],
+ ipv4_flag[j / FWDSTEP],
+ portid, &pkts_burst[j], &dst_port[j]);
+
+ /* Classify last up to 3 packets one by one */
+ switch (nb_rx % FWDSTEP) {
+ case 3:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+ j++;
+ /* fall-through */
+ case 2:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+ j++;
+ /* fall-through */
+ case 1:
+ dst_port[j] = lpm_get_dst_port(qconf, pkts_burst[j], portid);
+ j++;
+ }
+
+ send_packets_multi(qconf, pkts_burst, dst_port, nb_rx);
+}
+
+#endif /* __L3FWD_LPM_ALTIVEC_H__ */
diff --git a/examples/l3fwd/l3fwd_lpm_neon.h b/examples/l3fwd/l3fwd_lpm_neon.h
index baedbfe8..85f314d1 100644
--- a/examples/l3fwd/l3fwd_lpm_neon.h
+++ b/examples/l3fwd/l3fwd_lpm_neon.h
@@ -82,7 +82,7 @@ static inline void
processx4_step2(const struct lcore_conf *qconf,
int32x4_t dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
@@ -115,7 +115,7 @@ processx4_step2(const struct lcore_conf *qconf,
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t i = 0, j = 0;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_lpm_sse.h b/examples/l3fwd/l3fwd_lpm_sse.h
index 4e294c84..d474396e 100644
--- a/examples/l3fwd/l3fwd_lpm_sse.h
+++ b/examples/l3fwd/l3fwd_lpm_sse.h
@@ -79,7 +79,7 @@ static inline void
processx4_step2(const struct lcore_conf *qconf,
__m128i dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
@@ -112,7 +112,7 @@ processx4_step2(const struct lcore_conf *qconf,
*/
static inline void
l3fwd_lpm_send_packets(int nb_rx, struct rte_mbuf **pkts_burst,
- uint8_t portid, struct lcore_conf *qconf)
+ uint16_t portid, struct lcore_conf *qconf)
{
int32_t j;
uint16_t dst_port[MAX_PKT_BURST];
diff --git a/examples/l3fwd/l3fwd_neon.h b/examples/l3fwd/l3fwd_neon.h
index 42d50d3c..b319b5a9 100644
--- a/examples/l3fwd/l3fwd_neon.h
+++ b/examples/l3fwd/l3fwd_neon.h
@@ -114,6 +114,7 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, uint16x8_t dp1,
/* update last port counter. */
lp[0] += gptbl[v].lpv;
+ rte_compiler_barrier();
/* if dest port value has changed. */
if (v != GRPMSK) {
@@ -192,7 +193,7 @@ send_packets_multi(struct lcore_conf *qconf, struct rte_mbuf **pkts_burst,
* dp1:
* <d[j], d[j+1], d[j+2], d[j+3], ... >
*/
- dp1 = vextq_u16(dp1, dp1, FWDSTEP - 1);
+ dp1 = vextq_u16(dp2, dp1, FWDSTEP - 1);
}
/*
diff --git a/examples/l3fwd/main.c b/examples/l3fwd/main.c
index 81995fdb..6229568f 100644
--- a/examples/l3fwd/main.c
+++ b/examples/l3fwd/main.c
@@ -50,7 +50,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -124,7 +122,7 @@ uint32_t hash_entry_number = HASH_ENTRY_NUMBER_DEFAULT;
struct lcore_conf lcore_conf[RTE_MAX_LCORE];
struct lcore_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
} __rte_cache_aligned;
@@ -245,7 +243,7 @@ check_lcore_params(void)
static int
check_port_config(const unsigned nb_ports)
{
- unsigned portid;
+ uint16_t portid;
uint16_t i;
for (i = 0; i < nb_lcore_params; ++i) {
@@ -263,7 +261,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -445,7 +443,7 @@ parse_config(const char *q_arg)
static void
parse_eth_dest(const char *optarg)
{
- uint8_t portid;
+ uint16_t portid;
char *port_end;
uint8_t c, *dest, peer_addr[6];
@@ -750,11 +748,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -773,14 +772,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps -%s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -818,7 +816,7 @@ signal_handler(int signum)
}
static int
-prepare_ptype_parser(uint8_t portid, uint16_t queueid)
+prepare_ptype_parser(uint16_t portid, uint16_t queueid)
{
if (parse_ptype) {
printf("Port %d: softly parse packet type info\n", portid);
@@ -847,10 +845,10 @@ main(int argc, char **argv)
struct rte_eth_txconf *txconf;
int ret;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
@@ -1048,7 +1046,7 @@ main(int argc, char **argv)
}
- check_all_ports_link_status((uint8_t)nb_ports, enabled_port_mask);
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
ret = 0;
/* launch per-lcore init on every lcore */
diff --git a/examples/link_status_interrupt/main.c b/examples/link_status_interrupt/main.c
index f4e3969a..bc47dcce 100644
--- a/examples/link_status_interrupt/main.c
+++ b/examples/link_status_interrupt/main.c
@@ -50,7 +50,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -145,7 +143,7 @@ print_stats(void)
{
struct rte_eth_link link;
uint64_t total_packets_dropped, total_packets_tx, total_packets_rx;
- unsigned portid;
+ uint16_t portid;
total_packets_dropped = 0;
total_packets_tx = 0;
@@ -165,7 +163,7 @@ print_stats(void)
continue;
memset(&link, 0, sizeof(link));
- rte_eth_link_get_nowait((uint8_t)portid, &link);
+ rte_eth_link_get_nowait(portid, &link);
printf("\nStatistics for port %u ------------------------------"
"\nLink status: %25s"
"\nLink speed: %26u"
@@ -470,7 +468,7 @@ lsi_parse_args(int argc, char **argv)
* int.
*/
static int
-lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
+lsi_event_callback(uint16_t port_id, enum rte_eth_event_type type, void *param,
void *ret_param)
{
struct rte_eth_link link;
@@ -494,11 +492,12 @@ lsi_event_callback(uint8_t port_id, enum rte_eth_event_type type, void *param,
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -513,14 +512,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -553,8 +551,8 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t portid, portid_last = 0;
+ uint16_t nb_ports;
+ uint16_t portid, portid_last = 0;
unsigned lcore_id, rx_lcore_id;
unsigned nb_ports_in_mask = 0;
diff --git a/examples/load_balancer/config.c b/examples/load_balancer/config.c
index 50325095..755a86e4 100644
--- a/examples/load_balancer/config.c
+++ b/examples/load_balancer/config.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -240,7 +238,7 @@ parse_arg_rx(const char *arg)
if (lp->io.rx.n_nic_queues >= APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE) {
return -9;
}
- lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = (uint8_t) port;
+ lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].port = port;
lp->io.rx.nic_queues[lp->io.rx.n_nic_queues].queue = (uint8_t) queue;
lp->io.rx.n_nic_queues ++;
@@ -318,7 +316,7 @@ parse_arg_tx(const char *arg)
if (lp->io.tx.n_nic_ports >= APP_MAX_NIC_TX_PORTS_PER_IO_LCORE) {
return -9;
}
- lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = (uint8_t) port;
+ lp->io.tx.nic_ports[lp->io.tx.n_nic_ports] = port;
lp->io.tx.n_nic_ports ++;
n_tuples ++;
@@ -488,7 +486,7 @@ app_check_lpm_table(void)
static int
app_check_every_rx_port_is_tx_enabled(void)
{
- uint8_t port;
+ uint16_t port;
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
if ((app_get_nic_rx_queues_per_port(port) > 0) && (app.nic_tx_port_mask[port] == 0)) {
@@ -762,7 +760,7 @@ app_parse_args(int argc, char **argv)
}
int
-app_get_nic_rx_queues_per_port(uint8_t port)
+app_get_nic_rx_queues_per_port(uint16_t port)
{
uint32_t i, count;
@@ -781,7 +779,7 @@ app_get_nic_rx_queues_per_port(uint8_t port)
}
int
-app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out)
+app_get_lcore_for_nic_rx(uint16_t port, uint8_t queue, uint32_t *lcore_out)
{
uint32_t lcore;
@@ -808,7 +806,7 @@ app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out)
}
int
-app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out)
+app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out)
{
uint32_t lcore;
@@ -901,7 +899,7 @@ app_print_params(void)
/* Print NIC RX configuration */
printf("NIC RX ports: ");
for (port = 0; port < APP_MAX_NIC_PORTS; port ++) {
- uint32_t n_rx_queues = app_get_nic_rx_queues_per_port((uint8_t) port);
+ uint32_t n_rx_queues = app_get_nic_rx_queues_per_port(port);
if (n_rx_queues == 0) {
continue;
diff --git a/examples/load_balancer/init.c b/examples/load_balancer/init.c
index 717232e6..3dab7f25 100644
--- a/examples/load_balancer/init.c
+++ b/examples/load_balancer/init.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -308,7 +306,7 @@ app_init_rings_tx(void)
continue;
}
- if (app_get_lcore_for_nic_tx((uint8_t) port, &lcore_io) < 0) {
+ if (app_get_lcore_for_nic_tx(port, &lcore_io) < 0) {
rte_panic("Algorithmic error (no I/O core to handle TX of port %u)\n",
port);
}
@@ -359,11 +357,12 @@ app_init_rings_tx(void)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
uint32_t n_rx_queues, n_tx_queues;
@@ -383,14 +382,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up - speed %uMbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -422,7 +420,8 @@ app_init_nics(void)
{
unsigned socket;
uint32_t lcore;
- uint8_t port, queue;
+ uint16_t port;
+ uint8_t queue;
int ret;
uint32_t n_rx_queues, n_tx_queues;
@@ -440,14 +439,14 @@ app_init_nics(void)
}
/* Init port */
- printf("Initializing NIC port %u ...\n", (unsigned) port);
+ printf("Initializing NIC port %u ...\n", port);
ret = rte_eth_dev_configure(
port,
(uint8_t) n_rx_queues,
(uint8_t) n_tx_queues,
&port_conf);
if (ret < 0) {
- rte_panic("Cannot init NIC port %u (%d)\n", (unsigned) port, ret);
+ rte_panic("Cannot init NIC port %u (%d)\n", port, ret);
}
rte_eth_promiscuous_enable(port);
@@ -457,7 +456,7 @@ app_init_nics(void)
port, &nic_rx_ring_size, &nic_tx_ring_size);
if (ret < 0) {
rte_panic("Cannot adjust number of descriptors for port %u (%d)\n",
- (unsigned) port, ret);
+ port, ret);
}
app.nic_rx_ring_size = nic_rx_ring_size;
app.nic_tx_ring_size = nic_tx_ring_size;
@@ -473,8 +472,7 @@ app_init_nics(void)
pool = app.lcore_params[lcore].pool;
printf("Initializing NIC port %u RX queue %u ...\n",
- (unsigned) port,
- (unsigned) queue);
+ port, queue);
ret = rte_eth_rx_queue_setup(
port,
queue,
@@ -484,9 +482,7 @@ app_init_nics(void)
pool);
if (ret < 0) {
rte_panic("Cannot init RX queue %u for port %u (%d)\n",
- (unsigned) queue,
- (unsigned) port,
- ret);
+ queue, port, ret);
}
}
@@ -495,7 +491,7 @@ app_init_nics(void)
app_get_lcore_for_nic_tx(port, &lcore);
socket = rte_lcore_to_socket_id(lcore);
printf("Initializing NIC port %u TX queue 0 ...\n",
- (unsigned) port);
+ port);
ret = rte_eth_tx_queue_setup(
port,
0,
diff --git a/examples/load_balancer/main.c b/examples/load_balancer/main.c
index 65ceea4a..7ced84d4 100644
--- a/examples/load_balancer/main.c
+++ b/examples/load_balancer/main.c
@@ -48,7 +48,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -58,7 +57,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
diff --git a/examples/load_balancer/main.h b/examples/load_balancer/main.h
index dc407555..bef2ba04 100644
--- a/examples/load_balancer/main.h
+++ b/examples/load_balancer/main.h
@@ -248,7 +248,7 @@ struct app_lcore_params_io {
struct {
/* NIC */
struct {
- uint8_t port;
+ uint16_t port;
uint8_t queue;
} nic_queues[APP_MAX_NIC_RX_QUEUES_PER_IO_LCORE];
uint32_t n_nic_queues;
@@ -275,7 +275,7 @@ struct app_lcore_params_io {
struct rte_ring *rings[APP_MAX_NIC_PORTS][APP_MAX_WORKER_LCORES];
/* NIC */
- uint8_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
+ uint16_t nic_ports[APP_MAX_NIC_TX_PORTS_PER_IO_LCORE];
uint32_t n_nic_ports;
/* Internal buffers */
@@ -368,9 +368,10 @@ void app_print_usage(void);
void app_init(void);
int app_lcore_main_loop(void *arg);
-int app_get_nic_rx_queues_per_port(uint8_t port);
-int app_get_lcore_for_nic_rx(uint8_t port, uint8_t queue, uint32_t *lcore_out);
-int app_get_lcore_for_nic_tx(uint8_t port, uint32_t *lcore_out);
+int app_get_nic_rx_queues_per_port(uint16_t port);
+int app_get_lcore_for_nic_rx(uint16_t port, uint8_t queue,
+ uint32_t *lcore_out);
+int app_get_lcore_for_nic_tx(uint16_t port, uint32_t *lcore_out);
int app_is_socket_used(uint32_t socket);
uint32_t app_get_lcores_io_rx(void);
uint32_t app_get_lcores_worker(void);
diff --git a/examples/load_balancer/runtime.c b/examples/load_balancer/runtime.c
index e54b7851..f65e14f0 100644
--- a/examples/load_balancer/runtime.c
+++ b/examples/load_balancer/runtime.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -188,7 +186,7 @@ app_lcore_io_rx(
uint32_t i;
for (i = 0; i < lp->rx.n_nic_queues; i ++) {
- uint8_t port = lp->rx.nic_queues[i].port;
+ uint16_t port = lp->rx.nic_queues[i].port;
uint8_t queue = lp->rx.nic_queues[i].queue;
uint32_t n_mbufs, j;
@@ -213,7 +211,7 @@ app_lcore_io_rx(
printf("I/O RX %u in (NIC port %u): NIC drop ratio = %.2f avg burst size = %.2f\n",
lcore,
- (unsigned) port,
+ port,
(double) stats.imissed / (double) (stats.imissed + stats.ipackets),
((double) lp->rx.nic_queues_count[i]) / ((double) lp->rx.nic_queues_iters[i]));
lp->rx.nic_queues_iters[i] = 0;
@@ -339,7 +337,7 @@ app_lcore_io_tx(
uint32_t i;
for (i = 0; i < lp->tx.n_nic_ports; i ++) {
- uint8_t port = lp->tx.nic_ports[i];
+ uint16_t port = lp->tx.nic_ports[i];
struct rte_ring *ring = lp->tx.rings[port][worker];
uint32_t n_mbufs, n_pkts;
int ret;
@@ -395,7 +393,7 @@ app_lcore_io_tx(
printf("\t\t\tI/O TX %u out (port %u): avg burst size = %.2f\n",
lcore,
- (unsigned) port,
+ port,
((double) lp->tx.nic_ports_count[port]) / ((double) lp->tx.nic_ports_iters[port]));
lp->tx.nic_ports_iters[port] = 0;
lp->tx.nic_ports_count[port] = 0;
@@ -418,7 +416,7 @@ app_lcore_io_tx(
static inline void
app_lcore_io_tx_flush(struct app_lcore_params_io *lp)
{
- uint8_t port;
+ uint16_t port;
uint32_t i;
for (i = 0; i < lp->tx.n_nic_ports; i++) {
@@ -569,7 +567,7 @@ app_lcore_worker(
if (lp->rings_out_iters[port] == APP_STATS){
printf("\t\tWorker %u out (NIC port %u): enq success rate = %.2f\n",
(unsigned) lp->worker_id,
- (unsigned) port,
+ port,
((double) lp->rings_out_count[port]) / ((double) lp->rings_out_iters[port]));
lp->rings_out_iters[port] = 0;
lp->rings_out_count[port] = 0;
diff --git a/examples/multi_process/client_server_mp/mp_client/client.c b/examples/multi_process/client_server_mp/mp_client/client.c
index f8453e57..30ce4b34 100644
--- a/examples/multi_process/client_server_mp/mp_client/client.c
+++ b/examples/multi_process/client_server_mp/mp_client/client.c
@@ -57,7 +57,6 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
@@ -74,7 +73,7 @@ static uint8_t client_id = 0;
#define MBQ_CAPACITY 32
/* maps input ports to output ports for packets */
-static uint8_t output_ports[RTE_MAX_ETHPORTS];
+static uint16_t output_ports[RTE_MAX_ETHPORTS];
/* buffers up a set of packet that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
@@ -150,7 +149,7 @@ static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
void *userdata) {
int i;
- uint8_t port_id = (uintptr_t)userdata;
+ uint16_t port_id = (uintptr_t)userdata;
tx_stats->tx_drop[port_id] += count;
@@ -161,7 +160,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
}
static void
-configure_tx_buffer(uint8_t port_id, uint16_t size)
+configure_tx_buffer(uint16_t port_id, uint16_t size)
{
int ret;
@@ -171,15 +170,16 @@ configure_tx_buffer(uint8_t port_id, uint16_t size)
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) port_id);
+ port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], size);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, (void *)(intptr_t)port_id);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
/*
@@ -195,8 +195,8 @@ configure_output_ports(const struct port_info *ports)
rte_exit(EXIT_FAILURE, "Too many ethernet ports. RTE_MAX_ETHPORTS = %u\n",
(unsigned)RTE_MAX_ETHPORTS);
for (i = 0; i < ports->num_ports - 1; i+=2){
- uint8_t p1 = ports->id[i];
- uint8_t p2 = ports->id[i+1];
+ uint16_t p1 = ports->id[i];
+ uint16_t p2 = ports->id[i+1];
output_ports[p1] = p2;
output_ports[p2] = p1;
@@ -215,8 +215,8 @@ static void
handle_packet(struct rte_mbuf *buf)
{
int sent;
- const uint8_t in_port = buf->port;
- const uint8_t out_port = output_ports[in_port];
+ const uint16_t in_port = buf->port;
+ const uint16_t out_port = output_ports[in_port];
struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];
sent = rte_eth_tx_buffer(out_port, client_id, buffer, buf);
@@ -275,7 +275,7 @@ main(int argc, char *argv[])
for (;;) {
uint16_t i, rx_pkts;
- uint8_t port;
+ uint16_t port;
rx_pkts = rte_ring_dequeue_burst(rx_ring, pkts,
PKT_READ_SIZE, NULL);
diff --git a/examples/multi_process/client_server_mp/mp_server/Makefile b/examples/multi_process/client_server_mp/mp_server/Makefile
index 5552999b..160c17b6 100644
--- a/examples/multi_process/client_server_mp/mp_server/Makefile
+++ b/examples/multi_process/client_server_mp/mp_server/Makefile
@@ -49,7 +49,7 @@ APP = mp_server
# all source are stored in SRCS-y
SRCS-y := main.c init.c args.c
-INC := $(wildcard *.h)
+INC := $(sort $(wildcard *.h))
CFLAGS += $(WERROR_FLAGS) -O3
CFLAGS += -I$(SRCDIR)/../shared
diff --git a/examples/multi_process/client_server_mp/mp_server/args.c b/examples/multi_process/client_server_mp/mp_server/args.c
index bf8c666c..a65884fd 100644
--- a/examples/multi_process/client_server_mp/mp_server/args.c
+++ b/examples/multi_process/client_server_mp/mp_server/args.c
@@ -74,7 +74,7 @@ parse_portmask(uint8_t max_ports, const char *portmask)
{
char *end = NULL;
unsigned long pm;
- uint8_t count = 0;
+ uint16_t count = 0;
if (portmask == NULL || *portmask == '\0')
return -1;
@@ -128,7 +128,7 @@ parse_num_clients(const char *clients)
* on error.
*/
int
-parse_app_args(uint8_t max_ports, int argc, char *argv[])
+parse_app_args(uint16_t max_ports, int argc, char *argv[])
{
int option_index, opt;
char **argvopt = argv;
diff --git a/examples/multi_process/client_server_mp/mp_server/args.h b/examples/multi_process/client_server_mp/mp_server/args.h
index 23af1bd3..33888b89 100644
--- a/examples/multi_process/client_server_mp/mp_server/args.h
+++ b/examples/multi_process/client_server_mp/mp_server/args.h
@@ -34,6 +34,6 @@
#ifndef _ARGS_H_
#define _ARGS_H_
-int parse_app_args(uint8_t max_ports, int argc, char *argv[]);
+int parse_app_args(uint16_t max_ports, int argc, char *argv[]);
#endif /* ifndef _ARGS_H_ */
diff --git a/examples/multi_process/client_server_mp/mp_server/init.c b/examples/multi_process/client_server_mp/mp_server/init.c
index 0bc92921..c8d02113 100644
--- a/examples/multi_process/client_server_mp/mp_server/init.c
+++ b/examples/multi_process/client_server_mp/mp_server/init.c
@@ -56,7 +56,6 @@
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
@@ -114,7 +113,7 @@ init_mbuf_pools(void)
* - start the port and report its status to stdout
*/
static int
-init_port(uint8_t port_num)
+init_port(uint16_t port_num)
{
/* for port configuration all features are off by default */
const struct rte_eth_conf port_conf = {
@@ -129,7 +128,7 @@ init_port(uint8_t port_num)
uint16_t q;
int retval;
- printf("Port %u init ... ", (unsigned)port_num);
+ printf("Port %u init ... ", port_num);
fflush(stdout);
/* Standard DPDK port initialisation - config port, then set up
@@ -200,11 +199,12 @@ init_shm_rings(void)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -262,7 +262,7 @@ init(int argc, char *argv[])
{
int retval;
const struct rte_memzone *mz;
- uint8_t i, total_ports;
+ uint16_t i, total_ports;
/* init EAL, parsing EAL args */
retval = rte_eal_init(argc, argv);
diff --git a/examples/multi_process/client_server_mp/mp_server/main.c b/examples/multi_process/client_server_mp/mp_server/main.c
index 7055b543..6eda556a 100644
--- a/examples/multi_process/client_server_mp/mp_server/main.c
+++ b/examples/multi_process/client_server_mp/mp_server/main.c
@@ -44,7 +44,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
@@ -59,7 +58,6 @@
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
@@ -88,7 +86,7 @@ struct client_rx_buf {
static struct client_rx_buf *cl_rx_buf;
static const char *
-get_printable_mac_addr(uint8_t port)
+get_printable_mac_addr(uint16_t port)
{
static const char err_address[] = "00:00:00:00:00:00";
static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
diff --git a/examples/multi_process/client_server_mp/shared/common.h b/examples/multi_process/client_server_mp/shared/common.h
index 631c4632..35a3b01d 100644
--- a/examples/multi_process/client_server_mp/shared/common.h
+++ b/examples/multi_process/client_server_mp/shared/common.h
@@ -57,8 +57,8 @@ struct tx_stats{
} __rte_cache_aligned;
struct port_info {
- uint8_t num_ports;
- uint8_t id[RTE_MAX_ETHPORTS];
+ uint16_t num_ports;
+ uint16_t id[RTE_MAX_ETHPORTS];
volatile struct rx_stats rx_stats;
volatile struct tx_stats tx_stats[MAX_CLIENTS];
};
diff --git a/examples/multi_process/l2fwd_fork/flib.c b/examples/multi_process/l2fwd_fork/flib.c
index c22e983b..a3f1d275 100644
--- a/examples/multi_process/l2fwd_fork/flib.c
+++ b/examples/multi_process/l2fwd_fork/flib.c
@@ -54,7 +54,6 @@
#include <rte_malloc.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -64,7 +63,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
diff --git a/examples/multi_process/l2fwd_fork/main.c b/examples/multi_process/l2fwd_fork/main.c
index f8a626ba..deace273 100644
--- a/examples/multi_process/l2fwd_fork/main.c
+++ b/examples/multi_process/l2fwd_fork/main.c
@@ -51,7 +51,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -62,7 +61,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -140,7 +138,8 @@ struct lcore_resource_struct {
/* ring[1] for slave send ack, master read */
struct rte_ring *ring[2];
int port_num; /* Total port numbers */
- uint8_t port[RTE_MAX_ETHPORTS]; /* Port id for that lcore to receive packets */
+ /* Port id for that lcore to receive packets */
+ uint16_t port[RTE_MAX_ETHPORTS];
}__attribute__((packed)) __rte_cache_aligned;
static struct lcore_resource_struct lcore_resource[RTE_MAX_LCORE];
@@ -871,11 +870,12 @@ l2fwd_parse_args(int argc, char **argv)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -890,14 +890,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up- speed %u Mbps- %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -930,9 +929,9 @@ main(int argc, char **argv)
struct lcore_queue_conf *qconf;
struct rte_eth_dev_info dev_info;
int ret;
- uint8_t nb_ports;
- uint8_t nb_ports_available;
- uint8_t portid, last_port;
+ uint16_t nb_ports;
+ uint16_t nb_ports_available;
+ uint16_t portid, last_port;
unsigned rx_lcore_id;
unsigned nb_ports_in_mask = 0;
unsigned i;
@@ -1204,10 +1203,7 @@ main(int argc, char **argv)
message_pool = rte_mempool_create("ms_msg_pool",
NB_CORE_MSGBUF * RTE_MAX_LCORE,
sizeof(enum l2fwd_cmd), NB_CORE_MSGBUF / 2,
- 0,
- rte_pktmbuf_pool_init, NULL,
- rte_pktmbuf_init, NULL,
- rte_socket_id(), 0);
+ 0, NULL, NULL, NULL, NULL, rte_socket_id(), 0);
if (message_pool == NULL)
rte_exit(EXIT_FAILURE, "Create msg mempool failed\n");
diff --git a/examples/multi_process/simple_mp/main.c b/examples/multi_process/simple_mp/main.c
index 2843d94e..62537b02 100644
--- a/examples/multi_process/simple_mp/main.c
+++ b/examples/multi_process/simple_mp/main.c
@@ -54,7 +54,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -67,6 +66,7 @@
#include <rte_mempool.h>
#include <cmdline_rdline.h>
#include <cmdline_parse.h>
+#include <cmdline_parse_string.h>
#include <cmdline_socket.h>
#include <cmdline.h>
#include "mp_commands.h"
@@ -76,7 +76,6 @@
static const char *_MSG_POOL = "MSG_POOL";
static const char *_SEC_2_PRI = "SEC_2_PRI";
static const char *_PRI_2_SEC = "PRI_2_SEC";
-const unsigned string_size = 64;
struct rte_ring *send_ring, *recv_ring;
struct rte_mempool *message_pool;
@@ -121,7 +120,7 @@ main(int argc, char **argv)
send_ring = rte_ring_create(_PRI_2_SEC, ring_size, rte_socket_id(), flags);
recv_ring = rte_ring_create(_SEC_2_PRI, ring_size, rte_socket_id(), flags);
message_pool = rte_mempool_create(_MSG_POOL, pool_size,
- string_size, pool_cache, priv_data_sz,
+ STR_TOKEN_SIZE, pool_cache, priv_data_sz,
NULL, NULL, NULL, NULL,
rte_socket_id(), flags);
} else {
diff --git a/examples/multi_process/simple_mp/mp_commands.c b/examples/multi_process/simple_mp/mp_commands.c
index 8da244bb..ef6dc58d 100644
--- a/examples/multi_process/simple_mp/mp_commands.c
+++ b/examples/multi_process/simple_mp/mp_commands.c
@@ -42,7 +42,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_atomic.h>
#include <rte_branch_prediction.h>
@@ -78,7 +77,7 @@ static void cmd_send_parsed(void *parsed_result,
if (rte_mempool_get(message_pool, &msg) < 0)
rte_panic("Failed to get message buffer\n");
- snprintf((char *)msg, string_size, "%s", res->message);
+ snprintf((char *)msg, STR_TOKEN_SIZE, "%s", res->message);
if (rte_ring_enqueue(send_ring, msg) < 0) {
printf("Failed to send message - message discarded\n");
rte_mempool_put(message_pool, msg);
diff --git a/examples/multi_process/simple_mp/mp_commands.h b/examples/multi_process/simple_mp/mp_commands.h
index 7e9a4ab2..452b3645 100644
--- a/examples/multi_process/simple_mp/mp_commands.h
+++ b/examples/multi_process/simple_mp/mp_commands.h
@@ -34,7 +34,6 @@
#ifndef _SIMPLE_MP_COMMANDS_H_
#define _SIMPLE_MP_COMMANDS_H_
-extern const unsigned string_size;
extern struct rte_ring *send_ring;
extern struct rte_mempool *message_pool;
extern volatile int quit;
diff --git a/examples/multi_process/symmetric_mp/main.c b/examples/multi_process/symmetric_mp/main.c
index 0f497910..6fb285c7 100644
--- a/examples/multi_process/symmetric_mp/main.c
+++ b/examples/multi_process/symmetric_mp/main.c
@@ -56,7 +56,6 @@
#include <rte_common.h>
#include <rte_log.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
@@ -65,7 +64,6 @@
#include <rte_branch_prediction.h>
#include <rte_debug.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_mempool.h>
@@ -102,7 +100,7 @@ struct port_stats{
static int proc_id = -1;
static unsigned num_procs = 0;
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0;
static struct lcore_ports lcore_ports[RTE_MAX_LCORE];
@@ -202,7 +200,8 @@ smp_parse_args(int argc, char **argv)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
+smp_port_init(uint16_t port, struct rte_mempool *mbuf_pool,
+ uint16_t num_queues)
{
struct rte_eth_conf port_conf = {
.rxmode = {
@@ -237,7 +236,7 @@ smp_port_init(uint8_t port, struct rte_mempool *mbuf_pool, uint16_t num_queues)
if (port >= rte_eth_dev_count())
return -1;
- printf("# Initialising port %u... ", (unsigned)port);
+ printf("# Initialising port %u... ", port);
fflush(stdout);
rte_eth_dev_info_get(port, &info);
@@ -362,11 +361,12 @@ lcore_main(void *arg __rte_unused)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -381,14 +381,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
diff --git a/examples/netmap_compat/lib/compat_netmap.c b/examples/netmap_compat/lib/compat_netmap.c
index af2d9f3f..12b3fcbe 100644
--- a/examples/netmap_compat/lib/compat_netmap.c
+++ b/examples/netmap_compat/lib/compat_netmap.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
-#include <rte_memzone.h>
#include <rte_spinlock.h>
#include <rte_string_fns.h>
@@ -129,7 +128,7 @@ static void netmap_unregif(uint32_t idx, uint32_t port);
static int32_t
-ifname_to_portid(const char *ifname, uint8_t *port)
+ifname_to_portid(const char *ifname, uint16_t *port)
{
char *endptr;
uint64_t portid;
@@ -140,7 +139,7 @@ ifname_to_portid(const char *ifname, uint8_t *port)
portid >= RTE_DIM(ports) || errno != 0)
return -EINVAL;
- *port = (uint8_t)portid;
+ *port = portid;
return 0;
}
@@ -222,10 +221,10 @@ fd_release(int32_t fd)
}
static int
-check_nmreq(struct nmreq *req, uint8_t *port)
+check_nmreq(struct nmreq *req, uint16_t *port)
{
int32_t rc;
- uint8_t portid;
+ uint16_t portid;
if (req == NULL)
return -EINVAL;
@@ -242,7 +241,7 @@ check_nmreq(struct nmreq *req, uint8_t *port)
}
if (ports[portid].pool == NULL) {
- RTE_LOG(ERR, USER1, "Misconfigured portid %hhu\n", portid);
+ RTE_LOG(ERR, USER1, "Misconfigured portid %u\n", portid);
return -EINVAL;
}
@@ -262,7 +261,7 @@ check_nmreq(struct nmreq *req, uint8_t *port)
static int
ioctl_niocginfo(__rte_unused int fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
struct nmreq *req;
int32_t rc;
@@ -283,7 +282,7 @@ ioctl_niocginfo(__rte_unused int fd, void * param)
}
static void
-netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
+netmap_ring_setup(struct netmap_ring *ring, uint16_t port, uint32_t ringid,
uint32_t num_slots)
{
uint32_t j;
@@ -305,7 +304,7 @@ netmap_ring_setup(struct netmap_ring *ring, uint8_t port, uint32_t ringid,
}
static int
-netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
+netmap_regif(struct nmreq *req, uint32_t idx, uint16_t port)
{
struct netmap_if *nmif;
struct netmap_ring *ring;
@@ -313,7 +312,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
int32_t rc;
if (ports[port].fd < RTE_DIM(fd_port)) {
- RTE_LOG(ERR, USER1, "port %hhu already in use by fd: %u\n",
+ RTE_LOG(ERR, USER1, "port %u already in use by fd: %u\n",
port, IDX_TO_FD(ports[port].fd));
return -EBUSY;
}
@@ -399,7 +398,7 @@ netmap_regif(struct nmreq *req, uint32_t idx, uint8_t port)
static int
ioctl_niocregif(int32_t fd, void * param)
{
- uint8_t portid;
+ uint16_t portid;
int32_t rc;
uint32_t idx;
struct nmreq *req;
@@ -422,7 +421,7 @@ netmap_unregif(uint32_t idx, uint32_t port)
{
fd_port[idx].port = FD_PORT_RSRV;
ports[port].fd = UINT32_MAX;
- rte_eth_dev_stop((uint8_t)port);
+ rte_eth_dev_stop(port);
}
/**
@@ -460,7 +459,7 @@ ioctl_niocunregif(int fd)
* packets as it can hold coming from its dpdk port.
*/
static inline int
-rx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+rx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
uint16_t max_burst)
{
int32_t i, n_rx;
@@ -513,7 +512,7 @@ rx_sync_if(uint32_t port)
for (i = 0; i < nifp->ni_rx_rings + 1; i++) {
r = NETMAP_RXRING(nifp, i);
- rx_sync_ring(r, (uint8_t)port, (uint16_t)i, burst);
+ rx_sync_ring(r, port, (uint16_t)i, burst);
rc += r->avail;
}
@@ -542,7 +541,7 @@ ioctl_niocrxsync(int fd)
* buffers into rte_mbufs and sending them out on the rings's dpdk port.
*/
static int
-tx_sync_ring(struct netmap_ring *ring, uint8_t port, uint16_t ring_number,
+tx_sync_ring(struct netmap_ring *ring, uint16_t port, uint16_t ring_number,
struct rte_mempool *pool, uint16_t max_burst)
{
uint32_t i, n_tx;
@@ -608,7 +607,7 @@ tx_sync_if(uint32_t port)
for (i = 0; i < nifp->ni_tx_rings + 1; i++) {
r = NETMAP_TXRING(nifp, i);
- tx_sync_ring(r, (uint8_t)port, (uint16_t)i, mp, burst);
+ tx_sync_ring(r, port, (uint16_t)i, mp, burst);
rc += r->avail;
}
@@ -686,7 +685,7 @@ rte_netmap_init(const struct rte_netmap_conf *conf)
int
-rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
+rte_netmap_init_port(uint16_t portid, const struct rte_netmap_port_conf *conf)
{
int32_t ret;
uint16_t i;
@@ -696,17 +695,17 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
portid >= RTE_DIM(ports) ||
conf->nr_tx_rings > netmap.conf.max_rings ||
conf->nr_rx_rings > netmap.conf.max_rings) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
return -EINVAL;
}
- rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
- tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
+ rx_slots = (uint16_t)rte_align32pow2(conf->nr_rx_slots);
+ tx_slots = (uint16_t)rte_align32pow2(conf->nr_tx_slots);
if (tx_slots > netmap.conf.max_slots ||
rx_slots > netmap.conf.max_slots) {
- RTE_LOG(ERR, USER1, "%s(%hhu): invalid parameters\n",
+ RTE_LOG(ERR, USER1, "%s(%u): invalid parameters\n",
__func__, portid);
return -EINVAL;
}
@@ -715,15 +714,15 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
conf->nr_tx_rings, conf->eth_conf);
if (ret < 0) {
- RTE_LOG(ERR, USER1, "Couldn't configure port %hhu\n", portid);
- return ret;
+ RTE_LOG(ERR, USER1, "Couldn't configure port %u\n", portid);
+ return ret;
}
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_slots, &tx_slots);
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't ot adjust number of descriptors for port %hhu\n",
+ "Couldn't ot adjust number of descriptors for port %u\n",
portid);
return ret;
}
@@ -734,8 +733,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure TX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure TX queue %u of port %u\n",
i, portid);
return ret;
}
@@ -745,8 +743,7 @@ rte_netmap_init_port(uint8_t portid, const struct rte_netmap_port_conf *conf)
if (ret < 0) {
RTE_LOG(ERR, USER1,
- "Couldn't configure RX queue %"PRIu16" of "
- "port %"PRIu8"\n",
+ "fail to configure RX queue %u of port %u\n",
i, portid);
return ret;
}
diff --git a/examples/netmap_compat/lib/compat_netmap.h b/examples/netmap_compat/lib/compat_netmap.h
index 3dc7a2f4..76b2d2b4 100644
--- a/examples/netmap_compat/lib/compat_netmap.h
+++ b/examples/netmap_compat/lib/compat_netmap.h
@@ -67,7 +67,7 @@ struct rte_netmap_port_conf {
};
int rte_netmap_init(const struct rte_netmap_conf *conf);
-int rte_netmap_init_port(uint8_t portid,
+int rte_netmap_init_port(uint16_t portid,
const struct rte_netmap_port_conf *conf);
int rte_netmap_close(int fd);
diff --git a/examples/packet_ordering/main.c b/examples/packet_ordering/main.c
index b26c33df..3add7be4 100644
--- a/examples/packet_ordering/main.c
+++ b/examples/packet_ordering/main.c
@@ -269,21 +269,22 @@ configure_tx_buffers(struct rte_eth_dev_tx_buffer *tx_buffer[])
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx on port %u\n",
- (unsigned) port_id);
+ port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], MAX_PKTS_BURST);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, NULL);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
return 0;
}
static inline int
-configure_eth_port(uint8_t port_id)
+configure_eth_port(uint16_t port_id)
{
struct ether_addr addr;
const uint16_t rxRings = 1, txRings = 1;
@@ -326,7 +327,7 @@ configure_eth_port(uint8_t port_id)
rte_eth_macaddr_get(port_id, &addr);
printf("Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port_id,
+ port_id,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -401,7 +402,7 @@ rx_thread(struct rte_ring *ring_out)
uint32_t seqn = 0;
uint16_t i, ret = 0;
uint16_t nb_rx_pkts;
- uint8_t port_id;
+ uint16_t port_id;
struct rte_mbuf *pkts[MAX_PKTS_BURST];
RTE_LOG(INFO, REORDERAPP, "%s() started on lcore %u\n", __func__,
@@ -632,8 +633,8 @@ main(int argc, char **argv)
int ret;
unsigned nb_ports;
unsigned int lcore_id, last_lcore_id, master_lcore_id;
- uint8_t port_id;
- uint8_t nb_ports_available;
+ uint16_t port_id;
+ uint16_t nb_ports_available;
struct worker_thread_args worker_args = {NULL, NULL};
struct send_thread_args send_args = {NULL, NULL};
struct rte_ring *rx_to_workers;
@@ -687,7 +688,7 @@ main(int argc, char **argv)
continue;
}
/* init port */
- printf("Initializing port %u... done\n", (unsigned) port_id);
+ printf("Initializing port %u... done\n", port_id);
if (configure_eth_port(port_id) != 0)
rte_exit(EXIT_FAILURE, "Cannot initialize port %"PRIu8"\n",
diff --git a/examples/performance-thread/common/lthread.h b/examples/performance-thread/common/lthread.h
index 5c2c1a5f..0cde5919 100644
--- a/examples/performance-thread/common/lthread.h
+++ b/examples/performance-thread/common/lthread.h
@@ -87,7 +87,7 @@ int _lthread_desched_sleep(struct lthread *lt);
void _lthread_free(struct lthread *lt);
-struct lthread_sched *_lthread_sched_get(int lcore_id);
+struct lthread_sched *_lthread_sched_get(unsigned int lcore_id);
struct lthread_stack *_stack_alloc(void);
diff --git a/examples/performance-thread/common/lthread_diag.c b/examples/performance-thread/common/lthread_diag.c
index bce1a0c3..b5007d77 100644
--- a/examples/performance-thread/common/lthread_diag.c
+++ b/examples/performance-thread/common/lthread_diag.c
@@ -296,8 +296,7 @@ _lthread_diag_default_cb(uint64_t time, struct lthread *lt, int diag_event,
/*
* plug in default diag callback with mask off
*/
-void _lthread_diag_ctor(void)__attribute__((constructor));
-void _lthread_diag_ctor(void)
+RTE_INIT(_lthread_diag_ctor)
{
diag_cb = _lthread_diag_default_cb;
diag_mask = 0;
diff --git a/examples/performance-thread/common/lthread_sched.c b/examples/performance-thread/common/lthread_sched.c
index 98291478..779aeb17 100644
--- a/examples/performance-thread/common/lthread_sched.c
+++ b/examples/performance-thread/common/lthread_sched.c
@@ -117,8 +117,7 @@ uint64_t diag_mask;
/* constructor */
-void lthread_sched_ctor(void) __attribute__ ((constructor));
-void lthread_sched_ctor(void)
+RTE_INIT(lthread_sched_ctor)
{
memset(schedcore, 0, sizeof(schedcore));
rte_atomic16_init(&num_schedulers);
@@ -562,11 +561,14 @@ void lthread_run(void)
* Return the scheduler for this lcore
*
*/
-struct lthread_sched *_lthread_sched_get(int lcore_id)
+struct lthread_sched *_lthread_sched_get(unsigned int lcore_id)
{
- if (lcore_id > LTHREAD_MAX_LCORES)
- return NULL;
- return schedcore[lcore_id];
+ struct lthread_sched *res = NULL;
+
+ if (lcore_id < LTHREAD_MAX_LCORES)
+ res = schedcore[lcore_id];
+
+ return res;
}
/*
@@ -578,10 +580,9 @@ int lthread_set_affinity(unsigned lcoreid)
struct lthread *lt = THIS_LTHREAD;
struct lthread_sched *dest_sched;
- if (unlikely(lcoreid > LTHREAD_MAX_LCORES))
+ if (unlikely(lcoreid >= LTHREAD_MAX_LCORES))
return POSIX_ERRNO(EINVAL);
-
DIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);
dest_sched = schedcore[lcoreid];
diff --git a/examples/performance-thread/common/lthread_tls.c b/examples/performance-thread/common/lthread_tls.c
index 47505f2d..2259fad4 100644
--- a/examples/performance-thread/common/lthread_tls.c
+++ b/examples/performance-thread/common/lthread_tls.c
@@ -62,9 +62,7 @@ RTE_DEFINE_PER_LTHREAD(void *, dummy);
static struct lthread_key key_table[LTHREAD_MAX_KEYS];
-void lthread_tls_ctor(void) __attribute__((constructor));
-
-void lthread_tls_ctor(void)
+RTE_INIT(lthread_tls_ctor)
{
key_pool = NULL;
key_pool_init = 0;
@@ -198,11 +196,12 @@ void _lthread_tls_destroy(struct lthread *lt)
void
*lthread_getspecific(unsigned int k)
{
+ void *res = NULL;
- if (k > LTHREAD_MAX_KEYS)
- return NULL;
+ if (k < LTHREAD_MAX_KEYS)
+ res = THIS_LTHREAD->tls->data[k];
- return THIS_LTHREAD->tls->data[k];
+ return res;
}
/*
@@ -212,7 +211,7 @@ void
*/
int lthread_setspecific(unsigned int k, const void *data)
{
- if (k > LTHREAD_MAX_KEYS)
+ if (k >= LTHREAD_MAX_KEYS)
return POSIX_ERRNO(EINVAL);
int n = THIS_LTHREAD->tls->nb_keys_inuse;
diff --git a/examples/performance-thread/l3fwd-thread/main.c b/examples/performance-thread/l3fwd-thread/main.c
index 7954b974..fa65234f 100644
--- a/examples/performance-thread/l3fwd-thread/main.c
+++ b/examples/performance-thread/l3fwd-thread/main.c
@@ -50,7 +50,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -60,7 +59,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -140,7 +138,7 @@ parse_ptype(struct rte_mbuf *m)
}
static uint16_t
-cb_parse_ptype(__rte_unused uint8_t port, __rte_unused uint16_t queue,
+cb_parse_ptype(__rte_unused uint16_t port, __rte_unused uint16_t queue,
struct rte_mbuf *pkts[], uint16_t nb_pkts,
__rte_unused uint16_t max_pkts, __rte_unused void *user_param)
{
@@ -277,7 +275,7 @@ struct mbuf_table {
};
struct lcore_rx_queue {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
} __rte_cache_aligned;
@@ -287,7 +285,7 @@ struct lcore_rx_queue {
#define MAX_LCORE_PARAMS 1024
struct rx_thread_params {
- uint8_t port_id;
+ uint16_t port_id;
uint8_t queue_id;
uint8_t lcore_id;
uint8_t thread_id;
@@ -648,7 +646,7 @@ struct thread_tx_conf tx_thread[MAX_TX_THREAD];
/* Send burst of packets on an output interface */
static inline int
-send_burst(struct thread_tx_conf *qconf, uint16_t n, uint8_t port)
+send_burst(struct thread_tx_conf *qconf, uint16_t n, uint16_t port)
{
struct rte_mbuf **m_table;
int ret;
@@ -669,7 +667,7 @@ send_burst(struct thread_tx_conf *qconf, uint16_t n, uint8_t port)
/* Enqueue a single packet, and send burst if queue is filled */
static inline int
-send_single_packet(struct rte_mbuf *m, uint8_t port)
+send_single_packet(struct rte_mbuf *m, uint16_t port)
{
uint16_t len;
struct thread_tx_conf *qconf;
@@ -696,7 +694,7 @@ send_single_packet(struct rte_mbuf *m, uint8_t port)
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_LPM) && \
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
static __rte_always_inline void
-send_packetsx4(uint8_t port,
+send_packetsx4(uint16_t port,
struct rte_mbuf *m[], uint32_t num)
{
uint32_t len, j, n;
@@ -832,8 +830,8 @@ is_valid_ipv4_pkt(struct ipv4_hdr *pkt, uint32_t link_len)
static __m128i mask0;
static __m128i mask1;
static __m128i mask2;
-static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
int ret = 0;
@@ -846,11 +844,11 @@ get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
key.xmm = _mm_and_si128(data, mask0);
/* Find destination port */
ret = rte_hash_lookup(ipv4_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv4_l3fwd_out_if[ret]);
}
-static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
lookup_struct_t *ipv6_l3fwd_lookup_struct)
{
int ret = 0;
@@ -873,36 +871,36 @@ get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
/* Find destination port */
ret = rte_hash_lookup(ipv6_l3fwd_lookup_struct, (const void *)&key);
- return (uint8_t)((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
+ return ((ret < 0) ? portid : ipv6_l3fwd_out_if[ret]);
}
#endif
#if (APP_LOOKUP_METHOD == APP_LOOKUP_LPM)
-static inline uint8_t
-get_ipv4_dst_port(void *ipv4_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv4_dst_port(void *ipv4_hdr, uint16_t portid,
lookup_struct_t *ipv4_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t)((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
+ return ((rte_lpm_lookup(ipv4_l3fwd_lookup_struct,
rte_be_to_cpu_32(((struct ipv4_hdr *)ipv4_hdr)->dst_addr),
&next_hop) == 0) ? next_hop : portid);
}
-static inline uint8_t
-get_ipv6_dst_port(void *ipv6_hdr, uint8_t portid,
+static inline uint16_t
+get_ipv6_dst_port(void *ipv6_hdr, uint16_t portid,
lookup6_struct_t *ipv6_l3fwd_lookup_struct)
{
uint32_t next_hop;
- return (uint8_t) ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
+ return ((rte_lpm6_lookup(ipv6_l3fwd_lookup_struct,
((struct ipv6_hdr *)ipv6_hdr)->dst_addr, &next_hop) == 0) ?
next_hop : portid);
}
#endif
-static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
+static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
__attribute__((unused));
#if ((APP_LOOKUP_METHOD == APP_LOOKUP_EXACT_MATCH) && \
@@ -919,11 +917,11 @@ static inline void l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
#define EXCLUDE_8TH_PKT 0x7f
static inline void
-simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
+simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
struct ether_hdr *eth_hdr[8];
struct ipv4_hdr *ipv4_hdr[8];
- uint8_t dst_port[8];
+ uint16_t dst_port[8];
int32_t ret[8];
union ipv4_5tuple_host key[8];
__m128i data[8];
@@ -1042,14 +1040,14 @@ simple_ipv4_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv4_lookup_struct,
&key_array[0], 8, ret);
- dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid : ipv4_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid : ipv4_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid : ipv4_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid : ipv4_l3fwd_out_if[ret[7]]);
+ dst_port[0] = ((ret[0] < 0) ? portid : ipv4_l3fwd_out_if[ret[0]]);
+ dst_port[1] = ((ret[1] < 0) ? portid : ipv4_l3fwd_out_if[ret[1]]);
+ dst_port[2] = ((ret[2] < 0) ? portid : ipv4_l3fwd_out_if[ret[2]]);
+ dst_port[3] = ((ret[3] < 0) ? portid : ipv4_l3fwd_out_if[ret[3]]);
+ dst_port[4] = ((ret[4] < 0) ? portid : ipv4_l3fwd_out_if[ret[4]]);
+ dst_port[5] = ((ret[5] < 0) ? portid : ipv4_l3fwd_out_if[ret[5]]);
+ dst_port[6] = ((ret[6] < 0) ? portid : ipv4_l3fwd_out_if[ret[6]]);
+ dst_port[7] = ((ret[7] < 0) ? portid : ipv4_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port[0]) == 0)
@@ -1146,10 +1144,10 @@ static inline void get_ipv6_5tuple(struct rte_mbuf *m0, __m128i mask0,
}
static inline void
-simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
+simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint16_t portid)
{
int32_t ret[8];
- uint8_t dst_port[8];
+ uint16_t dst_port[8];
struct ether_hdr *eth_hdr[8];
union ipv6_5tuple_host key[8];
@@ -1196,14 +1194,14 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
rte_hash_lookup_bulk(RTE_PER_LCORE(lcore_conf)->ipv6_lookup_struct,
&key_array[0], 4, ret);
- dst_port[0] = (uint8_t) ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
- dst_port[1] = (uint8_t) ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
- dst_port[2] = (uint8_t) ((ret[2] < 0) ? portid : ipv6_l3fwd_out_if[ret[2]]);
- dst_port[3] = (uint8_t) ((ret[3] < 0) ? portid : ipv6_l3fwd_out_if[ret[3]]);
- dst_port[4] = (uint8_t) ((ret[4] < 0) ? portid : ipv6_l3fwd_out_if[ret[4]]);
- dst_port[5] = (uint8_t) ((ret[5] < 0) ? portid : ipv6_l3fwd_out_if[ret[5]]);
- dst_port[6] = (uint8_t) ((ret[6] < 0) ? portid : ipv6_l3fwd_out_if[ret[6]]);
- dst_port[7] = (uint8_t) ((ret[7] < 0) ? portid : ipv6_l3fwd_out_if[ret[7]]);
+ dst_port[0] = ((ret[0] < 0) ? portid : ipv6_l3fwd_out_if[ret[0]]);
+ dst_port[1] = ((ret[1] < 0) ? portid : ipv6_l3fwd_out_if[ret[1]]);
+ dst_port[2] = ((ret[2] < 0) ? portid : ipv6_l3fwd_out_if[ret[2]]);
+ dst_port[3] = ((ret[3] < 0) ? portid : ipv6_l3fwd_out_if[ret[3]]);
+ dst_port[4] = ((ret[4] < 0) ? portid : ipv6_l3fwd_out_if[ret[4]]);
+ dst_port[5] = ((ret[5] < 0) ? portid : ipv6_l3fwd_out_if[ret[5]]);
+ dst_port[6] = ((ret[6] < 0) ? portid : ipv6_l3fwd_out_if[ret[6]]);
+ dst_port[7] = ((ret[7] < 0) ? portid : ipv6_l3fwd_out_if[ret[7]]);
if (dst_port[0] >= RTE_MAX_ETHPORTS ||
(enabled_port_mask & 1 << dst_port[0]) == 0)
@@ -1250,24 +1248,24 @@ simple_ipv6_fwd_8pkts(struct rte_mbuf *m[8], uint8_t portid)
ether_addr_copy(&ports_eth_addr[dst_port[6]], &eth_hdr[6]->s_addr);
ether_addr_copy(&ports_eth_addr[dst_port[7]], &eth_hdr[7]->s_addr);
- send_single_packet(m[0], (uint8_t)dst_port[0]);
- send_single_packet(m[1], (uint8_t)dst_port[1]);
- send_single_packet(m[2], (uint8_t)dst_port[2]);
- send_single_packet(m[3], (uint8_t)dst_port[3]);
- send_single_packet(m[4], (uint8_t)dst_port[4]);
- send_single_packet(m[5], (uint8_t)dst_port[5]);
- send_single_packet(m[6], (uint8_t)dst_port[6]);
- send_single_packet(m[7], (uint8_t)dst_port[7]);
+ send_single_packet(m[0], dst_port[0]);
+ send_single_packet(m[1], dst_port[1]);
+ send_single_packet(m[2], dst_port[2]);
+ send_single_packet(m[3], dst_port[3]);
+ send_single_packet(m[4], dst_port[4]);
+ send_single_packet(m[5], dst_port[5]);
+ send_single_packet(m[6], dst_port[6]);
+ send_single_packet(m[7], dst_port[7]);
}
#endif /* APP_LOOKUP_METHOD */
static __rte_always_inline void
-l3fwd_simple_forward(struct rte_mbuf *m, uint8_t portid)
+l3fwd_simple_forward(struct rte_mbuf *m, uint16_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
- uint8_t dst_port;
+ uint16_t dst_port;
eth_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
@@ -1379,7 +1377,7 @@ rfc1812_process(struct ipv4_hdr *ipv4_hdr, uint16_t *dp, uint32_t ptype)
(ENABLE_MULTI_BUFFER_OPTIMIZE == 1))
static __rte_always_inline uint16_t
-get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
+get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint16_t portid)
{
uint32_t next_hop;
struct ipv6_hdr *ipv6_hdr;
@@ -1406,7 +1404,7 @@ get_dst_port(struct rte_mbuf *pkt, uint32_t dst_ipv4, uint8_t portid)
}
static inline void
-process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint8_t portid)
+process_packet(struct rte_mbuf *pkt, uint16_t *dst_port, uint16_t portid)
{
struct ether_hdr *eth_hdr;
struct ipv4_hdr *ipv4_hdr;
@@ -1473,7 +1471,7 @@ processx4_step1(struct rte_mbuf *pkt[FWDSTEP],
static inline void
processx4_step2(__m128i dip,
uint32_t ipv4_flag,
- uint8_t portid,
+ uint16_t portid,
struct rte_mbuf *pkt[FWDSTEP],
uint16_t dprt[FWDSTEP])
{
@@ -1716,7 +1714,8 @@ port_groupx4(uint16_t pn[FWDSTEP + 1], uint16_t *lp, __m128i dp1, __m128i dp2)
static void
process_burst(struct rte_mbuf *pkts_burst[MAX_PKT_BURST], int nb_rx,
- uint8_t portid) {
+ uint16_t portid)
+{
int j;
@@ -2036,7 +2035,7 @@ static void
lthread_tx_per_ring(void *dummy)
{
int nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct rte_ring *ring;
struct thread_tx_conf *tx_conf;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
@@ -2091,7 +2090,7 @@ lthread_tx(void *args)
struct lthread *lt;
unsigned lcore_id;
- uint8_t portid;
+ uint16_t portid;
struct thread_tx_conf *tx_conf;
tx_conf = (struct thread_tx_conf *)args;
@@ -2138,7 +2137,8 @@ lthread_rx(void *dummy)
int ret;
uint16_t nb_rx;
int i;
- uint8_t portid, queueid;
+ uint16_t portid;
+ uint8_t queueid;
int worker_id;
int len[RTE_MAX_LCORE] = { 0 };
int old_len, new_len;
@@ -2164,7 +2164,8 @@ lthread_rx(void *dummy)
portid = rx_conf->rx_queue_list[i].port_id;
queueid = rx_conf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ RTE_LOG(INFO, L3FWD,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
rte_lcore_id(), portid, queueid);
}
@@ -2323,7 +2324,7 @@ pthread_tx(void *dummy)
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
uint64_t prev_tsc, diff_tsc, cur_tsc;
int nb_rx;
- uint8_t portid;
+ uint16_t portid;
struct thread_tx_conf *tx_conf;
const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) /
@@ -2392,7 +2393,8 @@ pthread_rx(void *dummy)
uint32_t n;
uint32_t nb_rx;
unsigned lcore_id;
- uint8_t portid, queueid;
+ uint8_t queueid;
+ uint16_t portid;
struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
struct thread_rx_conf *rx_conf;
@@ -2411,7 +2413,8 @@ pthread_rx(void *dummy)
portid = rx_conf->rx_queue_list[i].port_id;
queueid = rx_conf->rx_queue_list[i].queue_id;
- RTE_LOG(INFO, L3FWD, " -- lcoreid=%u portid=%hhu rxqueueid=%hhu\n",
+ RTE_LOG(INFO, L3FWD,
+ " -- lcoreid=%u portid=%u rxqueueid=%hhu\n",
lcore_id, portid, queueid);
}
@@ -2539,7 +2542,7 @@ check_port_config(const unsigned nb_ports)
}
static uint8_t
-get_port_n_rx_queues(const uint8_t port)
+get_port_n_rx_queues(const uint16_t port)
{
int queue = -1;
uint16_t i;
@@ -2769,7 +2772,7 @@ parse_rx_config(const char *q_arg)
return -1;
}
rx_thread_params_array[nb_rx_thread_params].port_id =
- (uint8_t)int_fld[FLD_PORT];
+ int_fld[FLD_PORT];
rx_thread_params_array[nb_rx_thread_params].queue_id =
(uint8_t)int_fld[FLD_QUEUE];
rx_thread_params_array[nb_rx_thread_params].lcore_id =
@@ -2853,7 +2856,7 @@ parse_stat_lcore(const char *stat_lcore)
static void
parse_eth_dest(const char *optarg)
{
- uint8_t portid;
+ uint16_t portid;
char *port_end;
uint8_t c, *dest, peer_addr[6];
@@ -3436,11 +3439,12 @@ init_mem(unsigned nb_mbuf)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint16_t portid;
+ uint8_t count, all_ports_up, print_flag = 0;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -3455,14 +3459,13 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", (uint8_t)portid,
- (unsigned)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ portid, link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
- printf("Port %d Link Down\n",
- (uint8_t)portid);
+ printf("Port %d Link Down\n", portid);
continue;
}
/* clear all_ports_up flag if any link down */
@@ -3497,10 +3500,10 @@ main(int argc, char **argv)
int ret;
int i;
unsigned nb_ports;
- uint16_t queueid;
+ uint16_t queueid, portid;
unsigned lcore_id;
uint32_t n_tx_queue, nb_lcores;
- uint8_t portid, nb_rx_queue, queue, socketid;
+ uint8_t nb_rx_queue, queue, socketid;
/* init EAL */
ret = rte_eal_init(argc, argv);
diff --git a/examples/performance-thread/pthread_shim/main.c b/examples/performance-thread/pthread_shim/main.c
index 850b009d..febae39b 100644
--- a/examples/performance-thread/pthread_shim/main.c
+++ b/examples/performance-thread/pthread_shim/main.c
@@ -161,6 +161,7 @@ static void initial_lthread(void *args __attribute__((unused)))
pthread_override_set(1);
uint64_t i;
+ int ret;
/* initialize mutex for shared counter */
print_count = 0;
@@ -187,7 +188,10 @@ static void initial_lthread(void *args __attribute__((unused)))
pthread_attr_setaffinity_np(&attr, sizeof(rte_cpuset_t), &cpuset);
/* create the thread */
- pthread_create(&tid[i], &attr, helloworld_pthread, (void *) i);
+ ret = pthread_create(&tid[i], &attr,
+ helloworld_pthread, (void *) i);
+ if (ret != 0)
+ rte_exit(EXIT_FAILURE, "Cannot create helloworld thread\n");
}
/* wait for 1s to allow threads
diff --git a/examples/performance-thread/pthread_shim/pthread_shim.c b/examples/performance-thread/pthread_shim/pthread_shim.c
index 113bafa0..bc7cf2b0 100644
--- a/examples/performance-thread/pthread_shim/pthread_shim.c
+++ b/examples/performance-thread/pthread_shim/pthread_shim.c
@@ -202,10 +202,7 @@ static void *__libc_dl_handle = RTLD_NEXT;
* The constructor function initialises the
* function pointers for pthread library functions
*/
-void
-pthread_intercept_ctor(void)__attribute__((constructor));
-void
-pthread_intercept_ctor(void)
+RTE_INIT(pthread_intercept_ctor)
{
override = 0;
/*
diff --git a/examples/ptpclient/ptpclient.c b/examples/ptpclient/ptpclient.c
index ddfcdb83..c53dad68 100644
--- a/examples/ptpclient/ptpclient.c
+++ b/examples/ptpclient/ptpclient.c
@@ -158,12 +158,12 @@ struct ptpv2_data_slave_ordinary {
struct clock_id master_clock_id;
struct timeval new_adj;
int64_t delta;
- uint8_t portid;
+ uint16_t portid;
uint16_t seqID_SYNC;
uint16_t seqID_FOLLOWUP;
uint8_t ptpset;
uint8_t kernel_time_set;
- uint8_t current_ptp_port;
+ uint16_t current_ptp_port;
};
static struct ptpv2_data_slave_ordinary ptp_data;
@@ -202,7 +202,7 @@ ns_to_timeval(int64_t nsec)
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf = port_conf_default;
@@ -555,7 +555,7 @@ parse_drsp(struct ptpv2_data_slave_ordinary *ptp_data)
* functionality.
*/
static void
-parse_ptp_frames(uint8_t portid, struct rte_mbuf *m) {
+parse_ptp_frames(uint16_t portid, struct rte_mbuf *m) {
struct ptp_header *ptp_hdr;
struct ether_hdr *eth_hdr;
uint16_t eth_type;
@@ -593,7 +593,7 @@ parse_ptp_frames(uint8_t portid, struct rte_mbuf *m) {
static __attribute__((noreturn)) void
lcore_main(void)
{
- uint8_t portid;
+ uint16_t portid;
unsigned nb_rx;
struct rte_mbuf *m;
@@ -728,7 +728,7 @@ main(int argc, char *argv[])
{
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
diff --git a/examples/qos_meter/main.c b/examples/qos_meter/main.c
index b0909f6a..67b4a75b 100644
--- a/examples/qos_meter/main.c
+++ b/examples/qos_meter/main.c
@@ -116,8 +116,8 @@ static struct rte_eth_conf port_conf = {
#define PKT_TX_BURST_MAX 32
#define TIME_TX_DRAIN 200000ULL
-static uint8_t port_rx;
-static uint8_t port_tx;
+static uint16_t port_rx;
+static uint16_t port_tx;
static struct rte_mbuf *pkts_rx[PKT_RX_BURST_MAX];
struct rte_eth_dev_tx_buffer *tx_buffer;
diff --git a/examples/qos_sched/args.c b/examples/qos_sched/args.c
index 2350d64f..203a3470 100644
--- a/examples/qos_sched/args.c
+++ b/examples/qos_sched/args.c
@@ -252,8 +252,8 @@ app_parse_flow_conf(const char *conf_str)
pconf = &qos_conf[nb_pfc];
- pconf->rx_port = (uint8_t)vals[0];
- pconf->tx_port = (uint8_t)vals[1];
+ pconf->rx_port = vals[0];
+ pconf->tx_port = vals[1];
pconf->rx_core = (uint8_t)vals[2];
pconf->wt_core = (uint8_t)vals[3];
if (ret == 5)
@@ -267,19 +267,19 @@ app_parse_flow_conf(const char *conf_str)
}
if (pconf->rx_port >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, APP, "pfc %u: invalid rx port %"PRIu8" index\n",
+ RTE_LOG(ERR, APP, "pfc %u: invalid rx port %"PRIu16" index\n",
nb_pfc, pconf->rx_port);
return -1;
}
if (pconf->tx_port >= RTE_MAX_ETHPORTS) {
- RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu8" index\n",
+ RTE_LOG(ERR, APP, "pfc %u: invalid tx port %"PRIu16" index\n",
nb_pfc, pconf->tx_port);
return -1;
}
mask = 1lu << pconf->rx_port;
if (app_used_rx_port_mask & mask) {
- RTE_LOG(ERR, APP, "pfc %u: rx port %"PRIu8" is used already\n",
+ RTE_LOG(ERR, APP, "pfc %u: rx port %"PRIu16" is used already\n",
nb_pfc, pconf->rx_port);
return -1;
}
@@ -288,7 +288,7 @@ app_parse_flow_conf(const char *conf_str)
mask = 1lu << pconf->tx_port;
if (app_used_tx_port_mask & mask) {
- RTE_LOG(ERR, APP, "pfc %u: port %"PRIu8" is used already\n",
+ RTE_LOG(ERR, APP, "pfc %u: port %"PRIu16" is used already\n",
nb_pfc, pconf->tx_port);
return -1;
}
diff --git a/examples/qos_sched/cmdline.c b/examples/qos_sched/cmdline.c
index f79d5246..b62d165b 100644
--- a/examples/qos_sched/cmdline.c
+++ b/examples/qos_sched/cmdline.c
@@ -191,7 +191,7 @@ cmdline_parse_inst_t cmd_appstats = {
struct cmd_subportstats_result {
cmdline_fixed_string_t stats_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
};
@@ -220,7 +220,7 @@ cmdline_parse_token_num_t cmd_subportstats_subport_number =
UINT32);
cmdline_parse_token_num_t cmd_subportstats_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_subportstats_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_inst_t cmd_subportstats = {
.f = cmd_subportstats_parsed,
@@ -240,7 +240,7 @@ cmdline_parse_inst_t cmd_subportstats = {
struct cmd_pipestats_result {
cmdline_fixed_string_t stats_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -265,7 +265,7 @@ cmdline_parse_token_string_t cmd_pipestats_port_string =
"port");
cmdline_parse_token_num_t cmd_pipestats_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_pipestats_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_pipestats_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_pipestats_result, subport_string,
"subport");
@@ -299,7 +299,7 @@ cmdline_parse_inst_t cmd_pipestats = {
struct cmd_avg_q_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -327,8 +327,8 @@ cmdline_parse_token_string_t cmd_avg_q_port_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, port_string,
"port");
cmdline_parse_token_num_t cmd_avg_q_port_number =
- TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, port_number,
- UINT8);
+ TOKEN_NUM_INITIALIZER(struct cmd_avg_q_result, port_number,
+ UINT16);
cmdline_parse_token_string_t cmd_avg_q_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_q_result, subport_string,
"subport");
@@ -378,7 +378,7 @@ cmdline_parse_inst_t cmd_avg_q = {
struct cmd_avg_tcpipe_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -405,7 +405,7 @@ cmdline_parse_token_string_t cmd_avg_tcpipe_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_tcpipe_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_tcpipe_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_tcpipe_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_tcpipe_result, subport_string,
"subport");
@@ -447,7 +447,7 @@ cmdline_parse_inst_t cmd_avg_tcpipe = {
struct cmd_avg_pipe_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t pipe_string;
@@ -472,7 +472,7 @@ cmdline_parse_token_string_t cmd_avg_pipe_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_pipe_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_pipe_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_pipe_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_pipe_result, subport_string,
"subport");
@@ -506,7 +506,7 @@ cmdline_parse_inst_t cmd_avg_pipe = {
struct cmd_avg_tcsubport_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
cmdline_fixed_string_t tc_string;
@@ -531,7 +531,7 @@ cmdline_parse_token_string_t cmd_avg_tcsubport_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_tcsubport_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_tcsubport_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_tcsubport_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_tcsubport_result, subport_string,
"subport");
@@ -565,7 +565,7 @@ cmdline_parse_inst_t cmd_avg_tcsubport = {
struct cmd_avg_subport_result {
cmdline_fixed_string_t qavg_string;
cmdline_fixed_string_t port_string;
- uint8_t port_number;
+ uint16_t port_number;
cmdline_fixed_string_t subport_string;
uint32_t subport_number;
};
@@ -588,7 +588,7 @@ cmdline_parse_token_string_t cmd_avg_subport_port_string =
"port");
cmdline_parse_token_num_t cmd_avg_subport_port_number =
TOKEN_NUM_INITIALIZER(struct cmd_avg_subport_result, port_number,
- UINT8);
+ UINT16);
cmdline_parse_token_string_t cmd_avg_subport_subport_string =
TOKEN_STRING_INITIALIZER(struct cmd_avg_subport_result, subport_string,
"subport");
diff --git a/examples/qos_sched/init.c b/examples/qos_sched/init.c
index a82cbd7d..038f0427 100644
--- a/examples/qos_sched/init.c
+++ b/examples/qos_sched/init.c
@@ -100,7 +100,7 @@ static const struct rte_eth_conf port_conf = {
};
static int
-app_init_port(uint8_t portid, struct rte_mempool *mp)
+app_init_port(uint16_t portid, struct rte_mempool *mp)
{
int ret;
struct rte_eth_link link;
@@ -118,6 +118,7 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
rx_conf.rx_thresh.wthresh = rx_thresh.wthresh;
rx_conf.rx_free_thresh = 32;
rx_conf.rx_drop_en = 0;
+ rx_conf.rx_deferred_start = 0;
tx_conf.tx_thresh.pthresh = tx_thresh.pthresh;
tx_conf.tx_thresh.hthresh = tx_thresh.hthresh;
@@ -125,21 +126,24 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
tx_conf.tx_free_thresh = 0;
tx_conf.tx_rs_thresh = 0;
tx_conf.txq_flags = ETH_TXQ_FLAGS_NOMULTSEGS | ETH_TXQ_FLAGS_NOOFFLOADS;
+ tx_conf.tx_deferred_start = 0;
/* init port */
- RTE_LOG(INFO, APP, "Initializing port %"PRIu8"... ", portid);
+ RTE_LOG(INFO, APP, "Initializing port %"PRIu16"... ", portid);
fflush(stdout);
ret = rte_eth_dev_configure(portid, 1, 1, &port_conf);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot configure device: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "Cannot configure device: err=%d, port=%u\n",
+ ret, portid);
rx_size = ring_conf.rx_size;
tx_size = ring_conf.tx_size;
ret = rte_eth_dev_adjust_nb_rx_tx_desc(portid, &rx_size, &tx_size);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_dev_adjust_nb_rx_tx_desc: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_dev_adjust_nb_rx_tx_desc: err=%d,port=%u\n",
+ ret, portid);
ring_conf.rx_size = rx_size;
ring_conf.tx_size = tx_size;
@@ -148,22 +152,25 @@ app_init_port(uint8_t portid, struct rte_mempool *mp)
ret = rte_eth_rx_queue_setup(portid, 0, (uint16_t)ring_conf.rx_size,
rte_eth_dev_socket_id(portid), &rx_conf, mp);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, port=%u\n",
+ ret, portid);
/* init one TX queue */
fflush(stdout);
ret = rte_eth_tx_queue_setup(portid, 0,
(uint16_t)ring_conf.tx_size, rte_eth_dev_socket_id(portid), &tx_conf);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_eth_tx_queue_setup: err=%d, "
- "port=%"PRIu8" queue=%d\n", ret, portid, 0);
+ rte_exit(EXIT_FAILURE,
+ "rte_eth_tx_queue_setup: err=%d, port=%u queue=%d\n",
+ ret, portid, 0);
/* Start device */
ret = rte_eth_dev_start(portid);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "rte_pmd_port_start: "
- "err=%d, port=%"PRIu8"\n", ret, portid);
+ rte_exit(EXIT_FAILURE,
+ "rte_pmd_port_start: err=%d, port=%u\n",
+ ret, portid);
printf("done: ");
@@ -256,7 +263,7 @@ app_init_sched_port(uint32_t portid, uint32_t socketid)
uint32_t pipe, subport;
int err;
- rte_eth_link_get((uint8_t)portid, &link);
+ rte_eth_link_get(portid, &link);
port_params.socket = socketid;
port_params.rate = (uint64_t) link.link_speed * 1000 * 1000 / 8;
diff --git a/examples/qos_sched/main.c b/examples/qos_sched/main.c
index e10cfd44..1e2fb189 100644
--- a/examples/qos_sched/main.c
+++ b/examples/qos_sched/main.c
@@ -125,8 +125,7 @@ app_main_loop(__attribute__((unused))void *dummy)
/* initialize mbuf memory */
if (mode == APP_RX_MODE) {
for (i = 0; i < rx_idx; i++) {
- RTE_LOG(INFO, APP, "flow %u lcoreid %u "
- "reading port %"PRIu8"\n",
+ RTE_LOG(INFO, APP, "flow%u lcoreid%u reading port%u\n",
i, lcore_id, rx_confs[i]->rx_port);
}
@@ -140,8 +139,8 @@ app_main_loop(__attribute__((unused))void *dummy)
if (wt_confs[i]->m_table == NULL)
rte_panic("flow %u unable to allocate memory buffer\n", i);
- RTE_LOG(INFO, APP, "flow %u lcoreid %u sched+write "
- "port %"PRIu8"\n",
+ RTE_LOG(INFO, APP,
+ "flow %u lcoreid %u sched+write port %u\n",
i, lcore_id, wt_confs[i]->tx_port);
}
@@ -155,8 +154,7 @@ app_main_loop(__attribute__((unused))void *dummy)
if (tx_confs[i]->m_table == NULL)
rte_panic("flow %u unable to allocate memory buffer\n", i);
- RTE_LOG(INFO, APP, "flow %u lcoreid %u "
- "writing port %"PRIu8"\n",
+ RTE_LOG(INFO, APP, "flow%u lcoreid%u write port%u\n",
i, lcore_id, tx_confs[i]->tx_port);
}
@@ -186,7 +184,7 @@ app_stat(void)
struct flow_conf *flow = &qos_conf[i];
rte_eth_stats_get(flow->rx_port, &stats);
- printf("\nRX port %"PRIu8": rx: %"PRIu64 " err: %"PRIu64
+ printf("\nRX port %"PRIu16": rx: %"PRIu64 " err: %"PRIu64
" no_mbuf: %"PRIu64 "\n",
flow->rx_port,
stats.ipackets - rx_stats[i].ipackets,
@@ -195,7 +193,7 @@ app_stat(void)
memcpy(&rx_stats[i], &stats, sizeof(stats));
rte_eth_stats_get(flow->tx_port, &stats);
- printf("TX port %"PRIu8": tx: %" PRIu64 " err: %" PRIu64 "\n",
+ printf("TX port %"PRIu16": tx: %" PRIu64 " err: %" PRIu64 "\n",
flow->tx_port,
stats.opackets - tx_stats[i].opackets,
stats.oerrors - tx_stats[i].oerrors);
diff --git a/examples/qos_sched/main.h b/examples/qos_sched/main.h
index 8d02e1ad..77b6e3ee 100644
--- a/examples/qos_sched/main.h
+++ b/examples/qos_sched/main.h
@@ -106,8 +106,8 @@ struct thread_conf
uint32_t n_mbufs;
struct rte_mbuf **m_table;
- uint8_t rx_port;
- uint8_t tx_port;
+ uint16_t rx_port;
+ uint16_t tx_port;
uint16_t rx_queue;
uint16_t tx_queue;
struct rte_ring *rx_ring;
@@ -125,8 +125,8 @@ struct flow_conf
uint32_t rx_core;
uint32_t wt_core;
uint32_t tx_core;
- uint8_t rx_port;
- uint8_t tx_port;
+ uint16_t rx_port;
+ uint16_t tx_port;
uint16_t rx_queue;
uint16_t tx_queue;
struct rte_ring *rx_ring;
@@ -188,13 +188,15 @@ void app_worker_thread(struct thread_conf **qconf);
void app_mixed_thread(struct thread_conf **qconf);
void app_stat(void);
-int subport_stat(uint8_t port_id, uint32_t subport_id);
-int pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
-int qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q);
-int qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc);
-int qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id);
-int qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc);
-int qavg_subport(uint8_t port_id, uint32_t subport_id);
+int subport_stat(uint16_t port_id, uint32_t subport_id);
+int pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc, uint8_t q);
+int qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc);
+int qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id);
+int qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc);
+int qavg_subport(uint16_t port_id, uint32_t subport_id);
#ifdef __cplusplus
}
diff --git a/examples/qos_sched/stats.c b/examples/qos_sched/stats.c
index 5c894455..b5545e10 100644
--- a/examples/qos_sched/stats.c
+++ b/examples/qos_sched/stats.c
@@ -37,7 +37,8 @@
#include "main.h"
int
-qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8_t q)
+qavg_q(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc,
+ uint8_t q)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -74,7 +75,8 @@ qavg_q(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc, uint8
}
int
-qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc)
+qavg_tcpipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id,
+ uint8_t tc)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -114,7 +116,7 @@ qavg_tcpipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id, uint8_t tc)
}
int
-qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+qavg_pipe(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -153,7 +155,7 @@ qavg_pipe(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
}
int
-qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc)
+qavg_tcsubport(uint16_t port_id, uint32_t subport_id, uint8_t tc)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -195,7 +197,7 @@ qavg_tcsubport(uint8_t port_id, uint32_t subport_id, uint8_t tc)
}
int
-qavg_subport(uint8_t port_id, uint32_t subport_id)
+qavg_subport(uint16_t port_id, uint32_t subport_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
@@ -237,7 +239,7 @@ qavg_subport(uint8_t port_id, uint32_t subport_id)
}
int
-subport_stat(uint8_t port_id, uint32_t subport_id)
+subport_stat(uint16_t port_id, uint32_t subport_id)
{
struct rte_sched_subport_stats stats;
struct rte_sched_port *port;
@@ -273,7 +275,7 @@ subport_stat(uint8_t port_id, uint32_t subport_id)
}
int
-pipe_stat(uint8_t port_id, uint32_t subport_id, uint32_t pipe_id)
+pipe_stat(uint16_t port_id, uint32_t subport_id, uint32_t pipe_id)
{
struct rte_sched_queue_stats stats;
struct rte_sched_port *port;
diff --git a/examples/quota_watermark/qw/init.c b/examples/quota_watermark/qw/init.c
index 083a37a9..37b03626 100644
--- a/examples/quota_watermark/qw/init.c
+++ b/examples/quota_watermark/qw/init.c
@@ -73,7 +73,7 @@ static struct rte_eth_fc_conf fc_conf = {
};
-void configure_eth_port(uint8_t port_id)
+void configure_eth_port(uint16_t port_id)
{
int ret;
uint16_t nb_rxd = RX_DESC_PER_QUEUE;
@@ -135,7 +135,7 @@ init_dpdk(void)
rte_exit(EXIT_FAILURE, "Not enough ethernet port available\n");
}
-void init_ring(int lcore_id, uint8_t port_id)
+void init_ring(int lcore_id, uint16_t port_id)
{
struct rte_ring *ring;
char ring_name[RTE_RING_NAMESIZE];
@@ -156,12 +156,12 @@ void init_ring(int lcore_id, uint8_t port_id)
void
pair_ports(void)
{
- uint8_t i, j;
+ uint16_t i, j;
/* Pair ports with their "closest neighbour" in the portmask */
for (i = 0; i < RTE_MAX_ETHPORTS; i++)
if (is_bit_set(i, portmask))
- for (j = (uint8_t) (i + 1); j < RTE_MAX_ETHPORTS; j++)
+ for (j = i + 1; j < RTE_MAX_ETHPORTS; j++)
if (is_bit_set(j, portmask)) {
port_pairs[i] = j;
port_pairs[j] = i;
diff --git a/examples/quota_watermark/qw/init.h b/examples/quota_watermark/qw/init.h
index 6d0af3ab..2bfec2b1 100644
--- a/examples/quota_watermark/qw/init.h
+++ b/examples/quota_watermark/qw/init.h
@@ -34,9 +34,9 @@
#ifndef _INIT_H_
#define _INIT_H_
-void configure_eth_port(uint8_t port_id);
+void configure_eth_port(uint16_t port_id);
void init_dpdk(void);
-void init_ring(int lcore_id, uint8_t port_id);
+void init_ring(int lcore_id, uint16_t port_id);
void pair_ports(void);
void setup_shared_variables(void);
diff --git a/examples/quota_watermark/qw/main.c b/examples/quota_watermark/qw/main.c
index d4fcfde4..fe174526 100644
--- a/examples/quota_watermark/qw/main.c
+++ b/examples/quota_watermark/qw/main.c
@@ -69,13 +69,13 @@ int *quota;
unsigned int *low_watermark;
unsigned int *high_watermark;
-uint8_t port_pairs[RTE_MAX_ETHPORTS];
+uint16_t port_pairs[RTE_MAX_ETHPORTS];
struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
struct rte_mempool *mbuf_pool;
-static void send_pause_frame(uint8_t port_id, uint16_t duration)
+static void send_pause_frame(uint16_t port_id, uint16_t duration)
{
struct rte_mbuf *mbuf;
struct ether_fc_frame *pause_frame;
@@ -155,7 +155,7 @@ receive_stage(__attribute__((unused)) void *args)
{
int i, ret;
- uint8_t port_id;
+ uint16_t port_id;
uint16_t nb_rx_pkts;
unsigned int lcore_id;
@@ -216,7 +216,7 @@ pipeline_stage(__attribute__((unused)) void *args)
int i, ret;
int nb_dq_pkts;
- uint8_t port_id;
+ uint16_t port_id;
unsigned int lcore_id, previous_lcore_id;
unsigned int free;
@@ -279,8 +279,8 @@ send_stage(__attribute__((unused)) void *args)
{
uint16_t nb_dq_pkts;
- uint8_t port_id;
- uint8_t dest_port_id;
+ uint16_t port_id;
+ uint16_t dest_port_id;
unsigned int lcore_id, previous_lcore_id;
@@ -324,7 +324,7 @@ main(int argc, char **argv)
int ret;
unsigned int lcore_id, master_lcore_id, last_lcore_id;
- uint8_t port_id;
+ uint16_t port_id;
rte_log_set_global_level(RTE_LOG_INFO);
diff --git a/examples/quota_watermark/qw/main.h b/examples/quota_watermark/qw/main.h
index 8c8e3116..ebed7b2f 100644
--- a/examples/quota_watermark/qw/main.h
+++ b/examples/quota_watermark/qw/main.h
@@ -45,7 +45,7 @@ extern int *quota;
extern unsigned int *low_watermark;
extern unsigned int *high_watermark;
-extern uint8_t port_pairs[RTE_MAX_ETHPORTS];
+extern uint16_t port_pairs[RTE_MAX_ETHPORTS];
extern struct rte_ring *rings[RTE_MAX_LCORE][RTE_MAX_ETHPORTS];
extern struct rte_mempool *mbuf_pool;
diff --git a/examples/rxtx_callbacks/main.c b/examples/rxtx_callbacks/main.c
index 66992405..ca135d21 100644
--- a/examples/rxtx_callbacks/main.c
+++ b/examples/rxtx_callbacks/main.c
@@ -59,7 +59,7 @@ static struct {
static uint16_t
-add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+add_timestamps(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts,
uint16_t max_pkts __rte_unused, void *_ __rte_unused)
{
@@ -72,7 +72,7 @@ add_timestamps(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
}
static uint16_t
-calc_latency(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
+calc_latency(uint16_t port __rte_unused, uint16_t qidx __rte_unused,
struct rte_mbuf **pkts, uint16_t nb_pkts, void *_ __rte_unused)
{
uint64_t cycles = 0;
@@ -97,7 +97,7 @@ calc_latency(uint8_t port __rte_unused, uint16_t qidx __rte_unused,
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -159,7 +159,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- uint8_t port;
+ uint16_t port;
for (port = 0; port < nb_ports; port++)
if (rte_eth_dev_socket_id(port) > 0 &&
@@ -195,7 +195,7 @@ int
main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
- uint8_t portid;
+ uint16_t portid;
/* init EAL */
int ret = rte_eal_init(argc, argv);
diff --git a/examples/server_node_efd/node/node.c b/examples/server_node_efd/node/node.c
index 86e57c89..5aa1258e 100644
--- a/examples/server_node_efd/node/node.c
+++ b/examples/server_node_efd/node/node.c
@@ -57,7 +57,6 @@
#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_string_fns.h>
@@ -77,7 +76,7 @@ static uint8_t node_id;
#define MBQ_CAPACITY 32
/* maps input ports to output ports for packets */
-static uint8_t output_ports[RTE_MAX_ETHPORTS];
+static uint16_t output_ports[RTE_MAX_ETHPORTS];
/* buffers up a set of packet that are ready to send */
struct rte_eth_dev_tx_buffer *tx_buffer[RTE_MAX_ETHPORTS];
@@ -154,7 +153,7 @@ static void
flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
void *userdata) {
int i;
- uint8_t port_id = (uintptr_t)userdata;
+ uint16_t port_id = (uintptr_t)userdata;
tx_stats->tx_drop[port_id] += count;
@@ -165,7 +164,7 @@ flush_tx_error_callback(struct rte_mbuf **unsent, uint16_t count,
}
static void
-configure_tx_buffer(uint8_t port_id, uint16_t size)
+configure_tx_buffer(uint16_t port_id, uint16_t size)
{
int ret;
@@ -174,16 +173,17 @@ configure_tx_buffer(uint8_t port_id, uint16_t size)
RTE_ETH_TX_BUFFER_SIZE(size), 0,
rte_eth_dev_socket_id(port_id));
if (tx_buffer[port_id] == NULL)
- rte_exit(EXIT_FAILURE, "Cannot allocate buffer for tx "
- "on port %u\n", (unsigned int) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot allocate buffer for tx on port %u\n", port_id);
rte_eth_tx_buffer_init(tx_buffer[port_id], size);
ret = rte_eth_tx_buffer_set_err_callback(tx_buffer[port_id],
flush_tx_error_callback, (void *)(intptr_t)port_id);
if (ret < 0)
- rte_exit(EXIT_FAILURE, "Cannot set error callback for "
- "tx buffer on port %u\n", (unsigned int) port_id);
+ rte_exit(EXIT_FAILURE,
+ "Cannot set error callback for tx buffer on port %u\n",
+ port_id);
}
/*
@@ -282,8 +282,8 @@ static inline void
transmit_packet(struct rte_mbuf *buf)
{
int sent;
- const uint8_t in_port = buf->port;
- const uint8_t out_port = output_ports[in_port];
+ const uint16_t in_port = buf->port;
+ const uint16_t out_port = output_ports[in_port];
struct rte_eth_dev_tx_buffer *buffer = tx_buffer[out_port];
sent = rte_eth_tx_buffer(out_port, node_id, buffer, buf);
@@ -381,7 +381,7 @@ main(int argc, char *argv[])
for (;;) {
uint16_t rx_pkts = PKT_READ_SIZE;
- uint8_t port;
+ uint16_t port;
/*
* Try dequeuing max possible packets first, if that fails,
diff --git a/examples/server_node_efd/server/Makefile b/examples/server_node_efd/server/Makefile
index a2f2f361..9f1fe289 100644
--- a/examples/server_node_efd/server/Makefile
+++ b/examples/server_node_efd/server/Makefile
@@ -49,7 +49,7 @@ APP = server
# all source are stored in SRCS-y
SRCS-y := main.c init.c args.c
-INC := $(wildcard *.h)
+INC := $(sort $(wildcard *.h))
CFLAGS += $(WERROR_FLAGS) -O3
CFLAGS += -I$(SRCDIR)/../shared
diff --git a/examples/server_node_efd/server/init.c b/examples/server_node_efd/server/init.c
index d114e5bf..0bcab8cc 100644
--- a/examples/server_node_efd/server/init.c
+++ b/examples/server_node_efd/server/init.c
@@ -56,7 +56,6 @@
#include <rte_memcpy.h>
#include <rte_mbuf.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ether.h>
#include <rte_ethdev.h>
#include <rte_malloc.h>
@@ -121,7 +120,7 @@ init_mbuf_pools(void)
* - start the port and report its status to stdout
*/
static int
-init_port(uint8_t port_num)
+init_port(uint16_t port_num)
{
/* for port configuration all features are off by default */
const struct rte_eth_conf port_conf = {
@@ -136,7 +135,7 @@ init_port(uint8_t port_num)
uint16_t q;
int retval;
- printf("Port %u init ... ", (unsigned int)port_num);
+ printf("Port %u init ... ", port_num);
fflush(stdout);
/*
@@ -255,11 +254,12 @@ populate_efd_table(void)
/* Check the link status of all ports in up to 9s, and print them finally */
static void
-check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
{
#define CHECK_INTERVAL 100 /* 100ms */
#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
- uint8_t portid, count, all_ports_up, print_flag = 0;
+ uint8_t count, all_ports_up, print_flag = 0;
+ uint16_t portid;
struct rte_eth_link link;
printf("\nChecking link status");
@@ -274,14 +274,15 @@ check_all_ports_link_status(uint8_t port_num, uint32_t port_mask)
/* print link status if flag set */
if (print_flag == 1) {
if (link.link_status)
- printf("Port %d Link Up - speed %u "
- "Mbps - %s\n", info->id[portid],
- (unsigned int)link.link_speed,
+ printf(
+ "Port%d Link Up. Speed %u Mbps - %s\n",
+ info->id[portid],
+ link.link_speed,
(link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
("full-duplex") : ("half-duplex\n"));
else
printf("Port %d Link Down\n",
- (uint8_t)info->id[portid]);
+ info->id[portid]);
continue;
}
/* clear all_ports_up flag if any link down */
diff --git a/examples/server_node_efd/server/main.c b/examples/server_node_efd/server/main.c
index dcdc0a48..aa1c6f57 100644
--- a/examples/server_node_efd/server/main.c
+++ b/examples/server_node_efd/server/main.c
@@ -44,7 +44,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_per_lcore.h>
@@ -59,7 +58,6 @@
#include <rte_mbuf.h>
#include <rte_ether.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_ethdev.h>
#include <rte_byteorder.h>
#include <rte_malloc.h>
@@ -95,7 +93,7 @@ struct efd_stats {
static struct node_rx_buf *cl_rx_buf;
static const char *
-get_printable_mac_addr(uint8_t port)
+get_printable_mac_addr(uint16_t port)
{
static const char err_address[] = "00:00:00:00:00:00";
static char addresses[RTE_MAX_ETHPORTS][sizeof(err_address)];
diff --git a/examples/server_node_efd/shared/common.h b/examples/server_node_efd/shared/common.h
index 8a134799..b1e0abe5 100644
--- a/examples/server_node_efd/shared/common.h
+++ b/examples/server_node_efd/shared/common.h
@@ -65,9 +65,9 @@ struct filter_stats {
struct shared_info {
uint8_t num_nodes;
- uint8_t num_ports;
+ uint16_t num_ports;
uint32_t num_flows;
- uint8_t id[RTE_MAX_ETHPORTS];
+ uint16_t id[RTE_MAX_ETHPORTS];
struct rx_stats rx_stats;
struct tx_stats tx_stats[MAX_NODES];
struct filter_stats filter_stats[MAX_NODES];
diff --git a/examples/vhost_xen/Makefile b/examples/service_cores/Makefile
index ad2466aa..bd4a345d 100644
--- a/examples/vhost_xen/Makefile
+++ b/examples/service_cores/Makefile
@@ -1,7 +1,6 @@
# BSD LICENSE
#
-# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
-# All rights reserved.
+# Copyright(c) 2017 Intel Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
@@ -39,14 +38,17 @@ RTE_TARGET ?= x86_64-native-linuxapp-gcc
include $(RTE_SDK)/mk/rte.vars.mk
# binary name
-APP = vhost-switch
+APP = service_cores
# all source are stored in SRCS-y
-SRCS-y := main.c vhost_monitor.c xenstore_parse.c
+SRCS-y := main.c
-CFLAGS += -O2 -I/usr/local/include -D_FILE_OFFSET_BITS=64 -Wno-unused-parameter
CFLAGS += $(WERROR_FLAGS)
-CFLAGS += -D_GNU_SOURCE
-LDFLAGS += -lxenstore
+
+# workaround for a gcc bug with noreturn attribute
+# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
+ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
+CFLAGS_main.o += -Wno-return-type
+endif
include $(RTE_SDK)/mk/rte.extapp.mk
diff --git a/examples/service_cores/main.c b/examples/service_cores/main.c
new file mode 100644
index 00000000..b617a789
--- /dev/null
+++ b/examples/service_cores/main.c
@@ -0,0 +1,246 @@
+/*
+ * BSD LICENSE
+ *
+ * Copyright(c) 2017 Intel Corporation. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <sys/queue.h>
+
+#include <rte_memory.h>
+#include <rte_launch.h>
+#include <rte_eal.h>
+#include <rte_debug.h>
+#include <rte_cycles.h>
+
+/* allow application scheduling of the services */
+#include <rte_service.h>
+
+/* Allow application registration of its own services. An application does not
+ * have to register services, but it can be useful if it wishes to run a
+ * function on a core that is otherwise in use as a service core. In this
+ * example, all services are dummy services registered by the sample app itself.
+ */
+#include <rte_service_component.h>
+
+#define PROFILE_CORES_MAX 8
+#define PROFILE_SERVICE_PER_CORE 5
+
+/* dummy function to do "work" */
+static int32_t service_func(void *args)
+{
+ RTE_SET_USED(args);
+ rte_delay_us(2000);
+ return 0;
+}
+
+static struct rte_service_spec services[] = {
+ {"service_1", service_func, NULL, 0, 0},
+ {"service_2", service_func, NULL, 0, 0},
+ {"service_3", service_func, NULL, 0, 0},
+ {"service_4", service_func, NULL, 0, 0},
+ {"service_5", service_func, NULL, 0, 0},
+};
+#define NUM_SERVICES RTE_DIM(services)
+
+/* this struct holds the mapping of a particular core to all services */
+struct profile_for_core {
+ uint32_t mapped_services[PROFILE_SERVICE_PER_CORE];
+};
+
+/* struct that can be applied as the service core mapping. Items in this
+ * struct will be passed to the ordinary rte_service_* APIs to configure the
+ * service cores at runtime, based on the requirements.
+ *
+ * These profiles can be considered a "configuration" for the service cores,
+ * where switching profile just changes the number of cores and the mappings
+ * for each of them. As a result, the core requirements and performance of the
+ * application scales.
+ */
+struct profile {
+ char name[64];
+ uint32_t num_cores;
+ struct profile_for_core cores[PROFILE_CORES_MAX];
+};
+
+static struct profile profiles[] = {
+ /* profile 0: high performance */
+ {
+ .name = "High Performance",
+ .num_cores = 5,
+ .cores[0] = {.mapped_services = {1, 0, 0, 0, 0} },
+ .cores[1] = {.mapped_services = {0, 1, 0, 0, 0} },
+ .cores[2] = {.mapped_services = {0, 0, 1, 0, 0} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 1, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 1} },
+ },
+ /* profile 1: mid performance with single service priority */
+ {
+ .name = "Mid-High Performance",
+ .num_cores = 3,
+ .cores[0] = {.mapped_services = {1, 1, 0, 0, 0} },
+ .cores[1] = {.mapped_services = {0, 0, 1, 1, 0} },
+ .cores[2] = {.mapped_services = {0, 0, 0, 0, 1} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 0} },
+ },
+ /* profile 2: mid performance with single service priority */
+ {
+ .name = "Mid-Low Performance",
+ .num_cores = 2,
+ .cores[0] = {.mapped_services = {1, 1, 1, 0, 0} },
+ .cores[1] = {.mapped_services = {1, 1, 0, 1, 1} },
+ .cores[2] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 0} },
+ },
+ /* profile 3: scale down performance on single core */
+ {
+ .name = "Scale down performance",
+ .num_cores = 1,
+ .cores[0] = {.mapped_services = {1, 1, 1, 1, 1} },
+ .cores[1] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[2] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[3] = {.mapped_services = {0, 0, 0, 0, 0} },
+ .cores[4] = {.mapped_services = {0, 0, 0, 0, 0} },
+ },
+};
+#define NUM_PROFILES RTE_DIM(profiles)
+
+static void
+apply_profile(int profile_id)
+{
+ uint32_t i;
+ uint32_t s;
+ int ret;
+ struct profile *p = &profiles[profile_id];
+ const uint8_t core_off = 1;
+
+ for (i = 0; i < p->num_cores; i++) {
+ uint32_t core = i + core_off;
+ ret = rte_service_lcore_add(core);
+ if (ret && ret != -EALREADY)
+ printf("core %d added ret %d\n", core, ret);
+
+ ret = rte_service_lcore_start(core);
+ if (ret && ret != -EALREADY)
+ printf("core %d start ret %d\n", core, ret);
+
+ for (s = 0; s < NUM_SERVICES; s++) {
+ if (rte_service_map_lcore_set(s, core,
+ p->cores[i].mapped_services[s]))
+ printf("failed to map lcore %d\n", core);
+ }
+ }
+
+ for ( ; i < PROFILE_CORES_MAX; i++) {
+ uint32_t core = i + core_off;
+ for (s = 0; s < NUM_SERVICES; s++) {
+ ret = rte_service_map_lcore_set(s, core, 0);
+ if (ret && ret != -EINVAL) {
+ printf("%s %d: map lcore set = %d\n", __func__,
+ __LINE__, ret);
+ }
+ }
+ ret = rte_service_lcore_stop(core);
+ if (ret && ret != -EALREADY) {
+ printf("%s %d: lcore stop = %d\n", __func__,
+ __LINE__, ret);
+ }
+ ret = rte_service_lcore_del(core);
+ if (ret && ret != -EINVAL) {
+ printf("%s %d: lcore del = %d\n", __func__,
+ __LINE__, ret);
+ }
+ }
+}
+
+int
+main(int argc, char **argv)
+{
+ int ret;
+
+ ret = rte_eal_init(argc, argv);
+ if (ret < 0)
+ rte_panic("Cannot init EAL\n");
+
+ uint32_t i;
+ for (i = 0; i < NUM_SERVICES; i++) {
+ services[i].callback_userdata = 0;
+ uint32_t id;
+ ret = rte_service_component_register(&services[i], &id);
+ if (ret)
+ rte_exit(-1, "service register() failed");
+
+ /* set the service itself to be ready to run. In the case of
+ * ethdev, eventdev etc PMDs, this will be set when the
+ * appropriate configure or setup function is called.
+ */
+ rte_service_component_runstate_set(id, 1);
+
+ /* Collect statistics for the service */
+ rte_service_set_stats_enable(id, 1);
+
+ /* the application sets the service to be active. Note that the
+ * previous component_runstate_set() is the PMD indicating
+ * ready, while this function is the application setting the
+ * service to run. Applications can choose to not run a service
+ * by setting runstate to 0 at any time.
+ */
+ ret = rte_service_runstate_set(id, 1);
+ if (ret)
+ return -ENOEXEC;
+ }
+
+ i = 0;
+ while (1) {
+ const char clr[] = { 27, '[', '2', 'J', '\0' };
+ const char topLeft[] = { 27, '[', '1', ';', '1', 'H', '\0' };
+ printf("%s%s", clr, topLeft);
+
+ apply_profile(i);
+ printf("\n==> Profile: %s\n\n", profiles[i].name);
+
+ sleep(1);
+ rte_service_dump(stdout, UINT32_MAX);
+
+ sleep(5);
+ rte_service_dump(stdout, UINT32_MAX);
+
+ i++;
+ if (i >= NUM_PROFILES)
+ i = 0;
+ }
+
+ return 0;
+}
diff --git a/examples/skeleton/basicfwd.c b/examples/skeleton/basicfwd.c
index b4d50de8..e623754c 100644
--- a/examples/skeleton/basicfwd.c
+++ b/examples/skeleton/basicfwd.c
@@ -57,7 +57,7 @@ static const struct rte_eth_conf port_conf_default = {
* coming from the mbuf_pool passed as a parameter.
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_conf port_conf = port_conf_default;
const uint16_t rx_rings = 1, tx_rings = 1;
@@ -104,7 +104,7 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &addr);
printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
" %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
- (unsigned)port,
+ port,
addr.addr_bytes[0], addr.addr_bytes[1],
addr.addr_bytes[2], addr.addr_bytes[3],
addr.addr_bytes[4], addr.addr_bytes[5]);
@@ -122,8 +122,8 @@ port_init(uint8_t port, struct rte_mempool *mbuf_pool)
static __attribute__((noreturn)) void
lcore_main(void)
{
- const uint8_t nb_ports = rte_eth_dev_count();
- uint8_t port;
+ const uint16_t nb_ports = rte_eth_dev_count();
+ uint16_t port;
/*
* Check that the port is on the same NUMA node as the polling thread
@@ -179,7 +179,7 @@ main(int argc, char *argv[])
{
struct rte_mempool *mbuf_pool;
unsigned nb_ports;
- uint8_t portid;
+ uint16_t portid;
/* Initialize the Environment Abstraction Layer (EAL). */
int ret = rte_eal_init(argc, argv);
@@ -204,7 +204,7 @@ main(int argc, char *argv[])
/* Initialize all ports. */
for (portid = 0; portid < nb_ports; portid++)
if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu16 "\n",
portid);
if (rte_lcore_count() > 1)
diff --git a/examples/tep_termination/main.c b/examples/tep_termination/main.c
index aee36c6e..a14d2e37 100644
--- a/examples/tep_termination/main.c
+++ b/examples/tep_termination/main.c
@@ -98,7 +98,7 @@
#define MBUF_HEADROOM_UINT32(mbuf) (*(uint32_t *)((uint8_t *)(mbuf) \
+ sizeof(struct rte_mbuf)))
-#define INVALID_PORT_ID 0xFF
+#define INVALID_PORT_ID 0xFFFF
/* Size of buffers used for snprintfs. */
#define MAX_PRINT_BUFF 6072
@@ -184,7 +184,7 @@ static uint32_t burst_rx_retry_num = BURST_RX_RETRIES;
static char dev_basename[MAX_BASENAME_SZ] = "vhost-net";
static unsigned lcore_ids[RTE_MAX_LCORE];
-uint8_t ports[RTE_MAX_ETHPORTS];
+uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned nb_ports; /**< The number of ports specified in command line */
@@ -1161,7 +1161,7 @@ main(int argc, char *argv[])
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_nb_ports;
int ret;
- uint8_t portid;
+ uint16_t portid;
uint16_t queue_id;
static pthread_t tid;
char thread_name[RTE_MAX_THREAD_NAME_LEN];
diff --git a/examples/tep_termination/vxlan_setup.c b/examples/tep_termination/vxlan_setup.c
index 050bb32d..1ad4ca3c 100644
--- a/examples/tep_termination/vxlan_setup.c
+++ b/examples/tep_termination/vxlan_setup.c
@@ -129,7 +129,7 @@ const uint16_t tenant_id_conf[] = {
* coming from the mbuf_pool passed as parameter
*/
int
-vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
int retval;
uint16_t q;
@@ -202,7 +202,7 @@ vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool)
rte_eth_macaddr_get(port, &ports_eth_addr[port]);
RTE_LOG(INFO, PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
ports_eth_addr[port].addr_bytes[0],
ports_eth_addr[port].addr_bytes[1],
ports_eth_addr[port].addr_bytes[2],
@@ -414,7 +414,7 @@ vxlan_unlink(struct vhost_dev *vdev)
/* Transmit packets after encapsulating */
int
-vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
+vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts) {
int ret = 0;
uint16_t i;
diff --git a/examples/tep_termination/vxlan_setup.h b/examples/tep_termination/vxlan_setup.h
index 8d264619..2e3550d3 100644
--- a/examples/tep_termination/vxlan_setup.h
+++ b/examples/tep_termination/vxlan_setup.h
@@ -37,14 +37,14 @@
extern uint16_t nb_devices;
extern uint16_t udp_port;
extern uint8_t filter_idx;
-extern uint8_t ports[RTE_MAX_ETHPORTS];
+extern uint16_t ports[RTE_MAX_ETHPORTS];
extern struct ether_addr ports_eth_addr[RTE_MAX_ETHPORTS];
extern uint32_t enable_stats;
extern struct device_statistics dev_statistics[MAX_DEVICES];
extern uint8_t rx_decap;
extern uint8_t tx_encap;
-typedef int (*ol_port_configure_t)(uint8_t port,
+typedef int (*ol_port_configure_t)(uint16_t port,
struct rte_mempool *mbuf_pool);
typedef int (*ol_tunnel_setup_t)(struct vhost_dev *vdev,
@@ -52,7 +52,7 @@ typedef int (*ol_tunnel_setup_t)(struct vhost_dev *vdev,
typedef void (*ol_tunnel_destroy_t)(struct vhost_dev *vdev);
-typedef int (*ol_tx_handle_t)(uint8_t port_id, uint16_t queue_id,
+typedef int (*ol_tx_handle_t)(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
typedef int (*ol_rx_handle_t)(int vid, struct rte_mbuf **pkts,
@@ -70,7 +70,7 @@ struct ol_switch_ops {
};
int
-vxlan_port_init(uint8_t port, struct rte_mempool *mbuf_pool);
+vxlan_port_init(uint16_t port, struct rte_mempool *mbuf_pool);
int
vxlan_link(struct vhost_dev *vdev, struct rte_mbuf *m);
@@ -79,7 +79,7 @@ void
vxlan_unlink(struct vhost_dev *vdev);
int
-vxlan_tx_pkts(uint8_t port_id, uint16_t queue_id,
+vxlan_tx_pkts(uint16_t port_id, uint16_t queue_id,
struct rte_mbuf **tx_pkts, uint16_t nb_pkts);
int
vxlan_rx_pkts(int vid, struct rte_mbuf **pkts, uint32_t count);
diff --git a/examples/timer/main.c b/examples/timer/main.c
index 37ad559e..444e9cc1 100644
--- a/examples/timer/main.c
+++ b/examples/timer/main.c
@@ -39,7 +39,6 @@
#include <rte_common.h>
#include <rte_memory.h>
-#include <rte_memzone.h>
#include <rte_launch.h>
#include <rte_eal.h>
#include <rte_per_lcore.h>
diff --git a/examples/vhost/main.c b/examples/vhost/main.c
index 4d1589d0..89a61f0e 100644
--- a/examples/vhost/main.c
+++ b/examples/vhost/main.c
@@ -177,7 +177,7 @@ static struct rte_eth_conf vmdq_conf_default = {
};
static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports = 0; /**< The number of ports specified in command line */
static uint16_t num_pf_queues, num_vmdq_queues;
static uint16_t vmdq_pool_base, vmdq_queue_base;
@@ -265,7 +265,7 @@ validate_num_devices(uint32_t max_nb_devices)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port)
+port_init(uint16_t port)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf;
@@ -392,7 +392,7 @@ port_init(uint8_t port)
RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
" %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
+ port,
vmdq_ports_eth_addr[port].addr_bytes[0],
vmdq_ports_eth_addr[port].addr_bytes[1],
vmdq_ports_eth_addr[port].addr_bytes[2],
@@ -667,7 +667,7 @@ us_vhost_parse_args(int argc, char **argv)
for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
if (enabled_port_mask & (1 << i))
- ports[num_ports++] = (uint8_t)i;
+ ports[num_ports++] = i;
}
if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
@@ -1443,7 +1443,7 @@ main(int argc, char *argv[])
unsigned lcore_id, core_id = 0;
unsigned nb_ports, valid_num_ports;
int ret, i;
- uint8_t portid;
+ uint16_t portid;
static pthread_t tid;
char thread_name[RTE_MAX_THREAD_NAME_LEN];
uint64_t flags = 0;
diff --git a/examples/vhost_scsi/scsi.c b/examples/vhost_scsi/scsi.c
index 54d3104e..fd430ec2 100644
--- a/examples/vhost_scsi/scsi.c
+++ b/examples/vhost_scsi/scsi.c
@@ -307,7 +307,9 @@ vhost_bdev_scsi_inquiry_command(struct vhost_block_dev *bdev,
strncpy((char *)inqdata->t10_vendor_id, "INTEL", 8);
/* PRODUCT IDENTIFICATION */
- strncpy((char *)inqdata->product_id, bdev->product_name, 16);
+ snprintf((char *)inqdata->product_id,
+ RTE_DIM(inqdata->product_id), "%s",
+ bdev->product_name);
/* PRODUCT REVISION LEVEL */
strncpy((char *)inqdata->product_rev, "0001", 4);
diff --git a/examples/vhost_xen/main.c b/examples/vhost_xen/main.c
deleted file mode 100644
index eba4d35a..00000000
--- a/examples/vhost_xen/main.c
+++ /dev/null
@@ -1,1522 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <arpa/inet.h>
-#include <getopt.h>
-#include <linux/if_ether.h>
-#include <linux/if_vlan.h>
-#include <linux/virtio_net.h>
-#include <linux/virtio_ring.h>
-#include <signal.h>
-#include <stdint.h>
-#include <sys/eventfd.h>
-#include <sys/param.h>
-#include <unistd.h>
-
-#include <rte_atomic.h>
-#include <rte_cycles.h>
-#include <rte_ethdev.h>
-#include <rte_log.h>
-#include <rte_string_fns.h>
-#include <rte_pause.h>
-
-#include "main.h"
-#include "virtio-net.h"
-#include "xen_vhost.h"
-
-#define MAX_QUEUES 128
-
-/* the maximum number of external ports supported */
-#define MAX_SUP_PORTS 1
-
-/*
- * Calculate the number of buffers needed per port
- */
-#define NUM_MBUFS_PER_PORT ((MAX_QUEUES*RTE_TEST_RX_DESC_DEFAULT) + \
- (num_switching_cores*MAX_PKT_BURST) + \
- (num_switching_cores*RTE_TEST_TX_DESC_DEFAULT) +\
- (num_switching_cores*MBUF_CACHE_SIZE))
-
-#define MBUF_CACHE_SIZE 64
-
-/*
- * RX and TX Prefetch, Host, and Write-back threshold values should be
- * carefully set for optimal performance. Consult the network
- * controller's datasheet and supporting DPDK documentation for guidance
- * on how these parameters should be set.
- */
-#define RX_PTHRESH 8 /* Default values of RX prefetch threshold reg. */
-#define RX_HTHRESH 8 /* Default values of RX host threshold reg. */
-#define RX_WTHRESH 4 /* Default values of RX write-back threshold reg. */
-
-/*
- * These default values are optimized for use with the Intel(R) 82599 10 GbE
- * Controller and the DPDK ixgbe PMD. Consider using other values for other
- * network controllers and/or network drivers.
- */
-#define TX_PTHRESH 36 /* Default values of TX prefetch threshold reg. */
-#define TX_HTHRESH 0 /* Default values of TX host threshold reg. */
-#define TX_WTHRESH 0 /* Default values of TX write-back threshold reg. */
-
-#define MAX_PKT_BURST 32 /* Max burst size for RX/TX */
-#define MAX_MRG_PKT_BURST 16 /* Max burst for merge buffers. Set to 1 due to performance issue. */
-#define BURST_TX_DRAIN_US 100 /* TX drain every ~100us */
-
-/* State of virtio device. */
-#define DEVICE_NOT_READY 0
-#define DEVICE_READY 1
-#define DEVICE_SAFE_REMOVE 2
-
-/* Config_core_flag status definitions. */
-#define REQUEST_DEV_REMOVAL 1
-#define ACK_DEV_REMOVAL 0
-
-/* Configurable number of RX/TX ring descriptors */
-#define RTE_TEST_RX_DESC_DEFAULT 128
-#define RTE_TEST_TX_DESC_DEFAULT 512
-
-#define INVALID_PORT_ID 0xFF
-
-/* Max number of devices. Limited by vmdq. */
-#define MAX_DEVICES 64
-
-/* Size of buffers used for snprintfs. */
-#define MAX_PRINT_BUFF 6072
-
-
-/* Maximum long option length for option parsing. */
-#define MAX_LONG_OPT_SZ 64
-
-/* Used to compare MAC addresses. */
-#define MAC_ADDR_CMP 0xFFFFFFFFFFFF
-
-/* mask of enabled ports */
-static uint32_t enabled_port_mask = 0;
-
-/*Number of switching cores enabled*/
-static uint32_t num_switching_cores = 0;
-
-/* number of devices/queues to support*/
-static uint32_t num_queues = 0;
-uint32_t num_devices = 0;
-
-/* Enable VM2VM communications. If this is disabled then the MAC address compare is skipped. */
-static uint32_t enable_vm2vm = 1;
-/* Enable stats. */
-static uint32_t enable_stats = 0;
-
-/* empty vmdq configuration structure. Filled in programatically */
-static const struct rte_eth_conf vmdq_conf_default = {
- .rxmode = {
- .mq_mode = ETH_MQ_RX_VMDQ_ONLY,
- .split_hdr_size = 0,
- .header_split = 0, /**< Header Split disabled */
- .hw_ip_checksum = 0, /**< IP checksum offload disabled */
- .hw_vlan_filter = 0, /**< VLAN filtering disabled */
- /*
- * It is necessary for 1G NIC such as I350,
- * this fixes bug of ipv4 forwarding in guest can't
- * forward pakets from one virtio dev to another virtio dev.
- */
- .hw_vlan_strip = 1, /**< VLAN strip enabled. */
- .jumbo_frame = 0, /**< Jumbo Frame Support disabled */
- .hw_strip_crc = 1, /**< CRC stripped by hardware */
- },
-
- .txmode = {
- .mq_mode = ETH_MQ_TX_NONE,
- },
- .rx_adv_conf = {
- /*
- * should be overridden separately in code with
- * appropriate values
- */
- .vmdq_rx_conf = {
- .nb_queue_pools = ETH_8_POOLS,
- .enable_default_pool = 0,
- .default_pool = 0,
- .nb_pool_maps = 0,
- .pool_map = {{0, 0},},
- },
- },
-};
-
-static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
-static unsigned num_ports = 0; /**< The number of ports specified in command line */
-
-const uint16_t vlan_tags[] = {
- 1000, 1001, 1002, 1003, 1004, 1005, 1006, 1007,
- 1008, 1009, 1010, 1011, 1012, 1013, 1014, 1015,
- 1016, 1017, 1018, 1019, 1020, 1021, 1022, 1023,
- 1024, 1025, 1026, 1027, 1028, 1029, 1030, 1031,
- 1032, 1033, 1034, 1035, 1036, 1037, 1038, 1039,
- 1040, 1041, 1042, 1043, 1044, 1045, 1046, 1047,
- 1048, 1049, 1050, 1051, 1052, 1053, 1054, 1055,
- 1056, 1057, 1058, 1059, 1060, 1061, 1062, 1063,
-};
-
-/* ethernet addresses of ports */
-static struct ether_addr vmdq_ports_eth_addr[RTE_MAX_ETHPORTS];
-
-/* heads for the main used and free linked lists for the data path. */
-static struct virtio_net_data_ll *ll_root_used = NULL;
-static struct virtio_net_data_ll *ll_root_free = NULL;
-
-/* Array of data core structures containing information on individual core linked lists. */
-static struct lcore_info lcore_info[RTE_MAX_LCORE];
-
-/* Used for queueing bursts of TX packets. */
-struct mbuf_table {
- unsigned len;
- unsigned txq_id;
- struct rte_mbuf *m_table[MAX_PKT_BURST];
-};
-
-/* TX queue for each data core. */
-struct mbuf_table lcore_tx_queue[RTE_MAX_LCORE];
-
-/* Vlan header struct used to insert vlan tags on TX. */
-struct vlan_ethhdr {
- unsigned char h_dest[ETH_ALEN];
- unsigned char h_source[ETH_ALEN];
- __be16 h_vlan_proto;
- __be16 h_vlan_TCI;
- __be16 h_vlan_encapsulated_proto;
-};
-
-/* Header lengths. */
-#define VLAN_HLEN 4
-#define VLAN_ETH_HLEN 18
-
-/* Per-device statistics struct */
-struct device_statistics {
- uint64_t tx_total;
- rte_atomic64_t rx_total;
- uint64_t tx;
- rte_atomic64_t rx;
-} __rte_cache_aligned;
-struct device_statistics dev_statistics[MAX_DEVICES];
-
-/*
- * Builds up the correct configuration for VMDQ VLAN pool map
- * according to the pool & queue limits.
- */
-static inline int
-get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_devices)
-{
- struct rte_eth_vmdq_rx_conf conf;
- unsigned i;
-
- memset(&conf, 0, sizeof(conf));
- conf.nb_queue_pools = (enum rte_eth_nb_pools)num_devices;
- conf.nb_pool_maps = num_devices;
-
- for (i = 0; i < conf.nb_pool_maps; i++) {
- conf.pool_map[i].vlan_id = vlan_tags[ i ];
- conf.pool_map[i].pools = (1UL << i);
- }
-
- (void)(rte_memcpy(eth_conf, &vmdq_conf_default, sizeof(*eth_conf)));
- (void)(rte_memcpy(&eth_conf->rx_adv_conf.vmdq_rx_conf, &conf,
- sizeof(eth_conf->rx_adv_conf.vmdq_rx_conf)));
- return 0;
-}
-
-/*
- * Validate the device number according to the max pool number gotten form dev_info
- * If the device number is invalid, give the error message and return -1.
- * Each device must have its own pool.
- */
-static inline int
-validate_num_devices(uint32_t max_nb_devices)
-{
- if (num_devices > max_nb_devices) {
- RTE_LOG(ERR, VHOST_PORT, "invalid number of devices\n");
- return -1;
- }
- return 0;
-}
-
-/*
- * Initialises a given port using global settings and with the rx buffers
- * coming from the mbuf_pool passed as parameter
- */
-static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
-{
- struct rte_eth_dev_info dev_info;
- struct rte_eth_rxconf *rxconf;
- struct rte_eth_conf port_conf;
- uint16_t rx_rings, tx_rings = (uint16_t)rte_lcore_count();
- uint16_t rx_ring_size = RTE_TEST_RX_DESC_DEFAULT;
- uint16_t tx_ring_size = RTE_TEST_TX_DESC_DEFAULT;
- int retval;
- uint16_t q;
-
- /* The max pool number from dev_info will be used to validate the pool number specified in cmd line */
- rte_eth_dev_info_get (port, &dev_info);
-
- /*configure the number of supported virtio devices based on VMDQ limits */
- num_devices = dev_info.max_vmdq_pools;
- num_queues = dev_info.max_rx_queues;
-
- retval = validate_num_devices(MAX_DEVICES);
- if (retval < 0)
- return retval;
-
- /* Get port configuration. */
- retval = get_eth_conf(&port_conf, num_devices);
- if (retval < 0)
- return retval;
-
- if (port >= rte_eth_dev_count()) return -1;
-
- rx_rings = (uint16_t)num_queues,
- /* Configure ethernet device. */
- retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
- if (retval != 0)
- return retval;
-
- retval = rte_eth_dev_adjust_nb_rx_tx_desc(port, &rx_ring_size,
- &tx_ring_size);
- if (retval != 0)
- return retval;
- if (rx_ring_size > RTE_TEST_RX_DESC_DEFAULT ||
- tx_ring_size > RTE_TEST_TX_DESC_DEFAULT) {
- RTE_LOG(ERR, VHOST_PORT, "Mbuf pool has an insufficient size for "
- "port %u.\n", port);
- return -1;
- }
-
- rte_eth_dev_info_get(port, &dev_info);
- rxconf = &dev_info.default_rxconf;
- rxconf->rx_drop_en = 1;
- /* Setup the queues. */
- for (q = 0; q < rx_rings; q ++) {
- retval = rte_eth_rx_queue_setup(port, q, rx_ring_size,
- rte_eth_dev_socket_id(port), rxconf,
- mbuf_pool);
- if (retval < 0)
- return retval;
- }
- for (q = 0; q < tx_rings; q ++) {
- retval = rte_eth_tx_queue_setup(port, q, tx_ring_size,
- rte_eth_dev_socket_id(port),
- NULL);
- if (retval < 0)
- return retval;
- }
-
- /* Start the device. */
- retval = rte_eth_dev_start(port);
- if (retval < 0)
- return retval;
-
- rte_eth_macaddr_get(port, &vmdq_ports_eth_addr[port]);
- RTE_LOG(INFO, VHOST_PORT, "Max virtio devices supported: %u\n", num_devices);
- RTE_LOG(INFO, VHOST_PORT, "Port %u MAC: %02"PRIx8" %02"PRIx8" %02"PRIx8
- " %02"PRIx8" %02"PRIx8" %02"PRIx8"\n",
- (unsigned)port,
- vmdq_ports_eth_addr[port].addr_bytes[0],
- vmdq_ports_eth_addr[port].addr_bytes[1],
- vmdq_ports_eth_addr[port].addr_bytes[2],
- vmdq_ports_eth_addr[port].addr_bytes[3],
- vmdq_ports_eth_addr[port].addr_bytes[4],
- vmdq_ports_eth_addr[port].addr_bytes[5]);
-
- return 0;
-}
-
-/*
- * Parse the portmask provided at run time.
- */
-static int
-parse_portmask(const char *portmask)
-{
- char *end = NULL;
- unsigned long pm;
-
- errno = 0;
-
- /* parse hexadecimal string */
- pm = strtoul(portmask, &end, 16);
- if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
- return -1;
-
- if (pm == 0)
- return -1;
-
- return pm;
-
-}
-
-/*
- * Parse num options at run time.
- */
-static int
-parse_num_opt(const char *q_arg, uint32_t max_valid_value)
-{
- char *end = NULL;
- unsigned long num;
-
- errno = 0;
-
- /* parse unsigned int string */
- num = strtoul(q_arg, &end, 10);
- if ((q_arg[0] == '\0') || (end == NULL) || (*end != '\0') || (errno != 0))
- return -1;
-
- if (num > max_valid_value)
- return -1;
-
- return num;
-
-}
-
-/*
- * Display usage
- */
-static void
-us_vhost_usage(const char *prgname)
-{
- RTE_LOG(INFO, VHOST_CONFIG, "%s [EAL options] -- -p PORTMASK --vm2vm [0|1] --stats [0-N] --nb-devices ND\n"
- " -p PORTMASK: Set mask for ports to be used by application\n"
- " --vm2vm [0|1]: disable/enable(default) vm2vm comms\n"
- " --stats [0-N]: 0: Disable stats, N: Time in seconds to print stats\n",
- prgname);
-}
-
-/*
- * Parse the arguments given in the command line of the application.
- */
-static int
-us_vhost_parse_args(int argc, char **argv)
-{
- int opt, ret;
- int option_index;
- unsigned i;
- const char *prgname = argv[0];
- static struct option long_option[] = {
- {"vm2vm", required_argument, NULL, 0},
- {"stats", required_argument, NULL, 0},
- {NULL, 0, 0, 0}
- };
-
- /* Parse command line */
- while ((opt = getopt_long(argc, argv, "p:",long_option, &option_index)) != EOF) {
- switch (opt) {
- /* Portmask */
- case 'p':
- enabled_port_mask = parse_portmask(optarg);
- if (enabled_port_mask == 0) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid portmask\n");
- us_vhost_usage(prgname);
- return -1;
- }
- break;
-
- case 0:
- /* Enable/disable vm2vm comms. */
- if (!strncmp(long_option[option_index].name, "vm2vm", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, 1);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for vm2vm [0|1]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_vm2vm = ret;
- }
- }
-
- /* Enable/disable stats. */
- if (!strncmp(long_option[option_index].name, "stats", MAX_LONG_OPT_SZ)) {
- ret = parse_num_opt(optarg, INT32_MAX);
- if (ret == -1) {
- RTE_LOG(INFO, VHOST_CONFIG, "Invalid argument for stats [0..N]\n");
- us_vhost_usage(prgname);
- return -1;
- } else {
- enable_stats = ret;
- }
- }
- break;
-
- /* Invalid option - print options. */
- default:
- us_vhost_usage(prgname);
- return -1;
- }
- }
-
- for (i = 0; i < RTE_MAX_ETHPORTS; i++) {
- if (enabled_port_mask & (1 << i))
- ports[num_ports++] = (uint8_t)i;
- }
-
- if ((num_ports == 0) || (num_ports > MAX_SUP_PORTS)) {
- RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
- "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
- return -1;
- }
-
- return 0;
-}
-
-/*
- * Update the global var NUM_PORTS and array PORTS according to system ports number
- * and return valid ports number
- */
-static unsigned check_ports_num(unsigned nb_ports)
-{
- unsigned valid_num_ports = num_ports;
- unsigned portid;
-
- if (num_ports > nb_ports) {
- RTE_LOG(INFO, VHOST_PORT, "\nSpecified port number(%u) exceeds total system port number(%u)\n",
- num_ports, nb_ports);
- num_ports = nb_ports;
- }
-
- for (portid = 0; portid < num_ports; portid ++) {
- if (ports[portid] >= nb_ports) {
- RTE_LOG(INFO, VHOST_PORT, "\nSpecified port ID(%u) exceeds max system port ID(%u)\n",
- ports[portid], (nb_ports - 1));
- ports[portid] = INVALID_PORT_ID;
- valid_num_ports--;
- }
- }
- return valid_num_ports;
-}
-
-/*
- * Function to convert guest physical addresses to vhost virtual addresses. This
- * is used to convert virtio buffer addresses.
- */
-static __rte_always_inline uint64_t
-gpa_to_vva(struct virtio_net *dev, uint64_t guest_pa)
-{
- struct virtio_memory_regions *region;
- uint32_t regionidx;
- uint64_t vhost_va = 0;
-
- for (regionidx = 0; regionidx < dev->mem->nregions; regionidx++) {
- region = &dev->mem->regions[regionidx];
- if ((guest_pa >= region->guest_phys_address) &&
- (guest_pa <= region->guest_phys_address_end)) {
- vhost_va = region->address_offset + guest_pa;
- break;
- }
- }
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") GPA %p| VVA %p\n",
- dev->device_fh, (void*)(uintptr_t)guest_pa, (void*)(uintptr_t)vhost_va);
-
- return vhost_va;
-}
-
-/*
- * This function adds buffers to the virtio devices RX virtqueue. Buffers can
- * be received from the physical port or from another virtio device. A packet
- * count is returned to indicate the number of packets that were successfully
- * added to the RX queue.
- */
-static __rte_always_inline uint32_t
-virtio_dev_rx(struct virtio_net *dev, struct rte_mbuf **pkts, uint32_t count)
-{
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- struct rte_mbuf *buff;
- /* The virtio_hdr is initialised to 0. */
- struct virtio_net_hdr_mrg_rxbuf virtio_hdr = {{0,0,0,0,0,0},0};
- uint64_t buff_addr = 0;
- uint64_t buff_hdr_addr = 0;
- uint32_t head[MAX_PKT_BURST], packet_len = 0;
- uint32_t head_idx, packet_success = 0;
- uint16_t avail_idx, res_cur_idx;
- uint16_t res_base_idx, res_end_idx;
- uint16_t free_entries;
- uint8_t success = 0;
- void *userdata;
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_rx()\n", dev->device_fh);
- vq = dev->virtqueue_rx;
- count = (count > MAX_PKT_BURST) ? MAX_PKT_BURST : count;
- /* As many data cores may want access to available buffers, they need to be reserved. */
- do {
-
- res_base_idx = vq->last_used_idx_res;
-
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- free_entries = (avail_idx - res_base_idx);
-
- /*check that we have enough buffers*/
- if (unlikely(count > free_entries))
- count = free_entries;
-
- if (count == 0)
- return 0;
-
- res_end_idx = res_base_idx + count;
- /* vq->last_used_idx_res is atomically updated. */
- success = rte_atomic16_cmpset(&vq->last_used_idx_res, res_base_idx,
- res_end_idx);
- } while (unlikely(success == 0));
- res_cur_idx = res_base_idx;
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Current Index %d| End Index %d\n",
- dev->device_fh, res_cur_idx, res_end_idx);
-
- /* Prefetch available ring to retrieve indexes. */
- rte_prefetch0(&vq->avail->ring[res_cur_idx & (vq->size - 1)]);
-
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (head_idx = 0; head_idx < count; head_idx++)
- head[head_idx] = vq->avail->ring[(res_cur_idx + head_idx) & (vq->size - 1)];
-
- /*Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
-
- while (res_cur_idx != res_end_idx) {
- /* Get descriptor from available ring */
- desc = &vq->desc[head[packet_success]];
- /* Prefetch descriptor address. */
- rte_prefetch0(desc);
-
- buff = pkts[packet_success];
-
- /* Convert from gpa to vva (guest physical addr -> vhost virtual addr) */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void*)(uintptr_t)buff_addr);
-
- {
- /* Copy virtio_hdr to packet and increment buffer address */
- buff_hdr_addr = buff_addr;
- packet_len = rte_pktmbuf_data_len(buff) + vq->vhost_hlen;
-
- /*
- * If the descriptors are chained the header and data are placed in
- * separate buffers.
- */
- if (desc->flags & VRING_DESC_F_NEXT) {
- desc->len = vq->vhost_hlen;
- desc = &vq->desc[desc->next];
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- desc->len = rte_pktmbuf_data_len(buff);
- } else {
- buff_addr += vq->vhost_hlen;
- desc->len = packet_len;
- }
- }
-
- /* Update used ring with desc information */
- vq->used->ring[res_cur_idx & (vq->size - 1)].id = head[packet_success];
- vq->used->ring[res_cur_idx & (vq->size - 1)].len = packet_len;
-
- /* Copy mbuf data to buffer */
- userdata = rte_pktmbuf_mtod(buff, void *);
- rte_memcpy((void *)(uintptr_t)buff_addr, userdata, rte_pktmbuf_data_len(buff));
-
- res_cur_idx++;
- packet_success++;
-
- /* mergeable is disabled then a header is required per buffer. */
- rte_memcpy((void *)(uintptr_t)buff_hdr_addr, (const void *)&virtio_hdr, vq->vhost_hlen);
- if (res_cur_idx < res_end_idx) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
- }
- }
-
- rte_compiler_barrier();
-
- /* Wait until it's our turn to add our buffer to the used ring. */
- while (unlikely(vq->last_used_idx != res_base_idx))
- rte_pause();
-
- *(volatile uint16_t *)&vq->used->idx += count;
-
- vq->last_used_idx = res_end_idx;
-
- return count;
-}
-
-/*
- * Compares a packet destination MAC address to a device MAC address.
- */
-static __rte_always_inline int
-ether_addr_cmp(struct ether_addr *ea, struct ether_addr *eb)
-{
- return ((*(uint64_t *)ea ^ *(uint64_t *)eb) & MAC_ADDR_CMP) == 0;
-}
-
-/*
- * This function registers mac along with a
- * vlan tag to a VMDQ.
- */
-static int
-link_vmdq(struct virtio_net *dev)
-{
- int ret;
- struct virtio_net_data_ll *dev_ll;
-
- dev_ll = ll_root_used;
-
- while (dev_ll != NULL) {
- if ((dev != dev_ll->dev) && ether_addr_cmp(&dev->mac_address, &dev_ll->dev->mac_address)) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") WARNING: This device is using an existing MAC address and has not been registered.\n", dev->device_fh);
- return -1;
- }
- dev_ll = dev_ll->next;
- }
-
- /* vlan_tag currently uses the device_id. */
- dev->vlan_tag = vlan_tags[dev->device_fh];
- dev->vmdq_rx_q = dev->device_fh * (num_queues/num_devices);
-
- /* Print out VMDQ registration info. */
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") MAC_ADDRESS %02x:%02x:%02x:%02x:%02x:%02x and VLAN_TAG %d registered\n",
- dev->device_fh,
- dev->mac_address.addr_bytes[0], dev->mac_address.addr_bytes[1],
- dev->mac_address.addr_bytes[2], dev->mac_address.addr_bytes[3],
- dev->mac_address.addr_bytes[4], dev->mac_address.addr_bytes[5],
- dev->vlan_tag);
-
- /* Register the MAC address. */
- ret = rte_eth_dev_mac_addr_add(ports[0], &dev->mac_address, (uint32_t)dev->device_fh);
- if (ret) {
- RTE_LOG(ERR, VHOST_DATA, "(%"PRIu64") Failed to add device MAC address to VMDQ\n",
- dev->device_fh);
- return -1;
- }
-
- /* Enable stripping of the vlan tag as we handle routing. */
- rte_eth_dev_set_vlan_strip_on_queue(ports[0], dev->vmdq_rx_q, 1);
-
- rte_compiler_barrier();
- /* Set device as ready for RX. */
- dev->ready = DEVICE_READY;
-
- return 0;
-}
-
-/*
- * Removes MAC address and vlan tag from VMDQ. Ensures that nothing is adding buffers to the RX
- * queue before disabling RX on the device.
- */
-static inline void
-unlink_vmdq(struct virtio_net *dev)
-{
- unsigned i = 0;
- unsigned rx_count;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
-
- if (dev->ready == DEVICE_READY) {
- /*clear MAC and VLAN settings*/
- rte_eth_dev_mac_addr_remove(ports[0], &dev->mac_address);
- for (i = 0; i < 6; i++)
- dev->mac_address.addr_bytes[i] = 0;
-
- dev->vlan_tag = 0;
-
- /*Clear out the receive buffers*/
- rx_count = rte_eth_rx_burst(ports[0],
- (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
-
- while (rx_count) {
- for (i = 0; i < rx_count; i++)
- rte_pktmbuf_free(pkts_burst[i]);
-
- rx_count = rte_eth_rx_burst(ports[0],
- (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
- }
-
- dev->ready = DEVICE_NOT_READY;
- }
-}
-
-/*
- * Check if the packet destination MAC address is for a local device. If so then put
- * the packet on that devices RX queue. If not then return.
- */
-static __rte_always_inline unsigned
-virtio_tx_local(struct virtio_net *dev, struct rte_mbuf *m)
-{
- struct virtio_net_data_ll *dev_ll;
- struct ether_hdr *pkt_hdr;
- uint64_t ret = 0;
-
- pkt_hdr = rte_pktmbuf_mtod(m, struct ether_hdr *);
-
- /*get the used devices list*/
- dev_ll = ll_root_used;
-
- while (dev_ll != NULL) {
- if (likely(dev_ll->dev->ready == DEVICE_READY) && ether_addr_cmp(&(pkt_hdr->d_addr),
- &dev_ll->dev->mac_address)) {
-
- /* Drop the packet if the TX packet is destined for the TX device. */
- if (dev_ll->dev->device_fh == dev->device_fh) {
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
- "Source and destination MAC addresses are the same. "
- "Dropping packet.\n",
- dev_ll->dev->device_fh);
- return 0;
- }
-
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
- "MAC address is local\n", dev_ll->dev->device_fh);
-
- if (dev_ll->dev->remove) {
- /*drop the packet if the device is marked for removal*/
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") "
- "Device is marked for removal\n",
- dev_ll->dev->device_fh);
- } else {
- /*send the packet to the local virtio device*/
- ret = virtio_dev_rx(dev_ll->dev, &m, 1);
- if (enable_stats) {
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, 1);
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret);
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx += ret;
- }
- }
-
- return 0;
- }
- dev_ll = dev_ll->next;
- }
-
- return -1;
-}
-
-/*
- * This function routes the TX packet to the correct interface. This may be a local device
- * or the physical port.
- */
-static __rte_always_inline void
-virtio_tx_route(struct virtio_net* dev, struct rte_mbuf *m, struct rte_mempool *mbuf_pool, uint16_t vlan_tag)
-{
- struct mbuf_table *tx_q;
- struct vlan_ethhdr *vlan_hdr;
- struct rte_mbuf **m_table;
- struct rte_mbuf *mbuf;
- unsigned len, ret;
- const uint16_t lcore_id = rte_lcore_id();
-
- /*check if destination is local VM*/
- if (enable_vm2vm && (virtio_tx_local(dev, m) == 0)) {
- return;
- }
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") TX: "
- "MAC address is external\n", dev->device_fh);
-
- /*Add packet to the port tx queue*/
- tx_q = &lcore_tx_queue[lcore_id];
- len = tx_q->len;
-
- /* Allocate an mbuf and populate the structure. */
- mbuf = rte_pktmbuf_alloc(mbuf_pool);
- if(!mbuf)
- return;
-
- mbuf->data_len = m->data_len + VLAN_HLEN;
- mbuf->pkt_len = mbuf->data_len;
-
- /* Copy ethernet header to mbuf. */
- rte_memcpy(rte_pktmbuf_mtod(mbuf, void*),
- rte_pktmbuf_mtod(m, const void*), ETH_HLEN);
-
-
- /* Setup vlan header. Bytes need to be re-ordered for network with htons()*/
- vlan_hdr = rte_pktmbuf_mtod(mbuf, struct vlan_ethhdr *);
- vlan_hdr->h_vlan_encapsulated_proto = vlan_hdr->h_vlan_proto;
- vlan_hdr->h_vlan_proto = htons(ETH_P_8021Q);
- vlan_hdr->h_vlan_TCI = htons(vlan_tag);
-
- /* Copy the remaining packet contents to the mbuf. */
- rte_memcpy(rte_pktmbuf_mtod_offset(mbuf, void *, VLAN_ETH_HLEN),
- rte_pktmbuf_mtod_offset(m, const void *, ETH_HLEN),
- (m->data_len - ETH_HLEN));
- tx_q->m_table[len] = mbuf;
- len++;
- if (enable_stats) {
- dev_statistics[dev->device_fh].tx_total++;
- dev_statistics[dev->device_fh].tx++;
- }
-
- if (unlikely(len == MAX_PKT_BURST)) {
- m_table = (struct rte_mbuf **)tx_q->m_table;
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id, m_table, (uint16_t) len);
- /* Free any buffers not handled by TX and update the port stats. */
- if (unlikely(ret < len)) {
- do {
- rte_pktmbuf_free(m_table[ret]);
- } while (++ret < len);
- }
-
- len = 0;
- }
-
- tx_q->len = len;
- return;
-}
-
-static __rte_always_inline void
-virtio_dev_tx(struct virtio_net* dev, struct rte_mempool *mbuf_pool)
-{
- struct rte_mbuf m;
- struct vhost_virtqueue *vq;
- struct vring_desc *desc;
- uint64_t buff_addr = 0;
- uint32_t head[MAX_PKT_BURST];
- uint32_t used_idx;
- uint32_t i;
- uint16_t free_entries, packet_success = 0;
- uint16_t avail_idx;
-
- vq = dev->virtqueue_tx;
- avail_idx = *((volatile uint16_t *)&vq->avail->idx);
-
- /* If there are no available buffers then return. */
- if (vq->last_used_idx == avail_idx)
- return;
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") virtio_dev_tx()\n",
- dev->device_fh);
-
- /* Prefetch available ring to retrieve head indexes. */
- rte_prefetch0(&vq->avail->ring[vq->last_used_idx & (vq->size - 1)]);
-
- /*get the number of free entries in the ring*/
- free_entries = avail_idx - vq->last_used_idx;
- free_entries = unlikely(free_entries < MAX_PKT_BURST) ? free_entries : MAX_PKT_BURST;
-
- RTE_LOG_DP(DEBUG, VHOST_DATA, "(%" PRIu64 ") Buffers available %d\n",
- dev->device_fh, free_entries);
- /* Retrieve all of the head indexes first to avoid caching issues. */
- for (i = 0; i < free_entries; i++)
- head[i] = vq->avail->ring[(vq->last_used_idx + i) & (vq->size - 1)];
-
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success]]);
-
- while (packet_success < free_entries) {
- desc = &vq->desc[head[packet_success]];
- /* Prefetch descriptor address. */
- rte_prefetch0(desc);
-
- if (packet_success < (free_entries - 1)) {
- /* Prefetch descriptor index. */
- rte_prefetch0(&vq->desc[head[packet_success+1]]);
- }
-
- /* Update used index buffer information. */
- used_idx = vq->last_used_idx & (vq->size - 1);
- vq->used->ring[used_idx].id = head[packet_success];
- vq->used->ring[used_idx].len = 0;
-
- /* Discard first buffer as it is the virtio header */
- desc = &vq->desc[desc->next];
-
- /* Buffer address translation. */
- buff_addr = gpa_to_vva(dev, desc->addr);
- /* Prefetch buffer address. */
- rte_prefetch0((void*)(uintptr_t)buff_addr);
-
- /* Setup dummy mbuf. This is copied to a real mbuf if transmitted out the physical port. */
- m.data_len = desc->len;
- m.data_off = 0;
- m.nb_segs = 1;
-
- virtio_tx_route(dev, &m, mbuf_pool, 0);
-
- vq->last_used_idx++;
- packet_success++;
- }
-
- rte_compiler_barrier();
- vq->used->idx += packet_success;
- /* Kick guest if required. */
-}
-
-/*
- * This function is called by each data core. It handles all RX/TX registered with the
- * core. For TX the specific lcore linked list is used. For RX, MAC addresses are compared
- * with all devices in the main linked list.
- */
-static int
-switch_worker(__attribute__((unused)) void *arg)
-{
- struct rte_mempool *mbuf_pool = arg;
- struct virtio_net *dev = NULL;
- struct rte_mbuf *pkts_burst[MAX_PKT_BURST];
- struct virtio_net_data_ll *dev_ll;
- struct mbuf_table *tx_q;
- volatile struct lcore_ll_info *lcore_ll;
- const uint64_t drain_tsc = (rte_get_tsc_hz() + US_PER_S - 1) / US_PER_S * BURST_TX_DRAIN_US;
- uint64_t prev_tsc, diff_tsc, cur_tsc, ret_count = 0;
- unsigned ret, i;
- const uint16_t lcore_id = rte_lcore_id();
- const uint16_t num_cores = (uint16_t)rte_lcore_count();
- uint16_t rx_count = 0;
-
- RTE_LOG(INFO, VHOST_DATA, "Procesing on Core %u started \n", lcore_id);
- lcore_ll = lcore_info[lcore_id].lcore_ll;
- prev_tsc = 0;
-
- tx_q = &lcore_tx_queue[lcore_id];
- for (i = 0; i < num_cores; i ++) {
- if (lcore_ids[i] == lcore_id) {
- tx_q->txq_id = i;
- break;
- }
- }
-
- while(1) {
- cur_tsc = rte_rdtsc();
- /*
- * TX burst queue drain
- */
- diff_tsc = cur_tsc - prev_tsc;
- if (unlikely(diff_tsc > drain_tsc)) {
-
- if (tx_q->len) {
- RTE_LOG_DP(DEBUG, VHOST_DATA,
- "TX queue drained after timeout with burst size %u\n",
- tx_q->len);
-
- /*Tx any packets in the queue*/
- ret = rte_eth_tx_burst(ports[0], (uint16_t)tx_q->txq_id,
- (struct rte_mbuf **)tx_q->m_table,
- (uint16_t)tx_q->len);
- if (unlikely(ret < tx_q->len)) {
- do {
- rte_pktmbuf_free(tx_q->m_table[ret]);
- } while (++ret < tx_q->len);
- }
-
- tx_q->len = 0;
- }
-
- prev_tsc = cur_tsc;
-
- }
-
- /*
- * Inform the configuration core that we have exited the linked list and that no devices are
- * in use if requested.
- */
- if (lcore_ll->dev_removal_flag == REQUEST_DEV_REMOVAL)
- lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
-
- /*
- * Process devices
- */
- dev_ll = lcore_ll->ll_root_used;
-
- while (dev_ll != NULL) {
- /*get virtio device ID*/
- dev = dev_ll->dev;
-
- if (unlikely(dev->remove)) {
- dev_ll = dev_ll->next;
- unlink_vmdq(dev);
- dev->ready = DEVICE_SAFE_REMOVE;
- continue;
- }
- if (likely(dev->ready == DEVICE_READY)) {
- /*Handle guest RX*/
- rx_count = rte_eth_rx_burst(ports[0],
- (uint16_t)dev->vmdq_rx_q, pkts_burst, MAX_PKT_BURST);
-
- if (rx_count) {
- ret_count = virtio_dev_rx(dev, pkts_burst, rx_count);
- if (enable_stats) {
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx_total, rx_count);
- rte_atomic64_add(&dev_statistics[dev_ll->dev->device_fh].rx, ret_count);
- }
- while (likely(rx_count)) {
- rx_count--;
- rte_pktmbuf_free_seg(pkts_burst[rx_count]);
- }
-
- }
- }
-
- if (likely(!dev->remove))
- /*Handle guest TX*/
- virtio_dev_tx(dev, mbuf_pool);
-
- /*move to the next device in the list*/
- dev_ll = dev_ll->next;
- }
- }
-
- return 0;
-}
-
-/*
- * Add an entry to a used linked list. A free entry must first be found in the free linked list
- * using get_data_ll_free_entry();
- */
-static void
-add_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- /* Set next as NULL and use a compiler barrier to avoid reordering. */
- ll_dev->next = NULL;
- rte_compiler_barrier();
-
- /* If ll == NULL then this is the first device. */
- if (ll) {
- /* Increment to the tail of the linked list. */
- while ((ll->next != NULL) )
- ll = ll->next;
-
- ll->next = ll_dev;
- } else {
- *ll_root_addr = ll_dev;
- }
-}
-
-/*
- * Remove an entry from a used linked list. The entry must then be added to the free linked list
- * using put_data_ll_free_entry().
- */
-static void
-rm_data_ll_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev, struct virtio_net_data_ll *ll_dev_last)
-{
- struct virtio_net_data_ll *ll = *ll_root_addr;
-
- if (ll_dev == ll)
- *ll_root_addr = ll_dev->next;
- else
- ll_dev_last->next = ll_dev->next;
-}
-
-/*
- * Find and return an entry from the free linked list.
- */
-static struct virtio_net_data_ll *
-get_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
- struct virtio_net_data_ll *ll_dev;
-
- if (ll_free == NULL)
- return NULL;
-
- ll_dev = ll_free;
- *ll_root_addr = ll_free->next;
-
- return ll_dev;
-}
-
-/*
- * Place an entry back on to the free linked list.
- */
-static void
-put_data_ll_free_entry(struct virtio_net_data_ll **ll_root_addr, struct virtio_net_data_ll *ll_dev)
-{
- struct virtio_net_data_ll *ll_free = *ll_root_addr;
-
- ll_dev->next = ll_free;
- *ll_root_addr = ll_dev;
-}
-
-/*
- * Creates a linked list of a given size.
- */
-static struct virtio_net_data_ll *
-alloc_data_ll(uint32_t size)
-{
- struct virtio_net_data_ll *ll_new;
- uint32_t i;
-
- /* Malloc and then chain the linked list. */
- ll_new = malloc(size * sizeof(struct virtio_net_data_ll));
- if (ll_new == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for ll_new.\n");
- return NULL;
- }
-
- for (i = 0; i < size - 1; i++) {
- ll_new[i].dev = NULL;
- ll_new[i].next = &ll_new[i+1];
- }
- ll_new[i].next = NULL;
-
- return ll_new;
-}
-
-/*
- * Create the main linked list along with each individual cores linked list. A used and a free list
- * are created to manage entries.
- */
-static int
-init_data_ll (void)
-{
- int lcore;
-
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll = malloc(sizeof(struct lcore_ll_info));
- if (lcore_info[lcore].lcore_ll == NULL) {
- RTE_LOG(ERR, VHOST_CONFIG, "Failed to allocate memory for lcore_ll.\n");
- return -1;
- }
-
- lcore_info[lcore].lcore_ll->device_num = 0;
- lcore_info[lcore].lcore_ll->dev_removal_flag = ACK_DEV_REMOVAL;
- lcore_info[lcore].lcore_ll->ll_root_used = NULL;
- if (num_devices % num_switching_cores)
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll((num_devices / num_switching_cores) + 1);
- else
- lcore_info[lcore].lcore_ll->ll_root_free = alloc_data_ll(num_devices / num_switching_cores);
- }
-
- /* Allocate devices up to a maximum of MAX_DEVICES. */
- ll_root_free = alloc_data_ll(MIN((num_devices), MAX_DEVICES));
-
- return 0;
-}
-/*
- * Remove a device from the specific data core linked list and from the main linked list. The
- * rx/tx thread must be set the flag to indicate that it is safe to remove the device.
- * used.
- */
-static void
-destroy_device (volatile struct virtio_net *dev)
-{
- struct virtio_net_data_ll *ll_lcore_dev_cur;
- struct virtio_net_data_ll *ll_main_dev_cur;
- struct virtio_net_data_ll *ll_lcore_dev_last = NULL;
- struct virtio_net_data_ll *ll_main_dev_last = NULL;
- int lcore;
-
- dev->flags &= ~VIRTIO_DEV_RUNNING;
-
- /*set the remove flag. */
- dev->remove = 1;
-
- while(dev->ready != DEVICE_SAFE_REMOVE) {
- rte_pause();
- }
-
- /* Search for entry to be removed from lcore ll */
- ll_lcore_dev_cur = lcore_info[dev->coreid].lcore_ll->ll_root_used;
- while (ll_lcore_dev_cur != NULL) {
- if (ll_lcore_dev_cur->dev == dev) {
- break;
- } else {
- ll_lcore_dev_last = ll_lcore_dev_cur;
- ll_lcore_dev_cur = ll_lcore_dev_cur->next;
- }
- }
-
- /* Search for entry to be removed from main ll */
- ll_main_dev_cur = ll_root_used;
- ll_main_dev_last = NULL;
- while (ll_main_dev_cur != NULL) {
- if (ll_main_dev_cur->dev == dev) {
- break;
- } else {
- ll_main_dev_last = ll_main_dev_cur;
- ll_main_dev_cur = ll_main_dev_cur->next;
- }
- }
-
- if (ll_lcore_dev_cur == NULL || ll_main_dev_cur == NULL) {
- RTE_LOG(ERR, XENHOST, "%s: could find device in per_cpu list or main_list\n", __func__);
- return;
- }
-
- /* Remove entries from the lcore and main ll. */
- rm_data_ll_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_used, ll_lcore_dev_cur, ll_lcore_dev_last);
- rm_data_ll_entry(&ll_root_used, ll_main_dev_cur, ll_main_dev_last);
-
- /* Set the dev_removal_flag on each lcore. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- lcore_info[lcore].lcore_ll->dev_removal_flag = REQUEST_DEV_REMOVAL;
- }
-
- /*
- * Once each core has set the dev_removal_flag to ACK_DEV_REMOVAL we can be sure that
- * they can no longer access the device removed from the linked lists and that the devices
- * are no longer in use.
- */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- while (lcore_info[lcore].lcore_ll->dev_removal_flag != ACK_DEV_REMOVAL) {
- rte_pause();
- }
- }
-
- /* Add the entries back to the lcore and main free ll.*/
- put_data_ll_free_entry(&lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->ll_root_free, ll_lcore_dev_cur);
- put_data_ll_free_entry(&ll_root_free, ll_main_dev_cur);
-
- /* Decrement number of device on the lcore. */
- lcore_info[ll_lcore_dev_cur->dev->coreid].lcore_ll->device_num--;
-
- RTE_LOG(INFO, VHOST_DATA, " #####(%"PRIu64") Device has been removed from data core\n", dev->device_fh);
-}
-
-/*
- * A new device is added to a data core. First the device is added to the main linked list
- * and the allocated to a specific data core.
- */
-static int
-new_device (struct virtio_net *dev)
-{
- struct virtio_net_data_ll *ll_dev;
- int lcore, core_add = 0;
- uint32_t device_num_min = num_devices;
-
- /* Add device to main ll */
- ll_dev = get_data_ll_free_entry(&ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") No free entry found in linked list. Device limit "
- "of %d devices per core has been reached\n",
- dev->device_fh, num_devices);
- return -1;
- }
- ll_dev->dev = dev;
- add_data_ll_entry(&ll_root_used, ll_dev);
-
- /*reset ready flag*/
- dev->ready = DEVICE_NOT_READY;
- dev->remove = 0;
-
- /* Find a suitable lcore to add the device. */
- RTE_LCORE_FOREACH_SLAVE(lcore) {
- if (lcore_info[lcore].lcore_ll->device_num < device_num_min) {
- device_num_min = lcore_info[lcore].lcore_ll->device_num;
- core_add = lcore;
- }
- }
- /* Add device to lcore ll */
- ll_dev->dev->coreid = core_add;
- ll_dev = get_data_ll_free_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_free);
- if (ll_dev == NULL) {
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Failed to add device to data core\n", dev->device_fh);
- destroy_device(dev);
- return -1;
- }
- ll_dev->dev = dev;
- add_data_ll_entry(&lcore_info[ll_dev->dev->coreid].lcore_ll->ll_root_used, ll_dev);
-
- /* Initialize device stats */
- memset(&dev_statistics[dev->device_fh], 0, sizeof(struct device_statistics));
-
- lcore_info[ll_dev->dev->coreid].lcore_ll->device_num++;
- dev->flags |= VIRTIO_DEV_RUNNING;
-
- RTE_LOG(INFO, VHOST_DATA, "(%"PRIu64") Device has been added to data core %d\n", dev->device_fh, dev->coreid);
-
- link_vmdq(dev);
-
- return 0;
-}
-
-/*
- * These callback allow devices to be added to the data core when configuration
- * has been fully complete.
- */
-static const struct virtio_net_device_ops virtio_net_device_ops =
-{
- .new_device = new_device,
- .destroy_device = destroy_device,
-};
-
-/*
- * This is a thread will wake up after a period to print stats if the user has
- * enabled them.
- */
-static void
-print_stats(void)
-{
- struct virtio_net_data_ll *dev_ll;
- uint64_t tx_dropped, rx_dropped;
- uint64_t tx, tx_total, rx, rx_total;
- uint32_t device_fh;
- const char clr[] = { 27, '[', '2', 'J', '\0' };
- const char top_left[] = { 27, '[', '1', ';', '1', 'H','\0' };
-
- while(1) {
- sleep(enable_stats);
-
- /* Clear screen and move to top left */
- printf("%s%s", clr, top_left);
-
- printf("\nDevice statistics ====================================");
-
- dev_ll = ll_root_used;
- while (dev_ll != NULL) {
- device_fh = (uint32_t)dev_ll->dev->device_fh;
- tx_total = dev_statistics[device_fh].tx_total;
- tx = dev_statistics[device_fh].tx;
- tx_dropped = tx_total - tx;
- rx_total = rte_atomic64_read(&dev_statistics[device_fh].rx_total);
- rx = rte_atomic64_read(&dev_statistics[device_fh].rx);
- rx_dropped = rx_total - rx;
-
- printf("\nStatistics for device %"PRIu32" ------------------------------"
- "\nTX total: %"PRIu64""
- "\nTX dropped: %"PRIu64""
- "\nTX successful: %"PRIu64""
- "\nRX total: %"PRIu64""
- "\nRX dropped: %"PRIu64""
- "\nRX successful: %"PRIu64"",
- device_fh,
- tx_total,
- tx_dropped,
- tx,
- rx_total,
- rx_dropped,
- rx);
-
- dev_ll = dev_ll->next;
- }
- printf("\n======================================================\n");
- }
-}
-
-
-int init_virtio_net(struct virtio_net_device_ops const * const ops);
-
-/*
- * Main function, does initialisation and calls the per-lcore functions.
- */
-int
-main(int argc, char *argv[])
-{
- struct rte_mempool *mbuf_pool;
- unsigned lcore_id, core_id = 0;
- unsigned nb_ports, valid_num_ports;
- int ret;
- uint8_t portid;
- static pthread_t tid;
- char thread_name[RTE_MAX_THREAD_NAME_LEN];
-
- /* init EAL */
- ret = rte_eal_init(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Error with EAL initialization\n");
- argc -= ret;
- argv += ret;
-
- /* parse app arguments */
- ret = us_vhost_parse_args(argc, argv);
- if (ret < 0)
- rte_exit(EXIT_FAILURE, "Invalid argument\n");
-
- for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id ++)
- if (rte_lcore_is_enabled(lcore_id))
- lcore_ids[core_id ++] = lcore_id;
-
- if (rte_lcore_count() > RTE_MAX_LCORE)
- rte_exit(EXIT_FAILURE,"Not enough cores\n");
-
- /*set the number of swithcing cores available*/
- num_switching_cores = rte_lcore_count()-1;
-
- /* Get the number of physical ports. */
- nb_ports = rte_eth_dev_count();
-
- /*
- * Update the global var NUM_PORTS and global array PORTS
- * and get value of var VALID_NUM_PORTS according to system ports number
- */
- valid_num_ports = check_ports_num(nb_ports);
-
- if ((valid_num_ports == 0) || (valid_num_ports > MAX_SUP_PORTS)) {
- RTE_LOG(INFO, VHOST_PORT, "Current enabled port number is %u,"
- "but only %u port can be enabled\n",num_ports, MAX_SUP_PORTS);
- return -1;
- }
-
- /* Create the mbuf pool. */
- mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL",
- NUM_MBUFS_PER_PORT * valid_num_ports, MBUF_CACHE_SIZE, 0,
- RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
- if (mbuf_pool == NULL)
- rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
-
- /* initialize all ports */
- for (portid = 0; portid < nb_ports; portid++) {
- /* skip ports that are not enabled */
- if ((enabled_port_mask & (1 << portid)) == 0) {
- RTE_LOG(INFO, VHOST_PORT, "Skipping disabled port %d\n", portid);
- continue;
- }
- if (port_init(portid, mbuf_pool) != 0)
- rte_exit(EXIT_FAILURE, "Cannot initialize network ports\n");
- }
-
- /* Initialise all linked lists. */
- if (init_data_ll() == -1)
- rte_exit(EXIT_FAILURE, "Failed to initialize linked list\n");
-
- /* Initialize device stats */
- memset(&dev_statistics, 0, sizeof(dev_statistics));
-
- /* Enable stats if the user option is set. */
- if (enable_stats) {
- ret = pthread_create(&tid, NULL, (void *)print_stats, NULL);
- if (ret != 0)
- rte_exit(EXIT_FAILURE,
- "Cannot create print-stats thread\n");
-
- /* Set thread_name for aid in debugging. */
- snprintf(thread_name, RTE_MAX_THREAD_NAME_LEN, "print-xen-stats");
- ret = rte_thread_setname(tid, thread_name);
- if (ret != 0)
- RTE_LOG(DEBUG, VHOST_CONFIG,
- "Cannot set print-stats name\n");
- }
-
- /* Launch all data cores. */
- RTE_LCORE_FOREACH_SLAVE(lcore_id) {
- rte_eal_remote_launch(switch_worker, mbuf_pool, lcore_id);
- }
-
- init_virtio_xen(&virtio_net_device_ops);
-
- virtio_monitor_loop();
- return 0;
-}
diff --git a/examples/vhost_xen/main.h b/examples/vhost_xen/main.h
deleted file mode 100644
index 5ff48fd9..00000000
--- a/examples/vhost_xen/main.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _MAIN_H_
-#define _MAIN_H_
-
-/* Macros for printing using RTE_LOG */
-#define RTE_LOGTYPE_VHOST_CONFIG RTE_LOGTYPE_USER1
-#define RTE_LOGTYPE_VHOST_DATA RTE_LOGTYPE_USER2
-#define RTE_LOGTYPE_VHOST_PORT RTE_LOGTYPE_USER3
-
-/*
- * Device linked list structure for data path.
- */
-struct virtio_net_data_ll
-{
- struct virtio_net *dev; /* Pointer to device created by configuration core. */
- struct virtio_net_data_ll *next; /* Pointer to next device in linked list. */
-};
-
-/*
- * Structure containing data core specific information.
- */
-struct lcore_ll_info
-{
- struct virtio_net_data_ll *ll_root_free; /* Pointer to head in free linked list. */
- struct virtio_net_data_ll *ll_root_used; /* Pointer to head of used linked list. */
- uint32_t device_num; /* Number of devices on lcore. */
- volatile uint8_t dev_removal_flag; /* Flag to synchronize device removal. */
-};
-
-struct lcore_info
-{
- struct lcore_ll_info *lcore_ll; /* Pointer to data core specific lcore_ll_info struct */
-};
-#endif /* _MAIN_H_ */
diff --git a/examples/vhost_xen/vhost_monitor.c b/examples/vhost_xen/vhost_monitor.c
deleted file mode 100644
index fb9606bf..00000000
--- a/examples/vhost_xen/vhost_monitor.c
+++ /dev/null
@@ -1,595 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdlib.h>
-#include <stdio.h>
-#include <dirent.h>
-#include <unistd.h>
-#include <sys/eventfd.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-#include <linux/virtio_ring.h>
-#include <linux/virtio_pci.h>
-#include <linux/virtio_net.h>
-
-#include <rte_ethdev.h>
-#include <rte_log.h>
-#include <rte_malloc.h>
-#include <rte_string_fns.h>
-
-#include "virtio-net.h"
-#include "xen_vhost.h"
-
-struct virtio_watch {
- struct xs_handle *xs;
- int watch_fd;
-};
-
-
-/* device ops to add/remove device to/from data core. */
-static struct virtio_net_device_ops const *notify_ops;
-
-/* root address of the linked list in the configuration core. */
-static struct virtio_net_config_ll *ll_root = NULL;
-
-/* root address of VM. */
-static struct xen_guestlist guest_root;
-
-static struct virtio_watch watch;
-
-static void
-vq_vring_init(struct vhost_virtqueue *vq, unsigned int num, uint8_t *p,
- unsigned long align)
-{
- vq->size = num;
- vq->desc = (struct vring_desc *) p;
- vq->avail = (struct vring_avail *) (p +
- num * sizeof(struct vring_desc));
- vq->used = (void *)
- RTE_ALIGN_CEIL( (uintptr_t)(&vq->avail->ring[num]), align);
-
-}
-
-static int
-init_watch(void)
-{
- struct xs_handle *xs;
- int ret;
- int fd;
-
- /* get a connection to the daemon */
- xs = xs_daemon_open();
- if (xs == NULL) {
- RTE_LOG(ERR, XENHOST, "xs_daemon_open failed\n");
- return -1;
- }
-
- ret = xs_watch(xs, "/local/domain", "mytoken");
- if (ret == 0) {
- RTE_LOG(ERR, XENHOST, "%s: xs_watch failed\n", __func__);
- xs_daemon_close(xs);
- return -1;
- }
-
- /* We are notified of read availability on the watch via the file descriptor. */
- fd = xs_fileno(xs);
- watch.xs = xs;
- watch.watch_fd = fd;
-
- TAILQ_INIT(&guest_root);
- return 0;
-}
-
-static struct xen_guest *
-get_xen_guest(int dom_id)
-{
- struct xen_guest *guest = NULL;
-
- TAILQ_FOREACH(guest, &guest_root, next) {
- if(guest->dom_id == dom_id)
- return guest;
- }
-
- return NULL;
-}
-
-
-static struct xen_guest *
-add_xen_guest(int32_t dom_id)
-{
- struct xen_guest *guest = NULL;
-
- if ((guest = get_xen_guest(dom_id)) != NULL)
- return guest;
-
- guest = calloc(1, sizeof(struct xen_guest));
- if (guest) {
- RTE_LOG(ERR, XENHOST, " %s: return newly created guest with %d rings\n", __func__, guest->vring_num);
- TAILQ_INSERT_TAIL(&guest_root, guest, next);
- guest->dom_id = dom_id;
- }
-
- return guest;
-}
-
-static void
-cleanup_device(struct virtio_net_config_ll *ll_dev)
-{
- if (ll_dev == NULL)
- return;
- if (ll_dev->dev.virtqueue_rx) {
- rte_free(ll_dev->dev.virtqueue_rx);
- ll_dev->dev.virtqueue_rx = NULL;
- }
- if (ll_dev->dev.virtqueue_tx) {
- rte_free(ll_dev->dev.virtqueue_tx);
- ll_dev->dev.virtqueue_tx = NULL;
- }
- free(ll_dev);
-}
-
-/*
- * Add entry containing a device to the device configuration linked list.
- */
-static void
-add_config_ll_entry(struct virtio_net_config_ll *new_ll_dev)
-{
- struct virtio_net_config_ll *ll_dev = ll_root;
-
- /* If ll_dev == NULL then this is the first device so go to else */
- if (ll_dev) {
- /* If the 1st device_id != 0 then we insert our device here. */
- if (ll_dev->dev.device_fh != 0) {
- new_ll_dev->dev.device_fh = 0;
- new_ll_dev->next = ll_dev;
- ll_root = new_ll_dev;
- } else {
- /* increment through the ll until we find un unused device_id,
- * insert the device at that entry
- */
- while ((ll_dev->next != NULL) && (ll_dev->dev.device_fh == (ll_dev->next->dev.device_fh - 1)))
- ll_dev = ll_dev->next;
-
- new_ll_dev->dev.device_fh = ll_dev->dev.device_fh + 1;
- new_ll_dev->next = ll_dev->next;
- ll_dev->next = new_ll_dev;
- }
- } else {
- ll_root = new_ll_dev;
- ll_root->dev.device_fh = 0;
- }
-}
-
-
-/*
- * Remove an entry from the device configuration linked list.
- */
-static struct virtio_net_config_ll *
-rm_config_ll_entry(struct virtio_net_config_ll *ll_dev, struct virtio_net_config_ll *ll_dev_last)
-{
- /* First remove the device and then clean it up. */
- if (ll_dev == ll_root) {
- ll_root = ll_dev->next;
- cleanup_device(ll_dev);
- return ll_root;
- } else {
- ll_dev_last->next = ll_dev->next;
- cleanup_device(ll_dev);
- return ll_dev_last->next;
- }
-}
-
-/*
- * Retrieves an entry from the devices configuration linked list.
- */
-static struct virtio_net_config_ll *
-get_config_ll_entry(unsigned int virtio_idx, unsigned int dom_id)
-{
- struct virtio_net_config_ll *ll_dev = ll_root;
-
- /* Loop through linked list until the dom_id is found. */
- while (ll_dev != NULL) {
- if (ll_dev->dev.dom_id == dom_id && ll_dev->dev.virtio_idx == virtio_idx)
- return ll_dev;
- ll_dev = ll_dev->next;
- }
-
- return NULL;
-}
-
-/*
- * Initialise all variables in device structure.
- */
-static void
-init_dev(struct virtio_net *dev)
-{
- RTE_SET_USED(dev);
-}
-
-
-static struct
-virtio_net_config_ll *new_device(unsigned int virtio_idx, struct xen_guest *guest)
-{
- struct virtio_net_config_ll *new_ll_dev;
- struct vhost_virtqueue *virtqueue_rx, *virtqueue_tx;
- size_t size, vq_ring_size, vq_size = VQ_DESC_NUM;
- void *vq_ring_virt_mem;
- uint64_t gpa;
- uint32_t i;
-
- /* Setup device and virtqueues. */
- new_ll_dev = calloc(1, sizeof(struct virtio_net_config_ll));
- virtqueue_rx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
- virtqueue_tx = rte_zmalloc(NULL, sizeof(struct vhost_virtqueue), RTE_CACHE_LINE_SIZE);
- if (new_ll_dev == NULL || virtqueue_rx == NULL || virtqueue_tx == NULL)
- goto err;
-
- new_ll_dev->dev.virtqueue_rx = virtqueue_rx;
- new_ll_dev->dev.virtqueue_tx = virtqueue_tx;
- new_ll_dev->dev.dom_id = guest->dom_id;
- new_ll_dev->dev.virtio_idx = virtio_idx;
- /* Initialise device and virtqueues. */
- init_dev(&new_ll_dev->dev);
-
- size = vring_size(vq_size, VIRTIO_PCI_VRING_ALIGN);
- vq_ring_size = RTE_ALIGN_CEIL(size, VIRTIO_PCI_VRING_ALIGN);
- (void)vq_ring_size;
-
- vq_ring_virt_mem = guest->vring[virtio_idx].rxvring_addr;
- vq_vring_init(virtqueue_rx, vq_size, vq_ring_virt_mem, VIRTIO_PCI_VRING_ALIGN);
- virtqueue_rx->size = vq_size;
- virtqueue_rx->vhost_hlen = sizeof(struct virtio_net_hdr);
-
- vq_ring_virt_mem = guest->vring[virtio_idx].txvring_addr;
- vq_vring_init(virtqueue_tx, vq_size, vq_ring_virt_mem, VIRTIO_PCI_VRING_ALIGN);
- virtqueue_tx->size = vq_size;
- memcpy(&new_ll_dev->dev.mac_address, &guest->vring[virtio_idx].addr, sizeof(struct ether_addr));
-
- /* virtio_memory has to be one per domid */
- new_ll_dev->dev.mem = malloc(sizeof(struct virtio_memory) + sizeof(struct virtio_memory_regions) * MAX_XENVIRT_MEMPOOL);
- new_ll_dev->dev.mem->nregions = guest->pool_num;
- for (i = 0; i < guest->pool_num; i++) {
- gpa = new_ll_dev->dev.mem->regions[i].guest_phys_address =
- (uint64_t)((uintptr_t)guest->mempool[i].gva);
- new_ll_dev->dev.mem->regions[i].guest_phys_address_end =
- gpa + guest->mempool[i].mempfn_num * getpagesize();
- new_ll_dev->dev.mem->regions[i].address_offset =
- (uint64_t)((uintptr_t)guest->mempool[i].hva -
- (uintptr_t)gpa);
- }
-
- new_ll_dev->next = NULL;
-
- /* Add entry to device configuration linked list. */
- add_config_ll_entry(new_ll_dev);
- return new_ll_dev;
-err:
- free(new_ll_dev);
- rte_free(virtqueue_rx);
- rte_free(virtqueue_tx);
-
- return NULL;
-}
-
-static void
-destroy_guest(struct xen_guest *guest)
-{
- uint32_t i;
-
- for (i = 0; i < guest->vring_num; i++)
- cleanup_vring(&guest->vring[i]);
- /* clean mempool */
- for (i = 0; i < guest->pool_num; i++)
- cleanup_mempool(&guest->mempool[i]);
- free(guest);
-
- return;
-}
-
-/*
- * This function will cleanup the device and remove it from device configuration linked list.
- */
-static void
-destroy_device(unsigned int virtio_idx, unsigned int dom_id)
-{
- struct virtio_net_config_ll *ll_dev_cur_ctx, *ll_dev_last = NULL;
- struct virtio_net_config_ll *ll_dev_cur = ll_root;
-
- /* clean virtio device */
- struct xen_guest *guest = NULL;
- guest = get_xen_guest(dom_id);
- if (guest == NULL)
- return;
-
- /* Find the linked list entry for the device to be removed. */
- ll_dev_cur_ctx = get_config_ll_entry(virtio_idx, dom_id);
- while (ll_dev_cur != NULL) {
- /* If the device is found or a device that doesn't exist is found then it is removed. */
- if (ll_dev_cur == ll_dev_cur_ctx) {
- if ((ll_dev_cur->dev.flags & VIRTIO_DEV_RUNNING))
- notify_ops->destroy_device(&(ll_dev_cur->dev));
- ll_dev_cur = rm_config_ll_entry(ll_dev_cur, ll_dev_last);
- } else {
- ll_dev_last = ll_dev_cur;
- ll_dev_cur = ll_dev_cur->next;
- }
- }
- RTE_LOG(INFO, XENHOST, " %s guest:%p vring:%p rxvring:%p txvring:%p flag:%p\n",
- __func__, guest, &guest->vring[virtio_idx], guest->vring[virtio_idx].rxvring_addr, guest->vring[virtio_idx].txvring_addr, guest->vring[virtio_idx].flag);
- cleanup_vring(&guest->vring[virtio_idx]);
- guest->vring[virtio_idx].removed = 1;
- guest->vring_num -= 1;
-}
-
-
-
-
-static void
-watch_unmap_event(void)
-{
- int i;
- struct xen_guest *guest = NULL;
- bool remove_request;
-
- TAILQ_FOREACH(guest, &guest_root, next) {
- for (i = 0; i < MAX_VIRTIO; i++) {
- if (guest->vring[i].dom_id && guest->vring[i].removed == 0 && *guest->vring[i].flag == 0) {
- RTE_LOG(INFO, XENHOST, "\n\n");
- RTE_LOG(INFO, XENHOST, " #####%s: (%d, %d) to be removed\n",
- __func__,
- guest->vring[i].dom_id,
- i);
- destroy_device(i, guest->dom_id);
- RTE_LOG(INFO, XENHOST, " %s: DOM %u, vring num: %d\n",
- __func__,
- guest->dom_id,
- guest->vring_num);
- }
- }
- }
-
-_find_next_remove:
- guest = NULL;
- remove_request = false;
- TAILQ_FOREACH(guest, &guest_root, next) {
- if (guest->vring_num == 0) {
- remove_request = true;
- break;
- }
- }
- if (remove_request == true) {
- TAILQ_REMOVE(&guest_root, guest, next);
- RTE_LOG(INFO, XENHOST, " #####%s: destroy guest (%d)\n", __func__, guest->dom_id);
- destroy_guest(guest);
- goto _find_next_remove;
- }
- return;
-}
-
-/*
- * OK, if the guest starts first, it is ok.
- * if host starts first, it is ok.
- * if guest starts, and has run for sometime, and host stops and restarts,
- * then last_used_idx 0? how to solve this. */
-
-static void virtio_init(void)
-{
- uint32_t len, e_num;
- uint32_t i,j;
- char **dom;
- char *status;
- int dom_id;
- char path[PATH_MAX];
- char node[PATH_MAX];
- xs_transaction_t th;
- struct xen_guest *guest;
- struct virtio_net_config_ll *net_config;
- char *end;
- int val;
-
- /* init env for watch the node */
- if (init_watch() < 0)
- return;
-
- dom = xs_directory(watch.xs, XBT_NULL, "/local/domain", &e_num);
-
- for (i = 0; i < e_num; i++) {
- errno = 0;
- dom_id = strtol(dom[i], &end, 0);
- if (errno != 0 || end == NULL || dom_id == 0)
- continue;
-
- for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
- snprintf(node, PATH_MAX, "%s%d", VIRTIO_START, j);
- snprintf(path, PATH_MAX, XEN_VM_NODE_FMT,
- dom_id, node);
-
- th = xs_transaction_start(watch.xs);
- status = xs_read(watch.xs, th, path, &len);
- xs_transaction_end(watch.xs, th, false);
-
- if (status == NULL)
- break;
-
- /* if there's any valid virtio device */
- errno = 0;
- val = strtol(status, &end, 0);
- if (errno != 0 || end == NULL || dom_id == 0)
- val = 0;
- if (val == 1) {
- guest = add_xen_guest(dom_id);
- if (guest == NULL)
- continue;
- RTE_LOG(INFO, XENHOST, " there's a new virtio existed, new a virtio device\n\n");
-
- RTE_LOG(INFO, XENHOST, " parse_vringnode dom_id %d virtioidx %d\n",dom_id,j);
- if (parse_vringnode(guest, j)) {
- RTE_LOG(ERR, XENHOST, " there is invalid information in xenstore\n");
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
-
- continue;
- }
-
- /*if pool_num > 0, then mempool has already been parsed*/
- if (guest->pool_num == 0 && parse_mempoolnode(guest)) {
- RTE_LOG(ERR, XENHOST, " there is error information in xenstore\n");
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
- continue;
- }
-
- net_config = new_device(j, guest);
- /* every thing is ready now, added into data core */
- notify_ops->new_device(&net_config->dev);
- }
- }
- }
-
- free(dom);
- return;
-}
-
-void
-virtio_monitor_loop(void)
-{
- char **vec;
- xs_transaction_t th;
- char *buf;
- unsigned int len;
- unsigned int dom_id;
- uint32_t virtio_idx;
- struct xen_guest *guest;
- struct virtio_net_config_ll *net_config;
- enum fieldnames {
- FLD_NULL = 0,
- FLD_LOCAL,
- FLD_DOMAIN,
- FLD_ID,
- FLD_CONTROL,
- FLD_DPDK,
- FLD_NODE,
- _NUM_FLD
- };
- char *str_fld[_NUM_FLD];
- char *str;
- char *end;
-
- virtio_init();
- while (1) {
- watch_unmap_event();
-
- usleep(50);
- vec = xs_check_watch(watch.xs);
-
- if (vec == NULL)
- continue;
-
- th = xs_transaction_start(watch.xs);
-
- buf = xs_read(watch.xs, th, vec[XS_WATCH_PATH],&len);
- xs_transaction_end(watch.xs, th, false);
-
- if (buf) {
- /* theres' some node for vhost existed */
- if (rte_strsplit(vec[XS_WATCH_PATH], strnlen(vec[XS_WATCH_PATH], PATH_MAX),
- str_fld, _NUM_FLD, '/') == _NUM_FLD) {
- if (strstr(str_fld[FLD_NODE], VIRTIO_START)) {
- errno = 0;
- str = str_fld[FLD_ID];
- dom_id = strtoul(str, &end, 0);
- if (errno != 0 || end == NULL || end == str ) {
- RTE_LOG(INFO, XENHOST, "invalid domain id\n");
- continue;
- }
-
- errno = 0;
- str = str_fld[FLD_NODE] + sizeof(VIRTIO_START) - 1;
- virtio_idx = strtoul(str, &end, 0);
- if (errno != 0 || end == NULL || end == str
- || virtio_idx > MAX_VIRTIO) {
- RTE_LOG(INFO, XENHOST, "invalid virtio idx\n");
- continue;
- }
- RTE_LOG(INFO, XENHOST, " #####virtio dev (%d, %d) is started\n", dom_id, virtio_idx);
-
- guest = add_xen_guest(dom_id);
- if (guest == NULL)
- continue;
- guest->dom_id = dom_id;
- if (parse_vringnode(guest, virtio_idx)) {
- RTE_LOG(ERR, XENHOST, " there is invalid information in xenstore\n");
- /*guest newly created? guest existed ?*/
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
- continue;
- }
- /*if pool_num > 0, then mempool has already been parsed*/
- if (guest->pool_num == 0 && parse_mempoolnode(guest)) {
- RTE_LOG(ERR, XENHOST, " there is error information in xenstore\n");
- TAILQ_REMOVE(&guest_root, guest, next);
- destroy_guest(guest);
- continue;
- }
-
-
- net_config = new_device(virtio_idx, guest);
- RTE_LOG(INFO, XENHOST, " Add to dataplane core\n");
- notify_ops->new_device(&net_config->dev);
-
- }
- }
- }
-
- free(vec);
- }
- return;
-}
-
-/*
- * Register ops so that we can add/remove device to data core.
- */
-int
-init_virtio_xen(struct virtio_net_device_ops const *const ops)
-{
- notify_ops = ops;
- if (xenhost_init())
- return -1;
- return 0;
-}
diff --git a/examples/vhost_xen/virtio-net.h b/examples/vhost_xen/virtio-net.h
deleted file mode 100644
index ab697260..00000000
--- a/examples/vhost_xen/virtio-net.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _VIRTIO_NET_H_
-#define _VIRTIO_NET_H_
-
-#include <stdint.h>
-
-#define VQ_DESC_NUM 256
-/* Used to indicate that the device is running on a data core */
-#define VIRTIO_DEV_RUNNING 1
-
-/*
- * Structure contains variables relevant to TX/RX virtqueues.
- */
-struct vhost_virtqueue
-{
- struct vring_desc *desc; /* Virtqueue descriptor ring. */
- struct vring_avail *avail; /* Virtqueue available ring. */
- struct vring_used *used; /* Virtqueue used ring. */
- uint32_t size; /* Size of descriptor ring. */
- uint32_t vhost_hlen; /* Vhost header length (varies depending on RX merge buffers. */
- volatile uint16_t last_used_idx; /* Last index used on the available ring */
- volatile uint16_t last_used_idx_res; /* Used for multiple devices reserving buffers. */
-} __rte_cache_aligned;
-
-/*
- * Device structure contains all configuration information relating to the device.
- */
-struct virtio_net
-{
- struct vhost_virtqueue *virtqueue_tx; /* Contains all TX virtqueue information. */
- struct vhost_virtqueue *virtqueue_rx; /* Contains all RX virtqueue information. */
- struct virtio_memory *mem; /* QEMU memory and memory region information. */
- struct ether_addr mac_address; /* Device MAC address (Obtained on first TX packet). */
- uint32_t flags; /* Device flags. Only used to check if device is running on data core. */
- uint32_t vlan_tag; /* Vlan tag for device. Currently set to device_id (0-63). */
- uint32_t vmdq_rx_q;
- uint64_t device_fh; /* device identifier. */
- uint16_t coreid;
- volatile uint8_t ready; /* A device is set as ready if the MAC address has been set. */
- volatile uint8_t remove; /* Device is marked for removal from the data core. */
- uint32_t virtio_idx; /* Index of virtio device */
- uint32_t dom_id; /* Domain id of xen guest */
-} ___rte_cache_aligned;
-
-/*
- * Device linked list structure for configuration.
- */
-struct virtio_net_config_ll
-{
- struct virtio_net dev; /* Virtio device. */
- struct virtio_net_config_ll *next; /* Next entry on linked list. */
-};
-
-/*
- * Information relating to memory regions including offsets to addresses in QEMUs memory file.
- */
-struct virtio_memory_regions {
- uint64_t guest_phys_address; /* Base guest physical address of region. */
- uint64_t guest_phys_address_end; /* End guest physical address of region. */
- uint64_t memory_size; /* Size of region. */
- uint64_t userspace_address; /* Base userspace address of region. */
- uint64_t address_offset; /* Offset of region for address translation. */
-};
-
-/*
- * Memory structure includes region and mapping information.
- */
-struct virtio_memory {
- uint32_t nregions; /* Number of memory regions. */
- struct virtio_memory_regions regions[0]; /* Memory region information. */
-};
-
-/*
- * Device operations to add/remove device.
- */
-struct virtio_net_device_ops {
- int (* new_device)(struct virtio_net *); /* Add device. */
- void (* destroy_device) (volatile struct virtio_net *); /* Remove device. */
-};
-
-#endif
diff --git a/examples/vhost_xen/xen_vhost.h b/examples/vhost_xen/xen_vhost.h
deleted file mode 100644
index 2fc304c7..00000000
--- a/examples/vhost_xen/xen_vhost.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef _XEN_VHOST_H_
-#define _XEN_VHOST_H_
-
-#include <stdint.h>
-
-#include <rte_ether.h>
-
-#include "virtio-net.h"
-
-#define RTE_LOGTYPE_XENHOST RTE_LOGTYPE_USER1
-
-#define XEN_VM_ROOTNODE_FMT "/local/domain/%d/control/dpdk"
-#define XEN_VM_NODE_FMT "/local/domain/%d/control/dpdk/%s"
-#define XEN_MEMPOOL_SUFFIX "mempool_gref"
-#define XEN_RXVRING_SUFFIX "rx_vring_gref"
-#define XEN_TXVRING_SUFFIX "tx_vring_gref"
-#define XEN_GVA_SUFFIX "mempool_va"
-#define XEN_VRINGFLAG_SUFFIX "vring_flag"
-#define XEN_ADDR_SUFFIX "ether_addr"
-#define VIRTIO_START "event_type_start_"
-
-#define XEN_GREF_SPLITTOKEN ','
-
-#define MAX_XENVIRT_MEMPOOL 16
-#define MAX_VIRTIO 32
-#define MAX_GREF_PER_NODE 64 /* 128 MB memory */
-
-#define PAGE_SIZE 4096
-#define PAGE_PFNNUM (PAGE_SIZE / sizeof(uint32_t))
-
-#define XEN_GNTDEV_FNAME "/dev/xen/gntdev"
-
-/* xen grant reference info in one grant node */
-struct xen_gnt {
- uint32_t gref; /* grant reference for this node */
- union {
- int gref; /* grant reference */
- uint32_t pfn_num; /* guest pfn number of grant reference */
- } gref_pfn[PAGE_PFNNUM];
-}__attribute__((__packed__));
-
-
-/* structure for mempool or vring node list */
-struct xen_gntnode {
- uint32_t gnt_num; /* grant reference number */
- struct xen_gnt *gnt_info; /* grant reference info */
-};
-
-
-struct xen_vring {
- uint32_t dom_id;
- uint32_t virtio_idx; /* index of virtio device */
- void *rxvring_addr; /* mapped virtual address of rxvring */
- void *txvring_addr; /* mapped virtual address of txvring */
- uint32_t rxpfn_num; /* number of gpfn for rxvring */
- uint32_t txpfn_num; /* number of gpfn for txvring */
- uint32_t *rxpfn_tbl; /* array of rxvring gpfn */
- uint32_t *txpfn_tbl; /* array of txvring gpfn */
- uint64_t *rx_pindex; /* index used to release rx grefs */
- uint64_t *tx_pindex; /* index used to release tx grefs */
- uint64_t flag_index;
- uint8_t *flag; /* cleared to zero on guest unmap */
- struct ether_addr addr; /* ethernet address of virtio device */
- uint8_t removed;
-
-};
-
-struct xen_mempool {
- uint32_t dom_id; /* guest domain id */
- uint32_t pool_idx; /* index of memory pool */
- void *gva; /* guest virtual address of mbuf pool */
- void *hva; /* host virtual address of mbuf pool */
- uint32_t mempfn_num; /* number of gpfn for mbuf pool */
- uint32_t *mempfn_tbl; /* array of mbuf pool gpfn */
- uint64_t *pindex; /* index used to release grefs */
-};
-
-struct xen_guest {
- TAILQ_ENTRY(xen_guest) next;
- int32_t dom_id; /* guest domain id */
- uint32_t pool_num; /* number of mbuf pool of the guest */
- uint32_t vring_num; /* number of virtio ports of the guest */
- /* array contain the guest mbuf pool info */
- struct xen_mempool mempool[MAX_XENVIRT_MEMPOOL];
- /* array contain the guest rx/tx vring info */
- struct xen_vring vring[MAX_VIRTIO];
-};
-
-TAILQ_HEAD(xen_guestlist, xen_guest);
-
-int
-parse_mempoolnode(struct xen_guest *guest);
-
-int
-xenhost_init(void);
-
-int
-parse_vringnode(struct xen_guest *guest, uint32_t virtio_idx);
-
-int
-parse_mempoolnode(struct xen_guest *guest);
-
-void
-cleanup_mempool(struct xen_mempool *mempool);
-
-void
-cleanup_vring(struct xen_vring *vring);
-
-void
-virtio_monitor_loop(void);
-
-int
-init_virtio_xen(struct virtio_net_device_ops const * const);
-
-#endif
diff --git a/examples/vhost_xen/xenstore_parse.c b/examples/vhost_xen/xenstore_parse.c
deleted file mode 100644
index ab089f1b..00000000
--- a/examples/vhost_xen/xenstore_parse.c
+++ /dev/null
@@ -1,775 +0,0 @@
-/*-
- * BSD LICENSE
- *
- * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * * Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * * Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in
- * the documentation and/or other materials provided with the
- * distribution.
- * * Neither the name of Intel Corporation nor the names of its
- * contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <stdint.h>
-#include <unistd.h>
-#include <inttypes.h>
-#include <errno.h>
-#include <fcntl.h>
-#include <sys/ioctl.h>
-#include <sys/mman.h>
-#include <xen/sys/gntalloc.h>
-#include <xen/sys/gntdev.h>
-#include <xen/xen-compat.h>
-#if __XEN_LATEST_INTERFACE_VERSION__ < 0x00040200
-#include <xs.h>
-#else
-#include <xenstore.h>
-#endif
-
-#include <rte_common.h>
-#include <rte_memory.h>
-#include <rte_eal.h>
-#include <rte_malloc.h>
-#include <rte_string_fns.h>
-#include <rte_log.h>
-#include <rte_debug.h>
-
-#include "xen_vhost.h"
-
-/* xenstore handle */
-static struct xs_handle *xs = NULL;
-
-/* gntdev file descriptor to map grant pages */
-static int d_fd = -1;
-
-/*
- * The grant node format in xenstore for vring/mpool is like:
- * idx#_rx_vring_gref = "gref1#, gref2#, gref3#"
- * idx#_mempool_gref = "gref1#, gref2#, gref3#"
- * each gref# is the grant reference for a shared page.
- * In each shared page, we store the grant_node_item items.
- */
-struct grant_node_item {
- uint32_t gref;
- uint32_t pfn;
-} __attribute__((packed));
-
-int cmdline_parse_etheraddr(void *tk, const char *srcbuf,
- void *res, unsigned ressize);
-
-/* Map grant ref refid at addr_ori*/
-static void *
-xen_grant_mmap(void *addr_ori, int domid, int refid, uint64_t *pindex)
-{
- struct ioctl_gntdev_map_grant_ref arg;
- void *addr = NULL;
- int pg_sz = getpagesize();
-
- arg.count = 1;
- arg.refs[0].domid = domid;
- arg.refs[0].ref = refid;
-
- int rv = ioctl(d_fd, IOCTL_GNTDEV_MAP_GRANT_REF, &arg);
- if (rv) {
- RTE_LOG(ERR, XENHOST, " %s: (%d,%d) %s (ioctl failed)\n", __func__,
- domid, refid, strerror(errno));
- return NULL;
- }
-
- if (addr_ori == NULL)
- addr = mmap(addr_ori, pg_sz, PROT_READ|PROT_WRITE, MAP_SHARED,
- d_fd, arg.index);
- else
- addr = mmap(addr_ori, pg_sz, PROT_READ|PROT_WRITE, MAP_SHARED | MAP_FIXED,
- d_fd, arg.index);
-
- if (addr == MAP_FAILED) {
- RTE_LOG(ERR, XENHOST, " %s: (%d, %d) %s (map failed)\n", __func__,
- domid, refid, strerror(errno));
- return NULL;
- }
-
- if (pindex)
- *pindex = arg.index;
-
- return addr;
-}
-
-/* Unmap one grant ref, and munmap must be called before this */
-static int
-xen_unmap_grant_ref(uint64_t index)
-{
- struct ioctl_gntdev_unmap_grant_ref arg;
- int rv;
-
- arg.count = 1;
- arg.index = index;
- rv = ioctl(d_fd, IOCTL_GNTDEV_UNMAP_GRANT_REF, &arg);
- if (rv) {
- RTE_LOG(ERR, XENHOST, " %s: index 0x%" PRIx64 "unmap failed\n", __func__, index);
- return -1;
- }
- return 0;
-}
-
-/*
- * Reserve a virtual address space.
- * On success, returns the pointer. On failure, returns NULL.
- */
-static void *
-get_xen_virtual(size_t size, size_t page_sz)
-{
- void *addr;
- uintptr_t aligned_addr;
-
- addr = mmap(NULL, size + page_sz, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
- if (addr == MAP_FAILED) {
- RTE_LOG(ERR, XENHOST, "failed get a virtual area\n");
- return NULL;
- }
-
- aligned_addr = RTE_ALIGN_CEIL((uintptr_t)addr, page_sz);
- munmap(addr, aligned_addr - (uintptr_t)addr);
- munmap((void *)(aligned_addr + size), page_sz + (uintptr_t)addr - aligned_addr);
- addr = (void *)(aligned_addr);
-
- return addr;
-}
-
-static void
-free_xen_virtual(void *addr, size_t size, size_t page_sz __rte_unused)
-{
- if (addr)
- munmap(addr, size);
-}
-
-/*
- * Returns val str in xenstore.
- * @param path
- * Full path string for key
- * @return
- * Pointer to Val str, NULL on failure
- */
-static char *
-xen_read_node(char *path, uint32_t *len)
-{
- char *buf;
-
- buf = xs_read(xs, XBT_NULL, path, len);
- return buf;
-}
-
-static int
-cal_pagenum(struct xen_gnt *gnt)
-{
- unsigned int i;
- /*
- * the items in the page are in the format of
- * gref#,pfn#,...,gref#,pfn#
- * FIXME, 0 is reserved by system, use it as terminator.
- */
- for (i = 0; i < (PAGE_PFNNUM) / 2; i++) {
- if (gnt->gref_pfn[i * 2].gref <= 0)
- break;
- }
-
- return i;
-}
-
-/* Frees memory allocated to a grant node */
-static void
-xen_free_gntnode(struct xen_gntnode *gntnode)
-{
- if (gntnode == NULL)
- return;
- free(gntnode->gnt_info);
- free(gntnode);
-}
-
-/*
- * Parse a grant node.
- * @param domid
- * Guest domain id.
- * @param path
- * Full path string for a grant node, like for the following (key, val) pair
- * idx#_mempool_gref = "gref#, gref#, gref#"
- * path = 'local/domain/domid/control/dpdk/idx#_mempool_gref'
- * gref# is a shared page contain packed (gref,pfn) entries
- * @return
- * Returns the pointer to xen_gntnode
- */
-static struct xen_gntnode *
-parse_gntnode(int dom_id, char *path)
-{
- char **gref_list = NULL;
- uint32_t i, len, gref_num;
- void *addr = NULL;
- char *buf = NULL;
- struct xen_gntnode *gntnode = NULL;
- struct xen_gnt *gnt = NULL;
- int pg_sz = getpagesize();
- char *end;
- uint64_t index;
-
- if ((buf = xen_read_node(path, &len)) == NULL)
- goto err;
-
- gref_list = malloc(MAX_GREF_PER_NODE * sizeof(char *));
- if (gref_list == NULL)
- goto err;
-
- gref_num = rte_strsplit(buf, len, gref_list, MAX_GREF_PER_NODE,
- XEN_GREF_SPLITTOKEN);
- if (gref_num == 0) {
- RTE_LOG(ERR, XENHOST, " %s: invalid grant node format\n", __func__);
- goto err;
- }
-
- gntnode = calloc(1, sizeof(struct xen_gntnode));
- gnt = calloc(gref_num, sizeof(struct xen_gnt));
- if (gnt == NULL || gntnode == NULL)
- goto err;
-
- for (i = 0; i < gref_num; i++) {
- errno = 0;
- gnt[i].gref = strtol(gref_list[i], &end, 0);
- if (errno != 0 || end == NULL || end == gref_list[i] ||
- (*end != '\0' && *end != XEN_GREF_SPLITTOKEN)) {
- RTE_LOG(ERR, XENHOST, " %s: parse grant node item failed\n", __func__);
- goto err;
- }
- addr = xen_grant_mmap(NULL, dom_id, gnt[i].gref, &index);
- if (addr == NULL) {
- RTE_LOG(ERR, XENHOST, " %s: map gref %u failed\n", __func__, gnt[i].gref);
- goto err;
- }
- RTE_LOG(INFO, XENHOST, " %s: map gref %u to %p\n", __func__, gnt[i].gref, addr);
- memcpy(gnt[i].gref_pfn, addr, pg_sz);
- if (munmap(addr, pg_sz)) {
- RTE_LOG(INFO, XENHOST, " %s: unmap gref %u failed\n", __func__, gnt[i].gref);
- goto err;
- }
- if (xen_unmap_grant_ref(index)) {
- RTE_LOG(INFO, XENHOST, " %s: release gref %u failed\n", __func__, gnt[i].gref);
- goto err;
- }
-
- }
-
- gntnode->gnt_num = gref_num;
- gntnode->gnt_info = gnt;
-
- free(buf);
- free(gref_list);
- return gntnode;
-
-err:
- free(gnt);
- free(gntnode);
- free(gref_list);
- free(buf);
- return NULL;
-}
-
-/*
- * This function maps grant node of vring or mbuf pool to a continuous virtual address space,
- * and returns mapped address, pfn array, index array
- * @param gntnode
- * Pointer to grant node
- * @param domid
- * Guest domain id
- * @param ppfn
- * Pointer to pfn array, caller should free this array
- * @param pgs
- * Pointer to number of pages
- * @param ppindex
- * Pointer to index array, used to release grefs when to free this node
- * @return
- * Pointer to mapped virtual address, NULL on failure
- */
-static void *
-map_gntnode(struct xen_gntnode *gntnode, int domid, uint32_t **ppfn, uint32_t *pgs, uint64_t **ppindex)
-{
- struct xen_gnt *gnt;
- uint32_t i, j;
- size_t total_pages = 0;
- void *addr;
- uint32_t *pfn;
- uint64_t *pindex;
- uint32_t pfn_num = 0;
- int pg_sz;
-
- if (gntnode == NULL)
- return NULL;
-
- pg_sz = getpagesize();
- for (i = 0; i < gntnode->gnt_num; i++) {
- gnt = gntnode->gnt_info + i;
- total_pages += cal_pagenum(gnt);
- }
- if ((addr = get_xen_virtual(total_pages * pg_sz, pg_sz)) == NULL) {
- RTE_LOG(ERR, XENHOST, " %s: failed get_xen_virtual\n", __func__);
- return NULL;
- }
- pfn = calloc(total_pages, (size_t)sizeof(uint32_t));
- pindex = calloc(total_pages, (size_t)sizeof(uint64_t));
- if (pfn == NULL || pindex == NULL) {
- free_xen_virtual(addr, total_pages * pg_sz, pg_sz);
- free(pfn);
- free(pindex);
- return NULL;
- }
-
- RTE_LOG(INFO, XENHOST, " %s: total pages:%zu, map to [%p, %p]\n", __func__, total_pages, addr, RTE_PTR_ADD(addr, total_pages * pg_sz - 1));
- for (i = 0; i < gntnode->gnt_num; i++) {
- gnt = gntnode->gnt_info + i;
- for (j = 0; j < (PAGE_PFNNUM) / 2; j++) {
- if ((gnt->gref_pfn[j * 2].gref) <= 0)
- goto _end;
- /*alternative: batch map, or through libxc*/
- if (xen_grant_mmap(RTE_PTR_ADD(addr, pfn_num * pg_sz),
- domid,
- gnt->gref_pfn[j * 2].gref,
- &pindex[pfn_num]) == NULL) {
- goto mmap_failed;
- }
- pfn[pfn_num] = gnt->gref_pfn[j * 2 + 1].pfn_num;
- pfn_num++;
- }
- }
-
-mmap_failed:
- if (pfn_num)
- munmap(addr, pfn_num * pg_sz);
- for (i = 0; i < pfn_num; i++) {
- xen_unmap_grant_ref(pindex[i]);
- }
- free(pindex);
- free(pfn);
- return NULL;
-
-_end:
- if (ppindex)
- *ppindex = pindex;
- else
- free(pindex);
- if (ppfn)
- *ppfn = pfn;
- else
- free(pfn);
- if (pgs)
- *pgs = total_pages;
-
- return addr;
-}
-
-static int
-parse_mpool_va(struct xen_mempool *mempool)
-{
- char path[PATH_MAX] = {0};
- char *buf;
- uint32_t len;
- char *end;
- int ret = -1;
-
- errno = 0;
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_GVA_SUFFIX,
- mempool->dom_id, mempool->pool_idx);
-
- if((buf = xen_read_node(path, &len)) == NULL)
- goto out;
- mempool->gva = (void *)strtoul(buf, &end, 16);
- if (errno != 0 || end == NULL || end == buf || *end != '\0') {
- mempool->gva = NULL;
- goto out;
- }
- ret = 0;
-out:
- free(buf);
- return ret;
-}
-
-/*
- * map mbuf pool
- */
-static int
-map_mempoolnode(struct xen_gntnode *gntnode,
- struct xen_mempool *mempool)
-{
- if (gntnode == NULL || mempool == NULL)
- return -1;
-
- mempool->hva =
- map_gntnode(gntnode, mempool->dom_id, &mempool->mempfn_tbl, &mempool->mempfn_num, &mempool->pindex);
-
- RTE_LOG(INFO, XENHOST, " %s: map mempool at %p\n", __func__, (void *)mempool->hva);
- if (mempool->hva)
- return 0;
- else {
- return -1;
- }
-}
-
-void
-cleanup_mempool(struct xen_mempool *mempool)
-{
- int pg_sz = getpagesize();
- uint32_t i;
-
- if (mempool->hva)
- munmap(mempool->hva, mempool->mempfn_num * pg_sz);
- mempool->hva = NULL;
-
- if (mempool->pindex) {
- RTE_LOG(INFO, XENHOST, " %s: unmap dom %02u mempool%02u %u grefs\n",
- __func__,
- mempool->dom_id,
- mempool->pool_idx,
- mempool->mempfn_num);
- for (i = 0; i < mempool->mempfn_num; i ++) {
- xen_unmap_grant_ref(mempool->pindex[i]);
- }
- }
- mempool->pindex = NULL;
-
- free(mempool->mempfn_tbl);
- mempool->mempfn_tbl = NULL;
-}
-
-/*
- * process mempool node idx#_mempool_gref, idx = 0, 1, 2...
- * until we encounter a node that doesn't exist.
- */
-int
-parse_mempoolnode(struct xen_guest *guest)
-{
- uint32_t i, len;
- char path[PATH_MAX] = {0};
- struct xen_gntnode *gntnode = NULL;
- struct xen_mempool *mempool = NULL;
- char *buf;
-
- bzero(&guest->mempool, MAX_XENVIRT_MEMPOOL * sizeof(guest->mempool[0]));
- guest->pool_num = 0;
-
- while (1) {
- /* check if null terminated */
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_MEMPOOL_SUFFIX,
- guest->dom_id,
- guest->pool_num);
-
- if ((buf = xen_read_node(path, &len)) != NULL) {
- /* this node exists */
- free(buf);
- } else {
- if (guest->pool_num == 0) {
- RTE_LOG(ERR, PMD, "no mempool found\n");
- return -1;
- }
- break;
- }
-
- mempool = &guest->mempool[guest->pool_num];
- mempool->dom_id = guest->dom_id;
- mempool->pool_idx = guest->pool_num;
-
- RTE_LOG(INFO, XENHOST, " %s: mempool %u parse gntnode %s\n", __func__, guest->pool_num, path);
- gntnode = parse_gntnode(guest->dom_id, path);
- if (gntnode == NULL)
- goto err;
-
- if (parse_mpool_va(mempool))
- goto err;
-
- RTE_LOG(INFO, XENHOST, " %s: mempool %u map gntnode %s\n", __func__, guest->pool_num, path);
- if (map_mempoolnode(gntnode, mempool))
- goto err;
-
- xen_free_gntnode(gntnode);
- guest->pool_num++;
- }
-
- return 0;
-err:
- if (gntnode)
- xen_free_gntnode(gntnode);
- for (i = 0; i < MAX_XENVIRT_MEMPOOL ; i++) {
- cleanup_mempool(&guest->mempool[i]);
- }
- /* reinitialise mempool */
- bzero(&guest->mempool, MAX_XENVIRT_MEMPOOL * sizeof(guest->mempool[0]));
- return -1;
-}
-
-static int
-xen_map_vringflag(struct xen_vring *vring)
-{
- char path[PATH_MAX] = {0};
- char *buf;
- uint32_t len,gref;
- int pg_sz = getpagesize();
- char *end;
-
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_VRINGFLAG_SUFFIX,
- vring->dom_id, vring->virtio_idx);
-
- if((buf = xen_read_node(path, &len)) == NULL)
- goto err;
-
- errno = 0;
- gref = strtol(buf, &end, 0);
- if (errno != 0 || end == NULL || end == buf) {
- goto err;
- }
- vring->flag = xen_grant_mmap(0, vring->dom_id, gref, &vring->flag_index);
- if (vring->flag == NULL || *vring->flag == 0)
- goto err;
-
- free(buf);
- return 0;
-err:
- free(buf);
- if (vring->flag) {
- munmap(vring->flag, pg_sz);
- vring->flag = NULL;
- xen_unmap_grant_ref(vring->flag_index);
- }
- return -1;
-}
-
-
-static int
-xen_map_rxvringnode(struct xen_gntnode *gntnode,
- struct xen_vring *vring)
-{
- vring->rxvring_addr =
- map_gntnode(gntnode, vring->dom_id, &vring->rxpfn_tbl, &vring->rxpfn_num, &vring->rx_pindex);
- RTE_LOG(INFO, XENHOST, " %s: map rx vring at %p\n", __func__, (void *)vring->rxvring_addr);
- if (vring->rxvring_addr)
- return 0;
- else
- return -1;
-}
-
-static int
-xen_map_txvringnode(struct xen_gntnode *gntnode,
- struct xen_vring *vring)
-{
- vring->txvring_addr =
- map_gntnode(gntnode, vring->dom_id, &vring->txpfn_tbl, &vring->txpfn_num, &vring->tx_pindex);
- RTE_LOG(INFO, XENHOST, " %s: map tx vring at %p\n", __func__, (void *)vring->txvring_addr);
- if (vring->txvring_addr)
- return 0;
- else
- return -1;
-}
-
-void
-cleanup_vring(struct xen_vring *vring)
-{
- int pg_sz = getpagesize();
- uint32_t i;
-
- RTE_LOG(INFO, XENHOST, " %s: cleanup dom %u vring %u\n", __func__, vring->dom_id, vring->virtio_idx);
- if (vring->rxvring_addr) {
- munmap(vring->rxvring_addr, vring->rxpfn_num * pg_sz);
- RTE_LOG(INFO, XENHOST, " %s: unmap rx vring [%p, %p]\n",
- __func__,
- vring->rxvring_addr,
- RTE_PTR_ADD(vring->rxvring_addr,
- vring->rxpfn_num * pg_sz - 1));
- }
- vring->rxvring_addr = NULL;
-
-
- if (vring->rx_pindex) {
- RTE_LOG(INFO, XENHOST, " %s: unmap rx vring %u grefs\n", __func__, vring->rxpfn_num);
- for (i = 0; i < vring->rxpfn_num; i++) {
- xen_unmap_grant_ref(vring->rx_pindex[i]);
- }
- }
- vring->rx_pindex = NULL;
-
- free(vring->rxpfn_tbl);
- vring->rxpfn_tbl = NULL;
-
- if (vring->txvring_addr) {
- munmap(vring->txvring_addr, vring->txpfn_num * pg_sz);
- RTE_LOG(INFO, XENHOST, " %s: unmap tx vring [%p, %p]\n",
- __func__,
- vring->txvring_addr,
- RTE_PTR_ADD(vring->txvring_addr,
- vring->txpfn_num * pg_sz - 1));
- }
- vring->txvring_addr = NULL;
-
- if (vring->tx_pindex) {
- RTE_LOG(INFO, XENHOST, " %s: unmap tx vring %u grefs\n", __func__, vring->txpfn_num);
- for (i = 0; i < vring->txpfn_num; i++) {
- xen_unmap_grant_ref(vring->tx_pindex[i]);
- }
- }
- vring->tx_pindex = NULL;
-
- free(vring->txpfn_tbl);
- vring->txpfn_tbl = NULL;
-
- if (vring->flag) {
- if (!munmap((void *)vring->flag, pg_sz))
- RTE_LOG(INFO, XENHOST, " %s: unmap flag page at %p\n", __func__, vring->flag);
- if (!xen_unmap_grant_ref(vring->flag_index))
- RTE_LOG(INFO, XENHOST, " %s: release flag ref index 0x%" PRIx64 "\n", __func__, vring->flag_index);
- }
- vring->flag = NULL;
- return;
-}
-
-
-
-static int
-xen_parse_etheraddr(struct xen_vring *vring)
-{
- char path[PATH_MAX] = {0};
- char *buf;
- uint32_t len;
- int ret = -1;
-
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_ADDR_SUFFIX,
- vring->dom_id, vring->virtio_idx);
-
- if ((buf = xen_read_node(path, &len)) == NULL)
- goto out;
-
- if (cmdline_parse_etheraddr(NULL, buf, &vring->addr,
- sizeof(vring->addr)) < 0)
- goto out;
- ret = 0;
-out:
- free(buf);
- return ret;
-}
-
-
-int
-parse_vringnode(struct xen_guest *guest, uint32_t virtio_idx)
-{
- char path[PATH_MAX] = {0};
- struct xen_gntnode *rx_gntnode = NULL;
- struct xen_gntnode *tx_gntnode = NULL;
- struct xen_vring *vring = NULL;
-
- /*check if null terminated */
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_RXVRING_SUFFIX,
- guest->dom_id,
- virtio_idx);
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u parse rx gntnode %s\n", __func__, virtio_idx, path);
- rx_gntnode = parse_gntnode(guest->dom_id, path);
- if (rx_gntnode == NULL)
- goto err;
-
- /*check if null terminated */
- snprintf(path, sizeof(path),
- XEN_VM_ROOTNODE_FMT"/%d_"XEN_TXVRING_SUFFIX,
- guest->dom_id,
- virtio_idx);
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u parse tx gntnode %s\n", __func__, virtio_idx, path);
- tx_gntnode = parse_gntnode(guest->dom_id, path);
- if (tx_gntnode == NULL)
- goto err;
-
- vring = &guest->vring[virtio_idx];
- bzero(vring, sizeof(*vring));
- vring->dom_id = guest->dom_id;
- vring->virtio_idx = virtio_idx;
-
- if (xen_parse_etheraddr(vring) != 0)
- goto err;
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u map rx gntnode %s\n", __func__, virtio_idx, path);
- if (xen_map_rxvringnode(rx_gntnode, vring) != 0)
- goto err;
-
- RTE_LOG(INFO, XENHOST, " %s: virtio %u map tx gntnode %s\n", __func__, virtio_idx, path);
- if (xen_map_txvringnode(tx_gntnode, vring) != 0)
- goto err;
-
- if (xen_map_vringflag(vring) != 0)
- goto err;
-
- guest->vring_num++;
-
- xen_free_gntnode(rx_gntnode);
- xen_free_gntnode(tx_gntnode);
-
- return 0;
-
-err:
- if (rx_gntnode)
- xen_free_gntnode(rx_gntnode);
- if (tx_gntnode)
- xen_free_gntnode(tx_gntnode);
- if (vring) {
- cleanup_vring(vring);
- bzero(vring, sizeof(*vring));
- }
- return -1;
-}
-
-/*
- * Open xen grant dev driver
- * @return
- * 0 on success, -1 on failure.
- */
-static int
-xen_grant_init(void)
-{
- d_fd = open(XEN_GNTDEV_FNAME, O_RDWR);
-
- return d_fd == -1? (-1): (0);
-}
-
-/*
- * Initialise xenstore handle and open grant dev driver.
- * @return
- * 0 on success, -1 on failure.
- */
-int
-xenhost_init(void)
-{
- xs = xs_daemon_open();
- if (xs == NULL) {
- rte_panic("failed initialize xen daemon handler");
- return -1;
- }
- if (xen_grant_init())
- return -1;
- return 0;
-}
diff --git a/examples/vm_power_manager/Makefile b/examples/vm_power_manager/Makefile
index 59a96417..9cf20a28 100644
--- a/examples/vm_power_manager/Makefile
+++ b/examples/vm_power_manager/Makefile
@@ -54,6 +54,22 @@ CFLAGS += $(WERROR_FLAGS)
LDLIBS += -lvirt
+ifeq ($(CONFIG_RTE_BUILD_SHARED_LIB),y)
+
+ifeq ($(CONFIG_RTE_LIBRTE_IXGBE_PMD),y)
+LDLIBS += -lrte_pmd_ixgbe
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_I40E_PMD),y)
+LDLIBS += -lrte_pmd_i40e
+endif
+
+ifeq ($(CONFIG_RTE_LIBRTE_BNXT_PMD),y)
+LDLIBS += -lrte_pmd_bnxt
+endif
+
+endif
+
# workaround for a gcc bug with noreturn attribute
# http://gcc.gnu.org/bugzilla/show_bug.cgi?id=12603
ifeq ($(CONFIG_RTE_TOOLCHAIN_GCC),y)
diff --git a/examples/vm_power_manager/channel_manager.c b/examples/vm_power_manager/channel_manager.c
index e068ae28..ab856bdb 100644
--- a/examples/vm_power_manager/channel_manager.c
+++ b/examples/vm_power_manager/channel_manager.c
@@ -574,6 +574,73 @@ set_channel_status(const char *vm_name, unsigned *channel_list,
return num_channels_changed;
}
+void
+get_all_vm(int *num_vm, int *num_vcpu)
+{
+
+ virNodeInfo node_info;
+ virDomainPtr *domptr;
+ uint64_t mask;
+ int i, ii, numVcpus[MAX_VCPUS], cpu, n_vcpus;
+ unsigned int jj;
+ const char *vm_name;
+ unsigned int domain_flags = VIR_CONNECT_LIST_DOMAINS_RUNNING |
+ VIR_CONNECT_LIST_DOMAINS_PERSISTENT;
+ unsigned int domain_flag = VIR_DOMAIN_VCPU_CONFIG;
+
+
+ memset(global_cpumaps, 0, CHANNEL_CMDS_MAX_CPUS*global_maplen);
+ if (virNodeGetInfo(global_vir_conn_ptr, &node_info)) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "Unable to retrieve node Info\n");
+ return;
+ }
+
+ /* Returns number of pcpus */
+ global_n_host_cpus = (unsigned int)node_info.cpus;
+
+ /* Returns number of active domains */
+ *num_vm = virConnectListAllDomains(global_vir_conn_ptr, &domptr,
+ domain_flags);
+ if (*num_vm <= 0) {
+ RTE_LOG(ERR, CHANNEL_MANAGER, "No Active Domains Running\n");
+ return;
+ }
+
+ for (i = 0; i < *num_vm; i++) {
+
+ /* Get Domain Names */
+ vm_name = virDomainGetName(domptr[i]);
+ lvm_info[i].vm_name = vm_name;
+
+ /* Get Number of Vcpus */
+ numVcpus[i] = virDomainGetVcpusFlags(domptr[i], domain_flag);
+
+ /* Get Number of VCpus & VcpuPinInfo */
+ n_vcpus = virDomainGetVcpuPinInfo(domptr[i],
+ numVcpus[i], global_cpumaps,
+ global_maplen, domain_flag);
+
+ if ((int)n_vcpus > 0) {
+ *num_vcpu = n_vcpus;
+ lvm_info[i].num_cpus = n_vcpus;
+ }
+
+ /* Save pcpu in use by libvirt VMs */
+ for (ii = 0; ii < n_vcpus; ii++) {
+ mask = 0;
+ for (jj = 0; jj < global_n_host_cpus; jj++) {
+ if (VIR_CPU_USABLE(global_cpumaps,
+ global_maplen, ii, jj) > 0) {
+ mask |= 1ULL << jj;
+ }
+ }
+ ITERATIVE_BITMASK_CHECK_64(mask, cpu) {
+ lvm_info[i].pcpus[ii] = cpu;
+ }
+ }
+ }
+}
+
int
get_info_vm(const char *vm_name, struct vm_info *info)
{
diff --git a/examples/vm_power_manager/channel_manager.h b/examples/vm_power_manager/channel_manager.h
index 47c3b9cd..358fb8f2 100644
--- a/examples/vm_power_manager/channel_manager.h
+++ b/examples/vm_power_manager/channel_manager.h
@@ -66,6 +66,17 @@ struct sockaddr_un _sockaddr_un;
#define UNIX_PATH_MAX sizeof(_sockaddr_un.sun_path)
#endif
+#define MAX_VMS 4
+#define MAX_VCPUS 20
+
+
+struct libvirt_vm_info {
+ const char *vm_name;
+ unsigned int pcpus[MAX_VCPUS];
+ uint8_t num_cpus;
+};
+
+struct libvirt_vm_info lvm_info[MAX_VMS];
/* Communication Channel Status */
enum channel_status { CHANNEL_MGR_CHANNEL_DISCONNECTED = 0,
CHANNEL_MGR_CHANNEL_CONNECTED,
@@ -319,6 +330,20 @@ int set_channel_status(const char *vm_name, unsigned *channel_list,
*/
int get_info_vm(const char *vm_name, struct vm_info *info);
+/**
+ * Populates a table with all domains running and their physical cpu.
+ * All information is gathered through libvirt api.
+ *
+ * @param num_vm
+ * modified to store number of active VMs
+ *
+ * @param num_vcpu
+ *   modified to store number of vcpus active
+ *
+ * @return
+ * void
+ */
+void get_all_vm(int *num_vm, int *num_vcpu);
#ifdef __cplusplus
}
#endif
diff --git a/examples/vm_power_manager/channel_monitor.c b/examples/vm_power_manager/channel_monitor.c
index e7f5cc4a..37e71ed9 100644
--- a/examples/vm_power_manager/channel_monitor.c
+++ b/examples/vm_power_manager/channel_monitor.c
@@ -41,13 +41,17 @@
#include <sys/types.h>
#include <sys/epoll.h>
#include <sys/queue.h>
+#include <sys/time.h>
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_malloc.h>
#include <rte_atomic.h>
+#include <rte_cycles.h>
+#include <rte_ethdev.h>
+#include <rte_pmd_i40e.h>
-
+#include <libvirt/libvirt.h>
#include "channel_monitor.h"
#include "channel_commands.h"
#include "channel_manager.h"
@@ -57,10 +61,15 @@
#define MAX_EVENTS 256
+uint64_t vsi_pkt_count_prev[384];
+uint64_t rdtsc_prev[384];
+double time_period_ms = 1;
static volatile unsigned run_loop = 1;
static int global_event_fd;
+static unsigned int policy_is_set;
static struct epoll_event *global_events_list;
+static struct policy policies[MAX_VMS];
void channel_monitor_exit(void)
{
@@ -68,6 +77,287 @@ void channel_monitor_exit(void)
rte_free(global_events_list);
}
+static void
+core_share(int pNo, int z, int x, int t)
+{
+ if (policies[pNo].core_share[z].pcpu == lvm_info[x].pcpus[t]) {
+ if (strcmp(policies[pNo].pkt.vm_name,
+ lvm_info[x].vm_name) != 0) {
+ policies[pNo].core_share[z].status = 1;
+ power_manager_scale_core_max(
+ policies[pNo].core_share[z].pcpu);
+ }
+ }
+}
+
+static void
+core_share_status(int pNo)
+{
+
+ int noVms, noVcpus, z, x, t;
+
+ get_all_vm(&noVms, &noVcpus);
+
+ /* Reset Core Share Status. */
+ for (z = 0; z < noVcpus; z++)
+ policies[pNo].core_share[z].status = 0;
+
+ /* Foreach vcpu in a policy. */
+ for (z = 0; z < policies[pNo].pkt.num_vcpu; z++) {
+ /* Foreach VM on the platform. */
+ for (x = 0; x < noVms; x++) {
+ /* Foreach vcpu of VMs on platform. */
+ for (t = 0; t < lvm_info[x].num_cpus; t++)
+ core_share(pNo, z, x, t);
+ }
+ }
+}
+
+static void
+get_pcpu_to_control(struct policy *pol)
+{
+
+ /* Convert vcpu to pcpu. */
+ struct vm_info info;
+ int pcpu, count;
+ uint64_t mask_u64b;
+
+ RTE_LOG(INFO, CHANNEL_MONITOR, "Looking for pcpu for %s\n",
+ pol->pkt.vm_name);
+ get_info_vm(pol->pkt.vm_name, &info);
+
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ mask_u64b = info.pcpu_mask[pol->pkt.vcpu_to_control[count]];
+ for (pcpu = 0; mask_u64b; mask_u64b &= ~(1ULL << pcpu++)) {
+ if ((mask_u64b >> pcpu) & 1)
+ pol->core_share[count].pcpu = pcpu;
+ }
+ }
+}
+
+static int
+get_pfid(struct policy *pol)
+{
+
+ int i, x, ret = 0, nb_ports;
+
+ nb_ports = rte_eth_dev_count();
+ for (i = 0; i < pol->pkt.nb_mac_to_monitor; i++) {
+
+ for (x = 0; x < nb_ports; x++) {
+ ret = rte_pmd_i40e_query_vfid_by_mac(x,
+ (struct ether_addr *)&(pol->pkt.vfid[i]));
+ if (ret != -EINVAL) {
+ pol->port[i] = x;
+ break;
+ }
+ }
+ if (ret == -EINVAL || ret == -ENOTSUP || ret == ENODEV) {
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Error with Policy. MAC not found on "
+ "attached ports ");
+ pol->enabled = 0;
+ return ret;
+ }
+ pol->pfid[i] = ret;
+ }
+ return 1;
+}
+
+static int
+update_policy(struct channel_packet *pkt)
+{
+
+ unsigned int updated = 0;
+ int i;
+
+ for (i = 0; i < MAX_VMS; i++) {
+ if (strcmp(policies[i].pkt.vm_name, pkt->vm_name) == 0) {
+ policies[i].pkt = *pkt;
+ get_pcpu_to_control(&policies[i]);
+ if (get_pfid(&policies[i]) == -1) {
+ updated = 1;
+ break;
+ }
+ core_share_status(i);
+ policies[i].enabled = 1;
+ updated = 1;
+ }
+ }
+ if (!updated) {
+ for (i = 0; i < MAX_VMS; i++) {
+ if (policies[i].enabled == 0) {
+ policies[i].pkt = *pkt;
+ get_pcpu_to_control(&policies[i]);
+ if (get_pfid(&policies[i]) == -1)
+ break;
+ core_share_status(i);
+ policies[i].enabled = 1;
+ break;
+ }
+ }
+ }
+ return 0;
+}
+
+static uint64_t
+get_pkt_diff(struct policy *pol)
+{
+
+ uint64_t vsi_pkt_count,
+ vsi_pkt_total = 0,
+ vsi_pkt_count_prev_total = 0;
+ double rdtsc_curr, rdtsc_diff, diff;
+ int x;
+ struct rte_eth_stats vf_stats;
+
+ for (x = 0; x < pol->pkt.nb_mac_to_monitor; x++) {
+
+ /*Read vsi stats*/
+ if (rte_pmd_i40e_get_vf_stats(x, pol->pfid[x], &vf_stats) == 0)
+ vsi_pkt_count = vf_stats.ipackets;
+ else
+ vsi_pkt_count = -1;
+
+ vsi_pkt_total += vsi_pkt_count;
+
+ vsi_pkt_count_prev_total += vsi_pkt_count_prev[pol->pfid[x]];
+ vsi_pkt_count_prev[pol->pfid[x]] = vsi_pkt_count;
+ }
+
+ rdtsc_curr = rte_rdtsc_precise();
+ rdtsc_diff = rdtsc_curr - rdtsc_prev[pol->pfid[x-1]];
+ rdtsc_prev[pol->pfid[x-1]] = rdtsc_curr;
+
+ diff = (vsi_pkt_total - vsi_pkt_count_prev_total) *
+ ((double)rte_get_tsc_hz() / rdtsc_diff);
+
+ return diff;
+}
+
+static void
+apply_traffic_profile(struct policy *pol)
+{
+
+ int count;
+ uint64_t diff = 0;
+
+ diff = get_pkt_diff(pol);
+
+ RTE_LOG(INFO, CHANNEL_MONITOR, "Applying traffic profile\n");
+
+ if (diff >= (pol->pkt.traffic_policy.max_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ }
+ } else if (diff >= (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_med(
+ pol->core_share[count].pcpu);
+ }
+ } else if (diff < (pol->pkt.traffic_policy.avg_max_packet_thresh)) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ }
+ }
+}
+
+static void
+apply_time_profile(struct policy *pol)
+{
+
+ int count, x;
+ struct timeval tv;
+ struct tm *ptm;
+ char time_string[40];
+
+ /* Obtain the time of day, and convert it to a tm struct. */
+ gettimeofday(&tv, NULL);
+ ptm = localtime(&tv.tv_sec);
+ /* Format the date and time, down to a single second. */
+ strftime(time_string, sizeof(time_string), "%Y-%m-%d %H:%M:%S", ptm);
+
+ for (x = 0; x < HOURS; x++) {
+
+ if (ptm->tm_hour == pol->pkt.timer_policy.busy_hours[x]) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1) {
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Scaling up core %d to max\n",
+ pol->core_share[count].pcpu);
+ }
+ }
+ break;
+ } else if (ptm->tm_hour ==
+ pol->pkt.timer_policy.quiet_hours[x]) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1) {
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ RTE_LOG(INFO, CHANNEL_MONITOR,
+ "Scaling down core %d to min\n",
+ pol->core_share[count].pcpu);
+ }
+ }
+ break;
+ } else if (ptm->tm_hour ==
+ pol->pkt.timer_policy.hours_to_use_traffic_profile[x]) {
+ apply_traffic_profile(pol);
+ break;
+ }
+ }
+}
+
+static void
+apply_workload_profile(struct policy *pol)
+{
+
+ int count;
+
+ if (pol->pkt.workload == HIGH) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_max(
+ pol->core_share[count].pcpu);
+ }
+ } else if (pol->pkt.workload == MEDIUM) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_med(
+ pol->core_share[count].pcpu);
+ }
+ } else if (pol->pkt.workload == LOW) {
+ for (count = 0; count < pol->pkt.num_vcpu; count++) {
+ if (pol->core_share[count].status != 1)
+ power_manager_scale_core_min(
+ pol->core_share[count].pcpu);
+ }
+ }
+}
+
+static void
+apply_policy(struct policy *pol)
+{
+
+ struct channel_packet *pkt = &pol->pkt;
+
+ /*Check policy to use*/
+ if (pkt->policy_to_use == TRAFFIC)
+ apply_traffic_profile(pol);
+ else if (pkt->policy_to_use == TIME)
+ apply_time_profile(pol);
+ else if (pkt->policy_to_use == WORKLOAD)
+ apply_workload_profile(pol);
+}
+
+
static int
process_request(struct channel_packet *pkt, struct channel_info *chan_info)
{
@@ -105,6 +395,12 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
case(CPU_POWER_SCALE_UP):
power_manager_scale_core_up(core_num);
break;
+ case(CPU_POWER_ENABLE_TURBO):
+ power_manager_enable_turbo_core(core_num);
+ break;
+ case(CPU_POWER_DISABLE_TURBO):
+ power_manager_disable_turbo_core(core_num);
+ break;
default:
break;
}
@@ -122,12 +418,25 @@ process_request(struct channel_packet *pkt, struct channel_info *chan_info)
case(CPU_POWER_SCALE_UP):
power_manager_scale_mask_up(core_mask);
break;
+ case(CPU_POWER_ENABLE_TURBO):
+ power_manager_enable_turbo_mask(core_mask);
+ break;
+ case(CPU_POWER_DISABLE_TURBO):
+ power_manager_disable_turbo_mask(core_mask);
+ break;
default:
break;
}
}
}
+
+ if (pkt->command == PKT_POLICY) {
+ RTE_LOG(INFO, CHANNEL_MONITOR, "\nProcessing Policy request from Guest\n");
+ update_policy(pkt);
+ policy_is_set = 1;
+ }
+
/* Return is not checked as channel status may have been set to DISABLED
* from management thread
*/
@@ -197,9 +506,10 @@ run_channel_monitor(void)
struct channel_info *chan_info = (struct channel_info *)
global_events_list[i].data.ptr;
if ((global_events_list[i].events & EPOLLERR) ||
- (global_events_list[i].events & EPOLLHUP)) {
+ (global_events_list[i].events & EPOLLHUP)) {
RTE_LOG(DEBUG, CHANNEL_MONITOR, "Remote closed connection for "
- "channel '%s'\n", chan_info->channel_path);
+ "channel '%s'\n",
+ chan_info->channel_path);
remove_channel(&chan_info);
continue;
}
@@ -211,14 +521,17 @@ run_channel_monitor(void)
int buffer_len = sizeof(pkt);
while (buffer_len > 0) {
- n_bytes = read(chan_info->fd, buffer, buffer_len);
+ n_bytes = read(chan_info->fd,
+ buffer, buffer_len);
if (n_bytes == buffer_len)
break;
if (n_bytes == -1) {
err = errno;
- RTE_LOG(DEBUG, CHANNEL_MONITOR, "Received error on "
- "channel '%s' read: %s\n",
- chan_info->channel_path, strerror(err));
+ RTE_LOG(DEBUG, CHANNEL_MONITOR,
+ "Received error on "
+ "channel '%s' read: %s\n",
+ chan_info->channel_path,
+ strerror(err));
remove_channel(&chan_info);
break;
}
@@ -229,5 +542,14 @@ run_channel_monitor(void)
process_request(&pkt, chan_info);
}
}
+ rte_delay_us(time_period_ms*1000);
+ if (policy_is_set) {
+ int j;
+
+ for (j = 0; j < MAX_VMS; j++) {
+ if (policies[j].enabled == 1)
+ apply_policy(&policies[j]);
+ }
+ }
}
}
diff --git a/examples/vm_power_manager/channel_monitor.h b/examples/vm_power_manager/channel_monitor.h
index c1386079..b52c1fca 100644
--- a/examples/vm_power_manager/channel_monitor.h
+++ b/examples/vm_power_manager/channel_monitor.h
@@ -35,6 +35,24 @@
#define CHANNEL_MONITOR_H_
#include "channel_manager.h"
+#include "channel_commands.h"
+
+struct core_share {
+ unsigned int pcpu;
+ /*
+ * 1 CORE SHARE
+ * 0 NOT SHARED
+ */
+ int status;
+};
+
+struct policy {
+ struct channel_packet pkt;
+ uint32_t pfid[MAX_VFS];
+ uint32_t port[MAX_VFS];
+ unsigned int enabled;
+ struct core_share core_share[MAX_VCPU_PER_VM];
+};
#ifdef __cplusplus
extern "C" {
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
index 7931135e..63f711e0 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.c
@@ -45,8 +45,10 @@
#include <cmdline.h>
#include <rte_log.h>
#include <rte_lcore.h>
+#include <rte_ethdev.h>
#include <rte_power.h>
+#include <guest_channel.h>
#include "vm_power_cli_guest.h"
@@ -108,6 +110,10 @@ cmd_set_cpu_freq_parsed(void *parsed_result, struct cmdline *cl,
ret = rte_power_freq_min(res->lcore_id);
else if (!strcmp(res->cmd , "max"))
ret = rte_power_freq_max(res->lcore_id);
+ else if (!strcmp(res->cmd, "enable_turbo"))
+ ret = rte_power_freq_enable_turbo(res->lcore_id);
+ else if (!strcmp(res->cmd, "disable_turbo"))
+ ret = rte_power_freq_disable_turbo(res->lcore_id);
if (ret != 1)
cmdline_printf(cl, "Error sending message: %s\n", strerror(ret));
}
@@ -120,13 +126,14 @@ cmdline_parse_token_string_t cmd_set_cpu_freq_core_num =
lcore_id, UINT8);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
- cmd, "up#down#min#max");
+ cmd, "up#down#min#max#enable_turbo#disable_turbo");
cmdline_parse_inst_t cmd_set_cpu_freq_set = {
.f = cmd_set_cpu_freq_parsed,
.data = NULL,
- .help_str = "set_cpu_freq <core_num> <up|down|min|max>, Set the current "
- "frequency for the specified core by scaling up/down/min/max",
+ .help_str = "set_cpu_freq <core_num> "
+ "<up|down|min|max|enable_turbo|disable_turbo>, "
+ "adjust the frequency for the specified core.",
.tokens = {
(void *)&cmd_set_cpu_freq,
(void *)&cmd_set_cpu_freq_core_num,
@@ -135,8 +142,103 @@ cmdline_parse_inst_t cmd_set_cpu_freq_set = {
},
};
+struct cmd_send_policy_result {
+ cmdline_fixed_string_t send_policy;
+ cmdline_fixed_string_t cmd;
+};
+
+union PFID {
+ struct ether_addr addr;
+ uint64_t pfid;
+};
+
+static inline int
+send_policy(void)
+{
+ struct channel_packet pkt;
+ int ret;
+
+ union PFID pfid;
+ /* Use port MAC address as the vfid */
+ rte_eth_macaddr_get(0, &pfid.addr);
+ printf("Port %u MAC: %02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 ":"
+ "%02" PRIx8 ":%02" PRIx8 ":%02" PRIx8 "\n",
+ 1,
+ pfid.addr.addr_bytes[0], pfid.addr.addr_bytes[1],
+ pfid.addr.addr_bytes[2], pfid.addr.addr_bytes[3],
+ pfid.addr.addr_bytes[4], pfid.addr.addr_bytes[5]);
+ pkt.vfid[0] = pfid.pfid;
+
+ pkt.nb_mac_to_monitor = 1;
+ pkt.t_boost_status.tbEnabled = false;
+
+ pkt.vcpu_to_control[0] = 0;
+ pkt.vcpu_to_control[1] = 1;
+ pkt.num_vcpu = 2;
+ /* Dummy Population. */
+ pkt.traffic_policy.min_packet_thresh = 96000;
+ pkt.traffic_policy.avg_max_packet_thresh = 1800000;
+ pkt.traffic_policy.max_max_packet_thresh = 2000000;
+
+ pkt.timer_policy.busy_hours[0] = 3;
+ pkt.timer_policy.busy_hours[1] = 4;
+ pkt.timer_policy.busy_hours[2] = 5;
+ pkt.timer_policy.quiet_hours[0] = 11;
+ pkt.timer_policy.quiet_hours[1] = 12;
+ pkt.timer_policy.quiet_hours[2] = 13;
+
+ pkt.timer_policy.hours_to_use_traffic_profile[0] = 8;
+ pkt.timer_policy.hours_to_use_traffic_profile[1] = 10;
+
+ pkt.workload = LOW;
+ pkt.policy_to_use = TIME;
+ pkt.command = PKT_POLICY;
+ strcpy(pkt.vm_name, "ubuntu2");
+ ret = rte_power_guest_channel_send_msg(&pkt, 1);
+ if (ret == 0)
+ return 1;
+ RTE_LOG(DEBUG, POWER, "Error sending message: %s\n",
+ ret > 0 ? strerror(ret) : "channel not connected");
+ return -1;
+}
+
+static void
+cmd_send_policy_parsed(void *parsed_result, struct cmdline *cl,
+ __attribute__((unused)) void *data)
+{
+ int ret = -1;
+ struct cmd_send_policy_result *res = parsed_result;
+
+ if (!strcmp(res->cmd, "now")) {
+ printf("Sending Policy down now!\n");
+ ret = send_policy();
+ }
+ if (ret != 1)
+ cmdline_printf(cl, "Error sending message: %s\n",
+ strerror(ret));
+}
+
+cmdline_parse_token_string_t cmd_send_policy =
+ TOKEN_STRING_INITIALIZER(struct cmd_send_policy_result,
+ send_policy, "send_policy");
+cmdline_parse_token_string_t cmd_send_policy_cmd_cmd =
+ TOKEN_STRING_INITIALIZER(struct cmd_send_policy_result,
+ cmd, "now");
+
+cmdline_parse_inst_t cmd_send_policy_set = {
+ .f = cmd_send_policy_parsed,
+ .data = NULL,
+ .help_str = "send_policy now",
+ .tokens = {
+ (void *)&cmd_send_policy,
+ (void *)&cmd_send_policy_cmd_cmd,
+ NULL,
+ },
+};
+
cmdline_parse_ctx_t main_ctx[] = {
(cmdline_parse_inst_t *)&cmd_quit,
+ (cmdline_parse_inst_t *)&cmd_send_policy_set,
(cmdline_parse_inst_t *)&cmd_set_cpu_freq_set,
NULL,
};
diff --git a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
index 0c4bdd5b..277eab3d 100644
--- a/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
+++ b/examples/vm_power_manager/guest_cli/vm_power_cli_guest.h
@@ -40,12 +40,6 @@ extern "C" {
#include "channel_commands.h"
-int guest_channel_host_connect(unsigned lcore_id);
-
-int guest_channel_send_msg(struct channel_packet *pkt, unsigned lcore_id);
-
-void guest_channel_host_disconnect(unsigned lcore_id);
-
void run_cli(__attribute__((unused)) void *arg);
#ifdef __cplusplus
diff --git a/examples/vm_power_manager/main.c b/examples/vm_power_manager/main.c
index c33fcc93..399fbdd4 100644
--- a/examples/vm_power_manager/main.c
+++ b/examples/vm_power_manager/main.c
@@ -49,14 +49,206 @@
#include <rte_log.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
+#include <rte_ethdev.h>
+#include <getopt.h>
+#include <rte_cycles.h>
#include <rte_debug.h>
#include "channel_manager.h"
#include "channel_monitor.h"
#include "power_manager.h"
#include "vm_power_cli.h"
+#include <rte_pmd_ixgbe.h>
+#include <rte_pmd_i40e.h>
+#include <rte_pmd_bnxt.h>
+
+#define RX_RING_SIZE 512
+#define TX_RING_SIZE 512
+
+#define NUM_MBUFS 8191
+#define MBUF_CACHE_SIZE 250
+#define BURST_SIZE 32
+
+static uint32_t enabled_port_mask;
+static volatile bool force_quit;
+
+/****************/
+static const struct rte_eth_conf port_conf_default = {
+ .rxmode = { .max_rx_pkt_len = ETHER_MAX_LEN }
+};
+
+static inline int
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
+{
+ struct rte_eth_conf port_conf = port_conf_default;
+ const uint16_t rx_rings = 1, tx_rings = 1;
+ int retval;
+ uint16_t q;
+
+ if (port >= rte_eth_dev_count())
+ return -1;
+
+ /* Configure the Ethernet device. */
+ retval = rte_eth_dev_configure(port, rx_rings, tx_rings, &port_conf);
+ if (retval != 0)
+ return retval;
+
+ /* Allocate and set up 1 RX queue per Ethernet port. */
+ for (q = 0; q < rx_rings; q++) {
+ retval = rte_eth_rx_queue_setup(port, q, RX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL, mbuf_pool);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Allocate and set up 1 TX queue per Ethernet port. */
+ for (q = 0; q < tx_rings; q++) {
+ retval = rte_eth_tx_queue_setup(port, q, TX_RING_SIZE,
+ rte_eth_dev_socket_id(port), NULL);
+ if (retval < 0)
+ return retval;
+ }
+
+ /* Start the Ethernet port. */
+ retval = rte_eth_dev_start(port);
+ if (retval < 0)
+ return retval;
+
+ /* Display the port MAC address. */
+ struct ether_addr addr;
+ rte_eth_macaddr_get(port, &addr);
+ printf("Port %u MAC: %02" PRIx8 " %02" PRIx8 " %02" PRIx8
+ " %02" PRIx8 " %02" PRIx8 " %02" PRIx8 "\n",
+ (unsigned int)port,
+ addr.addr_bytes[0], addr.addr_bytes[1],
+ addr.addr_bytes[2], addr.addr_bytes[3],
+ addr.addr_bytes[4], addr.addr_bytes[5]);
+
+ /* Enable RX in promiscuous mode for the Ethernet device. */
+ rte_eth_promiscuous_enable(port);
+
+
+ return 0;
+}
static int
+parse_portmask(const char *portmask)
+{
+ char *end = NULL;
+ unsigned long pm;
+
+ /* parse hexadecimal string */
+ pm = strtoul(portmask, &end, 16);
+ if ((portmask[0] == '\0') || (end == NULL) || (*end != '\0'))
+ return -1;
+
+ if (pm == 0)
+ return -1;
+
+ return pm;
+}
+/* Parse the argument given in the command line of the application */
+static int
+parse_args(int argc, char **argv)
+{
+ int opt, ret;
+ char **argvopt;
+ int option_index;
+ char *prgname = argv[0];
+ static struct option lgopts[] = {
+ { "mac-updating", no_argument, 0, 1},
+ { "no-mac-updating", no_argument, 0, 0},
+ {NULL, 0, 0, 0}
+ };
+ argvopt = argv;
+
+ while ((opt = getopt_long(argc, argvopt, "p:q:T:",
+ lgopts, &option_index)) != EOF) {
+
+ switch (opt) {
+ /* portmask */
+ case 'p':
+ enabled_port_mask = parse_portmask(optarg);
+ if (enabled_port_mask == 0) {
+ printf("invalid portmask\n");
+ return -1;
+ }
+ break;
+ /* long options */
+ case 0:
+ break;
+
+ default:
+ return -1;
+ }
+ }
+
+ if (optind >= 0)
+ argv[optind-1] = prgname;
+
+ ret = optind-1;
+ optind = 0; /* reset getopt lib */
+ return ret;
+}
+
+static void
+check_all_ports_link_status(uint16_t port_num, uint32_t port_mask)
+{
+#define CHECK_INTERVAL 100 /* 100ms */
+#define MAX_CHECK_TIME 90 /* 9s (90 * 100ms) in total */
+ uint16_t portid, count, all_ports_up, print_flag = 0;
+ struct rte_eth_link link;
+
+ printf("\nChecking link status");
+ fflush(stdout);
+ for (count = 0; count <= MAX_CHECK_TIME; count++) {
+ if (force_quit)
+ return;
+ all_ports_up = 1;
+ for (portid = 0; portid < port_num; portid++) {
+ if (force_quit)
+ return;
+ if ((port_mask & (1 << portid)) == 0)
+ continue;
+ memset(&link, 0, sizeof(link));
+ rte_eth_link_get_nowait(portid, &link);
+ /* print link status if flag set */
+ if (print_flag == 1) {
+ if (link.link_status)
+ printf("Port %d Link Up - speed %u "
+ "Mbps - %s\n", (uint16_t)portid,
+ (unsigned int)link.link_speed,
+ (link.link_duplex == ETH_LINK_FULL_DUPLEX) ?
+ ("full-duplex") : ("half-duplex\n"));
+ else
+ printf("Port %d Link Down\n",
+ (uint16_t)portid);
+ continue;
+ }
+ /* clear all_ports_up flag if any link down */
+ if (link.link_status == ETH_LINK_DOWN) {
+ all_ports_up = 0;
+ break;
+ }
+ }
+ /* after finally printing all link status, get out */
+ if (print_flag == 1)
+ break;
+
+ if (all_ports_up == 0) {
+ printf(".");
+ fflush(stdout);
+ rte_delay_ms(CHECK_INTERVAL);
+ }
+
+ /* set the print_flag if all ports up or timeout */
+ if (all_ports_up == 1 || count == (MAX_CHECK_TIME - 1)) {
+ print_flag = 1;
+ printf("done\n");
+ }
+ }
+}
+static int
run_monitor(__attribute__((unused)) void *arg)
{
if (channel_monitor_init() < 0) {
@@ -82,6 +274,10 @@ main(int argc, char **argv)
{
int ret;
unsigned lcore_id;
+ unsigned int nb_ports;
+ struct rte_mempool *mbuf_pool;
+ uint16_t portid;
+
ret = rte_eal_init(argc, argv);
if (ret < 0)
@@ -90,12 +286,77 @@ main(int argc, char **argv)
signal(SIGINT, sig_handler);
signal(SIGTERM, sig_handler);
+ argc -= ret;
+ argv += ret;
+
+ /* parse application arguments (after the EAL ones) */
+ ret = parse_args(argc, argv);
+ if (ret < 0)
+ rte_exit(EXIT_FAILURE, "Invalid arguments\n");
+
+ nb_ports = rte_eth_dev_count();
+
+ mbuf_pool = rte_pktmbuf_pool_create("MBUF_POOL", NUM_MBUFS * nb_ports,
+ MBUF_CACHE_SIZE, 0, RTE_MBUF_DEFAULT_BUF_SIZE, rte_socket_id());
+
+ if (mbuf_pool == NULL)
+ rte_exit(EXIT_FAILURE, "Cannot create mbuf pool\n");
+
+ /* Initialize ports. */
+ for (portid = 0; portid < nb_ports; portid++) {
+ struct ether_addr eth;
+ int w, j;
+ int ret = -ENOTSUP;
+
+ if ((enabled_port_mask & (1 << portid)) == 0)
+ continue;
+
+ eth.addr_bytes[0] = 0xe0;
+ eth.addr_bytes[1] = 0xe0;
+ eth.addr_bytes[2] = 0xe0;
+ eth.addr_bytes[3] = 0xe0;
+ eth.addr_bytes[4] = portid + 0xf0;
+
+ if (port_init(portid, mbuf_pool) != 0)
+ rte_exit(EXIT_FAILURE, "Cannot init port %"PRIu8 "\n",
+ portid);
+
+ for (w = 0; w < MAX_VFS; w++) {
+ eth.addr_bytes[5] = w + 0xf0;
+
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_ixgbe_set_vf_mac_addr(portid,
+ w, &eth);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_i40e_set_vf_mac_addr(portid,
+ w, &eth);
+ if (ret == -ENOTSUP)
+ ret = rte_pmd_bnxt_set_vf_mac_addr(portid,
+ w, &eth);
+
+ switch (ret) {
+ case 0:
+ printf("Port %d VF %d MAC: ",
+ portid, w);
+ for (j = 0; j < 6; j++) {
+ printf("%02x", eth.addr_bytes[j]);
+ if (j < 5)
+ printf(":");
+ }
+ printf("\n");
+ break;
+ }
+ }
+ }
+
lcore_id = rte_get_next_lcore(-1, 1, 0);
if (lcore_id == RTE_MAX_LCORE) {
RTE_LOG(ERR, EAL, "A minimum of two cores are required to run "
"application\n");
return 0;
}
+
+ check_all_ports_link_status(nb_ports, enabled_port_mask);
rte_eal_remote_launch(run_monitor, NULL, lcore_id);
if (power_manager_init() < 0) {
diff --git a/examples/vm_power_manager/power_manager.c b/examples/vm_power_manager/power_manager.c
index 2644fce6..1834a823 100644
--- a/examples/vm_power_manager/power_manager.c
+++ b/examples/vm_power_manager/power_manager.c
@@ -108,7 +108,7 @@ set_host_cpus_mask(void)
int
power_manager_init(void)
{
- unsigned i, num_cpus;
+ unsigned int i, num_cpus, num_freqs;
uint64_t cpu_mask;
int ret = 0;
@@ -121,15 +121,21 @@ power_manager_init(void)
rte_power_set_env(PM_ENV_ACPI_CPUFREQ);
cpu_mask = global_enabled_cpus;
for (i = 0; cpu_mask; cpu_mask &= ~(1 << i++)) {
- if (rte_power_init(i) < 0 || rte_power_freqs(i,
- global_core_freq_info[i].freqs,
- RTE_MAX_LCORE_FREQS) == 0) {
- RTE_LOG(ERR, POWER_MANAGER, "Unable to initialize power manager "
+ if (rte_power_init(i) < 0)
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Unable to initialize power manager "
"for core %u\n", i);
+ num_freqs = rte_power_freqs(i, global_core_freq_info[i].freqs,
+ RTE_MAX_LCORE_FREQS);
+ if (num_freqs == 0) {
+ RTE_LOG(ERR, POWER_MANAGER,
+ "Unable to get frequency list for core %u\n",
+ i);
global_enabled_cpus &= ~(1 << i);
num_cpus--;
ret = -1;
}
+ global_core_freq_info[i].num_freqs = num_freqs;
rte_spinlock_init(&global_core_freq_info[i].power_sl);
}
RTE_LOG(INFO, POWER_MANAGER, "Detected %u host CPUs , enabled core mask:"
@@ -216,6 +222,24 @@ power_manager_scale_mask_max(uint64_t core_mask)
}
int
+power_manager_enable_turbo_mask(uint64_t core_mask)
+{
+ int ret = 0;
+
+ POWER_SCALE_MASK(enable_turbo, core_mask, ret);
+ return ret;
+}
+
+int
+power_manager_disable_turbo_mask(uint64_t core_mask)
+{
+ int ret = 0;
+
+ POWER_SCALE_MASK(disable_turbo, core_mask, ret);
+ return ret;
+}
+
+int
power_manager_scale_core_up(unsigned core_num)
{
int ret = 0;
@@ -250,3 +274,37 @@ power_manager_scale_core_max(unsigned core_num)
POWER_SCALE_CORE(max, core_num, ret);
return ret;
}
+
+int
+power_manager_enable_turbo_core(unsigned int core_num)
+{
+ int ret = 0;
+
+ POWER_SCALE_CORE(enable_turbo, core_num, ret);
+ return ret;
+}
+
+int
+power_manager_disable_turbo_core(unsigned int core_num)
+{
+ int ret = 0;
+
+ POWER_SCALE_CORE(disable_turbo, core_num, ret);
+ return ret;
+}
+
+int
+power_manager_scale_core_med(unsigned int core_num)
+{
+ int ret = 0;
+
+ if (core_num >= POWER_MGR_MAX_CPUS)
+ return -1;
+ if (!(global_enabled_cpus & (1ULL << core_num)))
+ return -1;
+ rte_spinlock_lock(&global_core_freq_info[core_num].power_sl);
+ ret = rte_power_set_freq(core_num,
+ global_core_freq_info[core_num].num_freqs / 2);
+ rte_spinlock_unlock(&global_core_freq_info[core_num].power_sl);
+ return ret;
+}
diff --git a/examples/vm_power_manager/power_manager.h b/examples/vm_power_manager/power_manager.h
index 1b45babf..b52fb4c7 100644
--- a/examples/vm_power_manager/power_manager.h
+++ b/examples/vm_power_manager/power_manager.h
@@ -113,6 +113,32 @@ int power_manager_scale_mask_min(uint64_t core_mask);
int power_manager_scale_mask_max(uint64_t core_mask);
/**
+ * Enable Turbo Boost on the cores specified in core_mask.
+ * It is thread-safe.
+ *
+ * @param core_mask
+ * The uint64_t bit-mask of cores to change frequency.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_enable_turbo_mask(uint64_t core_mask);
+
+/**
+ * Disable Turbo Boost on the cores specified in core_mask.
+ * It is thread-safe.
+ *
+ * @param core_mask
+ * The uint64_t bit-mask of cores to change frequency.
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_disable_turbo_mask(uint64_t core_mask);
+
+/**
* Scale up frequency for the core specified by core_num.
* It is thread-safe.
*
@@ -168,6 +194,32 @@ int power_manager_scale_core_min(unsigned core_num);
int power_manager_scale_core_max(unsigned core_num);
/**
+ * Enable Turbo Boost for the core specified by core_num.
+ * It is thread-safe.
+ *
+ * @param core_num
+ * The core number to boost
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_enable_turbo_core(unsigned int core_num);
+
+/**
+ * Disable Turbo Boost for the core specified by core_num.
+ * It is thread-safe.
+ *
+ * @param core_num
+ * The core number to disable Turbo Boost on
+ *
+ * @return
+ * - 1 on success.
+ * - Negative on error.
+ */
+int power_manager_disable_turbo_core(unsigned int core_num);
+
+/**
* Get the current freuency of the core specified by core_num
*
* @param core_num
@@ -179,6 +231,19 @@ int power_manager_scale_core_max(unsigned core_num);
*/
uint32_t power_manager_get_current_frequency(unsigned core_num);
+/**
+ * Scale to medium frequency for the core specified by core_num.
+ * It is thread-safe.
+ *
+ * @param core_num
+ * The core number to change frequency
+ *
+ * @return
+ * - 1 on success.
+ * - 0 if frequency not changed.
+ * - Negative on error.
+ */
+int power_manager_scale_core_med(unsigned int core_num);
#ifdef __cplusplus
}
diff --git a/examples/vm_power_manager/vm_power_cli.c b/examples/vm_power_manager/vm_power_cli.c
index c5e8d934..6f234fb7 100644
--- a/examples/vm_power_manager/vm_power_cli.c
+++ b/examples/vm_power_manager/vm_power_cli.c
@@ -520,6 +520,10 @@ cmd_set_cpu_freq_mask_parsed(void *parsed_result, struct cmdline *cl,
ret = power_manager_scale_mask_min(res->core_mask);
else if (!strcmp(res->cmd , "max"))
ret = power_manager_scale_mask_max(res->core_mask);
+ else if (!strcmp(res->cmd, "enable_turbo"))
+ ret = power_manager_enable_turbo_mask(res->core_mask);
+ else if (!strcmp(res->cmd, "disable_turbo"))
+ ret = power_manager_disable_turbo_mask(res->core_mask);
if (ret < 0) {
cmdline_printf(cl, "Error scaling core_mask(0x%"PRIx64") '%s' , not "
"all cores specified have been scaled\n",
@@ -535,14 +539,13 @@ cmdline_parse_token_num_t cmd_set_cpu_freq_mask_core_mask =
core_mask, UINT64);
cmdline_parse_token_string_t cmd_set_cpu_freq_mask_result =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_mask_result,
- cmd, "up#down#min#max");
+ cmd, "up#down#min#max#enable_turbo#disable_turbo");
cmdline_parse_inst_t cmd_set_cpu_freq_mask_set = {
.f = cmd_set_cpu_freq_mask_parsed,
.data = NULL,
- .help_str = "set_cpu_freq <core_mask> <up|down|min|max>, Set the current "
- "frequency for the cores specified in <core_mask> by scaling "
- "each up/down/min/max.",
+ .help_str = "set_cpu_freq <core_mask> <up|down|min|max|enable_turbo|disable_turbo>, adjust the current "
+ "frequency for the cores specified in <core_mask>",
.tokens = {
(void *)&cmd_set_cpu_freq_mask,
(void *)&cmd_set_cpu_freq_mask_core_mask,
@@ -614,6 +617,10 @@ cmd_set_cpu_freq_parsed(void *parsed_result, struct cmdline *cl,
ret = power_manager_scale_core_min(res->core_num);
else if (!strcmp(res->cmd , "max"))
ret = power_manager_scale_core_max(res->core_num);
+ else if (!strcmp(res->cmd, "enable_turbo"))
+ ret = power_manager_enable_turbo_core(res->core_num);
+ else if (!strcmp(res->cmd, "disable_turbo"))
+ ret = power_manager_disable_turbo_core(res->core_num);
if (ret < 0) {
cmdline_printf(cl, "Error scaling core(%u) '%s'\n", res->core_num,
res->cmd);
@@ -628,13 +635,13 @@ cmdline_parse_token_num_t cmd_set_cpu_freq_core_num =
core_num, UINT8);
cmdline_parse_token_string_t cmd_set_cpu_freq_cmd_cmd =
TOKEN_STRING_INITIALIZER(struct cmd_set_cpu_freq_result,
- cmd, "up#down#min#max");
+ cmd, "up#down#min#max#enable_turbo#disable_turbo");
cmdline_parse_inst_t cmd_set_cpu_freq_set = {
.f = cmd_set_cpu_freq_parsed,
.data = NULL,
- .help_str = "set_cpu_freq <core_num> <up|down|min|max>, Set the current "
- "frequency for the specified core by scaling up/down/min/max",
+ .help_str = "set_cpu_freq <core_num> <up|down|min|max|enable_turbo|disable_turbo>, adjust the current "
+ "frequency for the specified core",
.tokens = {
(void *)&cmd_set_cpu_freq,
(void *)&cmd_set_cpu_freq_core_num,
diff --git a/examples/vmdq/main.c b/examples/vmdq/main.c
index 8949a115..84e9937d 100644
--- a/examples/vmdq/main.c
+++ b/examples/vmdq/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -121,7 +119,7 @@ static const struct rte_eth_conf vmdq_conf_default = {
};
static unsigned lcore_ids[RTE_MAX_LCORE];
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports; /**< The number of ports specified in command line */
/* array used for printing out statistics */
@@ -186,7 +184,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf, uint32_t num_pools)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_rxconf *rxconf;
@@ -583,7 +581,7 @@ main(int argc, char *argv[])
unsigned lcore_id, core_id = 0;
int ret;
unsigned nb_ports, valid_num_ports;
- uint8_t portid;
+ uint16_t portid;
signal(SIGHUP, sighup_handler);
diff --git a/examples/vmdq_dcb/main.c b/examples/vmdq_dcb/main.c
index b6ebccb2..9dad2b8e 100644
--- a/examples/vmdq_dcb/main.c
+++ b/examples/vmdq_dcb/main.c
@@ -47,7 +47,6 @@
#include <rte_log.h>
#include <rte_memory.h>
#include <rte_memcpy.h>
-#include <rte_memzone.h>
#include <rte_eal.h>
#include <rte_launch.h>
#include <rte_atomic.h>
@@ -57,7 +56,6 @@
#include <rte_per_lcore.h>
#include <rte_branch_prediction.h>
#include <rte_interrupts.h>
-#include <rte_pci.h>
#include <rte_random.h>
#include <rte_debug.h>
#include <rte_ether.h>
@@ -87,7 +85,7 @@
/* mask of enabled ports */
static uint32_t enabled_port_mask;
-static uint8_t ports[RTE_MAX_ETHPORTS];
+static uint16_t ports[RTE_MAX_ETHPORTS];
static unsigned num_ports;
/* number of pools (if user does not specify any, 32 by default */
@@ -220,7 +218,7 @@ get_eth_conf(struct rte_eth_conf *eth_conf)
* coming from the mbuf_pool passed as parameter
*/
static inline int
-port_init(uint8_t port, struct rte_mempool *mbuf_pool)
+port_init(uint16_t port, struct rte_mempool *mbuf_pool)
{
struct rte_eth_dev_info dev_info;
struct rte_eth_conf port_conf = {0};
@@ -646,7 +644,7 @@ main(int argc, char *argv[])
uintptr_t i;
int ret;
unsigned nb_ports, valid_num_ports;
- uint8_t portid;
+ uint16_t portid;
signal(SIGHUP, sighup_handler);