Diffstat (limited to 'drivers/net/failsafe')
-rw-r--r--  drivers/net/failsafe/failsafe.c          |  11
-rw-r--r--  drivers/net/failsafe/failsafe_args.c     |  10
-rw-r--r--  drivers/net/failsafe/failsafe_eal.c      |   3
-rw-r--r--  drivers/net/failsafe/failsafe_ether.c    | 116
-rw-r--r--  drivers/net/failsafe/failsafe_flow.c     |  31
-rw-r--r--  drivers/net/failsafe/failsafe_intr.c     |   2
-rw-r--r--  drivers/net/failsafe/failsafe_ops.c      | 252
-rw-r--r--  drivers/net/failsafe/failsafe_private.h  |  15
-rw-r--r--  drivers/net/failsafe/failsafe_rxtx.c     |   2
9 files changed, 402 insertions(+), 40 deletions(-)
diff --git a/drivers/net/failsafe/failsafe.c b/drivers/net/failsafe/failsafe.c
index 657919f9..06e859e9 100644
--- a/drivers/net/failsafe/failsafe.c
+++ b/drivers/net/failsafe/failsafe.c
@@ -71,7 +71,7 @@ failsafe_hotplug_alarm_install(struct rte_eth_dev *dev)
return -EINVAL;
if (PRIV(dev)->pending_alarm)
return 0;
- ret = rte_eal_alarm_set(hotplug_poll * 1000,
+ ret = rte_eal_alarm_set(failsafe_hotplug_poll * 1000,
fs_hotplug_alarm,
dev);
if (ret) {
@@ -225,7 +225,7 @@ fs_eth_dev_create(struct rte_vdev_device *vdev)
goto unregister_new_callback;
}
mac = &dev->data->mac_addrs[0];
- if (mac_from_arg) {
+ if (failsafe_mac_from_arg) {
/*
* If MAC address was provided as a parameter,
* apply to all probed slaves.
@@ -280,7 +280,8 @@ free_args:
free_subs:
fs_sub_device_free(dev);
free_dev:
- rte_free(PRIV(dev));
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(dev);
return -1;
}
@@ -304,7 +305,9 @@ fs_rte_eth_free(const char *name)
ret = pthread_mutex_destroy(&PRIV(dev)->hotplug_mutex);
if (ret)
ERROR("Error while destroying hotplug mutex");
- rte_free(PRIV(dev));
+ rte_free(PRIV(dev)->mcast_addrs);
+ /* mac_addrs must not be freed alone because it is part of dev_private */
+ dev->data->mac_addrs = NULL;
rte_eth_dev_release_port(dev);
return ret;
}
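
Note on the failsafe.c hunks above: dev->data->mac_addrs points into the same allocation as the private data (PRIV(dev)), so it must never be freed on its own; rte_eth_dev_release_port() frees dev_private and would also free a non-NULL mac_addrs, hence the NULL assignment. A minimal sketch of the resulting teardown order, assuming that release semantic (fs_teardown() is a hypothetical helper, not part of the patch):

    static void
    fs_teardown(struct rte_eth_dev *dev)
    {
    	/* mcast_addrs is a separate rte_malloc() allocation. */
    	rte_free(PRIV(dev)->mcast_addrs);
    	/*
    	 * mac_addrs lives inside dev_private; clear the pointer so
    	 * rte_eth_dev_release_port() does not free it a second time.
    	 */
    	dev->data->mac_addrs = NULL;
    	rte_eth_dev_release_port(dev);	/* frees dev->data->dev_private */
    }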
diff --git a/drivers/net/failsafe/failsafe_args.c b/drivers/net/failsafe/failsafe_args.c
index 626883ce..c4b220c4 100644
--- a/drivers/net/failsafe/failsafe_args.c
+++ b/drivers/net/failsafe/failsafe_args.c
@@ -22,10 +22,10 @@
typedef int (parse_cb)(struct rte_eth_dev *dev, const char *params,
uint8_t head);
-uint64_t hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
-int mac_from_arg = 0;
+uint64_t failsafe_hotplug_poll = FAILSAFE_HOTPLUG_DEFAULT_TIMEOUT_MS;
+int failsafe_mac_from_arg;
-const char *pmd_failsafe_init_parameters[] = {
+static const char * const pmd_failsafe_init_parameters[] = {
PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
PMD_FAILSAFE_MAC_KVARG,
NULL,
@@ -420,7 +420,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
if (arg_count == 1) {
ret = rte_kvargs_process(kvlist,
PMD_FAILSAFE_HOTPLUG_POLL_KVARG,
- &fs_get_u64_arg, &hotplug_poll);
+ &fs_get_u64_arg, &failsafe_hotplug_poll);
if (ret < 0)
goto free_kvlist;
}
@@ -435,7 +435,7 @@ failsafe_args_parse(struct rte_eth_dev *dev, const char *params)
if (ret < 0)
goto free_kvlist;
- mac_from_arg = 1;
+ failsafe_mac_from_arg = 1;
}
}
PRIV(dev)->state = DEV_PARSED;
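
For reference, the two keys parsed above are supplied on the vdev devargs string; a hypothetical testpmd invocation (PCI address and values are placeholders) could look like:

    testpmd --vdev 'net_failsafe0,hotplug_poll=2000,mac=de:ad:be:ef:01:02,dev(0000:81:00.0)' -- -i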
diff --git a/drivers/net/failsafe/failsafe_eal.c b/drivers/net/failsafe/failsafe_eal.c
index ce1633f1..8a888b1f 100644
--- a/drivers/net/failsafe/failsafe_eal.c
+++ b/drivers/net/failsafe/failsafe_eal.c
@@ -144,8 +144,7 @@ fs_bus_uninit(struct rte_eth_dev *dev)
int ret = 0;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
- sdev_ret = rte_eal_hotplug_remove(sdev->bus->name,
- sdev->dev->name);
+ sdev_ret = rte_dev_remove(sdev->dev);
if (sdev_ret) {
ERROR("Failed to remove requested device %s (err: %d)",
sdev->dev->name, sdev_ret);
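
The replacement above tracks the EAL hotplug rework of this DPDK generation: removal now takes the struct rte_device handle, which already knows its bus, instead of a (bus name, device name) pair. Side by side, from the hunk:

    /* old: bus and device resolved by name */
    ret = rte_eal_hotplug_remove(sdev->bus->name, sdev->dev->name);
    /* new: the rte_device handle carries its bus */
    ret = rte_dev_remove(sdev->dev);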
diff --git a/drivers/net/failsafe/failsafe_ether.c b/drivers/net/failsafe/failsafe_ether.c
index 5b5cb3b4..17831652 100644
--- a/drivers/net/failsafe/failsafe_ether.c
+++ b/drivers/net/failsafe/failsafe_ether.c
@@ -179,6 +179,23 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
return ret;
}
}
+ /*
+ * Propagate multicast MAC addresses to sub-devices,
+ * but only if a non-zero number of addresses is set.
+ * The guard avoids breaking failsafe on sub-devices
+ * that do not support the operation when the feature
+ * is not actually used.
+ */
+ if (PRIV(dev)->nb_mcast_addr > 0) {
+ DEBUG("Configuring multicast MAC addresses");
+ ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ PRIV(dev)->mcast_addrs,
+ PRIV(dev)->nb_mcast_addr);
+ if (ret) {
+ ERROR("Failed to apply multicast MAC addresses");
+ return ret;
+ }
+ }
/* VLAN filter */
vfc1 = &dev->data->vlan_filter_conf;
vfc2 = &edev->data->vlan_filter_conf;
@@ -230,9 +247,9 @@ fs_eth_dev_conf_apply(struct rte_eth_dev *dev,
DEBUG("Creating flow #%" PRIu32, i++);
flow->flows[SUB_ID(sdev)] =
rte_flow_create(PORT_ID(sdev),
- &flow->fd->attr,
- flow->fd->items,
- flow->fd->actions,
+ flow->rule.attr,
+ flow->rule.pattern,
+ flow->rule.actions,
&ferror);
ret = rte_errno;
if (ret)
@@ -265,8 +282,7 @@ fs_dev_remove(struct sub_device *sdev)
sdev->state = DEV_PROBED;
/* fallthrough */
case DEV_PROBED:
- ret = rte_eal_hotplug_remove(sdev->bus->name,
- sdev->dev->name);
+ ret = rte_dev_remove(sdev->dev);
if (ret) {
ERROR("Bus detach failed for sub_device %u",
SUB_ID(sdev));
@@ -366,6 +382,88 @@ failsafe_dev_remove(struct rte_eth_dev *dev)
}
}
+static int
+failsafe_eth_dev_rx_queues_sync(struct rte_eth_dev *dev)
+{
+ struct rxq *rxq;
+ int ret;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+
+ if (rxq->info.conf.rx_deferred_start &&
+ dev->data->rx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ /*
+ * The subdevice Rx queue is not started on device
+ * start when its deferred start flag is set; it has
+ * to be started manually if the corresponding
+ * failsafe Rx queue was started earlier.
+ */
+ ret = dev->dev_ops->rx_queue_start(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Rx queue %d", i);
+ return ret;
+ }
+ } else if (dev->data->rx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ /*
+ * The subdevice Rx queue has to be stopped manually
+ * if the corresponding failsafe Rx queue was
+ * stopped earlier.
+ */
+ ret = dev->dev_ops->rx_queue_stop(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Rx queue %d", i);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
+static int
+failsafe_eth_dev_tx_queues_sync(struct rte_eth_dev *dev)
+{
+ struct txq *txq;
+ int ret;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+
+ if (txq->info.conf.tx_deferred_start &&
+ dev->data->tx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STARTED) {
+ /*
+ * The subdevice Tx queue is not started on device
+ * start when its deferred start flag is set; it has
+ * to be started manually if the corresponding
+ * failsafe Tx queue was started earlier.
+ */
+ ret = dev->dev_ops->tx_queue_start(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Tx queue %d", i);
+ return ret;
+ }
+ } else if (dev->data->tx_queue_state[i] ==
+ RTE_ETH_QUEUE_STATE_STOPPED) {
+ /*
+ * The subdevice Tx queue has to be stopped manually
+ * if the corresponding failsafe Tx queue was
+ * stopped earlier.
+ */
+ ret = dev->dev_ops->tx_queue_stop(dev, i);
+ if (ret) {
+ ERROR("Could not synchronize Tx queue %d", i);
+ return ret;
+ }
+ }
+ }
+ return 0;
+}
+
int
failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
{
@@ -424,6 +522,12 @@ failsafe_eth_dev_state_sync(struct rte_eth_dev *dev)
ret = dev->dev_ops->dev_start(dev);
if (ret)
goto err_remove;
+ ret = failsafe_eth_dev_rx_queues_sync(dev);
+ if (ret)
+ goto err_remove;
+ ret = failsafe_eth_dev_tx_queues_sync(dev);
+ if (ret)
+ goto err_remove;
return 0;
err_remove:
FOREACH_SUBDEV(sdev, i, dev)
@@ -466,7 +570,7 @@ failsafe_eth_rmv_event_callback(uint16_t port_id __rte_unused,
/* Switch as soon as possible tx_dev. */
fs_switch_dev(sdev->fs_dev, sdev);
/* Use safe bursts in any case. */
- set_burst_fn(sdev->fs_dev, 1);
+ failsafe_set_burst_fn(sdev->fs_dev, 1);
/*
* Async removal, the sub-PMD will try to unregister
* the callback at the source of the current thread context.
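
The two sync helpers added above replay queue states onto a freshly plugged sub-device, because failsafe tracks Rx/Tx queue state on its own port while sub-devices come and go. From the application side, the deferred-start flow they mirror looks roughly like this (a sketch; port_id, descriptor count and mb_pool are placeholders):

    struct rte_eth_rxconf rx_conf = { .rx_deferred_start = 1 };

    rte_eth_rx_queue_setup(port_id, 0, 512, rte_socket_id(),
    			   &rx_conf, mb_pool);
    rte_eth_dev_start(port_id);			/* queue 0 stays STOPPED */
    rte_eth_dev_rx_queue_start(port_id, 0);	/* started explicitly later */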
diff --git a/drivers/net/failsafe/failsafe_flow.c b/drivers/net/failsafe/failsafe_flow.c
index bfe42fce..5e2b5f7c 100644
--- a/drivers/net/failsafe/failsafe_flow.c
+++ b/drivers/net/failsafe/failsafe_flow.c
@@ -3,8 +3,11 @@
* Copyright 2017 Mellanox Technologies, Ltd
*/
+#include <stddef.h>
+#include <string.h>
#include <sys/queue.h>
+#include <rte_errno.h>
#include <rte_malloc.h>
#include <rte_tailq.h>
#include <rte_flow.h>
@@ -18,19 +21,33 @@ fs_flow_allocate(const struct rte_flow_attr *attr,
const struct rte_flow_action *actions)
{
struct rte_flow *flow;
- size_t fdsz;
+ const struct rte_flow_conv_rule rule = {
+ .attr_ro = attr,
+ .pattern_ro = items,
+ .actions_ro = actions,
+ };
+ struct rte_flow_error error;
+ int ret;
- fdsz = rte_flow_copy(NULL, 0, attr, items, actions);
- flow = rte_zmalloc(NULL,
- sizeof(struct rte_flow) + fdsz,
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
+ if (ret < 0) {
+ ERROR("Unable to process flow rule (%s): %s",
+ error.message ? error.message : "unspecified",
+ strerror(rte_errno));
+ return NULL;
+ }
+ flow = rte_zmalloc(NULL, offsetof(struct rte_flow, rule) + ret,
RTE_CACHE_LINE_SIZE);
if (flow == NULL) {
ERROR("Could not allocate new flow");
return NULL;
}
- flow->fd = (void *)((uintptr_t)flow + sizeof(*flow));
- if (rte_flow_copy(flow->fd, fdsz, attr, items, actions) != fdsz) {
- ERROR("Failed to copy flow description");
+ ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ret, &rule,
+ &error);
+ if (ret < 0) {
+ ERROR("Failed to copy flow rule (%s): %s",
+ error.message ? error.message : "unspecified",
+ strerror(rte_errno));
rte_free(flow);
return NULL;
}
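
The conversion above replaces the deprecated rte_flow_copy() with rte_flow_conv(), which uses a size-then-copy idiom: called with a NULL/zero destination it returns the number of bytes the converted rule needs, and a second call performs the copy. The idiom, reduced to its core (error handling elided; dst is a placeholder buffer of len bytes):

    const struct rte_flow_conv_rule rule = {
    	.attr_ro = attr,
    	.pattern_ro = items,
    	.actions_ro = actions,
    };
    int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &error);
    /* ... allocate len bytes at dst ... */
    len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, len, &rule, &error);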
diff --git a/drivers/net/failsafe/failsafe_intr.c b/drivers/net/failsafe/failsafe_intr.c
index fc6ec37f..1c2cb71c 100644
--- a/drivers/net/failsafe/failsafe_intr.c
+++ b/drivers/net/failsafe/failsafe_intr.c
@@ -372,7 +372,7 @@ void failsafe_rx_intr_uninstall_subdevice(struct sub_device *sdev)
for (qid = 0; qid < ETH(sdev)->data->nb_rx_queues; qid++) {
if (qid < fsdev->data->nb_rx_queues) {
fsrxq = fsdev->data->rx_queues[qid];
- if (fsrxq->enable_events)
+ if (fsrxq != NULL && fsrxq->enable_events)
rte_eth_dev_rx_intr_disable(PORT_ID(sdev),
qid);
}
diff --git a/drivers/net/failsafe/failsafe_ops.c b/drivers/net/failsafe/failsafe_ops.c
index 24e91c93..7f8bcd4c 100644
--- a/drivers/net/failsafe/failsafe_ops.c
+++ b/drivers/net/failsafe/failsafe_ops.c
@@ -57,7 +57,6 @@ static struct rte_eth_dev_info default_infos = {
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY,
@@ -74,7 +73,6 @@ static struct rte_eth_dev_info default_infos = {
DEV_RX_OFFLOAD_VLAN_FILTER |
DEV_RX_OFFLOAD_VLAN_EXTEND |
DEV_RX_OFFLOAD_JUMBO_FRAME |
- DEV_RX_OFFLOAD_CRC_STRIP |
DEV_RX_OFFLOAD_SCATTER |
DEV_RX_OFFLOAD_TIMESTAMP |
DEV_RX_OFFLOAD_SECURITY,
@@ -88,6 +86,9 @@ static struct rte_eth_dev_info default_infos = {
ETH_RSS_IP |
ETH_RSS_UDP |
ETH_RSS_TCP,
+ .dev_capa =
+ RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP |
+ RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP,
};
static int
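
RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP and RTE_ETH_DEV_CAPA_RUNTIME_TX_QUEUE_SETUP advertise that queues may be set up while the port is running; failsafe can only claim them when every sub-device does, hence the logical AND added to fs_dev_infos_get() further down. An application would probe the capability like this (a sketch; port_id is a placeholder):

    struct rte_eth_dev_info dev_info;

    rte_eth_dev_info_get(port_id, &dev_info);
    if (dev_info.dev_capa & RTE_ETH_DEV_CAPA_RUNTIME_RX_QUEUE_SETUP) {
    	/* Rx queues may be set up without stopping the port. */
    }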
@@ -170,6 +171,27 @@ fs_dev_configure(struct rte_eth_dev *dev)
return 0;
}
+static void
+fs_set_queues_state_start(struct rte_eth_dev *dev)
+{
+ struct rxq *rxq;
+ struct txq *txq;
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++) {
+ rxq = dev->data->rx_queues[i];
+ if (rxq != NULL && !rxq->info.conf.rx_deferred_start)
+ dev->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+ for (i = 0; i < dev->data->nb_tx_queues; i++) {
+ txq = dev->data->tx_queues[i];
+ if (txq != NULL && !txq->info.conf.tx_deferred_start)
+ dev->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STARTED;
+ }
+}
+
static int
fs_dev_start(struct rte_eth_dev *dev)
{
@@ -204,14 +226,31 @@ fs_dev_start(struct rte_eth_dev *dev)
}
sdev->state = DEV_STARTED;
}
- if (PRIV(dev)->state < DEV_STARTED)
+ if (PRIV(dev)->state < DEV_STARTED) {
PRIV(dev)->state = DEV_STARTED;
+ fs_set_queues_state_start(dev);
+ }
fs_switch_dev(dev, NULL);
fs_unlock(dev, 0);
return 0;
}
static void
+fs_set_queues_state_stop(struct rte_eth_dev *dev)
+{
+ uint16_t i;
+
+ for (i = 0; i < dev->data->nb_rx_queues; i++)
+ if (dev->data->rx_queues[i] != NULL)
+ dev->data->rx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+ for (i = 0; i < dev->data->nb_tx_queues; i++)
+ if (dev->data->tx_queues[i] != NULL)
+ dev->data->tx_queue_state[i] =
+ RTE_ETH_QUEUE_STATE_STOPPED;
+}
+
+static void
fs_dev_stop(struct rte_eth_dev *dev)
{
struct sub_device *sdev;
@@ -225,6 +264,7 @@ fs_dev_stop(struct rte_eth_dev *dev)
sdev->state = DEV_STARTED - 1;
}
failsafe_rx_intr_uninstall(dev);
+ fs_set_queues_state_stop(dev);
fs_unlock(dev, 0);
}
@@ -294,6 +334,112 @@ fs_dev_close(struct rte_eth_dev *dev)
fs_unlock(dev, 0);
}
+static int
+fs_rx_queue_stop(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ int err = 0;
+ bool failure = true;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_rx_queue_stop(port_id, rx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Rx queue stop failed for subdevice %d", i);
+ err = ret;
+ } else {
+ failure = false;
+ }
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ fs_unlock(dev, 0);
+ /* Return 0 if at least one queue stop succeeded */
+ return (failure) ? err : 0;
+}
+
+static int
+fs_rx_queue_start(struct rte_eth_dev *dev, uint16_t rx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_rx_queue_start(port_id, rx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Rx queue start failed for subdevice %d", i);
+ fs_rx_queue_stop(dev, rx_queue_id);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ dev->data->rx_queue_state[rx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
+static int
+fs_tx_queue_stop(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ int err = 0;
+ bool failure = true;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_tx_queue_stop(port_id, tx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Tx queue stop failed for subdevice %d", i);
+ err = ret;
+ } else {
+ failure = false;
+ }
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STOPPED;
+ fs_unlock(dev, 0);
+ /* Return 0 if at least one queue stop succeeded */
+ return (failure) ? err : 0;
+}
+
+static int
+fs_tx_queue_start(struct rte_eth_dev *dev, uint16_t tx_queue_id)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+
+ fs_lock(dev, 0);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ uint16_t port_id = ETH(sdev)->data->port_id;
+
+ ret = rte_eth_dev_tx_queue_start(port_id, tx_queue_id);
+ ret = fs_err(sdev, ret);
+ if (ret) {
+ ERROR("Tx queue start failed for subdevice %d", i);
+ fs_tx_queue_stop(dev, tx_queue_id);
+ fs_unlock(dev, 0);
+ return ret;
+ }
+ }
+ dev->data->tx_queue_state[tx_queue_id] = RTE_ETH_QUEUE_STATE_STARTED;
+ fs_unlock(dev, 0);
+ return 0;
+}
+
static void
fs_rx_queue_release(void *queue)
{
@@ -309,9 +455,13 @@ fs_rx_queue_release(void *queue)
fs_lock(dev, 0);
if (rxq->event_fd > 0)
close(rxq->event_fd);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, rx_queue_release)
- (ETH(sdev)->data->rx_queues[rxq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ if (ETH(sdev)->data->rx_queues != NULL &&
+ ETH(sdev)->data->rx_queues[rxq->qid] != NULL) {
+ SUBOPS(sdev, rx_queue_release)
+ (ETH(sdev)->data->rx_queues[rxq->qid]);
+ }
+ }
dev->data->rx_queues[rxq->qid] = NULL;
rte_free(rxq);
fs_unlock(dev, 0);
@@ -341,6 +491,16 @@ fs_rx_queue_setup(struct rte_eth_dev *dev,
int ret;
fs_lock(dev, 0);
+ if (rx_conf->rx_deferred_start) {
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ if (SUBOPS(sdev, rx_queue_start) == NULL) {
+ ERROR("Rx queue deferred start is not "
+ "supported for subdevice %d", i);
+ fs_unlock(dev, 0);
+ return -EINVAL;
+ }
+ }
+ }
rxq = dev->data->rx_queues[rx_queue_id];
if (rxq != NULL) {
fs_rx_queue_release(rxq);
@@ -477,9 +637,13 @@ fs_tx_queue_release(void *queue)
txq = queue;
dev = txq->priv->dev;
fs_lock(dev, 0);
- FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE)
- SUBOPS(sdev, tx_queue_release)
- (ETH(sdev)->data->tx_queues[txq->qid]);
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ if (ETH(sdev)->data->tx_queues != NULL &&
+ ETH(sdev)->data->tx_queues[txq->qid] != NULL) {
+ SUBOPS(sdev, tx_queue_release)
+ (ETH(sdev)->data->tx_queues[txq->qid]);
+ }
+ }
dev->data->tx_queues[txq->qid] = NULL;
rte_free(txq);
fs_unlock(dev, 0);
@@ -498,6 +662,16 @@ fs_tx_queue_setup(struct rte_eth_dev *dev,
int ret;
fs_lock(dev, 0);
+ if (tx_conf->tx_deferred_start) {
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
+ if (SUBOPS(sdev, tx_queue_start) == NULL) {
+ ERROR("Tx queue deferred start is not "
+ "supported for subdevice %d", i);
+ fs_unlock(dev, 0);
+ return -EINVAL;
+ }
+ }
+ }
txq = dev->data->tx_queues[tx_queue_id];
if (txq != NULL) {
fs_tx_queue_release(txq);
@@ -716,6 +890,8 @@ fs_stats_reset(struct rte_eth_dev *dev)
* all sub_devices and the default capabilities.
* Uses a logical AND of TX capabilities among
* the active probed sub_device and the default capabilities.
+ * Uses a logical AND of device capabilities among
+ * all sub_devices and the default capabilities.
*
*/
static void
@@ -734,10 +910,12 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
uint64_t rx_offload_capa;
uint64_t rxq_offload_capa;
uint64_t rss_hf_offload_capa;
+ uint64_t dev_capa;
rx_offload_capa = default_infos.rx_offload_capa;
rxq_offload_capa = default_infos.rx_queue_offload_capa;
rss_hf_offload_capa = default_infos.flow_type_rss_offloads;
+ dev_capa = default_infos.dev_capa;
FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_PROBED) {
rte_eth_dev_info_get(PORT_ID(sdev),
&PRIV(dev)->infos);
@@ -746,12 +924,14 @@ fs_dev_infos_get(struct rte_eth_dev *dev,
PRIV(dev)->infos.rx_queue_offload_capa;
rss_hf_offload_capa &=
PRIV(dev)->infos.flow_type_rss_offloads;
+ dev_capa &= PRIV(dev)->infos.dev_capa;
}
sdev = TX_SUBDEV(dev);
rte_eth_dev_info_get(PORT_ID(sdev), &PRIV(dev)->infos);
PRIV(dev)->infos.rx_offload_capa = rx_offload_capa;
PRIV(dev)->infos.rx_queue_offload_capa = rxq_offload_capa;
PRIV(dev)->infos.flow_type_rss_offloads = rss_hf_offload_capa;
+ PRIV(dev)->infos.dev_capa = dev_capa;
PRIV(dev)->infos.tx_offload_capa &=
default_infos.tx_offload_capa;
PRIV(dev)->infos.tx_queue_offload_capa &=
@@ -953,6 +1133,55 @@ fs_mac_addr_set(struct rte_eth_dev *dev, struct ether_addr *mac_addr)
}
static int
+fs_set_mc_addr_list(struct rte_eth_dev *dev,
+ struct ether_addr *mc_addr_set, uint32_t nb_mc_addr)
+{
+ struct sub_device *sdev;
+ uint8_t i;
+ int ret;
+ void *mcast_addrs;
+
+ fs_lock(dev, 0);
+
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ ret = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ mc_addr_set, nb_mc_addr);
+ if (ret != 0) {
+ ERROR("Operation rte_eth_dev_set_mc_addr_list failed for sub_device %d with error %d",
+ i, ret);
+ goto rollback;
+ }
+ }
+
+ mcast_addrs = rte_realloc(PRIV(dev)->mcast_addrs,
+ nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]), 0);
+ if (mcast_addrs == NULL && nb_mc_addr > 0) {
+ ret = -ENOMEM;
+ goto rollback;
+ }
+ rte_memcpy(mcast_addrs, mc_addr_set,
+ nb_mc_addr * sizeof(PRIV(dev)->mcast_addrs[0]));
+ PRIV(dev)->nb_mcast_addr = nb_mc_addr;
+ PRIV(dev)->mcast_addrs = mcast_addrs;
+
+ fs_unlock(dev, 0);
+ return 0;
+
+rollback:
+ FOREACH_SUBDEV_STATE(sdev, i, dev, DEV_ACTIVE) {
+ int rc = rte_eth_dev_set_mc_addr_list(PORT_ID(sdev),
+ PRIV(dev)->mcast_addrs, PRIV(dev)->nb_mcast_addr);
+ if (rc != 0) {
+ ERROR("Multicast MAC address list rollback for sub_device %d failed with error %d",
+ i, rc);
+ }
+ }
+
+ fs_unlock(dev, 0);
+ return ret;
+}
+
+static int
fs_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf)
{
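
fs_set_mc_addr_list() above applies the new list to every active sub-device before committing the private copy, so a failure can roll all sub-devices back to the previously committed list. Application-side usage is the plain ethdev call (addresses are placeholder multicast MACs):

    struct ether_addr mc[] = {
    	{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 } },
    	{ .addr_bytes = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0xfb } },
    };

    ret = rte_eth_dev_set_mc_addr_list(port_id, mc, RTE_DIM(mc));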
@@ -1025,6 +1254,10 @@ const struct eth_dev_ops failsafe_ops = {
.dev_supported_ptypes_get = fs_dev_supported_ptypes_get,
.mtu_set = fs_mtu_set,
.vlan_filter_set = fs_vlan_filter_set,
+ .rx_queue_start = fs_rx_queue_start,
+ .rx_queue_stop = fs_rx_queue_stop,
+ .tx_queue_start = fs_tx_queue_start,
+ .tx_queue_stop = fs_tx_queue_stop,
.rx_queue_setup = fs_rx_queue_setup,
.tx_queue_setup = fs_tx_queue_setup,
.rx_queue_release = fs_rx_queue_release,
@@ -1036,6 +1269,7 @@ const struct eth_dev_ops failsafe_ops = {
.mac_addr_remove = fs_mac_addr_remove,
.mac_addr_add = fs_mac_addr_add,
.mac_addr_set = fs_mac_addr_set,
+ .set_mc_addr_list = fs_set_mc_addr_list,
.rss_hash_update = fs_rss_hash_update,
.filter_ctrl = fs_filter_ctrl,
};
diff --git a/drivers/net/failsafe/failsafe_private.h b/drivers/net/failsafe/failsafe_private.h
index 886af861..7e318968 100644
--- a/drivers/net/failsafe/failsafe_private.h
+++ b/drivers/net/failsafe/failsafe_private.h
@@ -6,6 +6,7 @@
#ifndef _RTE_ETH_FAILSAFE_PRIVATE_H_
#define _RTE_ETH_FAILSAFE_PRIVATE_H_
+#include <stdint.h>
#include <sys/queue.h>
#include <pthread.h>
@@ -13,6 +14,7 @@
#include <rte_dev.h>
#include <rte_ethdev_driver.h>
#include <rte_devargs.h>
+#include <rte_flow.h>
#include <rte_interrupts.h>
#define FAILSAFE_DRIVER_NAME "Fail-safe PMD"
@@ -81,7 +83,8 @@ struct rte_flow {
/* sub_flows */
struct rte_flow *flows[FAILSAFE_MAX_ETHPORTS];
/* flow description for synchronization */
- struct rte_flow_desc *fd;
+ struct rte_flow_conv_rule rule;
+ uint8_t rule_data[];
};
enum dev_state {
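
With the rule member and the trailing flexible rule_data[] array, the converted flow description is stored inline after the flow object: rte_flow_conv(RTE_FLOW_CONV_OP_RULE, &flow->rule, ...) fills in the rte_flow_conv_rule pointers and copies the objects they reference into the bytes that follow, which is why fs_flow_allocate() sizes the allocation as offsetof(struct rte_flow, rule) + ret. A sketch of the intended layout (an assumption drawn from the allocation code above, not a literal dump):

    /* single rte_zmalloc() block:                           */
    /* [ struct rte_flow fields ... ]                        */
    /* [ rule: attr/pattern/actions pointers ] <- conv dst   */
    /* [ rule_data[]: copied attr, items, actions payload ]  */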
@@ -143,6 +146,8 @@ struct fs_priv {
uint32_t nb_mac_addr;
struct ether_addr mac_addrs[FAILSAFE_MAX_ETHADDR];
uint32_t mac_addr_pool[FAILSAFE_MAX_ETHADDR];
+ uint32_t nb_mcast_addr;
+ struct ether_addr *mcast_addrs;
/* current capabilities */
struct rte_eth_dev_info infos;
struct rte_eth_dev_owner my_owner; /* Unique owner. */
@@ -188,7 +193,7 @@ int failsafe_hotplug_alarm_cancel(struct rte_eth_dev *dev);
/* RX / TX */
-void set_burst_fn(struct rte_eth_dev *dev, int force_safe);
+void failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe);
uint16_t failsafe_rx_burst(void *rxq,
struct rte_mbuf **rx_pkts, uint16_t nb_pkts);
@@ -234,8 +239,8 @@ int failsafe_eth_new_event_callback(uint16_t port_id,
extern const char pmd_failsafe_driver_name[];
extern const struct eth_dev_ops failsafe_ops;
extern const struct rte_flow_ops fs_flow_ops;
-extern uint64_t hotplug_poll;
-extern int mac_from_arg;
+extern uint64_t failsafe_hotplug_poll;
+extern int failsafe_mac_from_arg;
/* HELPERS */
@@ -468,7 +473,7 @@ fs_switch_dev(struct rte_eth_dev *dev,
} else {
return;
}
- set_burst_fn(dev, 0);
+ failsafe_set_burst_fn(dev, 0);
rte_wmb();
}
diff --git a/drivers/net/failsafe/failsafe_rxtx.c b/drivers/net/failsafe/failsafe_rxtx.c
index 7bd0f963..034f47b8 100644
--- a/drivers/net/failsafe/failsafe_rxtx.c
+++ b/drivers/net/failsafe/failsafe_rxtx.c
@@ -29,7 +29,7 @@ fs_tx_unsafe(struct sub_device *sdev)
}
void
-set_burst_fn(struct rte_eth_dev *dev, int force_safe)
+failsafe_set_burst_fn(struct rte_eth_dev *dev, int force_safe)
{
struct sub_device *sdev;
uint8_t i;