author    | Luca Boccassi <luca.boccassi@gmail.com> | 2018-11-01 11:59:50 +0000
committer | Luca Boccassi <luca.boccassi@gmail.com> | 2018-11-01 12:00:19 +0000
commit    | 8d01b9cd70a67cdafd5b965a70420c3bd7fb3f82 (patch)
tree      | 208e3bc33c220854d89d010e3abf720a2e62e546 /drivers/net/enic/enic_flow.c
parent    | b63264c8342e6a1b6971c79550d2af2024b6a4de (diff)
New upstream version 18.11-rc1 (tag: upstream/18.11-rc1)
Change-Id: Iaa71986dd6332e878d8f4bf493101b2bbc6313bb
Signed-off-by: Luca Boccassi <luca.boccassi@gmail.com>
Diffstat (limited to 'drivers/net/enic/enic_flow.c')
-rw-r--r-- | drivers/net/enic/enic_flow.c | 180
1 file changed, 161 insertions(+), 19 deletions(-)
diff --git a/drivers/net/enic/enic_flow.c b/drivers/net/enic/enic_flow.c
index 0cf04aef..bb9ed037 100644
--- a/drivers/net/enic/enic_flow.c
+++ b/drivers/net/enic/enic_flow.c
@@ -289,6 +289,15 @@ static const enum rte_flow_action_type enic_supported_actions_v2_drop[] = {
 	RTE_FLOW_ACTION_TYPE_END,
 };
 
+static const enum rte_flow_action_type enic_supported_actions_v2_count[] = {
+	RTE_FLOW_ACTION_TYPE_QUEUE,
+	RTE_FLOW_ACTION_TYPE_MARK,
+	RTE_FLOW_ACTION_TYPE_FLAG,
+	RTE_FLOW_ACTION_TYPE_DROP,
+	RTE_FLOW_ACTION_TYPE_COUNT,
+	RTE_FLOW_ACTION_TYPE_END,
+};
+
 /** Action capabilities indexed by NIC version information */
 static const struct enic_action_cap enic_action_cap[] = {
 	[FILTER_ACTION_RQ_STEERING_FLAG] = {
@@ -303,6 +312,10 @@ static const struct enic_action_cap enic_action_cap[] = {
 		.actions = enic_supported_actions_v2_drop,
 		.copy_fn = enic_copy_action_v2,
 	},
+	[FILTER_ACTION_COUNTER_FLAG] = {
+		.actions = enic_supported_actions_v2_count,
+		.copy_fn = enic_copy_action_v2,
+	},
 };
 
 static int
@@ -1068,6 +1081,10 @@ enic_copy_action_v2(const struct rte_flow_action actions[],
 			enic_action->flags |= FILTER_ACTION_DROP_FLAG;
 			break;
 		}
+		case RTE_FLOW_ACTION_TYPE_COUNT: {
+			enic_action->flags |= FILTER_ACTION_COUNTER_FLAG;
+			break;
+		}
 		case RTE_FLOW_ACTION_TYPE_VOID:
 			continue;
 		default:
@@ -1112,7 +1129,9 @@ enic_get_action_cap(struct enic *enic)
 	uint8_t actions;
 
 	actions = enic->filter_actions;
-	if (actions & FILTER_ACTION_DROP_FLAG)
+	if (actions & FILTER_ACTION_COUNTER_FLAG)
+		ea = &enic_action_cap[FILTER_ACTION_COUNTER_FLAG];
+	else if (actions & FILTER_ACTION_DROP_FLAG)
 		ea = &enic_action_cap[FILTER_ACTION_DROP_FLAG];
 	else if (actions & FILTER_ACTION_FILTER_ID_FLAG)
 		ea = &enic_action_cap[FILTER_ACTION_FILTER_ID_FLAG];
@@ -1395,8 +1414,10 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
 		   struct rte_flow_error *error)
 {
 	struct rte_flow *flow;
-	int ret;
-	u16 entry;
+	int err;
+	uint16_t entry;
+	int ctr_idx;
+	int last_max_flow_ctr;
 
 	FLOW_TRACE();
 
@@ -1407,20 +1428,64 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
 		return NULL;
 	}
 
+	flow->counter_idx = -1;
+	last_max_flow_ctr = -1;
+	if (enic_action->flags & FILTER_ACTION_COUNTER_FLAG) {
+		if (!vnic_dev_counter_alloc(enic->vdev, (uint32_t *)&ctr_idx)) {
+			rte_flow_error_set(error, ENOMEM,
+					   RTE_FLOW_ERROR_TYPE_ACTION_CONF,
+					   NULL, "cannot allocate counter");
+			goto unwind_flow_alloc;
+		}
+		flow->counter_idx = ctr_idx;
+		enic_action->counter_index = ctr_idx;
+
+		/* If index is the largest, increase the counter DMA size */
+		if (ctr_idx > enic->max_flow_counter) {
+			err = vnic_dev_counter_dma_cfg(enic->vdev,
						VNIC_FLOW_COUNTER_UPDATE_MSECS,
						ctr_idx + 1);
+			if (err) {
+				rte_flow_error_set(error, -err,
					RTE_FLOW_ERROR_TYPE_ACTION_CONF,
					NULL, "counter DMA config failed");
+				goto unwind_ctr_alloc;
+			}
+			last_max_flow_ctr = enic->max_flow_counter;
+			enic->max_flow_counter = ctr_idx;
+		}
+	}
+
 	/* entry[in] is the queue id, entry[out] is the filter Id for delete */
 	entry = enic_action->rq_idx;
-	ret = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
+	err = vnic_dev_classifier(enic->vdev, CLSF_ADD, &entry, enic_filter,
				  enic_action);
-	if (!ret) {
-		flow->enic_filter_id = entry;
-		flow->enic_filter = *enic_filter;
-	} else {
-		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+	if (err) {
+		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
				   NULL, "vnic_dev_classifier error");
-		rte_free(flow);
-		return NULL;
+		goto unwind_ctr_dma_cfg;
 	}
+
+	flow->enic_filter_id = entry;
+	flow->enic_filter = *enic_filter;
+
 	return flow;
+
+/* unwind if there are errors */
+unwind_ctr_dma_cfg:
+	if (last_max_flow_ctr != -1) {
+		/* reduce counter DMA size */
+		vnic_dev_counter_dma_cfg(enic->vdev,
					 VNIC_FLOW_COUNTER_UPDATE_MSECS,
					 last_max_flow_ctr + 1);
+		enic->max_flow_counter = last_max_flow_ctr;
+	}
+unwind_ctr_alloc:
+	if (flow->counter_idx != -1)
+		vnic_dev_counter_free(enic->vdev, ctr_idx);
+unwind_flow_alloc:
+	rte_free(flow);
+	return NULL;
 }
 
 /**
@@ -1435,18 +1500,29 @@ enic_flow_add_filter(struct enic *enic, struct filter_v2 *enic_filter,
  * @param error[out]
  */
 static int
-enic_flow_del_filter(struct enic *enic, u16 filter_id,
+enic_flow_del_filter(struct enic *enic, struct rte_flow *flow,
		   struct rte_flow_error *error)
 {
-	int ret;
+	u16 filter_id;
+	int err;
 
 	FLOW_TRACE();
 
-	ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
-	if (!ret)
-		rte_flow_error_set(error, ret, RTE_FLOW_ERROR_TYPE_HANDLE,
+	filter_id = flow->enic_filter_id;
+	err = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL, NULL);
+	if (err) {
+		rte_flow_error_set(error, -err, RTE_FLOW_ERROR_TYPE_HANDLE,
			NULL, "vnic_dev_classifier failed");
-	return ret;
+		return -err;
+	}
+
+	if (flow->counter_idx != -1) {
+		if (!vnic_dev_counter_free(enic->vdev, flow->counter_idx))
+			dev_err(enic, "counter free failed, idx: %d\n",
				flow->counter_idx);
+		flow->counter_idx = -1;
+	}
+	return 0;
 }
 
 /*
@@ -1529,9 +1605,10 @@ enic_flow_destroy(struct rte_eth_dev *dev, struct rte_flow *flow,
 	FLOW_TRACE();
 
 	rte_spinlock_lock(&enic->flows_lock);
-	enic_flow_del_filter(enic, flow->enic_filter_id, error);
+	enic_flow_del_filter(enic, flow, error);
 	LIST_REMOVE(flow, next);
 	rte_spinlock_unlock(&enic->flows_lock);
+	rte_free(flow);
 	return 0;
 }
 
@@ -1553,13 +1630,77 @@ enic_flow_flush(struct rte_eth_dev *dev, struct rte_flow_error *error)
 
 	while (!LIST_EMPTY(&enic->flows)) {
 		flow = LIST_FIRST(&enic->flows);
-		enic_flow_del_filter(enic, flow->enic_filter_id, error);
+		enic_flow_del_filter(enic, flow, error);
 		LIST_REMOVE(flow, next);
+		rte_free(flow);
 	}
 	rte_spinlock_unlock(&enic->flows_lock);
 	return 0;
 }
 
+static int
+enic_flow_query_count(struct rte_eth_dev *dev,
+		      struct rte_flow *flow, void *data,
+		      struct rte_flow_error *error)
+{
+	struct enic *enic = pmd_priv(dev);
+	struct rte_flow_query_count *query;
+	uint64_t packets, bytes;
+
+	FLOW_TRACE();
+
+	if (flow->counter_idx == -1) {
+		return rte_flow_error_set(error, ENOTSUP,
+			RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			NULL,
+			"flow does not have counter");
+	}
+	query = (struct rte_flow_query_count *)data;
+	if (!vnic_dev_counter_query(enic->vdev, flow->counter_idx,
+				    !!query->reset, &packets, &bytes)) {
+		return rte_flow_error_set
+			(error, EINVAL,
+			 RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
+			 NULL,
+			 "cannot read counter");
+	}
+	query->hits_set = 1;
+	query->bytes_set = 1;
+	query->hits = packets;
+	query->bytes = bytes;
+	return 0;
+}
+
+static int
+enic_flow_query(struct rte_eth_dev *dev,
+		struct rte_flow *flow,
+		const struct rte_flow_action *actions,
+		void *data,
+		struct rte_flow_error *error)
+{
+	int ret = 0;
+
+	FLOW_TRACE();
+
+	for (; actions->type != RTE_FLOW_ACTION_TYPE_END; actions++) {
+		switch (actions->type) {
+		case RTE_FLOW_ACTION_TYPE_VOID:
+			break;
+		case RTE_FLOW_ACTION_TYPE_COUNT:
+			ret = enic_flow_query_count(dev, flow, data, error);
+			break;
+		default:
+			return rte_flow_error_set(error, ENOTSUP,
					RTE_FLOW_ERROR_TYPE_ACTION,
					actions,
					"action not supported");
		}
+		if (ret < 0)
+			return ret;
+	}
+	return 0;
+}
+
 /**
  * Flow callback registration.
  *
@@ -1570,4 +1711,5 @@ const struct rte_flow_ops enic_flow_ops = {
 	.create = enic_flow_create,
 	.destroy = enic_flow_destroy,
 	.flush = enic_flow_flush,
+	.query = enic_flow_query,
 };
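For context (not part of the commit): a minimal sketch of how an application could exercise the new COUNT action and the `.query` callback through the public rte_flow API. The `port_id`/`rx_queue` parameters, the ETH/IPv4 pattern, and the `counted_flow_example` helper name are illustrative placeholders, and error handling is kept to the bare minimum.

```c
#include <stdio.h>
#include <inttypes.h>
#include <rte_flow.h>

/* Create an ingress flow that steers IPv4 traffic to rx_queue and attaches a
 * COUNT action, then read the counter back with rte_flow_query(), which ends
 * up in the driver's .query callback (enic_flow_query above). */
static int
counted_flow_example(uint16_t port_id, uint16_t rx_queue)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action_queue queue = { .index = rx_queue };
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	/* Action list identifying which action of the rule to query */
	const struct rte_flow_action count_action[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_query_count counters = { .reset = 0 };
	struct rte_flow_error error;
	struct rte_flow *flow;

	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	if (flow == NULL)
		return -1;

	/* ... traffic runs for a while ... */

	if (rte_flow_query(port_id, flow, count_action, &counters, &error)) {
		rte_flow_destroy(port_id, flow, &error);
		return -1;
	}
	if (counters.hits_set && counters.bytes_set)
		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
		       counters.hits, counters.bytes);

	return rte_flow_destroy(port_id, flow, &error);
}
```

Passing an END-terminated action list to rte_flow_query lines up with the loop in enic_flow_query, which walks the supplied actions until RTE_FLOW_ACTION_TYPE_END and dispatches COUNT to enic_flow_query_count.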