path: root/drivers/net/ixgbe/ixgbe_ethdev.h
author    Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 14:51:32 +0200
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>  2017-05-16 16:20:45 +0200
commit    7595afa4d30097c1177b69257118d8ad89a539be (patch)
tree      4bfeadc905c977e45e54a90c42330553b8942e4e /drivers/net/ixgbe/ixgbe_ethdev.h
parent    ce3d555e43e3795b5d9507fcfc76b7a0a92fd0d6 (diff)
Imported Upstream version 17.05
Change-Id: Id1e419c5a214e4a18739663b91f0f9a549f1fdc6
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'drivers/net/ixgbe/ixgbe_ethdev.h')
-rw-r--r--    drivers/net/ixgbe/ixgbe_ethdev.h    287
1 file changed, 283 insertions(+), 4 deletions(-)
diff --git a/drivers/net/ixgbe/ixgbe_ethdev.h b/drivers/net/ixgbe/ixgbe_ethdev.h
index a4e2996a..b576a6f4 100644
--- a/drivers/net/ixgbe/ixgbe_ethdev.h
+++ b/drivers/net/ixgbe/ixgbe_ethdev.h
@@ -1,7 +1,7 @@
/*-
* BSD LICENSE
*
- * Copyright(c) 2010-2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2010-2016 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -38,11 +38,14 @@
#include "base/ixgbe_dcb_82598.h"
#include "ixgbe_bypass.h"
#include <rte_time.h>
+#include <rte_hash.h>
/* need update link, bit flag */
#define IXGBE_FLAG_NEED_LINK_UPDATE (uint32_t)(1 << 0)
#define IXGBE_FLAG_MAILBOX (uint32_t)(1 << 1)
#define IXGBE_FLAG_PHY_INTERRUPT (uint32_t)(1 << 2)
+#define IXGBE_FLAG_MACSEC (uint32_t)(1 << 3)
+#define IXGBE_FLAG_NEED_LINK_CONFIG (uint32_t)(1 << 4)
/*
* Defines that were not part of ixgbe_type.h as they are not used by the
@@ -130,10 +133,28 @@
#define IXGBE_MISC_VEC_ID RTE_INTR_VEC_ZERO_OFFSET
#define IXGBE_RX_VEC_START RTE_INTR_VEC_RXTX_OFFSET
+#define IXGBE_SECTX_MINSECIFG_MASK 0x0000000F
+
+#define IXGBE_MACSEC_PNTHRSH 0xFFFFFE00
+
+#define IXGBE_MAX_FDIR_FILTER_NUM (1024 * 32)
+#define IXGBE_MAX_L2_TN_FILTER_NUM 128
+
+#define MAC_TYPE_FILTER_SUP_EXT(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540)\
+ return -ENOTSUP;\
+} while (0)
+
+#define MAC_TYPE_FILTER_SUP(type) do {\
+ if ((type) != ixgbe_mac_82599EB && (type) != ixgbe_mac_X540 &&\
+ (type) != ixgbe_mac_X550 && (type) != ixgbe_mac_X550EM_x &&\
+ (type) != ixgbe_mac_X550EM_a)\
+ return -ENOTSUP;\
+} while (0)
+
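[Usage note, not part of the patch] Because MAC_TYPE_FILTER_SUP() and MAC_TYPE_FILTER_SUP_EXT() expand to an early "return -ENOTSUP;", they may only be used inside functions that return int. A minimal sketch, assuming the usual ixgbe headers are included (the function name is hypothetical):

static int
example_check_filter_support(struct ixgbe_hw *hw)
{
	/* Rejects anything other than 82599/X540/X550/X550EM_x/X550EM_a. */
	MAC_TYPE_FILTER_SUP(hw->mac.type);
	return 0;
}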
/*
* Information about the fdir mode.
*/
-
struct ixgbe_hw_fdir_mask {
uint16_t vlan_tci_mask;
uint32_t src_ipv4_mask;
@@ -148,6 +169,28 @@ struct ixgbe_hw_fdir_mask {
uint8_t tunnel_type_mask;
};
+struct ixgbe_fdir_filter {
+ TAILQ_ENTRY(ixgbe_fdir_filter) entries;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t fdirhash; /* hash value for fdir */
+ uint8_t queue; /* assigned rx queue */
+};
+
+/* list of fdir filters */
+TAILQ_HEAD(ixgbe_fdir_filter_list, ixgbe_fdir_filter);
+
+struct ixgbe_fdir_rule {
+ struct ixgbe_hw_fdir_mask mask;
+ union ixgbe_atr_input ixgbe_fdir; /* key of fdir filter*/
+ bool b_spec; /* If TRUE, ixgbe_fdir, fdirflags, queue have meaning. */
+ bool b_mask; /* If TRUE, mask has meaning. */
+ enum rte_fdir_mode mode; /* IP, MAC VLAN, Tunnel */
+ uint32_t fdirflags; /* drop or forward */
+ uint32_t soft_id; /* a unique value for this rule */
+ uint8_t queue; /* assigned rx queue */
+};
+
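[Illustration, not part of the patch] A hedged sketch of how a caller might fill an ixgbe_fdir_rule and hand it to ixgbe_fdir_filter_program(), which is declared further down in this header. Field values are illustrative; TRUE/FALSE come from the base driver osdep layer and IXGBE_FDIRCMD_DROP from base/ixgbe_type.h:

static int
example_add_drop_rule(struct rte_eth_dev *dev)
{
	struct ixgbe_fdir_rule rule;

	memset(&rule, 0, sizeof(rule));
	rule.mode = RTE_FDIR_MODE_PERFECT;   /* plain IP mode */
	rule.b_spec = TRUE;                  /* ixgbe_fdir/fdirflags/queue are valid */
	rule.b_mask = TRUE;                  /* mask is valid */
	rule.fdirflags = IXGBE_FDIRCMD_DROP; /* drop matching packets */
	rule.soft_id = 1;
	rule.queue = 0;
	/* ... fill rule.ixgbe_fdir and rule.mask from the matched pattern ... */

	return ixgbe_fdir_filter_program(dev, &rule, FALSE, FALSE); /* add, no update */
}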
struct ixgbe_hw_fdir_info {
struct ixgbe_hw_fdir_mask mask;
uint8_t flex_bytes_offset;
@@ -159,6 +202,11 @@ struct ixgbe_hw_fdir_info {
uint64_t remove;
uint64_t f_add;
uint64_t f_remove;
+ struct ixgbe_fdir_filter_list fdir_list; /* filter list*/
+ /* store the pointers of the filters, index is the hash value. */
+ struct ixgbe_fdir_filter **hash_map;
+ struct rte_hash *hash_handle; /* cuckoo hash handler */
+ bool mask_added; /* If the mask has already been obtained from a consistent filter */
};
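[Illustration, not part of the patch] A hedged sketch of how hash_handle, hash_map and fdir_list might be initialised; the real setup lives in ixgbe_ethdev.c, and the hash name, the CRC hash function and the helper name below are assumptions made for illustration:

#include <rte_hash.h>
#include <rte_hash_crc.h>
#include <rte_malloc.h>

static int
example_fdir_hash_init(struct ixgbe_hw_fdir_info *fdir_info, int socket_id)
{
	struct rte_hash_parameters params = {
		.name = "example_fdir_hash",
		.entries = IXGBE_MAX_FDIR_FILTER_NUM,
		.key_len = sizeof(union ixgbe_atr_input), /* fdir key */
		.hash_func = rte_hash_crc,
		.hash_func_initial_val = 0,
		.socket_id = socket_id,
	};

	fdir_info->hash_handle = rte_hash_create(&params);
	if (fdir_info->hash_handle == NULL)
		return -EINVAL;
	/* index of hash_map is the hash value returned by rte_hash_add_key() */
	fdir_info->hash_map = rte_zmalloc("example_fdir_map",
			sizeof(struct ixgbe_fdir_filter *) *
			IXGBE_MAX_FDIR_FILTER_NUM, 0);
	if (fdir_info->hash_map == NULL)
		return -ENOMEM;
	TAILQ_INIT(&fdir_info->fdir_list);
	return 0;
}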
/* structure for interrupt relative data */
@@ -254,16 +302,136 @@ struct ixgbe_5tuple_filter {
(RTE_ALIGN(IXGBE_MAX_FTQF_FILTERS, (sizeof(uint32_t) * NBBY)) / \
(sizeof(uint32_t) * NBBY))
+struct ixgbe_ethertype_filter {
+ uint16_t ethertype;
+ uint32_t etqf;
+ uint32_t etqs;
+ /**
+ * If this filter is added by configuration,
+ * it should not be removed.
+ */
+ bool conf;
+};
+
/*
* Structure to store filters' info.
*/
struct ixgbe_filter_info {
uint8_t ethertype_mask; /* Bit mask for every used ethertype filter */
/* store used ethertype filters*/
- uint16_t ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
+ struct ixgbe_ethertype_filter ethertype_filters[IXGBE_MAX_ETQF_FILTERS];
/* Bit mask for every used 5tuple filter */
uint32_t fivetuple_mask[IXGBE_5TUPLE_ARRAY_SIZE];
struct ixgbe_5tuple_filter_list fivetuple_list;
+ /* store the SYN filter info */
+ uint32_t syn_info;
+};
+
+struct ixgbe_l2_tn_key {
+ enum rte_eth_tunnel_type l2_tn_type;
+ uint32_t tn_id;
+};
+
+struct ixgbe_l2_tn_filter {
+ TAILQ_ENTRY(ixgbe_l2_tn_filter) entries;
+ struct ixgbe_l2_tn_key key;
+ uint32_t pool;
+};
+
+TAILQ_HEAD(ixgbe_l2_tn_filter_list, ixgbe_l2_tn_filter);
+
+struct ixgbe_l2_tn_info {
+ struct ixgbe_l2_tn_filter_list l2_tn_list;
+ struct ixgbe_l2_tn_filter **hash_map;
+ struct rte_hash *hash_handle;
+ bool e_tag_en; /* e-tag enabled */
+ bool e_tag_fwd_en; /* e-tag based forwarding enabled */
+ uint16_t e_tag_ether_type; /* ether type for e-tag */
+};
+
+struct rte_flow {
+ enum rte_filter_type filter_type;
+ void *rule;
+};
+/* ntuple filter list structure */
+struct ixgbe_ntuple_filter_ele {
+ TAILQ_ENTRY(ixgbe_ntuple_filter_ele) entries;
+ struct rte_eth_ntuple_filter filter_info;
+};
+/* ethertype filter list structure */
+struct ixgbe_ethertype_filter_ele {
+ TAILQ_ENTRY(ixgbe_ethertype_filter_ele) entries;
+ struct rte_eth_ethertype_filter filter_info;
+};
+/* syn filter list structure */
+struct ixgbe_eth_syn_filter_ele {
+ TAILQ_ENTRY(ixgbe_eth_syn_filter_ele) entries;
+ struct rte_eth_syn_filter filter_info;
+};
+/* fdir filter list structure */
+struct ixgbe_fdir_rule_ele {
+ TAILQ_ENTRY(ixgbe_fdir_rule_ele) entries;
+ struct ixgbe_fdir_rule filter_info;
+};
+/* l2_tunnel filter list structure */
+struct ixgbe_eth_l2_tunnel_conf_ele {
+ TAILQ_ENTRY(ixgbe_eth_l2_tunnel_conf_ele) entries;
+ struct rte_eth_l2_tunnel_conf filter_info;
+};
+/* ixgbe_flow memory list structure */
+struct ixgbe_flow_mem {
+ TAILQ_ENTRY(ixgbe_flow_mem) entries;
+ struct rte_flow *flow;
+};
+
+TAILQ_HEAD(ixgbe_ntuple_filter_list, ixgbe_ntuple_filter_ele);
+struct ixgbe_ntuple_filter_list filter_ntuple_list;
+TAILQ_HEAD(ixgbe_ethertype_filter_list, ixgbe_ethertype_filter_ele);
+struct ixgbe_ethertype_filter_list filter_ethertype_list;
+TAILQ_HEAD(ixgbe_syn_filter_list, ixgbe_eth_syn_filter_ele);
+struct ixgbe_syn_filter_list filter_syn_list;
+TAILQ_HEAD(ixgbe_fdir_rule_filter_list, ixgbe_fdir_rule_ele);
+struct ixgbe_fdir_rule_filter_list filter_fdir_list;
+TAILQ_HEAD(ixgbe_l2_tunnel_filter_list, ixgbe_eth_l2_tunnel_conf_ele);
+struct ixgbe_l2_tunnel_filter_list filter_l2_tunnel_list;
+TAILQ_HEAD(ixgbe_flow_mem_list, ixgbe_flow_mem);
+struct ixgbe_flow_mem_list ixgbe_flow_list;
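[Illustration, not part of the patch] A hedged sketch of the pattern these per-type lists and the small struct rte_flow wrapper support (the real logic lives in ixgbe_flow.c): the driver-private rule is queued on its list and boxed together with its filter type so later destroy/flush calls can dispatch on filter_type. The function and allocation names are illustrative:

static struct rte_flow *
example_wrap_ntuple(struct ixgbe_ntuple_filter_ele *ele)
{
	struct rte_flow *flow;

	flow = rte_zmalloc("example_rte_flow", sizeof(struct rte_flow), 0);
	if (flow == NULL)
		return NULL;
	TAILQ_INSERT_TAIL(&filter_ntuple_list, ele, entries);
	flow->filter_type = RTE_ETH_FILTER_NTUPLE;
	flow->rule = ele;
	return flow;
}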
+
+/*
+ * Statistics counters collected by MACsec
+ */
+struct ixgbe_macsec_stats {
+ /* TX port statistics */
+ uint64_t out_pkts_untagged;
+ uint64_t out_pkts_encrypted;
+ uint64_t out_pkts_protected;
+ uint64_t out_octets_encrypted;
+ uint64_t out_octets_protected;
+
+ /* RX port statistics */
+ uint64_t in_pkts_untagged;
+ uint64_t in_pkts_badtag;
+ uint64_t in_pkts_nosci;
+ uint64_t in_pkts_unknownsci;
+ uint64_t in_octets_decrypted;
+ uint64_t in_octets_validated;
+
+ /* RX SC statistics */
+ uint64_t in_pkts_unchecked;
+ uint64_t in_pkts_delayed;
+ uint64_t in_pkts_late;
+
+ /* RX SA statistics */
+ uint64_t in_pkts_ok;
+ uint64_t in_pkts_invalid;
+ uint64_t in_pkts_notvalid;
+ uint64_t in_pkts_unusedsa;
+ uint64_t in_pkts_notusingsa;
+};
+
+/* The configuration of bandwidth */
+struct ixgbe_bw_conf {
+ uint8_t tc_num; /* Number of TCs. */
};
/*
@@ -272,6 +440,7 @@ struct ixgbe_filter_info {
struct ixgbe_adapter {
struct ixgbe_hw hw;
struct ixgbe_hw_stats stats;
+ struct ixgbe_macsec_stats macsec_stats;
struct ixgbe_hw_fdir_info fdir;
struct ixgbe_interrupt intr;
struct ixgbe_stat_mapping_registers stat_mappings;
@@ -285,6 +454,8 @@ struct ixgbe_adapter {
struct ixgbe_bypass_info bps;
#endif /* RTE_NIC_BYPASS */
struct ixgbe_filter_info filter;
+ struct ixgbe_l2_tn_info l2_tn;
+ struct ixgbe_bw_conf bw_conf;
bool rx_bulk_alloc_allowed;
bool rx_vec_allowed;
@@ -293,12 +464,18 @@ struct ixgbe_adapter {
struct rte_timecounter tx_tstamp_tc;
};
+#define IXGBE_DEV_TO_PCI(eth_dev) \
+ RTE_DEV_TO_PCI((eth_dev)->device)
+
#define IXGBE_DEV_PRIVATE_TO_HW(adapter)\
(&((struct ixgbe_adapter *)adapter)->hw)
#define IXGBE_DEV_PRIVATE_TO_STATS(adapter) \
(&((struct ixgbe_adapter *)adapter)->stats)
+#define IXGBE_DEV_PRIVATE_TO_MACSEC_STATS(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->macsec_stats)
+
#define IXGBE_DEV_PRIVATE_TO_INTR(adapter) \
(&((struct ixgbe_adapter *)adapter)->intr)
@@ -329,6 +506,12 @@ struct ixgbe_adapter {
#define IXGBE_DEV_PRIVATE_TO_FILTER_INFO(adapter) \
(&((struct ixgbe_adapter *)adapter)->filter)
+#define IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->l2_tn)
+
+#define IXGBE_DEV_PRIVATE_TO_BW_CONF(adapter) \
+ (&((struct ixgbe_adapter *)adapter)->bw_conf)
+
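[Usage note, not part of the patch] Each *_PRIVATE_TO_* macro casts the ethdev private data to struct ixgbe_adapter and yields the address of one embedded member; a minimal sketch with a hypothetical wrapper:

static struct ixgbe_l2_tn_info *
example_get_l2_tn_info(struct rte_eth_dev *dev)
{
	return IXGBE_DEV_PRIVATE_TO_L2_TN_INFO(dev->data->dev_private);
}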
/*
* RX/TX function prototypes
*/
@@ -353,7 +536,9 @@ uint32_t ixgbe_dev_rx_queue_count(struct rte_eth_dev *dev,
uint16_t rx_queue_id);
int ixgbe_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
-int ixgbevf_dev_rx_descriptor_done(void *rx_queue, uint16_t offset);
+
+int ixgbe_dev_rx_descriptor_status(void *rx_queue, uint16_t offset);
+int ixgbe_dev_tx_descriptor_status(void *tx_queue, uint16_t offset);
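[Usage note, not part of the patch] These two driver callbacks back the generic descriptor-status API added in DPDK 17.05; an application would normally go through the ethdev wrappers rather than call the driver functions directly. A hedged sketch, assuming the 17.05 rte_ethdev.h API (uint8_t port ids):

static int
example_rx_descriptor_done(uint8_t port_id, uint16_t queue_id, uint16_t offset)
{
	return rte_eth_rx_descriptor_status(port_id, queue_id, offset) ==
	       RTE_ETH_RX_DESC_DONE;
}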
int ixgbe_dev_rx_init(struct rte_eth_dev *dev);
@@ -398,6 +583,9 @@ uint16_t ixgbe_xmit_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t ixgbe_xmit_pkts_simple(void *tx_queue, struct rte_mbuf **tx_pkts,
uint16_t nb_pkts);
+uint16_t ixgbe_prep_pkts(void *tx_queue, struct rte_mbuf **tx_pkts,
+ uint16_t nb_pkts);
+
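[Usage note, not part of the patch] ixgbe_prep_pkts() is the driver hook behind rte_eth_tx_prepare(); a typical transmit path validates offload/TSO constraints before the actual burst. A hedged sketch with illustrative port/queue parameters:

static uint16_t
example_tx_burst(uint8_t port_id, uint16_t queue_id,
		 struct rte_mbuf **pkts, uint16_t nb_pkts)
{
	uint16_t nb_prep = rte_eth_tx_prepare(port_id, queue_id, pkts, nb_pkts);

	return rte_eth_tx_burst(port_id, queue_id, pkts, nb_prep);
}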
int ixgbe_dev_rss_hash_update(struct rte_eth_dev *dev,
struct rte_eth_rss_conf *rss_conf);
@@ -414,10 +602,31 @@ uint32_t ixgbe_rssrk_reg_get(enum ixgbe_mac_type mac_type, uint8_t i);
bool ixgbe_rss_update_sp(enum ixgbe_mac_type mac_type);
+int ixgbe_add_del_ntuple_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ntuple_filter *filter,
+ bool add);
+int ixgbe_add_del_ethertype_filter(struct rte_eth_dev *dev,
+ struct rte_eth_ethertype_filter *filter,
+ bool add);
+int ixgbe_syn_filter_set(struct rte_eth_dev *dev,
+ struct rte_eth_syn_filter *filter,
+ bool add);
+int
+ixgbe_dev_l2_tunnel_filter_add(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel,
+ bool restore);
+int
+ixgbe_dev_l2_tunnel_filter_del(struct rte_eth_dev *dev,
+ struct rte_eth_l2_tunnel_conf *l2_tunnel);
+void ixgbe_filterlist_flush(void);
/*
* Flow director function prototypes
*/
int ixgbe_fdir_configure(struct rte_eth_dev *dev);
+int ixgbe_fdir_set_input_mask(struct rte_eth_dev *dev);
+int ixgbe_fdir_filter_program(struct rte_eth_dev *dev,
+ struct ixgbe_fdir_rule *rule,
+ bool del, bool update);
void ixgbe_configure_dcb(struct rte_eth_dev *dev);
@@ -444,4 +653,74 @@ uint32_t ixgbe_convert_vm_rx_mask_to_val(uint16_t rx_mask, uint32_t orig_val);
int ixgbe_fdir_ctrl_func(struct rte_eth_dev *dev,
enum rte_filter_op filter_op, void *arg);
+void ixgbe_fdir_filter_restore(struct rte_eth_dev *dev);
+int ixgbe_clear_all_fdir_filter(struct rte_eth_dev *dev);
+
+extern const struct rte_flow_ops ixgbe_flow_ops;
+
+void ixgbe_clear_all_ethertype_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_all_ntuple_filter(struct rte_eth_dev *dev);
+void ixgbe_clear_syn_filter(struct rte_eth_dev *dev);
+int ixgbe_clear_all_l2_tn_filter(struct rte_eth_dev *dev);
+
+int ixgbe_disable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_enable_sec_tx_path_generic(struct ixgbe_hw *hw);
+
+int ixgbe_vt_check(struct ixgbe_hw *hw);
+int ixgbe_set_vf_rate_limit(struct rte_eth_dev *dev, uint16_t vf,
+ uint16_t tx_rate, uint64_t q_msk);
+bool is_ixgbe_supported(struct rte_eth_dev *dev);
+
+static inline int
+ixgbe_ethertype_filter_lookup(struct ixgbe_filter_info *filter_info,
+ uint16_t ethertype)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (filter_info->ethertype_filters[i].ethertype == ethertype &&
+ (filter_info->ethertype_mask & (1 << i)))
+ return i;
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_insert(struct ixgbe_filter_info *filter_info,
+ struct ixgbe_ethertype_filter *ethertype_filter)
+{
+ int i;
+
+ for (i = 0; i < IXGBE_MAX_ETQF_FILTERS; i++) {
+ if (!(filter_info->ethertype_mask & (1 << i))) {
+ filter_info->ethertype_mask |= 1 << i;
+ filter_info->ethertype_filters[i].ethertype =
+ ethertype_filter->ethertype;
+ filter_info->ethertype_filters[i].etqf =
+ ethertype_filter->etqf;
+ filter_info->ethertype_filters[i].etqs =
+ ethertype_filter->etqs;
+ filter_info->ethertype_filters[i].conf =
+ ethertype_filter->conf;
+ return i;
+ }
+ }
+ return -1;
+}
+
+static inline int
+ixgbe_ethertype_filter_remove(struct ixgbe_filter_info *filter_info,
+ uint8_t idx)
+{
+ if (idx >= IXGBE_MAX_ETQF_FILTERS)
+ return -1;
+ filter_info->ethertype_mask &= ~(1 << idx);
+ filter_info->ethertype_filters[idx].ethertype = 0;
+ filter_info->ethertype_filters[idx].etqf = 0;
+ filter_info->ethertype_filters[idx].etqs = 0;
+ filter_info->ethertype_filters[idx].conf = FALSE;
+ return idx;
+}
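[Usage note, not part of the patch] A hedged sketch of the three inline helpers above used together; the ETQF/ETQS values and the function name are placeholders, and ETHER_TYPE_IPv4 / IXGBE_ETQF_FILTER_EN come from rte_ether.h and base/ixgbe_type.h:

static int
example_add_ipv4_ethertype(struct ixgbe_filter_info *filter_info)
{
	struct ixgbe_ethertype_filter f = {
		.ethertype = ETHER_TYPE_IPv4,
		.etqf = IXGBE_ETQF_FILTER_EN | ETHER_TYPE_IPv4,
		.etqs = 0,
		.conf = FALSE,
	};
	int idx;

	if (ixgbe_ethertype_filter_lookup(filter_info, f.ethertype) >= 0)
		return -EEXIST;          /* already present */
	idx = ixgbe_ethertype_filter_insert(filter_info, &f);
	if (idx < 0)
		return -ENOSPC;          /* no free ETQF slot */
	/* ... program the ETQF/ETQS registers for slot idx here ... */
	return idx;
}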
+
#endif /* _IXGBE_ETHDEV_H_ */