/* (removed) cgit web-interface residue: page-navigation text, blob metadata
 * and the HTML line-number gutter were captured along with the source when
 * this file was extracted.  None of it is part of the source file. */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2018 Intel Corporation
 */
#ifndef _QAT_QP_H_
#define _QAT_QP_H_

#include "qat_common.h"
#include "adf_transport_access_macros.h"

struct qat_pci_device;

#define QAT_CSR_HEAD_WRITE_THRESH 32U
/* number of requests to accumulate before writing head CSR */
#define QAT_CSR_TAIL_WRITE_THRESH 32U
/* number of requests to accumulate before writing tail CSR */
#define QAT_CSR_TAIL_FORCE_WRITE_THRESH 256U
/* number of inflights below which no tail write coalescing should occur */

typedef int (*build_request_t)(void *op,
		uint8_t *req, void *op_cookie,
		enum qat_device_gen qat_dev_gen);
/**< Build a request from an op. */

/**
 * Structure with data needed for creation of queue pair.
 */
struct qat_qp_hw_data {
	enum qat_service_type service_type;
	uint8_t hw_bundle_num;
	uint8_t tx_ring_num;
	uint8_t rx_ring_num;
	uint16_t tx_msg_size;
	uint16_t rx_msg_size;
};
/**
 * Structure with data needed for creation of queue pair.
 */
struct qat_qp_config {
	const struct qat_qp_hw_data *hw;
	uint32_t nb_descriptors;
	uint32_t cookie_size;
	int socket_id;
	build_request_t build_request;
	const char *service_str;
};

/**
 * Structure associated with each queue.
 */
struct qat_queue {
	char		memz_name[RTE_MEMZONE_NAMESIZE];
	void		*base_addr;		/* Base address */
	rte_iova_t	base_phys_addr;		/* Queue physical address */
	uint32_t	head;			/* Shadow copy of the head */
	uint32_t	tail;			/* Shadow copy of the tail */
	uint32_t	modulo_mask;
	uint32_t	msg_size;
	uint16_t	max_inflights;
	uint32_t	queue_size;
	uint8_t		hw_bundle_number;
	uint8_t		hw_queue_number;
	/* HW queue aka ring offset on bundle */
	uint32_t	csr_head;		/* last written head value */
	uint32_t	csr_tail;		/* last written tail value */
	uint16_t	nb_processed_responses;
	/* number of responses processed since last CSR head write */
	uint16_t	nb_pending_requests;
	/* number of requests pending since last CSR tail write */
};

struct qat_qp {
	void			*mmap_bar_addr;
	uint16_t		inflights16;
	struct qat_queue	tx_q;
	struct qat_queue	rx_q;
	struct qat_common_stats stats;
	struct rte_mempool *op_cookie_pool;
	void **op_cookies;
	uint32_t nb_descriptors;
	enum qat_device_gen qat_dev_gen;
	build_request_t build_request;
	enum qat_service_type service_type;
	struct qat_pci_device *qat_dev;
	/**< qat device this qp is on */
} __rte_cache_aligned;

extern const struct qat_qp_hw_data qat_gen1_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];
extern const struct qat_qp_hw_data qat_gen3_qps[][ADF_MAX_QPS_ON_ANY_SERVICE];

uint16_t
qat_enqueue_op_burst(void *qp, void **ops, uint16_t nb_ops);

uint16_t
qat_dequeue_op_burst(void *qp, void **ops, uint16_t nb_ops);

int
qat_qp_release(struct qat_qp **qp_addr);

int
qat_qp_setup(struct qat_pci_device *qat_dev,
		struct qat_qp **qp_addr, uint16_t queue_pair_id,
		struct qat_qp_config *qat_qp_conf);

int
qat_qps_per_service(const struct qat_qp_hw_data *qp_hw_data,
			enum qat_service_type service);

/* Needed for weak function*/
int
qat_comp_process_response(void **op __rte_unused, uint8_t *resp __rte_unused);

#endif /* _QAT_QP_H_ */
arc_registration *next; /** Feature Arc name */ char *arc_name; /** Start nodes */ char **start_nodes; int n_start_nodes; /* Feature arc index, assigned by init function */ u8 feature_arc_index; u8 *arc_index_ptr; } vnet_feature_arc_registration_t; /* Enable feature callback. */ typedef clib_error_t *(vnet_feature_enable_disable_function_t) (u32 sw_if_index, int enable_disable); /** feature registration object */ typedef struct _vnet_feature_registration { /** next registration in list of all registrations*/ struct _vnet_feature_registration *next; /** Feature arc name */ char *arc_name; /** Graph node name */ char *node_name; /** Pointer to this feature index, filled in by vnet_feature_arc_init */ u32 *feature_index_ptr; u32 feature_index; /** Constraints of the form "this feature runs before X" */ char **runs_before; /** Constraints of the form "this feature runs after Y" */ char **runs_after; /** Function to enable/disable feature **/ vnet_feature_enable_disable_function_t *enable_disable_cb; } vnet_feature_registration_t; typedef struct vnet_feature_config_main_t_ { vnet_config_main_t config_main; u32 *config_index_by_sw_if_index; } vnet_feature_config_main_t; typedef struct { /** feature arc configuration list */ vnet_feature_arc_registration_t *next_arc; uword **arc_index_by_name; /** feature path configuration lists */ vnet_feature_registration_t *next_feature; vnet_feature_registration_t **next_feature_by_arc; uword **next_feature_by_name; /** feature config main objects */ vnet_feature_config_main_t *feature_config_mains; /** Save partial order results for show command */ char ***feature_nodes; /** bitmap of interfaces which have driver rx features configured */ uword **sw_if_index_has_features; /** feature reference counts by interface */ i16 **feature_count_by_sw_if_index; /** Feature arc index for device-input */ u8 device_input_feature_arc_index; /** convenience */ vlib_main_t *vlib_main; vnet_main_t *vnet_main; } vnet_feature_main_t; extern 
vnet_feature_main_t feature_main; #define VNET_FEATURE_ARC_INIT(x,...) \ __VA_ARGS__ vnet_feature_arc_registration_t vnet_feat_arc_##x;\ static void __vnet_add_feature_arc_registration_##x (void) \ __attribute__((__constructor__)) ; \ static void __vnet_add_feature_arc_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feat_arc_##x.next = fm->next_arc; \ fm->next_arc = & vnet_feat_arc_##x; \ } \ __VA_ARGS__ vnet_feature_arc_registration_t vnet_feat_arc_##x #define VNET_FEATURE_INIT(x,...) \ __VA_ARGS__ vnet_feature_registration_t vnet_feat_##x; \ static void __vnet_add_feature_registration_##x (void) \ __attribute__((__constructor__)) ; \ static void __vnet_add_feature_registration_##x (void) \ { \ vnet_feature_main_t * fm = &feature_main; \ vnet_feat_##x.next = fm->next_feature; \ fm->next_feature = & vnet_feat_##x; \ } \ __VA_ARGS__ vnet_feature_registration_t vnet_feat_##x void vnet_config_update_feature_count (vnet_feature_main_t * fm, u8 arc, u32 sw_if_index, int is_add); u32 vnet_get_feature_index (u8 arc, const char *s); u8 vnet_get_feature_arc_index (const char *s); vnet_feature_registration_t *vnet_get_feature_reg (const char *arc_name, const char *node_name); int vnet_feature_enable_disable_with_index (u8 arc_index, u32 feature_index, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes); int vnet_feature_enable_disable (const char *arc_name, const char *node_name, u32 sw_if_index, int enable_disable, void *feature_config, u32 n_feature_config_bytes); static inline vnet_feature_config_main_t * vnet_get_feature_arc_config_main (u8 arc_index) { vnet_feature_main_t *fm = &feature_main; if (arc_index == (u8) ~ 0) return 0; return &fm->feature_config_mains[arc_index]; } static_always_inline vnet_feature_config_main_t * vnet_feature_get_config_main (u16 arc) { vnet_feature_main_t *fm = &feature_main; return &fm->feature_config_mains[arc]; } static_always_inline int vnet_have_features (u8 arc, u32 
sw_if_index) { vnet_feature_main_t *fm = &feature_main; return clib_bitmap_get (fm->sw_if_index_has_features[arc], sw_if_index); } static_always_inline u32 vnet_get_feature_config_index (u8 arc, u32 sw_if_index) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc]; return vec_elt (cm->config_index_by_sw_if_index, sw_if_index); } static_always_inline void * vnet_feature_arc_start_with_data (u8 arc, u32 sw_if_index, u32 * next, vlib_buffer_t * b, u32 n_data_bytes) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; cm = &fm->feature_config_mains[arc]; if (PREDICT_FALSE (vnet_have_features (arc, sw_if_index))) { b->feature_arc_index = arc; b->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); return vnet_get_config_data (&cm->config_main, &b->current_config_index, next, n_data_bytes); } return 0; } static_always_inline void vnet_feature_arc_start (u8 arc, u32 sw_if_index, u32 * next0, vlib_buffer_t * b0) { vnet_feature_arc_start_with_data (arc, sw_if_index, next0, b0, 0); } static_always_inline void * vnet_feature_next_with_data (u32 sw_if_index, u32 * next0, vlib_buffer_t * b0, u32 n_data_bytes) { vnet_feature_main_t *fm = &feature_main; u8 arc = b0->feature_arc_index; vnet_feature_config_main_t *cm = &fm->feature_config_mains[arc]; return vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, n_data_bytes); } static_always_inline void vnet_feature_next (u32 sw_if_index, u32 * next0, vlib_buffer_t * b0) { vnet_feature_next_with_data (sw_if_index, next0, b0, 0); } static_always_inline void vnet_feature_start_device_input_x1 (u32 sw_if_index, u32 * next0, vlib_buffer_t * b0) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; u8 feature_arc_index = fm->device_input_feature_arc_index; cm = &fm->feature_config_mains[feature_arc_index]; if (PREDICT_FALSE (clib_bitmap_get (fm->sw_if_index_has_features[feature_arc_index], 
sw_if_index))) { /* * Save next0 so that the last feature in the chain * can skip ethernet-input if indicated... */ u16 adv; vnet_buffer (b0)->device_input_feat.saved_next_index = *next0; adv = device_input_next_node_advance[*next0]; vnet_buffer (b0)->device_input_feat.buffer_advance = adv; vlib_buffer_advance (b0, -adv); b0->feature_arc_index = feature_arc_index; b0->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, /* # bytes of config data */ 0); } } static_always_inline void vnet_feature_start_device_input_x2 (u32 sw_if_index, u32 * next0, u32 * next1, vlib_buffer_t * b0, vlib_buffer_t * b1) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; u8 feature_arc_index = fm->device_input_feature_arc_index; cm = &fm->feature_config_mains[feature_arc_index]; if (PREDICT_FALSE (clib_bitmap_get (fm->sw_if_index_has_features[feature_arc_index], sw_if_index))) { /* * Save next0 so that the last feature in the chain * can skip ethernet-input if indicated... 
*/ u16 adv; vnet_buffer (b0)->device_input_feat.saved_next_index = *next0; adv = device_input_next_node_advance[*next0]; vnet_buffer (b0)->device_input_feat.buffer_advance = adv; vlib_buffer_advance (b0, -adv); vnet_buffer (b1)->device_input_feat.saved_next_index = *next1; adv = device_input_next_node_advance[*next1]; vnet_buffer (b1)->device_input_feat.buffer_advance = adv; vlib_buffer_advance (b1, -adv); b0->feature_arc_index = feature_arc_index; b1->feature_arc_index = feature_arc_index; b0->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); b1->current_config_index = b0->current_config_index; vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b1->current_config_index, next1, /* # bytes of config data */ 0); } } static_always_inline void vnet_feature_start_device_input_x4 (u32 sw_if_index, u32 * next0, u32 * next1, u32 * next2, u32 * next3, vlib_buffer_t * b0, vlib_buffer_t * b1, vlib_buffer_t * b2, vlib_buffer_t * b3) { vnet_feature_main_t *fm = &feature_main; vnet_feature_config_main_t *cm; u8 feature_arc_index = fm->device_input_feature_arc_index; cm = &fm->feature_config_mains[feature_arc_index]; if (PREDICT_FALSE (clib_bitmap_get (fm->sw_if_index_has_features[feature_arc_index], sw_if_index))) { /* * Save next0 so that the last feature in the chain * can skip ethernet-input if indicated... 
*/ u16 adv; vnet_buffer (b0)->device_input_feat.saved_next_index = *next0; adv = device_input_next_node_advance[*next0]; vnet_buffer (b0)->device_input_feat.buffer_advance = adv; vlib_buffer_advance (b0, -adv); vnet_buffer (b1)->device_input_feat.saved_next_index = *next1; adv = device_input_next_node_advance[*next1]; vnet_buffer (b1)->device_input_feat.buffer_advance = adv; vlib_buffer_advance (b1, -adv); vnet_buffer (b2)->device_input_feat.saved_next_index = *next2; adv = device_input_next_node_advance[*next2]; vnet_buffer (b2)->device_input_feat.buffer_advance = adv; vlib_buffer_advance (b2, -adv); vnet_buffer (b3)->device_input_feat.saved_next_index = *next3; adv = device_input_next_node_advance[*next3]; vnet_buffer (b3)->device_input_feat.buffer_advance = adv; vlib_buffer_advance (b3, -adv); b0->feature_arc_index = feature_arc_index; b1->feature_arc_index = feature_arc_index; b2->feature_arc_index = feature_arc_index; b3->feature_arc_index = feature_arc_index; b0->current_config_index = vec_elt (cm->config_index_by_sw_if_index, sw_if_index); b1->current_config_index = b0->current_config_index; b2->current_config_index = b0->current_config_index; b3->current_config_index = b0->current_config_index; vnet_get_config_data (&cm->config_main, &b0->current_config_index, next0, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b1->current_config_index, next1, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b2->current_config_index, next2, /* # bytes of config data */ 0); vnet_get_config_data (&cm->config_main, &b3->current_config_index, next3, /* # bytes of config data */ 0); } } #define VNET_FEATURES(...) 
(char*[]) { __VA_ARGS__, 0} clib_error_t *vnet_feature_arc_init (vlib_main_t * vm, vnet_config_main_t * vcm, char **feature_start_nodes, int num_feature_start_nodes, vnet_feature_registration_t * first_reg, char ***feature_nodes); void vnet_interface_features_show (vlib_main_t * vm, u32 sw_if_index); #endif /* included_feature_h */ /* * fd.io coding-style-patch-verification: ON * * Local Variables: * eval: (c-set-style "gnu") * End: */