/* SPDX-License-Identifier: BSD-3-Clause
* Copyright(c) 2017 Intel Corporation
*/

#ifndef _SCHEDULER_PMD_PRIVATE_H
#define _SCHEDULER_PMD_PRIVATE_H

#include "rte_cryptodev_scheduler.h"

#define CRYPTODEV_NAME_SCHEDULER_PMD	crypto_scheduler
/**< Scheduler Crypto PMD device name */

#define PER_SLAVE_BUFF_SIZE	(256)

extern int scheduler_logtype_driver;

#define CR_SCHED_LOG(level, fmt, args...) \
	rte_log(RTE_LOG_ ## level, scheduler_logtype_driver, \
			"%s() line %u: "fmt "\n", __func__, __LINE__, ##args)

struct scheduler_slave {
	uint8_t dev_id;
	/**< cryptodev device id of the slave */
	uint16_t qp_id;
	/**< queue pair id used on the slave device */
	uint32_t nb_inflight_cops;
	/**< number of crypto ops currently in flight on this slave */

	uint8_t driver_id;
	/**< driver id of the slave device */
};

struct scheduler_ctx {
	void *private_ctx;
	/**< private scheduler context pointer */

	struct rte_cryptodev_capabilities *capabilities;
	uint32_t nb_capabilities;
	/**< aggregated capabilities of the attached slaves */

	uint32_t max_nb_queue_pairs;

	struct scheduler_slave slaves[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	uint32_t nb_slaves;
	/**< slaves attached to this scheduler */

	enum rte_cryptodev_scheduler_mode mode;
	/**< active scheduling mode */

	struct rte_cryptodev_scheduler_ops ops;
	/**< function pointers of the active mode */

	uint8_t reordering_enabled;
	/**< non-zero when crypto op reordering is enabled */

	char name[RTE_CRYPTODEV_SCHEDULER_NAME_MAX_LEN];
	char description[RTE_CRYPTODEV_SCHEDULER_DESC_MAX_LEN];

	uint16_t wc_pool[RTE_MAX_LCORE];
	uint16_t nb_wc;
	/**< worker cores used by the multi-core mode */

	char *init_slave_names[RTE_CRYPTODEV_SCHEDULER_MAX_NB_SLAVES];
	int nb_init_slaves;
	/**< slave names given at init time, attached during probe */
} __rte_cache_aligned;
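
/*
 * Illustrative sketch (an assumption, not driver code): a scheduling mode
 * typically selects one of the attached slaves from scheduler_ctx, e.g. a
 * simple round-robin pick:
 *
 *	struct scheduler_ctx *sched_ctx = dev->data->dev_private;
 *	uint32_t next = qp_private_state->last_slave_idx;
 *	struct scheduler_slave *slave =
 *		&sched_ctx->slaves[next++ % sched_ctx->nb_slaves];
 *
 * The qp_private_state variable is hypothetical; real modes keep such
 * per-queue-pair state behind private_qp_ctx below.
 */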

struct scheduler_qp_ctx {
	void *private_qp_ctx;
	/**< private queue pair context of the active mode */

	uint32_t max_nb_objs;

	struct rte_ring *order_ring;
	/**< ring used to restore the original enqueue order */
	uint32_t seqn;
} __rte_cache_aligned;

extern uint8_t cryptodev_scheduler_driver_id;

/* Clamp an enqueue burst to the free space left in the order ring */
static __rte_always_inline uint16_t
get_max_enqueue_order_count(struct rte_ring *order_ring, uint16_t nb_ops)
{
	uint32_t count = rte_ring_free_count(order_ring);

	return count > nb_ops ? nb_ops : count;
}

/* Record the enqueue order of a burst so it can be restored at dequeue */
static __rte_always_inline void
scheduler_order_insert(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	rte_ring_sp_enqueue_burst(order_ring, (void **)ops, nb_ops, NULL);
}
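
/*
 * Illustrative sketch (an assumption, not driver code): an ordering-aware
 * enqueue path first clamps the burst to the order ring's free space, hands
 * the ops to a slave, then records the accepted ops in the order ring:
 *
 *	nb_ops = get_max_enqueue_order_count(order_ring, nb_ops);
 *	processed = rte_cryptodev_enqueue_burst(slave->dev_id, slave->qp_id,
 *			ops, nb_ops);
 *	scheduler_order_insert(order_ring, ops, processed);
 */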

/* Peek at the object at offset pos from the ring's consumer head */
#define SCHEDULER_GET_RING_OBJ(order_ring, pos, op) do { \
	struct rte_crypto_op **ring = (void *)&order_ring[1]; \
	op = ring[(order_ring->cons.head + pos) & order_ring->mask]; \
} while (0)

/*
 * Dequeue the head-of-line ops that have finished processing, stopping at
 * the first op that is still pending, so the caller receives ops in their
 * original enqueue order.
 */
static __rte_always_inline uint16_t
scheduler_order_drain(struct rte_ring *order_ring,
		struct rte_crypto_op **ops, uint16_t nb_ops)
{
	struct rte_crypto_op *op;
	uint32_t nb_objs = rte_ring_count(order_ring);
	uint32_t nb_ops_to_deq = 0;
	uint32_t nb_ops_deqd = 0;

	if (nb_objs > nb_ops)
		nb_objs = nb_ops;

	while (nb_ops_to_deq < nb_objs) {
		SCHEDULER_GET_RING_OBJ(order_ring, nb_ops_to_deq, op);
		if (op->status == RTE_CRYPTO_OP_STATUS_NOT_PROCESSED)
			break;
		nb_ops_to_deq++;
	}

	if (nb_ops_to_deq)
		nb_ops_deqd = rte_ring_sc_dequeue_bulk(order_ring,
				(void **)ops, nb_ops_to_deq, NULL);

	return nb_ops_deqd;
}
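
/*
 * Illustrative sketch (an assumption, not driver code): a mode's dequeue
 * function pulls completed ops from its slaves, then drains the in-order
 * prefix from the order ring back to the application:
 *
 *	(void)rte_cryptodev_dequeue_burst(slave->dev_id, slave->qp_id,
 *			deq_ops, nb_deq_ops);
 *	return scheduler_order_drain(order_ring, ops, nb_ops);
 *
 * Ops dequeued from the slave are the same pointers already stored in
 * order_ring; the drain only releases the leading run whose status is no
 * longer RTE_CRYPTO_OP_STATUS_NOT_PROCESSED.
 */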

/** device specific operations function pointer structure */
extern struct rte_cryptodev_ops *rte_crypto_scheduler_pmd_ops;

#endif /* _SCHEDULER_PMD_PRIVATE_H */