path: root/drivers/mempool/ring/rte_mempool_ring.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2016 Intel Corporation
 */

#include <stdio.h>
#include <string.h>

#include <rte_errno.h>
#include <rte_ring.h>
#include <rte_mempool.h>

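/*
 * Enqueue helpers: return objects to the backing ring. The ring bulk
 * API is all-or-nothing (it returns n on success and 0 on failure), so
 * a zero return is reported to the mempool layer as -ENOBUFS.
 */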
static int
common_ring_mp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	return rte_ring_mp_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sp_enqueue(struct rte_mempool *mp, void * const *obj_table,
		unsigned n)
{
	return rte_ring_sp_enqueue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

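/*
 * Dequeue helpers: take objects from the backing ring. As with
 * enqueue, the bulk call either succeeds for all n objects or returns
 * 0, which is mapped to -ENOBUFS.
 */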
static int
common_ring_mc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	return rte_ring_mc_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

static int
common_ring_sc_dequeue(struct rte_mempool *mp, void **obj_table, unsigned n)
{
	return rte_ring_sc_dequeue_bulk(mp->pool_data,
			obj_table, n, NULL) == 0 ? -ENOBUFS : 0;
}

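/* Return the number of objects currently stored in the ring. */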
static unsigned
common_ring_get_count(const struct rte_mempool *mp)
{
	return rte_ring_count(mp->pool_data);
}

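/*
 * Create the ring that backs the mempool. The ring is named after the
 * mempool and inherits its single-producer/single-consumer flags.
 */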
static int
common_ring_alloc(struct rte_mempool *mp)
{
	int rg_flags = 0, ret;
	char rg_name[RTE_RING_NAMESIZE];
	struct rte_ring *r;

	ret = snprintf(rg_name, sizeof(rg_name),
		RTE_MEMPOOL_MZ_FORMAT, mp->name);
	if (ret < 0 || ret >= (int)sizeof(rg_name)) {
		rte_errno = ENAMETOOLONG;
		return -rte_errno;
	}

	/* ring flags */
	if (mp->flags & MEMPOOL_F_SP_PUT)
		rg_flags |= RING_F_SP_ENQ;
	if (mp->flags & MEMPOOL_F_SC_GET)
		rg_flags |= RING_F_SC_DEQ;

	/*
	 * Allocate the ring that will be used to store objects.
	 * The ring functions return appropriate errors if we are
	 * running as a secondary process etc., so no such checks are
	 * made here.
	 */
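	/*
	 * A default ring holds count - 1 objects, so request at least
	 * size + 1 slots, rounded up to the next power of two.
	 */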
	r = rte_ring_create(rg_name, rte_align32pow2(mp->size + 1),
		mp->socket_id, rg_flags);
	if (r == NULL)
		return -rte_errno;

	mp->pool_data = r;

	return 0;
}

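/* Release the ring created by common_ring_alloc(). */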
static void
common_ring_free(struct rte_mempool *mp)
{
	rte_ring_free(mp->pool_data);
}

/*
 * The following four mempool ops structs provide the backward-compatible
 * handlers for single/multi producers and single/multi consumers, as
 * dictated by the flags passed to rte_mempool_create().
 */
static const struct rte_mempool_ops ops_mp_mc = {
	.name = "ring_mp_mc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_mp_enqueue,
	.dequeue = common_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_sp_sc = {
	.name = "ring_sp_sc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_sp_enqueue,
	.dequeue = common_ring_sc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_mp_sc = {
	.name = "ring_mp_sc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_mp_enqueue,
	.dequeue = common_ring_sc_dequeue,
	.get_count = common_ring_get_count,
};

static const struct rte_mempool_ops ops_sp_mc = {
	.name = "ring_sp_mc",
	.alloc = common_ring_alloc,
	.free = common_ring_free,
	.enqueue = common_ring_sp_enqueue,
	.dequeue = common_ring_mc_dequeue,
	.get_count = common_ring_get_count,
};

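/*
 * Register the four handlers so rte_mempool_set_ops_byname() can find
 * them at pool creation time.
 */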
MEMPOOL_REGISTER_OPS(ops_mp_mc);
MEMPOOL_REGISTER_OPS(ops_sp_sc);
MEMPOOL_REGISTER_OPS(ops_mp_sc);
MEMPOOL_REGISTER_OPS(ops_sp_mc);
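
/*
 * Usage sketch (illustrative, not part of this driver): an application
 * picks one of these handlers implicitly through the flags passed to
 * rte_mempool_create(), or explicitly by name on an empty pool. The
 * pool names, sizes and counts below are example values only.
 *
 *	// Implicit: SP put + SC get flags select the "ring_sp_sc" ops.
 *	struct rte_mempool *mp = rte_mempool_create("example_pool",
 *			4095, 64, 0, 0, NULL, NULL, NULL, NULL,
 *			SOCKET_ID_ANY,
 *			MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
 *
 *	// Explicit: bind the ops by name, then populate the pool.
 *	struct rte_mempool *mp2 = rte_mempool_create_empty("example_pool2",
 *			4095, 64, 0, 0, SOCKET_ID_ANY, 0);
 *	if (mp2 != NULL &&
 *	    rte_mempool_set_ops_byname(mp2, "ring_mp_mc", NULL) == 0)
 *		rte_mempool_populate_default(mp2);
 */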