aboutsummaryrefslogtreecommitdiffstats
path: root/test/test/test_sched.c
blob: 32e500ba92b7bb2a77a2ed3e6e60f427f644fa38 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <unistd.h>

#include "test.h"

#include <rte_cycles.h>
#include <rte_ether.h>
#include <rte_ip.h>
#include <rte_byteorder.h>
#include <rte_sched.h>


#define SUBPORT         0
#define PIPE            1
#define TC              2
#define QUEUE           3

/*
 * Single subport profile: token bucket sized for a 10 GbE line rate
 * (1250000000 bytes/s == 10 Gbit/s), with all four traffic classes
 * allowed the full subport rate. tb/tc rates are in bytes per second;
 * tc_period is in milliseconds.
 */
static struct rte_sched_subport_params subport_param[] = {
	{
		.tb_rate = 1250000000,
		.tb_size = 1000000,

		.tc_rate = {1250000000, 1250000000, 1250000000, 1250000000},
		.tc_period = 10,
	},
};

/*
 * Single pipe profile (#0), shared by every pipe configured in
 * test_sched(). Rates are in bytes per second; 305175 B/s is roughly
 * the 10 Gbit/s subport rate divided evenly across 4096 pipes.
 * All 16 WRR queues (4 TCs x 4 queues) get equal weight.
 */
static struct rte_sched_pipe_params pipe_profile[] = {
	{ /* Profile #0 */
		.tb_rate = 305175,
		.tb_size = 1000000,

		.tc_rate = {305175, 305175, 305175, 305175},
		.tc_period = 40,

		.wrr_weights = {1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1,  1, 1, 1, 1},
	},
};

/*
 * Port configuration used by test_sched(). The fields marked
 * "computed" are placeholders filled in at runtime before
 * rte_sched_port_config() is called (see test_sched()).
 */
static struct rte_sched_port_params port_param = {
	.socket = 0, /* computed */
	.rate = 0, /* computed */
	.mtu = 1522,
	.frame_overhead = RTE_SCHED_FRAME_OVERHEAD_DEFAULT,
	.n_subports_per_port = 1,
	.n_pipes_per_subport = 1024,
	.qsize = {32, 32, 32, 32},
	.pipe_profiles = pipe_profile,
	.n_pipe_profiles = 1,
};

#define NB_MBUF          32
#define MBUF_DATA_SZ     (2048 + RTE_PKTMBUF_HEADROOM)
#define MEMPOOL_CACHE_SZ 0
#define SOCKET           0


static struct rte_mempool *
create_mempool(void)
{
	struct rte_mempool * mp;

	mp = rte_mempool_lookup("test_sched");
	if (!mp)
		mp = rte_pktmbuf_pool_create("test_sched", NB_MBUF,
			MEMPOOL_CACHE_SZ, 0, MBUF_DATA_SZ, SOCKET);

	return mp;
}

/*
 * Build a minimal QinQ (double-VLAN) IPv4 frame in the mbuf and stamp
 * the scheduler metadata, simulating what a classifier stage would do:
 *   - outer VLAN TCI encodes the subport, inner VLAN TCI the pipe;
 *   - the IPv4 destination address encodes TC (byte 2) and queue (byte 3).
 * Finally the sched tree path and YELLOW color are written into the
 * mbuf so the dequeue side of the test can read them back.
 */
static void
prepare_pkt(struct rte_mbuf *mbuf)
{
	struct ether_hdr *eth_hdr;
	struct vlan_hdr *vlan1, *vlan2;
	struct ipv4_hdr *ip_hdr;

	/* Simulate a classifier */
	eth_hdr = rte_pktmbuf_mtod(mbuf, struct ether_hdr *);
	/* The two VLAN tags start at the ether_type offset; eth_hdr is then
	 * re-pointed past them so ether_type lands after the tags.
	 * NOTE(review): the headers deliberately overlap (each pointer is an
	 * offset into the same buffer), so the write order below matters. */
	vlan1 = (struct vlan_hdr *)(&eth_hdr->ether_type );
	vlan2 = (struct vlan_hdr *)((uintptr_t)&eth_hdr->ether_type + sizeof(struct vlan_hdr));
	eth_hdr = (struct ether_hdr *)((uintptr_t)&eth_hdr->ether_type + 2 *sizeof(struct vlan_hdr));
	ip_hdr = (struct ipv4_hdr *)((uintptr_t)eth_hdr +  sizeof(eth_hdr->ether_type));

	vlan1->vlan_tci = rte_cpu_to_be_16(SUBPORT);
	vlan2->vlan_tci = rte_cpu_to_be_16(PIPE);
	eth_hdr->ether_type =  rte_cpu_to_be_16(ETHER_TYPE_IPv4);
	ip_hdr->dst_addr = IPv4(0,0,TC,QUEUE);


	/* Stamp the tree path and color directly; this is what the
	 * dequeue-side checks in test_sched() read back. */
	rte_sched_port_pkt_write(mbuf, SUBPORT, PIPE, TC, QUEUE, e_RTE_METER_YELLOW);

	/* 64 byte packet */
	mbuf->pkt_len  = 60;
	mbuf->data_len = 60;
}


/**
 * test main entrance for library sched
 *
 * Configures a port with one subport and 1024 pipes (all on pipe
 * profile #0), enqueues ten classified packets, dequeues them and
 * verifies that the color and the subport/pipe/tc/queue tree path
 * survive the round trip through the scheduler.
 *
 * Returns 0 on success; TEST_ASSERT_* macros return early with an
 * error on failure.
 */
static int
test_sched(void)
{
	struct rte_mempool *mp = NULL;
	struct rte_sched_port *port = NULL;
	uint32_t pipe;
	struct rte_mbuf *in_mbufs[10];
	struct rte_mbuf *out_mbufs[10];
	int i;

	int err;

	mp = create_mempool();
	TEST_ASSERT_NOT_NULL(mp, "Error creating mempool\n");

	/* Fill in the fields marked "computed" in port_param:
	 * 10 Gbit/s expressed in bytes per second. */
	port_param.socket = 0;
	port_param.rate = (uint64_t) 10000 * 1000 * 1000 / 8;

	port = rte_sched_port_config(&port_param);
	TEST_ASSERT_NOT_NULL(port, "Error config sched port\n");

	err = rte_sched_subport_config(port, SUBPORT, subport_param);
	TEST_ASSERT_SUCCESS(err, "Error config sched, err=%d\n", err);

	/* Every pipe of the subport uses profile #0. */
	for (pipe = 0; pipe < port_param.n_pipes_per_subport; pipe ++) {
		err = rte_sched_pipe_config(port, SUBPORT, pipe, 0);
		TEST_ASSERT_SUCCESS(err, "Error config sched pipe %u, err=%d\n", pipe, err);
	}

	for (i = 0; i < 10; i++) {
		in_mbufs[i] = rte_pktmbuf_alloc(mp);
		TEST_ASSERT_NOT_NULL(in_mbufs[i], "Packet allocation failed\n");
		prepare_pkt(in_mbufs[i]);
	}


	err = rte_sched_port_enqueue(port, in_mbufs, 10);
	TEST_ASSERT_EQUAL(err, 10, "Wrong enqueue, err=%d\n", err);

	err = rte_sched_port_dequeue(port, out_mbufs, 10);
	TEST_ASSERT_EQUAL(err, 10, "Wrong dequeue, err=%d\n", err);

	for (i = 0; i < 10; i++) {
		enum rte_meter_color color;
		uint32_t subport, traffic_class, queue;

		color = rte_sched_port_pkt_read_color(out_mbufs[i]);
		TEST_ASSERT_EQUAL(color, e_RTE_METER_YELLOW, "Wrong color\n");

		rte_sched_port_pkt_read_tree_path(out_mbufs[i],
				&subport, &pipe, &traffic_class, &queue);

		TEST_ASSERT_EQUAL(subport, SUBPORT, "Wrong subport\n");
		TEST_ASSERT_EQUAL(pipe, PIPE, "Wrong pipe\n");
		TEST_ASSERT_EQUAL(traffic_class, TC, "Wrong traffic_class\n");
		TEST_ASSERT_EQUAL(queue, QUEUE, "Wrong queue\n");

		/* Return the mbuf to the pool. Without this each run leaks
		 * 10 of the NB_MBUF (32) mbufs in the shared "test_sched"
		 * pool, and re-running the autotest soon fails allocation. */
		rte_pktmbuf_free(out_mbufs[i]);
	}


	struct rte_sched_subport_stats subport_stats;
	uint32_t tc_ov;
	rte_sched_subport_read_stats(port, SUBPORT, &subport_stats, &tc_ov);
#if 0
	TEST_ASSERT_EQUAL(subport_stats.n_pkts_tc[TC-1], 10, "Wrong subport stats\n");
#endif
	struct rte_sched_queue_stats queue_stats;
	uint16_t qlen;
	rte_sched_queue_read_stats(port, QUEUE, &queue_stats, &qlen);
#if 0
	TEST_ASSERT_EQUAL(queue_stats.n_pkts, 10, "Wrong queue stats\n");
#endif

	rte_sched_port_free(port);

	return 0;
}

REGISTER_TEST_COMMAND(sched_autotest, test_sched);