path: root/app/test/test_mempool.c
author    Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 09:22:35 +0200
committer Christian Ehrhardt <christian.ehrhardt@canonical.com>  2016-07-06 16:09:40 +0200
commit    8b25d1ad5d2264bdfc2818c7bda74ee2697df6db (patch)
tree      8c3c769777f7e66a2d1ba7dd7651b563cfde370b /app/test/test_mempool.c
parent    97f17497d162afdb82c8704bf097f0fee3724b2e (diff)
Imported Upstream version 16.07-rc1
Change-Id: I40a523e52f12e8496fdd69e902824b0226c303de
Signed-off-by: Christian Ehrhardt <christian.ehrhardt@canonical.com>
Diffstat (limited to 'app/test/test_mempool.c')
-rw-r--r--  app/test/test_mempool.c | 346
1 file changed, 217 insertions(+), 129 deletions(-)
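
The diff below switches the test to two APIs introduced in 16.07: the user-owned mempool cache (rte_mempool_cache_create() plus rte_mempool_generic_get()/rte_mempool_generic_put()) and the external "stack" ops handler selected with rte_mempool_set_ops_byname(). The following stand-alone sketch is not part of the patch; it only illustrates, under the assumption of an application built against DPDK 16.07 with the stack handler available, how those calls fit together. Pool name, sizes, and the minimal error handling are illustrative only.

/* Hedged sketch, not part of the patch: minimal use of the 16.07
 * external-ops and user-owned cache mempool APIs exercised by this test. */
#include <stdio.h>
#include <rte_eal.h>
#include <rte_mempool.h>

int
main(int argc, char **argv)
{
	struct rte_mempool *mp;
	struct rte_mempool_cache *cache;
	void *obj;

	if (rte_eal_init(argc, argv) < 0)
		return -1;

	/* allocate an empty pool, pick the "stack" handler, then populate it */
	mp = rte_mempool_create_empty("sketch_pool", 1024, 2048,
			0, 0, SOCKET_ID_ANY, 0);
	if (mp == NULL)
		return -1;
	if (rte_mempool_set_ops_byname(mp, "stack", NULL) < 0 ||
			rte_mempool_populate_default(mp) < 0) {
		rte_mempool_free(mp);
		return -1;
	}

	/* user-owned cache, passed explicitly to the generic get/put calls */
	cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
			SOCKET_ID_ANY);
	if (cache == NULL) {
		rte_mempool_free(mp);
		return -1;
	}

	if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) == 0) {
		printf("got one object via the user-owned cache\n");
		rte_mempool_generic_put(mp, &obj, 1, cache, 0);
	}

	/* return any cached objects to the pool before freeing the cache */
	rte_mempool_cache_flush(cache, mp);
	rte_mempool_cache_free(cache);
	rte_mempool_free(mp);
	return 0;
}
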
diff --git a/app/test/test_mempool.c b/app/test/test_mempool.c
index f0f823b9..9ea98314 100644
--- a/app/test/test_mempool.c
+++ b/app/test/test_mempool.c
@@ -71,19 +71,23 @@
* put them back in the pool.
*/
-#define N 65536
-#define TIME_S 5
#define MEMPOOL_ELT_SIZE 2048
-#define MAX_KEEP 128
+#define MAX_KEEP 16
#define MEMPOOL_SIZE ((rte_lcore_count()*(MAX_KEEP+RTE_MEMPOOL_CACHE_MAX_SIZE))-1)
-static struct rte_mempool *mp;
-static struct rte_mempool *mp_cache, *mp_nocache;
+#define LOG_ERR() printf("test failed at %s():%d\n", __func__, __LINE__)
+#define RET_ERR() do { \
+ LOG_ERR(); \
+ return -1; \
+ } while (0)
+#define GOTO_ERR(var, label) do { \
+ LOG_ERR(); \
+ var = -1; \
+ goto label; \
+ } while (0)
static rte_atomic32_t synchro;
-
-
/*
* save the object number in the first 4 bytes of object data. All
* other bytes are set to 0.
@@ -93,13 +97,14 @@ my_obj_init(struct rte_mempool *mp, __attribute__((unused)) void *arg,
void *obj, unsigned i)
{
uint32_t *objnum = obj;
+
memset(obj, 0, mp->elt_size);
*objnum = i;
}
/* basic tests (done on one core) */
static int
-test_mempool_basic(void)
+test_mempool_basic(struct rte_mempool *mp, int use_external_cache)
{
uint32_t *objnum;
void **objtable;
@@ -107,48 +112,62 @@ test_mempool_basic(void)
char *obj_data;
int ret = 0;
unsigned i, j;
+ int offset;
+ struct rte_mempool_cache *cache;
+
+ if (use_external_cache) {
+ /* Create a user-owned mempool cache. */
+ cache = rte_mempool_cache_create(RTE_MEMPOOL_CACHE_MAX_SIZE,
+ SOCKET_ID_ANY);
+ if (cache == NULL)
+ RET_ERR();
+ } else {
+ /* May be NULL if cache is disabled. */
+ cache = rte_mempool_default_cache(mp, rte_lcore_id());
+ }
/* dump the mempool status */
rte_mempool_dump(stdout, mp);
printf("get an object\n");
- if (rte_mempool_get(mp, &obj) < 0)
- return -1;
+ if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+ GOTO_ERR(ret, out);
rte_mempool_dump(stdout, mp);
/* tests that improve coverage */
printf("get object count\n");
- if (rte_mempool_count(mp) != MEMPOOL_SIZE - 1)
- return -1;
+ /* We have to count the extra caches, one in this case. */
+ offset = use_external_cache ? 1 * cache->len : 0;
+ if (rte_mempool_avail_count(mp) + offset != MEMPOOL_SIZE - 1)
+ GOTO_ERR(ret, out);
printf("get private data\n");
- if (rte_mempool_get_priv(mp) !=
- (char*) mp + MEMPOOL_HEADER_SIZE(mp, mp->pg_num))
- return -1;
+ if (rte_mempool_get_priv(mp) != (char *)mp +
+ MEMPOOL_HEADER_SIZE(mp, mp->cache_size))
+ GOTO_ERR(ret, out);
+#ifndef RTE_EXEC_ENV_BSDAPP /* rte_mem_virt2phy() not supported on bsd */
printf("get physical address of an object\n");
- if (MEMPOOL_IS_CONTIG(mp) &&
- rte_mempool_virt2phy(mp, obj) !=
- (phys_addr_t) (mp->phys_addr +
- (phys_addr_t) ((char*) obj - (char*) mp)))
- return -1;
+ if (rte_mempool_virt2phy(mp, obj) != rte_mem_virt2phy(obj))
+ GOTO_ERR(ret, out);
+#endif
printf("put the object back\n");
- rte_mempool_put(mp, obj);
+ rte_mempool_generic_put(mp, &obj, 1, cache, 0);
rte_mempool_dump(stdout, mp);
printf("get 2 objects\n");
- if (rte_mempool_get(mp, &obj) < 0)
- return -1;
- if (rte_mempool_get(mp, &obj2) < 0) {
- rte_mempool_put(mp, obj);
- return -1;
+ if (rte_mempool_generic_get(mp, &obj, 1, cache, 0) < 0)
+ GOTO_ERR(ret, out);
+ if (rte_mempool_generic_get(mp, &obj2, 1, cache, 0) < 0) {
+ rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+ GOTO_ERR(ret, out);
}
rte_mempool_dump(stdout, mp);
printf("put the objects back\n");
- rte_mempool_put(mp, obj);
- rte_mempool_put(mp, obj2);
+ rte_mempool_generic_put(mp, &obj, 1, cache, 0);
+ rte_mempool_generic_put(mp, &obj2, 1, cache, 0);
rte_mempool_dump(stdout, mp);
/*
@@ -156,12 +175,11 @@ test_mempool_basic(void)
* on other cores may not be empty.
*/
objtable = malloc(MEMPOOL_SIZE * sizeof(void *));
- if (objtable == NULL) {
- return -1;
- }
+ if (objtable == NULL)
+ GOTO_ERR(ret, out);
- for (i=0; i<MEMPOOL_SIZE; i++) {
- if (rte_mempool_get(mp, &objtable[i]) < 0)
+ for (i = 0; i < MEMPOOL_SIZE; i++) {
+ if (rte_mempool_generic_get(mp, &objtable[i], 1, cache, 0) < 0)
break;
}
@@ -174,22 +192,28 @@ test_mempool_basic(void)
obj_data = obj;
objnum = obj;
if (*objnum > MEMPOOL_SIZE) {
- printf("bad object number\n");
+ printf("bad object number(%d)\n", *objnum);
ret = -1;
break;
}
- for (j=sizeof(*objnum); j<mp->elt_size; j++) {
+ for (j = sizeof(*objnum); j < mp->elt_size; j++) {
if (obj_data[j] != 0)
ret = -1;
}
- rte_mempool_put(mp, objtable[i]);
+ rte_mempool_generic_put(mp, &objtable[i], 1, cache, 0);
}
free(objtable);
if (ret == -1)
printf("objects were modified!\n");
+out:
+ if (use_external_cache) {
+ rte_mempool_cache_flush(cache, mp);
+ rte_mempool_cache_free(cache);
+ }
+
return ret;
}
@@ -197,14 +221,17 @@ static int test_mempool_creation_with_exceeded_cache_size(void)
{
struct rte_mempool *mp_cov;
- mp_cov = rte_mempool_create("test_mempool_creation_with_exceeded_cache_size", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE,
- RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if(NULL != mp_cov) {
- return -1;
+ mp_cov = rte_mempool_create("test_mempool_cache_too_big",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE + 32, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_cov != NULL) {
+ rte_mempool_free(mp_cov);
+ RET_ERR();
}
return 0;
@@ -222,7 +249,7 @@ static int test_mempool_single_producer(void)
unsigned int i;
void *obj = NULL;
uint64_t start_cycles, end_cycles;
- uint64_t duration = rte_get_timer_hz() * 8;
+ uint64_t duration = rte_get_timer_hz() / 4;
start_cycles = rte_get_timer_cycles();
while (1) {
@@ -242,10 +269,10 @@ static int test_mempool_single_producer(void)
continue;
}
if (rte_mempool_from_obj(obj) != mp_spsc) {
- printf("test_mempool_single_producer there is an obj not owned by this mempool\n");
- return -1;
+ printf("obj not owned by this mempool\n");
+ RET_ERR();
}
- rte_mempool_sp_put(mp_spsc, obj);
+ rte_mempool_put(mp_spsc, obj);
rte_spinlock_lock(&scsp_spinlock);
scsp_obj_table[i] = NULL;
rte_spinlock_unlock(&scsp_spinlock);
@@ -262,7 +289,7 @@ static int test_mempool_single_consumer(void)
unsigned int i;
void * obj;
uint64_t start_cycles, end_cycles;
- uint64_t duration = rte_get_timer_hz() * 5;
+ uint64_t duration = rte_get_timer_hz() / 8;
start_cycles = rte_get_timer_cycles();
while (1) {
@@ -278,7 +305,7 @@ static int test_mempool_single_consumer(void)
rte_spinlock_unlock(&scsp_spinlock);
if (i >= MAX_KEEP)
continue;
- if (rte_mempool_sc_get(mp_spsc, &obj) < 0)
+ if (rte_mempool_get(mp_spsc, &obj) < 0)
break;
rte_spinlock_lock(&scsp_spinlock);
scsp_obj_table[i] = obj;
@@ -289,14 +316,17 @@ static int test_mempool_single_consumer(void)
}
/*
- * test function for mempool test based on singple consumer and single producer, can run on one lcore only
+ * test function for mempool test based on singple consumer and single producer,
+ * can run on one lcore only
*/
-static int test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
+static int
+test_mempool_launch_single_consumer(__attribute__((unused)) void *arg)
{
return test_mempool_single_consumer();
}
-static void my_mp_init(struct rte_mempool * mp, __attribute__((unused)) void * arg)
+static void
+my_mp_init(struct rte_mempool *mp, __attribute__((unused)) void *arg)
{
printf("mempool name is %s\n", mp->name);
/* nothing to be implemented here*/
@@ -314,33 +344,41 @@ test_mempool_sp_sc(void)
unsigned lcore_next;
/* create a mempool with single producer/consumer ring */
- if (NULL == mp_spsc) {
+ if (mp_spsc == NULL) {
mp_spsc = rte_mempool_create("test_mempool_sp_sc", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- my_mp_init, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT | MEMPOOL_F_SC_GET);
- if (NULL == mp_spsc) {
- return -1;
- }
+ MEMPOOL_ELT_SIZE, 0, 0,
+ my_mp_init, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY,
+ MEMPOOL_F_NO_CACHE_ALIGN | MEMPOOL_F_SP_PUT |
+ MEMPOOL_F_SC_GET);
+ if (mp_spsc == NULL)
+ RET_ERR();
}
if (rte_mempool_lookup("test_mempool_sp_sc") != mp_spsc) {
printf("Cannot lookup mempool from its name\n");
- return -1;
+ rte_mempool_free(mp_spsc);
+ RET_ERR();
}
lcore_next = rte_get_next_lcore(lcore_id, 0, 1);
- if (RTE_MAX_LCORE <= lcore_next)
- return -1;
- if (rte_eal_lcore_role(lcore_next) != ROLE_RTE)
- return -1;
+ if (lcore_next >= RTE_MAX_LCORE) {
+ rte_mempool_free(mp_spsc);
+ RET_ERR();
+ }
+ if (rte_eal_lcore_role(lcore_next) != ROLE_RTE) {
+ rte_mempool_free(mp_spsc);
+ RET_ERR();
+ }
rte_spinlock_init(&scsp_spinlock);
memset(scsp_obj_table, 0, sizeof(scsp_obj_table));
- rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL, lcore_next);
- if(test_mempool_single_producer() < 0)
+ rte_eal_remote_launch(test_mempool_launch_single_consumer, NULL,
+ lcore_next);
+ if (test_mempool_single_producer() < 0)
ret = -1;
- if(rte_eal_wait_lcore(lcore_next) < 0)
+ if (rte_eal_wait_lcore(lcore_next) < 0)
ret = -1;
+ rte_mempool_free(mp_spsc);
return ret;
}
@@ -349,7 +387,7 @@ test_mempool_sp_sc(void)
* it tests some more basic of mempool
*/
static int
-test_mempool_basic_ex(struct rte_mempool * mp)
+test_mempool_basic_ex(struct rte_mempool *mp)
{
unsigned i;
void **obj;
@@ -359,38 +397,41 @@ test_mempool_basic_ex(struct rte_mempool * mp)
if (mp == NULL)
return ret;
- obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE , sizeof(void *), 0);
+ obj = rte_calloc("test_mempool_basic_ex", MEMPOOL_SIZE,
+ sizeof(void *), 0);
if (obj == NULL) {
printf("test_mempool_basic_ex fail to rte_malloc\n");
return ret;
}
- printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n", mp->name, rte_mempool_free_count(mp));
+ printf("test_mempool_basic_ex now mempool (%s) has %u free entries\n",
+ mp->name, rte_mempool_in_use_count(mp));
if (rte_mempool_full(mp) != 1) {
- printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+ printf("test_mempool_basic_ex the mempool should be full\n");
goto fail_mp_basic_ex;
}
for (i = 0; i < MEMPOOL_SIZE; i ++) {
- if (rte_mempool_mc_get(mp, &obj[i]) < 0) {
- printf("fail_mp_basic_ex fail to get mempool object for [%u]\n", i);
+ if (rte_mempool_get(mp, &obj[i]) < 0) {
+ printf("test_mp_basic_ex fail to get object for [%u]\n",
+ i);
goto fail_mp_basic_ex;
}
}
- if (rte_mempool_mc_get(mp, &err_obj) == 0) {
- printf("test_mempool_basic_ex get an impossible obj from mempool\n");
+ if (rte_mempool_get(mp, &err_obj) == 0) {
+ printf("test_mempool_basic_ex get an impossible obj\n");
goto fail_mp_basic_ex;
}
printf("number: %u\n", i);
if (rte_mempool_empty(mp) != 1) {
- printf("test_mempool_basic_ex the mempool is not empty but it should be\n");
+ printf("test_mempool_basic_ex the mempool should be empty\n");
goto fail_mp_basic_ex;
}
- for (i = 0; i < MEMPOOL_SIZE; i ++) {
- rte_mempool_mp_put(mp, obj[i]);
- }
+ for (i = 0; i < MEMPOOL_SIZE; i++)
+ rte_mempool_put(mp, obj[i]);
+
if (rte_mempool_full(mp) != 1) {
- printf("test_mempool_basic_ex the mempool is not full but it should be\n");
+ printf("test_mempool_basic_ex the mempool should be full\n");
goto fail_mp_basic_ex;
}
@@ -406,24 +447,30 @@ fail_mp_basic_ex:
static int
test_mempool_same_name_twice_creation(void)
{
- struct rte_mempool *mp_tc;
-
- mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (NULL == mp_tc)
- return -1;
-
- mp_tc = rte_mempool_create("test_mempool_same_name_twice_creation", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- NULL, NULL,
- SOCKET_ID_ANY, 0);
- if (NULL != mp_tc)
- return -1;
+ struct rte_mempool *mp_tc, *mp_tc2;
+
+ mp_tc = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ NULL, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_tc == NULL)
+ RET_ERR();
+
+ mp_tc2 = rte_mempool_create("test_mempool_same_name", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ NULL, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_tc2 != NULL) {
+ rte_mempool_free(mp_tc);
+ rte_mempool_free(mp_tc2);
+ RET_ERR();
+ }
+ rte_mempool_free(mp_tc);
return 0;
}
@@ -444,7 +491,7 @@ test_mempool_xmem_misc(void)
usz = rte_mempool_xmem_usage(NULL, elt_num, total_size, 0, 1,
MEMPOOL_PG_SHIFT_MAX);
- if(sz != (size_t)usz) {
+ if (sz != (size_t)usz) {
printf("failure @ %s: rte_mempool_xmem_usage(%u, %u) "
"returns: %#zx, while expected: %#zx;\n",
__func__, elt_num, total_size, sz, (size_t)usz);
@@ -457,68 +504,109 @@ test_mempool_xmem_misc(void)
static int
test_mempool(void)
{
+ struct rte_mempool *mp_cache = NULL;
+ struct rte_mempool *mp_nocache = NULL;
+ struct rte_mempool *mp_ext = NULL;
+ struct rte_mempool *mp_stack = NULL;
+
rte_atomic32_init(&synchro);
/* create a mempool (without cache) */
- if (mp_nocache == NULL)
- mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE, 0, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if (mp_nocache == NULL)
- return -1;
+ mp_nocache = rte_mempool_create("test_nocache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE, 0, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_nocache == NULL) {
+ printf("cannot allocate mp_nocache mempool\n");
+ goto err;
+ }
/* create a mempool (with cache) */
- if (mp_cache == NULL)
- mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
- MEMPOOL_ELT_SIZE,
- RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
- NULL, NULL,
- my_obj_init, NULL,
- SOCKET_ID_ANY, 0);
- if (mp_cache == NULL)
- return -1;
+ mp_cache = rte_mempool_create("test_cache", MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ NULL, NULL,
+ my_obj_init, NULL,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_cache == NULL) {
+ printf("cannot allocate mp_cache mempool\n");
+ goto err;
+ }
+ /* create a mempool with an external handler */
+ mp_stack = rte_mempool_create_empty("test_stack",
+ MEMPOOL_SIZE,
+ MEMPOOL_ELT_SIZE,
+ RTE_MEMPOOL_CACHE_MAX_SIZE, 0,
+ SOCKET_ID_ANY, 0);
+
+ if (mp_stack == NULL) {
+ printf("cannot allocate mp_stack mempool\n");
+ goto err;
+ }
+ if (rte_mempool_set_ops_byname(mp_stack, "stack", NULL) < 0) {
+ printf("cannot set stack handler\n");
+ goto err;
+ }
+ if (rte_mempool_populate_default(mp_stack) < 0) {
+ printf("cannot populate mp_stack mempool\n");
+ goto err;
+ }
+ rte_mempool_obj_iter(mp_stack, my_obj_init, NULL);
/* retrieve the mempool from its name */
if (rte_mempool_lookup("test_nocache") != mp_nocache) {
printf("Cannot lookup mempool from its name\n");
- return -1;
+ goto err;
}
rte_mempool_list_dump(stdout);
/* basic tests without cache */
- mp = mp_nocache;
- if (test_mempool_basic() < 0)
- return -1;
+ if (test_mempool_basic(mp_nocache, 0) < 0)
+ goto err;
/* basic tests with cache */
- mp = mp_cache;
- if (test_mempool_basic() < 0)
- return -1;
+ if (test_mempool_basic(mp_cache, 0) < 0)
+ goto err;
+
+ /* basic tests with user-owned cache */
+ if (test_mempool_basic(mp_nocache, 1) < 0)
+ goto err;
/* more basic tests without cache */
if (test_mempool_basic_ex(mp_nocache) < 0)
- return -1;
+ goto err;
/* mempool operation test based on single producer and single comsumer */
if (test_mempool_sp_sc() < 0)
- return -1;
+ goto err;
if (test_mempool_creation_with_exceeded_cache_size() < 0)
- return -1;
+ goto err;
if (test_mempool_same_name_twice_creation() < 0)
- return -1;
+ goto err;
if (test_mempool_xmem_misc() < 0)
- return -1;
+ goto err;
+
+ /* test the stack handler */
+ if (test_mempool_basic(mp_stack, 1) < 0)
+ goto err;
rte_mempool_list_dump(stdout);
return 0;
+
+err:
+ rte_mempool_free(mp_nocache);
+ rte_mempool_free(mp_cache);
+ rte_mempool_free(mp_ext);
+ return -1;
}
static struct test_command mempool_cmd = {