From 425c6c2d1ab01b8c8de11e2cba021def2c05574f Mon Sep 17 00:00:00 2001 From: B Sharath Kumar Reddy Date: Thu, 10 May 2018 11:35:47 +0530 Subject: Coding style error check and remove some unused macros Change-Id: I93af88ae86debd47b594fcca6792b90024a229c6 Signed-off-by: sharath reddy Signed-off-by: Yalei Wang --- .../common/base/include/common/common_sys_config.h | 20 +-- .../common/base/liblinuxapi/nsfw_getopt.c | 98 +++++++------- src/framework/common/data_struct/list.c | 55 ++++---- src/framework/common/data_struct/sha256.c | 146 ++++++++++----------- .../common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c | 83 ++++++------ 5 files changed, 195 insertions(+), 207 deletions(-) (limited to 'src/framework/common') diff --git a/src/framework/common/base/include/common/common_sys_config.h b/src/framework/common/base/include/common/common_sys_config.h index 552cae3..736c47b 100644 --- a/src/framework/common/base/include/common/common_sys_config.h +++ b/src/framework/common/base/include/common/common_sys_config.h @@ -21,25 +21,25 @@ #if (HAL_LIB) #else #undef RTE_CACHE_LINE_SIZE -#define RTE_CACHE_LINE_SIZE 64 +#define RTE_CACHE_LINE_SIZE 64 /* RTE_CACHE_LINE_SIZE */ #undef RTE_MAX_LCORE -#define RTE_MAX_LCORE 128 +#define RTE_MAX_LCORE 128 /* RTE_MAX_LCORE */ #undef RTE_MAX_NUMA_NODES -#define RTE_MAX_NUMA_NODES 8 +#define RTE_MAX_NUMA_NODES 8 /* RTE_MAX_NUMA_NODES */ #undef RTE_MAX_MEMSEG -#define RTE_MAX_MEMSEG 256 +#define RTE_MAX_MEMSEG 256 /* RTE_MAX_MEMSEG */ #undef RTE_MAX_MEMZONE -#define RTE_MAX_MEMZONE 2560 +#define RTE_MAX_MEMZONE 2560 /* RTE_MAX_MEMZONE */ #undef RTE_MAX_TAILQ -#define RTE_MAX_TAILQ 32 +#define RTE_MAX_TAILQ 32 /* RTE_MAX_TAILQ */ #undef RTE_ARCH_X86 -#define RTE_ARCH_X86 1 +#define RTE_ARCH_X86 1 /* RTE_ARCH_X86 */ #undef RTE_ARCH_64 -#define RTE_ARCH_64 1 +#define RTE_ARCH_64 1 /* RTE_ARCH_64 */ #undef RTE_PKTMBUF_HEADROOM -#define RTE_PKTMBUF_HEADROOM 128 +#define RTE_PKTMBUF_HEADROOM 128 /* RTE_PKTMBUF_HEADROOM */ #undef 
RTE_MEMPOOL_CACHE_MAX_SIZE -#define RTE_MEMPOOL_CACHE_MAX_SIZE 512 +#define RTE_MEMPOOL_CACHE_MAX_SIZE 512 /* RTE_MEMPOOL_CACHE_MAX_SIZE */ #endif diff --git a/src/framework/common/base/liblinuxapi/nsfw_getopt.c b/src/framework/common/base/liblinuxapi/nsfw_getopt.c index ac7d6bf..4d6227b 100644 --- a/src/framework/common/base/liblinuxapi/nsfw_getopt.c +++ b/src/framework/common/base/liblinuxapi/nsfw_getopt.c @@ -59,19 +59,19 @@ check_gnu_extension (const char *optstring) { if (optstring[0] == '+' || getenv ("POSIXLY_CORRECT") != NULL) { - posixly_correct = 1; + posixly_correct = 1; /* assign 1 to posixly_correct */ } else { - posixly_correct = 0; + posixly_correct = 0; /* assign 0 to posixly_correct */ } if (optstring[0] == '-') { - handle_nonopt_argv = 1; + handle_nonopt_argv = 1; /* assign 1 to handle_nonopt_argv */ } else { - handle_nonopt_argv = 0; + handle_nonopt_argv = 0; /* assign 0 to handle_nonopt_argv */ } } @@ -83,11 +83,11 @@ nsfw_getopt_long (int argc, char *const argv[], const char *optstring, } NSTACK_STATIC inline int -nsfw_getopt_internal_check_opts (const char *optstring) +nsfw_getopt_internal_check_opts (const char *optstr) { - if (NULL == optstring) + if (NULL == optstr) { - return -1; + return -1; /* return -1 */ } if (nsfw_optopt == '?') @@ -97,21 +97,21 @@ nsfw_getopt_internal_check_opts (const char *optstring) if (posixly_correct == -1) { - check_gnu_extension (optstring); + check_gnu_extension (optstr); } if (nsfw_optind == 0) { - check_gnu_extension (optstring); + check_gnu_extension (optstr); nsfw_optind = 1; nsfw_optnext = NULL; } - switch (optstring[0]) + switch (optstr[0]) { - case '+': case '-': - optstring++; + case '+': + optstr++; break; default: break; @@ -132,25 +132,25 @@ nsfw_getopt_internal_init (char *const argv[]) { if (nsfw_optnext == NULL && start != 0) { - int last_pos = nsfw_optind - 1; + int last_loc = nsfw_optind - 1; nsfw_optind -= end - start; (void) nsfw_getopt_check_optind (); while (start < end--) { - int i; + int 
j; char *arg = argv[end]; - for (i = end; i < last_pos; i++) + for (j = end; j < last_loc; j++) { - int j = i + 1; - ((char **) argv)[i] = argv[j]; + int k = j + 1; + ((char **) argv)[j] = argv[k]; } - ((char const **) argv)[i] = arg; - last_pos--; + ((char const **) argv)[j] = arg; + last_loc--; } - start = 0; + start = 0; /*make start as zero */ } return 0; } @@ -168,7 +168,7 @@ nsfw_getopt_internal (int argc, char *const argv[], const char *optstring, if (nsfw_optind >= argc) { nsfw_optarg = NULL; - return -1; + return -1; /* return -1 */ } if (nsfw_optnext == NULL) { @@ -188,23 +188,23 @@ nsfw_getopt_internal (int argc, char *const argv[], const char *optstring, } else { - int i; + int k; start = nsfw_optind; - for (i = nsfw_optind + 1; i < argc; i++) + for (k = nsfw_optind + 1; k < argc; k++) { - if (argv[i][0] == '-') + if (argv[k][0] == '-') { - end = i; + end = k; break; } } - if (i == argc) + if (k == argc) { nsfw_optarg = NULL; return -1; } - nsfw_optind = i; + nsfw_optind = k; arg = argv[nsfw_optind]; } } @@ -360,27 +360,27 @@ nsfw_getopt_longopts (int argc, char *const argv[], char *arg, const char *optstring, const struct option *longopts, int *longindex, int *long_only_flag) { - char *val = NULL; - const struct option *opt; - size_t namelen; - int idx; + char *value = NULL; + const struct option *option; + size_t namelength; + int index; if ((longopts == NULL) || (arg == NULL)) { return -1; } - for (idx = 0; longopts[idx].name != NULL; idx++) + for (index = 0; longopts[index].name != NULL; index++) { - opt = &longopts[idx]; - namelen = strlen (opt->name); + option = &longopts[index]; + namelength = strlen (option->name); - if (strncmp (arg, opt->name, namelen) == 0) + if (strncmp (arg, option->name, namelength) == 0) { - switch (arg[namelen]) + switch (arg[namelength]) { case '\0': - switch (opt->has_arg) + switch (option->has_arg) { case nsfw_required_argument: nsfw_optind++; @@ -388,17 +388,17 @@ nsfw_getopt_longopts (int argc, char *const argv[], 
char *arg, if (nsfw_optind == argc) { nsfw_optarg = NULL; - nsfw_optopt = opt->val; + nsfw_optopt = option->val; if (':' != optstring[0]) { NSFW_LOGERR ("requires an argument] argv_0=%s, opt name=%s", - argv[0], opt->name); + argv[0], option->name); } return optstring[0] == ':' ? ':' : '?'; } - val = argv[nsfw_optind]; + value = argv[nsfw_optind]; break; default: @@ -408,23 +408,23 @@ nsfw_getopt_longopts (int argc, char *const argv[], char *arg, goto found; case '=': - if (opt->has_arg == nsfw_no_argument) + if (option->has_arg == nsfw_no_argument) { const char *hyphens = (argv[nsfw_optind][1] == '-') ? "--" : "-"; nsfw_optind++; nsfw_optarg = NULL; - nsfw_optopt = opt->val; + nsfw_optopt = option->val; if (':' != optstring[0]) { NSFW_LOGERR ("doesn't allow an argument] argv_0=%s, hyphens=%s, opt name=%s", - argv[0], hyphens, opt->name); + argv[0], hyphens, option->name); } return '?'; } - val = arg + namelen + 1; + value = arg + namelength + 1; goto found; default: @@ -438,18 +438,18 @@ nsfw_getopt_longopts (int argc, char *const argv[], char *arg, return '?'; found: - nsfw_optarg = val; + nsfw_optarg = value; nsfw_optind++; - if (opt->flag) + if (option->flag) { - *opt->flag = opt->val; + *option->flag = option->val; } if (longindex) { - *longindex = idx; + *longindex = index; } - return opt->flag ? 0 : opt->val; + return option->flag ? 0 : option->val; } diff --git a/src/framework/common/data_struct/list.c b/src/framework/common/data_struct/list.c index 7645640..d9ea5a4 100644 --- a/src/framework/common/data_struct/list.c +++ b/src/framework/common/data_struct/list.c @@ -17,13 +17,14 @@ #include "list.h" /** - * list_empty - tests whether a list is empty - * @head: the list to test. + * function-name : list_empty + * description : tests whether a list is empty + * parameter @head : the list to test. 
*/ inline int -list_empty (const struct list_head *head) +list_empty (const struct list_head *head_of_list) { - return head->next == head; + return head_of_list->next == head_of_list; } inline void @@ -77,22 +78,22 @@ list_add_tail (struct list_head *newp, struct list_head *head) inline void hlist_del_init (struct hlist_node *n) { - struct hlist_node *next = n->next; + struct hlist_node *next_node = n->next; struct hlist_node **pprev = n->pprev; - if (pprev == NULL && next == NULL) + if (pprev == NULL && next_node == NULL) { return; } if (pprev) { - *pprev = next; + *pprev = next_node; } - if (next) + if (next_node) { - next->pprev = pprev; + next_node->pprev = pprev; } n->next = NULL; @@ -107,12 +108,12 @@ hlist_del_init (struct hlist_node *n) * @next: node in the hlist */ inline void -hlist_add_before (struct hlist_node *n, struct hlist_node *next) +hlist_add_before (struct hlist_node *node, struct hlist_node *next) { - n->pprev = next->pprev; - n->next = next; - next->pprev = &n->next; - *(n->pprev) = n; + node->pprev = next->pprev; + node->next = next; + next->pprev = &node->next; + *(node->pprev) = node; } /** @@ -123,11 +124,11 @@ hlist_add_before (struct hlist_node *n, struct hlist_node *next) * @next: new node */ inline void -hlist_add_after (struct hlist_node *n, struct hlist_node *next) +hlist_add_after (struct hlist_node *node, struct hlist_node *next) { - next->next = n->next; - n->next = next; - next->pprev = &n->next; + next->next = node->next; + node->next = next; + next->pprev = &node->next; if (next->next) { next->next->pprev = &next->next; @@ -136,28 +137,28 @@ hlist_add_after (struct hlist_node *n, struct hlist_node *next) /* add after the head */ inline void -hlist_add_head (struct hlist_node *n, struct hlist_head *h) +hlist_add_head (struct hlist_node *node, struct hlist_head *h) { struct hlist_node *first = h->first; - n->next = first; + node->next = first; if (first) { - first->pprev = &n->next; + first->pprev = &node->next; } - h->first = n; 
- n->pprev = &h->first; + h->first = node; + node->pprev = &h->first; } inline int -hlist_unhashed (const struct hlist_node *h) +hlist_unhashed (const struct hlist_node *node) { - return !h->pprev; + return !node->pprev; } inline int -hlist_empty (const struct hlist_head *h) +hlist_empty (const struct hlist_head *node) { - return !h->first; + return !node->first; } diff --git a/src/framework/common/data_struct/sha256.c b/src/framework/common/data_struct/sha256.c index 504b365..213eb4e 100644 --- a/src/framework/common/data_struct/sha256.c +++ b/src/framework/common/data_struct/sha256.c @@ -33,40 +33,40 @@ extern "C" { /* *INDENT-ON* */ #endif -#define rotl32(x,n) (((x) << n) | ((x) >> (32 - n))) -#define rotr32(x,n) (((x) >> n) | ((x) << (32 - n))) +#define rotleft32(x,n) (((x) << n) | ((x) >> (32 - n))) +#define rotright32(x,n) (((x) >> n) | ((x) << (32 - n))) #if !defined(bswap_32) -#define bswap_32(x) ((rotr32((x), 24) & 0x00ff00ff) | (rotr32((x), 8) & 0xff00ff00)) +#define bswap_32(x) ((rotright32((x), 24) & 0x00ff00ff) | (rotright32((x), 8) & 0xff00ff00)) #endif #ifdef LITTLE_ENDIAN -#define SWAP_BYTES +#define SWAP_THE_BYTES #else -#undef SWAP_BYTES +#undef SWAP_THE_BYTES #endif -#define ch(x,y,z) ((z) ^ ((x) & ((y) ^ (z)))) -#define maj(x,y,z) (((x) & (y)) | ((z) & ((x) ^ (y)))) +#define ch(a,b,c) ((c) ^ ((a) & ((b) ^ (c)))) +#define maj(a,b,c) (((a) & (b)) | ((c) & ((a) ^ (b)))) - /* round transforms for SHA256 and SHA512 compression functions */ + /* round transforms for the SHA256 & SHA512 compression functions */ -#define vf(n,i) v[(n - i) & 7] +#define vf(m,n) v[(m - n) & 7] -#define hf(i) (p[i & 15] += \ - g_1(p[(i + 14) & 15]) + p[(i + 9) & 15] + g_0(p[(i + 1) & 15])) +#define hf(n) (p[n & 15] += \ + g_1(p[(n + 14) & 15]) + p[(n + 9) & 15] + g_0(p[(n + 1) & 15])) -#define v_cycle(i,j) \ +#define v_cycle(m,n) \ { \ - vf(7,i) += (j ? 
hf(i) : p[i]) + k_0[i+j] \ - + s_1(vf(4,i)) + ch(vf(4,i),vf(5,i),vf(6,i)); \ - vf(3,i) += vf(7,i); \ - vf(7,i) += s_0(vf(0,i))+ maj(vf(0,i),vf(1,i),vf(2,i)); \ + vf(7,m) += (n ? hf(m) : p[m]) + k_0[m+n] \ + + s_1(vf(4,m)) + ch(vf(4,m),vf(5,m),vf(6,m)); \ + vf(3,m) += vf(7,m); \ + vf(7,m) += s_0(vf(0,m))+ maj(vf(0,m),vf(1,m),vf(2,m)); \ } -#define SHA256_MASK (SHA256_BLOCK_SIZE - 1) +#define SHA256_MASK (SHA256_BLOCK_SIZE - 1) /* SHA256_MASK */ -#if defined(SWAP_BYTES) +#if defined(SWAP_THE_BYTES) #define bsw_32(p,n) \ { \ u32 _i = (n); \ @@ -80,17 +80,17 @@ extern "C" { #define bsw_32(p,n) #endif -#define s_0(x) (rotr32((x), 2) ^ rotr32((x), 13) ^ rotr32((x), 22)) -#define s_1(x) (rotr32((x), 6) ^ rotr32((x), 11) ^ rotr32((x), 25)) -#define g_0(x) (rotr32((x), 7) ^ rotr32((x), 18) ^ ((x) >> 3)) -#define g_1(x) (rotr32((x), 17) ^ rotr32((x), 19) ^ ((x) >> 10)) +#define s_0(x) (rotright32((x), 2) ^ rotright32((x), 13) ^ rotright32((x), 22)) +#define s_1(x) (rotright32((x), 6) ^ rotright32((x), 11) ^ rotright32((x), 25)) +#define g_0(x) (rotright32((x), 7) ^ rotright32((x), 18) ^ ((x) >> 3)) +#define g_1(x) (rotright32((x), 17) ^ rotright32((x), 19) ^ ((x) >> 10)) #define k_0 k256 -/* rotated SHA256 round definition. Rather than swapping variables as in */ -/* FIPS-180, different variables are 'rotated' on each round, returning */ -/* to their starting positions every eight rounds */ +/* rotated SHA256 round definition. 
Unlike swapping the variables as in */ +/* FIPS-180, different variables are being 'rotated' on each round, */ +/* returning to their starting positions every 8 rounds */ -#define q(n) v##n +#define q(i) v##i #define one_cycle(a,b,c,d,e,f,g,h,k,w) \ q(h) += s_1(q(e)) + ch(q(e), q(f), q(g)) + k + w; \ @@ -121,12 +121,6 @@ static const u32 k256[64] = { 022057577772, 024424066353, 027676321767, 030634274362, }; -/* Compile 64 bytes of hash data into SHA256 digest value */ -/* NOTE: this routine assumes that the byte order in the */ -/* ctx->wbuf[] at this point is such that low address bytes */ -/* in the ORIGINAL byte stream will go into the high end of */ -/* words on BOTH big and little endian systems */ - #define v_ v #define ptr p @@ -150,7 +144,7 @@ Sha256_compile__ (SHA256_CTX ctx[1]) /* macros defined above to this function i.e. v_ and ptr should not be removed */ /* v_cycle - for 0 to 15 */ - u32 j; + u32 i; u32 *ptr = ctx->wbuf; u32 v_[8]; @@ -161,25 +155,25 @@ Sha256_compile__ (SHA256_CTX ctx[1]) return; } - for (j = 0; j < 64; j += 16) + for (i = 0; i < 64; i += 16) { /*v_cycle operations from 0 to 15 */ - v_cycle (0, j); - v_cycle (1, j); - v_cycle (2, j); - v_cycle (3, j); - v_cycle (4, j); - v_cycle (5, j); - v_cycle (6, j); - v_cycle (7, j); - v_cycle (8, j); - v_cycle (9, j); - v_cycle (10, j); - v_cycle (11, j); - v_cycle (12, j); - v_cycle (13, j); - v_cycle (14, j); - v_cycle (15, j); + v_cycle (0, i); + v_cycle (1, i); + v_cycle (2, i); + v_cycle (3, i); + v_cycle (4, i); + v_cycle (5, i); + v_cycle (6, i); + v_cycle (7, i); + v_cycle (8, i); + v_cycle (9, i); + v_cycle (10, i); + v_cycle (11, i); + v_cycle (12, i); + v_cycle (13, i); + v_cycle (14, i); + v_cycle (15, i); } /* update the context */ @@ -198,9 +192,6 @@ Sha256_compile__ (SHA256_CTX ctx[1]) #undef v_ #undef ptr -/* SHA256 hash data in an array of bytes into hash buffer */ -/* and call the hash_compile function as required. 
*/ - /*===========================================================================*\ Function :Sha256_upd Description : @@ -214,40 +205,42 @@ Sha256_compile__ (SHA256_CTX ctx[1]) Note : \*===========================================================================*/ void -Sha256_upd (SHA256_CTX ctx[1], const u8 data[], size_t len) +Sha256_upd (SHA256_CTX sha256_ctx[1], const u8 data[], size_t len) { - u32 pos = (u32) (ctx->count[0] & SHA256_MASK); - u32 space = SHA256_BLOCK_SIZE - pos; + u32 pos = (u32) (sha256_ctx->count[0] & SHA256_MASK); + u32 capacity = SHA256_BLOCK_SIZE - pos; const u8 *sp = data; int ret; - if ((ctx->count[0] += (u32) len) < len) + if ((sha256_ctx->count[0] += (u32) len) < len) { - ++(ctx->count[1]); + ++(sha256_ctx->count[1]); } - while (len >= space) + while (len >= capacity) { /* tranfer whole blocks while possible */ - ret = MEMCPY_S (((u8 *) ctx->wbuf) + pos, space, sp, space); + ret = + MEMCPY_S (((u8 *) sha256_ctx->wbuf) + pos, capacity, sp, capacity); if (EOK != ret) { NSPOL_LOGERR ("MEMCPY_S failed"); return; } - sp += space; - len -= space; - space = SHA256_BLOCK_SIZE; + sp += capacity; + len -= capacity; + capacity = SHA256_BLOCK_SIZE; pos = 0; - bsw_32 (ctx->wbuf, SHA256_BLOCK_SIZE >> 2); - Sha256_compile__ (ctx); + bsw_32 (sha256_ctx->wbuf, SHA256_BLOCK_SIZE >> 2); + Sha256_compile__ (sha256_ctx); } if (len != 0) { - ret = MEMCPY_S (((u8 *) ctx->wbuf) + pos, (u32) len, sp, (u32) len); + ret = + MEMCPY_S (((u8 *) sha256_ctx->wbuf) + pos, (u32) len, sp, (u32) len); if (EOK != ret) { NSPOL_LOGERR ("MEMCPY_S failed"); @@ -258,7 +251,7 @@ Sha256_upd (SHA256_CTX ctx[1], const u8 data[], size_t len) return; } -/* SHA256 Final padding and digest calculation */ +/* the Final padding and digest calculation of SHA256 */ /*===========================================================================*\ Function :SHA_fin1 @@ -284,16 +277,13 @@ SHA_fin1 (u8 hval[], SHA256_CTX ctx[1], const unsigned int hlen) /*top of 32 bit words on BOTH big and 
little endian machines */ bsw_32 (ctx->wbuf, (i + 3) >> 2); - /*we now need to mask valid bytes and add the padding which is */ + /*now it is needed to mask valid bytes and add padding which is */ /*a single 1 bit and as many zero bits as necessary. Note that */ /*we can always add the first padding byte here because the */ /*buffer always has at least one empty slot */ ctx->wbuf[i >> 2] &= (u32) 0xffffff80 << 8 * (~i & 3); ctx->wbuf[i >> 2] |= (u32) 0x00000080 << 8 * (~i & 3); - /* we need 9 or more empty positions, one for the padding byte */ - /* (above) and eight for the length count. If there is not */ - /* enough space pad and empty the buffer */ if (i > SHA256_BLOCK_SIZE - 9) { if (i < 60) @@ -316,16 +306,10 @@ SHA_fin1 (u8 hval[], SHA256_CTX ctx[1], const unsigned int hlen) ctx->wbuf[i++] = 0; } - /* the following 32-bit length fields are assembled in the */ - /* wrong byte order on little endian machines but this is */ - /* corrected later since they are only ever used as 32-bit */ - /* word values. 
*/ ctx->wbuf[14] = (ctx->count[1] << 3) | (ctx->count[0] >> 29); ctx->wbuf[15] = ctx->count[0] << 3; Sha256_compile__ (ctx); - /* extract the hash value as bytes in case the hash buffer is */ - /* mislaigned for 32-bit words */ for (i = 0; i < hlen; ++i) { hval[i] = (u8) (ctx->hash[i >> 2] >> (8 * (~i & 3))); @@ -356,12 +340,14 @@ static const u32 g_i256[] = { Note : \*===========================================================================*/ void -Sha256_set (SHA256_CTX ctx[1]) +Sha256_set (SHA256_CTX sha256_ctx[1]) { int ret; - ctx->count[0] = ctx->count[1] = 0; + sha256_ctx->count[0] = sha256_ctx->count[1] = 0; - ret = MEMCPY_S (ctx->hash, sizeof (ctx->hash), g_i256, sizeof (g_i256)); + ret = + MEMCPY_S (sha256_ctx->hash, sizeof (sha256_ctx->hash), g_i256, + sizeof (g_i256)); if (EOK != ret) { NSPOL_LOGERR ("MEMCPY_S failed"); diff --git a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c index 64e7d57..780596c 100644 --- a/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c +++ b/src/framework/common/mem_mgr/nsfw_nshmem/nsfw_nshmem_ring.c @@ -131,8 +131,8 @@ this is a multi thread/process enqueue function, please pay attention to the be int nsfw_nshmem_ring_mp_enqueue (struct nsfw_mem_ring *mem_ring, void *obj_table) { - uint32_t prod_head, prod_next; - uint32_t cons_tail, free_entries; + uint32_t producer_head, producer_next; + uint32_t consumer_tail, free_entries; int success; unsigned rep = 0; uint32_t mask = mem_ring->mask; @@ -143,13 +143,13 @@ nsfw_nshmem_ring_mp_enqueue (struct nsfw_mem_ring *mem_ring, void *obj_table) do { - prod_head = mem_ring->prod.head; - cons_tail = mem_ring->cons.tail; + producer_head = mem_ring->prod.head; + consumer_tail = mem_ring->cons.tail; /* The subtraction is done between two unsigned 32bits value * (the result is always modulo 32 bits even if we have - * prod_head > cons_tail). 
So 'free_entries' is always between 0 + * producer_head > consumer_tail). So 'free_entries' is always between 0 * and size(ring)-1. */ - free_entries = (size + cons_tail - prod_head); + free_entries = (size + consumer_tail - producer_head); /* check that we have enough room in ring */ if (unlikely (n > free_entries)) @@ -168,20 +168,20 @@ nsfw_nshmem_ring_mp_enqueue (struct nsfw_mem_ring *mem_ring, void *obj_table) common_mem_pause (); } - prod_next = prod_head + n; + producer_next = producer_head + n; success = - common_mem_atomic32_cmpset (&mem_ring->prod.head, prod_head, - prod_next); + common_mem_atomic32_cmpset (&mem_ring->prod.head, producer_head, + producer_next); } while (unlikely (success == 0)); - mem_ring->ring[prod_head & mask].data_l = (u64) obj_table; + mem_ring->ring[producer_head & mask].data_l = (u64) obj_table; /* * If there are other enqueues in progress that preceded us, * we need to wait for them to complete */ - while (unlikely (mem_ring->prod.tail != prod_head)) + while (unlikely (mem_ring->prod.tail != producer_head)) { common_mem_pause (); @@ -196,7 +196,7 @@ nsfw_nshmem_ring_mp_enqueue (struct nsfw_mem_ring *mem_ring, void *obj_table) } } - mem_ring->prod.tail = prod_next; + mem_ring->prod.tail = producer_next; return (int) n; } @@ -206,19 +206,19 @@ nsfw_nshmem_ring_mp_enqueue (struct nsfw_mem_ring *mem_ring, void *obj_table) int nsfw_nshmem_ring_sp_enqueue (struct nsfw_mem_ring *r, void *obj_table) { - uint32_t prod_head, cons_tail; - uint32_t prod_next, free_entries; + uint32_t producer_head, consumer_tail; + uint32_t producer_next, free_entries; uint32_t mask = r->mask; uint32_t n = 1; uint32_t size = r->size; - prod_head = r->prod.head; - cons_tail = r->cons.tail; + producer_head = r->prod.head; + consumer_tail = r->cons.tail; /* The subtraction is done between two unsigned 32bits value * (the result is always modulo 32 bits even if we have - * prod_head > cons_tail). 
So 'free_entries' is always between 0 + * producer_head > consumer_tail). So 'free_entries' is always between 0 * and size(ring)-1. */ - free_entries = size + cons_tail - prod_head; + free_entries = size + consumer_tail - producer_head; /* check that we have enough room in ring */ if (unlikely (n > free_entries)) @@ -228,12 +228,12 @@ nsfw_nshmem_ring_sp_enqueue (struct nsfw_mem_ring *r, void *obj_table) nsfw_nshmem_enqueue_fork_recov (r); - prod_next = prod_head + n; - r->prod.head = prod_next; + producer_next = producer_head + n; + r->prod.head = producer_next; - r->ring[prod_head & mask].data_l = (u64) obj_table; + r->ring[producer_head & mask].data_l = (u64) obj_table; - r->prod.tail = prod_next; + r->prod.tail = producer_next; return (int) n; } @@ -244,8 +244,8 @@ int nsfw_nshmem_ring_mc_dequeuev (struct nsfw_mem_ring *r, void **obj_table, unsigned int n) { - uint32_t cons_head, prod_tail; - uint32_t cons_next, entries; + uint32_t consumer_head, producer_tail; + uint32_t consumer_next, entries; int success; unsigned rep = 0; uint32_t num = n; @@ -263,13 +263,13 @@ nsfw_nshmem_ring_mc_dequeuev (struct nsfw_mem_ring *r, void **obj_table, do { num = n; - cons_head = r->cons.head; - prod_tail = r->prod.tail; + consumer_head = r->cons.head; + producer_tail = r->prod.tail; /* The subtraction is done between two unsigned 32bits value * (the result is always modulo 32 bits even if we have * cons_head > prod_tail). So 'entries' is always between 0 * and size(ring)-1. 
*/ - entries = (prod_tail - cons_head); + entries = (producer_tail - consumer_head); /* Set the actual entries for dequeue */ if (unlikely (num > entries)) @@ -292,20 +292,21 @@ nsfw_nshmem_ring_mc_dequeuev (struct nsfw_mem_ring *r, void **obj_table, common_mem_pause (); } - cons_next = cons_head + num; + consumer_next = consumer_head + num; success = - common_mem_atomic32_cmpset (&r->cons.head, cons_head, cons_next); + common_mem_atomic32_cmpset (&r->cons.head, consumer_head, + consumer_next); } while (unlikely (success == 0)); - nsfw_nshmem_ring_obj_copy (r, cons_head, obj_table, num); + nsfw_nshmem_ring_obj_copy (r, consumer_head, obj_table, num); /* * If there are other dequeues in progress that preceded us, * we need to wait for them to complete */ - while (unlikely (r->cons.tail != cons_head)) + while (unlikely (r->cons.tail != consumer_head)) { common_mem_pause (); @@ -320,7 +321,7 @@ nsfw_nshmem_ring_mc_dequeuev (struct nsfw_mem_ring *r, void **obj_table, } } - r->cons.tail = cons_next; + r->cons.tail = consumer_next; return (int) num; } @@ -341,16 +342,16 @@ int nsfw_nshmem_ring_sc_dequeuev (struct nsfw_mem_ring *r, void **obj_table, unsigned int n) { - uint32_t cons_head, prod_tail; - uint32_t cons_next, entries; + uint32_t consumer_head, producer_tail; + uint32_t consumer_next, entries; uint32_t inum = n; - cons_head = r->cons.head; - prod_tail = r->prod.tail; + consumer_head = r->cons.head; + producer_tail = r->prod.tail; /* The subtraction is done between two unsigned 32bits value * (the result is always modulo 32 bits even if we have * cons_head > prod_tail). So 'entries' is always between 0 * and size(ring)-1. 
*/ - entries = prod_tail - cons_head; + entries = producer_tail - consumer_head; if (unlikely (inum > entries)) { @@ -366,12 +367,12 @@ nsfw_nshmem_ring_sc_dequeuev (struct nsfw_mem_ring *r, void **obj_table, nsfw_nshmem_dequeue_fork_recov (r); - cons_next = cons_head + inum; - r->cons.head = cons_next; + consumer_next = consumer_head + inum; + r->cons.head = consumer_next; - nsfw_nshmem_ring_obj_copy (r, cons_head, obj_table, inum); + nsfw_nshmem_ring_obj_copy (r, consumer_head, obj_table, inum); - r->cons.tail = cons_next; + r->cons.tail = consumer_next; return (int) inum; } -- cgit 1.2.3-korg