author      Dave Barach <dave@barachs.net>    2016-08-15 11:12:27 -0400
committer   Dave Barach <dave@barachs.net>    2016-08-15 11:12:40 -0400
commit      c379999665febd12ec55bfb3a7545224f2b39d3d (patch)
tree        8bf0c11e52c2162e1239b6c3f4a5f74b62a07409
parent      b3d93dacfde8ab21bbce171fff2971b2ed7bce6a (diff)
VPP-327 Coding standards cleanup for vppinfra
Fix a few additional deviations reported elsewhere by checkstyle.

Change-Id: I026a8ae1c5b1856bbe3c4a555e1b690e7501b045
Signed-off-by: Dave Barach <dave@barachs.net>
-rw-r--r--  svm/svm.c | 34
-rw-r--r--  svm/svm.h | 2
-rw-r--r--  svm/svmtool.c | 2
-rw-r--r--  vlib-api/vlibmemory/memory_shared.c | 2
-rw-r--r--  vlib-api/vlibmemory/memory_vlib.c | 9
-rw-r--r--  vpp/vpp-api/api.c | 34
-rw-r--r--  vpp/vpp-api/gmon.c | 4
-rw-r--r--  vppinfra/vppinfra/anneal.c | 166
-rw-r--r--  vppinfra/vppinfra/anneal.h | 31
-rw-r--r--  vppinfra/vppinfra/asm_mips.h | 75
-rw-r--r--  vppinfra/vppinfra/asm_x86.h | 44
-rw-r--r--  vppinfra/vppinfra/backtrace.c | 263
-rw-r--r--  vppinfra/vppinfra/bihash_24_8.h | 46
-rw-r--r--  vppinfra/vppinfra/bihash_8_8.h | 37
-rw-r--r--  vppinfra/vppinfra/bihash_doc.h | 73
-rw-r--r--  vppinfra/vppinfra/bihash_template.c | 487
-rw-r--r--  vppinfra/vppinfra/bihash_template.h | 155
-rw-r--r--  vppinfra/vppinfra/bitmap.h | 135
-rw-r--r--  vppinfra/vppinfra/bitops.h | 42
-rw-r--r--  vppinfra/vppinfra/byte_order.h | 78
-rw-r--r--  vppinfra/vppinfra/cache.h | 12
-rw-r--r--  vppinfra/vppinfra/clib.h | 117
-rw-r--r--  vppinfra/vppinfra/cpu.c | 25
-rw-r--r--  vppinfra/vppinfra/cpu.h | 19
-rw-r--r--  vppinfra/vppinfra/dlist.h | 65
-rw-r--r--  vppinfra/vppinfra/elf.c | 1308
-rw-r--r--  vppinfra/vppinfra/elf.h | 295
-rw-r--r--  vppinfra/vppinfra/elf_clib.c | 246
-rw-r--r--  vppinfra/vppinfra/elf_clib.h | 60
-rw-r--r--  vppinfra/vppinfra/elog.c | 366
-rw-r--r--  vppinfra/vppinfra/elog.h | 180
-rw-r--r--  vppinfra/vppinfra/error.c | 127
-rw-r--r--  vppinfra/vppinfra/error.h | 40
-rw-r--r--  vppinfra/vppinfra/error_bootstrap.h | 24
-rw-r--r--  vppinfra/vppinfra/fheap.c | 271
-rw-r--r--  vppinfra/vppinfra/fheap.h | 30
-rw-r--r--  vppinfra/vppinfra/fifo.c | 17
-rw-r--r--  vppinfra/vppinfra/fifo.h | 61
-rw-r--r--  vppinfra/vppinfra/format.c | 219
-rw-r--r--  vppinfra/vppinfra/format.h | 145
-rw-r--r--  vppinfra/vppinfra/graph.c | 87
-rw-r--r--  vppinfra/vppinfra/graph.h | 41
-rw-r--r--  vppinfra/vppinfra/hash.c | 506
-rw-r--r--  vppinfra/vppinfra/hash.h | 198
-rw-r--r--  vppinfra/vppinfra/heap.c | 244
-rw-r--r--  vppinfra/vppinfra/heap.h | 156
-rw-r--r--  vppinfra/vppinfra/longjmp.h | 16
-rw-r--r--  vppinfra/vppinfra/macros.c | 266
-rw-r--r--  vppinfra/vppinfra/macros.h | 31
-rw-r--r--  vppinfra/vppinfra/math.h | 14
-rw-r--r--  vppinfra/vppinfra/md5.c | 182
-rw-r--r--  vppinfra/vppinfra/md5.h | 30
-rw-r--r--  vppinfra/vppinfra/mem.h | 92
-rw-r--r--  vppinfra/vppinfra/mem_mheap.c | 71
-rw-r--r--  vppinfra/vppinfra/memcheck.h | 64
-rw-r--r--  vppinfra/vppinfra/memcpy_avx.h | 329
-rw-r--r--  vppinfra/vppinfra/memcpy_sse3.h | 250
-rw-r--r--  vppinfra/vppinfra/mhash.c | 132
-rw-r--r--  vppinfra/vppinfra/mhash.h | 66
-rw-r--r--  vppinfra/vppinfra/mheap.c | 613
-rw-r--r--  vppinfra/vppinfra/mheap.h | 41
-rw-r--r--  vppinfra/vppinfra/mheap_bootstrap.h | 159
-rw-r--r--  vppinfra/vppinfra/mod_test_hash.c | 11
-rw-r--r--  vppinfra/vppinfra/os.h | 8
-rw-r--r--  vppinfra/vppinfra/pfhash.c | 650
-rw-r--r--  vppinfra/vppinfra/pfhash.h | 168
-rw-r--r--  vppinfra/vppinfra/phash.c | 382
-rw-r--r--  vppinfra/vppinfra/phash.h | 49
-rw-r--r--  vppinfra/vppinfra/pipeline.h | 8
-rw-r--r--  vppinfra/vppinfra/pool.h | 71
-rw-r--r--  vppinfra/vppinfra/ptclosure.c | 64
-rw-r--r--  vppinfra/vppinfra/ptclosure.h | 20
-rw-r--r--  vppinfra/vppinfra/qhash.c | 207
-rw-r--r--  vppinfra/vppinfra/qhash.h | 66
-rw-r--r--  vppinfra/vppinfra/qsort.c | 271
-rw-r--r--  vppinfra/vppinfra/random.c | 8
-rw-r--r--  vppinfra/vppinfra/random.h | 61
-rw-r--r--  vppinfra/vppinfra/random_buffer.c | 28
-rw-r--r--  vppinfra/vppinfra/random_buffer.h | 19
-rw-r--r--  vppinfra/vppinfra/random_isaac.c | 282
-rw-r--r--  vppinfra/vppinfra/random_isaac.h | 13
-rw-r--r--  vppinfra/vppinfra/serialize.c | 571
-rw-r--r--  vppinfra/vppinfra/serialize.h | 162
-rw-r--r--  vppinfra/vppinfra/slist.c | 280
-rw-r--r--  vppinfra/vppinfra/slist.h | 64
-rw-r--r--  vppinfra/vppinfra/smp.c | 84
-rw-r--r--  vppinfra/vppinfra/smp.h | 18
-rw-r--r--  vppinfra/vppinfra/smp_fifo.c | 34
-rw-r--r--  vppinfra/vppinfra/smp_fifo.h | 92
-rw-r--r--  vppinfra/vppinfra/socket.c | 113
-rw-r--r--  vppinfra/vppinfra/socket.h | 79
-rw-r--r--  vppinfra/vppinfra/sparse_vec.h | 53
-rw-r--r--  vppinfra/vppinfra/std-formats.c | 111
-rw-r--r--  vppinfra/vppinfra/string.c | 28
-rw-r--r--  vppinfra/vppinfra/string.h | 12
-rw-r--r--  vppinfra/vppinfra/test_bihash_template.c | 246
-rw-r--r--  vppinfra/vppinfra/test_dlist.c | 53
-rw-r--r--  vppinfra/vppinfra/test_elf.c | 106
-rw-r--r--  vppinfra/vppinfra/test_elog.c | 118
-rw-r--r--  vppinfra/vppinfra/test_fifo.c | 34
-rw-r--r--  vppinfra/vppinfra/test_format.c | 61
-rw-r--r--  vppinfra/vppinfra/test_hash.c | 197
-rw-r--r--  vppinfra/vppinfra/test_heap.c | 48
-rw-r--r--  vppinfra/vppinfra/test_longjmp.c | 30
-rw-r--r--  vppinfra/vppinfra/test_macros.c | 24
-rw-r--r--  vppinfra/vppinfra/test_md5.c | 62
-rw-r--r--  vppinfra/vppinfra/test_mheap.c | 54
-rw-r--r--  vppinfra/vppinfra/test_pfhash.c | 509
-rw-r--r--  vppinfra/vppinfra/test_phash.c | 71
-rw-r--r--  vppinfra/vppinfra/test_pool.c | 17
-rw-r--r--  vppinfra/vppinfra/test_pool_iterate.c | 27
-rw-r--r--  vppinfra/vppinfra/test_ptclosure.c | 138
-rw-r--r--  vppinfra/vppinfra/test_qhash.c | 68
-rw-r--r--  vppinfra/vppinfra/test_random.c | 41
-rw-r--r--  vppinfra/vppinfra/test_random_isaac.c | 26
-rw-r--r--  vppinfra/vppinfra/test_serialize.c | 80
-rw-r--r--  vppinfra/vppinfra/test_slist.c | 211
-rw-r--r--  vppinfra/vppinfra/test_socket.c | 29
-rw-r--r--  vppinfra/vppinfra/test_time.c | 43
-rw-r--r--  vppinfra/vppinfra/test_timing_wheel.c | 116
-rw-r--r--  vppinfra/vppinfra/test_vec.c | 347
-rw-r--r--  vppinfra/vppinfra/test_vec.h | 64
-rw-r--r--  vppinfra/vppinfra/test_vhash.c | 231
-rw-r--r--  vppinfra/vppinfra/test_zvec.c | 18
-rw-r--r--  vppinfra/vppinfra/time.c | 53
-rw-r--r--  vppinfra/vppinfra/time.h | 144
-rw-r--r--  vppinfra/vppinfra/timer.c | 97
-rw-r--r--  vppinfra/vppinfra/timer.h | 8
-rw-r--r--  vppinfra/vppinfra/timing_wheel.c | 373
-rw-r--r--  vppinfra/vppinfra/timing_wheel.h | 47
-rw-r--r--  vppinfra/vppinfra/types.h | 16
-rw-r--r--  vppinfra/vppinfra/unformat.c | 274
-rw-r--r--  vppinfra/vppinfra/unix-kelog.c | 189
-rw-r--r--  vppinfra/vppinfra/unix-misc.c | 111
-rw-r--r--  vppinfra/vppinfra/unix.h | 18
-rw-r--r--  vppinfra/vppinfra/valgrind.h | 270
-rw-r--r--  vppinfra/vppinfra/vec.c | 62
-rw-r--r--  vppinfra/vppinfra/vec.h | 178
-rw-r--r--  vppinfra/vppinfra/vec_bootstrap.h | 58
-rw-r--r--  vppinfra/vppinfra/vector.c | 8
-rw-r--r--  vppinfra/vppinfra/vector.h | 90
-rw-r--r--  vppinfra/vppinfra/vector_altivec.h | 96
-rw-r--r--  vppinfra/vppinfra/vector_funcs.h | 10
-rw-r--r--  vppinfra/vppinfra/vector_iwmmxt.h | 106
-rw-r--r--  vppinfra/vppinfra/vector_sse2.h | 527
-rw-r--r--  vppinfra/vppinfra/vhash.c | 327
-rw-r--r--  vppinfra/vppinfra/vhash.h | 283
-rw-r--r--  vppinfra/vppinfra/vm_linux_kernel.h | 38
-rw-r--r--  vppinfra/vppinfra/vm_standalone.h | 35
-rw-r--r--  vppinfra/vppinfra/vm_unix.h | 30
-rw-r--r--  vppinfra/vppinfra/xxhash.h | 19
-rw-r--r--  vppinfra/vppinfra/xy.h | 8
-rw-r--r--  vppinfra/vppinfra/zvec.c | 127
-rw-r--r--  vppinfra/vppinfra/zvec.h | 49
154 files changed, 11293 insertions, 8448 deletions
diff --git a/svm/svm.c b/svm/svm.c
index 851baea55ed..e4ca98e1ed2 100644
--- a/svm/svm.c
+++ b/svm/svm.c
@@ -235,9 +235,9 @@ svm_data_region_create (svm_map_region_args_t * a, svm_region_t * rp)
u8 junk = 0;
uword map_size;
- map_size = rp->virtual_size - (MMAP_PAGESIZE +
- (a->pvt_heap_size ? a->pvt_heap_size :
- SVM_PVT_MHEAP_SIZE));
+ map_size = rp->virtual_size - (MMAP_PAGESIZE +
+ (a->pvt_heap_size ? a->pvt_heap_size :
+ SVM_PVT_MHEAP_SIZE));
if (a->flags & SVM_FLAGS_FILE)
{
@@ -318,8 +318,8 @@ svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
uword map_size;
struct stat statb;
- map_size = rp->virtual_size -
- (MMAP_PAGESIZE
+ map_size = rp->virtual_size -
+ (MMAP_PAGESIZE
+ (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE));
if (a->flags & SVM_FLAGS_FILE)
@@ -368,9 +368,9 @@ svm_data_region_map (svm_map_region_args_t * a, svm_region_t * rp)
}
ASSERT (map_size <= rp->virtual_size
- - (MMAP_PAGESIZE
- +
- (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));
+ - (MMAP_PAGESIZE
+ +
+ (a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE)));
if (mmap (rp->data_base, map_size, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_FIXED, fd, 0) == MAP_FAILED)
@@ -534,9 +534,9 @@ svm_map_region (svm_map_region_args_t * a)
rp->region_heap =
mheap_alloc_with_flags ((void *) (a->baseva + MMAP_PAGESIZE),
- (a->pvt_heap_size != 0) ?
- a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
- MHEAP_FLAG_DISABLE_VM);
+ (a->pvt_heap_size != 0) ?
+ a->pvt_heap_size : SVM_PVT_MHEAP_SIZE,
+ MHEAP_FLAG_DISABLE_VM);
oldheap = svm_push_pvt_heap (rp);
rp->region_name = (char *) format (0, "%s%c", a->name, 0);
@@ -550,7 +550,7 @@ svm_map_region (svm_map_region_args_t * a)
vec_validate (rp->bitmap, words - 1);
overhead_space = MMAP_PAGESIZE /* header */ +
- ((a->pvt_heap_size != 0)? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
+ ((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
bit = 0;
data_base = (uword) rp->virtual_base;
@@ -782,7 +782,7 @@ void
svm_region_init (void)
{
svm_map_region_args_t _a, *a = &_a;
-
+
memset (a, 0, sizeof (*a));
a->root_path = 0;
a->name = SVM_GLOBAL_REGION_NAME;
@@ -799,7 +799,7 @@ void
svm_region_init_chroot (char *root_path)
{
svm_map_region_args_t _a, *a = &_a;
-
+
memset (a, 0, sizeof (*a));
a->root_path = root_path;
a->name = SVM_GLOBAL_REGION_NAME;
@@ -816,7 +816,7 @@ void
svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid)
{
svm_map_region_args_t _a, *a = &_a;
-
+
memset (a, 0, sizeof (*a));
a->root_path = root_path;
a->name = SVM_GLOBAL_REGION_NAME;
@@ -849,7 +849,7 @@ svm_region_find_or_create (svm_map_region_args_t * a)
ASSERT (root_rp);
- a->size += MMAP_PAGESIZE +
+ a->size += MMAP_PAGESIZE +
((a->pvt_heap_size != 0) ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
a->size = rnd_pagesize (a->size);
@@ -932,7 +932,7 @@ svm_region_find_or_create (svm_map_region_args_t * a)
a->baseva = root_rp->virtual_base + index * MMAP_PAGESIZE;
rp = svm_map_region (a);
-
+
pool_get (mp->subregions, subp);
name = format (0, "%s%c", a->name, 0);
subp->subregion_name = name;
diff --git a/svm/svm.h b/svm/svm.h
index c42d2b578df..5b95abb66ba 100644
--- a/svm/svm.h
+++ b/svm/svm.h
@@ -117,7 +117,7 @@ void *svm_region_find_or_create (svm_map_region_args_t * a);
void svm_region_init (void);
void svm_region_init_chroot (char *root_path);
void svm_region_init_chroot_uid_gid (char *root_path, int uid, int gid);
-void svm_region_init_args (svm_map_region_args_t *a);
+void svm_region_init_args (svm_map_region_args_t * a);
void svm_region_exit (void);
void svm_region_unmap (void *rp_arg);
void svm_client_scan (char *root_path);
diff --git a/svm/svmtool.c b/svm/svmtool.c
index 2e55a0873e3..b319551425c 100644
--- a/svm/svmtool.c
+++ b/svm/svmtool.c
@@ -247,7 +247,7 @@ svm_existing_region_map_nolock (void *root_arg, svm_map_region_args_t * a)
void *oldheap;
uword *p;
- a->size += MMAP_PAGESIZE +
+ a->size += MMAP_PAGESIZE +
(a->pvt_heap_size ? a->pvt_heap_size : SVM_PVT_MHEAP_SIZE);
a->size = rnd_pagesize (a->size);
diff --git a/vlib-api/vlibmemory/memory_shared.c b/vlib-api/vlibmemory/memory_shared.c
index a2f22771596..900e89d15c3 100644
--- a/vlib-api/vlibmemory/memory_shared.c
+++ b/vlib-api/vlibmemory/memory_shared.c
@@ -285,7 +285,7 @@ vl_map_shmem (char *region_name, int is_vlib)
memset (a, 0, sizeof (*a));
a->name = region_name;
- a->size = am->api_size ? am->api_size: (16 << 20);
+ a->size = am->api_size ? am->api_size : (16 << 20);
a->flags = SVM_FLAGS_MHEAP;
a->uid = am->api_uid;
a->gid = am->api_gid;
diff --git a/vlib-api/vlibmemory/memory_vlib.c b/vlib-api/vlibmemory/memory_vlib.c
index 2212b46bce4..32f0fbcc689 100644
--- a/vlib-api/vlibmemory/memory_vlib.c
+++ b/vlib-api/vlibmemory/memory_vlib.c
@@ -1177,18 +1177,19 @@ vlibmemory_init (vlib_main_t * vm)
{
api_main_t *am = &api_main;
svm_map_region_args_t _a, *a = &_a;
-
+
memset (a, 0, sizeof (*a));
a->root_path = am->root_path;
a->name = SVM_GLOBAL_REGION_NAME;
- a->baseva = (am->global_baseva != 0) ?
+ a->baseva = (am->global_baseva != 0) ?
am->global_baseva : SVM_GLOBAL_REGION_BASEVA;
a->size = (am->global_size != 0) ? am->global_size : SVM_GLOBAL_REGION_SIZE;
a->flags = SVM_FLAGS_NODATA;
a->uid = am->api_uid;
a->gid = am->api_gid;
- a->pvt_heap_size = (am->global_pvt_heap_size != 0) ? am->global_pvt_heap_size
- : SVM_PVT_MHEAP_SIZE;
+ a->pvt_heap_size =
+ (am->global_pvt_heap_size !=
+ 0) ? am->global_pvt_heap_size : SVM_PVT_MHEAP_SIZE;
svm_region_init_args (a);
return 0;
diff --git a/vpp/vpp-api/api.c b/vpp/vpp-api/api.c
index ee3ced4379d..788e310e768 100644
--- a/vpp/vpp-api/api.c
+++ b/vpp/vpp-api/api.c
@@ -8111,7 +8111,7 @@ VLIB_INIT_FUNCTION (vpe_api_init);
static clib_error_t *
api_segment_config (vlib_main_t * vm, unformat_input_t * input)
{
- u8 * chroot_path;
+ u8 *chroot_path;
u64 baseva, size, pvt_heap_size;
int uid, gid, rv;
const int max_buf_size = 4096;
@@ -8130,31 +8130,31 @@ api_segment_config (vlib_main_t * vm, unformat_input_t * input)
else if (unformat (input, "uid %d", &uid))
vl_set_memory_uid (uid);
else if (unformat (input, "gid %d", &gid))
- vl_set_memory_gid (gid);
+ vl_set_memory_gid (gid);
else if (unformat (input, "baseva %llx", &baseva))
- vl_set_global_memory_baseva (baseva);
+ vl_set_global_memory_baseva (baseva);
else if (unformat (input, "global-size %lldM", &size))
- vl_set_global_memory_size (size * (1ULL<<20));
+ vl_set_global_memory_size (size * (1ULL << 20));
else if (unformat (input, "global-size %lldG", &size))
- vl_set_global_memory_size (size * (1ULL<<30));
+ vl_set_global_memory_size (size * (1ULL << 30));
else if (unformat (input, "global-size %lld", &size))
- vl_set_global_memory_size (size);
+ vl_set_global_memory_size (size);
else if (unformat (input, "global-pvt-heap-size %lldM", &pvt_heap_size))
- vl_set_global_pvt_heap_size (pvt_heap_size * (1ULL<<20));
- else if (unformat (input, "global-pvt-heap-size size %lld",
- &pvt_heap_size))
- vl_set_global_pvt_heap_size (pvt_heap_size);
+ vl_set_global_pvt_heap_size (pvt_heap_size * (1ULL << 20));
+ else if (unformat (input, "global-pvt-heap-size size %lld",
+ &pvt_heap_size))
+ vl_set_global_pvt_heap_size (pvt_heap_size);
else if (unformat (input, "api-pvt-heap-size %lldM", &pvt_heap_size))
- vl_set_api_pvt_heap_size (pvt_heap_size * (1ULL<<20));
- else if (unformat (input, "api-pvt-heap-size size %lld",
- &pvt_heap_size))
- vl_set_api_pvt_heap_size (pvt_heap_size);
+ vl_set_api_pvt_heap_size (pvt_heap_size * (1ULL << 20));
+ else if (unformat (input, "api-pvt-heap-size size %lld",
+ &pvt_heap_size))
+ vl_set_api_pvt_heap_size (pvt_heap_size);
else if (unformat (input, "api-size %lldM", &size))
- vl_set_api_memory_size (size * (1ULL<<20));
+ vl_set_api_memory_size (size * (1ULL << 20));
else if (unformat (input, "api-size %lldG", &size))
- vl_set_api_memory_size (size * (1ULL<<30));
+ vl_set_api_memory_size (size * (1ULL << 30));
else if (unformat (input, "api-size %lld", &size))
- vl_set_api_memory_size (size);
+ vl_set_api_memory_size (size);
else if (unformat (input, "uid %s", &s))
{
/* lookup the username */
diff --git a/vpp/vpp-api/gmon.c b/vpp/vpp-api/gmon.c
index 330dc9fe5d3..e5cb1271f6b 100644
--- a/vpp/vpp-api/gmon.c
+++ b/vpp/vpp-api/gmon.c
@@ -180,8 +180,8 @@ gmon_init (vlib_main_t * vm)
if ((error = vlib_call_init_function (vm, vpe_api_init)))
return (error);
- if ((error = vlib_call_init_function(vm, vlibmemory_init)))
- return(error);
+ if ((error = vlib_call_init_function (vm, vlibmemory_init)))
+ return (error);
gm->vlib_main = vm;
diff --git a/vppinfra/vppinfra/anneal.c b/vppinfra/vppinfra/anneal.c
index 9b3a5fe8f94..35d10946482 100644
--- a/vppinfra/vppinfra/anneal.c
+++ b/vppinfra/vppinfra/anneal.c
@@ -18,7 +18,7 @@
/*
* Optimize an objective function by simulated annealing
- *
+ *
* Here are a couple of short, easily-understood
* descriptions of simulated annealing:
*
@@ -32,10 +32,10 @@
* of hot metal, aka annealing.
*
* There are (at least) three problem-dependent annealing parameters
- * to consider:
+ * to consider:
*
* t0, the initial "temperature. Should be set so that the probability
- * of accepting a transition to a higher cost configuration is
+ * of accepting a transition to a higher cost configuration is
* initially about 0.8.
*
* ntemps, the number of temperatures to use. Each successive temperature
@@ -49,116 +49,124 @@
* (desired) global minimum.
*/
-void clib_anneal (clib_anneal_param_t * p)
+void
+clib_anneal (clib_anneal_param_t * p)
{
f64 t;
f64 cost, prev_cost, delta_cost, initial_cost, best_cost;
f64 random_accept, delta_cost_over_t;
- f64 total_increase=0.0, average_increase;
+ f64 total_increase = 0.0, average_increase;
u32 i, j;
u32 number_of_increases = 0;
u32 accepted_this_temperature;
u32 best_saves_this_temperature;
int accept;
-
+
t = p->initial_temperature;
best_cost = initial_cost = prev_cost = p->anneal_metric (p->opaque);
p->anneal_save_best_configuration (p->opaque);
if (p->flags & CLIB_ANNEAL_VERBOSE)
- fformat(stdout, "Initial cost %.2f\n", initial_cost);
+ fformat (stdout, "Initial cost %.2f\n", initial_cost);
- for (i = 0; i < p->number_of_temperatures; i++)
+ for (i = 0; i < p->number_of_temperatures; i++)
{
accepted_this_temperature = 0;
best_saves_this_temperature = 0;
-
+
p->anneal_restore_best_configuration (p->opaque);
cost = best_cost;
- for (j = 0; j < p->number_of_configurations_per_temperature; j++)
- {
- p->anneal_new_configuration (p->opaque);
- cost = p->anneal_metric (p->opaque);
-
- delta_cost = cost - prev_cost;
-
- /* cost function looks better, accept this move */
- if (p->flags & CLIB_ANNEAL_MINIMIZE)
- accept = delta_cost < 0.0;
- else
- accept = delta_cost > 0.0;
-
- if (accept)
- {
- if (p->flags & CLIB_ANNEAL_MINIMIZE)
- if (cost < best_cost)
- {
- if (p->flags & CLIB_ANNEAL_VERBOSE)
- fformat (stdout, "New best cost %.2f\n", cost);
- best_cost = cost;
- p->anneal_save_best_configuration (p->opaque);
- best_saves_this_temperature++;
- }
-
- accepted_this_temperature++;
- prev_cost = cost;
- continue;
- }
-
- /* cost function worse, keep stats to suggest t0 */
- total_increase += (p->flags & CLIB_ANNEAL_MINIMIZE) ?
- delta_cost : -delta_cost;
-
- number_of_increases++;
-
- /*
- * Accept a higher cost with Pr { e^(-(delta_cost / T)) },
- * equivalent to rnd[0,1] < e^(-(delta_cost / T))
- *
- * AKA, the Boltzmann factor.
- */
- random_accept = random_f64 (&p->random_seed);
-
- delta_cost_over_t = delta_cost / t;
-
- if (random_accept < exp (-delta_cost_over_t))
- {
- accepted_this_temperature++;
- prev_cost = cost;
- continue;
- }
- p->anneal_restore_previous_configuration (p->opaque);
- }
+ for (j = 0; j < p->number_of_configurations_per_temperature; j++)
+ {
+ p->anneal_new_configuration (p->opaque);
+ cost = p->anneal_metric (p->opaque);
+
+ delta_cost = cost - prev_cost;
+
+ /* cost function looks better, accept this move */
+ if (p->flags & CLIB_ANNEAL_MINIMIZE)
+ accept = delta_cost < 0.0;
+ else
+ accept = delta_cost > 0.0;
+
+ if (accept)
+ {
+ if (p->flags & CLIB_ANNEAL_MINIMIZE)
+ if (cost < best_cost)
+ {
+ if (p->flags & CLIB_ANNEAL_VERBOSE)
+ fformat (stdout, "New best cost %.2f\n", cost);
+ best_cost = cost;
+ p->anneal_save_best_configuration (p->opaque);
+ best_saves_this_temperature++;
+ }
+
+ accepted_this_temperature++;
+ prev_cost = cost;
+ continue;
+ }
+
+ /* cost function worse, keep stats to suggest t0 */
+ total_increase += (p->flags & CLIB_ANNEAL_MINIMIZE) ?
+ delta_cost : -delta_cost;
+
+ number_of_increases++;
+
+ /*
+ * Accept a higher cost with Pr { e^(-(delta_cost / T)) },
+ * equivalent to rnd[0,1] < e^(-(delta_cost / T))
+ *
+ * AKA, the Boltzmann factor.
+ */
+ random_accept = random_f64 (&p->random_seed);
+
+ delta_cost_over_t = delta_cost / t;
+
+ if (random_accept < exp (-delta_cost_over_t))
+ {
+ accepted_this_temperature++;
+ prev_cost = cost;
+ continue;
+ }
+ p->anneal_restore_previous_configuration (p->opaque);
+ }
if (p->flags & CLIB_ANNEAL_VERBOSE)
- {
- fformat (stdout, "Temp %.2f, cost %.2f, accepted %d, bests %d\n", t,
- prev_cost, accepted_this_temperature,
- best_saves_this_temperature);
- fformat (stdout, "Improvement %.2f\n", initial_cost - prev_cost);
- fformat (stdout, "-------------\n");
- }
-
+ {
+ fformat (stdout, "Temp %.2f, cost %.2f, accepted %d, bests %d\n", t,
+ prev_cost, accepted_this_temperature,
+ best_saves_this_temperature);
+ fformat (stdout, "Improvement %.2f\n", initial_cost - prev_cost);
+ fformat (stdout, "-------------\n");
+ }
+
t = t * p->temperature_step;
}
- /*
+ /*
* Empirically, one wants the probability of accepting a move
* at the initial temperature to be about 0.8.
*/
average_increase = total_increase / (f64) number_of_increases;
- p->suggested_initial_temperature =
- average_increase / 0.22 ; /* 0.22 = -ln (0.8) */
+ p->suggested_initial_temperature = average_increase / 0.22; /* 0.22 = -ln (0.8) */
p->final_temperature = t;
p->final_metric = p->anneal_metric (p->opaque);
-
+
if (p->flags & CLIB_ANNEAL_VERBOSE)
{
- fformat (stdout, "Average cost increase from a bad move: %.2f\n",
- average_increase);
- fformat (stdout, "Suggested t0 = %.2f\n",
- p->suggested_initial_temperature);
+ fformat (stdout, "Average cost increase from a bad move: %.2f\n",
+ average_increase);
+ fformat (stdout, "Suggested t0 = %.2f\n",
+ p->suggested_initial_temperature);
}
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
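
Note on the suggested_initial_temperature computation in the anneal.c hunks above, since the in-line comment is terse: total_increase accumulates the cost growth of every move in the "wrong" direction, so average_increase is the mean penalty of a bad move. Requiring the initial acceptance probability exp (-average_increase / t0) to be about 0.8 gives t0 = average_increase / (-ln 0.8) = average_increase / 0.223..., which the code rounds to average_increase / 0.22.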
diff --git a/vppinfra/vppinfra/anneal.h b/vppinfra/vppinfra/anneal.h
index 970b9068a65..148d38ba551 100644
--- a/vppinfra/vppinfra/anneal.h
+++ b/vppinfra/vppinfra/anneal.h
@@ -22,7 +22,8 @@
#include <vppinfra/random.h>
#include <math.h>
-typedef struct {
+typedef struct
+{
/* Initial temperature */
f64 initial_temperature;
@@ -37,14 +38,14 @@ typedef struct {
u32 flags;
#define CLIB_ANNEAL_VERBOSE (1<<0)
-#define CLIB_ANNEAL_MINIMIZE (1<<1) /* mutually exclusive */
-#define CLIB_ANNEAL_MAXIMIZE (1<<2) /* mutually exclusive */
+#define CLIB_ANNEAL_MINIMIZE (1<<1) /* mutually exclusive */
+#define CLIB_ANNEAL_MAXIMIZE (1<<2) /* mutually exclusive */
/* Random number seed, set to ensure repeatable results */
u32 random_seed;
/* Opaque data passed to callbacks */
- void * opaque;
+ void *opaque;
/* Final temperature (output) */
f64 final_temperature;
@@ -57,24 +58,32 @@ typedef struct {
/*--- Callbacks ---*/
-
+
/* objective function to minimize */
- f64 (*anneal_metric)(void * opaque);
+ f64 (*anneal_metric) (void *opaque);
/* Generate a new configuration */
- void (*anneal_new_configuration)(void * opaque);
+ void (*anneal_new_configuration) (void *opaque);
/* Restore the previous configuration */
- void (*anneal_restore_previous_configuration)(void * opaque);
+ void (*anneal_restore_previous_configuration) (void *opaque);
/* Save best configuration found e.g at a certain temperature */
- void (*anneal_save_best_configuration) (void * opaque);
+ void (*anneal_save_best_configuration) (void *opaque);
/* restore best configuration found e.g at a certain temperature */
- void (*anneal_restore_best_configuration) (void * opaque);
-
+ void (*anneal_restore_best_configuration) (void *opaque);
+
} clib_anneal_param_t;
void clib_anneal (clib_anneal_param_t * p);
#endif /* __included_anneal_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
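
For context on the structure being reindented above, a minimal usage sketch (not part of this patch); my_state_t and the callback bodies are hypothetical placeholders, and the parameter values are arbitrary:

#include <vppinfra/anneal.h>

typedef struct
{
  f64 current_cost;		/* hypothetical problem state */
} my_state_t;

static f64
my_metric (void *opaque)
{
  my_state_t *s = opaque;
  return s->current_cost;	/* cost of the current configuration */
}

static void my_new_configuration (void *opaque) { /* perturb the state */ }
static void my_restore_previous (void *opaque) { /* undo the last perturbation */ }
static void my_save_best (void *opaque) { /* snapshot the best state seen */ }
static void my_restore_best (void *opaque) { /* reload the snapshot */ }

static void
run_anneal (my_state_t * s)
{
  clib_anneal_param_t p = { 0 };

  p.initial_temperature = 100.0;
  p.temperature_step = 0.9;	/* each temperature = 0.9 * previous */
  p.number_of_temperatures = 100;
  p.number_of_configurations_per_temperature = 1000;
  p.flags = CLIB_ANNEAL_MINIMIZE | CLIB_ANNEAL_VERBOSE;
  p.random_seed = 0xdeaddabe;
  p.opaque = s;
  p.anneal_metric = my_metric;
  p.anneal_new_configuration = my_new_configuration;
  p.anneal_restore_previous_configuration = my_restore_previous;
  p.anneal_save_best_configuration = my_save_best;
  p.anneal_restore_best_configuration = my_restore_best;

  clib_anneal (&p);
  /* On return, p.final_metric, p.final_temperature and
     p.suggested_initial_temperature have been filled in. */
}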
diff --git a/vppinfra/vppinfra/asm_mips.h b/vppinfra/vppinfra/asm_mips.h
index 38641f70c0c..7c9e69586f4 100644
--- a/vppinfra/vppinfra/asm_mips.h
+++ b/vppinfra/vppinfra/asm_mips.h
@@ -61,7 +61,7 @@
_(tge) _(tgeu) _(tlt) _(tltu) _(teq) _(o65) _(tne) _(o67) \
_(dsll) _(o71) _(dsrl) _(dsra) _(dsll32) _(o75) _(dsrl32) _(dsra32)
-/* SPECIAL2 encoding of funct field. */ \
+/* SPECIAL2 encoding of funct field. */
#define mips_foreach_special2_funct \
_(madd) _(maddu) _(mul) _(o03) _(msub) _(msubu) _(o06) _(o07) \
_(o10) _(o11) _(o12) _(o13) _(o14) _(o15) _(o16) _(o17) \
@@ -137,92 +137,116 @@
_(o70) _(o71) _(o72) _(o73) _(o74) _(o75) _(wac) _(rac)
#define _(f) MIPS_OPCODE_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_opcode
} mips_insn_opcode_t;
#undef _
#define _(f) MIPS_SPECIAL_FUNCT_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_special_funct
} mips_insn_special_funct_t;
#undef _
#define _(f) MIPS_SPECIAL2_FUNCT_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_special2_funct
} mips_insn_special2_funct_t;
#undef _
#define _(f) MIPS_REGIMM_RT_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_regimm_rt
} mips_insn_regimm_rt_t;
#undef _
#define _(f) MIPS_COP0_RS_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_cop0_rs
} mips_insn_cop0_rs_t;
#undef _
#define _(f) MIPS_COP0_FUNCT_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_cop0_funct
} mips_insn_cop0_funct_t;
#undef _
#define _(f) MIPS_COP1_RS_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_cop1_rs
} mips_insn_cop1_rs_t;
#undef _
#define _(f) MIPS_COP1_FUNCT_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_cop1_funct
} mips_insn_cop1_funct_t;
#undef _
#define _(f) MIPS_COP1X_FUNCT_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_cop1x_funct
} mips_insn_cop1x_funct_t;
#undef _
#define _(f) MIPS_MDMX_FUNCT_##f,
-typedef enum {
+typedef enum
+{
mips_foreach_mdmx_funct
} mips_insn_mdmx_funct_t;
#undef _
always_inline mips_insn_opcode_t
mips_insn_get_op (u32 insn)
-{ return (insn >> 26) & 0x3f; }
+{
+ return (insn >> 26) & 0x3f;
+}
always_inline u32
mips_insn_get_rs (u32 insn)
-{ return (insn >> 21) & 0x1f; }
+{
+ return (insn >> 21) & 0x1f;
+}
always_inline u32
mips_insn_get_rt (u32 insn)
-{ return (insn >> 16) & 0x1f; }
+{
+ return (insn >> 16) & 0x1f;
+}
always_inline u32
mips_insn_get_rd (u32 insn)
-{ return (insn >> 11) & 0x1f; }
+{
+ return (insn >> 11) & 0x1f;
+}
always_inline u32
mips_insn_get_sa (u32 insn)
-{ return (insn >> 6) & 0x1f; }
+{
+ return (insn >> 6) & 0x1f;
+}
always_inline u32
mips_insn_get_funct (u32 insn)
-{ return (insn >> 0) & 0x3f; }
+{
+ return (insn >> 0) & 0x3f;
+}
always_inline i32
mips_insn_get_immediate (u32 insn)
-{ return (((i32) insn) << 16) >> 16; }
+{
+ return (((i32) insn) << 16) >> 16;
+}
always_inline u32
mips_insn_encode_i_type (int op, int rs, int rt, int immediate)
@@ -310,9 +334,18 @@ mips_insn_load (u32 rd, i32 offset, u32 base, u32 log2_bytes)
return mips_insn_encode_i_type (op, base, rd, offset);
}
-typedef enum {
- MIPS_REG_SP = 29,
- MIPS_REG_RA = 31,
+typedef enum
+{
+ MIPS_REG_SP = 29,
+ MIPS_REG_RA = 31,
} mips_reg_t;
#endif /* included_asm_mips_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
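
For reference while reading the reindented accessors above, a small sketch (not part of this patch) decoding the prologue store that the MIPS backtracer hunts for; 0xafbf0018 encodes "sw ra,24(sp)":

#include <vppinfra/asm_mips.h>

static void
decode_example (void)
{
  u32 insn = 0xafbf0018;

  u32 op = mips_insn_get_op (insn);		/* 0x2b, the MIPS "sw" opcode */
  u32 base = mips_insn_get_rs (insn);		/* 29 == MIPS_REG_SP */
  u32 rt = mips_insn_get_rt (insn);		/* 31 == MIPS_REG_RA */
  i32 offset = mips_insn_get_immediate (insn);	/* 24 */

  /* i.e. this frame stores the return address at sp + 24. */
  (void) op; (void) base; (void) rt; (void) offset;
}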
diff --git a/vppinfra/vppinfra/asm_x86.h b/vppinfra/vppinfra/asm_x86.h
index 378da0f8167..dacef61755c 100644
--- a/vppinfra/vppinfra/asm_x86.h
+++ b/vppinfra/vppinfra/asm_x86.h
@@ -17,17 +17,20 @@
#include <vppinfra/format.h>
-typedef union {
- struct {
+typedef union
+{
+ struct
+ {
u8 code;
u8 type;
};
u8 data[2];
} x86_insn_operand_t;
-typedef struct {
+typedef struct
+{
/* Instruction name. */
- char * name;
+ char *name;
/* X86 instructions may have up to 3 operands. */
x86_insn_operand_t operands[3];
@@ -71,21 +74,22 @@ x86_insn_operand_is_valid (x86_insn_t * i, uword o)
/* rex w bit */ \
_ (OPERAND_SIZE_64, 0)
-typedef enum {
+typedef enum
+{
#define _(f,o) X86_INSN_FLAG_BIT_##f,
- foreach_x86_insn_parse_flag
- foreach_x86_legacy_prefix
+ foreach_x86_insn_parse_flag foreach_x86_legacy_prefix
#undef _
} x86_insn_parse_flag_bit_t;
-typedef enum {
+typedef enum
+{
#define _(f,o) X86_INSN_##f = 1 << X86_INSN_FLAG_BIT_##f,
- foreach_x86_insn_parse_flag
- foreach_x86_legacy_prefix
+ foreach_x86_insn_parse_flag foreach_x86_legacy_prefix
#undef _
} x86_insn_parse_flag_t;
-typedef struct {
+typedef struct
+{
/* Registers in instruction.
[0] is modrm reg field
[1] is base reg
@@ -93,9 +97,9 @@ typedef struct {
u8 regs[3];
/* Scale for index register. */
- u8 log2_index_scale : 2;
- u8 log2_effective_operand_bytes : 3;
- u8 log2_effective_address_bytes : 3;
+ u8 log2_index_scale:2;
+ u8 log2_effective_operand_bytes:3;
+ u8 log2_effective_address_bytes:3;
i32 displacement;
@@ -106,8 +110,16 @@ typedef struct {
x86_insn_t insn;
} x86_insn_parse_t;
-
-u8 * x86_insn_parse (x86_insn_parse_t * p, u8 * code_start);
+
+u8 *x86_insn_parse (x86_insn_parse_t * p, u8 * code_start);
format_function_t format_x86_insn_parse;
#endif /* included_asm_x86_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/backtrace.c b/vppinfra/vppinfra/backtrace.c
index dc9c2565023..bbfb792c656 100644
--- a/vppinfra/vppinfra/backtrace.c
+++ b/vppinfra/vppinfra/backtrace.c
@@ -46,28 +46,22 @@
#include <vppinfra/asm_mips.h>
uword
-clib_backtrace (uword * callers,
- uword max_callers,
- uword n_frames_to_skip)
+clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip)
{
- u32 * pc;
- void * sp;
+ u32 *pc;
+ void *sp;
uword i, saved_pc;
/* Figure current PC, saved PC and stack pointer. */
asm volatile (".set push\n"
- ".set noat\n"
- "move %[saved_pc], $31\n"
- "move %[sp], $29\n"
+ ".set noat\n" "move %[saved_pc], $31\n" "move %[sp], $29\n"
/* Fetches current PC. */
"la $at, 1f\n"
"jalr %[pc], $at\n"
"nop\n"
"1:\n"
- ".set pop\n"
- : [pc] "=r" (pc),
- [saved_pc] "=r" (saved_pc),
- [sp] "=r" (sp));
+ ".set pop\n":[pc] "=r" (pc),
+ [saved_pc] "=r" (saved_pc),[sp] "=r" (sp));
/* Also skip current frame. */
n_frames_to_skip += 1;
@@ -77,11 +71,11 @@ clib_backtrace (uword * callers,
mips_insn_opcode_t op;
mips_insn_special_funct_t funct;
i32 insn, rs, rt, rd, immediate, found_saved_pc;
- u32 * start_pc;
+ u32 *start_pc;
/* Parse instructions until we reach prologue for this
- stack frame. We'll need to figure out where saved
- PC is and where previous stack frame lives. */
+ stack frame. We'll need to figure out where saved
+ PC is and where previous stack frame lives. */
start_pc = pc;
found_saved_pc = 0;
while (1)
@@ -94,125 +88,127 @@ clib_backtrace (uword * callers,
rd = mips_insn_get_rd (insn);
immediate = mips_insn_get_immediate (insn);
- switch (op) {
- default:
- break;
-
- case MIPS_OPCODE_sd:
- case MIPS_OPCODE_sw:
- /* Trace stores of return address. */
- if (rt == MIPS_REG_RA)
- {
- void * addr = sp + immediate;
-
- /* If RA is stored somewhere other than in the
- stack frame, give up. */
- if (rs != MIPS_REG_SP)
- goto backtrace_done;
-
- ASSERT (immediate % 4 == 0);
- if (op == MIPS_OPCODE_sw)
- saved_pc = ((u32 *) addr)[0];
- else
- saved_pc = ((u64 *) addr)[0];
- found_saved_pc = 1;
- }
- break;
-
- case MIPS_OPCODE_addiu:
- case MIPS_OPCODE_daddiu:
- case MIPS_OPCODE_addi:
- case MIPS_OPCODE_daddi:
- if (rt == MIPS_REG_SP)
- {
- if (rs != MIPS_REG_SP)
- goto backtrace_done;
-
- ASSERT (immediate % 4 == 0);
-
- /* Assume positive offset is part of the epilogue.
- E.g.
- jr ra
- add sp,sp,100
- */
- if (immediate > 0)
- continue;
-
- /* Negative offset means allocate stack space.
- This could either be the prologue or could be due to
- alloca. */
- sp -= immediate;
-
- /* This frame will not save RA. */
- if (i == 0)
- goto found_prologue;
-
- /* Assume that addiu sp,sp,-N without store of ra means
- that we have not found the prologue yet. */
- if (found_saved_pc)
- goto found_prologue;
- }
- break;
-
- case MIPS_OPCODE_slti:
- case MIPS_OPCODE_sltiu:
- case MIPS_OPCODE_andi:
- case MIPS_OPCODE_ori:
- case MIPS_OPCODE_xori:
- case MIPS_OPCODE_lui:
- case MIPS_OPCODE_ldl:
- case MIPS_OPCODE_ldr:
- case MIPS_OPCODE_lb:
- case MIPS_OPCODE_lh:
- case MIPS_OPCODE_lwl:
- case MIPS_OPCODE_lw:
- case MIPS_OPCODE_lbu:
- case MIPS_OPCODE_lhu:
- case MIPS_OPCODE_lwr:
- case MIPS_OPCODE_lwu:
- case MIPS_OPCODE_ld:
- /* Give up when we find anyone setting the stack pointer. */
- if (rt == MIPS_REG_SP)
- goto backtrace_done;
- break;
-
- case MIPS_OPCODE_SPECIAL:
- if (rd == MIPS_REG_SP)
- switch (funct) {
+ switch (op)
+ {
default:
+ break;
+
+ case MIPS_OPCODE_sd:
+ case MIPS_OPCODE_sw:
+ /* Trace stores of return address. */
+ if (rt == MIPS_REG_RA)
+ {
+ void *addr = sp + immediate;
+
+ /* If RA is stored somewhere other than in the
+ stack frame, give up. */
+ if (rs != MIPS_REG_SP)
+ goto backtrace_done;
+
+ ASSERT (immediate % 4 == 0);
+ if (op == MIPS_OPCODE_sw)
+ saved_pc = ((u32 *) addr)[0];
+ else
+ saved_pc = ((u64 *) addr)[0];
+ found_saved_pc = 1;
+ }
+ break;
+
+ case MIPS_OPCODE_addiu:
+ case MIPS_OPCODE_daddiu:
+ case MIPS_OPCODE_addi:
+ case MIPS_OPCODE_daddi:
+ if (rt == MIPS_REG_SP)
+ {
+ if (rs != MIPS_REG_SP)
+ goto backtrace_done;
+
+ ASSERT (immediate % 4 == 0);
+
+ /* Assume positive offset is part of the epilogue.
+ E.g.
+ jr ra
+ add sp,sp,100
+ */
+ if (immediate > 0)
+ continue;
+
+ /* Negative offset means allocate stack space.
+ This could either be the prologue or could be due to
+ alloca. */
+ sp -= immediate;
+
+ /* This frame will not save RA. */
+ if (i == 0)
+ goto found_prologue;
+
+ /* Assume that addiu sp,sp,-N without store of ra means
+ that we have not found the prologue yet. */
+ if (found_saved_pc)
+ goto found_prologue;
+ }
+ break;
+
+ case MIPS_OPCODE_slti:
+ case MIPS_OPCODE_sltiu:
+ case MIPS_OPCODE_andi:
+ case MIPS_OPCODE_ori:
+ case MIPS_OPCODE_xori:
+ case MIPS_OPCODE_lui:
+ case MIPS_OPCODE_ldl:
+ case MIPS_OPCODE_ldr:
+ case MIPS_OPCODE_lb:
+ case MIPS_OPCODE_lh:
+ case MIPS_OPCODE_lwl:
+ case MIPS_OPCODE_lw:
+ case MIPS_OPCODE_lbu:
+ case MIPS_OPCODE_lhu:
+ case MIPS_OPCODE_lwr:
+ case MIPS_OPCODE_lwu:
+ case MIPS_OPCODE_ld:
/* Give up when we find anyone setting the stack pointer. */
- goto backtrace_done;
-
- case MIPS_SPECIAL_FUNCT_break:
- case MIPS_SPECIAL_FUNCT_jr:
- case MIPS_SPECIAL_FUNCT_sync:
- case MIPS_SPECIAL_FUNCT_syscall:
- case MIPS_SPECIAL_FUNCT_tge:
- case MIPS_SPECIAL_FUNCT_tgeu:
- case MIPS_SPECIAL_FUNCT_tlt:
- case MIPS_SPECIAL_FUNCT_tltu:
- case MIPS_SPECIAL_FUNCT_teq:
- case MIPS_SPECIAL_FUNCT_tne:
- /* These instructions can validly have rd == MIPS_REG_SP */
+ if (rt == MIPS_REG_SP)
+ goto backtrace_done;
+ break;
+
+ case MIPS_OPCODE_SPECIAL:
+ if (rd == MIPS_REG_SP)
+ switch (funct)
+ {
+ default:
+ /* Give up when we find anyone setting the stack pointer. */
+ goto backtrace_done;
+
+ case MIPS_SPECIAL_FUNCT_break:
+ case MIPS_SPECIAL_FUNCT_jr:
+ case MIPS_SPECIAL_FUNCT_sync:
+ case MIPS_SPECIAL_FUNCT_syscall:
+ case MIPS_SPECIAL_FUNCT_tge:
+ case MIPS_SPECIAL_FUNCT_tgeu:
+ case MIPS_SPECIAL_FUNCT_tlt:
+ case MIPS_SPECIAL_FUNCT_tltu:
+ case MIPS_SPECIAL_FUNCT_teq:
+ case MIPS_SPECIAL_FUNCT_tne:
+ /* These instructions can validly have rd == MIPS_REG_SP */
+ break;
+ }
break;
}
- break;
- }
}
found_prologue:
/* Check sanity of saved pc. */
if (saved_pc & 3)
- goto backtrace_done;
+ goto backtrace_done;
if (saved_pc == 0)
- goto backtrace_done;
+ goto backtrace_done;
if (i >= n_frames_to_skip)
callers[i - n_frames_to_skip] = saved_pc;
pc = uword_to_pointer (saved_pc, u32 *);
}
- backtrace_done:
+backtrace_done:
if (i < n_frames_to_skip)
return 0;
else
@@ -223,20 +219,19 @@ clib_backtrace (uword * callers,
#ifndef clib_backtrace_defined
#define clib_backtrace_defined
-typedef struct clib_generic_stack_frame_t {
- struct clib_generic_stack_frame_t * prev;
- void * return_address;
+typedef struct clib_generic_stack_frame_t
+{
+ struct clib_generic_stack_frame_t *prev;
+ void *return_address;
} clib_generic_stack_frame_t;
/* This will only work if we have a frame pointer.
Without a frame pointer we have to parse the machine code to
parse the stack frames. */
uword
-clib_backtrace (uword * callers,
- uword max_callers,
- uword n_frames_to_skip)
+clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip)
{
- clib_generic_stack_frame_t * f;
+ clib_generic_stack_frame_t *f;
uword i;
f = __builtin_frame_address (0);
@@ -247,18 +242,26 @@ clib_backtrace (uword * callers,
for (i = 0; i < max_callers + n_frames_to_skip; i++)
{
f = f->prev;
- if (! f)
+ if (!f)
goto backtrace_done;
- if (clib_abs ((void *) f - (void *) f->prev) > (64*1024))
+ if (clib_abs ((void *) f - (void *) f->prev) > (64 * 1024))
goto backtrace_done;
if (i >= n_frames_to_skip)
callers[i - n_frames_to_skip] = pointer_to_uword (f->return_address);
}
- backtrace_done:
+backtrace_done:
if (i < n_frames_to_skip)
- return 0;
+ return 0;
else
- return i - n_frames_to_skip;
+ return i - n_frames_to_skip;
}
#endif /* clib_backtrace_defined */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
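
A minimal usage sketch of the interface whose formatting changed above (not part of this patch): collect up to 16 return addresses, skipping the printing function's own frame. The include set is an assumption; use whichever vppinfra header declares clib_backtrace() in your tree.

#include <vppinfra/error.h>
#include <vppinfra/format.h>

static void
print_backtrace (void)
{
  uword callers[16];
  uword i, n;

  n = clib_backtrace (callers, sizeof (callers) / sizeof (callers[0]),
		      1 /* n_frames_to_skip */ );

  for (i = 0; i < n; i++)
    fformat (stderr, "#%d: 0x%llx\n", (int) i, (u64) callers[i]);
}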
diff --git a/vppinfra/vppinfra/bihash_24_8.h b/vppinfra/vppinfra/bihash_24_8.h
index e2ad8f91863..e97a37dadff 100644
--- a/vppinfra/vppinfra/bihash_24_8.h
+++ b/vppinfra/vppinfra/bihash_24_8.h
@@ -25,12 +25,14 @@
#include <vppinfra/pool.h>
#include <vppinfra/xxhash.h>
-typedef struct {
+typedef struct
+{
u64 key[3];
u64 value;
} clib_bihash_kv_24_8_t;
-static inline int clib_bihash_is_free_24_8 (clib_bihash_kv_24_8_t *v)
+static inline int
+clib_bihash_is_free_24_8 (clib_bihash_kv_24_8_t * v)
{
/* Free values are memset to 0xff, check a bit... */
if (v->key[0] == ~0ULL && v->value == ~0ULL)
@@ -40,17 +42,17 @@ static inline int clib_bihash_is_free_24_8 (clib_bihash_kv_24_8_t *v)
#if __SSE4_2__
static inline u32
-crc_u32(u32 data, u32 value)
+crc_u32 (u32 data, u32 value)
{
- __asm__ volatile( "crc32l %[data], %[value];"
- : [value] "+r" (value)
- : [data] "rm" (data));
+ __asm__ volatile ("crc32l %[data], %[value];":[value] "+r" (value):[data]
+ "rm" (data));
return value;
}
-static inline u64 clib_bihash_hash_24_8 (clib_bihash_kv_24_8_t *v)
+static inline u64
+clib_bihash_hash_24_8 (clib_bihash_kv_24_8_t * v)
{
- u32 * dp = (u32 *) &v->key[0];
+ u32 *dp = (u32 *) & v->key[0];
u32 value = 0;
value = crc_u32 (dp[0], value);
@@ -62,28 +64,40 @@ static inline u64 clib_bihash_hash_24_8 (clib_bihash_kv_24_8_t *v)
return value;
}
-#else
-static inline u64 clib_bihash_hash_24_8 (clib_bihash_kv_24_8_t *v)
+#else
+static inline u64
+clib_bihash_hash_24_8 (clib_bihash_kv_24_8_t * v)
{
u64 tmp = v->key[0] ^ v->key[1] ^ v->key[2];
return clib_xxhash (tmp);
}
#endif
-static inline u8 * format_bihash_kvp_24_8 (u8 * s, va_list * args)
+static inline u8 *
+format_bihash_kvp_24_8 (u8 * s, va_list * args)
{
- clib_bihash_kv_24_8_t * v = va_arg (*args, clib_bihash_kv_24_8_t *);
+ clib_bihash_kv_24_8_t *v = va_arg (*args, clib_bihash_kv_24_8_t *);
- s = format (s, "key %llu %llu %llu value %llu",
- v->key[0], v->key[1], v->key[2], v->value);
+ s = format (s, "key %llu %llu %llu value %llu",
+ v->key[0], v->key[1], v->key[2], v->value);
return s;
}
-static inline int clib_bihash_key_compare_24_8 (u64 * a, u64 * b)
+static inline int
+clib_bihash_key_compare_24_8 (u64 * a, u64 * b)
{
- return ((a[0]^b[0]) | (a[1]^b[1]) | (a[2]^b[2])) == 0;
+ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) == 0;
}
+
#undef __included_bihash_template_h__
#include <vppinfra/bihash_template.h>
#endif /* __included_bihash_24_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/bihash_8_8.h b/vppinfra/vppinfra/bihash_8_8.h
index 221e1f25534..b5f4e3bfdfa 100644
--- a/vppinfra/vppinfra/bihash_8_8.h
+++ b/vppinfra/vppinfra/bihash_8_8.h
@@ -26,47 +26,52 @@
#include <vppinfra/xxhash.h>
/** 8 octet key, 8 octet key value pair */
-typedef struct {
- u64 key; /**< the key */
- u64 value; /**< the value */
+typedef struct
+{
+ u64 key; /**< the key */
+ u64 value; /**< the value */
} clib_bihash_kv_8_8_t;
-/** Decide if a clib_bihash_kv_8_8_t instance is free
+/** Decide if a clib_bihash_kv_8_8_t instance is free
@param v- pointer to the (key,value) pair
*/
-static inline int clib_bihash_is_free_8_8 (clib_bihash_kv_8_8_t *v)
+static inline int
+clib_bihash_is_free_8_8 (clib_bihash_kv_8_8_t * v)
{
if (v->key == ~0ULL && v->value == ~0ULL)
return 1;
return 0;
}
-/** Hash a clib_bihash_kv_8_8_t instance
+/** Hash a clib_bihash_kv_8_8_t instance
@param v - pointer to the (key,value) pair, hash the key (only)
*/
-static inline u64 clib_bihash_hash_8_8 (clib_bihash_kv_8_8_t *v)
+static inline u64
+clib_bihash_hash_8_8 (clib_bihash_kv_8_8_t * v)
{
return clib_xxhash (v->key);
}
-/** Format a clib_bihash_kv_8_8_t instance
+/** Format a clib_bihash_kv_8_8_t instance
@param s - u8 * vector under construction
@param v (vararg) - the (key,value) pair to format
@return s - the u8 * vector under construction
*/
-static inline u8 * format_bihash_kvp_8_8 (u8 * s, va_list * args)
+static inline u8 *
+format_bihash_kvp_8_8 (u8 * s, va_list * args)
{
- clib_bihash_kv_8_8_t * v = va_arg (*args, clib_bihash_kv_8_8_t *);
+ clib_bihash_kv_8_8_t *v = va_arg (*args, clib_bihash_kv_8_8_t *);
s = format (s, "key %llu value %llu", v->key, v->value);
return s;
}
-/** Compare two clib_bihash_kv_8_8_t instances
+/** Compare two clib_bihash_kv_8_8_t instances
@param a - first key
@param b - second key
*/
-static inline int clib_bihash_key_compare_8_8 (u64 a, u64 b)
+static inline int
+clib_bihash_key_compare_8_8 (u64 a, u64 b)
{
return a == b;
}
@@ -75,3 +80,11 @@ static inline int clib_bihash_key_compare_8_8 (u64 a, u64 b)
#include <vppinfra/bihash_template.h>
#endif /* __included_bihash_8_8_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/bihash_doc.h b/vppinfra/vppinfra/bihash_doc.h
index 037e7cc90e6..da3d0a9b55b 100644
--- a/vppinfra/vppinfra/bihash_doc.h
+++ b/vppinfra/vppinfra/bihash_doc.h
@@ -41,18 +41,22 @@
*/
/** template key/value backing page structure */
-typedef struct clib_bihash_value {
- union {
+typedef struct clib_bihash_value
+{
+ union
+ {
clib_bihash_kv kvp[BIHASH_KVP_PER_PAGE]; /**< the actual key/value pairs */
- clib_bihash_value * next_free; /**< used when a KVP page (or block thereof) is on a freelist */
+ clib_bihash_value *next_free; /**< used when a KVP page (or block thereof) is on a freelist */
};
} clib_bihash_value_t
-
/** bihash bucket structure */
-typedef struct {
- union {
- struct {
+ typedef struct
+{
+ union
+ {
+ struct
+ {
u32 offset; /**< backing page offset in the clib memory heap */
u8 pad[3]; /**< log2 (size of the packing page block) */
u8 log2_pages;
@@ -60,39 +64,40 @@ typedef struct {
u64 as_u64;
};
} clib_bihash_bucket_t;
-#endif /* __defined_clib_bihash_bucket_t__ */
/** A bounded index extensible hash table */
-typedef struct {
- clib_bihash_bucket_t * buckets; /**< Hash bucket vector, power-of-two in size */
- volatile u32 * writer_lock; /**< Writer lock, in its own cache line */
- BVT(clib_bihash_value) ** working_copies; /**< Working copies (various sizes), to avoid locking against readers */
+typedef struct
+{
+ clib_bihash_bucket_t *buckets; /**< Hash bucket vector, power-of-two in size */
+ volatile u32 *writer_lock; /**< Writer lock, in its own cache line */
+ BVT (clib_bihash_value) ** working_copies;
+ /**< Working copies (various sizes), to avoid locking against readers */
clib_bihash_bucket_t saved_bucket; /**< Saved bucket pointer */
- u32 nbuckets; /**< Number of hash buckets */
- u32 log2_nbuckets; /**< lg(nbuckets) */
- u8 * name; /**< hash table name */
- BVT(clib_bihash_value) **freelists; /**< power of two freelist vector */
- void * mheap; /**< clib memory heap */
+ u32 nbuckets; /**< Number of hash buckets */
+ u32 log2_nbuckets; /**< lg(nbuckets) */
+ u8 *name; /**< hash table name */
+ BVT (clib_bihash_value) ** freelists;
+ /**< power of two freelist vector */
+ void *mheap; /**< clib memory heap */
} clib_bihash_t;
/** Get pointer to value page given its clib mheap offset */
-static inline void *
-clib_bihash_get_value (clib_bihash * h, uword offset);
+static inline void *clib_bihash_get_value (clib_bihash * h, uword offset);
/** Get clib mheap offset given a pointer */
-static inline uword clib_bihash_get_offset (clib_bihash * h, void * v);
+static inline uword clib_bihash_get_offset (clib_bihash * h, void *v);
-/** initialize a bounded index extensible hash table
+/** initialize a bounded index extensible hash table
@param h - the bi-hash table to initialize
@param name - name of the hash table
- @param nbuckets - the number of buckets, will be rounded up to
+ @param nbuckets - the number of buckets, will be rounded up to
a power of two
@param memory_size - clib mheap size, in bytes
*/
void clib_bihash_init
-(clib_bihash * h, char * name, u32 nbuckets, uword memory_size);
+ (clib_bihash * h, char *name, u32 nbuckets, uword memory_size);
/** Destroy a bounded index extensible hash table
@param h - the bi-hash table to free
@@ -109,9 +114,7 @@ void clib_bihash_free (clib_bihash * h);
@note This function will replace an existing (key,value) pair if the
new key matches an existing key
*/
-int clib_bihash_add_del (clib_bihash * h,
- clib_bihash_kv * add_v,
- int is_add);
+int clib_bihash_add_del (clib_bihash * h, clib_bihash_kv * add_v, int is_add);
/** Search a bi-hash table
@@ -121,9 +124,8 @@ int clib_bihash_add_del (clib_bihash * h,
@param return_v - (key,value) pair which matches search_v.key
@returns 0 on success (with return_v set), < 0 on error
*/
-int clib_bihash_search (clib_bihash * h,
- clib_bihash_kv * search_v,
- clib_bihash_kv * return_v);
+int clib_bihash_search (clib_bihash * h,
+ clib_bihash_kv * search_v, clib_bihash_kv * return_v);
/** Visit active (key,value) pairs in a bi-hash table
@@ -135,6 +137,13 @@ int clib_bihash_search (clib_bihash * h,
@note Trying to supply a proper function prototype for the
callback function appears to be a fool's errand.
*/
-void clib_bihash_foreach_key_value_pair (clib_bihash) * h,
- void *callback,
- void *arg);
+void clib_bihash_foreach_key_value_pair (clib_bihash * h,
+ void *callback, void *arg);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
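
Since bihash_doc.h above documents the template only in the abstract, here is a minimal usage sketch (not part of this patch) of the 8-octet-key variant. It assumes the template's BV()/BVT() macros append an _8_8 suffix to the generic names (clib_bihash_8_8_t, clib_bihash_init_8_8, ...), as the helper names in bihash_8_8.h suggest; the table sizes are arbitrary.

#include <vppinfra/bihash_8_8.h>

static void
bihash_example (void)
{
  clib_bihash_8_8_t h;
  clib_bihash_kv_8_8_t kv, result;

  clib_bihash_init_8_8 (&h, "example", 64 /* nbuckets, rounded to 2**n */ ,
			32 << 20 /* memory_size in bytes */ );

  kv.key = 42;
  kv.value = 0x1234;
  clib_bihash_add_del_8_8 (&h, &kv, 1 /* is_add */ );

  kv.key = 42;
  if (clib_bihash_search_8_8 (&h, &kv, &result) == 0)
    fformat (stdout, "%U\n", format_bihash_kvp_8_8, &result);

  clib_bihash_add_del_8_8 (&h, &kv, 0 /* is_add = 0 -> delete */ );
  clib_bihash_free_8_8 (&h);
}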
diff --git a/vppinfra/vppinfra/bihash_template.c b/vppinfra/vppinfra/bihash_template.c
index 0ee92c07570..a8d095c9758 100644
--- a/vppinfra/vppinfra/bihash_template.c
+++ b/vppinfra/vppinfra/bihash_template.c
@@ -15,92 +15,88 @@
/** @if DOCUMENTATION_IS_IN_BIHASH_DOC_H */
-void BV(clib_bihash_init)
- (BVT(clib_bihash) * h, char * name, u32 nbuckets,
- uword memory_size)
+void BV (clib_bihash_init)
+ (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size)
{
- void * oldheap;
+ void *oldheap;
nbuckets = 1 << (max_log2 (nbuckets));
- h->name = (u8 *)name;
+ h->name = (u8 *) name;
h->nbuckets = nbuckets;
h->log2_nbuckets = max_log2 (nbuckets);
- h->mheap = mheap_alloc (0 /* use VM */, memory_size);
+ h->mheap = mheap_alloc (0 /* use VM */ , memory_size);
oldheap = clib_mem_set_heap (h->mheap);
vec_validate_aligned (h->buckets, nbuckets - 1, CLIB_CACHE_LINE_BYTES);
- h->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
- CLIB_CACHE_LINE_BYTES);
+ h->writer_lock = clib_mem_alloc_aligned (CLIB_CACHE_LINE_BYTES,
+ CLIB_CACHE_LINE_BYTES);
clib_mem_set_heap (oldheap);
}
-void BV(clib_bihash_free) (BVT(clib_bihash) * h)
+void BV (clib_bihash_free) (BVT (clib_bihash) * h)
{
- mheap_free (h->mheap);
- memset (h, 0, sizeof (*h));
+ mheap_free (h->mheap);
+ memset (h, 0, sizeof (*h));
}
-static BVT(clib_bihash_value) *
-BV(value_alloc) (BVT(clib_bihash) * h, u32 log2_pages)
+static
+BVT (clib_bihash_value) *
+BV (value_alloc) (BVT (clib_bihash) * h, u32 log2_pages)
{
- BVT(clib_bihash_value) * rv = 0;
- void * oldheap;
+ BVT (clib_bihash_value) * rv = 0;
+ void *oldheap;
- ASSERT (h->writer_lock[0]);
- if (log2_pages >= vec_len (h->freelists)
- || h->freelists [log2_pages] == 0)
+ ASSERT (h->writer_lock[0]);
+ if (log2_pages >= vec_len (h->freelists) || h->freelists[log2_pages] == 0)
{
- oldheap = clib_mem_set_heap (h->mheap);
+ oldheap = clib_mem_set_heap (h->mheap);
- vec_validate (h->freelists, log2_pages);
- vec_validate_aligned (rv, (1<<log2_pages) - 1, CLIB_CACHE_LINE_BYTES);
- clib_mem_set_heap (oldheap);
- goto initialize;
+ vec_validate (h->freelists, log2_pages);
+ vec_validate_aligned (rv, (1 << log2_pages) - 1, CLIB_CACHE_LINE_BYTES);
+ clib_mem_set_heap (oldheap);
+ goto initialize;
}
- rv = h->freelists[log2_pages];
- h->freelists[log2_pages] = rv->next_free;
-
- initialize:
- ASSERT(rv);
- ASSERT (vec_len(rv) == (1<<log2_pages));
- /*
- * Latest gcc complains that the length arg is zero
- * if we replace (1<<log2_pages) with vec_len(rv).
- * No clue.
- */
- memset (rv, 0xff, sizeof (*rv) * (1<<log2_pages));
- return rv;
+ rv = h->freelists[log2_pages];
+ h->freelists[log2_pages] = rv->next_free;
+
+initialize:
+ ASSERT (rv);
+ ASSERT (vec_len (rv) == (1 << log2_pages));
+ /*
+ * Latest gcc complains that the length arg is zero
+ * if we replace (1<<log2_pages) with vec_len(rv).
+ * No clue.
+ */
+ memset (rv, 0xff, sizeof (*rv) * (1 << log2_pages));
+ return rv;
}
static void
-BV(value_free)
- (BVT(clib_bihash) * h,
- BVT(clib_bihash_value) * v)
+BV (value_free) (BVT (clib_bihash) * h, BVT (clib_bihash_value) * v)
{
- u32 log2_pages;
+ u32 log2_pages;
- ASSERT (h->writer_lock[0]);
-
- log2_pages = min_log2(vec_len(v));
+ ASSERT (h->writer_lock[0]);
- ASSERT(vec_len (h->freelists) > log2_pages);
+ log2_pages = min_log2 (vec_len (v));
- v->next_free = h->freelists[log2_pages];
- h->freelists[log2_pages] = v;
+ ASSERT (vec_len (h->freelists) > log2_pages);
+
+ v->next_free = h->freelists[log2_pages];
+ h->freelists[log2_pages] = v;
}
static inline void
-BV(make_working_copy)
- (BVT(clib_bihash) * h, clib_bihash_bucket_t * b)
+BV (make_working_copy) (BVT (clib_bihash) * h, clib_bihash_bucket_t * b)
{
- BVT(clib_bihash_value) * v;
- clib_bihash_bucket_t working_bucket __attribute__((aligned (8)));
- void * oldheap;
- BVT(clib_bihash_value) * working_copy;
- u32 cpu_number = os_get_cpu_number();
+ BVT (clib_bihash_value) * v;
+ clib_bihash_bucket_t working_bucket __attribute__ ((aligned (8)));
+ void *oldheap;
+ BVT (clib_bihash_value) * working_copy;
+ u32 cpu_number = os_get_cpu_number ();
if (cpu_number >= vec_len (h->working_copies))
{
@@ -109,173 +105,171 @@ BV(make_working_copy)
clib_mem_set_heap (oldheap);
}
- /*
+ /*
* working_copies are per-cpu so that near-simultaneous
* updates from multiple threads will not result in sporadic, spurious
- * lookup failures.
+ * lookup failures.
*/
working_copy = h->working_copies[cpu_number];
h->saved_bucket.as_u64 = b->as_u64;
oldheap = clib_mem_set_heap (h->mheap);
- if ((1<<b->log2_pages) > vec_len (working_copy))
+ if ((1 << b->log2_pages) > vec_len (working_copy))
{
- vec_validate_aligned (working_copy, (1<<b->log2_pages)-1,
- sizeof (u64));
+ vec_validate_aligned (working_copy, (1 << b->log2_pages) - 1,
+ sizeof (u64));
h->working_copies[cpu_number] = working_copy;
}
- _vec_len(working_copy) = 1<<b->log2_pages;
+ _vec_len (working_copy) = 1 << b->log2_pages;
clib_mem_set_heap (oldheap);
- v = BV(clib_bihash_get_value) (h, b->offset);
+ v = BV (clib_bihash_get_value) (h, b->offset);
- clib_memcpy (working_copy, v, sizeof (*v)*(1<<b->log2_pages));
+ clib_memcpy (working_copy, v, sizeof (*v) * (1 << b->log2_pages));
working_bucket.as_u64 = b->as_u64;
- working_bucket.offset = BV(clib_bihash_get_offset) (h, working_copy);
- CLIB_MEMORY_BARRIER();
+ working_bucket.offset = BV (clib_bihash_get_offset) (h, working_copy);
+ CLIB_MEMORY_BARRIER ();
b->as_u64 = working_bucket.as_u64;
h->working_copies[cpu_number] = working_copy;
}
-static BVT(clib_bihash_value) *
- BV(split_and_rehash)
- (BVT(clib_bihash) * h,
- BVT(clib_bihash_value) * old_values,
- u32 new_log2_pages)
+static
+BVT (clib_bihash_value) *
+BV (split_and_rehash)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_value) * old_values, u32 new_log2_pages)
{
- BVT(clib_bihash_value) * new_values, * v, * new_v;
+ BVT (clib_bihash_value) * new_values, *v, *new_v;
int i, j, k;
- new_values = BV(value_alloc) (h, new_log2_pages);
+ new_values = BV (value_alloc) (h, new_log2_pages);
v = old_values;
for (i = 0; i < vec_len (old_values); i++)
{
u64 new_hash;
-
+
for (j = 0; j < BIHASH_KVP_PER_PAGE; j++)
- {
- if (BV(clib_bihash_is_free)(&(v->kvp[j])) == 0)
- {
- new_hash = BV(clib_bihash_hash) (&(v->kvp[j]));
- new_hash >>= h->log2_nbuckets;
- new_hash &= (1<<new_log2_pages) - 1;
-
- new_v = &new_values [new_hash];
-
- for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
- {
- if (BV(clib_bihash_is_free)(&(new_v->kvp[k])))
- {
- clib_memcpy (&(new_v->kvp[k]), &(v->kvp[j]),
- sizeof (new_v->kvp[k]));
- goto doublebreak;
- }
- }
- /* Crap. Tell caller to try again */
- BV(value_free) (h, new_values);
- return 0;
- }
- doublebreak:
- ;
- }
+ {
+ if (BV (clib_bihash_is_free) (&(v->kvp[j])) == 0)
+ {
+ new_hash = BV (clib_bihash_hash) (&(v->kvp[j]));
+ new_hash >>= h->log2_nbuckets;
+ new_hash &= (1 << new_log2_pages) - 1;
+
+ new_v = &new_values[new_hash];
+
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (BV (clib_bihash_is_free) (&(new_v->kvp[k])))
+ {
+ clib_memcpy (&(new_v->kvp[k]), &(v->kvp[j]),
+ sizeof (new_v->kvp[k]));
+ goto doublebreak;
+ }
+ }
+ /* Crap. Tell caller to try again */
+ BV (value_free) (h, new_values);
+ return 0;
+ }
+ doublebreak:
+ ;
+ }
v++;
}
return new_values;
}
-int BV(clib_bihash_add_del)
- (BVT(clib_bihash) * h,
- BVT(clib_bihash_kv) * add_v,
- int is_add)
+int BV (clib_bihash_add_del)
+ (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * add_v, int is_add)
{
u32 bucket_index;
- clib_bihash_bucket_t * b, tmp_b;
- BVT(clib_bihash_value) * v, * new_v, * save_new_v, * working_copy;
+ clib_bihash_bucket_t *b, tmp_b;
+ BVT (clib_bihash_value) * v, *new_v, *save_new_v, *working_copy;
u32 value_index;
int rv = 0;
int i;
u64 hash, new_hash;
u32 new_log2_pages;
- u32 cpu_number = os_get_cpu_number();
-
- hash = BV(clib_bihash_hash) (add_v);
+ u32 cpu_number = os_get_cpu_number ();
- bucket_index = hash & (h->nbuckets-1);
+ hash = BV (clib_bihash_hash) (add_v);
+
+ bucket_index = hash & (h->nbuckets - 1);
b = &h->buckets[bucket_index];
hash >>= h->log2_nbuckets;
while (__sync_lock_test_and_set (h->writer_lock, 1))
- ;
+ ;
/* First elt in the bucket? */
if (b->offset == 0)
{
if (is_add == 0)
- {
- rv = -1;
- goto unlock;
- }
+ {
+ rv = -1;
+ goto unlock;
+ }
- v = BV(value_alloc) (h, 0);
- *v->kvp = * add_v;
+ v = BV (value_alloc) (h, 0);
+ *v->kvp = *add_v;
tmp_b.as_u64 = 0;
- tmp_b.offset = BV(clib_bihash_get_offset) (h, v);
+ tmp_b.offset = BV (clib_bihash_get_offset) (h, v);
b->as_u64 = tmp_b.as_u64;
goto unlock;
}
- BV(make_working_copy) (h, b);
+ BV (make_working_copy) (h, b);
- v = BV(clib_bihash_get_value) (h, h->saved_bucket.offset);
- value_index = hash & ((1<<h->saved_bucket.log2_pages)-1);
+ v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
+ value_index = hash & ((1 << h->saved_bucket.log2_pages) - 1);
v += value_index;
-
+
if (is_add)
{
- /*
+ /*
* For obvious (in hindsight) reasons, see if we're supposed to
* replace an existing key, then look for an empty slot.
*/
for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
- {
- if (!memcmp(&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
- {
- clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
- CLIB_MEMORY_BARRIER();
- /* Restore the previous (k,v) pairs */
- b->as_u64 = h->saved_bucket.as_u64;
- goto unlock;
- }
- }
+ {
+ if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
+ {
+ clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
+ CLIB_MEMORY_BARRIER ();
+ /* Restore the previous (k,v) pairs */
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+ }
for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
- {
- if (BV(clib_bihash_is_free)(&(v->kvp[i])))
- {
- clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
- CLIB_MEMORY_BARRIER();
- b->as_u64 = h->saved_bucket.as_u64;
- goto unlock;
- }
- }
+ {
+ if (BV (clib_bihash_is_free) (&(v->kvp[i])))
+ {
+ clib_memcpy (&(v->kvp[i]), add_v, sizeof (*add_v));
+ CLIB_MEMORY_BARRIER ();
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+ }
/* no room at the inn... split case... */
}
else
{
for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
- {
- if (!memcmp(&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
- {
- memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
- CLIB_MEMORY_BARRIER();
- b->as_u64 = h->saved_bucket.as_u64;
- goto unlock;
- }
- }
+ {
+ if (!memcmp (&(v->kvp[i]), &add_v->key, sizeof (add_v->key)))
+ {
+ memset (&(v->kvp[i]), 0xff, sizeof (*(add_v)));
+ CLIB_MEMORY_BARRIER ();
+ b->as_u64 = h->saved_bucket.as_u64;
+ goto unlock;
+ }
+ }
rv = -3;
b->as_u64 = h->saved_bucket.as_u64;
goto unlock;
@@ -283,9 +277,9 @@ int BV(clib_bihash_add_del)
new_log2_pages = h->saved_bucket.log2_pages + 1;
- expand_again:
+expand_again:
working_copy = h->working_copies[cpu_number];
- new_v = BV(split_and_rehash) (h, working_copy, new_log2_pages);
+ new_v = BV (split_and_rehash) (h, working_copy, new_log2_pages);
if (new_v == 0)
{
new_log2_pages++;
@@ -294,55 +288,54 @@ int BV(clib_bihash_add_del)
/* Try to add the new entry */
save_new_v = new_v;
- new_hash = BV(clib_bihash_hash) (add_v);
+ new_hash = BV (clib_bihash_hash) (add_v);
new_hash >>= h->log2_nbuckets;
- new_hash &= (1<<min_log2(vec_len(new_v))) - 1;
+ new_hash &= (1 << min_log2 (vec_len (new_v))) - 1;
new_v += new_hash;
-
+
for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
{
- if (BV(clib_bihash_is_free)(&(new_v->kvp[i])))
- {
- clib_memcpy (&(new_v->kvp[i]), add_v, sizeof (*add_v));
- goto expand_ok;
- }
+ if (BV (clib_bihash_is_free) (&(new_v->kvp[i])))
+ {
+ clib_memcpy (&(new_v->kvp[i]), add_v, sizeof (*add_v));
+ goto expand_ok;
+ }
}
/* Crap. Try again */
new_log2_pages++;
- BV(value_free) (h, save_new_v);
+ BV (value_free) (h, save_new_v);
goto expand_again;
- expand_ok:
+expand_ok:
tmp_b.log2_pages = min_log2 (vec_len (save_new_v));
- tmp_b.offset = BV(clib_bihash_get_offset) (h, save_new_v);
- CLIB_MEMORY_BARRIER();
+ tmp_b.offset = BV (clib_bihash_get_offset) (h, save_new_v);
+ CLIB_MEMORY_BARRIER ();
b->as_u64 = tmp_b.as_u64;
- v = BV(clib_bihash_get_value) (h, h->saved_bucket.offset);
- BV(value_free) (h, v);
+ v = BV (clib_bihash_get_value) (h, h->saved_bucket.offset);
+ BV (value_free) (h, v);
- unlock:
- CLIB_MEMORY_BARRIER();
+unlock:
+ CLIB_MEMORY_BARRIER ();
h->writer_lock[0] = 0;
return rv;
}
-int BV(clib_bihash_search)
- (BVT(clib_bihash) * h,
- BVT(clib_bihash_kv) *search_key,
- BVT(clib_bihash_kv) *valuep)
+int BV (clib_bihash_search)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
u64 hash;
u32 bucket_index;
uword value_index;
- BVT(clib_bihash_value) * v;
- clib_bihash_bucket_t * b;
+ BVT (clib_bihash_value) * v;
+ clib_bihash_bucket_t *b;
int i;
- ASSERT(valuep);
+ ASSERT (valuep);
- hash = BV(clib_bihash_hash) (search_key);
+ hash = BV (clib_bihash_hash) (search_key);
- bucket_index = hash & (h->nbuckets-1);
+ bucket_index = hash & (h->nbuckets - 1);
b = &h->buckets[bucket_index];
if (b->offset == 0)
@@ -350,72 +343,70 @@ int BV(clib_bihash_search)
hash >>= h->log2_nbuckets;
- v = BV(clib_bihash_get_value) (h, b->offset);
- value_index = hash & ((1<<b->log2_pages)-1);
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ value_index = hash & ((1 << b->log2_pages) - 1);
v += value_index;
-
+
for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
{
- if (BV(clib_bihash_key_compare)(v->kvp[i].key, search_key->key))
- {
- *valuep = v->kvp[i];
- return 0;
- }
+ if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
+ {
+ *valuep = v->kvp[i];
+ return 0;
+ }
}
return -1;
}
-u8 * BV(format_bihash) (u8 * s, va_list * args)
+u8 *BV (format_bihash) (u8 * s, va_list * args)
{
- BVT(clib_bihash) * h
- = va_arg (*args, BVT(clib_bihash) *);
+ BVT (clib_bihash) * h = va_arg (*args, BVT (clib_bihash) *);
int verbose = va_arg (*args, int);
- clib_bihash_bucket_t * b;
- BVT(clib_bihash_value) * v;
+ clib_bihash_bucket_t *b;
+ BVT (clib_bihash_value) * v;
int i, j, k;
u64 active_elements = 0;
s = format (s, "Hash table %s\n", h->name ? h->name : (u8 *) "(unnamed)");
-
+
for (i = 0; i < h->nbuckets; i++)
{
- b = &h->buckets [i];
+ b = &h->buckets[i];
if (b->offset == 0)
- {
- if (verbose > 1)
- s = format (s, "[%d]: empty\n", i);
- continue;
- }
+ {
+ if (verbose > 1)
+ s = format (s, "[%d]: empty\n", i);
+ continue;
+ }
if (verbose)
- {
- s = format (s, "[%d]: heap offset %d, len %d\n", i,
- b->offset, (1<<b->log2_pages));
- }
-
- v = BV(clib_bihash_get_value) (h, b->offset);
- for (j = 0; j < (1<<b->log2_pages); j++)
- {
- for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
- {
- if (BV(clib_bihash_is_free)(&v->kvp[k]))
- {
- if (verbose > 1)
- s = format (s, " %d: empty\n",
- j * BIHASH_KVP_PER_PAGE + k);
- continue;
- }
- if (verbose)
- {
- s = format (s, " %d: %U\n",
- j * BIHASH_KVP_PER_PAGE + k,
- BV(format_bihash_kvp),
- &(v->kvp[k]));
- }
- active_elements++;
- }
- v++;
- }
+ {
+ s = format (s, "[%d]: heap offset %d, len %d\n", i,
+ b->offset, (1 << b->log2_pages));
+ }
+
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (BV (clib_bihash_is_free) (&v->kvp[k]))
+ {
+ if (verbose > 1)
+ s = format (s, " %d: empty\n",
+ j * BIHASH_KVP_PER_PAGE + k);
+ continue;
+ }
+ if (verbose)
+ {
+ s = format (s, " %d: %U\n",
+ j * BIHASH_KVP_PER_PAGE + k,
+ BV (format_bihash_kvp), &(v->kvp[k]));
+ }
+ active_elements++;
+ }
+ v++;
+ }
}
s = format (s, " %lld active elements\n", active_elements);
@@ -424,35 +415,41 @@ u8 * BV(format_bihash) (u8 * s, va_list * args)
return s;
}
-void BV(clib_bihash_foreach_key_value_pair)
- (BVT(clib_bihash) * h,
- void *callback,
- void *arg)
+void BV (clib_bihash_foreach_key_value_pair)
+ (BVT (clib_bihash) * h, void *callback, void *arg)
{
int i, j, k;
- clib_bihash_bucket_t * b;
- BVT(clib_bihash_value) * v;
- void (*fp)(BVT(clib_bihash_kv) *, void *) = callback;
-
+ clib_bihash_bucket_t *b;
+ BVT (clib_bihash_value) * v;
+ void (*fp) (BVT (clib_bihash_kv) *, void *) = callback;
+
for (i = 0; i < h->nbuckets; i++)
{
- b = &h->buckets [i];
+ b = &h->buckets[i];
if (b->offset == 0)
- continue;
-
- v = BV(clib_bihash_get_value) (h, b->offset);
- for (j = 0; j < (1<<b->log2_pages); j++)
- {
- for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
- {
- if (BV(clib_bihash_is_free)(&v->kvp[k]))
- continue;
-
- (*fp)(&v->kvp[k], arg);
- }
- v++;
- }
+ continue;
+
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ for (j = 0; j < (1 << b->log2_pages); j++)
+ {
+ for (k = 0; k < BIHASH_KVP_PER_PAGE; k++)
+ {
+ if (BV (clib_bihash_is_free) (&v->kvp[k]))
+ continue;
+
+ (*fp) (&v->kvp[k], arg);
+ }
+ v++;
+ }
}
}
/** @endif */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
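For reference, the reindented add/del and search paths above are normally driven through a concrete template instantiation rather than the BV()/BVT() macros directly. A minimal sketch, assuming the stock 8-byte-key/8-byte-value instantiation from bihash_8_8.h; the table name, bucket count and heap size are illustrative:

#include <vppinfra/bihash_8_8.h>
#include <vppinfra/bihash_template.c>	/* emit the BV() function bodies once per instantiation */

static void
bihash_example (void)
{
  clib_bihash_8_8_t h;
  clib_bihash_kv_8_8_t kv, result;

  /* 32 buckets, 1 MB private mheap; both values are illustrative */
  clib_bihash_init_8_8 (&h, "example", 32, 1 << 20);

  kv.key = 42;
  kv.value = 0x1234;
  clib_bihash_add_del_8_8 (&h, &kv, 1 /* is_add */ );

  result.key = 42;
  if (clib_bihash_search_8_8 (&h, &result, &result) == 0)
    ASSERT (result.value == 0x1234);

  clib_bihash_add_del_8_8 (&h, &kv, 0 /* is_add = 0: delete */ );
  clib_bihash_free_8_8 (&h);
}

The return values follow the code above: 0 on success, -1 when deleting from an empty bucket, -3 when the key to delete is not found; adding an existing key overwrites it in place, as the first scan loop shows.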
diff --git a/vppinfra/vppinfra/bihash_template.h b/vppinfra/vppinfra/bihash_template.h
index 5f80e7af044..07c3e7daec7 100644
--- a/vppinfra/vppinfra/bihash_template.h
+++ b/vppinfra/vppinfra/bihash_template.h
@@ -16,7 +16,7 @@
/** @if DOCUMENTATION_IS_IN_BIHASH_DOC_H */
-/*
+/*
* Note: to instantiate the template multiple times in a single file,
* #undef __included_bihash_template_h__...
*/
@@ -39,22 +39,27 @@
#define __bvt(a,b) _bvt(a,b)
#define BVT(a) __bvt(a,BIHASH_TYPE)
-typedef struct BV(clib_bihash_value) {
- union {
- BVT(clib_bihash_kv) kvp[BIHASH_KVP_PER_PAGE];
- struct BV(clib_bihash_value) * next_free;
+typedef struct BV (clib_bihash_value)
+{
+ union
+ {
+ BVT (clib_bihash_kv) kvp[BIHASH_KVP_PER_PAGE];
+ struct BV (clib_bihash_value) * next_free;
};
-} BVT(clib_bihash_value);
+} BVT (clib_bihash_value);
-/*
+/*
* This is shared across all uses of the template, so it needs
* a "personal" #include recursion block
*/
#ifndef __defined_clib_bihash_bucket_t__
#define __defined_clib_bihash_bucket_t__
-typedef struct {
- union {
- struct {
+typedef struct
+{
+ union
+ {
+ struct
+ {
u32 offset;
u8 pad[3];
u8 log2_pages;
@@ -64,78 +69,77 @@ typedef struct {
} clib_bihash_bucket_t;
#endif /* __defined_clib_bihash_bucket_t__ */
-typedef struct {
- BVT(clib_bihash_value) * values;
- clib_bihash_bucket_t * buckets;
- volatile u32 * writer_lock;
+typedef struct
+{
+ BVT (clib_bihash_value) * values;
+ clib_bihash_bucket_t *buckets;
+ volatile u32 *writer_lock;
- BVT(clib_bihash_value) ** working_copies;
+ BVT (clib_bihash_value) ** working_copies;
clib_bihash_bucket_t saved_bucket;
u32 nbuckets;
u32 log2_nbuckets;
- u8 * name;
+ u8 *name;
- BVT(clib_bihash_value) **freelists;
- void * mheap;
+ BVT (clib_bihash_value) ** freelists;
+ void *mheap;
-} BVT(clib_bihash);
+} BVT (clib_bihash);
-static inline void *
-BV(clib_bihash_get_value) (BVT(clib_bihash) * h, uword offset)
+static inline void *BV (clib_bihash_get_value) (BVT (clib_bihash) * h,
+ uword offset)
{
- u8 * hp = h->mheap;
- u8 * vp = hp + offset;
+ u8 *hp = h->mheap;
+ u8 *vp = hp + offset;
return (void *) vp;
}
-static inline uword BV(clib_bihash_get_offset) (BVT(clib_bihash) * h, void * v)
+static inline uword BV (clib_bihash_get_offset) (BVT (clib_bihash) * h,
+ void *v)
{
- u8 * hp, * vp;
+ u8 *hp, *vp;
hp = (u8 *) h->mheap;
vp = (u8 *) v;
- ASSERT((vp - hp) < 0x100000000ULL);
+ ASSERT ((vp - hp) < 0x100000000ULL);
return vp - hp;
}
-void BV(clib_bihash_init)
- (BVT(clib_bihash) * h, char * name, u32 nbuckets, uword memory_size);
+void BV (clib_bihash_init)
+ (BVT (clib_bihash) * h, char *name, u32 nbuckets, uword memory_size);
-void BV(clib_bihash_free)
- (BVT(clib_bihash) * h);
+void BV (clib_bihash_free) (BVT (clib_bihash) * h);
-int BV(clib_bihash_add_del) (BVT(clib_bihash) * h,
- BVT(clib_bihash_kv) * add_v,
- int is_add);
-int BV(clib_bihash_search) (BVT(clib_bihash) * h,
- BVT(clib_bihash_kv) * search_v,
- BVT(clib_bihash_kv) * return_v);
+int BV (clib_bihash_add_del) (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * add_v, int is_add);
+int BV (clib_bihash_search) (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * search_v,
+ BVT (clib_bihash_kv) * return_v);
-void BV(clib_bihash_foreach_key_value_pair) (BVT(clib_bihash) * h,
- void *callback,
- void *arg);
+void BV (clib_bihash_foreach_key_value_pair) (BVT (clib_bihash) * h,
+ void *callback, void *arg);
-format_function_t BV(format_bihash);
-format_function_t BV(format_bihash_kvp);
+format_function_t BV (format_bihash);
+format_function_t BV (format_bihash_kvp);
-static inline int BV(clib_bihash_search_inline)
- (BVT(clib_bihash) * h, BVT(clib_bihash_kv) * kvp)
+static inline int BV (clib_bihash_search_inline)
+ (BVT (clib_bihash) * h, BVT (clib_bihash_kv) * kvp)
{
u64 hash;
u32 bucket_index;
uword value_index;
- BVT(clib_bihash_value) * v;
- clib_bihash_bucket_t * b;
+ BVT (clib_bihash_value) * v;
+ clib_bihash_bucket_t *b;
int i;
- hash = BV(clib_bihash_hash) (kvp);
+ hash = BV (clib_bihash_hash) (kvp);
- bucket_index = hash & (h->nbuckets-1);
+ bucket_index = hash & (h->nbuckets - 1);
b = &h->buckets[bucket_index];
if (b->offset == 0)
@@ -143,38 +147,37 @@ static inline int BV(clib_bihash_search_inline)
hash >>= h->log2_nbuckets;
- v = BV(clib_bihash_get_value) (h, b->offset);
- value_index = hash & ((1<<b->log2_pages)-1);
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ value_index = hash & ((1 << b->log2_pages) - 1);
v += value_index;
-
+
for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
{
- if (BV(clib_bihash_key_compare)(v->kvp[i].key, kvp->key))
- {
- *kvp = v->kvp[i];
- return 0;
- }
+ if (BV (clib_bihash_key_compare) (v->kvp[i].key, kvp->key))
+ {
+ *kvp = v->kvp[i];
+ return 0;
+ }
}
return -1;
}
-static inline int BV(clib_bihash_search_inline_2)
- (BVT(clib_bihash) * h,
- BVT(clib_bihash_kv) *search_key,
- BVT(clib_bihash_kv) *valuep)
+static inline int BV (clib_bihash_search_inline_2)
+ (BVT (clib_bihash) * h,
+ BVT (clib_bihash_kv) * search_key, BVT (clib_bihash_kv) * valuep)
{
u64 hash;
u32 bucket_index;
uword value_index;
- BVT(clib_bihash_value) * v;
- clib_bihash_bucket_t * b;
+ BVT (clib_bihash_value) * v;
+ clib_bihash_bucket_t *b;
int i;
- ASSERT(valuep);
+ ASSERT (valuep);
- hash = BV(clib_bihash_hash) (search_key);
+ hash = BV (clib_bihash_hash) (search_key);
- bucket_index = hash & (h->nbuckets-1);
+ bucket_index = hash & (h->nbuckets - 1);
b = &h->buckets[bucket_index];
if (b->offset == 0)
@@ -182,17 +185,17 @@ static inline int BV(clib_bihash_search_inline_2)
hash >>= h->log2_nbuckets;
- v = BV(clib_bihash_get_value) (h, b->offset);
- value_index = hash & ((1<<b->log2_pages)-1);
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ value_index = hash & ((1 << b->log2_pages) - 1);
v += value_index;
-
+
for (i = 0; i < BIHASH_KVP_PER_PAGE; i++)
{
- if (BV(clib_bihash_key_compare)(v->kvp[i].key, search_key->key))
- {
- *valuep = v->kvp[i];
- return 0;
- }
+ if (BV (clib_bihash_key_compare) (v->kvp[i].key, search_key->key))
+ {
+ *valuep = v->kvp[i];
+ return 0;
+ }
}
return -1;
}
@@ -201,3 +204,11 @@ static inline int BV(clib_bihash_search_inline_2)
#endif /* __included_bihash_template_h__ */
/** @endif */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
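The foreach_key_value_pair walker declared above takes an untyped callback pointer and hands it each live (k,v) pair. A small sketch, again assuming the hypothetical 8_8 instantiation used in the previous example; count_cb and count_entries are illustrative names:

#include <vppinfra/bihash_8_8.h>

static void
count_cb (clib_bihash_kv_8_8_t * kvp, void *arg)
{
  u64 *counter = arg;
  (*counter)++;			/* kvp->key and kvp->value are valid here */
}

static u64
count_entries (clib_bihash_8_8_t * h)
{
  u64 n = 0;
  clib_bihash_foreach_key_value_pair_8_8 (h, (void *) count_cb, &n);
  return n;
}

The callback travels through the void *callback parameter and is recovered to its real signature inside the template, as the .c change above shows.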
diff --git a/vppinfra/vppinfra/bitmap.h b/vppinfra/vppinfra/bitmap.h
index 80f1b4f191b..4c27820d334 100644
--- a/vppinfra/vppinfra/bitmap.h
+++ b/vppinfra/vppinfra/bitmap.h
@@ -40,7 +40,7 @@
/** \file
Bitmaps built as vectors of machine words
-*/
+*/
#include <vppinfra/vec.h>
#include <vppinfra/random.h>
@@ -131,7 +131,7 @@ _clib_bitmap_remove_trailing_zeros (uword * a)
}
/** Sets the ith bit of a bitmap to new_value.
- No sanity checking. Be careful.
+ No sanity checking. Be careful.
@param a - pointer to the bitmap
@param i - the bit position to interrogate
@param new_value - new value for the bit
@@ -150,7 +150,7 @@ clib_bitmap_set_no_check (uword * a, uword i, uword new_value)
ai = a[i0];
old_value = (ai & bit) != 0;
- ai &= ~ bit;
+ ai &= ~bit;
ai |= ((uword) (new_value != 0)) << i1;
a[i0] = ai;
return old_value;
@@ -256,7 +256,8 @@ clib_bitmap_get_multiple (uword * bitmap, uword i, uword n_bits)
if (i1 + n_bits > BITS (bitmap[0]) && i0 < l)
{
n_bits -= BITS (bitmap[0]) - i1;
- result |= (bitmap[i0] & (((uword) 1 << n_bits) - 1)) << (BITS (bitmap[0]) - i1);
+ result |=
+ (bitmap[i0] & (((uword) 1 << n_bits) - 1)) << (BITS (bitmap[0]) - i1);
}
return result;
@@ -380,7 +381,8 @@ do { \
@param ai - pointer to the bitmap
@returns lowest numbered set bit, or ~0 if the entire bitmap is zero
*/
-always_inline uword clib_bitmap_first_set (uword * ai)
+always_inline uword
+clib_bitmap_first_set (uword * ai)
{
uword i;
for (i = 0; i < vec_len (ai); i++)
@@ -396,11 +398,12 @@ always_inline uword clib_bitmap_first_set (uword * ai)
@param ai - pointer to the bitmap
@returns lowest numbered set bit, or ~0 if the entire bitmap is zero
*/
-always_inline uword clib_bitmap_last_set (uword * ai)
+always_inline uword
+clib_bitmap_last_set (uword * ai)
{
uword i;
- for (i = vec_len (ai); i > 0 ; i--)
+ for (i = vec_len (ai); i > 0; i--)
{
uword x = ai[i - 1];
if (x != 0)
@@ -450,8 +453,7 @@ clib_bitmap_count_set_bits (uword * ai)
@param bi - pointer to the source bitmap
@returns ai = ai and bi. ai is modified, bi is not modified
*/
-always_inline uword *
-clib_bitmap_and (uword * ai, uword * bi);
+always_inline uword *clib_bitmap_and (uword * ai, uword * bi);
/** Logical operator across two bitmaps
@@ -459,8 +461,7 @@ clib_bitmap_and (uword * ai, uword * bi);
@param bi - pointer to the source bitmap
@returns ai = ai & ~bi. ai is modified, bi is not modified
*/
-always_inline uword *
-clib_bitmap_andnot (uword * ai, uword * bi);
+always_inline uword *clib_bitmap_andnot (uword * ai, uword * bi);
/** Logical operator across two bitmaps
@@ -468,16 +469,14 @@ clib_bitmap_andnot (uword * ai, uword * bi);
@param bi - pointer to the source bitmap
@returns ai = ai & ~bi. ai is modified, bi is not modified
*/
-always_inline uword *
-clib_bitmap_or (uword * ai, uword * bi);
+always_inline uword *clib_bitmap_or (uword * ai, uword * bi);
/** Logical operator across two bitmaps
@param ai - pointer to the destination bitmap
@param bi - pointer to the source bitmap
@returns ai = ai or bi. ai is modified, bi is not modified
*/
-always_inline uword *
-clib_bitmap_or (uword * ai, uword * bi);
+always_inline uword *clib_bitmap_or (uword * ai, uword * bi);
/** Logical operator across two bitmaps
@@ -485,8 +484,7 @@ clib_bitmap_or (uword * ai, uword * bi);
@param bi - pointer to the source bitmap
@returns ai = ai xor bi. ai is modified, bi is not modified
*/
-always_inline uword *
-clib_bitmap_xor (uword * ai, uword * bi);
+always_inline uword *clib_bitmap_xor (uword * ai, uword * bi);
/* ALU function definition macro for functions taking two bitmaps. */
#define _(name, body, check_zero) \
@@ -514,20 +512,17 @@ clib_bitmap_##name (uword * ai, uword * bi) \
}
/* ALU functions: */
-_ (and, a = a & b, 1)
-_ (andnot, a = a &~ b, 1)
-_ (or, a = a | b, 0)
-_ (xor, a = a ^ b, 1)
+_(and, a = a & b, 1)
+_(andnot, a = a & ~b, 1) _(or, a = a | b, 0) _(xor, a = a ^ b, 1)
#undef _
-
/** Logical operator across two bitmaps which duplicates the first bitmap
@param ai - pointer to the destination bitmap
@param bi - pointer to the source bitmap
@returns aiDup = ai and bi. Neither ai nor bi are modified
*/
-always_inline uword *
-clib_bitmap_dup_and (uword * ai, uword * bi);
+ always_inline uword *
+ clib_bitmap_dup_and (uword * ai, uword * bi);
/** Logical operator across two bitmaps which duplicates the first bitmap
@@ -535,8 +530,8 @@ clib_bitmap_dup_and (uword * ai, uword * bi);
@param bi - pointer to the source bitmap
@returns aiDup = ai & ~bi. Neither ai nor bi are modified
*/
-always_inline uword *
-clib_bitmap_dup_andnot (uword * ai, uword * bi);
+ always_inline uword *
+ clib_bitmap_dup_andnot (uword * ai, uword * bi);
/** Logical operator across two bitmaps which duplicates the first bitmap
@@ -544,8 +539,8 @@ clib_bitmap_dup_andnot (uword * ai, uword * bi);
@param bi - pointer to the source bitmap
@returns aiDup = ai or bi. Neither ai nor bi are modified
*/
-always_inline uword *
-clib_bitmap_dup_or (uword * ai, uword * bi);
+ always_inline uword *
+ clib_bitmap_dup_or (uword * ai, uword * bi);
/** Logical operator across two bitmaps which duplicates the first bitmap
@@ -553,18 +548,18 @@ clib_bitmap_dup_or (uword * ai, uword * bi);
@param bi - pointer to the source bitmap
@returns aiDup = ai xor bi. Neither ai nor bi are modified
*/
-always_inline uword *
-clib_bitmap_dup_xor (uword * ai, uword * bi);
+ always_inline uword *
+ clib_bitmap_dup_xor (uword * ai, uword * bi);
#define _(name) \
always_inline uword * \
clib_bitmap_dup_##name (uword * ai, uword * bi) \
{ return clib_bitmap_##name (clib_bitmap_dup (ai), bi); }
-_ (and);
-_ (andnot);
-_ (or);
-_ (xor);
+_(and);
+_(andnot);
+_(or);
+_(xor);
#undef _
@@ -587,21 +582,17 @@ clib_bitmap_##name (uword * ai, uword i) \
}
/* ALU functions immediate: */
-_ (andi, a = a & b, 1)
-_ (andnoti, a = a &~ b, 1)
-_ (ori, a = a | b, 0)
-_ (xori, a = a ^ b, 1)
-
+_(andi, a = a & b, 1)
+_(andnoti, a = a & ~b, 1) _(ori, a = a | b, 0) _(xori, a = a ^ b, 1)
#undef _
-
/** Return a random bitmap of the requested length
@param ai - pointer to the destination bitmap
@param n_bits - number of bits to allocate
@param [in/out] seed - pointer to the random number seed
@returns a reasonably random bitmap based. See random.h.
*/
-always_inline uword *
-clib_bitmap_random (uword * ai, uword n_bits, u32 * seed)
+ always_inline uword *
+ clib_bitmap_random (uword * ai, uword n_bits, u32 * seed)
{
vec_reset_length (ai);
@@ -632,7 +623,7 @@ clib_bitmap_random (uword * ai, uword n_bits, u32 * seed)
/** Return the next set bit in a bitmap starting at bit i
@param ai - pointer to the bitmap
@param i - first bit position to test
- @returns first set bit position at or after i,
+ @returns first set bit position at or after i,
~0 if no further set bits are found
*/
always_inline uword
@@ -641,7 +632,7 @@ clib_bitmap_next_set (uword * ai, uword i)
uword i0 = i / BITS (ai[0]);
uword i1 = i % BITS (ai[0]);
uword t;
-
+
if (i0 < vec_len (ai))
{
t = (ai[i0] >> i1) << i1;
@@ -670,7 +661,7 @@ clib_bitmap_next_clear (uword * ai, uword i)
uword i0 = i / BITS (ai[0]);
uword i1 = i % BITS (ai[0]);
uword t;
-
+
if (i0 < vec_len (ai))
{
t = (~ai[i0] >> i1) << i1;
@@ -679,7 +670,7 @@ clib_bitmap_next_clear (uword * ai, uword i)
for (i0++; i0 < vec_len (ai); i0++)
{
- t = ~ai[i0];
+ t = ~ai[i0];
if (t)
return log2_first_set (t) + i0 * BITS (ai[0]);
}
@@ -687,54 +678,54 @@ clib_bitmap_next_clear (uword * ai, uword i)
return i;
}
-/** unformat a list of bit ranges into a bitmap (eg "0-3,5-7,11" )
+/** unformat a list of bit ranges into a bitmap (eg "0-3,5-7,11" )
uword * bitmap;
rv = unformat ("%U", unformat_bitmap_list, &bitmap);
Standard unformat_function_t arguments
- @param input - pointer an unformat_input_t
+ @param input - pointer an unformat_input_t
@param va - varargs list comprising a single uword **
@returns 1 on success, 0 on failure
*/
static inline uword
-unformat_bitmap_list(unformat_input_t * input, va_list * va)
+unformat_bitmap_list (unformat_input_t * input, va_list * va)
{
- uword ** bitmap_return = va_arg (* va, uword **);
- uword * bitmap = 0;
+ uword **bitmap_return = va_arg (*va, uword **);
+ uword *bitmap = 0;
- u32 a,b;
+ u32 a, b;
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
int i;
if (unformat (input, "%u-%u,", &a, &b))
- ;
+ ;
else if (unformat (input, "%u,", &a))
- b = a;
+ b = a;
else if (unformat (input, "%u-%u", &a, &b))
- ;
+ ;
else if (unformat (input, "%u", &a))
- b = a;
+ b = a;
else if (bitmap)
- {
- unformat_put_input(input);
+ {
+ unformat_put_input (input);
break;
}
else
- goto error;
+ goto error;
if (b < a)
- goto error;
+ goto error;
for (i = a; i <= b; i++)
- bitmap = clib_bitmap_set(bitmap, i, 1);
+ bitmap = clib_bitmap_set (bitmap, i, 1);
}
*bitmap_return = bitmap;
return 1;
error:
- clib_bitmap_free(bitmap);
+ clib_bitmap_free (bitmap);
return 0;
}
@@ -750,26 +741,34 @@ error:
@returns string under construction
*/
static inline u8 *
-format_bitmap_hex(u8 * s, va_list * args)
+format_bitmap_hex (u8 * s, va_list * args)
{
- uword * bitmap = va_arg (*args, uword *);
+ uword *bitmap = va_arg (*args, uword *);
int i, is_trailing_zero = 1;
if (!bitmap)
- return format(s, "0");
+ return format (s, "0");
i = vec_bytes (bitmap) * 2;
while (i > 0)
{
- u8 x = clib_bitmap_get_multiple(bitmap, --i * 4, 4);
+ u8 x = clib_bitmap_get_multiple (bitmap, --i * 4, 4);
if (x && is_trailing_zero)
- is_trailing_zero = 0;
+ is_trailing_zero = 0;
if (x || !is_trailing_zero)
- s = format(s, "%x", x);
+ s = format (s, "%x", x);
}
return s;
}
#endif /* included_clib_bitmap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
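unformat_bitmap_list and format_bitmap_hex above are typically paired, e.g. to parse a core list from a CLI and print it back; a minimal sketch, where the input string is illustrative:

#include <vppinfra/bitmap.h>
#include <vppinfra/format.h>
#include <vppinfra/error_bootstrap.h>

static void
bitmap_list_example (void)
{
  unformat_input_t input;
  uword *bitmap = 0;
  u8 *s = 0;

  unformat_init_string (&input, "0-3,5-7,11", sizeof ("0-3,5-7,11") - 1);
  if (unformat (&input, "%U", unformat_bitmap_list, &bitmap))
    {
      ASSERT (clib_bitmap_get (bitmap, 5) == 1);
      ASSERT (clib_bitmap_get (bitmap, 4) == 0);
      s = format (0, "%U", format_bitmap_hex, bitmap);	/* "8ef": bits 0-3,5-7,11 */
    }
  unformat_free (&input);
  clib_bitmap_free (bitmap);
  vec_free (s);
}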
diff --git a/vppinfra/vppinfra/bitops.h b/vppinfra/vppinfra/bitops.h
index 03802eb215c..ab91b8ae443 100644
--- a/vppinfra/vppinfra/bitops.h
+++ b/vppinfra/vppinfra/bitops.h
@@ -41,7 +41,8 @@
#include <vppinfra/clib.h>
/* Population count from Hacker's Delight. */
-always_inline uword count_set_bits (uword x)
+always_inline uword
+count_set_bits (uword x)
{
#if uword_bits == 64
const uword c1 = 0x5555555555555555;
@@ -69,11 +70,12 @@ always_inline uword count_set_bits (uword x)
x = x + (x >> (uword) 32);
#endif
- return x & (2*BITS (uword) - 1);
+ return x & (2 * BITS (uword) - 1);
}
/* Based on "Hacker's Delight" code from GLS. */
-typedef struct {
+typedef struct
+{
uword masks[1 + log2_uword_bits];
} compress_main_t;
@@ -110,13 +112,19 @@ compress_bits (compress_main_t * cm, uword x)
uword q, r;
r = x & cm->masks[0];
- q = r & cm->masks[1]; r ^= q ^ (q >> 1);
- q = r & cm->masks[2]; r ^= q ^ (q >> 2);
- q = r & cm->masks[3]; r ^= q ^ (q >> 4);
- q = r & cm->masks[4]; r ^= q ^ (q >> 8);
- q = r & cm->masks[5]; r ^= q ^ (q >> 16);
+ q = r & cm->masks[1];
+ r ^= q ^ (q >> 1);
+ q = r & cm->masks[2];
+ r ^= q ^ (q >> 2);
+ q = r & cm->masks[3];
+ r ^= q ^ (q >> 4);
+ q = r & cm->masks[4];
+ r ^= q ^ (q >> 8);
+ q = r & cm->masks[5];
+ r ^= q ^ (q >> 16);
#if uword_bits > 32
- q = r & cm->masks[6]; r ^= q ^ (q >> (uword) 32);
+ q = r & cm->masks[6];
+ r ^= q ^ (q >> (uword) 32);
#endif
return r;
@@ -124,11 +132,15 @@ compress_bits (compress_main_t * cm, uword x)
always_inline uword
rotate_left (uword x, uword i)
-{ return (x << i) | (x >> (BITS (i) - i)); }
+{
+ return (x << i) | (x >> (BITS (i) - i));
+}
always_inline uword
rotate_right (uword x, uword i)
-{ return (x >> i) | (x << (BITS (i) - i)); }
+{
+ return (x >> i) | (x << (BITS (i) - i));
+}
/* Returns snoob from Hacker's Delight. Next highest number
with same number of set bits. */
@@ -157,3 +169,11 @@ do { \
} while (0)
#endif /* included_clib_bitops_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
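count_set_bits and the rotate helpers reformatted above are pure word operations; a few representative values:

#include <vppinfra/bitops.h>
#include <vppinfra/error_bootstrap.h>

static void
bitops_example (void)
{
  ASSERT (count_set_bits (0xf0) == 4);		/* population count */
  ASSERT (rotate_left ((uword) 1, 3) == 8);
  ASSERT (rotate_right ((uword) 8, 3) == 1);
}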
diff --git a/vppinfra/vppinfra/byte_order.h b/vppinfra/vppinfra/byte_order.h
index b2c26e5fdee..b263538c6fe 100644
--- a/vppinfra/vppinfra/byte_order.h
+++ b/vppinfra/vppinfra/byte_order.h
@@ -55,54 +55,58 @@
always_inline u16
clib_byte_swap_u16 (u16 x)
-{ return (x >> 8) | (x << 8); }
+{
+ return (x >> 8) | (x << 8);
+}
always_inline i16
clib_byte_swap_i16 (i16 x)
-{ return clib_byte_swap_u16 (x); }
+{
+ return clib_byte_swap_u16 (x);
+}
always_inline u32
clib_byte_swap_u32 (u32 x)
{
#if defined (i386) || defined (__x86_64__)
- if (! __builtin_constant_p (x))
+ if (!__builtin_constant_p (x))
{
- asm volatile ("bswap %0" : "=r" (x) : "0" (x));
+ asm volatile ("bswap %0":"=r" (x):"0" (x));
return x;
}
#endif
- return ((x << 24)
- | ((x & 0xff00) << 8)
- | ((x >> 8) & 0xff00)
- | (x >> 24));
+ return ((x << 24) | ((x & 0xff00) << 8) | ((x >> 8) & 0xff00) | (x >> 24));
}
always_inline i32
clib_byte_swap_i32 (i32 x)
-{ return clib_byte_swap_u32 (x); }
+{
+ return clib_byte_swap_u32 (x);
+}
always_inline u64
clib_byte_swap_u64 (u64 x)
{
#if defined (__x86_64__)
- if (! __builtin_constant_p (x))
+ if (!__builtin_constant_p (x))
{
- asm volatile ("bswapq %0" : "=r" (x) : "0" (x));
+ asm volatile ("bswapq %0":"=r" (x):"0" (x));
return x;
}
#endif
#define _(x,n,i) \
((((x) >> (8*(i))) & 0xff) << (8*((n)-(i)-1)))
- return (_ (x, 8, 0) | _ (x, 8, 1)
- | _ (x, 8, 2) | _ (x, 8, 3)
- | _ (x, 8, 4) | _ (x, 8, 5)
- | _ (x, 8, 6) | _ (x, 8, 7));
+ return (_(x, 8, 0) | _(x, 8, 1)
+ | _(x, 8, 2) | _(x, 8, 3)
+ | _(x, 8, 4) | _(x, 8, 5) | _(x, 8, 6) | _(x, 8, 7));
#undef _
}
always_inline i64
clib_byte_swap_i64 (i64 x)
-{ return clib_byte_swap_u64 (x); }
+{
+ return clib_byte_swap_u64 (x);
+}
#define _(sex,type) \
/* HOST -> SEX */ \
@@ -142,22 +146,15 @@ clib_##sex##_to_host_unaligned_mem_##type (type * x) \
{ return clib_host_to_##sex##_unaligned_mem_##type (x); }
#ifndef __cplusplus
-_ (little, u16)
-_ (little, u32)
-_ (little, u64)
-_ (little, i16)
-_ (little, i32)
-_ (little, i64)
-_ (big, u16)
-_ (big, u32)
-_ (big, u64)
-_ (big, i16)
-_ (big, i32)
-_ (big, i64)
+_(little, u16)
+_(little, u32)
+_(little, u64)
+_(little, i16)
+_(little, i32)
+_(little, i64)
+_(big, u16) _(big, u32) _(big, u64) _(big, i16) _(big, i32) _(big, i64)
#endif
-
#undef _
-
/* Network "net" alias for "big". */
#define _(type) \
always_inline type \
@@ -183,16 +180,23 @@ clib_host_to_net_mem_##type (type * x) \
always_inline type \
clib_host_to_net_unaligned_mem_##type (type * x) \
{ return clib_host_to_big_unaligned_mem_##type (x); }
-
#ifndef __cplusplus
-_ (u16);
-_ (i16);
-_ (u32);
-_ (i32);
-_ (u64);
-_ (i64);
+ _(u16);
+_(i16);
+_(u32);
+_(i32);
+_(u64);
+_(i64);
#endif
#undef _
#endif /* included_clib_byte_order_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
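A short sketch of the byte-swap and host/net conversion helpers above; on a little-endian host the host_to_net calls swap, on a big-endian host they are no-ops:

#include <vppinfra/byte_order.h>
#include <vppinfra/error_bootstrap.h>

static void
byte_order_example (void)
{
  ASSERT (clib_byte_swap_u16 (0x1234) == 0x3412);
  ASSERT (clib_byte_swap_u32 (0x0a000001) == 0x0100000a);

  u16 port_n = clib_host_to_net_u16 (0x1234);	/* network byte order */
  ASSERT (clib_net_to_host_u16 (port_n) == 0x1234);
}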
diff --git a/vppinfra/vppinfra/cache.h b/vppinfra/vppinfra/cache.h
index 92e1e8cf777..8e1f9483bde 100644
--- a/vppinfra/vppinfra/cache.h
+++ b/vppinfra/vppinfra/cache.h
@@ -40,8 +40,8 @@
#include <vppinfra/error_bootstrap.h>
-/*
- * Allow CFLAGS to override the arch-specific cache line size
+/*
+ * Allow CFLAGS to override the arch-specific cache line size
*/
#ifndef CLIB_LOG2_CACHE_LINE_BYTES
@@ -94,3 +94,11 @@ do { \
#endif /* included_clib_cache_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/clib.h b/vppinfra/vppinfra/clib.h
index 9e9a97789db..0386c756833 100644
--- a/vppinfra/vppinfra/clib.h
+++ b/vppinfra/vppinfra/clib.h
@@ -76,7 +76,7 @@
/* Used to pack structure elements. */
#define CLIB_PACKED(x) x __attribute__ ((packed))
-#define CLIB_UNUSED(x) x __attribute__ ((unused))
+#define CLIB_UNUSED(x) x __attribute__ ((unused))
#define never_inline __attribute__ ((__noinline__))
@@ -171,16 +171,18 @@
#endif /* count_leading_zeros */
#if defined (count_leading_zeros)
-always_inline uword min_log2 (uword x)
+always_inline uword
+min_log2 (uword x)
{
uword n;
count_leading_zeros (n, x);
return BITS (uword) - n - 1;
}
#else
-always_inline uword min_log2 (uword x)
+always_inline uword
+min_log2 (uword x)
{
- uword a = x, b = BITS(uword)/2, c = 0, r = 0;
+ uword a = x, b = BITS (uword) / 2, c = 0, r = 0;
/* Reduce x to 4 bit result. */
#define _ \
@@ -191,29 +193,33 @@ always_inline uword min_log2 (uword x)
b /= 2; \
}
- if (BITS (uword) > 32) _;
- _; _; _;
+ if (BITS (uword) > 32)
+ _;
+ _;
+ _;
+ _;
#undef _
/* Do table lookup on 4 bit partial. */
if (BITS (uword) > 32)
{
const u64 table = 0x3333333322221104LL;
- uword t = (table >> (4*a)) & 0xf;
+ uword t = (table >> (4 * a)) & 0xf;
r = t < 4 ? r + t : ~0;
}
else
{
const u32 table = 0x22221104;
- uword t = (a & 8) ? 3 : ((table >> (4*a)) & 0xf);
+ uword t = (a & 8) ? 3 : ((table >> (4 * a)) & 0xf);
r = t < 4 ? r + t : ~0;
- }
+ }
return r;
}
#endif
-always_inline uword max_log2 (uword x)
+always_inline uword
+max_log2 (uword x)
{
uword l = min_log2 (x);
if (x > ((uword) 1 << l))
@@ -221,7 +227,8 @@ always_inline uword max_log2 (uword x)
return l;
}
-always_inline u64 min_log2_u64 (u64 x)
+always_inline u64
+min_log2_u64 (u64 x)
{
if (BITS (uword) == 64)
return min_log2 (x);
@@ -230,42 +237,57 @@ always_inline u64 min_log2_u64 (u64 x)
uword l, y;
y = x;
l = 0;
- if (y == 0) {
- l += 32;
- x >>= 32;
- }
+ if (y == 0)
+ {
+ l += 32;
+ x >>= 32;
+ }
l += min_log2 (x);
return l;
}
}
-always_inline uword pow2_mask (uword x)
-{ return ((uword) 1 << x) - (uword) 1; }
+always_inline uword
+pow2_mask (uword x)
+{
+ return ((uword) 1 << x) - (uword) 1;
+}
-always_inline uword max_pow2 (uword x)
+always_inline uword
+max_pow2 (uword x)
{
word y = (word) 1 << min_log2 (x);
- if (x > y) y *= 2;
+ if (x > y)
+ y *= 2;
return y;
}
-always_inline uword is_pow2 (uword x)
-{ return 0 == (x & (x - 1)); }
+always_inline uword
+is_pow2 (uword x)
+{
+ return 0 == (x & (x - 1));
+}
-always_inline uword round_pow2 (uword x, uword pow2)
+always_inline uword
+round_pow2 (uword x, uword pow2)
{
- return (x + pow2 - 1) &~ (pow2 - 1);
+ return (x + pow2 - 1) & ~(pow2 - 1);
}
-always_inline u64 round_pow2_u64 (u64 x, u64 pow2)
+always_inline u64
+round_pow2_u64 (u64 x, u64 pow2)
{
- return (x + pow2 - 1) &~ (pow2 - 1);
+ return (x + pow2 - 1) & ~(pow2 - 1);
}
-always_inline uword first_set (uword x)
-{ return x & -x; }
+always_inline uword
+first_set (uword x)
+{
+ return x & -x;
+}
-always_inline uword log2_first_set (uword x)
+always_inline uword
+log2_first_set (uword x)
{
uword result;
#ifdef count_trailing_zeros
@@ -276,14 +298,23 @@ always_inline uword log2_first_set (uword x)
return result;
}
-always_inline f64 flt_round_down (f64 x)
-{ return (int) x; }
+always_inline f64
+flt_round_down (f64 x)
+{
+ return (int) x;
+}
-always_inline word flt_round_nearest (f64 x)
-{ return (word) (x + .5); }
+always_inline word
+flt_round_nearest (f64 x)
+{
+ return (word) (x + .5);
+}
-always_inline f64 flt_round_to_multiple (f64 x, f64 f)
-{ return f * flt_round_nearest (x / f); }
+always_inline f64
+flt_round_to_multiple (f64 x, f64 f)
+{
+ return f * flt_round_nearest (x / f);
+}
#define clib_max(x,y) \
({ \
@@ -307,16 +338,22 @@ always_inline f64 flt_round_to_multiple (f64 x, f64 f)
/* Standard standalone-only function declarations. */
#ifndef CLIB_UNIX
-void clib_standalone_init (void * memory, uword memory_bytes);
+void clib_standalone_init (void *memory, uword memory_bytes);
-void qsort (void * base, uword n, uword size,
- int (*) (const void *, const void *));
+void qsort (void *base, uword n, uword size,
+ int (*)(const void *, const void *));
#endif
/* Stack backtrace. */
uword
-clib_backtrace (uword * callers,
- uword max_callers,
- uword n_frames_to_skip);
+clib_backtrace (uword * callers, uword max_callers, uword n_frames_to_skip);
#endif /* included_clib_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
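The log2/pow2 helpers reformatted above, with a few representative values:

#include <vppinfra/clib.h>
#include <vppinfra/error_bootstrap.h>

static void
log2_example (void)
{
  ASSERT (min_log2 (4096) == 12);
  ASSERT (max_log2 (4097) == 13);	/* rounds up for non-powers of two */
  ASSERT (max_pow2 (4097) == 8192);
  ASSERT (round_pow2 (100, 64) == 128);
  ASSERT (pow2_mask (12) == 0xfff);
  ASSERT (is_pow2 (4096) && !is_pow2 (4097));
}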
diff --git a/vppinfra/vppinfra/cpu.c b/vppinfra/vppinfra/cpu.c
index 9008ee3dd89..70b39214817 100644
--- a/vppinfra/vppinfra/cpu.c
+++ b/vppinfra/vppinfra/cpu.c
@@ -42,7 +42,7 @@ u8 *
format_cpu_uarch (u8 * s, va_list * args)
{
#if __x86_64__
- u32 __attribute__((unused)) eax, ebx, ecx, edx;
+ u32 __attribute__ ((unused)) eax, ebx, ecx, edx;
u8 model, family;
if (__get_cpuid (1, &eax, &ebx, &ecx, &edx) == 0)
@@ -54,7 +54,7 @@ format_cpu_uarch (u8 * s, va_list * args)
#define _(f,m,a,c) if ((model == m) && (family == f)) return format(s, "%s (%s)", a, c);
foreach_x86_cpu_uarch
#undef _
- return format (s, "unknown (family 0x%02x model 0x%02x)", family, model);
+ return format (s, "unknown (family 0x%02x model 0x%02x)", family, model);
#else /* ! __x86_64__ */
return format (s, "unknown");
@@ -65,9 +65,9 @@ u8 *
format_cpu_model_name (u8 * s, va_list * args)
{
#if __x86_64__
- u32 __attribute__((unused)) eax, ebx, ecx, edx;
- u8 * name = 0;
- u32 * name_u32;
+ u32 __attribute__ ((unused)) eax, ebx, ecx, edx;
+ u8 *name = 0;
+ u32 *name_u32;
if (__get_cpuid (1, &eax, &ebx, &ecx, &edx) == 0)
return format (s, "unknown (missing cpuid)");
@@ -76,7 +76,7 @@ format_cpu_model_name (u8 * s, va_list * args)
if (eax < 0x80000004)
return format (s, "unknown (missing ext feature)");
- vec_validate(name, 48);
+ vec_validate (name, 48);
name_u32 = (u32 *) name;
__get_cpuid (0x80000002, &eax, &ebx, &ecx, &edx);
@@ -98,7 +98,7 @@ format_cpu_model_name (u8 * s, va_list * args)
name_u32[11] = edx;
s = format (s, "%s", name);
- vec_free(name);
+ vec_free (name);
return s;
#else /* ! __x86_64__ */
@@ -113,8 +113,7 @@ format_cpu_flags (u8 * s, va_list * args)
#define _(flag, func, reg, bit) \
if (clib_cpu_supports_ ## flag()) \
s = format (s, #flag " ");
- foreach_x86_64_flags
- return s;
+ foreach_x86_64_flags return s;
#undef _
#else /* ! __x86_64__ */
return format (s, "unknown");
@@ -122,3 +121,11 @@ format_cpu_flags (u8 * s, va_list * args)
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/cpu.h b/vppinfra/vppinfra/cpu.h
index 961af709a63..a88eaa2d9ba 100644
--- a/vppinfra/vppinfra/cpu.h
+++ b/vppinfra/vppinfra/cpu.h
@@ -66,14 +66,14 @@ _ (aes, 1, ecx, 25) \
_ (sha, 7, ebx, 29)
static inline int
-clib_get_cpuid(const u32 lev, u32 * eax, u32 *ebx, u32 * ecx, u32 * edx)
+clib_get_cpuid (const u32 lev, u32 * eax, u32 * ebx, u32 * ecx, u32 * edx)
{
if ((u32) __get_cpuid_max (0x80000000 & lev, 0) < lev)
return 0;
if (lev == 7)
- __cpuid_count(lev, 0, *eax, *ebx, *ecx, *edx);
+ __cpuid_count (lev, 0, *eax, *ebx, *ecx, *edx);
else
- __cpuid(lev, *eax, *ebx, *ecx, *edx);
+ __cpuid (lev, *eax, *ebx, *ecx, *edx);
return 1;
}
@@ -87,12 +87,19 @@ clib_cpu_supports_ ## flag() \
\
return ((reg & (1 << bit)) != 0); \
}
- foreach_x86_64_flags
+foreach_x86_64_flags
#undef _
#endif
-
-format_function_t format_cpu_uarch;
+ format_function_t format_cpu_uarch;
format_function_t format_cpu_model_name;
format_function_t format_cpu_flags;
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
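format_cpu_model_name, format_cpu_uarch and format_cpu_flags take no extra format arguments, and foreach_x86_64_flags expands to one clib_cpu_supports_<flag>() predicate per listed flag (aes and sha appear in the list above). A sketch of typical use; cpu_summary is an illustrative name:

#include <vppinfra/cpu.h>
#include <vppinfra/format.h>

static u8 *
cpu_summary (void)
{
  u8 *s = format (0, "model %U, uarch %U, flags: %U",
		  format_cpu_model_name, format_cpu_uarch, format_cpu_flags);

  if (clib_cpu_supports_aes ())
    {
      /* e.g. select an AES-NI-accelerated code path */
    }
  return s;
}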
diff --git a/vppinfra/vppinfra/dlist.h b/vppinfra/vppinfra/dlist.h
index 0e9e1854bb4..7d09b2bbc7e 100644
--- a/vppinfra/vppinfra/dlist.h
+++ b/vppinfra/vppinfra/dlist.h
@@ -2,7 +2,7 @@
* Copyright (c) 2016 Cisco and/or its affiliates.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
- *
+ *
* You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
@@ -25,32 +25,33 @@
#include <vppinfra/format.h>
#include <vppinfra/cache.h>
-typedef struct {
+typedef struct
+{
u32 next;
u32 prev;
u32 value;
} dlist_elt_t;
-static inline void
+static inline void
clib_dlist_init (dlist_elt_t * pool, u32 index)
{
- dlist_elt_t * head = pool_elt_at_index (pool, index);
+ dlist_elt_t *head = pool_elt_at_index (pool, index);
memset (head, 0xFF, sizeof (*head));
}
-static inline void
+static inline void
clib_dlist_addtail (dlist_elt_t * pool, u32 head_index, u32 new_index)
{
- dlist_elt_t * head = pool_elt_at_index (pool, head_index);
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
u32 old_last_index;
- dlist_elt_t * old_last;
- dlist_elt_t * new;
+ dlist_elt_t *old_last;
+ dlist_elt_t *new;
- ASSERT(head->value == ~0);
+ ASSERT (head->value == ~0);
new = pool_elt_at_index (pool, new_index);
- if (PREDICT_FALSE(head->next == ~0))
+ if (PREDICT_FALSE (head->next == ~0))
{
head->next = head->prev = new_index;
new->next = new->prev = head_index;
@@ -66,19 +67,19 @@ clib_dlist_addtail (dlist_elt_t * pool, u32 head_index, u32 new_index)
head->prev = new_index;
}
-static inline void
+static inline void
clib_dlist_addhead (dlist_elt_t * pool, u32 head_index, u32 new_index)
{
- dlist_elt_t * head = pool_elt_at_index (pool, head_index);
- dlist_elt_t * old_first;
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
+ dlist_elt_t *old_first;
u32 old_first_index;
- dlist_elt_t * new;
-
- ASSERT(head->value == ~0);
+ dlist_elt_t *new;
+
+ ASSERT (head->value == ~0);
new = pool_elt_at_index (pool, new_index);
- if (PREDICT_FALSE(head->next == ~0))
+ if (PREDICT_FALSE (head->next == ~0))
{
head->next = head->prev = new_index;
new->next = new->prev = head_index;
@@ -97,11 +98,11 @@ clib_dlist_addhead (dlist_elt_t * pool, u32 head_index, u32 new_index)
static inline void
clib_dlist_remove (dlist_elt_t * pool, u32 index)
{
- dlist_elt_t * elt = pool_elt_at_index (pool, index);
- dlist_elt_t * next_elt, * prev_elt;
-
+ dlist_elt_t *elt = pool_elt_at_index (pool, index);
+ dlist_elt_t *next_elt, *prev_elt;
+
/* listhead, not so much */
- ASSERT(elt->value != ~0);
+ ASSERT (elt->value != ~0);
next_elt = pool_elt_at_index (pool, elt->next);
prev_elt = pool_elt_at_index (pool, elt->prev);
@@ -112,12 +113,13 @@ clib_dlist_remove (dlist_elt_t * pool, u32 index)
elt->prev = elt->next = ~0;
}
-static inline u32 clib_dlist_remove_head (dlist_elt_t * pool, u32 head_index)
+static inline u32
+clib_dlist_remove_head (dlist_elt_t * pool, u32 head_index)
{
- dlist_elt_t * head = pool_elt_at_index (pool, head_index);
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
u32 rv;
- ASSERT(head->value == ~0);
+ ASSERT (head->value == ~0);
if (head->next == ~0)
return ~0;
@@ -127,12 +129,13 @@ static inline u32 clib_dlist_remove_head (dlist_elt_t * pool, u32 head_index)
return rv;
}
-static inline u32 clib_dlist_remove_tail (dlist_elt_t * pool, u32 head_index)
+static inline u32
+clib_dlist_remove_tail (dlist_elt_t * pool, u32 head_index)
{
- dlist_elt_t * head = pool_elt_at_index (pool, head_index);
+ dlist_elt_t *head = pool_elt_at_index (pool, head_index);
u32 rv;
- ASSERT(head->value == ~0);
+ ASSERT (head->value == ~0);
if (head->prev == ~0)
return ~0;
@@ -143,3 +146,11 @@ static inline u32 clib_dlist_remove_tail (dlist_elt_t * pool, u32 head_index)
}
#endif /* included_dlist_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
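The dlist helpers above chain pool elements by index rather than by pointer, and reserve value == ~0 for list heads. A minimal sketch; the pool setup and the payload value 123 are illustrative:

#include <vppinfra/dlist.h>
#include <vppinfra/pool.h>

static void
dlist_example (void)
{
  dlist_elt_t *pool = 0;
  dlist_elt_t *head, *elt;
  u32 head_index, elt_index;

  pool_get (pool, head);
  head_index = head - pool;
  clib_dlist_init (pool, head_index);	/* heads keep value == ~0 */

  pool_get (pool, elt);
  elt_index = elt - pool;
  clib_dlist_init (pool, elt_index);
  pool_elt_at_index (pool, elt_index)->value = 123;	/* mark as a real element */

  clib_dlist_addtail (pool, head_index, elt_index);
  ASSERT (clib_dlist_remove_head (pool, head_index) == elt_index);
}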
diff --git a/vppinfra/vppinfra/elf.c b/vppinfra/vppinfra/elf.c
index 63c17ec382e..84d6282f1f7 100644
--- a/vppinfra/vppinfra/elf.c
+++ b/vppinfra/vppinfra/elf.c
@@ -44,12 +44,13 @@ elf_swap_verneed_aux (elf_dynamic_version_need_aux_t * n)
}
clib_error_t *
-elf_get_section_by_name (elf_main_t * em, char * section_name, elf_section_t ** result)
+elf_get_section_by_name (elf_main_t * em, char *section_name,
+ elf_section_t ** result)
{
- uword * p;
+ uword *p;
p = hash_get_mem (em->section_by_name, section_name);
- if (! p)
+ if (!p)
return clib_error_return (0, "no such section `%s'", section_name);
*result = vec_elt_at_index (em->sections, p[0]);
@@ -57,18 +58,22 @@ elf_get_section_by_name (elf_main_t * em, char * section_name, elf_section_t **
}
elf_section_t *
-elf_get_section_by_start_address_no_check (elf_main_t * em, uword start_address)
+elf_get_section_by_start_address_no_check (elf_main_t * em,
+ uword start_address)
{
- uword * p = hash_get (em->section_by_start_address, start_address);
+ uword *p = hash_get (em->section_by_start_address, start_address);
return p ? vec_elt_at_index (em->sections, p[0]) : 0;
}
clib_error_t *
-elf_get_section_by_start_address (elf_main_t * em, uword start_address, elf_section_t ** result)
+elf_get_section_by_start_address (elf_main_t * em, uword start_address,
+ elf_section_t ** result)
{
- elf_section_t * s = elf_get_section_by_start_address_no_check (em, start_address);
- if (! s)
- return clib_error_return (0, "no section with address 0x%wx", start_address);
+ elf_section_t *s =
+ elf_get_section_by_start_address_no_check (em, start_address);
+ if (!s)
+ return clib_error_return (0, "no section with address 0x%wx",
+ start_address);
*result = s;
return 0;
}
@@ -77,7 +82,7 @@ static u8 *
format_elf_section_type (u8 * s, va_list * args)
{
elf_section_type_t type = va_arg (*args, elf_section_type_t);
- char * t = 0;
+ char *t = 0;
switch (type)
{
@@ -86,7 +91,7 @@ format_elf_section_type (u8 * s, va_list * args)
#undef _
}
- if (! t)
+ if (!t)
s = format (s, "unknown 0x%x", type);
else
s = format (s, "%s", t);
@@ -96,13 +101,14 @@ format_elf_section_type (u8 * s, va_list * args)
static u8 *
format_elf_section (u8 * s, va_list * args)
{
- elf_main_t * em = va_arg (*args, elf_main_t *);
- elf_section_t * es = va_arg (*args, elf_section_t *);
- elf64_section_header_t * h = &es->header;
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf_section_t *es = va_arg (*args, elf_section_t *);
+ elf64_section_header_t *h = &es->header;
- if (! h)
+ if (!h)
return format (s, "%=40s%=10s%=20s%=8s%=16s%=16s%=16s",
- "Name", "Index", "Type", "Size", "Align", "Address", "File offset");
+ "Name", "Index", "Type", "Size", "Align", "Address",
+ "File offset");
s = format (s, "%-40s%10d%=20U%8Lx%16d%16Lx %Lx-%Lx",
elf_section_name (em, es),
@@ -110,8 +116,7 @@ format_elf_section (u8 * s, va_list * args)
format_elf_section_type, h->type,
h->file_size,
h->align,
- h->exec_address,
- h->file_offset, h->file_offset + h->file_size);
+ h->exec_address, h->file_offset, h->file_offset + h->file_size);
if (h->flags != 0)
{
@@ -128,7 +133,7 @@ static u8 *
format_elf_segment_type (u8 * s, va_list * args)
{
elf_segment_type_t type = va_arg (*args, elf_segment_type_t);
- char * t = 0;
+ char *t = 0;
switch (type)
{
@@ -137,7 +142,7 @@ format_elf_segment_type (u8 * s, va_list * args)
#undef _
}
- if (! t)
+ if (!t)
s = format (s, "unknown 0x%x", type);
else
s = format (s, "%s", t);
@@ -147,19 +152,17 @@ format_elf_segment_type (u8 * s, va_list * args)
static u8 *
format_elf_segment (u8 * s, va_list * args)
{
- elf_segment_t * es = va_arg (*args, elf_segment_t *);
- elf64_segment_header_t * h = &es->header;
+ elf_segment_t *es = va_arg (*args, elf_segment_t *);
+ elf64_segment_header_t *h = &es->header;
- if (! h)
+ if (!h)
return format (s, "%=16s%=16s%=16s%=16s",
"Type", "Virt. Address", "Phys. Address", "Size");
s = format (s, "%=16U%16Lx%16Lx%16Lx%16Lx",
format_elf_segment_type, h->type,
h->virtual_address,
- h->physical_address,
- h->memory_size,
- h->file_offset);
+ h->physical_address, h->memory_size, h->file_offset);
if (h->flags != 0)
{
@@ -177,15 +180,16 @@ format_elf_symbol_binding_and_type (u8 * s, va_list * args)
{
int bt = va_arg (*args, int);
int b, t;
- char * type_string = 0;
- char * binding_string = 0;
-
+ char *type_string = 0;
+ char *binding_string = 0;
+
switch ((b = ((bt >> 4) & 0xf)))
{
#define _(f,n) case n: binding_string = #f; break;
foreach_elf_symbol_binding;
#undef _
- default: break;
+ default:
+ break;
}
switch ((t = ((bt >> 0) & 0xf)))
@@ -193,7 +197,8 @@ format_elf_symbol_binding_and_type (u8 * s, va_list * args)
#define _(f,n) case n: type_string = #f; break;
foreach_elf_symbol_type;
#undef _
- default: break;
+ default:
+ break;
}
if (binding_string)
@@ -213,7 +218,7 @@ static u8 *
format_elf_symbol_visibility (u8 * s, va_list * args)
{
int visibility = va_arg (*args, int);
- char * t = 0;
+ char *t = 0;
switch (visibility)
{
@@ -231,13 +236,13 @@ format_elf_symbol_visibility (u8 * s, va_list * args)
static u8 *
format_elf_symbol_section_name (u8 * s, va_list * args)
{
- elf_main_t * em = va_arg (*args, elf_main_t *);
+ elf_main_t *em = va_arg (*args, elf_main_t *);
int si = va_arg (*args, int);
- char * t = 0;
+ char *t = 0;
if (si < vec_len (em->sections))
{
- elf_section_t * es = vec_elt_at_index (em->sections, si);
+ elf_section_t *es = vec_elt_at_index (em->sections, si);
return format (s, "%s", elf_section_name (em, es));
}
@@ -260,15 +265,17 @@ format_elf_symbol_section_name (u8 * s, va_list * args)
return format (s, "unknown 0x%x", si);
}
-u8 * format_elf_symbol (u8 * s, va_list * args)
+u8 *
+format_elf_symbol (u8 * s, va_list * args)
{
- elf_main_t * em = va_arg (*args, elf_main_t *);
- elf_symbol_table_t * t = va_arg (*args, elf_symbol_table_t *);
- elf64_symbol_t * sym = va_arg (*args, elf64_symbol_t *);
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf_symbol_table_t *t = va_arg (*args, elf_symbol_table_t *);
+ elf64_symbol_t *sym = va_arg (*args, elf64_symbol_t *);
- if (! sym)
+ if (!sym)
return format (s, "%=32s%=16s%=16s%=16s%=16s%=16s",
- "Symbol", "Size", "Value", "Type", "Visibility", "Section");
+ "Symbol", "Size", "Value", "Type", "Visibility",
+ "Section");
s = format (s, "%-32s%16Ld%16Lx%=16U%=16U%U",
elf_symbol_name (t, sym),
@@ -283,9 +290,9 @@ u8 * format_elf_symbol (u8 * s, va_list * args)
static u8 *
format_elf_relocation_type (u8 * s, va_list * args)
{
- elf_main_t * em = va_arg (*args, elf_main_t *);
+ elf_main_t *em = va_arg (*args, elf_main_t *);
int type = va_arg (*args, int);
- char * t = 0;
+ char *t = 0;
switch (em->first_header.architecture)
{
@@ -293,7 +300,7 @@ format_elf_relocation_type (u8 * s, va_list * args)
case ELF_ARCH_X86_64:
{
- static char * tab[] = {
+ static char *tab[] = {
foreach_elf_x86_64_relocation_type
};
@@ -307,7 +314,7 @@ format_elf_relocation_type (u8 * s, va_list * args)
break;
}
- if (! t)
+ if (!t)
s = format (s, "0x%02x", type);
else
s = format (s, "%s", t);
@@ -318,12 +325,13 @@ format_elf_relocation_type (u8 * s, va_list * args)
static u8 *
format_elf_relocation (u8 * s, va_list * args)
{
- elf_main_t * em = va_arg (*args, elf_main_t *);
- elf_relocation_with_addend_t * r = va_arg (*args, elf_relocation_with_addend_t *);
- elf_symbol_table_t * t;
- elf64_symbol_t * sym;
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf_relocation_with_addend_t *r =
+ va_arg (*args, elf_relocation_with_addend_t *);
+ elf_symbol_table_t *t;
+ elf64_symbol_t *sym;
- if (! r)
+ if (!r)
return format (s, "%=16s%=16s%=16s", "Address", "Type", "Symbol");
t = vec_elt_at_index (em->symbol_tables, 0);
@@ -335,7 +343,7 @@ format_elf_relocation (u8 * s, va_list * args)
if (sym->section_index != 0)
{
- elf_section_t * es;
+ elf_section_t *es;
es = vec_elt_at_index (em->sections, sym->section_index);
s = format (s, " (section %s)", elf_section_name (em, es));
}
@@ -346,9 +354,7 @@ format_elf_relocation (u8 * s, va_list * args)
{
i64 a = r->addend;
if (a != 0)
- s = format (s, " %c 0x%Lx",
- a > 0 ? '+' : '-',
- a > 0 ? a : -a);
+ s = format (s, " %c 0x%Lx", a > 0 ? '+' : '-', a > 0 ? a : -a);
}
return s;
@@ -358,13 +364,14 @@ static u8 *
format_elf_dynamic_entry_type (u8 * s, va_list * args)
{
u32 type = va_arg (*args, u32);
- char * t = 0;
+ char *t = 0;
switch (type)
{
#define _(f,n) case n: t = #f; break;
foreach_elf_dynamic_entry_type;
#undef _
- default: break;
+ default:
+ break;
}
if (t)
return format (s, "%s", t);
@@ -375,14 +382,13 @@ format_elf_dynamic_entry_type (u8 * s, va_list * args)
static u8 *
format_elf_dynamic_entry (u8 * s, va_list * args)
{
- elf_main_t * em = va_arg (*args, elf_main_t *);
- elf64_dynamic_entry_t * e = va_arg (*args, elf64_dynamic_entry_t *);
+ elf_main_t *em = va_arg (*args, elf_main_t *);
+ elf64_dynamic_entry_t *e = va_arg (*args, elf64_dynamic_entry_t *);
- if (! e)
+ if (!e)
return format (s, "%=40s%=16s", "Type", "Data");
- s = format (s, "%=40U",
- format_elf_dynamic_entry_type, (u32) e->type);
+ s = format (s, "%=40U", format_elf_dynamic_entry_type, (u32) e->type);
switch (e->type)
{
case ELF_DYNAMIC_ENTRY_NEEDED_LIBRARY:
@@ -403,7 +409,8 @@ format_elf_dynamic_entry (u8 * s, va_list * args)
case ELF_DYNAMIC_ENTRY_VERSION_NEED:
case ELF_DYNAMIC_ENTRY_VERSYM:
{
- elf_section_t * es = elf_get_section_by_start_address_no_check (em, e->data);
+ elf_section_t *es =
+ elf_get_section_by_start_address_no_check (em, e->data);
if (es)
s = format (s, "section %s", elf_section_name (em, es));
else
@@ -419,10 +426,11 @@ format_elf_dynamic_entry (u8 * s, va_list * args)
return s;
}
-static u8 * format_elf_architecture (u8 * s, va_list * args)
+static u8 *
+format_elf_architecture (u8 * s, va_list * args)
{
int a = va_arg (*args, int);
- char * t;
+ char *t;
switch (a)
{
@@ -436,10 +444,11 @@ static u8 * format_elf_architecture (u8 * s, va_list * args)
return format (s, "%s", t);
}
-static u8 * format_elf_abi (u8 * s, va_list * args)
+static u8 *
+format_elf_abi (u8 * s, va_list * args)
{
int a = va_arg (*args, int);
- char * t;
+ char *t;
switch (a)
{
@@ -453,10 +462,11 @@ static u8 * format_elf_abi (u8 * s, va_list * args)
return format (s, "%s", t);
}
-static u8 * format_elf_file_class (u8 * s, va_list * args)
+static u8 *
+format_elf_file_class (u8 * s, va_list * args)
{
int a = va_arg (*args, int);
- char * t;
+ char *t;
switch (a)
{
@@ -470,10 +480,11 @@ static u8 * format_elf_file_class (u8 * s, va_list * args)
return format (s, "%s", t);
}
-static u8 * format_elf_file_type (u8 * s, va_list * args)
+static u8 *
+format_elf_file_type (u8 * s, va_list * args)
{
int a = va_arg (*args, int);
- char * t;
+ char *t;
if (a >= ELF_ARCH_SPECIFIC_LO && a <= ELF_ARCH_SPECIFIC_HI)
return format (s, "arch-specific 0x%x", a - ELF_ARCH_SPECIFIC_LO);
@@ -493,10 +504,11 @@ static u8 * format_elf_file_type (u8 * s, va_list * args)
return format (s, "%s", t);
}
-static u8 * format_elf_data_encoding (u8 * s, va_list * args)
+static u8 *
+format_elf_data_encoding (u8 * s, va_list * args)
{
int a = va_arg (*args, int);
- char * t;
+ char *t;
switch (a)
{
@@ -510,35 +522,41 @@ static u8 * format_elf_data_encoding (u8 * s, va_list * args)
return format (s, "%s", t);
}
-static int elf_section_offset_compare (void *a1, void *a2)
+static int
+elf_section_offset_compare (void *a1, void *a2)
{
elf_section_t *s1 = a1;
elf_section_t *s2 = a2;
-
- return ((i64)s1->header.file_offset - (i64)s2->header.file_offset);
+
+ return ((i64) s1->header.file_offset - (i64) s2->header.file_offset);
}
-static int elf_segment_va_compare (void *a1, void *a2)
+static int
+elf_segment_va_compare (void *a1, void *a2)
{
elf_segment_t *s1 = a1;
elf_segment_t *s2 = a2;
-
- return ((i64)s1->header.virtual_address - (i64)s2->header.virtual_address);
+
+ return ((i64) s1->header.virtual_address -
+ (i64) s2->header.virtual_address);
}
u8 *
format_elf_main (u8 * s, va_list * args)
{
- elf_main_t * em = va_arg (*args, elf_main_t *);
+ elf_main_t *em = va_arg (*args, elf_main_t *);
u32 verbose = va_arg (*args, u32);
- elf64_file_header_t * fh = &em->file_header;
-
- s = format (s, "File header: machine: %U, file type/class %U/%U, data-encoding: %U, abi: %U version %d\n",
- format_elf_architecture, em->first_header.architecture,
- format_elf_file_type, em->first_header.file_type,
- format_elf_file_class, em->first_header.file_class,
- format_elf_data_encoding, em->first_header.data_encoding,
- format_elf_abi, em->first_header.abi, em->first_header.abi_version);
+ elf64_file_header_t *fh = &em->file_header;
+
+ s =
+ format (s,
+ "File header: machine: %U, file type/class %U/%U, data-encoding: %U, abi: %U version %d\n",
+ format_elf_architecture, em->first_header.architecture,
+ format_elf_file_type, em->first_header.file_type,
+ format_elf_file_class, em->first_header.file_class,
+ format_elf_data_encoding, em->first_header.data_encoding,
+ format_elf_abi, em->first_header.abi,
+ em->first_header.abi_version);
s = format (s, " entry 0x%Lx, arch-flags 0x%x",
em->file_header.entry_point, em->file_header.flags);
@@ -547,34 +565,32 @@ format_elf_main (u8 * s, va_list * args)
s = format (s, "\n interpreter: %s", em->interpreter);
{
- elf_section_t * h, * copy;
+ elf_section_t *h, *copy;
copy = 0;
- vec_foreach (h, em->sections)
- if (h->header.type != ~0)
- vec_add1 (copy, h[0]);
+ vec_foreach (h, em->sections) if (h->header.type != ~0)
+ vec_add1 (copy, h[0]);
vec_sort_with_function (copy, elf_section_offset_compare);
s = format (s, "\nSections %d at file offset 0x%Lx-0x%Lx:\n",
fh->section_header_count,
fh->section_header_file_offset,
- fh->section_header_file_offset +
- (u64) fh->section_header_count * fh->section_header_size);
+ fh->section_header_file_offset +
+ (u64) fh->section_header_count * fh->section_header_size);
s = format (s, "%U\n", format_elf_section, em, 0);
- vec_foreach (h, copy)
- s = format (s, "%U\n", format_elf_section, em, h);
+ vec_foreach (h, copy) s = format (s, "%U\n", format_elf_section, em, h);
vec_free (copy);
}
{
- elf_segment_t * h, * copy;
+ elf_segment_t *h, *copy;
copy = 0;
vec_foreach (h, em->segments)
if (h->header.type != ELF_SEGMENT_UNUSED && h->header.type != ~0)
- vec_add1 (copy, h[0]);
+ vec_add1 (copy, h[0]);
/* Sort segments by address. */
vec_sort_with_function (copy, elf_segment_va_compare);
@@ -582,60 +598,61 @@ format_elf_main (u8 * s, va_list * args)
s = format (s, "\nSegments: %d at file offset 0x%Lx-0x%Lx:\n",
fh->segment_header_count,
fh->segment_header_file_offset,
- (u64) fh->segment_header_file_offset + (u64) fh->segment_header_count * (u64) fh->segment_header_size);
-
+ (u64) fh->segment_header_file_offset +
+ (u64) fh->segment_header_count *
+ (u64) fh->segment_header_size);
+
s = format (s, "%U\n", format_elf_segment, 0);
- vec_foreach (h, copy)
- s = format (s, "%U\n", format_elf_segment, h);
+ vec_foreach (h, copy) s = format (s, "%U\n", format_elf_segment, h);
vec_free (copy);
}
- if ((verbose & FORMAT_ELF_MAIN_SYMBOLS)
- && vec_len (em->symbol_tables) > 0)
+ if ((verbose & FORMAT_ELF_MAIN_SYMBOLS) && vec_len (em->symbol_tables) > 0)
{
- elf_symbol_table_t * t;
- elf64_symbol_t * sym;
- elf_section_t * es;
+ elf_symbol_table_t *t;
+ elf64_symbol_t *sym;
+ elf_section_t *es;
vec_foreach (t, em->symbol_tables)
- {
- es = vec_elt_at_index (em->sections, t->section_index);
- s = format (s, "\nSymbols for section %s:\n", elf_section_name (em, es));
-
- s = format (s, "%U\n", format_elf_symbol, em, 0, 0);
- vec_foreach (sym, t->symbols)
- s = format (s, "%U\n", format_elf_symbol, em, t, sym);
- }
+ {
+ es = vec_elt_at_index (em->sections, t->section_index);
+ s =
+ format (s, "\nSymbols for section %s:\n",
+ elf_section_name (em, es));
+
+ s = format (s, "%U\n", format_elf_symbol, em, 0, 0);
+ vec_foreach (sym, t->symbols)
+ s = format (s, "%U\n", format_elf_symbol, em, t, sym);
+ }
}
if ((verbose & FORMAT_ELF_MAIN_RELOCATIONS)
&& vec_len (em->relocation_tables) > 0)
{
- elf_relocation_table_t * t;
- elf_relocation_with_addend_t * r;
- elf_section_t * es;
+ elf_relocation_table_t *t;
+ elf_relocation_with_addend_t *r;
+ elf_section_t *es;
vec_foreach (t, em->relocation_tables)
- {
- es = vec_elt_at_index (em->sections, t->section_index);
- r = t->relocations;
- s = format (s, "\nRelocations for section %s:\n",
- elf_section_name (em, es));
+ {
+ es = vec_elt_at_index (em->sections, t->section_index);
+ r = t->relocations;
+ s = format (s, "\nRelocations for section %s:\n",
+ elf_section_name (em, es));
- s = format (s, "%U\n", format_elf_relocation, em, 0);
- vec_foreach (r, t->relocations)
- {
- s = format (s, "%U\n",
- format_elf_relocation, em, r);
- }
+ s = format (s, "%U\n", format_elf_relocation, em, 0);
+ vec_foreach (r, t->relocations)
+ {
+ s = format (s, "%U\n", format_elf_relocation, em, r);
}
+ }
}
if ((verbose & FORMAT_ELF_MAIN_DYNAMIC)
&& vec_len (em->dynamic_entries) > 0)
{
- elf64_dynamic_entry_t * es, * e;
+ elf64_dynamic_entry_t *es, *e;
s = format (s, "\nDynamic linker information:\n");
es = vec_dup (em->dynamic_entries);
s = format (s, "%U\n", format_elf_dynamic_entry, em, 0);
@@ -647,9 +664,9 @@ format_elf_main (u8 * s, va_list * args)
}
static void
-elf_parse_segments (elf_main_t * em, void * data)
+elf_parse_segments (elf_main_t * em, void *data)
{
- void * d = data + em->file_header.segment_header_file_offset;
+ void *d = data + em->file_header.segment_header_file_offset;
uword n = em->file_header.segment_header_count;
uword i;
@@ -661,29 +678,29 @@ elf_parse_segments (elf_main_t * em, void * data)
if (em->first_header.file_class == ELF_64BIT)
{
- elf64_segment_header_t * h = d;
+ elf64_segment_header_t *h = d;
#define _(t,f) em->segments[i].header.f = elf_swap_##t (em, h->f);
foreach_elf64_segment_header
#undef _
- d = (h + 1);
+ d = (h + 1);
}
else
{
- elf32_segment_header_t * h = d;
+ elf32_segment_header_t *h = d;
#define _(t,f) em->segments[i].header.f = elf_swap_##t (em, h->f);
foreach_elf32_segment_header
#undef _
- d = (h + 1);
+ d = (h + 1);
}
}
}
static void
-elf_parse_sections (elf_main_t * em, void * data)
+elf_parse_sections (elf_main_t * em, void *data)
{
- elf64_file_header_t * fh = &em->file_header;
- elf_section_t * s;
- void * d = data + fh->section_header_file_offset;
+ elf64_file_header_t *fh = &em->file_header;
+ elf_section_t *s;
+ void *d = data + fh->section_header_file_offset;
uword n = fh->section_header_count;
uword i;
@@ -697,48 +714,47 @@ elf_parse_sections (elf_main_t * em, void * data)
if (em->first_header.file_class == ELF_64BIT)
{
- elf64_section_header_t * h = d;
+ elf64_section_header_t *h = d;
#define _(t,f) em->sections[i].header.f = elf_swap_##t (em, h->f);
foreach_elf64_section_header
#undef _
- d = (h + 1);
+ d = (h + 1);
}
else
{
- elf32_section_header_t * h = d;
+ elf32_section_header_t *h = d;
#define _(t,f) em->sections[i].header.f = elf_swap_##t (em, h->f);
foreach_elf32_section_header
#undef _
- d = (h + 1);
+ d = (h + 1);
}
if (s->header.type != ELF_SECTION_NO_BITS)
- vec_add (s->contents, data + s->header.file_offset, s->header.file_size);
+ vec_add (s->contents, data + s->header.file_offset,
+ s->header.file_size);
}
s = vec_elt_at_index (em->sections, fh->section_header_string_table_index);
em->section_by_name
- = hash_create_string (/* # elts */ vec_len (em->sections),
+ = hash_create_string ( /* # elts */ vec_len (em->sections),
/* sizeof of value */ sizeof (uword));
vec_foreach (s, em->sections)
- {
- hash_set_mem (em->section_by_name,
- elf_section_name (em, s),
- s - em->sections);
- hash_set (em->section_by_start_address,
- s->header.exec_address,
- s - em->sections);
- }
+ {
+ hash_set_mem (em->section_by_name,
+ elf_section_name (em, s), s - em->sections);
+ hash_set (em->section_by_start_address,
+ s->header.exec_address, s - em->sections);
+ }
}
static void
add_symbol_table (elf_main_t * em, elf_section_t * s)
{
- elf_symbol_table_t * tab;
- elf32_symbol_t * sym32;
- elf64_symbol_t * sym64;
+ elf_symbol_table_t *tab;
+ elf32_symbol_t *sym32;
+ elf64_symbol_t *sym64;
uword i;
if (s->header.type == ELF_SECTION_DYNAMIC_SYMBOL_TABLE)
@@ -750,7 +766,9 @@ add_symbol_table (elf_main_t * em, elf_section_t * s)
if (em->first_header.file_class == ELF_64BIT)
{
- tab->symbols = elf_get_section_contents (em, s - em->sections, sizeof (tab->symbols[0]));
+ tab->symbols =
+ elf_get_section_contents (em, s - em->sections,
+ sizeof (tab->symbols[0]));
for (i = 0; i < vec_len (tab->symbols); i++)
{
#define _(t,f) tab->symbols[i].f = elf_swap_##t (em, tab->symbols[i].f);
@@ -760,7 +778,8 @@ add_symbol_table (elf_main_t * em, elf_section_t * s)
}
else
{
- sym32 = elf_get_section_contents (em, s - em->sections, sizeof (sym32[0]));
+ sym32 =
+ elf_get_section_contents (em, s - em->sections, sizeof (sym32[0]));
vec_clone (tab->symbols, sym32);
for (i = 0; i < vec_len (tab->symbols); i++)
{
@@ -774,25 +793,25 @@ add_symbol_table (elf_main_t * em, elf_section_t * s)
return;
tab->string_table =
- elf_get_section_contents (em, s->header.link, sizeof (tab->string_table[0]));
- tab->symbol_by_name
- = hash_create_string (/* # elts */ vec_len (tab->symbols),
- /* sizeof of value */ sizeof (uword));
+ elf_get_section_contents (em, s->header.link,
+ sizeof (tab->string_table[0]));
+ tab->symbol_by_name =
+ hash_create_string ( /* # elts */ vec_len (tab->symbols),
+ /* sizeof of value */ sizeof (uword));
vec_foreach (sym64, tab->symbols)
- {
- if (sym64->name != 0)
- hash_set_mem (tab->symbol_by_name,
- tab->string_table + sym64->name,
- sym64 - tab->symbols);
- }
+ {
+ if (sym64->name != 0)
+ hash_set_mem (tab->symbol_by_name,
+ tab->string_table + sym64->name, sym64 - tab->symbols);
+ }
}
static void
add_relocation_table (elf_main_t * em, elf_section_t * s)
{
uword has_addend = s->header.type == ELF_SECTION_RELOCATION_ADD;
- elf_relocation_table_t * t;
+ elf_relocation_table_t *t;
uword i;
vec_add2 (em->relocation_tables, t, 1);
@@ -800,10 +819,11 @@ add_relocation_table (elf_main_t * em, elf_section_t * s)
if (em->first_header.file_class == ELF_64BIT)
{
- elf64_relocation_t * r, * rs;
+ elf64_relocation_t *r, *rs;
- rs = elf_get_section_contents (em, t->section_index,
- sizeof (rs[0]) + has_addend * sizeof (rs->addend[0]));
+ rs = elf_get_section_contents (em, t->section_index,
+ sizeof (rs[0]) +
+ has_addend * sizeof (rs->addend[0]));
if (em->need_byte_swap)
{
@@ -824,29 +844,31 @@ add_relocation_table (elf_main_t * em, elf_section_t * s)
}
else
{
- elf_relocation_with_addend_t * r;
- elf32_relocation_t * r32, * r32s;
+ elf_relocation_with_addend_t *r;
+ elf32_relocation_t *r32, *r32s;
- r32s = elf_get_section_contents (em, t->section_index,
- sizeof (r32s[0]) + has_addend * sizeof (r32s->addend[0]));
+ r32s = elf_get_section_contents (em, t->section_index,
+ sizeof (r32s[0]) +
+ has_addend * sizeof (r32s->addend[0]));
vec_resize (t->relocations, vec_len (r32s));
r32 = r32s;
vec_foreach (r, t->relocations)
- {
- r->address = elf_swap_u32 (em, r32->address);
- r->symbol_and_type = elf_swap_u32 (em, r->symbol_and_type);
- r->addend = has_addend ? elf_swap_u32 (em, r32->addend[0]) : 0;
- r32 = elf_relocation_next (r32, s->header.type);
- }
+ {
+ r->address = elf_swap_u32 (em, r32->address);
+ r->symbol_and_type = elf_swap_u32 (em, r->symbol_and_type);
+ r->addend = has_addend ? elf_swap_u32 (em, r32->addend[0]) : 0;
+ r32 = elf_relocation_next (r32, s->header.type);
+ }
vec_free (r32s);
}
}
-void elf_parse_symbols (elf_main_t * em)
+void
+elf_parse_symbols (elf_main_t * em)
{
- elf_section_t * s;
+ elf_section_t *s;
/* No need to parse symbols twice. */
if (em->parsed_symbols)
@@ -854,84 +876,87 @@ void elf_parse_symbols (elf_main_t * em)
em->parsed_symbols = 1;
vec_foreach (s, em->sections)
- {
- switch (s->header.type)
- {
- case ELF_SECTION_SYMBOL_TABLE:
- case ELF_SECTION_DYNAMIC_SYMBOL_TABLE:
- add_symbol_table (em, s);
- break;
+ {
+ switch (s->header.type)
+ {
+ case ELF_SECTION_SYMBOL_TABLE:
+ case ELF_SECTION_DYNAMIC_SYMBOL_TABLE:
+ add_symbol_table (em, s);
+ break;
- case ELF_SECTION_RELOCATION_ADD:
- case ELF_SECTION_RELOCATION:
- add_relocation_table (em, s);
- break;
+ case ELF_SECTION_RELOCATION_ADD:
+ case ELF_SECTION_RELOCATION:
+ add_relocation_table (em, s);
+ break;
- default:
- break;
- }
- }
+ default:
+ break;
+ }
+ }
}
-void elf_set_dynamic_entries (elf_main_t * em)
+void
+elf_set_dynamic_entries (elf_main_t * em)
{
uword i;
/* Start address for sections may have changed. */
{
- elf64_dynamic_entry_t * e;
+ elf64_dynamic_entry_t *e;
vec_foreach (e, em->dynamic_entries)
- {
- switch (e->type)
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_INIT_FUNCTION:
+ case ELF_DYNAMIC_ENTRY_FINI_FUNCTION:
+ case ELF_DYNAMIC_ENTRY_SYMBOL_HASH:
+ case ELF_DYNAMIC_ENTRY_GNU_HASH:
+ case ELF_DYNAMIC_ENTRY_STRING_TABLE:
+ case ELF_DYNAMIC_ENTRY_SYMBOL_TABLE:
+ case ELF_DYNAMIC_ENTRY_PLT_GOT:
+ case ELF_DYNAMIC_ENTRY_PLT_RELOCATION_ADDRESS:
+ case ELF_DYNAMIC_ENTRY_RELA_ADDRESS:
+ case ELF_DYNAMIC_ENTRY_VERSION_NEED:
+ case ELF_DYNAMIC_ENTRY_VERSYM:
{
- case ELF_DYNAMIC_ENTRY_INIT_FUNCTION:
- case ELF_DYNAMIC_ENTRY_FINI_FUNCTION:
- case ELF_DYNAMIC_ENTRY_SYMBOL_HASH:
- case ELF_DYNAMIC_ENTRY_GNU_HASH:
- case ELF_DYNAMIC_ENTRY_STRING_TABLE:
- case ELF_DYNAMIC_ENTRY_SYMBOL_TABLE:
- case ELF_DYNAMIC_ENTRY_PLT_GOT:
- case ELF_DYNAMIC_ENTRY_PLT_RELOCATION_ADDRESS:
- case ELF_DYNAMIC_ENTRY_RELA_ADDRESS:
- case ELF_DYNAMIC_ENTRY_VERSION_NEED:
- case ELF_DYNAMIC_ENTRY_VERSYM:
- {
- elf_section_t * es = elf_get_section_by_start_address_no_check (em, e->data);
- /* If section is not found just leave e->data alone. */
- if (es)
- e->data = es->header.exec_address;
- break;
- }
-
- default:
+ elf_section_t *es =
+ elf_get_section_by_start_address_no_check (em, e->data);
+ /* If section is not found just leave e->data alone. */
+ if (es)
+ e->data = es->header.exec_address;
break;
}
- }
+
+ default:
+ break;
+ }
+ }
}
if (em->first_header.file_class == ELF_64BIT)
{
- elf64_dynamic_entry_t * e, * es;
+ elf64_dynamic_entry_t *e, *es;
es = em->dynamic_entries;
if (em->need_byte_swap)
{
es = vec_dup (es);
vec_foreach (e, es)
- {
- e->type = elf_swap_u64 (em, e->type);
- e->data = elf_swap_u64 (em, e->data);
- }
+ {
+ e->type = elf_swap_u64 (em, e->type);
+ e->data = elf_swap_u64 (em, e->data);
+ }
}
- elf_set_section_contents (em, em->dynamic_section_index, es, vec_bytes (es));
+ elf_set_section_contents (em, em->dynamic_section_index, es,
+ vec_bytes (es));
if (es != em->dynamic_entries)
vec_free (es);
}
else
{
- elf32_dynamic_entry_t * es;
+ elf32_dynamic_entry_t *es;
vec_clone (es, em->dynamic_entries);
if (em->need_byte_swap)
@@ -943,47 +968,45 @@ void elf_set_dynamic_entries (elf_main_t * em)
}
}
- elf_set_section_contents (em, em->dynamic_section_index, es, vec_bytes (es));
+ elf_set_section_contents (em, em->dynamic_section_index, es,
+ vec_bytes (es));
vec_free (es);
}
}
clib_error_t *
-elf_parse (elf_main_t * em,
- void * data,
- uword data_bytes)
+elf_parse (elf_main_t * em, void *data, uword data_bytes)
{
- elf_first_header_t * h = data;
- elf64_file_header_t * fh = &em->file_header;
- clib_error_t * error = 0;
+ elf_first_header_t *h = data;
+ elf64_file_header_t *fh = &em->file_header;
+ clib_error_t *error = 0;
{
- char * save = em->file_name;
+ char *save = em->file_name;
memset (em, 0, sizeof (em[0]));
em->file_name = save;
}
em->first_header = h[0];
- em->need_byte_swap =
- CLIB_ARCH_IS_BIG_ENDIAN != (h->data_encoding == ELF_TWOS_COMPLEMENT_BIG_ENDIAN);
+ em->need_byte_swap =
+ CLIB_ARCH_IS_BIG_ENDIAN != (h->data_encoding ==
+ ELF_TWOS_COMPLEMENT_BIG_ENDIAN);
elf_swap_first_header (em, &em->first_header);
- if (! (h->magic[0] == 0x7f
- && h->magic[1] == 'E'
- && h->magic[2] == 'L'
- && h->magic[3] == 'F'))
+ if (!(h->magic[0] == 0x7f
+ && h->magic[1] == 'E' && h->magic[2] == 'L' && h->magic[3] == 'F'))
return clib_error_return (0, "`%s': bad magic", em->file_name);
if (h->file_class == ELF_64BIT)
{
- elf64_file_header_t * h64 = (void *) (h + 1);
+ elf64_file_header_t *h64 = (void *) (h + 1);
#define _(t,f) fh->f = elf_swap_##t (em, h64->f);
foreach_elf64_file_header
#undef _
}
else
{
- elf32_file_header_t * h32 = (void *) (h + 1);
+ elf32_file_header_t *h32 = (void *) (h + 1);
#define _(t,f) fh->f = elf_swap_##t (em, h32->f);
foreach_elf32_file_header
@@ -995,31 +1018,33 @@ elf_parse (elf_main_t * em,
/* Figure which sections are contained in each segment. */
{
- elf_segment_t * g;
- elf_section_t * s;
+ elf_segment_t *g;
+ elf_section_t *s;
vec_foreach (g, em->segments)
- {
- u64 g_lo, g_hi;
- u64 s_lo, s_hi;
+ {
+ u64 g_lo, g_hi;
+ u64 s_lo, s_hi;
- if (g->header.memory_size == 0)
- continue;
+ if (g->header.memory_size == 0)
+ continue;
+
+ g_lo = g->header.virtual_address;
+ g_hi = g_lo + g->header.memory_size;
- g_lo = g->header.virtual_address;
- g_hi = g_lo + g->header.memory_size;
+ vec_foreach (s, em->sections)
+ {
+ s_lo = s->header.exec_address;
+ s_hi = s_lo + s->header.file_size;
- vec_foreach (s, em->sections)
+ if (s_lo >= g_lo && s_hi <= g_hi)
{
- s_lo = s->header.exec_address;
- s_hi = s_lo + s->header.file_size;
-
- if (s_lo >= g_lo && s_hi <= g_hi)
- {
- g->section_index_bitmap = clib_bitmap_ori (g->section_index_bitmap, s->index);
- s->segment_index_bitmap = clib_bitmap_ori (s->segment_index_bitmap, g->index);
- }
+ g->section_index_bitmap =
+ clib_bitmap_ori (g->section_index_bitmap, s->index);
+ s->segment_index_bitmap =
+ clib_bitmap_ori (s->segment_index_bitmap, g->index);
}
}
+ }
}
return error;
@@ -1038,7 +1063,7 @@ add_dynamic_entries (elf_main_t * em, elf_section_t * s)
if (em->first_header.file_class == ELF_64BIT)
{
- elf64_dynamic_entry_t * e;
+ elf64_dynamic_entry_t *e;
e = elf_get_section_contents (em, s - em->sections, sizeof (e[0]));
if (em->need_byte_swap)
@@ -1052,7 +1077,7 @@ add_dynamic_entries (elf_main_t * em, elf_section_t * s)
}
else
{
- elf32_dynamic_entry_t * e;
+ elf32_dynamic_entry_t *e;
e = elf_get_section_contents (em, s - em->sections, sizeof (e[0]));
vec_clone (em->dynamic_entries, e);
@@ -1067,16 +1092,16 @@ add_dynamic_entries (elf_main_t * em, elf_section_t * s)
}
}
-static void byte_swap_verneed (elf_main_t * em,
- elf_dynamic_version_need_union_t * vus)
+static void
+byte_swap_verneed (elf_main_t * em, elf_dynamic_version_need_union_t * vus)
{
- uword * entries_swapped = 0;
+ uword *entries_swapped = 0;
uword i, j;
for (i = 0; i < vec_len (vus); i++)
{
- elf_dynamic_version_need_union_t * n = vec_elt_at_index (vus, i);
- elf_dynamic_version_need_union_t * a;
+ elf_dynamic_version_need_union_t *n = vec_elt_at_index (vus, i);
+ elf_dynamic_version_need_union_t *a;
if (clib_bitmap_get (entries_swapped, i))
continue;
@@ -1091,7 +1116,7 @@ static void byte_swap_verneed (elf_main_t * em,
while (1)
{
a = vec_elt_at_index (vus, j);
- if (! clib_bitmap_get (entries_swapped, j))
+ if (!clib_bitmap_get (entries_swapped, j))
{
entries_swapped = clib_bitmap_set (entries_swapped, j, 1);
elf_swap_verneed_aux (&a->aux);
@@ -1100,19 +1125,18 @@ static void byte_swap_verneed (elf_main_t * em,
break;
ASSERT (a->aux.next_offset % sizeof (a->aux) == 0);
j += (a->aux.next_offset / sizeof (a->aux));
- }
+ }
}
}
clib_bitmap_free (entries_swapped);
}
-static void
-set_dynamic_verneed (elf_main_t * em) __attribute__((unused));
+static void set_dynamic_verneed (elf_main_t * em) __attribute__ ((unused));
static void
set_dynamic_verneed (elf_main_t * em)
{
- elf_dynamic_version_need_union_t * vus = em->verneed;
+ elf_dynamic_version_need_union_t *vus = em->verneed;
if (em->need_byte_swap)
{
@@ -1120,36 +1144,37 @@ set_dynamic_verneed (elf_main_t * em)
byte_swap_verneed (em, vus);
}
- elf_set_section_contents (em, em->verneed_section_index, vus, vec_bytes (vus));
+ elf_set_section_contents (em, em->verneed_section_index, vus,
+ vec_bytes (vus));
if (vus != em->verneed)
vec_free (vus);
}
static void
-set_symbol_table (elf_main_t * em, u32 table_index) __attribute__((unused));
+set_symbol_table (elf_main_t * em, u32 table_index) __attribute__ ((unused));
static void
set_symbol_table (elf_main_t * em, u32 table_index)
{
- elf_symbol_table_t * tab = vec_elt_at_index (em->symbol_tables, table_index);
+ elf_symbol_table_t *tab = vec_elt_at_index (em->symbol_tables, table_index);
if (em->first_header.file_class == ELF_64BIT)
{
- elf64_symbol_t * s, * syms;
-
+ elf64_symbol_t *s, *syms;
+
syms = vec_dup (tab->symbols);
vec_foreach (s, syms)
- {
+ {
#define _(t,f) s->f = elf_swap_##t (em, s->f);
- foreach_elf64_symbol_header;
+ foreach_elf64_symbol_header;
#undef _
- }
+ }
elf_set_section_contents (em, tab->section_index,
syms, vec_bytes (syms));
}
else
{
- elf32_symbol_t * syms;
+ elf32_symbol_t *syms;
uword i;
vec_clone (syms, tab->symbols);
for (i = 0; i < vec_len (tab->symbols); i++)
@@ -1165,23 +1190,23 @@ set_symbol_table (elf_main_t * em, u32 table_index)
}
static char *
-elf_find_interpreter (elf_main_t * em, void * data)
+elf_find_interpreter (elf_main_t * em, void *data)
{
- elf_segment_t * g;
- elf_section_t * s;
- uword * p;
+ elf_segment_t *g;
+ elf_section_t *s;
+ uword *p;
vec_foreach (g, em->segments)
- {
- if (g->header.type == ELF_SEGMENT_INTERP)
- break;
- }
+ {
+ if (g->header.type == ELF_SEGMENT_INTERP)
+ break;
+ }
if (g >= vec_end (em->segments))
return 0;
p = hash_get (em->section_by_start_address, g->header.virtual_address);
- if (! p)
+ if (!p)
return 0;
s = vec_elt_at_index (em->sections, p[0]);
@@ -1194,8 +1219,8 @@ elf_get_section_contents_with_starting_address (elf_main_t * em,
uword elt_size,
u32 * section_index_result)
{
- elf_section_t * s;
- clib_error_t * error;
+ elf_section_t *s;
+ clib_error_t *error;
error = elf_get_section_by_start_address (em, start_address, &s);
if (error)
@@ -1210,91 +1235,104 @@ elf_get_section_contents_with_starting_address (elf_main_t * em,
return elf_get_section_contents (em, s->index, elt_size);
}
-static void elf_parse_dynamic (elf_main_t * em)
+static void
+elf_parse_dynamic (elf_main_t * em)
{
- elf_section_t * s;
- elf64_dynamic_entry_t * e;
+ elf_section_t *s;
+ elf64_dynamic_entry_t *e;
vec_foreach (s, em->sections)
- {
- switch (s->header.type)
- {
- case ELF_SECTION_DYNAMIC:
- add_dynamic_entries (em, s);
- break;
+ {
+ switch (s->header.type)
+ {
+ case ELF_SECTION_DYNAMIC:
+ add_dynamic_entries (em, s);
+ break;
- default:
- break;
- }
- }
+ default:
+ break;
+ }
+ }
em->dynamic_string_table_section_index = ~0;
em->dynamic_string_table = 0;
vec_foreach (e, em->dynamic_entries)
- {
- switch (e->type)
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_STRING_TABLE:
+ ASSERT (vec_len (em->dynamic_string_table) == 0);
+ em->dynamic_string_table
+ =
+ elf_get_section_contents_with_starting_address (em, e->data,
+ sizeof (u8),
+ &em->
+ dynamic_string_table_section_index);
+ break;
+
+ case ELF_DYNAMIC_ENTRY_SYMBOL_TABLE:
{
- case ELF_DYNAMIC_ENTRY_STRING_TABLE:
- ASSERT (vec_len (em->dynamic_string_table) == 0);
- em->dynamic_string_table
- = elf_get_section_contents_with_starting_address (em, e->data, sizeof (u8),
- &em->dynamic_string_table_section_index);
- break;
+ elf_section_t *s;
+ clib_error_t *error;
- case ELF_DYNAMIC_ENTRY_SYMBOL_TABLE:
- {
- elf_section_t * s;
- clib_error_t * error;
+ error = elf_get_section_by_start_address (em, e->data, &s);
+ if (error)
+ {
+ clib_error_report (error);
+ return;
+ }
- error = elf_get_section_by_start_address (em, e->data, &s);
- if (error)
- {
- clib_error_report (error);
- return;
- }
+ em->dynamic_symbol_table_section_index = s - em->sections;
+ }
+ break;
- em->dynamic_symbol_table_section_index = s - em->sections;
+ case ELF_DYNAMIC_ENTRY_VERSYM:
+ em->versym
+ =
+ elf_get_section_contents_with_starting_address (em, e->data,
+ sizeof (em->versym
+ [0]),
+ &em->
+ versym_section_index);
+ if (em->need_byte_swap)
+ {
+ uword i;
+ for (i = 0; i < vec_len (em->versym); i++)
+ em->versym[i] = clib_byte_swap_u16 (em->versym[i]);
}
- break;
-
- case ELF_DYNAMIC_ENTRY_VERSYM:
- em->versym
- = elf_get_section_contents_with_starting_address (em, e->data, sizeof (em->versym[0]),
- &em->versym_section_index);
- if (em->need_byte_swap)
- {
- uword i;
- for (i = 0; i < vec_len (em->versym); i++)
- em->versym[i] = clib_byte_swap_u16 (em->versym[i]);
- }
- break;
+ break;
- case ELF_DYNAMIC_ENTRY_VERSION_NEED:
- em->verneed
- = elf_get_section_contents_with_starting_address (em, e->data, sizeof (em->verneed[0]),
- &em->verneed_section_index);
- if (em->need_byte_swap)
- byte_swap_verneed (em, em->verneed);
- break;
+ case ELF_DYNAMIC_ENTRY_VERSION_NEED:
+ em->verneed
+ =
+ elf_get_section_contents_with_starting_address (em, e->data,
+ sizeof (em->verneed
+ [0]),
+ &em->
+ verneed_section_index);
+ if (em->need_byte_swap)
+ byte_swap_verneed (em, em->verneed);
+ break;
- default:
- break;
- }
- }
+ default:
+ break;
+ }
+ }
}
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
-clib_error_t * elf_read_file (elf_main_t * em, char * file_name)
+clib_error_t *
+elf_read_file (elf_main_t * em, char *file_name)
{
int fd;
struct stat fd_stat;
uword mmap_length = 0;
- void * data = 0;
- clib_error_t * error = 0;
+ void *data = 0;
+ clib_error_t *error = 0;
elf_main_init (em);
@@ -1335,7 +1373,7 @@ clib_error_t * elf_read_file (elf_main_t * em, char * file_name)
return /* no error */ 0;
- done:
+done:
elf_main_free (em);
if (fd >= 0)
close (fd);
@@ -1344,20 +1382,23 @@ clib_error_t * elf_read_file (elf_main_t * em, char * file_name)
return error;
}
-typedef struct {
- u8 * new_table;
+typedef struct
+{
+ u8 *new_table;
- u8 * old_table;
+ u8 *old_table;
- uword * hash;
+ uword *hash;
} string_table_builder_t;
-static u32 string_table_add_name (string_table_builder_t * b, u8 * n)
+static u32
+string_table_add_name (string_table_builder_t * b, u8 * n)
{
- uword * p, i, j, l;
+ uword *p, i, j, l;
p = hash_get_mem (b->hash, n);
- if (p) return p[0];
+ if (p)
+ return p[0];
l = strlen ((char *) n);
i = vec_len (b->new_table);
@@ -1381,56 +1422,62 @@ static u32 string_table_add_name (string_table_builder_t * b, u8 * n)
}
static u32 string_table_add_name_index (string_table_builder_t * b, u32 index)
- __attribute__((unused));
-static u32 string_table_add_name_index (string_table_builder_t * b, u32 index)
+ __attribute__ ((unused));
+static u32
+string_table_add_name_index (string_table_builder_t * b, u32 index)
{
- u8 * n = b->old_table + index;
+ u8 *n = b->old_table + index;
return string_table_add_name (b, n);
}
static void string_table_init (string_table_builder_t * b, u8 * old_table)
- __attribute__((unused));
-static void string_table_init (string_table_builder_t * b, u8 * old_table)
+ __attribute__ ((unused));
+static void
+string_table_init (string_table_builder_t * b, u8 * old_table)
{
memset (b, 0, sizeof (b[0]));
b->old_table = old_table;
b->hash = hash_create_string (0, sizeof (uword));
}
-static u8 * string_table_done (string_table_builder_t * b)
- __attribute__((unused));
-static u8 * string_table_done (string_table_builder_t * b)
+static u8 *string_table_done (string_table_builder_t * b)
+ __attribute__ ((unused));
+static u8 *
+string_table_done (string_table_builder_t * b)
{
hash_free (b->hash);
return b->new_table;
}
-static void layout_sections (elf_main_t * em)
+static void
+layout_sections (elf_main_t * em)
{
- elf_section_t * s;
+ elf_section_t *s;
u32 n_sections_with_changed_exec_address = 0;
- u32 * deferred_symbol_and_string_sections = 0;
+ u32 *deferred_symbol_and_string_sections = 0;
u32 n_deleted_sections = 0;
/* note: rebuild is always zero. Intent lost in the sands of time */
-#if 0
+#if 0
int rebuild = 0;
/* Re-build section string table (sections may have been deleted). */
if (rebuild)
{
- u8 * st = 0;
+ u8 *st = 0;
vec_foreach (s, em->sections)
- {
- u8 * name;
- if (s->header.type == ~0)
- continue;
- name = elf_section_name (em, s);
- s->header.name = vec_len (st);
- vec_add (st, name, strlen ((char *) name) + 1);
- }
+ {
+ u8 *name;
+ if (s->header.type == ~0)
+ continue;
+ name = elf_section_name (em, s);
+ s->header.name = vec_len (st);
+ vec_add (st, name, strlen ((char *) name) + 1);
+ }
- s = vec_elt_at_index (em->sections, em->file_header.section_header_string_table_index);
+ s =
+ vec_elt_at_index (em->sections,
+ em->file_header.section_header_string_table_index);
vec_free (s->contents);
s->contents = st;
@@ -1440,57 +1487,61 @@ static void layout_sections (elf_main_t * em)
if (rebuild && em->dynamic_string_table_section_index != ~0)
{
string_table_builder_t b;
-
+
string_table_init (&b, em->dynamic_string_table);
/* Add all dynamic symbols. */
{
- elf_symbol_table_t * symtab;
- elf64_symbol_t * sym;
+ elf_symbol_table_t *symtab;
+ elf64_symbol_t *sym;
- symtab = vec_elt_at_index (em->symbol_tables, em->dynamic_symbol_table_index);
+ symtab =
+ vec_elt_at_index (em->symbol_tables,
+ em->dynamic_symbol_table_index);
vec_foreach (sym, symtab->symbols)
- {
- u8 * name = elf_symbol_name (symtab, sym);
- sym->name = string_table_add_name (&b, name);
- }
+ {
+ u8 *name = elf_symbol_name (symtab, sym);
+ sym->name = string_table_add_name (&b, name);
+ }
set_symbol_table (em, em->dynamic_symbol_table_index);
}
/* Add all dynamic entries. */
{
- elf64_dynamic_entry_t * e;
+ elf64_dynamic_entry_t *e;
vec_foreach (e, em->dynamic_entries)
- {
- switch (e->type)
- {
- case ELF_DYNAMIC_ENTRY_NEEDED_LIBRARY:
- case ELF_DYNAMIC_ENTRY_RPATH:
- case ELF_DYNAMIC_ENTRY_RUN_PATH:
- e->data = string_table_add_name_index (&b, e->data);
- break;
- }
- }
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_NEEDED_LIBRARY:
+ case ELF_DYNAMIC_ENTRY_RPATH:
+ case ELF_DYNAMIC_ENTRY_RUN_PATH:
+ e->data = string_table_add_name_index (&b, e->data);
+ break;
+ }
+ }
}
/* Add all version needs. */
if (vec_len (em->verneed) > 0)
{
- elf_dynamic_version_need_union_t * n, * a;
+ elf_dynamic_version_need_union_t *n, *a;
n = em->verneed;
while (1)
{
- n->need.file_name_offset = string_table_add_name_index (&b, n->need.file_name_offset);
+ n->need.file_name_offset =
+ string_table_add_name_index (&b, n->need.file_name_offset);
if (n->need.first_aux_offset != 0)
{
a = n + n->need.first_aux_offset / sizeof (n[0]);
while (1)
{
- a->aux.name = string_table_add_name_index (&b, a->aux.name);
+ a->aux.name =
+ string_table_add_name_index (&b, a->aux.name);
if (a->aux.next_offset == 0)
break;
a += a->aux.next_offset / sizeof (a[0]);
@@ -1506,7 +1557,9 @@ static void layout_sections (elf_main_t * em)
set_dynamic_verneed (em);
}
- s = vec_elt_at_index (em->sections, em->dynamic_string_table_section_index);
+ s =
+ vec_elt_at_index (em->sections,
+ em->dynamic_string_table_section_index);
vec_free (s->contents);
s->contents = string_table_done (&b);
@@ -1519,89 +1572,90 @@ static void layout_sections (elf_main_t * em)
u64 file_size, align_size;
vec_foreach (s, em->sections)
- {
- /* Ignore deleted and unused sections. */
- switch (s->header.type)
- {
- case ~0:
- n_deleted_sections++;
- case ELF_SECTION_UNUSED:
- continue;
-
- case ELF_SECTION_STRING_TABLE:
- case ELF_SECTION_SYMBOL_TABLE:
- if (! (s->index == em->dynamic_string_table_section_index
- || s->index == em->file_header.section_header_string_table_index))
- {
- vec_add1 (deferred_symbol_and_string_sections, s->index);
- continue;
- }
- break;
-
- default:
- break;
- }
+ {
+ /* Ignore deleted and unused sections. */
+ switch (s->header.type)
+ {
+ case ~0:
+ n_deleted_sections++;
+ case ELF_SECTION_UNUSED:
+ continue;
- exec_address = round_pow2_u64 (exec_address, s->header.align);
+ case ELF_SECTION_STRING_TABLE:
+ case ELF_SECTION_SYMBOL_TABLE:
+ if (!(s->index == em->dynamic_string_table_section_index
+ || s->index ==
+ em->file_header.section_header_string_table_index))
+ {
+ vec_add1 (deferred_symbol_and_string_sections, s->index);
+ continue;
+ }
+ break;
- /* Put sections we added at end of file. */
- if (s->header.file_offset == ~0)
- s->header.file_offset = file_offset;
+ default:
+ break;
+ }
- /* Follow gaps in original file. */
- if (s->header.exec_address > exec_address)
- {
- exec_address = s->header.exec_address;
- file_offset = s->header.file_offset;
- }
+ exec_address = round_pow2_u64 (exec_address, s->header.align);
- if (s->header.flags & ELF_SECTION_FLAG_ALLOC)
- {
- s->exec_address_change = exec_address - s->header.exec_address;
- n_sections_with_changed_exec_address += s->exec_address_change != 0;
- s->header.exec_address = exec_address;
- }
+ /* Put sections we added at end of file. */
+ if (s->header.file_offset == ~0)
+ s->header.file_offset = file_offset;
- if (s->header.type == ELF_SECTION_NO_BITS)
- file_size = s->header.file_size;
- else
- file_size = vec_len (s->contents);
+ /* Follow gaps in original file. */
+ if (s->header.exec_address > exec_address)
+ {
+ exec_address = s->header.exec_address;
+ file_offset = s->header.file_offset;
+ }
+ if (s->header.flags & ELF_SECTION_FLAG_ALLOC)
{
- u64 align;
+ s->exec_address_change = exec_address - s->header.exec_address;
+ n_sections_with_changed_exec_address += s->exec_address_change != 0;
+ s->header.exec_address = exec_address;
+ }
- if (s + 1 >= vec_end (em->sections))
- align = 16;
- else if (s[1].header.type == ELF_SECTION_NO_BITS)
- align = 8;
- else
- align = s[1].header.align;
+ if (s->header.type == ELF_SECTION_NO_BITS)
+ file_size = s->header.file_size;
+ else
+ file_size = vec_len (s->contents);
- if (s->header.flags & ELF_SECTION_FLAG_ALLOC)
- {
- u64 v = round_pow2_u64 (exec_address + file_size, align);
- align_size = v - exec_address;
- }
- else
- {
- u64 v = round_pow2_u64 (file_offset + file_size, align);
- align_size = v - file_offset;
- }
- }
+ {
+ u64 align;
- s->header.file_offset = file_offset;
- s->header.file_size = file_size;
- s->align_size = align_size;
+ if (s + 1 >= vec_end (em->sections))
+ align = 16;
+ else if (s[1].header.type == ELF_SECTION_NO_BITS)
+ align = 8;
+ else
+ align = s[1].header.align;
- if (s->header.type != ELF_SECTION_NO_BITS)
- file_offset += align_size;
- exec_address += align_size;
+ if (s->header.flags & ELF_SECTION_FLAG_ALLOC)
+ {
+ u64 v = round_pow2_u64 (exec_address + file_size, align);
+ align_size = v - exec_address;
+ }
+ else
+ {
+ u64 v = round_pow2_u64 (file_offset + file_size, align);
+ align_size = v - file_offset;
+ }
}
+ s->header.file_offset = file_offset;
+ s->header.file_size = file_size;
+ s->align_size = align_size;
+
+ if (s->header.type != ELF_SECTION_NO_BITS)
+ file_offset += align_size;
+ exec_address += align_size;
+ }
+
/* Section headers go after last section but before symbol/string
tables. */
{
- elf64_file_header_t * fh = &em->file_header;
+ elf64_file_header_t *fh = &em->file_header;
fh->section_header_file_offset = file_offset;
fh->section_header_count = vec_len (em->sections) - n_deleted_sections;
@@ -1612,7 +1666,9 @@ static void layout_sections (elf_main_t * em)
int i;
for (i = 0; i < vec_len (deferred_symbol_and_string_sections); i++)
{
- s = vec_elt_at_index (em->sections, deferred_symbol_and_string_sections[i]);
+ s =
+ vec_elt_at_index (em->sections,
+ deferred_symbol_and_string_sections[i]);
s->header.file_offset = file_offset;
s->header.file_size = vec_len (s->contents);
@@ -1634,18 +1690,19 @@ static void layout_sections (elf_main_t * em)
/* Update segments for changed section addresses. */
{
- elf_segment_t * g;
+ elf_segment_t *g;
uword si;
vec_foreach (g, em->segments)
- {
- u64 s_lo, s_hi, f_lo = 0;
- u32 n_sections = 0;
+ {
+ u64 s_lo, s_hi, f_lo = 0;
+ u32 n_sections = 0;
- if (g->header.memory_size == 0)
- continue;
+ if (g->header.memory_size == 0)
+ continue;
- s_lo = s_hi = 0;
+ s_lo = s_hi = 0;
+ /* *INDENT-OFF* */
clib_bitmap_foreach (si, g->section_index_bitmap, ({
u64 lo, hi;
@@ -1670,32 +1727,33 @@ static void layout_sections (elf_main_t * em)
s_hi = hi;
}
}));
+ /* *INDENT-ON* */
- if (n_sections == 0)
- continue;
+ if (n_sections == 0)
+ continue;
- /* File offset zero includes ELF headers/segment headers.
- Don't change that. */
- if (g->header.file_offset == 0
- && g->header.type == ELF_SEGMENT_LOAD)
- {
- s_lo = g->header.virtual_address;
- f_lo = g->header.file_offset;
- }
+ /* File offset zero includes ELF headers/segment headers.
+ Don't change that. */
+ if (g->header.file_offset == 0 && g->header.type == ELF_SEGMENT_LOAD)
+ {
+ s_lo = g->header.virtual_address;
+ f_lo = g->header.file_offset;
+ }
- g->header.virtual_address = s_lo;
- g->header.physical_address = s_lo;
- g->header.file_offset = f_lo;
- g->header.memory_size = s_hi - s_lo;
- }
+ g->header.virtual_address = s_lo;
+ g->header.physical_address = s_lo;
+ g->header.file_offset = f_lo;
+ g->header.memory_size = s_hi - s_lo;
+ }
}
}
-clib_error_t * elf_write_file (elf_main_t * em, char * file_name)
+clib_error_t *
+elf_write_file (elf_main_t * em, char *file_name)
{
int fd;
- FILE * f;
- clib_error_t * error = 0;
+ FILE *f;
+ clib_error_t *error = 0;
fd = open (file_name, O_CREAT | O_RDWR | O_TRUNC, 0755);
if (fd < 0)
@@ -1736,155 +1794,166 @@ clib_error_t * elf_write_file (elf_main_t * em, char * file_name)
foreach_elf64_file_header;
#undef _
- if (fwrite (&h, sizeof (h), 1, f) != 1)
- {
- error = clib_error_return_unix (0, "write file header");
- goto error;
- }
- }
- else
- {
- elf32_file_header_t h32;
+ if (fwrite (&h, sizeof (h), 1, f) != 1)
+ {
+ error = clib_error_return_unix (0, "write file header");
+ goto error;
+ }
+ }
+ else
+ {
+ elf32_file_header_t h32;
#define _(t,field) h32.field = elf_swap_##t (em, h.field);
- foreach_elf32_file_header;
+ foreach_elf32_file_header;
#undef _
- if (fwrite (&h32, sizeof (h32), 1, f) != 1)
- {
- error = clib_error_return_unix (0, "write file header");
- goto error;
- }
- }
+ if (fwrite (&h32, sizeof (h32), 1, f) != 1)
+ {
+ error = clib_error_return_unix (0, "write file header");
+ goto error;
+ }
+ }
}
/* Write segment headers. */
{
- elf_segment_t * s;
+ elf_segment_t *s;
vec_foreach (s, em->segments)
- {
- elf64_segment_header_t h;
+ {
+ elf64_segment_header_t h;
- if (s->header.type == ~0)
- continue;
+ if (s->header.type == ~0)
+ continue;
- h = s->header;
+ h = s->header;
- if (em->first_header.file_class == ELF_64BIT)
- {
+ if (em->first_header.file_class == ELF_64BIT)
+ {
#define _(t,field) h.field = elf_swap_##t (em, h.field);
- foreach_elf64_segment_header;
+ foreach_elf64_segment_header;
#undef _
-
- if (fwrite (&h, sizeof (h), 1, f) != 1)
- {
- error = clib_error_return_unix (0, "write segment header %U", format_elf_segment, em, s);
- goto error;
- }
- }
- else
- {
- elf32_segment_header_t h32;
+
+ if (fwrite (&h, sizeof (h), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write segment header %U",
+ format_elf_segment, em, s);
+ goto error;
+ }
+ }
+ else
+ {
+ elf32_segment_header_t h32;
#define _(t,field) h32.field = elf_swap_##t (em, h.field);
- foreach_elf32_segment_header;
+ foreach_elf32_segment_header;
#undef _
- if (fwrite (&h32, sizeof (h32), 1, f) != 1)
- {
- error = clib_error_return_unix (0, "write segment header %U", format_elf_segment, em, s);
- goto error;
- }
- }
- }
+ if (fwrite (&h32, sizeof (h32), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write segment header %U",
+ format_elf_segment, em, s);
+ goto error;
+ }
+ }
+ }
}
/* Write contents for all sections. */
{
- elf_section_t * s;
+ elf_section_t *s;
vec_foreach (s, em->sections)
- {
- if (s->header.file_size == 0)
- continue;
+ {
+ if (s->header.file_size == 0)
+ continue;
- if (fseek (f, s->header.file_offset, SEEK_SET) < 0)
- {
- fclose(f);
- return clib_error_return_unix (0, "fseek 0x%Lx",
- s->header.file_offset);
- }
+ if (fseek (f, s->header.file_offset, SEEK_SET) < 0)
+ {
+ fclose (f);
+ return clib_error_return_unix (0, "fseek 0x%Lx",
+ s->header.file_offset);
+ }
- if (s->header.type == ELF_SECTION_NO_BITS)
- /* don't write for .bss sections */;
- else if (fwrite (s->contents, vec_len (s->contents), 1, f) != 1)
- {
- error = clib_error_return_unix (0, "write %s section contents", elf_section_name (em, s));
- goto error;
- }
- }
+ if (s->header.type == ELF_SECTION_NO_BITS)
+ /* don't write for .bss sections */ ;
+ else if (fwrite (s->contents, vec_len (s->contents), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write %s section contents",
+ elf_section_name (em, s));
+ goto error;
+ }
+ }
/* Finally write section headers. */
if (fseek (f, em->file_header.section_header_file_offset, SEEK_SET) < 0)
{
- fclose(f);
- return clib_error_return_unix
- (0, "fseek 0x%Lx", em->file_header.section_header_file_offset);
+ fclose (f);
+ return clib_error_return_unix
+ (0, "fseek 0x%Lx", em->file_header.section_header_file_offset);
}
-
+
vec_foreach (s, em->sections)
- {
- elf64_section_header_t h;
+ {
+ elf64_section_header_t h;
- if (s->header.type == ~0)
- continue;
+ if (s->header.type == ~0)
+ continue;
- h = s->header;
+ h = s->header;
- if (em->first_header.file_class == ELF_64BIT)
- {
+ if (em->first_header.file_class == ELF_64BIT)
+ {
#define _(t,field) h.field = elf_swap_##t (em, h.field);
- foreach_elf64_section_header;
+ foreach_elf64_section_header;
#undef _
-
- if (fwrite (&h, sizeof (h), 1, f) != 1)
- {
- error = clib_error_return_unix (0, "write %s section header", elf_section_name (em, s));
- goto error;
- }
- }
- else
- {
- elf32_section_header_t h32;
+
+ if (fwrite (&h, sizeof (h), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write %s section header",
+ elf_section_name (em, s));
+ goto error;
+ }
+ }
+ else
+ {
+ elf32_section_header_t h32;
#define _(t,field) h32.field = elf_swap_##t (em, h.field);
- foreach_elf32_section_header;
+ foreach_elf32_section_header;
#undef _
- if (fwrite (&h32, sizeof (h32), 1, f) != 1)
- {
- error = clib_error_return_unix (0, "write %s section header", elf_section_name (em, s));
- goto error;
- }
- }
- }
+ if (fwrite (&h32, sizeof (h32), 1, f) != 1)
+ {
+ error =
+ clib_error_return_unix (0, "write %s section header",
+ elf_section_name (em, s));
+ goto error;
+ }
+ }
+ }
}
- error:
+error:
fclose (f);
return error;
}
-clib_error_t * elf_delete_named_section (elf_main_t * em, char * section_name)
+clib_error_t *
+elf_delete_named_section (elf_main_t * em, char *section_name)
{
- elf_section_t * s;
- clib_error_t * error;
+ elf_section_t *s;
+ clib_error_t *error;
error = elf_get_section_by_name (em, section_name, &s);
if (error)
return error;
-
+
s->header.type = ~0;
return 0;
@@ -1892,14 +1961,13 @@ clib_error_t * elf_delete_named_section (elf_main_t * em, char * section_name)
void
elf_create_section_with_contents (elf_main_t * em,
- char * section_name,
+ char *section_name,
elf64_section_header_t * header,
- void * contents,
- uword n_content_bytes)
+ void *contents, uword n_content_bytes)
{
- elf_section_t * s, * sts;
- u8 * st, * c;
- uword * p, is_new_section;
+ elf_section_t *s, *sts;
+ u8 *st, *c;
+ uword *p, is_new_section;
/* See if section already exists with given name.
If so, just replace contents. */
@@ -1917,7 +1985,9 @@ elf_create_section_with_contents (elf_main_t * em,
c = 0;
}
- sts = vec_elt_at_index (em->sections, em->file_header.section_header_string_table_index);
+ sts =
+ vec_elt_at_index (em->sections,
+ em->file_header.section_header_string_table_index);
st = sts->contents;
s->header = header[0];
@@ -1936,20 +2006,22 @@ elf_create_section_with_contents (elf_main_t * em,
clib_memcpy (c, contents, n_content_bytes);
s->contents = c;
- em->file_header.section_header_count += is_new_section && s->header.type != ~0;
+ em->file_header.section_header_count += is_new_section
+ && s->header.type != ~0;
}
-uword elf_delete_segment_with_type (elf_main_t * em, elf_segment_type_t segment_type)
+uword
+elf_delete_segment_with_type (elf_main_t * em,
+ elf_segment_type_t segment_type)
{
uword n_deleted = 0;
- elf_segment_t * s;
+ elf_segment_t *s;
- vec_foreach (s, em->segments)
- if (s->header.type == segment_type)
- {
- s->header.type = ~0;
- n_deleted += 1;
- }
+ vec_foreach (s, em->segments) if (s->header.type == segment_type)
+ {
+ s->header.type = ~0;
+ n_deleted += 1;
+ }
ASSERT (em->file_header.segment_header_count >= n_deleted);
em->file_header.segment_header_count -= n_deleted;
@@ -1958,3 +2030,11 @@ uword elf_delete_segment_with_type (elf_main_t * em, elf_segment_type_t segment_
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
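
Note: the elf.c hunks above apply the checkstyle/GNU indent conventions rather than changing behavior. As a minimal illustrative sketch (hypothetical names, not code from this patch), the target layout is: return type on its own line, braces in their own columns, two-space indentation, and pointer declarators written as type *name.

/* Illustrative sketch only: hypothetical names, not code from this patch. */
typedef struct
{
  char *name;                   /* pointer declarators written as "type *name" */
  unsigned int index;
} example_entry_t;

static char *
example_entry_name (example_entry_t * e)
{
  if (e == 0)
    return 0;

  if (e->index != 0)
    {
      /* nested blocks: braces on their own lines, indented two spaces */
      return e->name;
    }

  return "unnamed";
}

Constructs that GNU indent cannot parse cleanly, such as the clib_bitmap_foreach macro in layout_sections above, are bracketed with /* *INDENT-OFF* */ and /* *INDENT-ON* */ so the formatter leaves them untouched.
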
diff --git a/vppinfra/vppinfra/elf.h b/vppinfra/vppinfra/elf.h
index 52989166a5b..008ea284b31 100644
--- a/vppinfra/vppinfra/elf.h
+++ b/vppinfra/vppinfra/elf.h
@@ -161,50 +161,54 @@
#define _(f) ELF_##f,
-typedef enum {
- foreach_elf_file_class
- ELF_N_FILE_CLASS,
+typedef enum
+{
+ foreach_elf_file_class ELF_N_FILE_CLASS,
} elf_file_class_t;
-typedef enum {
- foreach_elf_data_encoding
- ELF_N_DATA_ENCODING,
+typedef enum
+{
+ foreach_elf_data_encoding ELF_N_DATA_ENCODING,
} elf_data_encoding_t;
#undef _
#define _(f,i) ELF_##f = i,
-typedef enum {
+typedef enum
+{
foreach_elf_abi
} elf_abi_t;
-typedef enum {
+typedef enum
+{
foreach_elf_file_type
} elf_file_type_t;
#undef _
-typedef enum {
+typedef enum
+{
#define _(f,i) ELF_ARCH_##f = i,
foreach_elf_architecture
#undef _
} elf_architecture_t;
-typedef struct {
+typedef struct
+{
/* 0x7f ELF */
u8 magic[4];
- elf_file_class_t file_class : 8;
- elf_data_encoding_t data_encoding : 8;
+ elf_file_class_t file_class:8;
+ elf_data_encoding_t data_encoding:8;
u8 file_version_ident;
- elf_abi_t abi : 8;
+ elf_abi_t abi:8;
u8 abi_version;
u8 pad[7];
- elf_file_type_t file_type : 16;
- elf_architecture_t architecture : 16;
+ elf_file_type_t file_type:16;
+ elf_architecture_t architecture:16;
u32 file_version;
} elf_first_header_t;
@@ -300,37 +304,37 @@ typedef struct {
#define _(t,f) t f;
-typedef struct {
- foreach_elf32_file_header
-} elf32_file_header_t;
+typedef struct
+{
+foreach_elf32_file_header} elf32_file_header_t;
-typedef struct {
- foreach_elf64_file_header
-} elf64_file_header_t;
+typedef struct
+{
+foreach_elf64_file_header} elf64_file_header_t;
-typedef struct {
- foreach_elf32_section_header
-} elf32_section_header_t;
+typedef struct
+{
+foreach_elf32_section_header} elf32_section_header_t;
-typedef struct {
- foreach_elf64_section_header
-} elf64_section_header_t;
+typedef struct
+{
+foreach_elf64_section_header} elf64_section_header_t;
-typedef struct {
- foreach_elf32_segment_header
-} elf32_segment_header_t;
+typedef struct
+{
+foreach_elf32_segment_header} elf32_segment_header_t;
-typedef struct {
- foreach_elf64_segment_header
-} elf64_segment_header_t;
+typedef struct
+{
+foreach_elf64_segment_header} elf64_segment_header_t;
-typedef struct {
- foreach_elf32_symbol_header
-} elf32_symbol_t;
+typedef struct
+{
+foreach_elf32_symbol_header} elf32_symbol_t;
-typedef struct {
- foreach_elf64_symbol_header
-} elf64_symbol_t;
+typedef struct
+{
+foreach_elf64_symbol_header} elf64_symbol_t;
#undef _
/* Special section names. */
@@ -376,7 +380,7 @@ typedef struct {
_ (ARCH_SPECIFIC_LO, 0x70000000) /* Start of processor-specific */ \
_ (ARCH_SPECIFIC_HI, 0x7fffffff) /* End of processor-specific */ \
_ (APP_SPECIFIC_LO, 0x80000000) /* Start of application-specific */ \
- _ (APP_SPECIFIC_HI, 0x8fffffff) /* End of application-specific */
+ _ (APP_SPECIFIC_HI, 0x8fffffff) /* End of application-specific */
/* Section flags. */
#define foreach_elf_section_flag \
@@ -395,20 +399,23 @@ typedef struct {
_ (ARCH_SPECIFIC_LO, 28) \
_ (ARCH_SPECIFIC_HI, 31)
-typedef enum {
+typedef enum
+{
#define _(f,i) ELF_SECTION_##f = i,
foreach_elf_section_type
#undef _
- ELF_SECTION_OS_SPECIFIC_HI = 0x6fffffff,
+ ELF_SECTION_OS_SPECIFIC_HI = 0x6fffffff,
} elf_section_type_t;
-typedef enum {
+typedef enum
+{
#define _(f,i) ELF_SECTION_FLAG_BIT_##f = i,
foreach_elf_section_flag
#undef _
} elf_section_flag_bit_t;
-typedef enum {
+typedef enum
+{
#define _(f,i) ELF_SECTION_FLAG_##f = 1 << ELF_SECTION_FLAG_BIT_##f,
foreach_elf_section_flag
#undef _
@@ -447,7 +454,8 @@ typedef enum {
/* The syminfo section if available contains additional
information about every dynamic symbol. */
-typedef struct {
+typedef struct
+{
u16 bound_to;
u16 flags;
} elf_symbol_info_t;
@@ -467,19 +475,22 @@ typedef struct {
_ (LAZY_LOAD) /* Symbol bound to object to be lazy loaded */
/* Relocation table entry with/without addend. */
-typedef struct {
+typedef struct
+{
u32 address;
u32 symbol_and_type; /* high 24 symbol, low 8 type. */
i32 addend[0];
} elf32_relocation_t;
-typedef struct {
+typedef struct
+{
u64 address;
u64 symbol_and_type; /* high 32 symbol, low 32 type. */
i64 addend[0];
} elf64_relocation_t;
-typedef struct {
+typedef struct
+{
u64 address;
u64 symbol_and_type;
u64 addend;
@@ -506,7 +517,7 @@ typedef struct {
_ (SUNW_STACK, 0x6ffffffb) /* Sun specific stack */ \
_ (OS_SPECIFIC_HI, 0x6fffffff) /* End of OS-specific */ \
_ (ARCH_SPECIFIC_LO, 0x70000000) /* Start of processor-specific */ \
- _ (ARCH_SPECIFIC_HI, 0x7fffffff) /* End of processor-specific */
+ _ (ARCH_SPECIFIC_HI, 0x7fffffff) /* End of processor-specific */
/* Segment flags. */
#define foreach_elf_segment_flag \
@@ -518,19 +529,22 @@ typedef struct {
_ (ARCH_SPECIFIC_LO, 28) \
_ (ARCH_SPECIFIC_HI, 31)
-typedef enum {
+typedef enum
+{
#define _(f,i) ELF_SEGMENT_##f = i,
foreach_elf_segment_type
#undef _
} elf_segment_type_t;
-typedef enum {
+typedef enum
+{
#define _(f,i) ELF_SEGMENT_FLAG_BIT_##f = i,
foreach_elf_segment_flag
#undef _
} elf_segment_flag_bit_t;
-typedef enum {
+typedef enum
+{
#define _(f,i) ELF_SEGMENT_FLAG_##f = 1 << ELF_SEGMENT_FLAG_BIT_##f,
foreach_elf_segment_flag
#undef _
@@ -543,16 +557,16 @@ typedef enum {
#define foreach_elf64_dynamic_entry_header \
_ (u64, type) \
_ (u64, data)
-
+
#define _(t,f) t f;
-typedef struct {
- foreach_elf32_dynamic_entry_header
-} elf32_dynamic_entry_t;
+typedef struct
+{
+foreach_elf32_dynamic_entry_header} elf32_dynamic_entry_t;
-typedef struct {
- foreach_elf64_dynamic_entry_header
-} elf64_dynamic_entry_t;
+typedef struct
+{
+foreach_elf64_dynamic_entry_header} elf64_dynamic_entry_t;
#undef _
@@ -620,17 +634,18 @@ typedef struct {
_ (VERSION_NEED, 0x6ffffffe) /* Address of table with needed versions */ \
_ (VERSION_NEED_COUNT, 0x6fffffff) /* Number of needed versions */ \
_ (AUXILIARY, 0x7ffffffd) /* Shared object to load before self */ \
- _ (FILTER, 0x7fffffff) /* Shared object to get values from */
+ _ (FILTER, 0x7fffffff) /* Shared object to get values from */
-typedef enum {
+typedef enum
+{
#define _(f,n) ELF_DYNAMIC_ENTRY_##f = (n),
foreach_elf_dynamic_entry_type
#undef _
} elf_dynamic_entry_type_t;
/* Values of `d_un.d_val' in the DT_FLAGS entry. */
-#define ELF_DYNAMIC_FLAGS_ORIGIN (1 << 0) /* Object may use DF_ORIGIN */
-#define ELF_DYNAMIC_FLAGS_SYMBOLIC (1 << 1) /* Symbol resolutions starts here */
+#define ELF_DYNAMIC_FLAGS_ORIGIN (1 << 0) /* Object may use DF_ORIGIN */
+#define ELF_DYNAMIC_FLAGS_SYMBOLIC (1 << 1) /* Symbol resolutions starts here */
#define ELF_DYNAMIC_FLAGS_TEXT_RELOCATIONS (1 << 2) /* Object contains text relocations */
#define ELF_DYNAMIC_FLAGS_BIND_NOW (1 << 3) /* No lazy binding for this object */
#define ELF_DYNAMIC_FLAGS_STATIC_TLS (1 << 4) /* Module uses the static TLS model */
@@ -640,9 +655,9 @@ typedef enum {
#define DF_1_NOW 0x00000001 /* Set RTLD_NOW for this object. */
#define DF_1_GLOBAL 0x00000002 /* Set RTLD_GLOBAL for this object. */
#define DF_1_GROUP 0x00000004 /* Set RTLD_GROUP for this object. */
-#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object.*/
-#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime.*/
-#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object*/
+#define DF_1_NODELETE 0x00000008 /* Set RTLD_NODELETE for this object. */
+#define DF_1_LOADFLTR 0x00000010 /* Trigger filtee loading at runtime. */
+#define DF_1_INITFIRST 0x00000020 /* Set RTLD_INITFIRST for this object */
#define DF_1_NOOPEN 0x00000040 /* Set RTLD_NOOPEN for this object. */
#define DF_1_ORIGIN 0x00000080 /* $ORIGIN must be handled. */
#define DF_1_DIRECT 0x00000100 /* Direct binding enabled. */
@@ -650,7 +665,7 @@ typedef enum {
#define DF_1_INTERPOSE 0x00000400 /* Object is used to interpose. */
#define DF_1_NODEFLIB 0x00000800 /* Ignore default lib search path. */
#define DF_1_NODUMP 0x00001000 /* Object can't be dldump'ed. */
-#define DF_1_CONFALT 0x00002000 /* Configuration alternative created.*/
+#define DF_1_CONFALT 0x00002000 /* Configuration alternative created. */
#define DF_1_ENDFILTEE 0x00004000 /* Filtee terminates filters search. */
#define DF_1_DISPRELDNE 0x00008000 /* Disp reloc applied at build time. */
#define DF_1_DISPRELPND 0x00010000 /* Disp reloc applied at run-time. */
@@ -676,19 +691,20 @@ typedef struct
u32 byte_offset_next_version_definition;
} elf_dynamic_version_definition_t;
-typedef struct {
+typedef struct
+{
u32 name;
- u32 next_offset; /* byte offset of ver def aux next entry */
+ u32 next_offset; /* byte offset of ver def aux next entry */
} elf_dynamic_version_definition_aux_t;
/* Version definition flags. */
-#define ELF_DYNAMIC_VERSION_FILE (1 << 0) /* Version definition of file itself */
-#define ELF_DYNAMIC_VERSION_WEAK (1 << 1) /* Weak version identifier */
+#define ELF_DYNAMIC_VERSION_FILE (1 << 0) /* Version definition of file itself */
+#define ELF_DYNAMIC_VERSION_WEAK (1 << 1) /* Weak version identifier */
/* Version symbol index. */
#define ELF_DYNAMIC_VERSYM_LOCAL 0 /* Symbol is local. */
#define ELF_DYNAMIC_VERSYM_GLOBAL 1 /* Symbol is global. */
-#define ELF_DYNAMIC_VERSYM_RESERVED_LO 0xff00 /* Beginning of reserved entries. */
+#define ELF_DYNAMIC_VERSYM_RESERVED_LO 0xff00 /* Beginning of reserved entries. */
#define ELF_DYNAMIC_VERSYM_ELIMINATE 0xff01 /* Symbol is to be eliminated. */
/* Version dependency section. */
@@ -797,14 +813,15 @@ typedef struct
_ (GOTTPOFF, 22) /* 32 bit signed PC relative offset to GOT entry for IE symbol */ \
_ (TPOFF32, 23) /* Offset in initial TLS, block) */
-typedef struct {
- elf64_symbol_t * symbols;
+typedef struct
+{
+ elf64_symbol_t *symbols;
u32 section_index;
- u8 * string_table;
+ u8 *string_table;
- uword * symbol_by_name;
+ uword *symbol_by_name;
} elf_symbol_table_t;
always_inline void
@@ -816,10 +833,13 @@ elf_symbol_table_free (elf_symbol_table_t * s)
always_inline u8 *
elf_symbol_name (elf_symbol_table_t * t, elf64_symbol_t * sym)
-{ return vec_elt_at_index (t->string_table, sym->name); }
+{
+ return vec_elt_at_index (t->string_table, sym->name);
+}
-typedef struct {
- elf_relocation_with_addend_t * relocations;
+typedef struct
+{
+ elf_relocation_with_addend_t *relocations;
u32 section_index;
} elf_relocation_table_t;
@@ -830,13 +850,14 @@ elf_relocation_table_free (elf_relocation_table_t * r)
vec_free (r->relocations);
}
-typedef struct {
+typedef struct
+{
elf64_section_header_t header;
u32 index;
/* Index of segments containing this section. */
- uword * segment_index_bitmap;
+ uword *segment_index_bitmap;
/* Aligned size (included padding not included in
header.file_size). */
@@ -844,52 +865,54 @@ typedef struct {
i64 exec_address_change;
- u8 * contents;
+ u8 *contents;
} elf_section_t;
-typedef struct {
+typedef struct
+{
elf64_segment_header_t header;
/* Sections contained in this segment. */
- uword * section_index_bitmap;
+ uword *section_index_bitmap;
u32 index;
- u8 * contents;
+ u8 *contents;
} elf_segment_t;
-typedef struct {
+typedef struct
+{
u8 need_byte_swap;
u8 parsed_symbols;
- char * file_name;
+ char *file_name;
elf_first_header_t first_header;
elf64_file_header_t file_header;
- elf_segment_t * segments;
+ elf_segment_t *segments;
- elf_section_t * sections;
+ elf_section_t *sections;
- uword * section_by_name;
- uword * section_by_start_address;
+ uword *section_by_name;
+ uword *section_by_start_address;
- elf_symbol_table_t * symbol_tables;
- elf_relocation_table_t * relocation_tables;
+ elf_symbol_table_t *symbol_tables;
+ elf_relocation_table_t *relocation_tables;
- char * interpreter;
+ char *interpreter;
- elf64_dynamic_entry_t * dynamic_entries;
- u8 * dynamic_string_table;
+ elf64_dynamic_entry_t *dynamic_entries;
+ u8 *dynamic_string_table;
u32 dynamic_string_table_section_index;
u32 dynamic_symbol_table_section_index;
u32 dynamic_symbol_table_index;
u32 dynamic_section_index;
- u16 * versym;
+ u16 *versym;
u32 versym_section_index;
- elf_dynamic_version_need_union_t * verneed;
+ elf_dynamic_version_need_union_t *verneed;
u32 verneed_section_index;
} elf_main_t;
@@ -923,22 +946,20 @@ elf_main_free (elf_main_t * em)
}
always_inline void
-elf_get_segment_contents (elf_main_t * em,
- void * data,
- uword segment_index)
+elf_get_segment_contents (elf_main_t * em, void *data, uword segment_index)
{
- elf_segment_t * g = vec_elt_at_index (em->segments, segment_index);
- if (! g->contents)
- vec_add (g->contents, data + g->header.file_offset, g->header.memory_size);
+ elf_segment_t *g = vec_elt_at_index (em->segments, segment_index);
+ if (!g->contents)
+ vec_add (g->contents, data + g->header.file_offset,
+ g->header.memory_size);
}
always_inline void *
elf_get_section_contents (elf_main_t * em,
- uword section_index,
- uword elt_size)
+ uword section_index, uword elt_size)
{
- elf_section_t * s;
- void * result;
+ elf_section_t *s;
+ void *result;
s = vec_elt_at_index (em->sections, section_index);
@@ -960,10 +981,9 @@ elf_get_section_contents (elf_main_t * em,
always_inline void
elf_set_section_contents (elf_main_t * em,
uword section_index,
- void * new_contents,
- uword n_content_bytes)
+ void *new_contents, uword n_content_bytes)
{
- elf_section_t * s;
+ elf_section_t *s;
s = vec_elt_at_index (em->sections, section_index);
vec_free (s->contents);
@@ -973,25 +993,35 @@ elf_set_section_contents (elf_main_t * em,
always_inline u8 *
elf_section_name (elf_main_t * em, elf_section_t * s)
{
- elf_section_t * es = vec_elt_at_index (em->sections, em->file_header.section_header_string_table_index);
+ elf_section_t *es = vec_elt_at_index (em->sections,
+ em->
+ file_header.section_header_string_table_index);
return vec_elt_at_index (es->contents, s->header.name);
}
always_inline u8
elf_swap_u8 (elf_main_t * em, u8 x)
-{ return x; }
+{
+ return x;
+}
always_inline u16
elf_swap_u16 (elf_main_t * em, u16 x)
-{ return em->need_byte_swap ? clib_byte_swap_u16 (x) : x; }
+{
+ return em->need_byte_swap ? clib_byte_swap_u16 (x) : x;
+}
always_inline u32
elf_swap_u32 (elf_main_t * em, u32 x)
-{ return em->need_byte_swap ? clib_byte_swap_u32 (x) : x; }
+{
+ return em->need_byte_swap ? clib_byte_swap_u32 (x) : x;
+}
always_inline u64
elf_swap_u64 (elf_main_t * em, u64 x)
-{ return em->need_byte_swap ? clib_byte_swap_u64 (x) : x; }
+{
+ return em->need_byte_swap ? clib_byte_swap_u64 (x) : x;
+}
#define FORMAT_ELF_MAIN_SYMBOLS (1 << 0)
#define FORMAT_ELF_MAIN_RELOCATIONS (1 << 1)
@@ -1000,24 +1030,33 @@ elf_swap_u64 (elf_main_t * em, u64 x)
format_function_t format_elf_main;
format_function_t format_elf_symbol;
-clib_error_t * elf_read_file (elf_main_t * em, char * file_name);
-clib_error_t * elf_write_file (elf_main_t * em, char * file_name);
-clib_error_t * elf_delete_named_section (elf_main_t * em, char * section_name);
-clib_error_t * elf_parse (elf_main_t * em, void * data, uword data_bytes);
+clib_error_t *elf_read_file (elf_main_t * em, char *file_name);
+clib_error_t *elf_write_file (elf_main_t * em, char *file_name);
+clib_error_t *elf_delete_named_section (elf_main_t * em, char *section_name);
+clib_error_t *elf_parse (elf_main_t * em, void *data, uword data_bytes);
void elf_parse_symbols (elf_main_t * em);
-clib_error_t *
-elf_get_section_by_name (elf_main_t * em, char * section_name, elf_section_t ** result);
-clib_error_t *
-elf_get_section_by_start_address (elf_main_t * em, uword start_address, elf_section_t ** result);
+clib_error_t *elf_get_section_by_name (elf_main_t * em, char *section_name,
+ elf_section_t ** result);
+clib_error_t *elf_get_section_by_start_address (elf_main_t * em,
+ uword start_address,
+ elf_section_t ** result);
void
elf_create_section_with_contents (elf_main_t * em,
- char * section_name,
+ char *section_name,
elf64_section_header_t * header,
- void * contents,
- uword n_content_bytes);
-uword elf_delete_segment_with_type (elf_main_t * em, elf_segment_type_t segment_type);
+ void *contents, uword n_content_bytes);
+uword elf_delete_segment_with_type (elf_main_t * em,
+ elf_segment_type_t segment_type);
void elf_set_dynamic_entries (elf_main_t * em);
#endif /* included_clib_elf_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
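
Likewise, the elf.h hunks above are formatting-only: typedef struct and typedef enum braces move to their own lines, bit-field members lose the spaces around the colon, and long prototypes are re-wrapped. A minimal sketch of the resulting declaration style (hypothetical names, not declarations from elf.h):

/* Illustrative sketch only: hypothetical names, not declarations from elf.h. */
typedef enum
{
  EXAMPLE_KIND_NONE = 0,
  EXAMPLE_KIND_DATA = 1,
} example_kind_t;

typedef struct
{
  example_kind_t kind:8;        /* bit-fields written as "type name:width" */
  unsigned int flags:24;
  char *name;                   /* pointer members written as "type *name" */
} example_header_t;

char *example_header_name (example_header_t * h);

Declarations that return a pointer keep the '*' attached to the function name, as in the clib_error_t *elf_read_file (...) prototypes near the end of the elf.h hunk.
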
diff --git a/vppinfra/vppinfra/elf_clib.c b/vppinfra/vppinfra/elf_clib.c
index 8c705488a4b..7bb72ee3e3f 100644
--- a/vppinfra/vppinfra/elf_clib.c
+++ b/vppinfra/vppinfra/elf_clib.c
@@ -18,8 +18,9 @@
#include <fcntl.h>
#include <sys/stat.h>
-typedef struct {
- char ** path;
+typedef struct
+{
+ char **path;
} path_search_t;
always_inline void
@@ -32,11 +33,11 @@ path_search_free (path_search_t * p)
}
static char **
-split_string (char * string, u8 delimiter)
+split_string (char *string, u8 delimiter)
{
- char ** result = 0;
- char * p, * start, * s;
-
+ char **result = 0;
+ char *p, *start, *s;
+
p = string;
while (1)
{
@@ -56,9 +57,9 @@ split_string (char * string, u8 delimiter)
}
static int
-file_exists_and_is_executable (char * dir, char * file)
+file_exists_and_is_executable (char *dir, char *file)
{
- char * path = (char *) format (0, "%s/%s%c", dir, file, 0);
+ char *path = (char *) format (0, "%s/%s%c", dir, file, 0);
struct stat s;
uword yes;
@@ -72,17 +73,17 @@ file_exists_and_is_executable (char * dir, char * file)
}
static char *
-path_search (char * file)
+path_search (char *file)
{
path_search_t ps;
uword i;
- char * result;
+ char *result;
/* Relative or absolute path. */
if (file[0] == '.' || file[0] == '/')
return file;
- if (getenv("PATH") == 0)
+ if (getenv ("PATH") == 0)
return file;
ps.path = split_string (getenv ("PATH"), ':');
@@ -102,16 +103,15 @@ path_search (char * file)
static clib_error_t *
clib_elf_parse_file (clib_elf_main_t * cem,
- char * file_name,
- void * link_address)
+ char *file_name, void *link_address)
{
- elf_main_t * em;
- elf_section_t * s;
+ elf_main_t *em;
+ elf_section_t *s;
int fd;
struct stat fd_stat;
uword mmap_length = 0;
- void * data = 0;
- clib_error_t * error = 0;
+ void *data = 0;
+ clib_error_t *error = 0;
vec_add2 (cem->elf_mains, em, 1);
@@ -142,66 +142,66 @@ clib_elf_parse_file (clib_elf_main_t * cem,
/* Look for CLIB special sections. */
{
- char * section_name_start = CLIB_ELF_SECTION_ADD_PREFIX ();
+ char *section_name_start = CLIB_ELF_SECTION_ADD_PREFIX ();
uword section_name_start_len = strlen (section_name_start);
vec_foreach (s, em->sections)
- {
- u8 * name = elf_section_name (em, s);
- uword * p;
- clib_elf_section_t * vs;
- clib_elf_section_bounds_t * b;
-
- /* Section name must begin with CLIB_ELF_SECTION key. */
- if (memcmp (name, section_name_start, section_name_start_len))
- continue;
-
- name += section_name_start_len;
- p = hash_get_mem (cem->section_by_name, name);
- if (p)
- vs = vec_elt_at_index (cem->sections, p[0]);
- else
- {
- name = format (0, "%s%c", name, 0);
- if (! cem->section_by_name)
- cem->section_by_name = hash_create_string (0, sizeof (uword));
- hash_set_mem (cem->section_by_name, name, vec_len (cem->sections));
- vec_add2 (cem->sections, vs, 1);
- vs->name = name;
- }
+ {
+ u8 *name = elf_section_name (em, s);
+ uword *p;
+ clib_elf_section_t *vs;
+ clib_elf_section_bounds_t *b;
+
+ /* Section name must begin with CLIB_ELF_SECTION key. */
+ if (memcmp (name, section_name_start, section_name_start_len))
+ continue;
+
+ name += section_name_start_len;
+ p = hash_get_mem (cem->section_by_name, name);
+ if (p)
+ vs = vec_elt_at_index (cem->sections, p[0]);
+ else
+ {
+ name = format (0, "%s%c", name, 0);
+ if (!cem->section_by_name)
+ cem->section_by_name = hash_create_string (0, sizeof (uword));
+ hash_set_mem (cem->section_by_name, name, vec_len (cem->sections));
+ vec_add2 (cem->sections, vs, 1);
+ vs->name = name;
+ }
- vec_add2 (vs->bounds, b, 1);
- b->lo = link_address + s->header.exec_address;
- b->hi = b->lo + s->header.file_size;
- }
+ vec_add2 (vs->bounds, b, 1);
+ b->lo = link_address + s->header.exec_address;
+ b->hi = b->lo + s->header.file_size;
+ }
}
/* Parse symbols for this file. */
{
- elf_symbol_table_t * t;
- elf64_symbol_t * s;
+ elf_symbol_table_t *t;
+ elf64_symbol_t *s;
elf_parse_symbols (em);
vec_foreach (t, em->symbol_tables)
+ {
+ vec_foreach (s, t->symbols)
{
- vec_foreach (s, t->symbols)
- {
- s->value += pointer_to_uword (link_address);
- }
+ s->value += pointer_to_uword (link_address);
}
+ }
}
/* No need to keep section contents around. */
{
- elf_section_t * s;
+ elf_section_t *s;
vec_foreach (s, em->sections)
- {
- if (s->header.type != ELF_SECTION_STRING_TABLE)
- vec_free (s->contents);
- }
+ {
+ if (s->header.type != ELF_SECTION_STRING_TABLE)
+ vec_free (s->contents);
+ }
}
- done:
+done:
if (error)
elf_main_free (em);
if (fd >= 0)
@@ -215,12 +215,12 @@ clib_elf_parse_file (clib_elf_main_t * cem,
#include <link.h>
static int
-add_section (struct dl_phdr_info * info, size_t size, void * opaque)
+add_section (struct dl_phdr_info *info, size_t size, void *opaque)
{
- clib_elf_main_t * cem = opaque;
- clib_error_t * error;
- char * name = (char *) info->dlpi_name;
- void * addr = (void *) info->dlpi_addr;
+ clib_elf_main_t *cem = opaque;
+ clib_error_t *error;
+ char *name = (char *) info->dlpi_name;
+ void *addr = (void *) info->dlpi_addr;
uword is_main;
is_main = strlen (name) == 0;
@@ -233,11 +233,11 @@ add_section (struct dl_phdr_info * info, size_t size, void * opaque)
return 0;
name = path_search (cem->exec_path);
- if (! name)
- {
- clib_error ("failed to find %s on PATH", cem->exec_path);
- return 0;
- }
+ if (!name)
+ {
+ clib_error ("failed to find %s on PATH", cem->exec_path);
+ return 0;
+ }
addr = 0;
}
@@ -253,9 +253,10 @@ add_section (struct dl_phdr_info * info, size_t size, void * opaque)
static clib_elf_main_t clib_elf_main;
-void clib_elf_main_init (char * exec_path)
+void
+clib_elf_main_init (char *exec_path)
{
- clib_elf_main_t * cem = &clib_elf_main;
+ clib_elf_main_t *cem = &clib_elf_main;
cem->exec_path = exec_path;
@@ -263,70 +264,76 @@ void clib_elf_main_init (char * exec_path)
}
clib_elf_section_bounds_t *
-clib_elf_get_section_bounds (char * name)
+clib_elf_get_section_bounds (char *name)
{
- clib_elf_main_t * em = &clib_elf_main;
- uword * p = hash_get (em->section_by_name, name);
+ clib_elf_main_t *em = &clib_elf_main;
+ uword *p = hash_get (em->section_by_name, name);
return p ? vec_elt_at_index (em->sections, p[0])->bounds : 0;
}
static uword
-symbol_by_address_or_name (char * by_name,
- uword by_address,
- clib_elf_symbol_t * s)
+symbol_by_address_or_name (char *by_name,
+ uword by_address, clib_elf_symbol_t * s)
{
- clib_elf_main_t * cem = &clib_elf_main;
- elf_main_t * em;
+ clib_elf_main_t *cem = &clib_elf_main;
+ elf_main_t *em;
vec_foreach (em, cem->elf_mains)
+ {
+ elf_symbol_table_t *t;
+ s->elf_main_index = em - cem->elf_mains;
+ vec_foreach (t, em->symbol_tables)
{
- elf_symbol_table_t * t;
- s->elf_main_index = em - cem->elf_mains;
- vec_foreach (t, em->symbol_tables)
+ s->symbol_table_index = t - em->symbol_tables;
+ if (by_name)
{
- s->symbol_table_index = t - em->symbol_tables;
- if (by_name)
- {
- uword * p = hash_get (t->symbol_by_name, by_name);
- if (p)
- {
- s->symbol = vec_elt (t->symbols, p[0]);
- return 1;
- }
- }
- else
+ uword *p = hash_get (t->symbol_by_name, by_name);
+ if (p)
{
- elf64_symbol_t * x;
- /* FIXME linear search. */
- vec_foreach (x, t->symbols)
- {
- if (by_address >= x->value && by_address < x->value + x->size)
- {
- s->symbol = x[0];
- return 1;
- }
- }
+ s->symbol = vec_elt (t->symbols, p[0]);
+ return 1;
}
}
+ else
+ {
+ elf64_symbol_t *x;
+ /* FIXME linear search. */
+ vec_foreach (x, t->symbols)
+ {
+ if (by_address >= x->value && by_address < x->value + x->size)
+ {
+ s->symbol = x[0];
+ return 1;
+ }
+ }
+ }
}
+ }
return 0;
}
-uword clib_elf_symbol_by_name (char * by_name, clib_elf_symbol_t * s)
-{ return symbol_by_address_or_name (by_name, /* by_address */ 0, s); }
+uword
+clib_elf_symbol_by_name (char *by_name, clib_elf_symbol_t * s)
+{
+ return symbol_by_address_or_name (by_name, /* by_address */ 0, s);
+}
-uword clib_elf_symbol_by_address (uword by_address, clib_elf_symbol_t * s)
-{ return symbol_by_address_or_name (/* by_name */ 0, by_address, s); }
+uword
+clib_elf_symbol_by_address (uword by_address, clib_elf_symbol_t * s)
+{
+ return symbol_by_address_or_name ( /* by_name */ 0, by_address, s);
+}
-u8 * format_clib_elf_symbol (u8 * s, va_list * args)
+u8 *
+format_clib_elf_symbol (u8 * s, va_list * args)
{
- clib_elf_main_t * cem = &clib_elf_main;
- clib_elf_symbol_t * sym = va_arg (*args, clib_elf_symbol_t *);
- elf_main_t * em;
- elf_symbol_table_t * t;
+ clib_elf_main_t *cem = &clib_elf_main;
+ clib_elf_symbol_t *sym = va_arg (*args, clib_elf_symbol_t *);
+ elf_main_t *em;
+ elf_symbol_table_t *t;
- if (! sym)
+ if (!sym)
/* Just print table headings. */
return format (s, "%U", format_elf_symbol, 0, 0, 0);
@@ -338,13 +345,14 @@ u8 * format_clib_elf_symbol (u8 * s, va_list * args)
}
}
-u8 * format_clib_elf_symbol_with_address (u8 * s, va_list * args)
+u8 *
+format_clib_elf_symbol_with_address (u8 * s, va_list * args)
{
uword address = va_arg (*args, uword);
- clib_elf_main_t * cem = &clib_elf_main;
+ clib_elf_main_t *cem = &clib_elf_main;
clib_elf_symbol_t sym;
- elf_main_t * em;
- elf_symbol_table_t * t;
+ elf_main_t *em;
+ elf_symbol_table_t *t;
if (clib_elf_symbol_by_address (address, &sym))
{
@@ -359,3 +367,11 @@ u8 * format_clib_elf_symbol_with_address (u8 * s, va_list * args)
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/elf_clib.h b/vppinfra/vppinfra/elf_clib.h
index e3ac0b770ad..25b928c22a5 100644
--- a/vppinfra/vppinfra/elf_clib.h
+++ b/vppinfra/vppinfra/elf_clib.h
@@ -59,61 +59,65 @@
CLIB_ELF_SECTION_DATA_ALIGN), \
void *)
-typedef struct {
- void * lo, * hi;
+typedef struct
+{
+ void *lo, *hi;
} clib_elf_section_bounds_t;
-typedef struct {
+typedef struct
+{
/* Vector of bounds for this section. Multiple shared objects may have instances
of the same sections. */
- clib_elf_section_bounds_t * bounds;
+ clib_elf_section_bounds_t *bounds;
/* Name of ELF section (e.g. .text). */
- u8 * name;
+ u8 *name;
} clib_elf_section_t;
-typedef struct {
+typedef struct
+{
/* Vector of sections. */
- clib_elf_section_t * sections;
+ clib_elf_section_t *sections;
/* Hash map of name to section index. */
- uword * section_by_name;
+ uword *section_by_name;
/* Unix path that we were exec()ed with. */
- char * exec_path;
+ char *exec_path;
- elf_main_t * elf_mains;
+ elf_main_t *elf_mains;
} clib_elf_main_t;
always_inline void
clib_elf_main_free (clib_elf_main_t * m)
{
- clib_elf_section_t * s;
+ clib_elf_section_t *s;
vec_foreach (s, m->sections)
- {
- vec_free (s->bounds);
- vec_free (s->name);
- }
+ {
+ vec_free (s->bounds);
+ vec_free (s->name);
+ }
vec_free (m->sections);
hash_free (m->section_by_name);
{
- elf_main_t * em;
+ elf_main_t *em;
vec_foreach (em, m->elf_mains)
- {
- elf_main_free (em);
- }
+ {
+ elf_main_free (em);
+ }
vec_free (m->elf_mains);
}
}
/* Call with exec_path equal to argv[0] from C main. */
-void clib_elf_main_init (char * exec_path);
+void clib_elf_main_init (char *exec_path);
-clib_elf_section_bounds_t * clib_elf_get_section_bounds (char * name);
+clib_elf_section_bounds_t *clib_elf_get_section_bounds (char *name);
-typedef struct {
- /* The symbol. */
+typedef struct
+{
+ /* The symbol. */
elf64_symbol_t symbol;
/* elf_main_t where symbol came from. */
@@ -124,9 +128,17 @@ typedef struct {
} clib_elf_symbol_t;
/* Returns 1 if found; otherwise zero. */
-uword clib_elf_symbol_by_name (char * name, clib_elf_symbol_t * result);
+uword clib_elf_symbol_by_name (char *name, clib_elf_symbol_t * result);
uword clib_elf_symbol_by_address (uword address, clib_elf_symbol_t * result);
format_function_t format_clib_elf_symbol, format_clib_elf_symbol_with_address;
#endif /* included_clib_elf_self_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
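
To make the reworked elf_clib interface above concrete, a short usage sketch follows. The probed address (the program's own main) and the use of clib_warning () for output are illustrative choices; clib_elf_main_init (), clib_elf_symbol_by_address () and format_clib_elf_symbol_with_address are the declarations from this header.

#include <vppinfra/elf_clib.h>
#include <vppinfra/error.h>

/* Sketch only: resolve an address inside the running program back to a
   symbol name using the API declared above. */
int
main (int argc, char *argv[])
{
  clib_elf_symbol_t sym;
  uword addr = (uword) main;	/* any text address will do */

  /* Per the comment above: call with exec_path equal to argv[0]. */
  clib_elf_main_init (argv[0]);

  if (clib_elf_symbol_by_address (addr, &sym))
    clib_warning ("%U", format_clib_elf_symbol_with_address, addr);

  return 0;
}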
diff --git a/vppinfra/vppinfra/elog.c b/vppinfra/vppinfra/elog.c
index 7ae4ea1d2e2..e9f06d0948c 100644
--- a/vppinfra/vppinfra/elog.c
+++ b/vppinfra/vppinfra/elog.c
@@ -42,18 +42,20 @@
#include <vppinfra/hash.h>
#include <vppinfra/math.h>
-static inline void elog_lock (elog_main_t * em)
+static inline void
+elog_lock (elog_main_t * em)
{
- if (PREDICT_FALSE(em->lock != 0))
- while (__sync_lock_test_and_set (em->lock, 1))
- ;
+ if (PREDICT_FALSE (em->lock != 0))
+ while (__sync_lock_test_and_set (em->lock, 1))
+ ;
}
-static inline void elog_unlock (elog_main_t * em)
+static inline void
+elog_unlock (elog_main_t * em)
{
- if (PREDICT_FALSE(em->lock != 0))
+ if (PREDICT_FALSE (em->lock != 0))
{
- CLIB_MEMORY_BARRIER();
+ CLIB_MEMORY_BARRIER ();
*em->lock = 0;
}
}
@@ -61,17 +63,19 @@ static inline void elog_unlock (elog_main_t * em)
/* Non-inline version. */
void *
elog_event_data (elog_main_t * em,
- elog_event_type_t * type,
- elog_track_t * track,
- u64 cpu_time)
-{ return elog_event_data_inline (em, type, track, cpu_time); }
+ elog_event_type_t * type, elog_track_t * track, u64 cpu_time)
+{
+ return elog_event_data_inline (em, type, track, cpu_time);
+}
-static void new_event_type (elog_main_t * em, uword i)
+static void
+new_event_type (elog_main_t * em, uword i)
{
- elog_event_type_t * t = vec_elt_at_index (em->event_types, i);
+ elog_event_type_t *t = vec_elt_at_index (em->event_types, i);
- if (! em->event_type_by_format)
- em->event_type_by_format = hash_create_vec (/* size */ 0, sizeof (u8), sizeof (uword));
+ if (!em->event_type_by_format)
+ em->event_type_by_format =
+ hash_create_vec ( /* size */ 0, sizeof (u8), sizeof (uword));
hash_set_mem (em->event_type_by_format, t->format, i);
}
@@ -79,7 +83,7 @@ static void new_event_type (elog_main_t * em, uword i)
static uword
find_or_create_type (elog_main_t * em, elog_event_type_t * t)
{
- uword * p = hash_get_mem (em->event_type_by_format, t->format);
+ uword *p = hash_get_mem (em->event_type_by_format, t->format);
uword i;
if (p)
@@ -95,9 +99,10 @@ find_or_create_type (elog_main_t * em, elog_event_type_t * t)
}
/* External function to register types. */
-word elog_event_type_register (elog_main_t * em, elog_event_type_t * t)
+word
+elog_event_type_register (elog_main_t * em, elog_event_type_t * t)
{
- elog_event_type_t * static_type = t;
+ elog_event_type_t *static_type = t;
word l;
elog_lock (em);
@@ -110,10 +115,10 @@ word elog_event_type_register (elog_main_t * em, elog_event_type_t * t)
/* If format args are not specified try to be smart about providing defaults
so most of the time user does not have to specify them. */
- if (! t->format_args)
+ if (!t->format_args)
{
uword i, l;
- char * this_arg;
+ char *this_arg;
l = strlen (t->format);
for (i = 0; i < l; i++)
@@ -122,28 +127,32 @@ word elog_event_type_register (elog_main_t * em, elog_event_type_t * t)
continue;
if (i + 1 >= l)
continue;
- if (t->format[i+1] == '%') /* %% */
+ if (t->format[i + 1] == '%') /* %% */
continue;
- switch (t->format[i+1]) {
- default:
- case 'd': case 'x': case 'u':
- this_arg = "i4"; /* size of u32 */
- break;
- case 'f':
- this_arg = "f8"; /* defaults to f64 */
- break;
- case 's':
- this_arg = "s0"; /* defaults to null terminated string. */
- break;
- }
+ switch (t->format[i + 1])
+ {
+ default:
+ case 'd':
+ case 'x':
+ case 'u':
+ this_arg = "i4"; /* size of u32 */
+ break;
+ case 'f':
+ this_arg = "f8"; /* defaults to f64 */
+ break;
+ case 's':
+ this_arg = "s0"; /* defaults to null terminated string. */
+ break;
+ }
- t->format_args = (char *) format ((u8 *) t->format_args, "%s", this_arg);
+ t->format_args =
+ (char *) format ((u8 *) t->format_args, "%s", this_arg);
}
/* Null terminate. */
vec_add1 (t->format_args, 0);
- }
+ }
vec_add1 (em->event_types, t[0]);
@@ -163,20 +172,22 @@ word elog_event_type_register (elog_main_t * em, elog_event_type_t * t)
t->n_enum_strings = static_type->n_enum_strings;
for (i = 0; i < t->n_enum_strings; i++)
{
- if (! static_type->enum_strings[i])
+ if (!static_type->enum_strings[i])
static_type->enum_strings[i] = "MISSING";
- vec_add1 (t->enum_strings_vector,
- (char *) format (0, "%s%c", static_type->enum_strings[i], 0));
+ vec_add1 (t->enum_strings_vector,
+ (char *) format (0, "%s%c", static_type->enum_strings[i],
+ 0));
}
}
new_event_type (em, l);
- elog_unlock(em);
+ elog_unlock (em);
return l;
}
-word elog_track_register (elog_main_t * em, elog_track_t * t)
+word
+elog_track_register (elog_main_t * em, elog_track_t * t)
{
word l;
@@ -199,7 +210,8 @@ word elog_track_register (elog_main_t * em, elog_track_t * t)
return l;
}
-static uword parse_2digit_decimal (char * p, uword * number)
+static uword
+parse_2digit_decimal (char *p, uword * number)
{
uword i = 0;
u8 digits[2];
@@ -225,10 +237,11 @@ static uword parse_2digit_decimal (char * p, uword * number)
return 0;
}
-static u8 * fixed_format (u8 * s, char * fmt, char * result, uword * result_len)
+static u8 *
+fixed_format (u8 * s, char *fmt, char *result, uword * result_len)
{
- char * f = fmt;
- char * percent;
+ char *f = fmt;
+ char *percent;
uword l = 0;
while (1)
@@ -266,18 +279,19 @@ static u8 * fixed_format (u8 * s, char * fmt, char * result, uword * result_len)
clib_memcpy (result, percent, l);
result[l] = 0;
- done:
+done:
*result_len = f - fmt;
return s;
}
-u8 * format_elog_event (u8 * s, va_list * va)
+u8 *
+format_elog_event (u8 * s, va_list * va)
{
- elog_main_t * em = va_arg (*va, elog_main_t *);
- elog_event_t * e = va_arg (*va, elog_event_t *);
- elog_event_type_t * t;
- char * a, * f;
- void * d = (u8 *) e->data;
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_event_type_t *t;
+ char *a, *f;
+ void *d = (u8 *) e->data;
char arg_format[64];
t = vec_elt_at_index (em->event_types, e->type);
@@ -324,12 +338,14 @@ u8 * format_elog_event (u8 * s, va_list * va)
ASSERT (0);
if (a[0] == 't')
{
- char * e = vec_elt (t->enum_strings_vector, n_bytes == 8 ? l : i);
+ char *e =
+ vec_elt (t->enum_strings_vector, n_bytes == 8 ? l : i);
s = format (s, arg_format, e);
}
else if (a[0] == 'T')
{
- char * e = vec_elt_at_index (em->string_table, n_bytes == 8 ? l : i);
+ char *e =
+ vec_elt_at_index (em->string_table, n_bytes == 8 ? l : i);
s = format (s, arg_format, e);
}
else if (n_bytes == 8)
@@ -371,15 +387,17 @@ u8 * format_elog_event (u8 * s, va_list * va)
return s;
}
-u8 * format_elog_track (u8 * s, va_list * va)
+u8 *
+format_elog_track (u8 * s, va_list * va)
{
- elog_main_t * em = va_arg (*va, elog_main_t *);
- elog_event_t * e = va_arg (*va, elog_event_t *);
- elog_track_t * t = vec_elt_at_index (em->tracks, e->track);
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_track_t *t = vec_elt_at_index (em->tracks, e->track);
return format (s, "%s", t->name);
}
-void elog_time_now (elog_time_stamp_t * et)
+void
+elog_time_now (elog_time_stamp_t * et)
{
u64 cpu_time_now, os_time_now_nsec;
@@ -401,14 +419,16 @@ void elog_time_now (elog_time_stamp_t * et)
}
always_inline i64
-elog_time_stamp_diff_os_nsec (elog_time_stamp_t * t1,
- elog_time_stamp_t * t2)
-{ return (i64) t1->os_nsec - (i64) t2->os_nsec; }
+elog_time_stamp_diff_os_nsec (elog_time_stamp_t * t1, elog_time_stamp_t * t2)
+{
+ return (i64) t1->os_nsec - (i64) t2->os_nsec;
+}
always_inline i64
-elog_time_stamp_diff_cpu (elog_time_stamp_t * t1,
- elog_time_stamp_t * t2)
-{ return (i64) t1->cpu - (i64) t2->cpu; }
+elog_time_stamp_diff_cpu (elog_time_stamp_t * t1, elog_time_stamp_t * t2)
+{
+ return (i64) t1->cpu - (i64) t2->cpu;
+}
always_inline f64
elog_nsec_per_clock (elog_main_t * em)
@@ -419,11 +439,12 @@ elog_nsec_per_clock (elog_main_t * em)
&em->init_time));
}
-void elog_alloc (elog_main_t * em, u32 n_events)
+void
+elog_alloc (elog_main_t * em, u32 n_events)
{
if (em->event_ring)
vec_free (em->event_ring);
-
+
/* Ring size must be a power of 2. */
em->event_ring_size = n_events = max_pow2 (n_events);
@@ -432,7 +453,8 @@ void elog_alloc (elog_main_t * em, u32 n_events)
vec_resize_aligned (em->event_ring, n_events, CLIB_CACHE_LINE_BYTES);
}
-void elog_init (elog_main_t * em, u32 n_events)
+void
+elog_init (elog_main_t * em, u32 n_events)
{
memset (em, 0, sizeof (em[0]));
@@ -453,7 +475,8 @@ void elog_init (elog_main_t * em, u32 n_events)
}
/* Returns number of events in ring and start index. */
-static uword elog_event_range (elog_main_t * em, uword * lo)
+static uword
+elog_event_range (elog_main_t * em, uword * lo)
{
uword l = em->event_ring_size;
u64 i = em->n_total_events;
@@ -461,19 +484,22 @@ static uword elog_event_range (elog_main_t * em, uword * lo)
/* Ring never wrapped? */
if (i <= (u64) l)
{
- if (lo) *lo = 0;
+ if (lo)
+ *lo = 0;
return i;
}
else
{
- if (lo) *lo = i & (l - 1);
+ if (lo)
+ *lo = i & (l - 1);
return l;
}
}
-elog_event_t * elog_peek_events (elog_main_t * em)
+elog_event_t *
+elog_peek_events (elog_main_t * em)
{
- elog_event_t * e, * f, * es = 0;
+ elog_event_t *e, *f, *es = 0;
uword i, j, n;
n = elog_event_range (em, &j);
@@ -484,7 +510,9 @@ elog_event_t * elog_peek_events (elog_main_t * em)
e[0] = f[0];
/* Convert absolute time from cycles to seconds from start. */
- e->time = (e->time_cycles - em->init_time.cpu) * em->cpu_timer.seconds_per_clock;
+ e->time =
+ (e->time_cycles -
+ em->init_time.cpu) * em->cpu_timer.seconds_per_clock;
j = (j + 1) & (em->event_ring_size - 1);
}
@@ -493,7 +521,8 @@ elog_event_t * elog_peek_events (elog_main_t * em)
}
/* Add a formatted string to the string table. */
-u32 elog_string (elog_main_t * em, char * fmt, ...)
+u32
+elog_string (elog_main_t * em, char *fmt, ...)
{
u32 offset;
va_list va;
@@ -510,19 +539,20 @@ u32 elog_string (elog_main_t * em, char * fmt, ...)
return offset;
}
-elog_event_t * elog_get_events (elog_main_t * em)
+elog_event_t *
+elog_get_events (elog_main_t * em)
{
- if (! em->events)
+ if (!em->events)
em->events = elog_peek_events (em);
return em->events;
}
-static void maybe_fix_string_table_offset (elog_event_t * e,
- elog_event_type_t * t,
- u32 offset)
+static void
+maybe_fix_string_table_offset (elog_event_t * e,
+ elog_event_type_t * t, u32 offset)
{
- void * d = (u8 *) e->data;
- char * a;
+ void *d = (u8 *) e->data;
+ char *a;
if (offset == 0)
return;
@@ -534,7 +564,7 @@ static void maybe_fix_string_table_offset (elog_event_t * e,
uword n_bytes = 0, n_digits;
if (a[0] == 0)
- break;
+ break;
/* Don't go past end of event data. */
ASSERT (d < (void *) (e->data + sizeof (e->data)));
@@ -543,9 +573,9 @@ static void maybe_fix_string_table_offset (elog_event_t * e,
switch (a[0])
{
case 'T':
- ASSERT (n_bytes == 4);
- clib_mem_unaligned (d, u32) += offset;
- break;
+ ASSERT (n_bytes == 4);
+ clib_mem_unaligned (d, u32) += offset;
+ break;
case 'i':
case 't':
@@ -564,25 +594,26 @@ static void maybe_fix_string_table_offset (elog_event_t * e,
}
}
-static int elog_cmp (void * a1, void * a2)
+static int
+elog_cmp (void *a1, void *a2)
{
- elog_event_t * e1 = a1;
- elog_event_t * e2 = a2;
+ elog_event_t *e1 = a1;
+ elog_event_t *e2 = a2;
return e1->time - e2->time;
}
-void elog_merge (elog_main_t * dst, u8 * dst_tag,
- elog_main_t * src, u8 * src_tag)
+void
+elog_merge (elog_main_t * dst, u8 * dst_tag, elog_main_t * src, u8 * src_tag)
{
- elog_event_t * e;
+ elog_event_t *e;
uword l;
u32 string_table_offset_for_src_events;
u32 track_offset_for_src_tracks;
elog_track_t newt;
int i;
- memset(&newt, 0, sizeof (newt));
+ memset (&newt, 0, sizeof (newt));
elog_get_events (src);
elog_get_events (dst);
@@ -596,42 +627,43 @@ void elog_merge (elog_main_t * dst, u8 * dst_tag,
/* Prepend the supplied tag (if any) to all dst track names */
if (dst_tag)
{
- for (i = 0; i < vec_len(dst->tracks); i++)
- {
- elog_track_t * t = vec_elt_at_index (dst->tracks, i);
- char * new_name;
-
- new_name = (char *) format (0, "%s:%s%c", dst_tag, t->name, 0);
- vec_free (t->name);
- t->name = new_name;
- }
+ for (i = 0; i < vec_len (dst->tracks); i++)
+ {
+ elog_track_t *t = vec_elt_at_index (dst->tracks, i);
+ char *new_name;
+
+ new_name = (char *) format (0, "%s:%s%c", dst_tag, t->name, 0);
+ vec_free (t->name);
+ t->name = new_name;
+ }
}
-
+
track_offset_for_src_tracks = vec_len (dst->tracks);
-
+
/* Copy / tag source tracks */
for (i = 0; i < vec_len (src->tracks); i++)
{
- elog_track_t * t = vec_elt_at_index (src->tracks, i);
+ elog_track_t *t = vec_elt_at_index (src->tracks, i);
if (src_tag)
- newt.name = (char *) format (0, "%s:%s%c", src_tag, t->name, 0);
+ newt.name = (char *) format (0, "%s:%s%c", src_tag, t->name, 0);
else
- newt.name = (char *) format (0, "%s%c", t->name, 0);
+ newt.name = (char *) format (0, "%s%c", t->name, 0);
(void) elog_track_register (dst, &newt);
vec_free (newt.name);
}
-
+
/* Across all (copied) src events... */
for (e = dst->events + l; e < vec_end (dst->events); e++)
{
- elog_event_type_t * t = vec_elt_at_index (src->event_types, e->type);
-
+ elog_event_type_t *t = vec_elt_at_index (src->event_types, e->type);
+
/* Remap type from src -> dst. */
e->type = find_or_create_type (dst, t);
/* Remap string table offsets for 'T' format args */
- maybe_fix_string_table_offset (e, t, string_table_offset_for_src_events);
-
+ maybe_fix_string_table_offset (e, t,
+ string_table_offset_for_src_events);
+
/* Remap track */
e->track += track_offset_for_src_tracks;
}
@@ -648,11 +680,13 @@ void elog_merge (elog_main_t * dst, u8 * dst_tag,
dst->nsec_per_cpu_clock = src->nsec_per_cpu_clock;
}
- dt_os_nsec = elog_time_stamp_diff_os_nsec (&src->init_time, &dst->init_time);
+ dt_os_nsec =
+ elog_time_stamp_diff_os_nsec (&src->init_time, &dst->init_time);
dt_event = dt_os_nsec;
- dt_clock_nsec = (elog_time_stamp_diff_cpu (&src->init_time, &dst->init_time)
- * .5*(dst->nsec_per_cpu_clock + src->nsec_per_cpu_clock));
+ dt_clock_nsec =
+ (elog_time_stamp_diff_cpu (&src->init_time, &dst->init_time) * .5 *
+ (dst->nsec_per_cpu_clock + src->nsec_per_cpu_clock));
/* Heuristic to see if src/dst came from same time source.
If frequencies are "the same" and os clock and cpu clock agree
@@ -689,18 +723,18 @@ void elog_merge (elog_main_t * dst, u8 * dst_tag,
ASSERT (dst->cpu_timer.seconds_per_clock);
elog_alloc (dst, vec_len (dst->events));
- for (i = 0; i < vec_len(dst->events); i++)
+ for (i = 0; i < vec_len (dst->events); i++)
{
- elog_event_t *es, *ed;
-
- es = dst->events + i;
- ed = dst->event_ring + i;
-
- ed[0] = es[0];
-
- /* Invert elog_peek_events calculation */
- ed->time_cycles =
- (es->time/dst->cpu_timer.seconds_per_clock) + dst->init_time.cpu;
+ elog_event_t *es, *ed;
+
+ es = dst->events + i;
+ ed = dst->event_ring + i;
+
+ ed[0] = es[0];
+
+ /* Invert elog_peek_events calculation */
+ ed->time_cycles =
+ (es->time / dst->cpu_timer.seconds_per_clock) + dst->init_time.cpu;
}
dst->n_total_events = vec_len (dst->events);
}
@@ -709,11 +743,11 @@ void elog_merge (elog_main_t * dst, u8 * dst_tag,
static void
serialize_elog_event (serialize_main_t * m, va_list * va)
{
- elog_main_t * em = va_arg (*va, elog_main_t *);
- elog_event_t * e = va_arg (*va, elog_event_t *);
- elog_event_type_t * t = vec_elt_at_index (em->event_types, e->type);
- u8 * d = e->data;
- u8 * p = (u8 *) t->format_args;
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_event_type_t *t = vec_elt_at_index (em->event_types, e->type);
+ u8 *d = e->data;
+ u8 *p = (u8 *) t->format_args;
serialize_integer (m, e->type, sizeof (e->type));
serialize_integer (m, e->track, sizeof (e->track));
@@ -770,10 +804,10 @@ serialize_elog_event (serialize_main_t * m, va_list * va)
static void
unserialize_elog_event (serialize_main_t * m, va_list * va)
{
- elog_main_t * em = va_arg (*va, elog_main_t *);
- elog_event_t * e = va_arg (*va, elog_event_t *);
- elog_event_type_t * t;
- u8 * p, * d;
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e = va_arg (*va, elog_event_t *);
+ elog_event_type_t *t;
+ u8 *p, *d;
{
u16 tmp[2];
@@ -833,15 +867,16 @@ unserialize_elog_event (serialize_main_t * m, va_list * va)
ASSERT (0);
break;
- case 's': {
- char * t;
- unserialize_cstring (m, &t);
- if (n_bytes == 0)
- n_bytes = strlen (t) + 1;
- clib_memcpy (d, t, clib_min (n_bytes, vec_len (t)));
- vec_free (t);
- break;
- }
+ case 's':
+ {
+ char *t;
+ unserialize_cstring (m, &t);
+ if (n_bytes == 0)
+ n_bytes = strlen (t) + 1;
+ clib_memcpy (d, t, clib_min (n_bytes, vec_len (t)));
+ vec_free (t);
+ break;
+ }
case 'f':
if (n_bytes == 4)
@@ -873,15 +908,17 @@ unserialize_elog_event (serialize_main_t * m, va_list * va)
static void
serialize_elog_event_type (serialize_main_t * m, va_list * va)
{
- elog_event_type_t * t = va_arg (*va, elog_event_type_t *);
+ elog_event_type_t *t = va_arg (*va, elog_event_type_t *);
int n = va_arg (*va, int);
int i, j;
for (i = 0; i < n; i++)
{
serialize_cstring (m, t[i].format);
serialize_cstring (m, t[i].format_args);
- serialize_integer (m, t[i].type_index_plus_one, sizeof (t->type_index_plus_one));
- serialize_integer (m, t[i].n_enum_strings, sizeof (t[i].n_enum_strings));
+ serialize_integer (m, t[i].type_index_plus_one,
+ sizeof (t->type_index_plus_one));
+ serialize_integer (m, t[i].n_enum_strings,
+ sizeof (t[i].n_enum_strings));
for (j = 0; j < t[i].n_enum_strings; j++)
serialize_cstring (m, t[i].enum_strings_vector[j]);
}
@@ -890,15 +927,17 @@ serialize_elog_event_type (serialize_main_t * m, va_list * va)
static void
unserialize_elog_event_type (serialize_main_t * m, va_list * va)
{
- elog_event_type_t * t = va_arg (*va, elog_event_type_t *);
+ elog_event_type_t *t = va_arg (*va, elog_event_type_t *);
int n = va_arg (*va, int);
int i, j;
for (i = 0; i < n; i++)
{
unserialize_cstring (m, &t[i].format);
unserialize_cstring (m, &t[i].format_args);
- unserialize_integer (m, &t[i].type_index_plus_one, sizeof (t->type_index_plus_one));
- unserialize_integer (m, &t[i].n_enum_strings, sizeof (t[i].n_enum_strings));
+ unserialize_integer (m, &t[i].type_index_plus_one,
+ sizeof (t->type_index_plus_one));
+ unserialize_integer (m, &t[i].n_enum_strings,
+ sizeof (t[i].n_enum_strings));
vec_resize (t[i].enum_strings_vector, t[i].n_enum_strings);
for (j = 0; j < t[i].n_enum_strings; j++)
unserialize_cstring (m, &t[i].enum_strings_vector[j]);
@@ -908,7 +947,7 @@ unserialize_elog_event_type (serialize_main_t * m, va_list * va)
static void
serialize_elog_track (serialize_main_t * m, va_list * va)
{
- elog_track_t * t = va_arg (*va, elog_track_t *);
+ elog_track_t *t = va_arg (*va, elog_track_t *);
int n = va_arg (*va, int);
int i;
for (i = 0; i < n; i++)
@@ -920,7 +959,7 @@ serialize_elog_track (serialize_main_t * m, va_list * va)
static void
unserialize_elog_track (serialize_main_t * m, va_list * va)
{
- elog_track_t * t = va_arg (*va, elog_track_t *);
+ elog_track_t *t = va_arg (*va, elog_track_t *);
int n = va_arg (*va, int);
int i;
for (i = 0; i < n; i++)
@@ -932,7 +971,7 @@ unserialize_elog_track (serialize_main_t * m, va_list * va)
static void
serialize_elog_time_stamp (serialize_main_t * m, va_list * va)
{
- elog_time_stamp_t * st = va_arg (*va, elog_time_stamp_t *);
+ elog_time_stamp_t *st = va_arg (*va, elog_time_stamp_t *);
serialize (m, serialize_64, st->os_nsec);
serialize (m, serialize_64, st->cpu);
}
@@ -940,18 +979,18 @@ serialize_elog_time_stamp (serialize_main_t * m, va_list * va)
static void
unserialize_elog_time_stamp (serialize_main_t * m, va_list * va)
{
- elog_time_stamp_t * st = va_arg (*va, elog_time_stamp_t *);
+ elog_time_stamp_t *st = va_arg (*va, elog_time_stamp_t *);
unserialize (m, unserialize_64, &st->os_nsec);
unserialize (m, unserialize_64, &st->cpu);
}
-static char * elog_serialize_magic = "elog v0";
+static char *elog_serialize_magic = "elog v0";
void
serialize_elog_main (serialize_main_t * m, va_list * va)
{
- elog_main_t * em = va_arg (*va, elog_main_t *);
- elog_event_t * e;
+ elog_main_t *em = va_arg (*va, elog_main_t *);
+ elog_event_t *e;
serialize_magic (m, elog_serialize_magic, strlen (elog_serialize_magic));
@@ -974,14 +1013,13 @@ serialize_elog_main (serialize_main_t * m, va_list * va)
/* SMP logs can easily have local time paradoxes... */
vec_sort_with_function (em->events, elog_cmp);
- vec_foreach (e, em->events)
- serialize (m, serialize_elog_event, em, e);
+ vec_foreach (e, em->events) serialize (m, serialize_elog_event, em, e);
}
void
unserialize_elog_main (serialize_main_t * m, va_list * va)
{
- elog_main_t * em = va_arg (*va, elog_main_t *);
+ elog_main_t *em = va_arg (*va, elog_main_t *);
uword i;
u32 rs;
@@ -1005,7 +1043,7 @@ unserialize_elog_main (serialize_main_t * m, va_list * va)
{
u32 ne;
- elog_event_t * e;
+ elog_event_t *e;
unserialize_integer (m, &ne, sizeof (u32));
vec_resize (em->events, ne);
@@ -1013,3 +1051,11 @@ unserialize_elog_main (serialize_main_t * m, va_list * va)
unserialize (m, unserialize_elog_event, em, e);
}
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
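
Since elog_merge () is one of the larger functions touched above, a minimal call sketch may help. The file names are hypothetical and the memsets merely guarantee a clean starting state; elog_read_file (), elog_merge () and elog_write_file () are the real entry points exercised here.

#include <vppinfra/elog.h>
#include <vppinfra/string.h>

/* Sketch only: merge two previously saved logs, tagging each side's
   tracks so their events remain distinguishable after the merge. */
static clib_error_t *
merge_logs (elog_main_t * dst, elog_main_t * src)
{
  clib_error_t *error;

  memset (dst, 0, sizeof (dst[0]));
  memset (src, 0, sizeof (src[0]));

  if ((error = elog_read_file (dst, "main.elog")))
    return error;
  if ((error = elog_read_file (src, "worker.elog")))
    return error;

  /* Tags are prepended to the track names, e.g. "main:..." / "worker:...". */
  elog_merge (dst, (u8 *) "main", src, (u8 *) "worker");

  return elog_write_file (dst, "merged.elog");
}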
diff --git a/vppinfra/vppinfra/elog.h b/vppinfra/vppinfra/elog.h
index afa677338e5..9756fb83a8d 100644
--- a/vppinfra/vppinfra/elog.h
+++ b/vppinfra/vppinfra/elog.h
@@ -41,13 +41,15 @@
#define included_clib_elog_h
#include <vppinfra/cache.h>
-#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/error.h> /* for ASSERT */
#include <vppinfra/serialize.h>
-#include <vppinfra/time.h> /* for clib_cpu_time_now */
+#include <vppinfra/time.h> /* for clib_cpu_time_now */
#include <vppinfra/mhash.h>
-typedef struct{
- union {
+typedef struct
+{
+ union
+ {
/* Absolute time stamp in CPU clock cycles. */
u64 time_cycles;
@@ -60,23 +62,24 @@ typedef struct{
/* Track for this event. Tracks allow events to be sorted and
displayed by track. Think of 2 dimensional display with time and
- track being the x and y axes.*/
+ track being the x and y axes. */
u16 track;
/* 20-bytes of data follows and pads to 32 bytes. */
u8 data[20];
} elog_event_t;
-typedef struct {
+typedef struct
+{
/* Type index plus one assigned to this type.
This is used to mark type as seen. */
u32 type_index_plus_one;
/* String table as a vector constructed when type is registered. */
- char ** enum_strings_vector;
+ char **enum_strings_vector;
/* Format string. (example: "my-event (%d,%d)"). */
- char * format;
+ char *format;
/* Specifies how arguments to format are parsed from event data.
String of characters '0' '1' or '2' '3' to specify log2 size of data
@@ -85,28 +88,30 @@ typedef struct {
't' means argument is an index into enum string table for this type.
'e' is a float,
'f' is a double. */
- char * format_args;
+ char *format_args;
/* Function name generating event. */
- char * function;
+ char *function;
/* Number of elements in string enum table. */
u32 n_enum_strings;
/* String table for enum/number to string formatting. */
- char * enum_strings[];
+ char *enum_strings[];
} elog_event_type_t;
-typedef struct {
+typedef struct
+{
/* Track name vector. */
- char * name;
+ char *name;
/* Set to one when track has been added to
main structure. */
u32 track_index_plus_one;
} elog_track_t;
-typedef struct {
+typedef struct
+{
/* CPU cycle counter. */
u64 cpu;
@@ -114,7 +119,8 @@ typedef struct {
u64 os_nsec;
} elog_time_stamp_t;
-typedef struct {
+typedef struct
+{
/* Total number of events in buffer. */
u32 n_total_events;
@@ -130,19 +136,19 @@ typedef struct {
/* Vector of events (circular buffer). Power of 2 size.
Used when events are being collected. */
- elog_event_t * event_ring;
+ elog_event_t *event_ring;
/* Vector of event types. */
- elog_event_type_t * event_types;
+ elog_event_type_t *event_types;
/* Hash table mapping type format to type index. */
- uword * event_type_by_format;
+ uword *event_type_by_format;
/* Events may refer to strings in string table. */
- char * string_table;
+ char *string_table;
/* Vector of tracks. */
- elog_track_t * tracks;
+ elog_track_t *tracks;
/* Default track. */
elog_track_t default_track;
@@ -153,23 +159,27 @@ typedef struct {
elog_time_stamp_t init_time, serialize_time;
/* SMP lock, non-zero means locking required */
- uword * lock;
+ uword *lock;
/* Use serialize_time and init_time to give estimate for
cpu clock frequency. */
f64 nsec_per_cpu_clock;
/* Vector of events converted to generic form after collection. */
- elog_event_t * events;
+ elog_event_t *events;
} elog_main_t;
always_inline uword
elog_n_events_in_buffer (elog_main_t * em)
-{ return clib_min (em->n_total_events, em->event_ring_size); }
+{
+ return clib_min (em->n_total_events, em->event_ring_size);
+}
always_inline uword
elog_buffer_capacity (elog_main_t * em)
-{ return em->event_ring_size; }
+{
+ return em->event_ring_size;
+}
always_inline void
elog_reset_buffer (elog_main_t * em)
@@ -191,13 +201,18 @@ elog_enable_disable (elog_main_t * em, int is_enabled)
event will not be lost as long as N < RING_SIZE. */
always_inline void
elog_disable_after_events (elog_main_t * em, uword n)
-{ em->n_total_events_disable_limit = em->n_total_events + n; }
+{
+ em->n_total_events_disable_limit = em->n_total_events + n;
+}
/* Signal a trigger. We do this when we encounter an event that we want to save
context around (before and after). */
always_inline void
elog_disable_trigger (elog_main_t * em)
-{ em->n_total_events_disable_limit = em->n_total_events + vec_len (em->event_ring) / 2; }
+{
+ em->n_total_events_disable_limit =
+ em->n_total_events + vec_len (em->event_ring) / 2;
+}
/* External function to register types/tracks. */
word elog_event_type_register (elog_main_t * em, elog_event_type_t * t);
@@ -205,22 +220,23 @@ word elog_track_register (elog_main_t * em, elog_track_t * t);
always_inline uword
elog_is_enabled (elog_main_t * em)
-{ return em->n_total_events < em->n_total_events_disable_limit; }
+{
+ return em->n_total_events < em->n_total_events_disable_limit;
+}
/* Add an event to the log. Returns a pointer to the
data for caller to write into. */
always_inline void *
elog_event_data_inline (elog_main_t * em,
elog_event_type_t * type,
- elog_track_t * track,
- u64 cpu_time)
+ elog_track_t * track, u64 cpu_time)
{
- elog_event_t * e;
+ elog_event_t *e;
uword ei;
word type_index, track_index;
/* Return the user dummy memory to scribble data into. */
- if (PREDICT_FALSE (! elog_is_enabled (em)))
+ if (PREDICT_FALSE (!elog_is_enabled (em)))
return em->dummy_event.data;
type_index = (word) type->type_index_plus_one - 1;
@@ -254,21 +270,18 @@ elog_event_data_inline (elog_main_t * em,
}
/* External version of inline. */
-void *
-elog_event_data (elog_main_t * em,
- elog_event_type_t * type,
- elog_track_t * track,
- u64 cpu_time);
+void *elog_event_data (elog_main_t * em,
+ elog_event_type_t * type,
+ elog_track_t * track, u64 cpu_time);
/* Non-inline version. */
always_inline void *
elog_event_data_not_inline (elog_main_t * em,
elog_event_type_t * type,
- elog_track_t * track,
- u64 cpu_time)
+ elog_track_t * track, u64 cpu_time)
{
/* Return the user dummy memory to scribble data into. */
- if (PREDICT_FALSE (! elog_is_enabled (em)))
+ if (PREDICT_FALSE (!elog_is_enabled (em)))
return em->dummy_event.data;
return elog_event_data (em, type, track, cpu_time);
}
@@ -277,11 +290,10 @@ elog_event_data_not_inline (elog_main_t * em,
always_inline void
elog (elog_main_t * em, elog_event_type_t * type, u32 data)
{
- u32 * d = elog_event_data_not_inline
- (em,
- type,
- &em->default_track,
- clib_cpu_time_now ());
+ u32 *d = elog_event_data_not_inline (em,
+ type,
+ &em->default_track,
+ clib_cpu_time_now ());
d[0] = data;
}
@@ -289,52 +301,46 @@ elog (elog_main_t * em, elog_event_type_t * type, u32 data)
always_inline void
elog_inline (elog_main_t * em, elog_event_type_t * type, u32 data)
{
- u32 * d = elog_event_data_inline
- (em,
- type,
- &em->default_track,
- clib_cpu_time_now ());
+ u32 *d = elog_event_data_inline (em,
+ type,
+ &em->default_track,
+ clib_cpu_time_now ());
d[0] = data;
}
always_inline void
-elog_track (elog_main_t * em, elog_event_type_t * type, elog_track_t *track,
- u32 data)
+elog_track (elog_main_t * em, elog_event_type_t * type, elog_track_t * track,
+ u32 data)
{
- u32 * d = elog_event_data_not_inline
- (em,
- type,
- track,
- clib_cpu_time_now ());
+ u32 *d = elog_event_data_not_inline (em,
+ type,
+ track,
+ clib_cpu_time_now ());
d[0] = data;
}
always_inline void
-elog_track_inline (elog_main_t * em, elog_event_type_t * type,
- elog_track_t *track, u32 data)
+elog_track_inline (elog_main_t * em, elog_event_type_t * type,
+ elog_track_t * track, u32 data)
{
- u32 * d = elog_event_data_inline
- (em,
- type,
- track,
- clib_cpu_time_now ());
+ u32 *d = elog_event_data_inline (em,
+ type,
+ track,
+ clib_cpu_time_now ());
d[0] = data;
}
always_inline void *
elog_data (elog_main_t * em, elog_event_type_t * type, elog_track_t * track)
{
- return elog_event_data_not_inline
- (em, type, track,
- clib_cpu_time_now ());
+ return elog_event_data_not_inline (em, type, track, clib_cpu_time_now ());
}
always_inline void *
-elog_data_inline (elog_main_t * em, elog_event_type_t * type, elog_track_t * track)
+elog_data_inline (elog_main_t * em, elog_event_type_t * type,
+ elog_track_t * track)
{
- return elog_event_data_inline
- (em, type, track,
- clib_cpu_time_now ());
+ return elog_event_data_inline (em, type, track, clib_cpu_time_now ());
}
/* Macro shorthands for generating/declaring events. */
@@ -386,23 +392,23 @@ elog_data_inline (elog_main_t * em, elog_event_type_t * type, elog_track_t * tra
#define ELOG_DATA(em,f) elog_data ((em), &__ELOG_TYPE_VAR (f), &(em)->default_track)
#define ELOG_DATA_INLINE(em,f) elog_data_inline ((em), &__ELOG_TYPE_VAR (f), &(em)->default_track)
-u32 elog_string (elog_main_t * em, char * format, ...);
+u32 elog_string (elog_main_t * em, char *format, ...);
void elog_time_now (elog_time_stamp_t * et);
/* Convert ievents to events and return them as a vector.
Sets em->events to resulting vector. */
-elog_event_t * elog_get_events (elog_main_t * em);
+elog_event_t *elog_get_events (elog_main_t * em);
/* Convert ievents to events and return them as a vector with no side effects. */
-elog_event_t * elog_peek_events (elog_main_t * em);
+elog_event_t *elog_peek_events (elog_main_t * em);
/* Merge two logs, add supplied track tags. */
-void elog_merge (elog_main_t * dst, u8 * dst_tag,
- elog_main_t * src, u8 * src_tag);
+void elog_merge (elog_main_t * dst, u8 * dst_tag,
+ elog_main_t * src, u8 * src_tag);
/* 2 arguments elog_main_t and elog_event_t to format event or track name. */
-u8 * format_elog_event (u8 * s, va_list * va);
-u8 * format_elog_track (u8 * s, va_list * va);
+u8 *format_elog_event (u8 * s, va_list * va);
+u8 *format_elog_track (u8 * s, va_list * va);
void serialize_elog_main (serialize_main_t * m, va_list * va);
void unserialize_elog_main (serialize_main_t * m, va_list * va);
@@ -412,31 +418,31 @@ void elog_alloc (elog_main_t * em, u32 n_events);
#ifdef CLIB_UNIX
always_inline clib_error_t *
-elog_write_file (elog_main_t * em, char * unix_file)
+elog_write_file (elog_main_t * em, char *unix_file)
{
serialize_main_t m;
- clib_error_t * error;
+ clib_error_t *error;
error = serialize_open_unix_file (&m, unix_file);
if (error)
return error;
error = serialize (&m, serialize_elog_main, em);
- if (! error)
+ if (!error)
serialize_close (&m);
return error;
}
always_inline clib_error_t *
-elog_read_file (elog_main_t * em, char * unix_file)
+elog_read_file (elog_main_t * em, char *unix_file)
{
serialize_main_t m;
- clib_error_t * error;
+ clib_error_t *error;
error = unserialize_open_unix_file (&m, unix_file);
if (error)
return error;
error = unserialize (&m, unserialize_elog_main, em);
- if (! error)
+ if (!error)
unserialize_close (&m);
return error;
}
@@ -444,3 +450,11 @@ elog_read_file (elog_main_t * em, char * unix_file)
#endif /* CLIB_UNIX */
#endif /* included_clib_elog_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
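
The registration and logging paths reformatted in elog.h and elog.c are easiest to follow with a small end-to-end sketch. The event type ("rx-packets %d"), ring size and output path are hypothetical; leaving format_args unset lets elog_event_type_register () derive the "i4" default exactly as in the elog.c hunk above. A CLIB_UNIX build is assumed for elog_write_file ().

#include <vppinfra/elog.h>

/* Sketch only: one static event type carrying a single u32 argument. */
static elog_event_type_t rx_event_type = {
  .format = "rx-packets %d",
};

static clib_error_t *
elog_example (void)
{
  elog_main_t em;

  /* Ring size is rounded up to a power of 2 internally. */
  elog_init (&em, 1024);

  /* Explicit registration; format_args defaults to "i4" for the %d above. */
  elog_event_type_register (&em, &rx_event_type);

  elog (&em, &rx_event_type, 17);
  elog (&em, &rx_event_type, 23);

  return elog_write_file (&em, "/tmp/rx.elog");
}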
diff --git a/vppinfra/vppinfra/error.c b/vppinfra/vppinfra/error.c
index 1af9eb0f515..2722fb7be7e 100644
--- a/vppinfra/vppinfra/error.c
+++ b/vppinfra/vppinfra/error.c
@@ -38,7 +38,7 @@
/* Error reporting. */
#include <stdarg.h>
-#include <vppinfra/clib.h> /* for HAVE_ERRNO */
+#include <vppinfra/clib.h> /* for HAVE_ERRNO */
#ifdef CLIB_LINUX_KERNEL
#include <linux/unistd.h> /* for write */
@@ -52,7 +52,7 @@
#endif
#ifdef CLIB_STANDALONE
-#include <vppinfra/standalone_stdio.h> /* for printf */
+#include <vppinfra/standalone_stdio.h> /* for printf */
#endif
#include <vppinfra/string.h>
@@ -61,36 +61,41 @@
#include <vppinfra/format.h>
#include <vppinfra/error.h>
#include <vppinfra/hash.h>
-#include <vppinfra/os.h> /* for os_panic/os_exit/os_puts */
+#include <vppinfra/os.h> /* for os_panic/os_exit/os_puts */
-typedef struct {
- clib_error_handler_func_t * func;
- void * arg;
+typedef struct
+{
+ clib_error_handler_func_t *func;
+ void *arg;
} clib_error_handler_t;
-static clib_error_handler_t * handlers = 0;
+static clib_error_handler_t *handlers = 0;
-void clib_error_register_handler (clib_error_handler_func_t func, void * arg)
+void
+clib_error_register_handler (clib_error_handler_func_t func, void *arg)
{
- clib_error_handler_t h = { .func = func, .arg = arg, };
+ clib_error_handler_t h = {.func = func,.arg = arg, };
vec_add1 (handlers, h);
}
-static void debugger (void)
+static void
+debugger (void)
{
os_panic ();
}
-static void error_exit (int code)
+static void
+error_exit (int code)
{
os_exit (code);
}
-static u8 * dispatch_message (u8 * msg)
+static u8 *
+dispatch_message (u8 * msg)
{
word i;
- if (! msg)
+ if (!msg)
return msg;
for (i = 0; i < vec_len (handlers); i++)
@@ -103,12 +108,11 @@ static u8 * dispatch_message (u8 * msg)
return msg;
}
-void _clib_error (int how_to_die,
- char * function_name,
- uword line_number,
- char * fmt, ...)
+void
+_clib_error (int how_to_die,
+ char *function_name, uword line_number, char *fmt, ...)
{
- u8 * msg = 0;
+ u8 *msg = 0;
va_list va;
if (function_name)
@@ -141,13 +145,11 @@ void _clib_error (int how_to_die,
error_exit (1);
}
-clib_error_t * _clib_error_return (clib_error_t * errors,
- any code,
- uword flags,
- char * where,
- char * fmt, ...)
+clib_error_t *
+_clib_error_return (clib_error_t * errors,
+ any code, uword flags, char *where, char *fmt, ...)
{
- clib_error_t * e;
+ clib_error_t *e;
va_list va;
#ifdef HAVE_ERRNO
@@ -168,7 +170,7 @@ clib_error_t * _clib_error_return (clib_error_t * errors,
e->what = format (e->what, "%s", strerror (errno_save));
}
#endif
-
+
e->where = (u8 *) where;
e->code = code;
e->flags = flags;
@@ -176,48 +178,50 @@ clib_error_t * _clib_error_return (clib_error_t * errors,
return errors;
}
-void * clib_error_free_vector (clib_error_t * errors)
+void *
+clib_error_free_vector (clib_error_t * errors)
{
- clib_error_t * e;
- vec_foreach (e, errors)
- vec_free (e->what);
+ clib_error_t *e;
+ vec_foreach (e, errors) vec_free (e->what);
vec_free (errors);
return 0;
}
-u8 * format_clib_error (u8 * s, va_list * va)
+u8 *
+format_clib_error (u8 * s, va_list * va)
{
- clib_error_t * errors = va_arg (*va, clib_error_t *);
- clib_error_t * e;
+ clib_error_t *errors = va_arg (*va, clib_error_t *);
+ clib_error_t *e;
vec_foreach (e, errors)
- {
- if (! e->what)
- continue;
+ {
+ if (!e->what)
+ continue;
- if (e->where)
- {
- u8 * where = 0;
+ if (e->where)
+ {
+ u8 *where = 0;
- if (e > errors)
- where = format (where, "from ");
- where = format (where, "%s", e->where);
+ if (e > errors)
+ where = format (where, "from ");
+ where = format (where, "%s", e->where);
- s = format (s, "%v: ", where);
- vec_free (where);
- }
+ s = format (s, "%v: ", where);
+ vec_free (where);
+ }
- s = format (s, "%v\n", e->what);
- }
+ s = format (s, "%v\n", e->what);
+ }
return s;
}
-clib_error_t * _clib_error_report (clib_error_t * errors)
+clib_error_t *
+_clib_error_report (clib_error_t * errors)
{
if (errors)
{
- u8 * msg = format (0, "%U", format_clib_error, errors);
+ u8 *msg = format (0, "%U", format_clib_error, errors);
msg = dispatch_message (msg);
vec_free (msg);
@@ -234,19 +238,22 @@ clib_error_t * _clib_error_report (clib_error_t * errors)
#ifdef TEST
-static error_t * foo1 (int x)
+static error_t *
+foo1 (int x)
{
return error_return (0, "x is odd %d", x);
}
-static error_t * foo2 (int x)
+static error_t *
+foo2 (int x)
{
return error_return (0, "x is even %d", x);
}
-static error_t * foo (int x)
+static error_t *
+foo (int x)
{
- error_t * e;
+ error_t *e;
if (x & 1)
e = foo1 (x);
else
@@ -255,14 +262,16 @@ static error_t * foo (int x)
return error_return (e, 0);
}
-static void error_handler (void * arg, char * msg, int msg_len)
+static void
+error_handler (void *arg, char *msg, int msg_len)
{
write (2, msg, msg_len);
}
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
- error_t * e;
+ error_t *e;
register_error_handler (error_handler, 0);
@@ -273,3 +282,11 @@ int main (int argc, char * argv[])
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/error.h b/vppinfra/vppinfra/error.h
index ad79422f767..63d73af36c7 100644
--- a/vppinfra/vppinfra/error.h
+++ b/vppinfra/vppinfra/error.h
@@ -38,7 +38,7 @@
#ifndef included_error_h
#define included_error_h
-#include <vppinfra/clib.h> /* for CLIB_LINUX_KERNEL */
+#include <vppinfra/clib.h> /* for CLIB_LINUX_KERNEL */
#include <vppinfra/error_bootstrap.h>
#ifdef CLIB_UNIX
@@ -53,8 +53,8 @@
#include <vppinfra/vec.h>
/* Callback functions for error reporting. */
-typedef void clib_error_handler_func_t (void * arg, u8 * msg, int msg_len);
-void clib_error_register_handler (clib_error_handler_func_t func, void * arg);
+typedef void clib_error_handler_func_t (void *arg, u8 * msg, int msg_len);
+void clib_error_register_handler (clib_error_handler_func_t func, void *arg);
#define clib_warning(format,args...) \
_clib_error (CLIB_ERROR_WARNING, clib_error_function, __LINE__, format, ## args)
@@ -72,12 +72,13 @@ void clib_error_register_handler (clib_error_handler_func_t func, void * arg);
#define clib_panic(format,args...) \
_clib_error (CLIB_ERROR_ABORT, (char *) clib_error_function, __LINE__, format, ## args)
-typedef struct {
+typedef struct
+{
/* Error message. */
- u8 * what;
+ u8 *what;
/* Where error occurred (e.g. __FUNCTION__ __LINE__) */
- const u8 * where;
+ const u8 *where;
uword flags;
@@ -92,16 +93,14 @@ do { \
(err)->code = (c); \
} while (0)
-extern void * clib_error_free_vector (clib_error_t * errors);
+extern void *clib_error_free_vector (clib_error_t * errors);
#define clib_error_free(e) e = clib_error_free_vector(e)
-extern clib_error_t *
-_clib_error_return (clib_error_t * errors,
- any code,
- uword flags,
- char * where,
- char * fmt, ...);
+extern clib_error_t *_clib_error_return (clib_error_t * errors,
+ any code,
+ uword flags,
+ char *where, char *fmt, ...);
#define clib_error_return_code(e,code,flags,args...) \
_clib_error_return((e),(code),(flags),(char *)clib_error_function,args)
@@ -121,13 +120,14 @@ _clib_error_return (clib_error_t * errors,
#define clib_error_return_unix_fatal(e,args...) \
clib_error_return_code(e,errno,CLIB_ERROR_ERRNO_VALID|CLIB_ERROR_FATAL,args)
-extern clib_error_t * _clib_error_report (clib_error_t * errors);
+extern clib_error_t *_clib_error_report (clib_error_t * errors);
#define clib_error_report(e) do { (e) = _clib_error_report (e); } while (0)
-u8 * format_clib_error (u8 * s, va_list * va);
+u8 *format_clib_error (u8 * s, va_list * va);
-always_inline word unix_error_is_fatal (word error)
+always_inline word
+unix_error_is_fatal (word error)
{
#ifdef CLIB_UNIX
switch (error)
@@ -191,3 +191,11 @@ do { \
})
#endif /* included_error_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
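
A brief sketch of the error-return/report pattern these declarations support; open_widget () and its failure condition are made up for illustration, while clib_error_return_code () and clib_error_report () are the macros defined above.

#include <vppinfra/error.h>

/* Sketch only: build an error on failure, report it at the call site. */
static clib_error_t *
open_widget (int fd)
{
  if (fd < 0)
    return clib_error_return_code (0, fd, /* flags */ 0,
				   "bad widget descriptor %d", fd);
  return 0;
}

static void
check_widget (int fd)
{
  clib_error_t *error = open_widget (fd);

  if (error)
    clib_error_report (error);	/* dispatch message to registered handlers */
}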
diff --git a/vppinfra/vppinfra/error_bootstrap.h b/vppinfra/vppinfra/error_bootstrap.h
index b7cfac79768..3fa0a18ec9f 100644
--- a/vppinfra/vppinfra/error_bootstrap.h
+++ b/vppinfra/vppinfra/error_bootstrap.h
@@ -41,12 +41,13 @@
/* Bootstrap include so that #include <vppinfra/mem.h> can include e.g.
<vppinfra/mheap.h> which depends on <vppinfra/vec.h>. */
-#include <vppinfra/clib.h> /* for uword */
+#include <vppinfra/clib.h> /* for uword */
-enum {
- CLIB_ERROR_FATAL = 1 << 0,
- CLIB_ERROR_ABORT = 1 << 1,
- CLIB_ERROR_WARNING = 1 << 2,
+enum
+{
+ CLIB_ERROR_FATAL = 1 << 0,
+ CLIB_ERROR_ABORT = 1 << 1,
+ CLIB_ERROR_WARNING = 1 << 2,
CLIB_ERROR_ERRNO_VALID = 1 << 16,
CLIB_ERROR_NO_RATE_LIMIT = 1 << 17,
};
@@ -62,9 +63,8 @@ enum {
Code specifies whether to call exit, abort or nothing at
all (for non-fatal warnings). */
extern void _clib_error (int code,
- char * function_name,
- uword line_number,
- char * format, ...);
+ char *function_name,
+ uword line_number, char *format, ...);
#define ASSERT(truth) \
do { \
@@ -87,3 +87,11 @@ do { \
} while (0)
#endif /* included_error_bootstrap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/fheap.c b/vppinfra/vppinfra/fheap.c
index 2e8b977a54a..1369245615a 100644
--- a/vppinfra/vppinfra/fheap.c
+++ b/vppinfra/vppinfra/fheap.c
@@ -17,96 +17,111 @@
/* Fibonacci heaps. */
always_inline fheap_node_t *
fheap_get_node (fheap_t * f, u32 i)
-{ return i != ~0 ? vec_elt_at_index (f->nodes, i) : 0; }
+{
+ return i != ~0 ? vec_elt_at_index (f->nodes, i) : 0;
+}
always_inline fheap_node_t *
fheap_get_root (fheap_t * f)
-{ return fheap_get_node (f, f->min_root); }
+{
+ return fheap_get_node (f, f->min_root);
+}
-static void fheap_validate (fheap_t * f)
+static void
+fheap_validate (fheap_t * f)
{
- fheap_node_t * n, * m;
+ fheap_node_t *n, *m;
uword ni, si;
- if (! CLIB_DEBUG || ! f->enable_validate)
+ if (!CLIB_DEBUG || !f->enable_validate)
return;
vec_foreach_index (ni, f->nodes)
- {
- n = vec_elt_at_index (f->nodes, ni);
+ {
+ n = vec_elt_at_index (f->nodes, ni);
- if (! n->is_valid)
- continue;
+ if (!n->is_valid)
+ continue;
- /* Min root must have minimal key. */
- m = vec_elt_at_index (f->nodes, f->min_root);
- ASSERT (n->key >= m->key);
+ /* Min root must have minimal key. */
+ m = vec_elt_at_index (f->nodes, f->min_root);
+ ASSERT (n->key >= m->key);
- /* Min root must have no parent. */
- if (ni == f->min_root)
- ASSERT (n->parent == ~0);
+ /* Min root must have no parent. */
+ if (ni == f->min_root)
+ ASSERT (n->parent == ~0);
- /* Check sibling linkages. */
- if (n->next_sibling == ~0)
- ASSERT (n->prev_sibling == ~0);
- else if (n->prev_sibling == ~0)
- ASSERT (n->next_sibling == ~0);
- else
- {
- fheap_node_t * prev, * next;
- u32 si = n->next_sibling, si_start = si;
- do {
+ /* Check sibling linkages. */
+ if (n->next_sibling == ~0)
+ ASSERT (n->prev_sibling == ~0);
+ else if (n->prev_sibling == ~0)
+ ASSERT (n->next_sibling == ~0);
+ else
+ {
+ fheap_node_t *prev, *next;
+ u32 si = n->next_sibling, si_start = si;
+ do
+ {
m = vec_elt_at_index (f->nodes, si);
prev = vec_elt_at_index (f->nodes, m->prev_sibling);
next = vec_elt_at_index (f->nodes, m->next_sibling);
ASSERT (prev->next_sibling == si);
ASSERT (next->prev_sibling == si);
si = m->next_sibling;
- } while (si != si_start);
- }
-
- /* Loop through all siblings. */
- {
- u32 n_siblings = 0;
-
- foreach_fheap_node_sibling (f, si, n->next_sibling, ({
- m = vec_elt_at_index (f->nodes, si);
-
- /* All siblings must have same parent. */
- ASSERT (m->parent == n->parent);
-
- n_siblings += 1;
- }));
-
- /* Either parent is non-empty or there are siblings present. */
- if (n->parent == ~0 && ni != f->min_root)
- ASSERT (n_siblings > 0);
+ }
+ while (si != si_start);
}
- /* Loop through all children. */
- {
- u32 found_first_child = n->first_child == ~0;
- u32 n_children = 0;
-
- foreach_fheap_node_sibling (f, si, n->first_child, ({
- m = vec_elt_at_index (f->nodes, si);
-
- /* Children must have larger keys than their parent. */
- ASSERT (m->key >= n->key);
-
- if (! found_first_child)
- found_first_child = si == n->first_child;
-
- n_children += 1;
- }));
-
- /* Check that first child is present on list. */
- ASSERT (found_first_child);
+ /* Loop through all siblings. */
+ {
+ u32 n_siblings = 0;
+
+ foreach_fheap_node_sibling (f, si, n->next_sibling, (
+ {
+ m =
+ vec_elt_at_index
+ (f->nodes, si);
+ /* All siblings must have same parent. */
+ ASSERT (m->parent
+ ==
+ n->
+ parent);
+ n_siblings += 1;}
+ ));
+
+ /* Either parent is non-empty or there are siblings present. */
+ if (n->parent == ~0 && ni != f->min_root)
+ ASSERT (n_siblings > 0);
+ }
- /* Make sure rank is correct. */
- ASSERT (n->rank == n_children);
- }
+ /* Loop through all children. */
+ {
+ u32 found_first_child = n->first_child == ~0;
+ u32 n_children = 0;
+
+ foreach_fheap_node_sibling (f, si, n->first_child, (
+ {
+ m =
+ vec_elt_at_index
+ (f->nodes, si);
+ /* Children must have larger keys than their parent. */
+ ASSERT (m->key >=
+ n->key);
+ if
+ (!found_first_child)
+ found_first_child =
+ si ==
+ n->first_child;
+ n_children += 1;}
+ ));
+
+ /* Check that first child is present on list. */
+ ASSERT (found_first_child);
+
+ /* Make sure rank is correct. */
+ ASSERT (n->rank == n_children);
}
+ }
/* Increment serial number for each successful validate.
Failure can be used as condition for gdb breakpoints. */
@@ -116,10 +131,10 @@ static void fheap_validate (fheap_t * f)
always_inline void
fheap_node_add_sibling (fheap_t * f, u32 ni, u32 ni_to_add)
{
- fheap_node_t * n = vec_elt_at_index (f->nodes, ni);
- fheap_node_t * n_to_add = vec_elt_at_index (f->nodes, ni_to_add);
- fheap_node_t * n_next = fheap_get_node (f, n->next_sibling);
- fheap_node_t * parent;
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
+ fheap_node_t *n_to_add = vec_elt_at_index (f->nodes, ni_to_add);
+ fheap_node_t *n_next = fheap_get_node (f, n->next_sibling);
+ fheap_node_t *parent;
/* Empty list? */
if (n->next_sibling == ~0)
@@ -144,9 +159,10 @@ fheap_node_add_sibling (fheap_t * f, u32 ni, u32 ni_to_add)
parent->rank += 1;
}
-void fheap_add (fheap_t * f, u32 ni, u32 key)
+void
+fheap_add (fheap_t * f, u32 ni, u32 key)
{
- fheap_node_t * r, * n;
+ fheap_node_t *r, *n;
u32 ri;
n = vec_elt_at_index (f->nodes, ni);
@@ -157,7 +173,7 @@ void fheap_add (fheap_t * f, u32 ni, u32 key)
r = fheap_get_root (f);
ri = f->min_root;
- if (! r)
+ if (!r)
{
/* No root? Add node as new root. */
f->min_root = ni;
@@ -178,13 +194,13 @@ void fheap_add (fheap_t * f, u32 ni, u32 key)
always_inline u32
fheap_node_remove_internal (fheap_t * f, u32 ni, u32 invalidate)
{
- fheap_node_t * n = vec_elt_at_index (f->nodes, ni);
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
u32 prev_ni = n->prev_sibling;
u32 next_ni = n->next_sibling;
u32 list_has_single_element = prev_ni == ni;
- fheap_node_t * prev = fheap_get_node (f, prev_ni);
- fheap_node_t * next = fheap_get_node (f, next_ni);
- fheap_node_t * p = fheap_get_node (f, n->parent);
+ fheap_node_t *prev = fheap_get_node (f, prev_ni);
+ fheap_node_t *next = fheap_get_node (f, next_ni);
+ fheap_node_t *p = fheap_get_node (f, n->parent);
if (p)
{
@@ -211,16 +227,23 @@ fheap_node_remove_internal (fheap_t * f, u32 ni, u32 invalidate)
return list_has_single_element ? ~0 : next_ni;
}
-always_inline u32 fheap_node_remove (fheap_t * f, u32 ni)
-{ return fheap_node_remove_internal (f, ni, /* invalidate */ 0); }
+always_inline u32
+fheap_node_remove (fheap_t * f, u32 ni)
+{
+ return fheap_node_remove_internal (f, ni, /* invalidate */ 0);
+}
-always_inline u32 fheap_node_remove_and_invalidate (fheap_t * f, u32 ni)
-{ return fheap_node_remove_internal (f, ni, /* invalidate */ 1); }
+always_inline u32
+fheap_node_remove_and_invalidate (fheap_t * f, u32 ni)
+{
+ return fheap_node_remove_internal (f, ni, /* invalidate */ 1);
+}
-static void fheap_link_root (fheap_t * f, u32 ni)
+static void
+fheap_link_root (fheap_t * f, u32 ni)
{
- fheap_node_t * n = vec_elt_at_index (f->nodes, ni);
- fheap_node_t * r, * lo, * hi;
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
+ fheap_node_t *r, *lo, *hi;
u32 ri, lo_i, hi_i, k;
while (1)
@@ -229,7 +252,7 @@ static void fheap_link_root (fheap_t * f, u32 ni)
vec_validate_init_empty (f->root_list_by_rank, k, ~0);
ri = f->root_list_by_rank[k];
r = fheap_get_node (f, ri);
- if (! r)
+ if (!r)
{
f->root_list_by_rank[k] = ni;
return;
@@ -243,7 +266,7 @@ static void fheap_link_root (fheap_t * f, u32 ni)
if (hi->key < lo->key)
{
u32 ti;
- fheap_node_t * tn;
+ fheap_node_t *tn;
ti = lo_i, tn = lo;
lo = hi, lo_i = hi_i;
hi = tn, hi_i = ti;
@@ -263,7 +286,7 @@ static void fheap_link_root (fheap_t * f, u32 ni)
fheap_node_add_sibling (f, lo->first_child, hi_i);
   /* Following Fredman & Tarjan: "When making a root node X a child of another node in a linking step,
- we unmark X". */
+ we unmark X". */
hi->is_marked = 0;
ni = lo_i;
@@ -271,21 +294,22 @@ static void fheap_link_root (fheap_t * f, u32 ni)
}
}
-u32 fheap_del_min (fheap_t * f, u32 * min_key)
+u32
+fheap_del_min (fheap_t * f, u32 * min_key)
{
- fheap_node_t * r = fheap_get_root (f);
+ fheap_node_t *r = fheap_get_root (f);
u32 to_delete_min_ri = f->min_root;
u32 ri, ni;
/* Empty heap? */
- if (! r)
+ if (!r)
return ~0;
/* Root's children become siblings. Call this step a; see below. */
if (r->first_child != ~0)
{
u32 ci, cni, rni;
- fheap_node_t * c, * cn, * rn;
+ fheap_node_t *c, *cn, *rn;
/* Splice child & root circular lists together. */
ci = r->first_child;
@@ -328,19 +352,19 @@ u32 fheap_del_min (fheap_t * f, u32 * min_key)
min_ds = ~0;
vec_foreach_index (i, f->root_list_by_rank)
- {
- ni = f->root_list_by_rank[i];
- if (ni == ~0)
- continue;
- f->root_list_by_rank[i] = ~0;
- r = fheap_get_node (f, ni);
- if (r->key < min_ds)
- {
- f->min_root = ni;
- min_ds = r->key;
- ASSERT (r->parent == ~0);
- }
- }
+ {
+ ni = f->root_list_by_rank[i];
+ if (ni == ~0)
+ continue;
+ f->root_list_by_rank[i] = ~0;
+ r = fheap_get_node (f, ni);
+ if (r->key < min_ds)
+ {
+ f->min_root = ni;
+ min_ds = r->key;
+ ASSERT (r->parent == ~0);
+ }
+ }
}
/* Return deleted min root. */
@@ -353,16 +377,17 @@ u32 fheap_del_min (fheap_t * f, u32 * min_key)
return to_delete_min_ri;
}
-static void fheap_mark_parent (fheap_t * f, u32 pi)
+static void
+fheap_mark_parent (fheap_t * f, u32 pi)
{
- fheap_node_t * p = vec_elt_at_index (f->nodes, pi);
+ fheap_node_t *p = vec_elt_at_index (f->nodes, pi);
/* Parent is a root: do nothing. */
if (p->parent == ~0)
return;
/* If not marked, mark it. */
- if (! p->is_marked)
+ if (!p->is_marked)
{
p->is_marked = 1;
return;
@@ -382,10 +407,11 @@ static void fheap_mark_parent (fheap_t * f, u32 pi)
}
/* Set key to new smaller value. */
-void fheap_decrease_key (fheap_t * f, u32 ni, u32 new_key)
+void
+fheap_decrease_key (fheap_t * f, u32 ni, u32 new_key)
{
- fheap_node_t * n = vec_elt_at_index (f->nodes, ni);
- fheap_node_t * r = fheap_get_root (f);
+ fheap_node_t *n = vec_elt_at_index (f->nodes, ni);
+ fheap_node_t *r = fheap_get_root (f);
n->key = new_key;
@@ -404,9 +430,10 @@ void fheap_decrease_key (fheap_t * f, u32 ni, u32 new_key)
fheap_validate (f);
}
-void fheap_del (fheap_t * f, u32 ni)
+void
+fheap_del (fheap_t * f, u32 ni)
{
- fheap_node_t * n;
+ fheap_node_t *n;
n = vec_elt_at_index (f->nodes, ni);
@@ -422,13 +449,25 @@ void fheap_del (fheap_t * f, u32 ni)
fheap_mark_parent (f, n->parent);
/* Add children to root list. */
- foreach_fheap_node_sibling (f, ci, n->first_child, ({
- fheap_node_remove (f, ci);
- fheap_node_add_sibling (f, f->min_root, ci);
- }));
+ foreach_fheap_node_sibling (f, ci, n->first_child, (
+ {
+ fheap_node_remove
+ (f, ci);
+ fheap_node_add_sibling
+ (f, f->min_root,
+ ci);}
+ ));
fheap_node_remove_and_invalidate (f, ni);
}
fheap_validate (f);
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
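
A side note on the logic being re-indented above: fheap_decrease_key and
fheap_mark_parent implement the classic Fibonacci-heap cascading cut: a
parent that loses its first child is merely marked, while a marked parent
that loses another child is itself cut to the root list and the check
repeats one level up. The sketch below is a deliberately simplified,
hypothetical illustration of that rule (toy_node_t, toy_move_to_root and
toy_mark_parent are not vppinfra names); the real code walks the same
chain through u32 node indices and circular sibling lists.

/* Simplified illustration of the cascading-cut rule described above. */
typedef struct toy_node
{
  struct toy_node *parent;
  int is_marked;
} toy_node_t;

/* Stand-in for splicing a node onto the root list. */
static void
toy_move_to_root (toy_node_t * n)
{
  n->parent = 0;   /* a real heap would also fix the sibling links */
}

/* Called with the parent of a node that was just cut away. */
static void
toy_mark_parent (toy_node_t * p)
{
  toy_node_t *grandparent;

  while (p && p->parent)   /* "Parent is a root: do nothing." */
    {
      if (!p->is_marked)
	{
	  /* First loss since p became somebody's child: just mark it. */
	  p->is_marked = 1;
	  return;
	}

      /* Already marked: cut p as well, unmark it, continue one level up. */
      grandparent = p->parent;
      toy_move_to_root (p);
      p->is_marked = 0;
      p = grandparent;
    }
}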
diff --git a/vppinfra/vppinfra/fheap.h b/vppinfra/vppinfra/fheap.h
index 974eb1fc698..6d4965f1bea 100644
--- a/vppinfra/vppinfra/fheap.h
+++ b/vppinfra/vppinfra/fheap.h
@@ -20,7 +20,8 @@
#include <vppinfra/vec.h>
-typedef struct {
+typedef struct
+{
/* Node index of parent. */
u32 parent;
@@ -67,13 +68,14 @@ do { \
} \
} while (0)
-typedef struct {
+typedef struct
+{
u32 min_root;
/* Vector of nodes. */
- fheap_node_t * nodes;
+ fheap_node_t *nodes;
- u32 * root_list_by_rank;
+ u32 *root_list_by_rank;
u32 enable_validate;
@@ -84,8 +86,8 @@ typedef struct {
always_inline void
fheap_init (fheap_t * f, u32 n_nodes)
{
- fheap_node_t * save_nodes = f->nodes;
- u32 * save_root_list = f->root_list_by_rank;
+ fheap_node_t *save_nodes = f->nodes;
+ u32 *save_root_list = f->root_list_by_rank;
memset (f, 0, sizeof (f[0]));
@@ -107,11 +109,15 @@ fheap_free (fheap_t * f)
always_inline u32
fheap_find_min (fheap_t * f)
-{ return f->min_root; }
+{
+ return f->min_root;
+}
always_inline u32
fheap_is_empty (fheap_t * f)
-{ return f->min_root == ~0; }
+{
+ return f->min_root == ~0;
+}
/* Add/delete nodes. */
void fheap_add (fheap_t * f, u32 ni, u32 key);
@@ -124,3 +130,11 @@ u32 fheap_del_min (fheap_t * f, u32 * min_key);
void fheap_decrease_key (fheap_t * f, u32 ni, u32 new_key);
#endif /* included_clib_fheap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
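
For orientation, a minimal test-style use of the API declared above might
look like the following (caller-chosen node indices, arbitrary keys, no
error handling). Setting enable_validate asks the implementation to run
its internal fheap_validate () consistency checks in debug images.

#include <string.h>
#include <stdio.h>
#include <vppinfra/fheap.h>
#include <vppinfra/format.h>

int
main (void)
{
  fheap_t heap, *f = &heap;
  u32 ni, min_key;

  /* Zero first: fheap_init reuses any previously allocated vectors. */
  memset (f, 0, sizeof (f[0]));
  fheap_init (f, /* n_nodes */ 16);
  f->enable_validate = 1;

  /* Node indices 0..15 are chosen by the caller; keys are priorities. */
  fheap_add (f, 3, 30);
  fheap_add (f, 7, 10);
  fheap_add (f, 9, 20);

  fheap_decrease_key (f, 9, 5);   /* node 9 becomes the new minimum */

  while (!fheap_is_empty (f))
    {
      ni = fheap_del_min (f, &min_key);
      fformat (stdout, "node %d key %d\n", ni, min_key);
    }

  fheap_free (f);
  return 0;
}

fheap_del_min returns the removed node index and reports its key through
min_key, so the loop above drains nodes in increasing key order (here:
9, 7, 3).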
diff --git a/vppinfra/vppinfra/fifo.c b/vppinfra/vppinfra/fifo.c
index fc287a2a29f..5b4c76d1084 100644
--- a/vppinfra/vppinfra/fifo.c
+++ b/vppinfra/vppinfra/fifo.c
@@ -76,13 +76,14 @@
fifo_free (f) frees fifo.
*/
-void * _clib_fifo_resize (void * v_old, uword n_new_elts, uword elt_bytes)
+void *
+_clib_fifo_resize (void *v_old, uword n_new_elts, uword elt_bytes)
{
- void * v_new, * end, * head;
+ void *v_new, *end, *head;
uword n_old_elts, header_bytes;
uword n_copy_bytes, n_zero_bytes;
- clib_fifo_header_t * f_new, * f_old;
-
+ clib_fifo_header_t *f_new, *f_old;
+
n_old_elts = clib_fifo_elts (v_old);
n_new_elts += n_old_elts;
if (n_new_elts < 32)
@@ -126,3 +127,11 @@ void * _clib_fifo_resize (void * v_old, uword n_new_elts, uword elt_bytes)
return v_new;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/fifo.h b/vppinfra/vppinfra/fifo.h
index 10c7b65673d..b0b35e25af7 100644
--- a/vppinfra/vppinfra/fifo.h
+++ b/vppinfra/vppinfra/fifo.h
@@ -39,10 +39,11 @@
#define included_fifo_h
#include <vppinfra/cache.h>
-#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/error.h> /* for ASSERT */
#include <vppinfra/vec.h>
-typedef struct {
+typedef struct
+{
/* First index of valid data in fifo. */
u32 head_index;
@@ -51,8 +52,10 @@ typedef struct {
} clib_fifo_header_t;
always_inline clib_fifo_header_t *
-clib_fifo_header (void * f)
-{ return vec_header (f, sizeof (clib_fifo_header_t)); }
+clib_fifo_header (void *f)
+{
+ return vec_header (f, sizeof (clib_fifo_header_t));
+}
/* Aliases. */
#define clib_fifo_len(v) vec_len(v)
@@ -60,12 +63,12 @@ clib_fifo_header (void * f)
#define clib_fifo_end(v) vec_end(v)
always_inline uword
-clib_fifo_elts (void * v)
+clib_fifo_elts (void *v)
{
word l, r;
- clib_fifo_header_t * f = clib_fifo_header (v);
+ clib_fifo_header_t *f = clib_fifo_header (v);
- if (! v)
+ if (!v)
return 0;
l = _clib_fifo_len (v);
@@ -76,13 +79,15 @@ clib_fifo_elts (void * v)
}
always_inline uword
-clib_fifo_free_elts (void * v)
-{ return clib_fifo_len (v) - clib_fifo_elts (v); }
+clib_fifo_free_elts (void *v)
+{
+ return clib_fifo_len (v) - clib_fifo_elts (v);
+}
always_inline void
-clib_fifo_reset (void * v)
+clib_fifo_reset (void *v)
{
- clib_fifo_header_t * f = clib_fifo_header (v);
+ clib_fifo_header_t *f = clib_fifo_header (v);
if (v)
{
f->head_index = f->tail_index = 0;
@@ -91,13 +96,13 @@ clib_fifo_reset (void * v)
}
/* External resize function. */
-void * _clib_fifo_resize (void * v, uword n_elts, uword elt_bytes);
+void *_clib_fifo_resize (void *v, uword n_elts, uword elt_bytes);
#define clib_fifo_resize(f,n_elts) \
f = _clib_fifo_resize ((f), (n_elts), sizeof ((f)[0]))
always_inline void *
-_clib_fifo_validate (void * v, uword n_elts, uword elt_bytes)
+_clib_fifo_validate (void *v, uword n_elts, uword elt_bytes)
{
if (clib_fifo_free_elts (v) < n_elts)
v = _clib_fifo_resize (v, n_elts, elt_bytes);
@@ -106,14 +111,14 @@ _clib_fifo_validate (void * v, uword n_elts, uword elt_bytes)
#define clib_fifo_validate(f,n_elts) \
f = _clib_fifo_validate ((f), (n_elts), sizeof (f[0]))
-
+
/* Advance tail pointer by N_ELTS which can be either positive or negative. */
always_inline void *
-_clib_fifo_advance_tail (void * v, word n_elts, uword elt_bytes,
+_clib_fifo_advance_tail (void *v, word n_elts, uword elt_bytes,
uword * tail_return)
{
word i, l, n_free;
- clib_fifo_header_t * f;
+ clib_fifo_header_t *f;
n_free = clib_fifo_free_elts (v);
if (n_free < n_elts)
@@ -161,9 +166,9 @@ _clib_fifo_advance_tail (void * v, word n_elts, uword elt_bytes,
})
always_inline uword
-clib_fifo_advance_head (void * v, uword n_elts)
+clib_fifo_advance_head (void *v, uword n_elts)
{
- clib_fifo_header_t * f;
+ clib_fifo_header_t *f;
uword l, i, n;
ASSERT (clib_fifo_elts (v) >= n_elts);
@@ -233,16 +238,16 @@ do { \
} while (0)
always_inline uword
-clib_fifo_head_index (void * v)
+clib_fifo_head_index (void *v)
{
- clib_fifo_header_t * f = clib_fifo_header (v);
+ clib_fifo_header_t *f = clib_fifo_header (v);
return v ? f->head_index : 0;
}
always_inline uword
-clib_fifo_tail_index (void * v)
+clib_fifo_tail_index (void *v)
{
- clib_fifo_header_t * f = clib_fifo_header (v);
+ clib_fifo_header_t *f = clib_fifo_header (v);
return v ? f->tail_index : 0;
}
@@ -252,9 +257,9 @@ clib_fifo_tail_index (void * v)
#define clib_fifo_free(f) vec_free_h((f),sizeof(clib_fifo_header_t))
always_inline uword
-clib_fifo_elt_index (void * v, uword i)
+clib_fifo_elt_index (void *v, uword i)
{
- clib_fifo_header_t * f = clib_fifo_header (v);
+ clib_fifo_header_t *f = clib_fifo_header (v);
uword result = 0;
ASSERT (i < clib_fifo_elts (v));
@@ -289,3 +294,11 @@ do { \
} while (0)
#endif /* included_fifo_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
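
To see this header in use: a fifo is an ordinary NULL-initialized vppinfra
vector with the clib_fifo_header_t above prepended, and the macros may
reassign the pointer as it grows. In the sketch below, clib_fifo_add1 and
clib_fifo_sub1 are assumed from the elided portions of this header (only
the do { ... } while (0) tails of those macros appear in the hunks above);
everything else is declared in the lines shown.

#include <stdio.h>
#include <vppinfra/fifo.h>
#include <vppinfra/format.h>

int
main (void)
{
  u32 *fifo = 0;   /* empty fifo: just a null vector pointer */
  u32 i, e;

  clib_fifo_resize (fifo, 8);   /* pre-size; the fifo also grows on demand */

  for (i = 0; i < 5; i++)
    clib_fifo_add1 (fifo, i * 10);   /* assumed: append one element */

  fformat (stdout, "elts %wd free %wd\n",
	   clib_fifo_elts (fifo), clib_fifo_free_elts (fifo));

  while (clib_fifo_elts (fifo))
    {
      clib_fifo_sub1 (fifo, e);   /* assumed: pop one element from the head */
      fformat (stdout, "%d\n", e);
    }

  clib_fifo_free (fifo);
  return 0;
}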
diff --git a/vppinfra/vppinfra/format.c b/vppinfra/vppinfra/format.c
index 8224c87c2b5..0da3502ac0a 100644
--- a/vppinfra/vppinfra/format.c
+++ b/vppinfra/vppinfra/format.c
@@ -45,7 +45,7 @@
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <stdarg.h> /* va_start, etc */
+#include <stdarg.h> /* va_start, etc */
#ifdef CLIB_UNIX
#include <unistd.h>
@@ -61,26 +61,30 @@
#include <vppinfra/vec.h>
#include <vppinfra/error.h>
#include <vppinfra/string.h>
-#include <vppinfra/os.h> /* os_puts */
+#include <vppinfra/os.h> /* os_puts */
-typedef struct {
- /* Output number in this base. */
- u8 base;
+typedef struct
+{
+ /* Output number in this base. */
+ u8 base;
- /* Number of show of 64 bit number. */
- u8 n_bits;
+ /* Number of show of 64 bit number. */
+ u8 n_bits;
- /* Signed or unsigned. */
- u8 is_signed;
+ /* Signed or unsigned. */
+ u8 is_signed;
- /* Output digits uppercase (not lowercase) %X versus %x. */
- u8 uppercase_digits;
+ /* Output digits uppercase (not lowercase) %X versus %x. */
+ u8 uppercase_digits;
} format_integer_options_t;
-static u8 * format_integer (u8 * s, u64 number, format_integer_options_t * options);
-static u8 * format_float (u8 * s, f64 x, uword n_digits_to_print, uword output_style);
+static u8 *format_integer (u8 * s, u64 number,
+ format_integer_options_t * options);
+static u8 *format_float (u8 * s, f64 x, uword n_digits_to_print,
+ uword output_style);
-typedef struct {
+typedef struct
+{
/* String justification: + => right, - => left, = => center. */
uword justify;
@@ -95,7 +99,8 @@ typedef struct {
uword pad_char;
} format_info_t;
-static u8 * justify (u8 * s, format_info_t * fi, uword s_len_orig)
+static u8 *
+justify (u8 * s, format_info_t * fi, uword s_len_orig)
{
uword i0, l0, l1;
@@ -121,11 +126,11 @@ static u8 * justify (u8 * s, format_info_t * fi, uword s_len_orig)
break;
case '+':
- n_left = n;
+ n_left = n;
break;
case '=':
- n_right = n_left = n/2;
+ n_right = n_left = n / 2;
if (n % 2)
n_left++;
break;
@@ -145,12 +150,13 @@ static u8 * justify (u8 * s, format_info_t * fi, uword s_len_orig)
return s;
}
-static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
+static u8 *
+do_percent (u8 ** _s, u8 * fmt, va_list * va)
{
- u8 * s = *_s;
+ u8 *s = *_s;
uword c;
- u8 * f = fmt;
+ u8 *f = fmt;
format_info_t fi = {
.justify = '+',
@@ -189,15 +195,19 @@ static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
if (c == '0' && i == 0 && is_first_digit)
fi.pad_char = '0';
is_first_digit = 0;
- if (c == '*') {
- fi.width[i] = va_arg(*va, int);
+ if (c == '*')
+ {
+ fi.width[i] = va_arg (*va, int);
c = *++f;
- } else {
- while (c >= '0' && c <= '9') {
- fi.width[i] = 10*fi.width[i] + (c - '0');
+ }
+ else
+ {
+ while (c >= '0' && c <= '9')
+ {
+ fi.width[i] = 10 * fi.width[i] + (c - '0');
c = *++f;
- }
- }
+ }
+ }
if (c != '.')
break;
c = *++f;
@@ -240,12 +250,13 @@ static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
switch (c)
{
- default: {
- /* Try to give a helpful error message. */
- vec_free (s);
- s = format (s, "**** CLIB unknown format `%%%c' ****", c);
- goto done;
- }
+ default:
+ {
+ /* Try to give a helpful error message. */
+ vec_free (s);
+ s = format (s, "**** CLIB unknown format `%%%c' ****", c);
+ goto done;
+ }
case 'c':
vec_add1 (s, va_arg (*va, int));
@@ -306,10 +317,10 @@ static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
case 's':
case 'S':
{
- char * cstring = va_arg (*va, char *);
+ char *cstring = va_arg (*va, char *);
uword len;
- if (! cstring)
+ if (!cstring)
{
cstring = "(nil)";
len = 5;
@@ -318,13 +329,13 @@ static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
len = clib_min (strlen (cstring), fi.width[1]);
else
len = strlen (cstring);
-
+
/* %S => format string as C identifier (replace _ with space). */
if (c == 'S')
{
for (i = 0; i < len; i++)
vec_add1 (s, cstring[i] == '_' ? ' ' : cstring[i]);
- }
+ }
else
vec_add (s, cstring, len);
}
@@ -332,7 +343,7 @@ static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
case 'v':
{
- u8 * v = va_arg (*va, u8 *);
+ u8 *v = va_arg (*va, u8 *);
uword len;
if (fi.width[1] != 0)
@@ -344,21 +355,21 @@ static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
}
break;
- case 'f': case 'g': case 'e':
+ case 'f':
+ case 'g':
+ case 'e':
/* Floating point. */
ASSERT (fi.how_long == 0 || fi.how_long == 'l');
- s = format_float (s,
- va_arg (*va, double),
- fi.width[1], c);
+ s = format_float (s, va_arg (*va, double), fi.width[1], c);
break;
case 'U':
/* User defined function. */
{
- typedef u8 * (user_func_t) (u8 * s, va_list * args);
- user_func_t * u = va_arg (*va, user_func_t *);
+ typedef u8 *(user_func_t) (u8 * s, va_list * args);
+ user_func_t *u = va_arg (*va, user_func_t *);
- s = (* u) (s, va);
+ s = (*u) (s, va);
}
break;
}
@@ -366,14 +377,15 @@ static u8 * do_percent (u8 ** _s, u8 * fmt, va_list * va)
s = justify (s, &fi, s_initial_len);
}
- done:
+done:
*_s = s;
return f;
}
-u8 * va_format (u8 * s, char * fmt, va_list * va)
+u8 *
+va_format (u8 * s, char *fmt, va_list * va)
{
- u8 * f = (u8 *) fmt, * g;
+ u8 *f = (u8 *) fmt, *g;
u8 c;
g = f;
@@ -381,7 +393,7 @@ u8 * va_format (u8 * s, char * fmt, va_list * va)
{
c = *f;
- if (! c)
+ if (!c)
break;
if (c == '%')
@@ -402,7 +414,8 @@ u8 * va_format (u8 * s, char * fmt, va_list * va)
return s;
}
-u8 * format (u8 * s, char * fmt, ...)
+u8 *
+format (u8 * s, char *fmt, ...)
{
va_list va;
va_start (va, fmt);
@@ -411,10 +424,11 @@ u8 * format (u8 * s, char * fmt, ...)
return s;
}
-word va_fformat (FILE * f, char * fmt, va_list * va)
+word
+va_fformat (FILE * f, char *fmt, va_list * va)
{
word ret;
- u8 * s;
+ u8 *s;
s = va_format (0, fmt, va);
@@ -434,23 +448,25 @@ word va_fformat (FILE * f, char * fmt, va_list * va)
return ret;
}
-word fformat (FILE * f, char * fmt, ...)
+word
+fformat (FILE * f, char *fmt, ...)
{
- va_list va;
- word ret;
+ va_list va;
+ word ret;
- va_start(va, fmt);
- ret = va_fformat(f, fmt, &va);
- va_end(va);
+ va_start (va, fmt);
+ ret = va_fformat (f, fmt, &va);
+ va_end (va);
- return (ret);
+ return (ret);
}
#ifdef CLIB_UNIX
-word fdformat (int fd, char * fmt, ...)
+word
+fdformat (int fd, char *fmt, ...)
{
word ret;
- u8 * s;
+ u8 *s;
va_list va;
va_start (va, fmt);
@@ -464,12 +480,13 @@ word fdformat (int fd, char * fmt, ...)
#endif
/* Format integral type. */
-static u8 * format_integer (u8 * s, u64 number, format_integer_options_t * options)
+static u8 *
+format_integer (u8 * s, u64 number, format_integer_options_t * options)
{
u64 q;
u32 r;
u8 digit_buffer[128];
- u8 * d = digit_buffer + sizeof (digit_buffer);
+ u8 *d = digit_buffer + sizeof (digit_buffer);
word c, base;
if (options->is_signed && (i64) number < 0)
@@ -488,26 +505,25 @@ static u8 * format_integer (u8 * s, u64 number, format_integer_options_t * optio
q = number / base;
r = number % base;
- if (r < 10+26+26)
+ if (r < 10 + 26 + 26)
{
if (r < 10)
c = '0' + r;
- else if (r < 10+26)
+ else if (r < 10 + 26)
c = 'a' + (r - 10);
else
c = 'A' + (r - 10 - 26);
if (options->uppercase_digits
- && base <= 10+26
- && c >= 'a' && c <= 'z')
+ && base <= 10 + 26 && c >= 'a' && c <= 'z')
c += 'A' - 'a';
*--d = c;
}
- else /* will never happen, warning be gone */
- {
- *--d = '?';
- }
+ else /* will never happen, warning be gone */
+ {
+ *--d = '?';
+ }
if (q == 0)
break;
@@ -531,9 +547,14 @@ do { \
} while (0)
/* Construct IEEE 64 bit number. */
-static f64 f64_up (uword sign, word expon, u64 fraction)
+static f64
+f64_up (uword sign, word expon, u64 fraction)
{
- union { u64 u; f64 f; } tmp;
+ union
+ {
+ u64 u;
+ f64 f;
+ } tmp;
tmp.u = (u64) ((sign) != 0) << 63;
@@ -550,11 +571,12 @@ static f64 f64_up (uword sign, word expon, u64 fraction)
}
/* Returns approximate precision of number given its exponent. */
-static f64 f64_precision (int base2_expon)
+static f64
+f64_precision (int base2_expon)
{
static int n_bits = 0;
- if (! n_bits)
+ if (!n_bits)
{
/* Compute number of significant bits in floating point representation. */
f64 one = 0;
@@ -572,7 +594,8 @@ static f64 f64_precision (int base2_expon)
}
/* Return x 10^n */
-static f64 times_power_of_ten (f64 x, int n)
+static f64
+times_power_of_ten (f64 x, int n)
{
if (n >= 0)
{
@@ -594,11 +617,12 @@ static f64 times_power_of_ten (f64 x, int n)
}
return x * t[-n];
}
-
+
}
/* Write x = y * 10^expon with 1 < y < 10. */
-static f64 normalize (f64 x, word * expon_return, f64 * prec_return)
+static f64
+normalize (f64 x, word * expon_return, f64 * prec_return)
{
word expon2, expon10;
CLIB_UNUSED (u64 fraction);
@@ -607,8 +631,10 @@ static f64 normalize (f64 x, word * expon_return, f64 * prec_return)
f64_down (x, sign, expon2, fraction);
- expon10 = .5 + expon2 * .301029995663981195213738894724493 /* Log (2) / Log (10) */;
-
+ expon10 =
+ .5 +
+ expon2 * .301029995663981195213738894724493 /* Log (2) / Log (10) */ ;
+
prec = f64_precision (expon2);
x = times_power_of_ten (x, -expon10);
prec = times_power_of_ten (prec, -expon10);
@@ -639,7 +665,8 @@ static f64 normalize (f64 x, word * expon_return, f64 * prec_return)
return x;
}
-static u8 * add_some_zeros (u8 * s, uword n_zeros)
+static u8 *
+add_some_zeros (u8 * s, uword n_zeros)
{
while (n_zeros > 0)
{
@@ -652,9 +679,7 @@ static u8 * add_some_zeros (u8 * s, uword n_zeros)
/* Format a floating point number with the given number of fractional
digits (e.g. 1.2345 with 2 fraction digits yields "1.23") and output style. */
static u8 *
-format_float (u8 * s, f64 x,
- uword n_fraction_digits,
- uword output_style)
+format_float (u8 * s, f64 x, uword n_fraction_digits, uword output_style)
{
f64 prec;
word sign, expon, n_fraction_done, added_decimal_point;
@@ -676,7 +701,7 @@ format_float (u8 * s, f64 x,
vec_add1 (s, '0');
goto done;
}
-
+
if (x < 0)
{
x = -x;
@@ -684,13 +709,13 @@ format_float (u8 * s, f64 x,
}
/* Check for infinity. */
- if (x == x/2)
+ if (x == x / 2)
return format (s, "%cinfinity", sign ? '-' : '+');
x = normalize (x, &expon, &prec);
/* Not enough digits to print anything: so just print 0 */
- if ((word) -expon > (word) n_fraction_digits
+ if ((word) - expon > (word) n_fraction_digits
&& (output_style == 'f' || (output_style == 'g')))
goto do_zero;
@@ -737,8 +762,7 @@ format_float (u8 * s, f64 x,
/* Round last printed digit. */
if (decimal_point <= 0
- && n_fraction_done + 1 == n_fraction_digits
- && digit < 9)
+ && n_fraction_done + 1 == n_fraction_digits && digit < 9)
digit += x >= .5;
vec_add1 (s, '0' + digit);
@@ -747,8 +771,7 @@ format_float (u8 * s, f64 x,
decimal_point--;
n_fraction_done += decimal_point < 0;
- if (decimal_point <= 0
- && n_fraction_done >= n_fraction_digits)
+ if (decimal_point <= 0 && n_fraction_done >= n_fraction_digits)
break;
if (decimal_point == 0 && x != 0)
@@ -760,8 +783,8 @@ format_float (u8 * s, f64 x,
x *= 10;
prec *= 10;
}
-
- done:
+
+done:
if (decimal_point > 0)
{
s = add_some_zeros (s, decimal_point);
@@ -770,7 +793,7 @@ format_float (u8 * s, f64 x,
if (n_fraction_done < n_fraction_digits)
{
- if (! added_decimal_point)
+ if (!added_decimal_point)
vec_add1 (s, '.');
s = add_some_zeros (s, n_fraction_digits - n_fraction_done);
}
@@ -781,3 +804,11 @@ format_float (u8 * s, f64 x,
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
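
A brief usage sketch of the functions re-indented above: format () appends
to a u8 * vector (printed with %v and released with vec_free), and %U
dispatches to a user-supplied function with the format_function_t
signature from format.h. format_celsius below is a hypothetical example of
such a function, not part of the library.

#include <stdio.h>
#include <vppinfra/format.h>
#include <vppinfra/vec.h>

/* User-defined format function, invoked via "%U". */
static u8 *
format_celsius (u8 * s, va_list * args)
{
  f64 deg = va_arg (*args, f64);
  return format (s, "%.2f C", deg);
}

int
main (void)
{
  u8 *s = 0;

  s = format (s, "count %d, hex 0x%x, str %s", 42, 255, "hello");
  s = format (s, ", temp %U", format_celsius, 21.5);

  /* s is a vector, not a NUL-terminated C string: print it with %v. */
  fformat (stdout, "%v\n", s);
  vec_free (s);
  return 0;
}

The digits after '.' in a float specifier are handed to format_float as
n_fraction_digits, which is why %.2f above prints two fractional digits.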
diff --git a/vppinfra/vppinfra/format.h b/vppinfra/vppinfra/format.h
index 45cd3f50e37..c91cc74711e 100644
--- a/vppinfra/vppinfra/format.h
+++ b/vppinfra/vppinfra/format.h
@@ -40,15 +40,15 @@
#include <stdarg.h>
-#include <vppinfra/clib.h> /* for CLIB_UNIX, etc. */
+#include <vppinfra/clib.h> /* for CLIB_UNIX, etc. */
#include <vppinfra/vec.h>
-#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/error.h> /* for ASSERT */
#include <vppinfra/string.h>
-typedef u8 * (format_function_t) (u8 * s, va_list * args);
+typedef u8 *(format_function_t) (u8 * s, va_list * args);
-u8 * va_format (u8 * s, char * format, va_list * args);
-u8 * format (u8 * s, char * format, ...);
+u8 *va_format (u8 * s, char *format, va_list * args);
+u8 *format (u8 * s, char *format, ...);
#ifdef CLIB_UNIX
@@ -64,17 +64,17 @@ u8 * format (u8 * s, char * format, ...);
#endif
-word va_fformat (FILE * f, char * fmt, va_list * va);
-word fformat (FILE * f, char * fmt, ...);
-word fdformat (int fd, char * fmt, ...);
+word va_fformat (FILE * f, char *fmt, va_list * va);
+word fformat (FILE * f, char *fmt, ...);
+word fdformat (int fd, char *fmt, ...);
always_inline uword
format_get_indent (u8 * s)
{
uword indent = 0;
- u8 * nl;
+ u8 *nl;
- if (! s)
+ if (!s)
return indent;
nl = vec_end (s) - 1;
@@ -90,52 +90,53 @@ format_get_indent (u8 * s)
#define _(f) u8 * f (u8 * s, va_list * va)
/* Standard user-defined formats. */
-_ (format_vec32);
-_ (format_vec_uword);
-_ (format_ascii_bytes);
-_ (format_hex_bytes);
-_ (format_white_space);
-_ (format_f64);
-_ (format_time_interval);
+_(format_vec32);
+_(format_vec_uword);
+_(format_ascii_bytes);
+_(format_hex_bytes);
+_(format_white_space);
+_(format_f64);
+_(format_time_interval);
#ifdef CLIB_UNIX
/* Unix specific formats. */
-_ (format_address_family);
-_ (format_unix_arphrd);
-_ (format_unix_interface_flags);
-_ (format_network_address);
-_ (format_network_protocol);
-_ (format_network_port);
-_ (format_sockaddr);
-_ (format_ip4_tos_byte);
-_ (format_ip4_packet);
-_ (format_icmp4_type_and_code);
-_ (format_ethernet_packet);
-_ (format_hostname);
-_ (format_timeval);
-_ (format_time_float);
-_ (format_signal);
-_ (format_ucontext_pc);
+_(format_address_family);
+_(format_unix_arphrd);
+_(format_unix_interface_flags);
+_(format_network_address);
+_(format_network_protocol);
+_(format_network_port);
+_(format_sockaddr);
+_(format_ip4_tos_byte);
+_(format_ip4_packet);
+_(format_icmp4_type_and_code);
+_(format_ethernet_packet);
+_(format_hostname);
+_(format_timeval);
+_(format_time_float);
+_(format_signal);
+_(format_ucontext_pc);
#endif
#undef _
/* Unformat. */
-typedef struct _unformat_input_t {
+typedef struct _unformat_input_t
+{
/* Input buffer (vector). */
- u8 * buffer;
+ u8 *buffer;
/* Current index in input buffer. */
uword index;
/* Vector of buffer marks. Used to delineate pieces of the buffer
for error reporting and for parse recovery. */
- uword * buffer_marks;
+ uword *buffer_marks;
/* User's function to fill the buffer when its empty
(and argument). */
- uword (* fill_buffer) (struct _unformat_input_t * i);
+ uword (*fill_buffer) (struct _unformat_input_t * i);
/* Return values for fill buffer function which indicate whether not
input has been exhausted. */
@@ -143,13 +144,13 @@ typedef struct _unformat_input_t {
#define UNFORMAT_MORE_INPUT 0
/* User controlled argument to fill buffer function. */
- void * fill_buffer_arg;
+ void *fill_buffer_arg;
} unformat_input_t;
always_inline void
unformat_init (unformat_input_t * i,
- uword (* fill_buffer) (unformat_input_t *),
- void * fill_buffer_arg)
+ uword (*fill_buffer) (unformat_input_t *),
+ void *fill_buffer_arg)
{
memset (i, 0, sizeof (i[0]));
i->fill_buffer = fill_buffer;
@@ -170,8 +171,7 @@ unformat_check_input (unformat_input_t * i)
/* Low level fill input function. */
extern uword _unformat_fill_input (unformat_input_t * i);
- if (i->index >= vec_len (i->buffer)
- && i->index != UNFORMAT_END_OF_INPUT)
+ if (i->index >= vec_len (i->buffer) && i->index != UNFORMAT_END_OF_INPUT)
_unformat_fill_input (i);
return i->index;
@@ -201,7 +201,9 @@ unformat_get_input (unformat_input_t * input)
/* Back up input pointer by one. */
always_inline void
unformat_put_input (unformat_input_t * input)
-{ input->index -= 1; }
+{
+ input->index -= 1;
+}
/* Peek current input character without advancing. */
always_inline uword
@@ -214,59 +216,59 @@ unformat_peek_input (unformat_input_t * input)
}
/* Skip current input line. */
-always_inline void unformat_skip_line (unformat_input_t * i)
+always_inline void
+unformat_skip_line (unformat_input_t * i)
{
uword c;
- while ((c = unformat_get_input (i)) != UNFORMAT_END_OF_INPUT
- && c != '\n')
+ while ((c = unformat_get_input (i)) != UNFORMAT_END_OF_INPUT && c != '\n')
;
}
uword unformat_skip_white_space (unformat_input_t * input);
/* Unformat function. */
-typedef uword (unformat_function_t) (unformat_input_t * input, va_list * args);
+typedef uword (unformat_function_t) (unformat_input_t * input,
+ va_list * args);
/* External functions. */
/* General unformatting function with programmable input stream. */
-uword unformat (unformat_input_t * i, char * fmt, ...);
+uword unformat (unformat_input_t * i, char *fmt, ...);
/* Call user defined parse function.
unformat_user (i, f, ...) is equivalent to unformat (i, "%U", f, ...) */
-uword unformat_user (unformat_input_t * input, unformat_function_t * func, ...);
+uword unformat_user (unformat_input_t * input, unformat_function_t * func,
+ ...);
/* Alternate version which allows for extensions. */
-uword va_unformat (unformat_input_t * i, char * fmt, va_list * args);
+uword va_unformat (unformat_input_t * i, char *fmt, va_list * args);
/* Setup for unformat of Unix style command line. */
-void unformat_init_command_line (unformat_input_t * input,
- char * argv[]);
+void unformat_init_command_line (unformat_input_t * input, char *argv[]);
/* Setup for unformat of given string. */
void unformat_init_string (unformat_input_t * input,
- char * string,
- int string_len);
+ char *string, int string_len);
always_inline void
-unformat_init_cstring (unformat_input_t * input,
- char * string)
-{ unformat_init_string (input, string, strlen (string)); }
+unformat_init_cstring (unformat_input_t * input, char *string)
+{
+ unformat_init_string (input, string, strlen (string));
+}
/* Setup for unformat of given vector string; vector will be freed by unformat_string. */
-void unformat_init_vector (unformat_input_t * input,
- u8 * vector_string);
+void unformat_init_vector (unformat_input_t * input, u8 * vector_string);
/* Format function for unformat input usable when an unformat error
has occurred. */
-u8 * format_unformat_error (u8 * s, va_list * va);
+u8 *format_unformat_error (u8 * s, va_list * va);
#define unformat_parse_error(input) \
clib_error_return (0, "parse error `%U'", format_unformat_error, input)
/* Print all input: not just error context. */
-u8 * format_unformat_input (u8 * s, va_list * va);
+u8 *format_unformat_input (u8 * s, va_list * va);
/* Unformat (parse) function which reads a %s string and converts it
to and unformat_input_t. */
@@ -292,31 +294,38 @@ unformat_function_t unformat_eof;
unformat_function_t unformat_memory_size;
/* Unparse memory size e.g. 100, 100k, 100m, 100g. */
-u8 * format_memory_size (u8 * s, va_list * va);
+u8 *format_memory_size (u8 * s, va_list * va);
/* Format c identifier: e.g. a_name -> "a name". */
-u8 * format_c_identifier (u8 * s, va_list * va);
+u8 *format_c_identifier (u8 * s, va_list * va);
/* Format hexdump with both hex and printable chars - compatible with text2pcap */
-u8 * format_hexdump (u8 * s, va_list * va);
+u8 *format_hexdump (u8 * s, va_list * va);
/* Unix specific formats. */
#ifdef CLIB_UNIX
/* Setup input from Unix file. */
-void unformat_init_unix_file (unformat_input_t * input,
- int file_descriptor);
+void unformat_init_unix_file (unformat_input_t * input, int file_descriptor);
/* Take input from Unix environment variable; returns
1 if variable exists zero otherwise. */
-uword unformat_init_unix_env (unformat_input_t * input, char * var);
+uword unformat_init_unix_env (unformat_input_t * input, char *var);
#endif /* CLIB_UNIX */
/* Test code. */
int test_format_main (unformat_input_t * input);
int test_unformat_main (unformat_input_t * input);
-/* This is not the right place for this, but putting it in vec.h
+/* This is not the right place for this, but putting it in vec.h
created circular dependency problems. */
int test_vec_main (unformat_input_t * input);
#endif /* included_format_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
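
The unformat side of the header parses with mirror-image conventions. A
hedged sketch driving it from a C string (via unformat_init_cstring shown
above) follows; here %s is taken to collect a whitespace-delimited token
into a u8 * vector, and unformat_free is assumed from the part of the
header not visible in these hunks.

#include <stdio.h>
#include <vppinfra/format.h>
#include <vppinfra/vec.h>

int
main (void)
{
  unformat_input_t input, *in = &input;
  u32 a = 0, b = 0;
  u8 *name = 0;

  unformat_init_cstring (in, "add 10 20 name foo");

  while (unformat_check_input (in) != UNFORMAT_END_OF_INPUT)
    {
      if (unformat (in, "add %d %d", &a, &b))
	fformat (stdout, "sum %d\n", a + b);
      else if (unformat (in, "name %s", &name))
	fformat (stdout, "name %v\n", name);
      else
	{
	  fformat (stderr, "parse error: %U\n", format_unformat_error, in);
	  break;
	}
    }

  unformat_free (in);   /* assumed: releases the input buffer */
  vec_free (name);
  return 0;
}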
diff --git a/vppinfra/vppinfra/graph.c b/vppinfra/vppinfra/graph.c
index d1f648ac7b3..98a29046f17 100644
--- a/vppinfra/vppinfra/graph.c
+++ b/vppinfra/vppinfra/graph.c
@@ -15,18 +15,19 @@
#include <vppinfra/graph.h>
/* Set link distance, creating link if not found. */
-u32 graph_set_link (graph_t * g, u32 src, u32 dst, u32 distance)
+u32
+graph_set_link (graph_t * g, u32 src, u32 dst, u32 distance)
{
- graph_node_t * src_node, * dst_node;
- graph_link_t * l;
+ graph_node_t *src_node, *dst_node;
+ graph_link_t *l;
u32 old_distance;
-
+
/* The following validate will not work if src or dst are on the
pool free list. */
if (src < vec_len (g->nodes))
- ASSERT (! pool_is_free_index (g->nodes, src));
+ ASSERT (!pool_is_free_index (g->nodes, src));
if (dst < vec_len (g->nodes))
- ASSERT (! pool_is_free_index (g->nodes, dst));
+ ASSERT (!pool_is_free_index (g->nodes, dst));
/* Make new (empty) nodes to make src and dst valid. */
pool_validate_index (g->nodes, clib_max (src, dst));
@@ -62,10 +63,11 @@ u32 graph_set_link (graph_t * g, u32 src, u32 dst, u32 distance)
return old_distance;
}
-void graph_del_link (graph_t * g, u32 src, u32 dst)
+void
+graph_del_link (graph_t * g, u32 src, u32 dst)
{
- graph_node_t * src_node, * dst_node;
-
+ graph_node_t *src_node, *dst_node;
+
src_node = pool_elt_at_index (g->nodes, src);
dst_node = pool_elt_at_index (g->nodes, dst);
@@ -74,25 +76,26 @@ void graph_del_link (graph_t * g, u32 src, u32 dst)
}
/* Delete source node and all links from other nodes from/to source. */
-uword graph_del_node (graph_t * g, u32 src)
+uword
+graph_del_node (graph_t * g, u32 src)
{
- graph_node_t * src_node, * n;
+ graph_node_t *src_node, *n;
uword index;
- graph_link_t * l;
+ graph_link_t *l;
src_node = pool_elt_at_index (g->nodes, src);
vec_foreach (l, src_node->next.links)
- {
- n = pool_elt_at_index (g->nodes, l->node_index);
- graph_dir_del_link (&n->prev, src);
- }
+ {
+ n = pool_elt_at_index (g->nodes, l->node_index);
+ graph_dir_del_link (&n->prev, src);
+ }
vec_foreach (l, src_node->prev.links)
- {
- n = pool_elt_at_index (g->nodes, l->node_index);
- graph_dir_del_link (&n->next, src);
- }
+ {
+ n = pool_elt_at_index (g->nodes, l->node_index);
+ graph_dir_del_link (&n->next, src);
+ }
graph_dir_free (&src_node->next);
graph_dir_free (&src_node->prev);
@@ -104,36 +107,39 @@ uword graph_del_node (graph_t * g, u32 src)
return index;
}
-uword unformat_graph (unformat_input_t * input, va_list * args)
+uword
+unformat_graph (unformat_input_t * input, va_list * args)
{
- graph_t * g = va_arg (*args, graph_t *);
- typedef struct {
+ graph_t *g = va_arg (*args, graph_t *);
+ typedef struct
+ {
u32 src, dst, distance;
} T;
- T * links = 0, * l;
+ T *links = 0, *l;
uword result;
while (1)
{
vec_add2 (links, l, 1);
- if (! unformat (input, "%d%d%d", &l->src, &l->dst, &l->distance))
+ if (!unformat (input, "%d%d%d", &l->src, &l->dst, &l->distance))
break;
}
_vec_len (links) -= 1;
result = vec_len (links) > 0;
vec_foreach (l, links)
- {
- graph_set_link (g, l->src, l->dst, l->distance);
- graph_set_link (g, l->dst, l->src, l->distance);
- }
+ {
+ graph_set_link (g, l->src, l->dst, l->distance);
+ graph_set_link (g, l->dst, l->src, l->distance);
+ }
vec_free (links);
return result;
}
-u8 * format_graph_node (u8 * s, va_list * args)
+u8 *
+format_graph_node (u8 * s, va_list * args)
{
- graph_t * g = va_arg (*args, graph_t *);
+ graph_t *g = va_arg (*args, graph_t *);
u32 node_index = va_arg (*args, u32);
if (g->format_node)
@@ -144,14 +150,16 @@ u8 * format_graph_node (u8 * s, va_list * args)
return s;
}
-u8 * format_graph (u8 * s, va_list * args)
+u8 *
+format_graph (u8 * s, va_list * args)
{
- graph_t * g = va_arg (*args, graph_t *);
- graph_node_t * n;
- graph_link_t * l;
+ graph_t *g = va_arg (*args, graph_t *);
+ graph_node_t *n;
+ graph_link_t *l;
uword indent = format_get_indent (s);
s = format (s, "graph %d nodes", pool_elts (g->nodes));
+ /* *INDENT-OFF* */
pool_foreach (n, g->nodes, ({
s = format (s, "\n%U", format_white_space, indent + 2);
s = format (s, "%U -> ", format_graph_node, g, n - g->nodes);
@@ -160,6 +168,15 @@ u8 * format_graph (u8 * s, va_list * args)
format_graph_node, g, l->node_index,
l->distance);
}));
+ /* *INDENT-ON* */
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
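
For context, a small sketch exercising the functions above: nodes are
created on demand by graph_set_link (via pool_validate_index),
graph_set_bidirectional_link from graph.h adds a link in each direction,
and format_graph prints the whole structure through the %U mechanism.

#include <string.h>
#include <stdio.h>
#include <vppinfra/graph.h>
#include <vppinfra/format.h>

int
main (void)
{
  graph_t graph, *g = &graph;

  memset (g, 0, sizeof (g[0]));   /* empty node pool, default node format */

  graph_set_bidirectional_link (g, 0, 1, 10);   /* distance 10 each way */
  graph_set_bidirectional_link (g, 1, 2, 5);
  graph_set_link (g, 0, 2, 50);   /* one-way link */

  fformat (stdout, "%U\n", format_graph, g);

  graph_del_node (g, 1);   /* drops node 1 and every link to/from it */
  fformat (stdout, "%U\n", format_graph, g);

  return 0;
}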
diff --git a/vppinfra/vppinfra/graph.h b/vppinfra/vppinfra/graph.h
index 5f02d1138b9..1c26118f76c 100644
--- a/vppinfra/vppinfra/graph.h
+++ b/vppinfra/vppinfra/graph.h
@@ -20,7 +20,8 @@
#include <vppinfra/pool.h>
/* Generic graphs. */
-typedef struct {
+typedef struct
+{
/* Next node along this link. */
u32 node_index;
@@ -32,12 +33,13 @@ typedef struct {
} graph_link_t;
/* Direction on graph: either next or previous. */
-typedef struct {
+typedef struct
+{
/* Vector of links. */
- graph_link_t * links;
+ graph_link_t *links;
/* Hash mapping node index to link which visits this node. */
- uword * link_index_by_node_index;
+ uword *link_index_by_node_index;
} graph_dir_t;
always_inline void
@@ -50,15 +52,15 @@ graph_dir_free (graph_dir_t * d)
always_inline graph_link_t *
graph_dir_get_link_to_node (graph_dir_t * d, u32 node_index)
{
- uword * p = hash_get (d->link_index_by_node_index, node_index);
+ uword *p = hash_get (d->link_index_by_node_index, node_index);
return p ? vec_elt_at_index (d->links, p[0]) : 0;
}
always_inline uword
graph_dir_add_link (graph_dir_t * d, u32 node_index, u32 distance)
{
- graph_link_t * l;
- ASSERT (! graph_dir_get_link_to_node (d, node_index));
+ graph_link_t *l;
+ ASSERT (!graph_dir_get_link_to_node (d, node_index));
vec_add2 (d->links, l, 1);
l->node_index = node_index;
l->distance = distance;
@@ -69,7 +71,7 @@ graph_dir_add_link (graph_dir_t * d, u32 node_index, u32 distance)
always_inline void
graph_dir_del_link (graph_dir_t * d, u32 node_index)
{
- graph_link_t * l = graph_dir_get_link_to_node (d, node_index);
+ graph_link_t *l = graph_dir_get_link_to_node (d, node_index);
uword li = l - d->links;
uword n_links = vec_len (d->links);
@@ -81,24 +83,27 @@ graph_dir_del_link (graph_dir_t * d, u32 node_index)
_vec_len (d->links) = n_links;
}
-typedef struct {
+typedef struct
+{
/* Nodes we are connected to plus distances. */
graph_dir_t next, prev;
} graph_node_t;
-typedef struct {
+typedef struct
+{
/* Pool of nodes. */
- graph_node_t * nodes;
+ graph_node_t *nodes;
- void * opaque;
+ void *opaque;
- format_function_t * format_node;
+ format_function_t *format_node;
} graph_t;
/* Set link distance, creating link if not found. */
u32 graph_set_link (graph_t * g, u32 src, u32 dst, u32 distance);
-always_inline void graph_set_bidirectional_link (graph_t * g, u32 src, u32 dst, u32 distance)
+always_inline void
+graph_set_bidirectional_link (graph_t * g, u32 src, u32 dst, u32 distance)
{
graph_set_link (g, src, dst, distance);
graph_set_link (g, dst, src, distance);
@@ -112,3 +117,11 @@ format_function_t format_graph;
format_function_t format_graph_node;
#endif /* included_clib_graph_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/hash.c b/vppinfra/vppinfra/hash.c
index cbb922edd5a..062ad8823e1 100644
--- a/vppinfra/vppinfra/hash.c
+++ b/vppinfra/vppinfra/hash.c
@@ -40,42 +40,50 @@
#include <vppinfra/mem.h>
#include <vppinfra/byte_order.h> /* for clib_arch_is_big_endian */
-always_inline void zero_pair (hash_t * h, hash_pair_t * p)
-{ memset (p, 0, hash_pair_bytes (h)); }
+always_inline void
+zero_pair (hash_t * h, hash_pair_t * p)
+{
+ memset (p, 0, hash_pair_bytes (h));
+}
-always_inline void init_pair (hash_t * h, hash_pair_t * p)
-{ memset (p->value, ~0, hash_value_bytes (h)); }
+always_inline void
+init_pair (hash_t * h, hash_pair_t * p)
+{
+ memset (p->value, ~0, hash_value_bytes (h));
+}
always_inline hash_pair_union_t *
-get_pair (void * v, uword i)
+get_pair (void *v, uword i)
{
- hash_t * h = hash_header (v);
- hash_pair_t * p;
+ hash_t *h = hash_header (v);
+ hash_pair_t *p;
ASSERT (i < vec_len (v));
p = v;
p += i << h->log2_pair_size;
return (hash_pair_union_t *) p;
}
-always_inline void set_is_user (void * v, uword i, uword is_user)
+always_inline void
+set_is_user (void *v, uword i, uword is_user)
{
- hash_t * h = hash_header (v);
- uword i0 = i / BITS(h->is_user[0]);
- uword i1 = (uword) 1 << (i % BITS(h->is_user[0]));
+ hash_t *h = hash_header (v);
+ uword i0 = i / BITS (h->is_user[0]);
+ uword i1 = (uword) 1 << (i % BITS (h->is_user[0]));
if (is_user)
h->is_user[i0] |= i1;
else
h->is_user[i0] &= ~i1;
}
-static u8 * hash_format_pair_default (u8 * s, va_list * args);
+static u8 *hash_format_pair_default (u8 * s, va_list * args);
#if uword_bits == 64
-static inline u64 zap64 (u64 x, word n)
+static inline u64
+zap64 (u64 x, word n)
{
#define _(n) (((u64) 1 << (u64) (8*(n))) - (u64) 1)
- static u64 masks_little_endian[] = {
+ static u64 masks_little_endian[] = {
0, _(1), _(2), _(3), _(4), _(5), _(6), _(7),
};
static u64 masks_big_endian[] = {
@@ -88,22 +96,23 @@ static inline u64 zap64 (u64 x, word n)
return x & masks_little_endian[n];
}
-static inline u64 hash_memory64 (void * p, word n_bytes, u64 state)
+static inline u64
+hash_memory64 (void *p, word n_bytes, u64 state)
{
- u64 * q = p;
+ u64 *q = p;
u64 a, b, c, n;
a = b = 0x9e3779b97f4a7c13LL;
c = state;
n = n_bytes;
- while (n >= 3 * sizeof(u64))
+ while (n >= 3 * sizeof (u64))
{
a += clib_mem_unaligned (q + 0, u64);
b += clib_mem_unaligned (q + 1, u64);
c += clib_mem_unaligned (q + 2, u64);
hash_mix64 (a, b, c);
- n -= 3*sizeof(u64);
+ n -= 3 * sizeof (u64);
q += 3;
}
@@ -114,32 +123,30 @@ static inline u64 hash_memory64 (void * p, word n_bytes, u64 state)
a += clib_mem_unaligned (q + 0, u64);
b += clib_mem_unaligned (q + 1, u64);
if (n % sizeof (u64))
- c += zap64 (clib_mem_unaligned (q + 2, u64),
- n % sizeof (u64)) << 8;
+ c += zap64 (clib_mem_unaligned (q + 2, u64), n % sizeof (u64)) << 8;
break;
case 1:
a += clib_mem_unaligned (q + 0, u64);
if (n % sizeof (u64))
- b += zap64 (clib_mem_unaligned (q + 1, u64),
- n % sizeof (u64));
+ b += zap64 (clib_mem_unaligned (q + 1, u64), n % sizeof (u64));
break;
case 0:
if (n % sizeof (u64))
- a += zap64 (clib_mem_unaligned (q + 0, u64),
- n % sizeof (u64));
+ a += zap64 (clib_mem_unaligned (q + 0, u64), n % sizeof (u64));
break;
}
hash_mix64 (a, b, c);
-
+
return c;
}
#else /* if uword_bits == 64 */
-static inline u32 zap32 (u32 x, word n)
+static inline u32
+zap32 (u32 x, word n)
{
#define _(n) (((u32) 1 << (u32) (8*(n))) - (u32) 1)
static u32 masks_little_endian[] = {
@@ -155,22 +162,23 @@ static inline u32 zap32 (u32 x, word n)
return x & masks_little_endian[n];
}
-static inline u32 hash_memory32 (void * p, word n_bytes, u32 state)
+static inline u32
+hash_memory32 (void *p, word n_bytes, u32 state)
{
- u32 * q = p;
+ u32 *q = p;
u32 a, b, c, n;
a = b = 0x9e3779b9;
c = state;
n = n_bytes;
- while (n >= 3 * sizeof(u32))
+ while (n >= 3 * sizeof (u32))
{
a += clib_mem_unaligned (q + 0, u32);
b += clib_mem_unaligned (q + 1, u32);
c += clib_mem_unaligned (q + 2, u32);
hash_mix32 (a, b, c);
- n -= 3*sizeof(u32);
+ n -= 3 * sizeof (u32);
q += 3;
}
@@ -181,33 +189,31 @@ static inline u32 hash_memory32 (void * p, word n_bytes, u32 state)
a += clib_mem_unaligned (q + 0, u32);
b += clib_mem_unaligned (q + 1, u32);
if (n % sizeof (u32))
- c += zap32 (clib_mem_unaligned (q + 2, u32),
- n % sizeof (u32)) << 8;
+ c += zap32 (clib_mem_unaligned (q + 2, u32), n % sizeof (u32)) << 8;
break;
case 1:
a += clib_mem_unaligned (q + 0, u32);
if (n % sizeof (u32))
- b += zap32 (clib_mem_unaligned (q + 1, u32),
- n % sizeof (u32));
+ b += zap32 (clib_mem_unaligned (q + 1, u32), n % sizeof (u32));
break;
case 0:
if (n % sizeof (u32))
- a += zap32 (clib_mem_unaligned (q + 0, u32),
- n % sizeof (u32));
+ a += zap32 (clib_mem_unaligned (q + 0, u32), n % sizeof (u32));
break;
}
hash_mix32 (a, b, c);
-
+
return c;
}
#endif
-uword hash_memory (void * p, word n_bytes, uword state)
+uword
+hash_memory (void *p, word n_bytes, uword state)
{
- uword * q = p;
+ uword *q = p;
#if uword_bits == 64
return hash_memory64 (q, n_bytes, state);
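
hash_memory () is the generic byte-string mixer that the key_sum variants
below fall back to. Calling it directly is straightforward; the third
argument is an initial state, so a previous result can be fed back in to
chain several buffers into one hash:

#include <stdio.h>
#include <vppinfra/hash.h>
#include <vppinfra/format.h>

int
main (void)
{
  char buf[] = "some key material";
  uword h1, h2;

  h1 = hash_memory (buf, sizeof (buf) - 1, /* state */ 0);

  /* Chain: use the previous hash as the state for the next buffer. */
  h2 = hash_memory (buf, sizeof (buf) - 1, h1);

  fformat (stdout, "h1 0x%wx h2 0x%wx\n", h1, h2);
  return 0;
}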
@@ -217,7 +223,8 @@ uword hash_memory (void * p, word n_bytes, uword state)
}
#if uword_bits == 64
-always_inline uword hash_uword (uword x)
+always_inline uword
+hash_uword (uword x)
{
u64 a, b, c;
@@ -228,7 +235,8 @@ always_inline uword hash_uword (uword x)
return c;
}
#else
-always_inline uword hash_uword (uword x)
+always_inline uword
+hash_uword (uword x)
{
u32 a, b, c;
@@ -242,7 +250,8 @@ always_inline uword hash_uword (uword x)
/* Call sum function. Hash code will be sum function value
modulo the prime length of the hash table. */
-always_inline uword key_sum (hash_t * h, uword key)
+always_inline uword
+key_sum (hash_t * h, uword key)
{
uword sum;
switch (pointer_to_uword ((void *) h->key_sum))
@@ -252,15 +261,15 @@ always_inline uword key_sum (hash_t * h, uword key)
break;
case KEY_FUNC_POINTER_UWORD:
- sum = hash_uword (* uword_to_pointer (key, uword *));
+ sum = hash_uword (*uword_to_pointer (key, uword *));
break;
case KEY_FUNC_POINTER_U32:
- sum = hash_uword (* uword_to_pointer (key, u32 *));
+ sum = hash_uword (*uword_to_pointer (key, u32 *));
break;
case KEY_FUNC_STRING:
- sum = string_key_sum (h, key);
+ sum = string_key_sum (h, key);
break;
default:
@@ -271,7 +280,8 @@ always_inline uword key_sum (hash_t * h, uword key)
return sum;
}
-always_inline uword key_equal1 (hash_t * h, uword key1, uword key2, uword e)
+always_inline uword
+key_equal1 (hash_t * h, uword key1, uword key2, uword e)
{
switch (pointer_to_uword ((void *) h->key_equal))
{
@@ -279,15 +289,17 @@ always_inline uword key_equal1 (hash_t * h, uword key1, uword key2, uword e)
break;
case KEY_FUNC_POINTER_UWORD:
- e = * uword_to_pointer (key1, uword *) == * uword_to_pointer (key2, uword *);
+ e =
+ *uword_to_pointer (key1, uword *) == *uword_to_pointer (key2,
+ uword *);
break;
case KEY_FUNC_POINTER_U32:
- e = * uword_to_pointer (key1, u32 *) == * uword_to_pointer (key2, u32 *);
+ e = *uword_to_pointer (key1, u32 *) == *uword_to_pointer (key2, u32 *);
break;
case KEY_FUNC_STRING:
- e = string_key_equal (h, key1, key2);
+ e = string_key_equal (h, key1, key2);
break;
default:
@@ -298,21 +310,22 @@ always_inline uword key_equal1 (hash_t * h, uword key1, uword key2, uword e)
}
/* Compares two keys: returns 1 if equal, 0 if not. */
-always_inline uword key_equal (hash_t * h, uword key1, uword key2)
+always_inline uword
+key_equal (hash_t * h, uword key1, uword key2)
{
uword e = key1 == key2;
if (CLIB_DEBUG > 0 && key1 == key2)
ASSERT (key_equal1 (h, key1, key2, e));
- if (! e)
+ if (!e)
e = key_equal1 (h, key1, key2, e);
return e;
}
static hash_pair_union_t *
-get_indirect (void * v, hash_pair_indirect_t * pi, uword key)
+get_indirect (void *v, hash_pair_indirect_t * pi, uword key)
{
- hash_t * h = hash_header (v);
- hash_pair_t * p0, * p1;
+ hash_t *h = hash_header (v);
+ hash_pair_t *p0, *p1;
p0 = p1 = pi->pairs;
if (h->log2_pair_size > 0)
@@ -331,14 +344,11 @@ get_indirect (void * v, hash_pair_indirect_t * pi, uword key)
}
static hash_pair_union_t *
-set_indirect_is_user (void * v,
- uword i,
- hash_pair_union_t * p,
- uword key)
+set_indirect_is_user (void *v, uword i, hash_pair_union_t * p, uword key)
{
- hash_t * h = hash_header (v);
- hash_pair_t * q;
- hash_pair_indirect_t * pi = &p->indirect;
+ hash_t *h = hash_header (v);
+ hash_pair_t *q;
+ hash_pair_indirect_t *pi = &p->indirect;
uword log2_bytes = 0;
if (h->log2_pair_size == 0)
@@ -364,12 +374,12 @@ set_indirect_is_user (void * v,
}
static hash_pair_union_t *
-set_indirect (void * v, hash_pair_indirect_t * pi, uword key,
+set_indirect (void *v, hash_pair_indirect_t * pi, uword key,
uword * found_key)
{
- hash_t * h = hash_header (v);
- hash_pair_t * new_pair;
- hash_pair_union_t * q;
+ hash_t *h = hash_header (v);
+ hash_pair_t *new_pair;
+ hash_pair_union_t *q;
q = get_indirect (v, pi, key);
if (q)
@@ -405,17 +415,18 @@ set_indirect (void * v, hash_pair_indirect_t * pi, uword key,
return (hash_pair_union_t *) new_pair;
}
-static void unset_indirect (void * v, uword i, hash_pair_t * q)
+static void
+unset_indirect (void *v, uword i, hash_pair_t * q)
{
- hash_t * h = hash_header (v);
- hash_pair_union_t * p = get_pair (v, i);
- hash_pair_t * e;
- hash_pair_indirect_t * pi = &p->indirect;
+ hash_t *h = hash_header (v);
+ hash_pair_union_t *p = get_pair (v, i);
+ hash_pair_t *e;
+ hash_pair_indirect_t *pi = &p->indirect;
uword len, is_vec;
is_vec = h->log2_pair_size == 0;
- ASSERT (! hash_is_user (v, i));
+ ASSERT (!hash_is_user (v, i));
len = is_vec ? vec_len (pi->pairs) : indirect_pair_get_len (pi);
e = hash_forward (h, pi->pairs, len - 1);
ASSERT (q >= pi->pairs && q <= e);
@@ -424,11 +435,12 @@ static void unset_indirect (void * v, uword i, hash_pair_t * q)
Make indirect pointer direct and free indirect memory. */
if (len <= 2)
{
- hash_pair_t * r = pi->pairs;
+ hash_pair_t *r = pi->pairs;
if (len == 2)
{
- clib_memcpy (p, q == r ? hash_forward1 (h, r) : r, hash_pair_bytes (h));
+ clib_memcpy (p, q == r ? hash_forward1 (h, r) : r,
+ hash_pair_bytes (h));
set_is_user (v, i, 1);
}
else
@@ -449,27 +461,27 @@ static void unset_indirect (void * v, uword i, hash_pair_t * q)
if (is_vec)
_vec_len (pi->pairs) -= 1;
else
- indirect_pair_set (pi,
- indirect_pair_get_log2_bytes (pi),
- len - 1);
+ indirect_pair_set (pi, indirect_pair_get_log2_bytes (pi), len - 1);
}
}
-enum lookup_opcode {
+enum lookup_opcode
+{
GET = 1,
SET = 2,
UNSET = 3,
};
-static hash_pair_t * lookup (void * v, uword key, enum lookup_opcode op,
- void * new_value, void * old_value)
+static hash_pair_t *
+lookup (void *v, uword key, enum lookup_opcode op,
+ void *new_value, void *old_value)
{
- hash_t * h = hash_header (v);
- hash_pair_union_t * p = 0;
+ hash_t *h = hash_header (v);
+ hash_pair_union_t *p = 0;
uword found_key = 0;
uword i;
- if (! v)
+ if (!v)
return 0;
i = key_sum (h, key) & (_vec_len (v) - 1);
@@ -484,7 +496,8 @@ static hash_pair_t * lookup (void * v, uword key, enum lookup_opcode op,
{
set_is_user (v, i, 0);
if (old_value)
- clib_memcpy (old_value, p->direct.value, hash_value_bytes (h));
+ clib_memcpy (old_value, p->direct.value,
+ hash_value_bytes (h));
zero_pair (h, &p->direct);
}
}
@@ -498,11 +511,11 @@ static hash_pair_t * lookup (void * v, uword key, enum lookup_opcode op,
}
else
{
- hash_pair_indirect_t * pi = &p->indirect;
+ hash_pair_indirect_t *pi = &p->indirect;
if (op == SET)
{
- if (! pi->pairs)
+ if (!pi->pairs)
{
p->direct.key = key;
set_is_user (v, i, 1);
@@ -517,12 +530,13 @@ static hash_pair_t * lookup (void * v, uword key, enum lookup_opcode op,
if (found_key && op == UNSET)
{
if (old_value)
- clib_memcpy (old_value, &p->direct.value, hash_value_bytes (h));
+ clib_memcpy (old_value, &p->direct.value,
+ hash_value_bytes (h));
unset_indirect (v, i, &p->direct);
/* Nullify p (since it's just been deleted).
- Otherwise we might be tempted to play with it. */
+ Otherwise we might be tempted to play with it. */
p = 0;
}
}
@@ -545,17 +559,18 @@ static hash_pair_t * lookup (void * v, uword key, enum lookup_opcode op,
}
/* Fetch value of key. */
-uword * _hash_get (void * v, uword key)
+uword *
+_hash_get (void *v, uword key)
{
- hash_t * h = hash_header (v);
- hash_pair_t * p;
+ hash_t *h = hash_header (v);
+ hash_pair_t *p;
/* Don't even search table if its empty. */
- if (! v || h->elts == 0)
+ if (!v || h->elts == 0)
return 0;
p = lookup (v, key, GET, 0, 0);
- if (! p)
+ if (!p)
return 0;
if (h->log2_pair_size == 0)
return &p->key;
@@ -563,13 +578,17 @@ uword * _hash_get (void * v, uword key)
return &p->value[0];
}
-hash_pair_t * _hash_get_pair (void * v, uword key)
-{ return lookup (v, key, GET, 0, 0); }
+hash_pair_t *
+_hash_get_pair (void *v, uword key)
+{
+ return lookup (v, key, GET, 0, 0);
+}
-hash_pair_t * hash_next (void * v, hash_next_t * hn)
+hash_pair_t *
+hash_next (void *v, hash_next_t * hn)
{
- hash_t * h = hash_header (v);
- hash_pair_t * p;
+ hash_t *h = hash_header (v);
+ hash_pair_t *p;
while (1)
{
@@ -580,9 +599,8 @@ hash_pair_t * hash_next (void * v, hash_next_t * hn)
/* Prevent others from re-sizing hash table. */
h->flags |=
- (HASH_FLAG_NO_AUTO_GROW
- | HASH_FLAG_NO_AUTO_SHRINK
- | HASH_FLAG_HASH_NEXT_IN_PROGRESS);
+ (HASH_FLAG_NO_AUTO_GROW
+ | HASH_FLAG_NO_AUTO_SHRINK | HASH_FLAG_HASH_NEXT_IN_PROGRESS);
}
else if (hn->i >= hash_capacity (v))
{
@@ -600,7 +618,7 @@ hash_pair_t * hash_next (void * v, hash_next_t * hn)
}
else
{
- hash_pair_indirect_t * pi = (void *) p;
+ hash_pair_indirect_t *pi = (void *) p;
uword n;
if (h->log2_pair_size > 0)
@@ -620,17 +638,18 @@ hash_pair_t * hash_next (void * v, hash_next_t * hn)
}
/* Remove key from table. */
-void * _hash_unset (void * v, uword key, void * old_value)
+void *
+_hash_unset (void *v, uword key, void *old_value)
{
- hash_t * h;
+ hash_t *h;
- if (! v)
+ if (!v)
return v;
(void) lookup (v, key, UNSET, 0, old_value);
h = hash_header (v);
- if (! (h->flags & HASH_FLAG_NO_AUTO_SHRINK))
+ if (!(h->flags & HASH_FLAG_NO_AUTO_SHRINK))
{
/* Resize when 1/4 full. */
if (h->elts > 32 && 4 * (h->elts + 1) < vec_len (v))
@@ -640,11 +659,12 @@ void * _hash_unset (void * v, uword key, void * old_value)
return v;
}
-void * _hash_create (uword elts, hash_t * h_user)
+void *
+_hash_create (uword elts, hash_t * h_user)
{
- hash_t * h;
+ hash_t *h;
uword log2_pair_size;
- void * v;
+ void *v;
/* Size of hash is power of 2 >= ELTS and larger than
number of bits in is_user bitmap elements. */
@@ -656,9 +676,12 @@ void * _hash_create (uword elts, hash_t * h_user)
log2_pair_size = h_user->log2_pair_size;
v = _vec_resize (0,
- /* vec len: */ elts,
- /* data bytes: */ (elts << log2_pair_size) * sizeof (hash_pair_t),
- /* header bytes: */ sizeof (h[0]) + (elts / BITS (h->is_user[0])) * sizeof (h->is_user[0]),
+ /* vec len: */ elts,
+ /* data bytes: */
+ (elts << log2_pair_size) * sizeof (hash_pair_t),
+ /* header bytes: */
+ sizeof (h[0]) +
+ (elts / BITS (h->is_user[0])) * sizeof (h->is_user[0]),
/* alignment */ sizeof (hash_pair_t));
h = hash_header (v);
@@ -670,10 +693,10 @@ void * _hash_create (uword elts, hash_t * h_user)
/* Default flags to never shrinking hash tables.
Shrinking tables can cause "jackpot" cases. */
- if (! h_user)
- h->flags = HASH_FLAG_NO_AUTO_SHRINK;
+ if (!h_user)
+ h->flags = HASH_FLAG_NO_AUTO_SHRINK;
- if (! h->format_pair)
+ if (!h->format_pair)
{
h->format_pair = hash_format_pair_default;
h->format_pair_arg = 0;
@@ -682,13 +705,14 @@ void * _hash_create (uword elts, hash_t * h_user)
return v;
}
-void * _hash_free (void * v)
+void *
+_hash_free (void *v)
{
- hash_t * h = hash_header (v);
- hash_pair_union_t * p;
+ hash_t *h = hash_header (v);
+ hash_pair_union_t *p;
uword i;
- if (! v)
+ if (!v)
return v;
/* We zero all freed memory in case user would be tempted to use it. */
@@ -708,19 +732,22 @@ void * _hash_free (void * v)
return 0;
}
-static void * hash_resize_internal (void * old, uword new_size, uword free_old)
+static void *
+hash_resize_internal (void *old, uword new_size, uword free_old)
{
- void * new;
- hash_pair_t * p;
+ void *new;
+ hash_pair_t *p;
new = 0;
if (new_size > 0)
{
- hash_t * h = old ? hash_header (old) : 0;
+ hash_t *h = old ? hash_header (old) : 0;
new = _hash_create (new_size, h);
+ /* *INDENT-OFF* */
hash_foreach_pair (p, old, {
new = _hash_set3 (new, p->key, &p->value[0], 0);
});
+ /* *INDENT-ON* */
}
if (free_old)
@@ -728,23 +755,30 @@ static void * hash_resize_internal (void * old, uword new_size, uword free_old)
return new;
}
-void * hash_resize (void * old, uword new_size)
-{ return hash_resize_internal (old, new_size, 1); }
+void *
+hash_resize (void *old, uword new_size)
+{
+ return hash_resize_internal (old, new_size, 1);
+}
-void * hash_dup (void * old)
-{ return hash_resize_internal (old, vec_len (old), 0); }
+void *
+hash_dup (void *old)
+{
+ return hash_resize_internal (old, vec_len (old), 0);
+}
-void * _hash_set3 (void * v, uword key, void * value, void * old_value)
+void *
+_hash_set3 (void *v, uword key, void *value, void *old_value)
{
- hash_t * h;
+ hash_t *h;
- if (! v)
+ if (!v)
v = hash_create (0, sizeof (uword));
h = hash_header (v);
(void) lookup (v, key, SET, value, old_value);
- if (! (h->flags & HASH_FLAG_NO_AUTO_GROW))
+ if (!(h->flags & HASH_FLAG_NO_AUTO_GROW))
{
/* Resize when 3/4 full. */
if (4 * (h->elts + 1) > 3 * vec_len (v))
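
Stepping back from the hunk-level churn: the functions in this file back
the usual word-keyed hash macros. A hedged usage sketch is below;
hash_set, hash_get, hash_unset and hash_free are assumed to be the thin
wrapper macros from hash.h around the _hash_set3 / _hash_get /
_hash_unset / _hash_free functions shown in this diff (only hash_create
and hash_get appear verbatim in the patch). As the code above shows, the
table grows automatically once it is 3/4 full.

#include <stdio.h>
#include <vppinfra/hash.h>
#include <vppinfra/format.h>

int
main (void)
{
  /* uword -> uword table; starts empty and grows as needed. */
  uword *table = hash_create (0, sizeof (uword));
  uword *p;

  hash_set (table, 1, 100);   /* assumed wrapper around _hash_set3 */
  hash_set (table, 2, 200);

  p = hash_get (table, 2);    /* assumed wrapper around _hash_get */
  if (p)
    fformat (stdout, "key 2 -> %wd\n", p[0]);

  hash_unset (table, 1);
  fformat (stdout, "%U\n", format_hash, table, 0 /* verbose */);

  hash_free (table);
  return 0;
}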
@@ -754,63 +788,67 @@ void * _hash_set3 (void * v, uword key, void * value, void * old_value)
return v;
}
-uword vec_key_sum (hash_t * h, uword key)
+uword
+vec_key_sum (hash_t * h, uword key)
{
- void * v = uword_to_pointer (key, void *);
+ void *v = uword_to_pointer (key, void *);
return hash_memory (v, vec_len (v) * h->user, 0);
}
-uword vec_key_equal (hash_t * h, uword key1, uword key2)
+uword
+vec_key_equal (hash_t * h, uword key1, uword key2)
{
- void * v1 = uword_to_pointer (key1, void *);
- void * v2 = uword_to_pointer (key2, void *);
+ void *v1 = uword_to_pointer (key1, void *);
+ void *v2 = uword_to_pointer (key2, void *);
uword l1 = vec_len (v1);
uword l2 = vec_len (v2);
return l1 == l2 && 0 == memcmp (v1, v2, l1 * h->user);
}
-u8 * vec_key_format_pair (u8 * s, va_list * args)
+u8 *
+vec_key_format_pair (u8 * s, va_list * args)
{
- void * CLIB_UNUSED (user_arg) = va_arg (*args, void *);
- void * v = va_arg (*args, void *);
- hash_pair_t * p = va_arg (*args, hash_pair_t *);
- hash_t * h = hash_header (v);
- void * u = uword_to_pointer (p->key, void *);
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
+ void *u = uword_to_pointer (p->key, void *);
int i;
- switch (h->user) {
- case 1:
+ switch (h->user)
+ {
+ case 1:
s = format (s, "%v", u);
break;
- case 2:
- {
- u16 * w = u;
- for (i = 0; i < vec_len (w); i++)
- s = format (s, "0x%x, ", w[i]);
- break;
- }
+ case 2:
+ {
+ u16 *w = u;
+ for (i = 0; i < vec_len (w); i++)
+ s = format (s, "0x%x, ", w[i]);
+ break;
+ }
- case 4:
- {
- u32 * w = u;
- for (i = 0; i < vec_len (w); i++)
- s = format (s, "0x%x, ", w[i]);
- break;
- }
+ case 4:
+ {
+ u32 *w = u;
+ for (i = 0; i < vec_len (w); i++)
+ s = format (s, "0x%x, ", w[i]);
+ break;
+ }
- case 8:
- {
- u64 * w = u;
- for (i = 0; i < vec_len (w); i++)
- s = format (s, "0x%Lx, ", w[i]);
- break;
- }
+ case 8:
+ {
+ u64 *w = u;
+ for (i = 0; i < vec_len (w); i++)
+ s = format (s, "0x%Lx, ", w[i]);
+ break;
+ }
- default:
+ default:
s = format (s, "0x%U", format_hex_bytes, u, vec_len (u) * h->user);
break;
- }
+ }
if (hash_value_bytes (h) > 0)
s = format (s, " -> 0x%wx", p->value[0]);
@@ -818,78 +856,89 @@ u8 * vec_key_format_pair (u8 * s, va_list * args)
return s;
}
-uword mem_key_sum (hash_t * h, uword key)
+uword
+mem_key_sum (hash_t * h, uword key)
{
- uword * v = uword_to_pointer (key, void *);
+ uword *v = uword_to_pointer (key, void *);
return hash_memory (v, h->user, 0);
}
-uword mem_key_equal (hash_t * h, uword key1, uword key2)
+uword
+mem_key_equal (hash_t * h, uword key1, uword key2)
{
- void * v1 = uword_to_pointer (key1, void *);
- void * v2 = uword_to_pointer (key2, void *);
+ void *v1 = uword_to_pointer (key1, void *);
+ void *v2 = uword_to_pointer (key2, void *);
return v1 && v2 && 0 == memcmp (v1, v2, h->user);
}
-uword string_key_sum (hash_t * h, uword key)
+uword
+string_key_sum (hash_t * h, uword key)
{
- char * v = uword_to_pointer (key, char *);
+ char *v = uword_to_pointer (key, char *);
return hash_memory (v, strlen (v), 0);
}
-uword string_key_equal (hash_t * h, uword key1, uword key2)
+uword
+string_key_equal (hash_t * h, uword key1, uword key2)
{
- void * v1 = uword_to_pointer (key1, void *);
- void * v2 = uword_to_pointer (key2, void *);
+ void *v1 = uword_to_pointer (key1, void *);
+ void *v2 = uword_to_pointer (key2, void *);
return v1 && v2 && 0 == strcmp (v1, v2);
}
-u8 * string_key_format_pair (u8 * s, va_list * args)
+u8 *
+string_key_format_pair (u8 * s, va_list * args)
{
- void * CLIB_UNUSED (user_arg) = va_arg (*args, void *);
- void * v = va_arg (*args, void *);
- hash_pair_t * p = va_arg (*args, hash_pair_t *);
- hash_t * h = hash_header (v);
- void * u = uword_to_pointer (p->key, void *);
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
+ void *u = uword_to_pointer (p->key, void *);
s = format (s, "%s", u);
if (hash_value_bytes (h) > 0)
- s = format (s, " -> 0x%8U", format_hex_bytes, &p->value[0], hash_value_bytes (h));
+ s =
+ format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
+ hash_value_bytes (h));
return s;
}
-static u8 * hash_format_pair_default (u8 * s, va_list * args)
+static u8 *
+hash_format_pair_default (u8 * s, va_list * args)
{
- void * CLIB_UNUSED (user_arg) = va_arg (*args, void *);
- void * v = va_arg (*args, void *);
- hash_pair_t * p = va_arg (*args, hash_pair_t *);
- hash_t * h = hash_header (v);
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
s = format (s, "0x%08x", p->key);
if (hash_value_bytes (h) > 0)
- s = format (s, " -> 0x%8U", format_hex_bytes, &p->value[0], hash_value_bytes (h));
+ s =
+ format (s, " -> 0x%8U", format_hex_bytes, &p->value[0],
+ hash_value_bytes (h));
return s;
}
-uword hash_bytes (void * v)
+uword
+hash_bytes (void *v)
{
uword i, bytes;
- hash_t * h = hash_header (v);
+ hash_t *h = hash_header (v);
- if (! v)
+ if (!v)
return 0;
bytes = vec_capacity (v, hash_header_bytes (v));
for (i = 0; i < hash_capacity (v); i++)
{
- if (! hash_is_user (v, i))
+ if (!hash_is_user (v, i))
{
- hash_pair_union_t * p = get_pair (v, i);
+ hash_pair_union_t *p = get_pair (v, i);
if (h->log2_pair_size > 0)
- bytes += 1<< indirect_pair_get_log2_bytes (&p->indirect);
+ bytes += 1 << indirect_pair_get_log2_bytes (&p->indirect);
else
bytes += vec_capacity (p->indirect.pairs, 0);
}
@@ -897,20 +946,20 @@ uword hash_bytes (void * v)
return bytes;
}
-u8 * format_hash (u8 * s, va_list * va)
+u8 *
+format_hash (u8 * s, va_list * va)
{
- void * v = va_arg (*va, void *);
+ void *v = va_arg (*va, void *);
int verbose = va_arg (*va, int);
- hash_pair_t * p;
- hash_t * h = hash_header (v);
+ hash_pair_t *p;
+ hash_t *h = hash_header (v);
uword i;
s = format (s, "hash %p, %wd elts, capacity %wd, %wd bytes used,\n",
- v, hash_elts (v), hash_capacity (v),
- hash_bytes (v));
+ v, hash_elts (v), hash_capacity (v), hash_bytes (v));
{
- uword * occupancy = 0;
+ uword *occupancy = 0;
/* Count number of buckets with each occupancy. */
for (i = 0; i < hash_capacity (v); i++)
@@ -923,7 +972,7 @@ u8 * format_hash (u8 * s, va_list * va)
}
else
{
- hash_pair_union_t * p = get_pair (v, i);
+ hash_pair_union_t *p = get_pair (v, i);
if (h->log2_pair_size > 0)
j = indirect_pair_get_len (&p->indirect);
else
@@ -950,9 +999,11 @@ u8 * format_hash (u8 * s, va_list * va)
if (verbose)
{
+ /* *INDENT-OFF* */
hash_foreach_pair (p, v, {
s = format (s, " %U\n", h->format_pair, h->format_pair_arg, v, p);
});
+ /* *INDENT-ON* */
}
return s;
@@ -960,15 +1011,14 @@ u8 * format_hash (u8 * s, va_list * va)
static uword
unformat_hash_string_internal (unformat_input_t * input,
- va_list * va,
- int is_vec)
+ va_list * va, int is_vec)
{
- uword * hash = va_arg (*va, uword *);
- int * result = va_arg (*va, int *);
- u8 * string = 0;
- uword * p;
+ uword *hash = va_arg (*va, uword *);
+ int *result = va_arg (*va, int *);
+ u8 *string = 0;
+ uword *p;
- if (! unformat (input, is_vec ? "%v%_" : "%s%_", &string))
+ if (!unformat (input, is_vec ? "%v%_" : "%s%_", &string))
return 0;
p = hash_get_mem (hash, string);
@@ -981,24 +1031,29 @@ unformat_hash_string_internal (unformat_input_t * input,
uword
unformat_hash_vec_string (unformat_input_t * input, va_list * va)
-{ return unformat_hash_string_internal (input, va, /* is_vec */ 1); }
+{
+ return unformat_hash_string_internal (input, va, /* is_vec */ 1);
+}
uword
unformat_hash_string (unformat_input_t * input, va_list * va)
-{ return unformat_hash_string_internal (input, va, /* is_vec */ 0); }
+{
+ return unformat_hash_string_internal (input, va, /* is_vec */ 0);
+}
-clib_error_t * hash_validate (void * v)
+clib_error_t *
+hash_validate (void *v)
{
- hash_t * h = hash_header (v);
+ hash_t *h = hash_header (v);
uword i, j;
- uword * keys = 0;
- clib_error_t * error = 0;
+ uword *keys = 0;
+ clib_error_t *error = 0;
#define CHECK(x) if ((error = ERROR_ASSERT (x))) goto done;
for (i = 0; i < hash_capacity (v); i++)
{
- hash_pair_union_t * pu = get_pair (v, i);
+ hash_pair_union_t *pu = get_pair (v, i);
if (hash_is_user (v, i))
{
@@ -1007,13 +1062,12 @@ clib_error_t * hash_validate (void * v)
}
else
{
- hash_pair_t * p;
- hash_pair_indirect_t * pi = &pu->indirect;
+ hash_pair_t *p;
+ hash_pair_indirect_t *pi = &pu->indirect;
uword n;
n = h->log2_pair_size > 0
- ? indirect_pair_get_len (pi)
- : vec_len (pi->pairs);
+ ? indirect_pair_get_len (pi) : vec_len (pi->pairs);
for (p = pi->pairs; n-- > 0; p = hash_forward1 (h, p))
{
@@ -1028,6 +1082,14 @@ clib_error_t * hash_validate (void * v)
CHECK (vec_len (keys) == h->elts);
vec_free (keys);
- done:
+done:
return error;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
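For reference, the auto-grow test reindented above resizes a table when the next insert would push it past 3/4 full. A standalone sketch of that arithmetic (plain C, illustrative numbers, not VPP code and not part of this patch):

#include <stdio.h>

int
main (void)
{
  unsigned elts = 12, capacity = 16;

  /* 4 * (12 + 1) = 52 > 3 * 16 = 48: one more insert exceeds 3/4
     occupancy, so the table would be grown before storing the pair. */
  if (4 * (elts + 1) > 3 * capacity)
    printf ("grow: %u of %u slots would exceed 3/4 occupancy\n",
	    elts + 1, capacity);
  return 0;
}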
diff --git a/vppinfra/vppinfra/hash.h b/vppinfra/vppinfra/hash.h
index 978230b6ca0..f796f2dca09 100644
--- a/vppinfra/vppinfra/hash.h
+++ b/vppinfra/vppinfra/hash.h
@@ -45,13 +45,13 @@
struct hash_header;
-typedef uword (hash_key_sum_function_t)
- (struct hash_header *, uword key);
+typedef uword (hash_key_sum_function_t) (struct hash_header *, uword key);
typedef uword (hash_key_equal_function_t)
(struct hash_header *, uword key1, uword key2);
/* Vector header for hash tables. */
-typedef struct hash_header {
+typedef struct hash_header
+{
/* Number of elements in hash table. */
uword elts;
@@ -70,25 +70,25 @@ typedef struct hash_header {
/* Function to compute the "sum" of a hash key.
Hash function is this sum modulo the prime size of
the hash table (vec_len (v)). */
- hash_key_sum_function_t * key_sum;
+ hash_key_sum_function_t *key_sum;
/* Special values for key_sum "function". */
-#define KEY_FUNC_NONE (0) /*< sum = key */
-#define KEY_FUNC_POINTER_UWORD (1) /*< sum = *(uword *) key */
-#define KEY_FUNC_POINTER_U32 (2) /*< sum = *(u32 *) key */
-#define KEY_FUNC_STRING (3) /*< sum = string_key_sum, etc. */
+#define KEY_FUNC_NONE (0) /*< sum = key */
+#define KEY_FUNC_POINTER_UWORD (1) /*< sum = *(uword *) key */
+#define KEY_FUNC_POINTER_U32 (2) /*< sum = *(u32 *) key */
+#define KEY_FUNC_STRING (3) /*< sum = string_key_sum, etc. */
/* key comparison function */
- hash_key_equal_function_t * key_equal;
+ hash_key_equal_function_t *key_equal;
/* Hook for user's data. Used to parameterize sum/equal functions. */
any user;
/* Format a (k,v) pair */
- format_function_t * format_pair;
+ format_function_t *format_pair;
/* Format function arg */
- void * format_pair_arg;
+ void *format_pair_arg;
/* Bit i is set if pair i is a user object (as opposed to being
either zero or an indirect array of pairs). */
@@ -96,54 +96,67 @@ typedef struct hash_header {
} hash_t;
/* Hash header size in bytes */
-always_inline uword hash_header_bytes (void * v)
+always_inline uword
+hash_header_bytes (void *v)
{
- hash_t * h;
- uword is_user_bytes = (sizeof (h->is_user[0]) * vec_len (v)) / BITS (h->is_user[0]);
+ hash_t *h;
+ uword is_user_bytes =
+ (sizeof (h->is_user[0]) * vec_len (v)) / BITS (h->is_user[0]);
return sizeof (h[0]) + is_user_bytes;
}
/* Returns a pointer to the hash header given the vector pointer */
-always_inline hash_t * hash_header (void * v)
-{ return vec_header (v, hash_header_bytes (v)); }
+always_inline hash_t *
+hash_header (void *v)
+{
+ return vec_header (v, hash_header_bytes (v));
+}
/* Number of elements in the hash table */
-always_inline uword hash_elts (void * v)
+always_inline uword
+hash_elts (void *v)
{
- hash_t * h = hash_header (v);
+ hash_t *h = hash_header (v);
return v ? h->elts : 0;
}
/* Number of elements the hash table can hold */
-always_inline uword hash_capacity (void * v)
-{ return vec_len (v); }
+always_inline uword
+hash_capacity (void *v)
+{
+ return vec_len (v);
+}
/* Returns 1 if the hash pair contains user data */
-always_inline uword hash_is_user (void * v, uword i)
+always_inline uword
+hash_is_user (void *v, uword i)
{
- hash_t * h = hash_header (v);
- uword i0 = i / BITS(h->is_user[0]);
- uword i1 = i % BITS(h->is_user[0]);
+ hash_t *h = hash_header (v);
+ uword i0 = i / BITS (h->is_user[0]);
+ uword i1 = i % BITS (h->is_user[0]);
return (h->is_user[i0] & ((uword) 1 << i1)) != 0;
}
/* Set the format function and format argument for a hash table */
always_inline void
-hash_set_pair_format (void * v,
- format_function_t * format_pair,
- void * format_pair_arg)
+hash_set_pair_format (void *v,
+ format_function_t * format_pair, void *format_pair_arg)
{
- hash_t * h = hash_header (v);
+ hash_t *h = hash_header (v);
h->format_pair = format_pair;
h->format_pair_arg = format_pair_arg;
}
/* Set hash table flags */
-always_inline void hash_set_flags (void * v, uword flags)
-{ hash_header (v)->flags |= flags; }
+always_inline void
+hash_set_flags (void *v, uword flags)
+{
+ hash_header (v)->flags |= flags;
+}
/* Key value pairs. */
-typedef struct {
+typedef struct
+{
/* The Key */
uword key;
@@ -156,33 +169,39 @@ typedef struct {
If log2_pair_size > 0 we overload hash pairs
with indirect pairs for buckets with more than one
pair. */
-typedef struct {
+typedef struct
+{
/* pair vector */
- hash_pair_t * pairs;
+ hash_pair_t *pairs;
/* padding */
- u8 pad[sizeof(uword) - sizeof (hash_pair_t *)];
+ u8 pad[sizeof (uword) - sizeof (hash_pair_t *)];
/* allocated length */
uword alloc_len;
-} hash_pair_indirect_t;
+}
+hash_pair_indirect_t;
/* Direct / Indirect pair union */
-typedef union {
- hash_pair_t direct;
- hash_pair_indirect_t indirect;
+typedef union
+{
+ hash_pair_t direct;
+ hash_pair_indirect_t indirect;
} hash_pair_union_t;
#define LOG2_ALLOC_BITS (5)
#define PAIR_BITS (BITS (uword) - LOG2_ALLOC_BITS)
/* Log2 number of bytes allocated in pairs array. */
-always_inline uword indirect_pair_get_log2_bytes (hash_pair_indirect_t * p)
-{ return p->alloc_len >> PAIR_BITS; }
+always_inline uword
+indirect_pair_get_log2_bytes (hash_pair_indirect_t * p)
+{
+ return p->alloc_len >> PAIR_BITS;
+}
/* Get the length of an indirect pair */
always_inline uword
indirect_pair_get_len (hash_pair_indirect_t * p)
{
- if (! p->pairs)
+ if (!p->pairs)
return 0;
else
return p->alloc_len & (((uword) 1 << PAIR_BITS) - 1);
@@ -190,9 +209,7 @@ indirect_pair_get_len (hash_pair_indirect_t * p)
/* Set the length of an indirect pair */
always_inline void
-indirect_pair_set (hash_pair_indirect_t * p,
- uword log2_alloc,
- uword len)
+indirect_pair_set (hash_pair_indirect_t * p, uword log2_alloc, uword len)
{
ASSERT (len < ((uword) 1 << PAIR_BITS));
ASSERT (log2_alloc < ((uword) 1 << LOG2_ALLOC_BITS));
@@ -200,25 +217,25 @@ indirect_pair_set (hash_pair_indirect_t * p,
}
/* internal routine to fetch value for given key */
-uword * _hash_get (void * v, uword key);
+uword *_hash_get (void *v, uword key);
/* internal routine to fetch value (key, value) pair for given key */
-hash_pair_t * _hash_get_pair (void * v, uword key);
+hash_pair_t *_hash_get_pair (void *v, uword key);
/* internal routine to unset a (key, value) pair */
-void * _hash_unset (void * v, uword key, void * old_value);
+void *_hash_unset (void *v, uword key, void *old_value);
/* internal routine to set a (key, value) pair, return the old value */
-void * _hash_set3 (void * v, uword key, void * value, void * old_value);
+void *_hash_set3 (void *v, uword key, void *value, void *old_value);
/* Resize a hash table */
-void * hash_resize (void * old, uword new_size);
+void *hash_resize (void *old, uword new_size);
/* duplicate a hash table */
-void * hash_dup (void * old);
+void *hash_dup (void *old);
/* Returns the number of bytes used by a hash table */
-uword hash_bytes (void * v);
+uword hash_bytes (void *v);
/* Public macro to set a (key, value) pair, return the old value */
#define hash_set3(h,key,value,old_value) \
@@ -263,26 +280,27 @@ uword hash_bytes (void * v);
#define hash_unset_mem(h,key) ((h) = _hash_unset ((h), pointer_to_uword (key),0))
/* internal routine to free a hash table */
-extern void * _hash_free (void * v);
+extern void *_hash_free (void *v);
/* Public macro to free a hash table */
#define hash_free(h) (h) = _hash_free ((h))
-clib_error_t * hash_validate (void * v);
+clib_error_t *hash_validate (void *v);
/* Public inline funcion to get the number of value bytes for a hash table */
-always_inline uword hash_value_bytes (hash_t * h)
+always_inline uword
+hash_value_bytes (hash_t * h)
{
- hash_pair_t * p;
+ hash_pair_t *p;
return (sizeof (p->value[0]) << h->log2_pair_size) - sizeof (p->key);
}
/* Public inline funcion to get log2(size of a (key,value) pair) */
-always_inline uword hash_pair_log2_bytes (hash_t * h)
+always_inline uword
+hash_pair_log2_bytes (hash_t * h)
{
uword log2_bytes = h->log2_pair_size;
- ASSERT (BITS (hash_pair_t) == 32
- || BITS (hash_pair_t) == 64);
+ ASSERT (BITS (hash_pair_t) == 32 || BITS (hash_pair_t) == 64);
if (BITS (hash_pair_t) == 32)
log2_bytes += 2;
else if (BITS (hash_pair_t) == 64)
@@ -291,21 +309,30 @@ always_inline uword hash_pair_log2_bytes (hash_t * h)
}
/* Public inline funcion to get size of a (key,value) pair */
-always_inline uword hash_pair_bytes (hash_t * h)
-{ return (uword) 1 << hash_pair_log2_bytes (h); }
+always_inline uword
+hash_pair_bytes (hash_t * h)
+{
+ return (uword) 1 << hash_pair_log2_bytes (h);
+}
/* Public inline funcion to advance a pointer past one (key,value) pair */
-always_inline void * hash_forward1 (hash_t * h, void * v)
-{ return (u8 *) v + hash_pair_bytes (h); }
+always_inline void *
+hash_forward1 (hash_t * h, void *v)
+{
+ return (u8 *) v + hash_pair_bytes (h);
+}
/* Public inline funcion to advance a pointer past N (key,value) pairs */
-always_inline void * hash_forward (hash_t * h, void * v, uword n)
-{ return (u8 *) v + ((n * sizeof (hash_pair_t)) << h->log2_pair_size); }
+always_inline void *
+hash_forward (hash_t * h, void *v, uword n)
+{
+ return (u8 *) v + ((n * sizeof (hash_pair_t)) << h->log2_pair_size);
+}
/* Iterate over hash pairs
@param p the current (key,value) pair
@param v the hash table to iterate
- @param body the operation to perform on each (key,value) pair.
+ @param body the operation to perform on each (key,value) pair.
executes body with each active hash pair
*/
#define hash_foreach_pair(p,v,body) \
@@ -372,7 +399,7 @@ do { \
@param key_var the current key
@param value_var the current value
@param h the hash table to iterate across
- @param body the operation to perform on each (key_var,value_var) pair.
+ @param body the operation to perform on each (key_var,value_var) pair.
calls body with each active hash pair
*/
@@ -392,7 +419,7 @@ do { \
@param key_var the current key
@param value_var the current value
@param h the hash table to iterate across
- @param body the operation to perform on each (key_var,value_var) pair.
+ @param body the operation to perform on each (key_var,value_var) pair.
calls body with each active hash pair
*/
@@ -411,19 +438,22 @@ do { \
/* This struct saves iteration state for hash_next.
None of these fields are meant to be visible to the user.
Hence, the cryptic short-hand names. */
-typedef struct {
+typedef struct
+{
uword i, j, f;
} hash_next_t;
-hash_pair_t * hash_next (void * v, hash_next_t * hn);
+hash_pair_t *hash_next (void *v, hash_next_t * hn);
-void * _hash_create (uword elts, hash_t * h);
+void *_hash_create (uword elts, hash_t * h);
-always_inline void hash_set_value_bytes (hash_t * h, uword value_bytes)
+always_inline void
+hash_set_value_bytes (hash_t * h, uword value_bytes)
{
- hash_pair_t * p;
+ hash_pair_t *p;
h->log2_pair_size =
- max_log2 ((sizeof (p->key) + value_bytes + sizeof (p->key) - 1) / sizeof (p->key));
+ max_log2 ((sizeof (p->key) + value_bytes + sizeof (p->key) -
+ 1) / sizeof (p->key));
}
#define hash_create2(_elts,_user,_value_bytes, \
@@ -482,7 +512,9 @@ do { \
always_inline uword
hash32_rotate_left (u32 x, u32 i)
-{ return (x << i) | (x >> (BITS (i) - i)); }
+{
+ return (x << i) | (x >> (BITS (i) - i));
+}
#define hash_v3_mix32(a,b,c) \
do { \
@@ -586,7 +618,7 @@ do { \
hash_v3_finalize_step_2_u32x(a,b,c); \
} while (0)
-extern uword hash_memory (void * p, word n_bytes, uword state);
+extern uword hash_memory (void *p, word n_bytes, uword state);
extern uword mem_key_sum (hash_t * h, uword key);
extern uword mem_key_equal (hash_t * h, uword key1, uword key2);
@@ -596,7 +628,7 @@ extern uword mem_key_equal (hash_t * h, uword key1, uword key2);
extern uword vec_key_sum (hash_t * h, uword key);
extern uword vec_key_equal (hash_t * h, uword key1, uword key2);
-extern u8 * vec_key_format_pair (u8 * s, va_list * args);
+extern u8 *vec_key_format_pair (u8 * s, va_list * args);
#define hash_create_vec(elts,key_bytes,value_bytes) \
hash_create2((elts),(key_bytes),(value_bytes),\
@@ -604,7 +636,7 @@ extern u8 * vec_key_format_pair (u8 * s, va_list * args);
extern uword string_key_sum (hash_t * h, uword key);
extern uword string_key_equal (hash_t * h, uword key1, uword key2);
-extern u8 * string_key_format_pair (u8 * s, va_list * args);
+extern u8 *string_key_format_pair (u8 * s, va_list * args);
#define hash_create_string(elts,value_bytes) \
hash_create2((elts),0,(value_bytes), \
@@ -630,14 +662,22 @@ extern u8 * string_key_format_pair (u8 * s, va_list * args);
(hash_key_equal_function_t *) KEY_FUNC_POINTER_U32, \
0,0)
-u8 * format_hash (u8 * s, va_list * va);
+u8 *format_hash (u8 * s, va_list * va);
/* Looks up input in hash table indexed by either vec string or
c string (null terminated). */
unformat_function_t unformat_hash_vec_string;
unformat_function_t unformat_hash_string;
-/* Main test routine. */
+/* Main test routine. */
int test_hash_main (unformat_input_t * input);
#endif /* included_hash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
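For orientation, a minimal usage sketch of the uword-keyed hash API whose declarations are restyled above; it assumes only the macros and functions declared in hash.h and is not part of this patch:

#include <vppinfra/hash.h>

static uword
hash_demo (void)
{
  uword *h = hash_create (0 /* initial elts */, sizeof (uword));
  uword *p;
  uword value = ~0;

  hash_set (h, 1, 100);		/* (key, value) */
  hash_set (h, 2, 200);

  p = hash_get (h, 2);		/* pointer to value, or 0 if absent */
  if (p)
    value = p[0];

  hash_unset (h, 1);
  hash_free (h);
  return value;
}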
diff --git a/vppinfra/vppinfra/heap.c b/vppinfra/vppinfra/heap.c
index 5f44cd40534..2a5fb5c8d8e 100644
--- a/vppinfra/vppinfra/heap.c
+++ b/vppinfra/vppinfra/heap.c
@@ -35,24 +35,31 @@
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <vppinfra/cache.h> /* for CLIB_CACHE_LINE_BYTES */
+#include <vppinfra/cache.h> /* for CLIB_CACHE_LINE_BYTES */
#include <vppinfra/mem.h>
#include <vppinfra/hash.h>
#include <vppinfra/vec.h>
#include <vppinfra/heap.h>
#include <vppinfra/error.h>
-always_inline heap_elt_t * elt_at (heap_header_t * h, uword i)
+always_inline heap_elt_t *
+elt_at (heap_header_t * h, uword i)
{
ASSERT (i < vec_len (h->elts));
return h->elts + i;
}
-always_inline heap_elt_t * last (heap_header_t * h)
-{ return elt_at (h, h->tail); }
+always_inline heap_elt_t *
+last (heap_header_t * h)
+{
+ return elt_at (h, h->tail);
+}
-always_inline heap_elt_t * first (heap_header_t * h)
-{ return elt_at (h, h->head); }
+always_inline heap_elt_t *
+first (heap_header_t * h)
+{
+ return elt_at (h, h->head);
+}
/* Objects sizes are binned into N_BINS bins.
Objects with size <= SMALL_BINS have their own bins.
@@ -62,7 +69,8 @@ always_inline heap_elt_t * first (heap_header_t * h)
Sizes are in units of elt_bytes bytes. */
/* Convert size to bin. */
-always_inline uword size_to_bin (uword size)
+always_inline uword
+size_to_bin (uword size)
{
uword bin;
@@ -85,7 +93,8 @@ always_inline uword size_to_bin (uword size)
}
/* Convert bin to size. */
-always_inline __attribute__((unused)) uword bin_to_size (uword bin)
+always_inline __attribute__ ((unused))
+ uword bin_to_size (uword bin)
{
uword size;
@@ -97,16 +106,17 @@ always_inline __attribute__((unused)) uword bin_to_size (uword bin)
return size;
}
-static void elt_delete (heap_header_t * h, heap_elt_t * e)
+static void
+elt_delete (heap_header_t * h, heap_elt_t * e)
{
- heap_elt_t * l = vec_end (h->elts) - 1;
+ heap_elt_t *l = vec_end (h->elts) - 1;
ASSERT (e >= h->elts && e <= l);
/* Update doubly linked pointers. */
{
- heap_elt_t * p = heap_prev (e);
- heap_elt_t * n = heap_next (e);
+ heap_elt_t *p = heap_prev (e);
+ heap_elt_t *n = heap_next (e);
if (p == e)
{
@@ -136,9 +146,10 @@ static void elt_delete (heap_header_t * h, heap_elt_t * e)
Before: P ... E
After : P ... NEW ... E
*/
-always_inline void elt_insert_before (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
+always_inline void
+elt_insert_before (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
{
- heap_elt_t * p = heap_prev (e);
+ heap_elt_t *p = heap_prev (e);
if (p == e)
{
@@ -160,9 +171,10 @@ always_inline void elt_insert_before (heap_header_t * h, heap_elt_t * e, heap_el
Before: E ... N
After : E ... NEW ... N
*/
-always_inline void elt_insert_after (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
+always_inline void
+elt_insert_after (heap_header_t * h, heap_elt_t * e, heap_elt_t * new)
{
- heap_elt_t * n = heap_next (e);
+ heap_elt_t *n = heap_next (e);
if (n == e)
{
@@ -180,13 +192,14 @@ always_inline void elt_insert_after (heap_header_t * h, heap_elt_t * e, heap_elt
}
}
-always_inline heap_elt_t * elt_new (heap_header_t * h)
+always_inline heap_elt_t *
+elt_new (heap_header_t * h)
{
- heap_elt_t * e;
+ heap_elt_t *e;
uword l;
if ((l = vec_len (h->free_elts)) > 0)
{
- e = elt_at (h, h->free_elts[l-1]);
+ e = elt_at (h, h->free_elts[l - 1]);
_vec_len (h->free_elts) -= 1;
}
else
@@ -196,16 +209,17 @@ always_inline heap_elt_t * elt_new (heap_header_t * h)
/* Return pointer to object at given offset.
Used to write free list index of free objects. */
-always_inline u32 * elt_data (void * v, heap_elt_t * e)
+always_inline u32 *
+elt_data (void *v, heap_elt_t * e)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
return v + heap_offset (e) * h->elt_bytes;
}
always_inline void
-set_free_elt (void * v, heap_elt_t * e, uword fi)
+set_free_elt (void *v, heap_elt_t * e, uword fi)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
e->offset |= HEAP_ELT_FREE_BIT;
if (h->elt_bytes >= sizeof (u32))
@@ -215,7 +229,7 @@ set_free_elt (void * v, heap_elt_t * e, uword fi)
else
{
/* For elt_bytes < 4 we must store free index in separate
- vector. */
+ vector. */
uword elt_index = e - h->elts;
vec_validate (h->small_free_elt_free_index, elt_index);
h->small_free_elt_free_index[elt_index] = fi;
@@ -223,9 +237,9 @@ set_free_elt (void * v, heap_elt_t * e, uword fi)
}
always_inline uword
-get_free_elt (void * v, heap_elt_t * e, uword * bin_result)
+get_free_elt (void *v, heap_elt_t * e, uword * bin_result)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
uword fb, fi;
ASSERT (heap_is_free (e));
@@ -245,9 +259,10 @@ get_free_elt (void * v, heap_elt_t * e, uword * bin_result)
return fi;
}
-always_inline void remove_free_block (void * v, uword b, uword i)
+always_inline void
+remove_free_block (void *v, uword b, uword i)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
uword l;
ASSERT (b < vec_len (h->free_lists));
@@ -264,14 +279,15 @@ always_inline void remove_free_block (void * v, uword b, uword i)
_vec_len (h->free_lists[b]) = l - 1;
}
-static heap_elt_t * search_free_list (void * v, uword size)
+static heap_elt_t *
+search_free_list (void *v, uword size)
{
- heap_header_t * h = heap_header (v);
- heap_elt_t * f, * u;
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *f, *u;
uword b, fb, f_size, f_index;
word s, l;
- if (! v)
+ if (!v)
return 0;
/* Search free lists for bins >= given size. */
@@ -281,14 +297,16 @@ static heap_elt_t * search_free_list (void * v, uword size)
/* Find an object that is large enough.
Search list in reverse so that more recently freed objects will be
allocated again sooner. */
- do {
- l--;
- f_index = h->free_lists[b][l];
- f = elt_at (h, f_index);
- f_size = heap_elt_size (v, f);
- if ((s = f_size - size) >= 0)
- break;
- } while (l >= 0);
+ do
+ {
+ l--;
+ f_index = h->free_lists[b][l];
+ f = elt_at (h, f_index);
+ f_size = heap_elt_size (v, f);
+ if ((s = f_size - size) >= 0)
+ break;
+ }
+ while (l >= 0);
/* If we fail to find a large enough object, try the next larger size. */
if (l < 0)
@@ -332,13 +350,14 @@ static heap_elt_t * search_free_list (void * v, uword size)
return 0;
}
-static void combine_free_blocks (void * v, heap_elt_t * e0, heap_elt_t * e1);
+static void combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1);
-static inline void dealloc_elt (void * v, heap_elt_t * e)
+static inline void
+dealloc_elt (void *v, heap_elt_t * e)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
uword b, l;
- heap_elt_t * n, * p;
+ heap_elt_t *n, *p;
b = size_to_bin (heap_elt_size (v, e));
vec_validate (h->free_lists, b);
@@ -348,27 +367,26 @@ static inline void dealloc_elt (void * v, heap_elt_t * e)
/* See if we can combine the block we just freed with neighboring free blocks. */
p = heap_prev (e);
- if (! heap_is_free (p))
+ if (!heap_is_free (p))
p = e;
n = heap_next (e);
- if (! heap_is_free (n))
+ if (!heap_is_free (n))
n = e;
if (p != n)
combine_free_blocks (v, p, n);
}
-void * _heap_alloc (void * v,
- uword size,
- uword align,
- uword elt_bytes,
- uword * offset_return,
- uword * handle_return)
+void *
+_heap_alloc (void *v,
+ uword size,
+ uword align,
+ uword elt_bytes, uword * offset_return, uword * handle_return)
{
uword offset = 0, align_size;
- heap_header_t * h;
- heap_elt_t * e;
+ heap_header_t *h;
+ heap_elt_t *e;
if (size == 0)
goto error;
@@ -388,7 +406,7 @@ void * _heap_alloc (void * v,
e = search_free_list (v, align_size);
/* If nothing found on free list, allocate object from end of vector. */
- if (! e)
+ if (!e)
{
uword max_len;
@@ -399,12 +417,11 @@ void * _heap_alloc (void * v,
goto error;
h = heap_header (v);
- if (! v || ! (h->flags & HEAP_IS_STATIC))
+ if (!v || !(h->flags & HEAP_IS_STATIC))
v = _vec_resize (v,
align_size,
(offset + align_size) * elt_bytes,
- sizeof (h[0]),
- HEAP_DATA_ALIGN);
+ sizeof (h[0]), HEAP_DATA_ALIGN);
else
_vec_len (v) += align_size;
@@ -418,7 +435,7 @@ void * _heap_alloc (void * v,
h = heap_header (v);
/* Add new element to doubly linked chain of elements. */
- if (! e)
+ if (!e)
{
e = elt_new (h);
e->offset = offset;
@@ -431,14 +448,14 @@ void * _heap_alloc (void * v,
uword new_offset, old_offset;
old_offset = e->offset;
- new_offset = (old_offset + align - 1) &~ (align - 1);
+ new_offset = (old_offset + align - 1) & ~(align - 1);
e->offset = new_offset;
e_index = e - h->elts;
/* Free fragments before and after aligned object. */
if (new_offset > old_offset)
{
- heap_elt_t * before_e = elt_new (h);
+ heap_elt_t *before_e = elt_new (h);
before_e->offset = old_offset;
elt_insert_before (h, h->elts + e_index, before_e);
dealloc_elt (v, before_e);
@@ -446,12 +463,12 @@ void * _heap_alloc (void * v,
if (new_offset + size < old_offset + align_size)
{
- heap_elt_t * after_e = elt_new (h);
+ heap_elt_t *after_e = elt_new (h);
after_e->offset = new_offset + size;
elt_insert_after (h, h->elts + e_index, after_e);
dealloc_elt (v, after_e);
}
-
+
e = h->elts + e_index;
}
@@ -462,23 +479,24 @@ void * _heap_alloc (void * v,
if (CLIB_DEBUG > 0)
{
uword handle = e - h->elts;
- ASSERT (! clib_bitmap_get (h->used_elt_bitmap, handle));
+ ASSERT (!clib_bitmap_get (h->used_elt_bitmap, handle));
h->used_elt_bitmap = clib_bitmap_ori (h->used_elt_bitmap, handle);
}
*offset_return = e->offset;
- *handle_return = e - h->elts;
+ *handle_return = e - h->elts;
return v;
- error:
+error:
*offset_return = *handle_return = ~0;
return v;
}
-void heap_dealloc (void * v, uword handle)
+void
+heap_dealloc (void *v, uword handle)
{
- heap_header_t * h = heap_header (v);
- heap_elt_t * e;
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *e;
ASSERT (handle < vec_len (h->elts));
@@ -493,20 +511,22 @@ void heap_dealloc (void * v, uword handle)
h->used_count--;
e = h->elts + handle;
- ASSERT (! heap_is_free (e));
+ ASSERT (!heap_is_free (e));
dealloc_elt (v, e);
}
/* While freeing objects at INDEX we noticed free blocks i0 <= index and
i1 >= index. We combine these two or three blocks into one big free block. */
-static void combine_free_blocks (void * v, heap_elt_t * e0, heap_elt_t * e1)
+static void
+combine_free_blocks (void *v, heap_elt_t * e0, heap_elt_t * e1)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
uword total_size, i, b, tb, ti, i_last, g_offset;
- heap_elt_t * e;
+ heap_elt_t *e;
- struct {
+ struct
+ {
u32 index;
u32 bin;
u32 bin_index;
@@ -573,21 +593,23 @@ static void combine_free_blocks (void * v, heap_elt_t * e0, heap_elt_t * e1)
set_free_elt (v, elt_at (h, g.index), g.bin_index);
}
-uword heap_len (void * v, word handle)
+uword
+heap_len (void *v, word handle)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
if (CLIB_DEBUG > 0)
ASSERT (clib_bitmap_get (h->used_elt_bitmap, handle));
return heap_elt_size (v, elt_at (h, handle));
}
-void * _heap_free (void * v)
+void *
+_heap_free (void *v)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
uword b;
- if (! v)
+ if (!v)
return v;
clib_bitmap_free (h->used_elt_bitmap);
@@ -597,17 +619,18 @@ void * _heap_free (void * v)
vec_free (h->elts);
vec_free (h->free_elts);
vec_free (h->small_free_elt_free_index);
- if (! (h->flags & HEAP_IS_STATIC))
+ if (!(h->flags & HEAP_IS_STATIC))
vec_free_h (v, sizeof (h[0]));
return v;
}
-uword heap_bytes (void * v)
+uword
+heap_bytes (void *v)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
uword bytes, b;
- if (! v)
+ if (!v)
return 0;
bytes = sizeof (h[0]);
@@ -622,10 +645,11 @@ uword heap_bytes (void * v)
return bytes;
}
-static u8 * debug_elt (u8 * s, void * v, word i, word n)
+static u8 *
+debug_elt (u8 * s, void *v, word i, word n)
{
- heap_elt_t * e, * e0, * e1;
- heap_header_t * h = heap_header (v);
+ heap_elt_t *e, *e0, *e1;
+ heap_header_t *h = heap_header (v);
word j;
if (vec_len (h->elts) == 0)
@@ -636,7 +660,7 @@ static u8 * debug_elt (u8 * s, void * v, word i, word n)
else
{
e0 = h->elts + i;
- for (j = 0; j < n/2; j++)
+ for (j = 0; j < n / 2; j++)
e0 = heap_prev (e0);
}
@@ -645,11 +669,11 @@ static u8 * debug_elt (u8 * s, void * v, word i, word n)
else
{
e1 = h->elts + i;
- for (j = 0; j < n/2; j++)
+ for (j = 0; j < n / 2; j++)
e1 = heap_next (e1);
}
- i = -n/2;
+ i = -n / 2;
for (e = e0; 1; e = heap_next (e))
{
if (heap_is_free (e))
@@ -666,24 +690,26 @@ static u8 * debug_elt (u8 * s, void * v, word i, word n)
return s;
}
-u8 * format_heap (u8 * s, va_list * va)
+u8 *
+format_heap (u8 * s, va_list * va)
{
- void * v = va_arg (*va, void *);
+ void *v = va_arg (*va, void *);
uword verbose = va_arg (*va, uword);
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
heap_header_t zero;
memset (&zero, 0, sizeof (zero));
- if (! v)
+ if (!v)
h = &zero;
{
f64 elt_bytes = vec_len (v) * h->elt_bytes;
f64 overhead_bytes = heap_bytes (v);
-
+
s = format (s, "heap %p, %6d objects, size %.1fk + overhead %.1fk\n",
- v, h->used_count, elt_bytes / 1024, (overhead_bytes - elt_bytes) / 1024);
+ v, h->used_count, elt_bytes / 1024,
+ (overhead_bytes - elt_bytes) / 1024);
}
if (v && verbose)
@@ -692,20 +718,21 @@ u8 * format_heap (u8 * s, va_list * va)
return s;
}
-void heap_validate (void * v)
+void
+heap_validate (void *v)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
uword i, o, s;
- u8 * free_map;
- heap_elt_t * e, * n;
+ u8 *free_map;
+ heap_elt_t *e, *n;
uword used_count, total_size;
uword free_count, free_size;
ASSERT (h->used_count == clib_bitmap_count_set_bits (h->used_elt_bitmap));
- ASSERT (first(h)->prev == 0);
- ASSERT (last(h)->next == 0);
+ ASSERT (first (h)->prev == 0);
+ ASSERT (last (h)->next == 0);
/* Validate number of elements and size. */
free_size = free_count = 0;
@@ -732,7 +759,8 @@ void heap_validate (void * v)
used_count++;
s = heap_elt_size (v, e);
total_size += s;
- ASSERT (is_free == ! clib_bitmap_get (h->used_elt_bitmap, e - h->elts));
+ ASSERT (is_free ==
+ !clib_bitmap_get (h->used_elt_bitmap, e - h->elts));
if (is_free)
{
elt_free_count++;
@@ -746,7 +774,7 @@ void heap_validate (void * v)
}
/* We should never have two free adjacent elements. */
- ASSERT (! (heap_is_free (e) && heap_is_free (n)));
+ ASSERT (!(heap_is_free (e) && heap_is_free (n)));
}
ASSERT (free_count == elt_free_count);
@@ -773,7 +801,7 @@ void heap_validate (void * v)
ASSERT (fi < vec_len (h->free_lists[fb]));
ASSERT (h->free_lists[fb][fi] == e - h->elts);
- ASSERT (! free_map[i]);
+ ASSERT (!free_map[i]);
free_map[i] = 1;
}
@@ -790,3 +818,11 @@ void heap_validate (void * v)
vec_free (free_map);
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/heap.h b/vppinfra/vppinfra/heap.h
index 604187c00ea..8c1aae46ebf 100644
--- a/vppinfra/vppinfra/heap.h
+++ b/vppinfra/vppinfra/heap.h
@@ -40,7 +40,7 @@
Usage. To declare a null heap:
T * heap = 0;
-
+
To allocate:
offset = heap_alloc (heap, size, handle);
@@ -68,7 +68,8 @@
#include <vppinfra/bitmap.h>
/* Doubly linked list of elements. */
-typedef struct {
+typedef struct
+{
/* Offset of this element (plus free bit).
If element is free, data at offset contains pointer to free list. */
u32 offset;
@@ -80,21 +81,34 @@ typedef struct {
/* Use high bit of offset as free bit. */
#define HEAP_ELT_FREE_BIT (1 << 31)
-always_inline uword heap_is_free (heap_elt_t * e)
-{ return (e->offset & HEAP_ELT_FREE_BIT) != 0; }
+always_inline uword
+heap_is_free (heap_elt_t * e)
+{
+ return (e->offset & HEAP_ELT_FREE_BIT) != 0;
+}
-always_inline uword heap_offset (heap_elt_t * e)
-{ return e->offset &~ HEAP_ELT_FREE_BIT; }
+always_inline uword
+heap_offset (heap_elt_t * e)
+{
+ return e->offset & ~HEAP_ELT_FREE_BIT;
+}
-always_inline heap_elt_t * heap_next (heap_elt_t * e)
-{ return e + e->next; }
+always_inline heap_elt_t *
+heap_next (heap_elt_t * e)
+{
+ return e + e->next;
+}
-always_inline heap_elt_t * heap_prev (heap_elt_t * e)
-{ return e + e->prev; }
+always_inline heap_elt_t *
+heap_prev (heap_elt_t * e)
+{
+ return e + e->prev;
+}
-always_inline uword heap_elt_size (void * v, heap_elt_t * e)
+always_inline uword
+heap_elt_size (void *v, heap_elt_t * e)
{
- heap_elt_t * n = heap_next (e);
+ heap_elt_t *n = heap_next (e);
uword next_offset = n != e ? heap_offset (n) : vec_len (v);
return next_offset - heap_offset (e);
}
@@ -106,24 +120,25 @@ always_inline uword heap_elt_size (void * v, heap_elt_t * e)
#define HEAP_N_BINS (2 * HEAP_SMALL_BINS)
/* Header for heaps. */
-typedef struct {
+typedef struct
+{
/* Vector of used and free elements. */
- heap_elt_t * elts;
+ heap_elt_t *elts;
/* For elt_bytes < sizeof (u32) we need some extra space
per elt to store free list index. */
- u32 * small_free_elt_free_index;
+ u32 *small_free_elt_free_index;
/* Vector of free indices of elts array. */
- u32 * free_elts;
+ u32 *free_elts;
/* Indices of free elts indexed by size bin. */
- u32 ** free_lists;
+ u32 **free_lists;
- format_function_t * format_elt;
+ format_function_t *format_elt;
/* Used for validattion/debugging. */
- uword * used_elt_bitmap;
+ uword *used_elt_bitmap;
/* First and last element of doubly linked chain of elements. */
u32 head, tail;
@@ -142,13 +157,20 @@ typedef struct {
/* Start of heap elements is always cache aligned. */
#define HEAP_DATA_ALIGN (CLIB_CACHE_LINE_BYTES)
-always_inline heap_header_t * heap_header (void * v)
-{ return vec_header (v, sizeof (heap_header_t)); }
+always_inline heap_header_t *
+heap_header (void *v)
+{
+ return vec_header (v, sizeof (heap_header_t));
+}
-always_inline uword heap_header_bytes ()
-{ return vec_header_bytes (sizeof (heap_header_t)); }
+always_inline uword
+heap_header_bytes ()
+{
+ return vec_header_bytes (sizeof (heap_header_t));
+}
-always_inline void heap_dup_header (heap_header_t * old, heap_header_t * new)
+always_inline void
+heap_dup_header (heap_header_t * old, heap_header_t * new)
{
uword i;
@@ -165,38 +187,42 @@ always_inline void heap_dup_header (heap_header_t * old, heap_header_t * new)
/* Make a duplicate copy of a heap. */
#define heap_dup(v) _heap_dup(v, vec_len (v) * sizeof (v[0]))
-always_inline void * _heap_dup (void * v_old, uword v_bytes)
+always_inline void *
+_heap_dup (void *v_old, uword v_bytes)
{
- heap_header_t * h_old, * h_new;
- void * v_new;
+ heap_header_t *h_old, *h_new;
+ void *v_new;
h_old = heap_header (v_old);
- if (! v_old)
+ if (!v_old)
return v_old;
v_new = 0;
- v_new = _vec_resize (v_new, _vec_len (v_old), v_bytes, sizeof (heap_header_t),
- HEAP_DATA_ALIGN);
+ v_new =
+ _vec_resize (v_new, _vec_len (v_old), v_bytes, sizeof (heap_header_t),
+ HEAP_DATA_ALIGN);
h_new = heap_header (v_new);
heap_dup_header (h_old, h_new);
clib_memcpy (v_new, v_old, v_bytes);
return v_new;
}
-always_inline uword heap_elts (void * v)
+always_inline uword
+heap_elts (void *v)
{
- heap_header_t * h = heap_header (v);
+ heap_header_t *h = heap_header (v);
return h->used_count;
}
-uword heap_bytes (void * v);
+uword heap_bytes (void *v);
-always_inline void * _heap_new (u32 len, u32 n_elt_bytes)
+always_inline void *
+_heap_new (u32 len, u32 n_elt_bytes)
{
- void * v = _vec_resize (0, len, (uword) len*n_elt_bytes,
- sizeof (heap_header_t),
- HEAP_DATA_ALIGN);
+ void *v = _vec_resize (0, len, (uword) len * n_elt_bytes,
+ sizeof (heap_header_t),
+ HEAP_DATA_ALIGN);
heap_header (v)->elt_bytes = n_elt_bytes;
return v;
}
@@ -204,28 +230,31 @@ always_inline void * _heap_new (u32 len, u32 n_elt_bytes)
#define heap_new(v) (v) = _heap_new (0, sizeof ((v)[0]))
always_inline void
-heap_set_format (void * v, format_function_t * format_elt)
+heap_set_format (void *v, format_function_t * format_elt)
{
ASSERT (v);
heap_header (v)->format_elt = format_elt;
}
always_inline void
-heap_set_max_len (void * v, uword max_len)
+heap_set_max_len (void *v, uword max_len)
{
ASSERT (v);
heap_header (v)->max_len = max_len;
}
-always_inline uword heap_get_max_len (void * v)
-{ return v ? heap_header (v)->max_len : 0; }
+always_inline uword
+heap_get_max_len (void *v)
+{
+ return v ? heap_header (v)->max_len : 0;
+}
/* Create fixed size heap with given block of memory. */
always_inline void *
-heap_create_from_memory (void * memory, uword max_len, uword elt_bytes)
+heap_create_from_memory (void *memory, uword max_len, uword elt_bytes)
{
- heap_header_t * h;
- void * v;
+ heap_header_t *h;
+ void *v;
if (max_len * elt_bytes < sizeof (h[0]))
return 0;
@@ -236,7 +265,7 @@ heap_create_from_memory (void * memory, uword max_len, uword elt_bytes)
h->elt_bytes = elt_bytes;
h->flags = HEAP_IS_STATIC;
- v = (void *) (memory + heap_header_bytes ());
+ v = (void *) (memory + heap_header_bytes ());
_vec_len (v) = 0;
return v;
}
@@ -267,11 +296,11 @@ do { \
#define heap_elt_at_index(v,index) vec_elt_at_index(v,index)
always_inline heap_elt_t *
-heap_get_elt (void * v, uword handle)
+heap_get_elt (void *v, uword handle)
{
- heap_header_t * h = heap_header (v);
- heap_elt_t * e = vec_elt_at_index (h->elts, handle);
- ASSERT (! heap_is_free (e));
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *e = vec_elt_at_index (h->elts, handle);
+ ASSERT (!heap_is_free (e));
return e;
}
@@ -282,19 +311,18 @@ heap_get_elt (void * v, uword handle)
})
always_inline uword
-heap_is_free_handle (void * v, uword heap_handle)
+heap_is_free_handle (void *v, uword heap_handle)
{
- heap_header_t * h = heap_header (v);
- heap_elt_t * e = vec_elt_at_index (h->elts, heap_handle);
+ heap_header_t *h = heap_header (v);
+ heap_elt_t *e = vec_elt_at_index (h->elts, heap_handle);
return heap_is_free (e);
}
-extern uword heap_len (void * v, word handle);
+extern uword heap_len (void *v, word handle);
/* Low level allocation call. */
-extern void * _heap_alloc (void * v, uword size, uword alignment,
- uword elt_bytes,
- uword * offset, uword * handle);
+extern void *_heap_alloc (void *v, uword size, uword alignment,
+ uword elt_bytes, uword * offset, uword * handle);
#define heap_alloc_aligned(v,size,align,handle) \
({ \
@@ -308,14 +336,22 @@ extern void * _heap_alloc (void * v, uword size, uword alignment,
#define heap_alloc(v,size,handle) heap_alloc_aligned((v),(size),0,(handle))
-extern void heap_dealloc (void * v, uword handle);
-extern void heap_validate (void * v);
+extern void heap_dealloc (void *v, uword handle);
+extern void heap_validate (void *v);
/* Format heap internal data structures as string. */
-extern u8 * format_heap (u8 * s, va_list * va);
+extern u8 *format_heap (u8 * s, va_list * va);
-void * _heap_free (void * v);
+void *_heap_free (void *v);
#define heap_free(v) (v)=_heap_free(v)
#endif /* included_heap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
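Following the usage outline kept in the heap.h header comment above, a minimal sketch of the heap API; it assumes the heap_alloc/heap_dealloc/heap_free macros as declared and is not part of this patch:

#include <vppinfra/heap.h>

static void
heap_demo (void)
{
  u32 *h = 0;			/* a null heap, per the header comment */
  uword offset, handle;

  offset = heap_alloc (h, 32 /* elements */, handle);
  h[offset] = 42;		/* allocated objects live at h + offset */

  heap_dealloc (h, handle);
  heap_free (h);
}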
diff --git a/vppinfra/vppinfra/longjmp.h b/vppinfra/vppinfra/longjmp.h
index 7252aa3cd62..8d83203e41d 100644
--- a/vppinfra/vppinfra/longjmp.h
+++ b/vppinfra/vppinfra/longjmp.h
@@ -97,7 +97,8 @@
#error "unknown machine"
#endif
-typedef struct {
+typedef struct
+{
uword regs[CLIB_ARCH_LONGJMP_REGS];
} clib_longjmp_t __attribute__ ((aligned (16)));
@@ -109,8 +110,15 @@ void clib_longjmp (clib_longjmp_t * save, uword return_value);
uword clib_setjmp (clib_longjmp_t * save, uword return_value_not_taken);
/* Call function on given stack. */
-uword clib_calljmp (uword (* func) (uword func_arg),
- uword func_arg,
- void * stack);
+uword clib_calljmp (uword (*func) (uword func_arg),
+ uword func_arg, void *stack);
#endif /* included_clib_longjmp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/macros.c b/vppinfra/vppinfra/macros.c
index f16948ef0e4..ce4cc9bc81b 100644
--- a/vppinfra/vppinfra/macros.c
+++ b/vppinfra/vppinfra/macros.c
@@ -1,8 +1,8 @@
/*
macros.c - a simple macro expander
-
+
Copyright (c) 2010, 2014 Cisco and/or its affiliates.
-
+
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -18,39 +18,40 @@
#include <vppinfra/macros.h>
-static inline int macro_isalnum (i8 c)
+static inline int
+macro_isalnum (i8 c)
{
if ((c >= 'A' && c <= 'Z')
- || (c >='a' && c <= 'z')
- || (c >='0' && c <= '9')
- || (c == '_'))
+ || (c >= 'a' && c <= 'z') || (c >= '0' && c <= '9') || (c == '_'))
return 1;
return 0;
}
-static i8 *builtin_eval(macro_main_t *mm, i8 *varname, i32 complain)
+static i8 *
+builtin_eval (macro_main_t * mm, i8 * varname, i32 complain)
{
- uword *p;
- i8 *(*fp)(macro_main_t *, i32);
-
- p = hash_get_mem(mm->the_builtin_eval_hash, varname);
- if (p == 0)
- return 0;
- fp = (void *)(p[0]);
- return (*fp)(mm, complain);
+ uword *p;
+ i8 *(*fp) (macro_main_t *, i32);
+
+ p = hash_get_mem (mm->the_builtin_eval_hash, varname);
+ if (p == 0)
+ return 0;
+ fp = (void *) (p[0]);
+ return (*fp) (mm, complain);
}
-int clib_macro_unset (macro_main_t * mm, char *name)
+int
+clib_macro_unset (macro_main_t * mm, char *name)
{
- hash_pair_t * p;
- u8 * key, * value;
+ hash_pair_t *p;
+ u8 *key, *value;
p = hash_get_pair (mm->the_value_table_hash, name);
-
+
if (p == 0)
return 1;
- key = (u8 *)(p->key);
+ key = (u8 *) (p->key);
value = (u8 *) (p->value[0]);
hash_unset_mem (mm->the_value_table_hash, name);
@@ -59,9 +60,10 @@ int clib_macro_unset (macro_main_t * mm, char *name)
return 0;
}
-int clib_macro_set_value (macro_main_t * mm, char *name, char *value)
+int
+clib_macro_set_value (macro_main_t * mm, char *name, char *value)
{
- u8 * key_copy, *value_copy;
+ u8 *key_copy, *value_copy;
int rv;
rv = clib_macro_unset (mm, name);
@@ -73,13 +75,14 @@ int clib_macro_set_value (macro_main_t * mm, char *name, char *value)
return rv;
}
-i8 * clib_macro_get_value (macro_main_t * mm, char *name)
+i8 *
+clib_macro_get_value (macro_main_t * mm, char *name)
{
- uword * p;
+ uword *p;
p = hash_get_mem (mm->the_value_table_hash, name);
if (p)
- return (i8 *)(p[0]);
+ return (i8 *) (p[0]);
else
return 0;
}
@@ -88,129 +91,134 @@ i8 * clib_macro_get_value (macro_main_t * mm, char *name)
* eval: takes a string, returns a vector.
* looks up $foobar in the variable table.
*/
-i8 * clib_macro_eval (macro_main_t *mm, i8 *s, i32 complain)
+i8 *
+clib_macro_eval (macro_main_t * mm, i8 * s, i32 complain)
{
- i8 *rv=0;
+ i8 *rv = 0;
i8 *varname, *varvalue;
i8 *ts;
- while (*s)
+ while (*s)
{
- switch(*s)
- {
- case '\\':
- s++;
- /* fallthrough */
-
- default:
- vec_add1(rv, *s);
- s++;
- break;
-
- case '$':
- s++;
- varname = 0;
- /*
- * Make vector with variable name in it.
- */
- while (*s && (macro_isalnum (*s) || (*s == '_') || (*s == '(')))
- {
-
- /* handle $(foo) */
- if (*s == '(')
- {
- s++; /* skip '(' */
- while (*s && *s != ')')
- {
- vec_add1(varname, *s);
- s++;
- }
- if (*s)
- s++; /* skip ')' */
- break;
- }
- vec_add1(varname, *s);
- s++;
- }
- /* null terminate */
- vec_add1(varname, 0);
- /* Look for a builtin, e.g. $my_hostname */
- if (!(varvalue = builtin_eval(mm, varname, complain)))
- {
- /* Look in value table */
- if (! varvalue)
- {
- char * tmp = clib_macro_get_value (mm, varname);
- if (tmp)
- varvalue = (i8 *) format (0, "%s%c", tmp, 0);
- }
+ switch (*s)
+ {
+ case '\\':
+ s++;
+ /* fallthrough */
+
+ default:
+ vec_add1 (rv, *s);
+ s++;
+ break;
+
+ case '$':
+ s++;
+ varname = 0;
+ /*
+ * Make vector with variable name in it.
+ */
+ while (*s && (macro_isalnum (*s) || (*s == '_') || (*s == '(')))
+ {
+
+ /* handle $(foo) */
+ if (*s == '(')
+ {
+ s++; /* skip '(' */
+ while (*s && *s != ')')
+ {
+ vec_add1 (varname, *s);
+ s++;
+ }
+ if (*s)
+ s++; /* skip ')' */
+ break;
+ }
+ vec_add1 (varname, *s);
+ s++;
+ }
+ /* null terminate */
+ vec_add1 (varname, 0);
+ /* Look for a builtin, e.g. $my_hostname */
+ if (!(varvalue = builtin_eval (mm, varname, complain)))
+ {
+ /* Look in value table */
+ if (!varvalue)
+ {
+ char *tmp = clib_macro_get_value (mm, varname);
+ if (tmp)
+ varvalue = (i8 *) format (0, "%s%c", tmp, 0);
+ }
#ifdef CLIB_UNIX
- /* Look in environment. */
- if (! varvalue)
- {
- char * tmp = getenv (varname);
- if (tmp)
- varvalue = (i8 *) format (0, "%s%c", tmp, 0);
- }
+ /* Look in environment. */
+ if (!varvalue)
+ {
+ char *tmp = getenv (varname);
+ if (tmp)
+ varvalue = (i8 *) format (0, "%s%c", tmp, 0);
+ }
#endif /* CLIB_UNIX */
- }
- if (varvalue)
- {
- /* recursively evaluate */
- ts = clib_macro_eval(mm, varvalue, complain);
- vec_free(varvalue);
- /* add results to answer */
- vec_append(rv, ts);
- /* Remove NULL termination or the results are sad */
- _vec_len(rv) = vec_len(rv)-1;
- vec_free(ts);
- }
- else
- {
- if (complain)
- clib_warning ("Undefined Variable Reference: %s\n", varname);
- vec_append(rv, format(0, "UNSET "));
- _vec_len(rv) = vec_len(rv)-1;
-
- }
- vec_free(varname);
- }
+ }
+ if (varvalue)
+ {
+ /* recursively evaluate */
+ ts = clib_macro_eval (mm, varvalue, complain);
+ vec_free (varvalue);
+ /* add results to answer */
+ vec_append (rv, ts);
+ /* Remove NULL termination or the results are sad */
+ _vec_len (rv) = vec_len (rv) - 1;
+ vec_free (ts);
+ }
+ else
+ {
+ if (complain)
+ clib_warning ("Undefined Variable Reference: %s\n", varname);
+ vec_append (rv, format (0, "UNSET "));
+ _vec_len (rv) = vec_len (rv) - 1;
+
+ }
+ vec_free (varname);
+ }
}
- vec_add1(rv, 0);
- return(rv);
+ vec_add1 (rv, 0);
+ return (rv);
}
/*
* eval: takes a string, returns a vector.
* looks up $foobar in the variable table.
*/
-i8 *clib_macro_eval_dollar (macro_main_t *mm, i8 *s, i32 complain)
+i8 *
+clib_macro_eval_dollar (macro_main_t * mm, i8 * s, i32 complain)
{
i8 *s2;
i8 *rv;
- s2 = (i8 *)format (0, "$(%s)%c", s, 0);
- rv = clib_macro_eval(mm, s2, complain);
+ s2 = (i8 *) format (0, "$(%s)%c", s, 0);
+ rv = clib_macro_eval (mm, s2, complain);
vec_free (s2);
return (rv);
}
-void clib_macro_add_builtin (macro_main_t *mm, char *name, void * eval_fn)
+void
+clib_macro_add_builtin (macro_main_t * mm, char *name, void *eval_fn)
{
- hash_set_mem(mm->the_builtin_eval_hash, name, (uword) eval_fn);
+ hash_set_mem (mm->the_builtin_eval_hash, name, (uword) eval_fn);
}
#ifdef CLIB_UNIX
-static i8 *eval_hostname (macro_main_t *mm, i32 complain)
+static i8 *
+eval_hostname (macro_main_t * mm, i32 complain)
{
char tmp[128];
- if (gethostname (tmp, sizeof(tmp)))
+ if (gethostname (tmp, sizeof (tmp)))
return ((i8 *) format (0, "gethostname-error%c", 0));
return ((i8 *) format (0, "%s%c", tmp, 0));
}
#endif
-void clib_macro_init(macro_main_t * mm)
+void
+clib_macro_init (macro_main_t * mm)
{
if (mm->the_builtin_eval_hash != 0)
{
@@ -218,31 +226,41 @@ void clib_macro_init(macro_main_t * mm)
return;
}
- mm->the_builtin_eval_hash = hash_create_string(0, sizeof(uword));
- mm->the_value_table_hash = hash_create_string(0, sizeof(uword));
-
+ mm->the_builtin_eval_hash = hash_create_string (0, sizeof (uword));
+ mm->the_value_table_hash = hash_create_string (0, sizeof (uword));
+
#ifdef CLIB_UNIX
- hash_set_mem(mm->the_builtin_eval_hash, "hostname",
- (uword) eval_hostname);
+ hash_set_mem (mm->the_builtin_eval_hash, "hostname", (uword) eval_hostname);
#endif
}
-void clib_macro_free(macro_main_t * mm)
+void
+clib_macro_free (macro_main_t * mm)
{
- hash_pair_t * p;
- u8 ** strings_to_free = 0;
+ hash_pair_t *p;
+ u8 **strings_to_free = 0;
int i;
hash_free (mm->the_builtin_eval_hash);
- hash_foreach_pair (p, mm->the_value_table_hash,
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, mm->the_value_table_hash,
({
vec_add1 (strings_to_free, (u8 *) (p->key));
vec_add1 (strings_to_free, (u8 *) (p->value[0]));
}));
+ /* *INDENT-ON* */
for (i = 0; i < vec_len (strings_to_free); i++)
vec_free (strings_to_free[i]);
vec_free (strings_to_free);
hash_free (mm->the_value_table_hash);
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/macros.h b/vppinfra/vppinfra/macros.h
index 94776272364..5c2e7033f8d 100644
--- a/vppinfra/vppinfra/macros.h
+++ b/vppinfra/vppinfra/macros.h
@@ -1,8 +1,8 @@
/*
macros.h - definitions for a simple macro expander
-
+
Copyright (c) 2010, 2014 Cisco and/or its affiliates.
-
+
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at:
@@ -28,18 +28,27 @@
#include <unistd.h>
#endif
-typedef struct {
- uword * the_builtin_eval_hash;
- uword * the_value_table_hash;
+typedef struct
+{
+ uword *the_builtin_eval_hash;
+ uword *the_value_table_hash;
} macro_main_t;
int clib_macro_unset (macro_main_t * mm, char *name);
int clib_macro_set_value (macro_main_t * mm, char *name, char *value);
-void clib_macro_add_builtin (macro_main_t *mm, char *name, void * eval_fn);
-i8 * clib_macro_get_value (macro_main_t * mm, char *name);
-i8 * clib_macro_eval (macro_main_t *mm, i8 *s, i32 complain);
-i8 *clib_macro_eval_dollar (macro_main_t *mm, i8 *s, i32 complain);
-void clib_macro_init(macro_main_t * mm);
-void clib_macro_free(macro_main_t * mm);
+void clib_macro_add_builtin (macro_main_t * mm, char *name, void *eval_fn);
+i8 *clib_macro_get_value (macro_main_t * mm, char *name);
+i8 *clib_macro_eval (macro_main_t * mm, i8 * s, i32 complain);
+i8 *clib_macro_eval_dollar (macro_main_t * mm, i8 * s, i32 complain);
+void clib_macro_init (macro_main_t * mm);
+void clib_macro_free (macro_main_t * mm);
#endif /* included_macros_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
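A minimal sketch of the macro-expander API declared above, mirroring the "$(name)" expansion described in the macros.c comments; it assumes only the functions in macros.h and is not part of this patch:

#include <vppinfra/macros.h>

static void
macro_demo (void)
{
  macro_main_t mm = { 0 };
  i8 *out;

  clib_macro_init (&mm);
  clib_macro_set_value (&mm, "who", "world");

  /* Expands to the NULL-terminated vector "hello world". */
  out = clib_macro_eval (&mm, (i8 *) "hello $(who)", 1 /* complain */);

  vec_free (out);
  clib_macro_free (&mm);
}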
diff --git a/vppinfra/vppinfra/math.h b/vppinfra/vppinfra/math.h
index cedd5cf7b24..48f8c0f4b83 100644
--- a/vppinfra/vppinfra/math.h
+++ b/vppinfra/vppinfra/math.h
@@ -40,14 +40,24 @@
#include <vppinfra/clib.h>
-always_inline f64 sqrt (f64 x)
+always_inline f64
+sqrt (f64 x)
{
return __builtin_sqrt (x);
}
-always_inline f64 fabs (f64 x)
+always_inline f64
+fabs (f64 x)
{
return __builtin_fabs (x);
}
#endif /* included_math_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/md5.c b/vppinfra/vppinfra/md5.c
index e6558d99e93..9ac1efc708d 100644
--- a/vppinfra/vppinfra/md5.c
+++ b/vppinfra/vppinfra/md5.c
@@ -36,7 +36,7 @@ These notices must be retained in any copies of any part of this
documentation and/or software.
*/
-#include <vppinfra/string.h> /* for memset */
+#include <vppinfra/string.h> /* for memset */
#include <vppinfra/byte_order.h>
#include <vppinfra/md5.h>
@@ -83,13 +83,11 @@ do { \
#undef _
/* MD5 basic transformation. Transforms state based on block. */
-static void md5_transform (md5_context_t * m,
- u32 * data,
- u32 * result,
- int zero_buffer)
+static void
+md5_transform (md5_context_t * m, u32 * data, u32 * result, int zero_buffer)
{
u32 a = m->state[0], b = m->state[1], c = m->state[2], d = m->state[3];
- u32 * x = data;
+ u32 *x = data;
/* Constants for MD5Transform routine. */
#define S11 7
@@ -110,76 +108,76 @@ static void md5_transform (md5_context_t * m,
#define S44 21
/* Round 1 */
- FF (a, b, c, d, clib_host_to_little_u32 (x[ 0]), S11, 0xd76aa478); /* 1 */
- FF (d, a, b, c, clib_host_to_little_u32 (x[ 1]), S12, 0xe8c7b756); /* 2 */
- FF (c, d, a, b, clib_host_to_little_u32 (x[ 2]), S13, 0x242070db); /* 3 */
- FF (b, c, d, a, clib_host_to_little_u32 (x[ 3]), S14, 0xc1bdceee); /* 4 */
- FF (a, b, c, d, clib_host_to_little_u32 (x[ 4]), S11, 0xf57c0faf); /* 5 */
- FF (d, a, b, c, clib_host_to_little_u32 (x[ 5]), S12, 0x4787c62a); /* 6 */
- FF (c, d, a, b, clib_host_to_little_u32 (x[ 6]), S13, 0xa8304613); /* 7 */
- FF (b, c, d, a, clib_host_to_little_u32 (x[ 7]), S14, 0xfd469501); /* 8 */
- FF (a, b, c, d, clib_host_to_little_u32 (x[ 8]), S11, 0x698098d8); /* 9 */
- FF (d, a, b, c, clib_host_to_little_u32 (x[ 9]), S12, 0x8b44f7af); /* 10 */
- FF (c, d, a, b, clib_host_to_little_u32 (x[10]), S13, 0xffff5bb1); /* 11 */
- FF (b, c, d, a, clib_host_to_little_u32 (x[11]), S14, 0x895cd7be); /* 12 */
- FF (a, b, c, d, clib_host_to_little_u32 (x[12]), S11, 0x6b901122); /* 13 */
- FF (d, a, b, c, clib_host_to_little_u32 (x[13]), S12, 0xfd987193); /* 14 */
- FF (c, d, a, b, clib_host_to_little_u32 (x[14]), S13, 0xa679438e); /* 15 */
- FF (b, c, d, a, clib_host_to_little_u32 (x[15]), S14, 0x49b40821); /* 16 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[0]), S11, 0xd76aa478); /* 1 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[1]), S12, 0xe8c7b756); /* 2 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[2]), S13, 0x242070db); /* 3 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[3]), S14, 0xc1bdceee); /* 4 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[4]), S11, 0xf57c0faf); /* 5 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[5]), S12, 0x4787c62a); /* 6 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[6]), S13, 0xa8304613); /* 7 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[7]), S14, 0xfd469501); /* 8 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[8]), S11, 0x698098d8); /* 9 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[9]), S12, 0x8b44f7af); /* 10 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[10]), S13, 0xffff5bb1); /* 11 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[11]), S14, 0x895cd7be); /* 12 */
+ FF (a, b, c, d, clib_host_to_little_u32 (x[12]), S11, 0x6b901122); /* 13 */
+ FF (d, a, b, c, clib_host_to_little_u32 (x[13]), S12, 0xfd987193); /* 14 */
+ FF (c, d, a, b, clib_host_to_little_u32 (x[14]), S13, 0xa679438e); /* 15 */
+ FF (b, c, d, a, clib_host_to_little_u32 (x[15]), S14, 0x49b40821); /* 16 */
/* Round 2 */
- GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */
- GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */
- GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
- GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */
- GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */
- GG (d, a, b, c, x[10], S22, 0x02441453); /* 22 */
- GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
- GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */
- GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */
- GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
- GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */
- GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */
- GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
- GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */
- GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */
- GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
+ GG (a, b, c, d, x[1], S21, 0xf61e2562); /* 17 */
+ GG (d, a, b, c, x[6], S22, 0xc040b340); /* 18 */
+ GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */
+ GG (b, c, d, a, x[0], S24, 0xe9b6c7aa); /* 20 */
+ GG (a, b, c, d, x[5], S21, 0xd62f105d); /* 21 */
+ GG (d, a, b, c, x[10], S22, 0x02441453); /* 22 */
+ GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */
+ GG (b, c, d, a, x[4], S24, 0xe7d3fbc8); /* 24 */
+ GG (a, b, c, d, x[9], S21, 0x21e1cde6); /* 25 */
+ GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */
+ GG (c, d, a, b, x[3], S23, 0xf4d50d87); /* 27 */
+ GG (b, c, d, a, x[8], S24, 0x455a14ed); /* 28 */
+ GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */
+ GG (d, a, b, c, x[2], S22, 0xfcefa3f8); /* 30 */
+ GG (c, d, a, b, x[7], S23, 0x676f02d9); /* 31 */
+ GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */
/* Round 3 */
- HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */
- HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */
- HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
- HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
- HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */
- HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */
- HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */
- HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
- HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
- HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */
- HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */
- HH (b, c, d, a, x[ 6], S34, 0x04881d05); /* 44 */
- HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */
- HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
- HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
- HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */
+ HH (a, b, c, d, x[5], S31, 0xfffa3942); /* 33 */
+ HH (d, a, b, c, x[8], S32, 0x8771f681); /* 34 */
+ HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */
+ HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */
+ HH (a, b, c, d, x[1], S31, 0xa4beea44); /* 37 */
+ HH (d, a, b, c, x[4], S32, 0x4bdecfa9); /* 38 */
+ HH (c, d, a, b, x[7], S33, 0xf6bb4b60); /* 39 */
+ HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */
+ HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */
+ HH (d, a, b, c, x[0], S32, 0xeaa127fa); /* 42 */
+ HH (c, d, a, b, x[3], S33, 0xd4ef3085); /* 43 */
+ HH (b, c, d, a, x[6], S34, 0x04881d05); /* 44 */
+ HH (a, b, c, d, x[9], S31, 0xd9d4d039); /* 45 */
+ HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */
+ HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */
+ HH (b, c, d, a, x[2], S34, 0xc4ac5665); /* 48 */
/* Round 4 */
- II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */
- II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */
- II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
- II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */
- II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
- II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */
- II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
- II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */
- II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */
- II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
- II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */
- II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
- II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */
- II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
- II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */
- II (b, c, d, a, x[ 9], S44, 0xeb86d391); /* 64 */
+ II (a, b, c, d, x[0], S41, 0xf4292244); /* 49 */
+ II (d, a, b, c, x[7], S42, 0x432aff97); /* 50 */
+ II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */
+ II (b, c, d, a, x[5], S44, 0xfc93a039); /* 52 */
+ II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */
+ II (d, a, b, c, x[3], S42, 0x8f0ccc92); /* 54 */
+ II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */
+ II (b, c, d, a, x[1], S44, 0x85845dd1); /* 56 */
+ II (a, b, c, d, x[8], S41, 0x6fa87e4f); /* 57 */
+ II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */
+ II (c, d, a, b, x[6], S43, 0xa3014314); /* 59 */
+ II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */
+ II (a, b, c, d, x[4], S41, 0xf7537e82); /* 61 */
+ II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */
+ II (c, d, a, b, x[2], S43, 0x2ad7d2bb); /* 63 */
+ II (b, c, d, a, x[9], S44, 0xeb86d391); /* 64 */
a += m->state[0];
b += m->state[1];
@@ -209,7 +207,8 @@ static void md5_transform (md5_context_t * m,
}
/* MD5 initialization. Begins an MD5 operation, writing a new context. */
-void md5_init (md5_context_t * c)
+void
+md5_init (md5_context_t * c)
{
memset (c, 0, sizeof (c[0]));
@@ -220,9 +219,8 @@ void md5_init (md5_context_t * c)
c->state[3] = 0x10325476;
}
-always_inline void __attribute__((unused))
-md5_fill_buffer_aligned (md5_context_t * c,
- u32 * d32)
+always_inline void __attribute__ ((unused))
+md5_fill_buffer_aligned (md5_context_t * c, u32 * d32)
{
int i;
for (i = 0; i < ARRAY_LEN (c->input_buffer.b32); i++)
@@ -233,10 +231,11 @@ md5_fill_buffer_aligned (md5_context_t * c,
operation, processing another message block, and updating the
context.
*/
-void md5_add (md5_context_t * c, void * data, int data_bytes)
+void
+md5_add (md5_context_t * c, void *data, int data_bytes)
{
u32 data_bytes_left;
- void * d;
+ void *d;
if (data_bytes == 0)
return;
@@ -250,18 +249,21 @@ void md5_add (md5_context_t * c, void * data, int data_bytes)
{
int is_last_iteration;
/* Fast aligned version. */
- do {
- data_bytes_left -= sizeof (c->input_buffer);
- is_last_iteration = data_bytes_left < sizeof (c->input_buffer);
- md5_transform (c, d, /* result */ 0, /* zero_buffer */ is_last_iteration);
- d += sizeof (c->input_buffer);
- } while (! is_last_iteration);
+ do
+ {
+ data_bytes_left -= sizeof (c->input_buffer);
+ is_last_iteration = data_bytes_left < sizeof (c->input_buffer);
+ md5_transform (c, d, /* result */ 0, /* zero_buffer */
+ is_last_iteration);
+ d += sizeof (c->input_buffer);
+ }
+ while (!is_last_iteration);
}
/* Slow unaligned version. */
{
int bi;
- u8 * d8 = d;
+ u8 *d8 = d;
bi = (c->n_bits / BITS (u8)) % ARRAY_LEN (c->input_buffer.b8);
@@ -284,7 +286,8 @@ void md5_add (md5_context_t * c, void * data, int data_bytes)
c->n_bits += data_bytes * BITS (u8);
}
-void md5_finish (md5_context_t * c, u8 * digest)
+void
+md5_finish (md5_context_t * c, u8 * digest)
{
u64 n_bits_save;
int bi, n_pad;
@@ -301,7 +304,14 @@ void md5_finish (md5_context_t * c, u8 * digest)
c->input_buffer.b64[ARRAY_LEN (c->input_buffer.b64) - 1]
= clib_host_to_little_u64 (n_bits_save);
- md5_transform (c, c->input_buffer.b32,
- (u32 *) digest,
+ md5_transform (c, c->input_buffer.b32, (u32 *) digest,
/* zero_buffer */ 1);
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
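For readers following the re-indented rounds above: FF/GG/HH/II are macros defined earlier in md5.c (outside this hunk). Per RFC 1321 each one is, roughly, a 32-bit add-rotate-add step; a sketch of FF for reference only, not part of the patch:

  /* F is the round-1 mixing function; ROTATE_LEFT is a 32-bit rotate. */
  #define F(x, y, z) (((x) & (y)) | ((~x) & (z)))
  #define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32 - (n))))
  #define FF(a, b, c, d, x, s, ac)                  \
    {                                               \
      (a) += F ((b), (c), (d)) + (x) + (u32) (ac);  \
      (a) = ROTATE_LEFT ((a), (s));                 \
      (a) += (b);                                   \
    }

GG, HH and II follow the same shape using the G, H and I mixing functions from the RFC.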
diff --git a/vppinfra/vppinfra/md5.h b/vppinfra/vppinfra/md5.h
index 1b22a5b388c..52123886f7e 100644
--- a/vppinfra/vppinfra/md5.h
+++ b/vppinfra/vppinfra/md5.h
@@ -26,22 +26,32 @@
#include <vppinfra/clib.h>
-typedef struct {
- u64 n_bits;
+typedef struct
+{
+ u64 n_bits;
- u32 state[4];
+ u32 state[4];
- union {
- u64 b64[8];
- u32 b32[16];
- u8 b8[16*4];
- } input_buffer;
+ union
+ {
+ u64 b64[8];
+ u32 b32[16];
+ u8 b8[16 * 4];
+ } input_buffer;
- /* Resulting message digest filled in by md5_finish. */
+ /* Resulting message digest filled in by md5_finish. */
} md5_context_t;
void md5_init (md5_context_t * c);
-void md5_add (md5_context_t * c, void * data, int data_bytes);
+void md5_add (md5_context_t * c, void *data, int data_bytes);
void md5_finish (md5_context_t * c, u8 digest[16]);
#endif /* included_md5_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
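For reference, the reformatted declarations above form the usual three-call digest API; a minimal usage sketch (buffer and length are placeholders, not taken from the patch):

  #include <vppinfra/md5.h>

  static void
  compute_md5 (void *data, int n_bytes, u8 digest[16])
  {
    md5_context_t ctx;

    md5_init (&ctx);
    md5_add (&ctx, data, n_bytes);  /* may be called repeatedly to stream data */
    md5_finish (&ctx, digest);      /* writes the 16-byte digest */
  }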
diff --git a/vppinfra/vppinfra/mem.h b/vppinfra/vppinfra/mem.h
index a4c679c2c71..d88254562c0 100644
--- a/vppinfra/vppinfra/mem.h
+++ b/vppinfra/vppinfra/mem.h
@@ -40,38 +40,38 @@
#include <stdarg.h>
-#include <vppinfra/clib.h> /* uword, etc */
+#include <vppinfra/clib.h> /* uword, etc */
#include <vppinfra/mheap_bootstrap.h>
#include <vppinfra/os.h>
-#include <vppinfra/string.h> /* memcpy, memset */
+#include <vppinfra/string.h> /* memcpy, memset */
#include <vppinfra/valgrind.h>
#define CLIB_MAX_MHEAPS 256
/* Per CPU heaps. */
-extern void * clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
+extern void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
-always_inline void * clib_mem_get_per_cpu_heap (void)
+always_inline void *
+clib_mem_get_per_cpu_heap (void)
{
int cpu = os_get_cpu_number ();
return clib_per_cpu_mheaps[cpu];
}
-always_inline void * clib_mem_set_per_cpu_heap (u8 * new_heap)
+always_inline void *
+clib_mem_set_per_cpu_heap (u8 * new_heap)
{
int cpu = os_get_cpu_number ();
- void * old = clib_per_cpu_mheaps[cpu];
+ void *old = clib_per_cpu_mheaps[cpu];
clib_per_cpu_mheaps[cpu] = new_heap;
return old;
}
/* Memory allocator which returns null when it fails. */
always_inline void *
-clib_mem_alloc_aligned_at_offset (uword size,
- uword align,
- uword align_offset)
+clib_mem_alloc_aligned_at_offset (uword size, uword align, uword align_offset)
{
- void * heap, * p;
+ void *heap, *p;
uword offset, cpu;
if (align_offset > align)
@@ -84,9 +84,7 @@ clib_mem_alloc_aligned_at_offset (uword size,
cpu = os_get_cpu_number ();
heap = clib_per_cpu_mheaps[cpu];
- heap = mheap_get_aligned (heap,
- size, align, align_offset,
- &offset);
+ heap = mheap_get_aligned (heap, size, align, align_offset, &offset);
clib_per_cpu_mheaps[cpu] = heap;
if (offset != ~0)
@@ -107,11 +105,16 @@ clib_mem_alloc_aligned_at_offset (uword size,
/* Memory allocator which returns null when it fails. */
always_inline void *
clib_mem_alloc (uword size)
-{ return clib_mem_alloc_aligned_at_offset (size, /* align */ 1, /* align_offset */ 0); }
+{
+ return clib_mem_alloc_aligned_at_offset (size, /* align */ 1,
+ /* align_offset */ 0);
+}
always_inline void *
clib_mem_alloc_aligned (uword size, uword align)
-{ return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0); }
+{
+ return clib_mem_alloc_aligned_at_offset (size, align, /* align_offset */ 0);
+}
/* Memory allocator which panics when it fails.
Use macro so that clib_panic macro can expand __FUNCTION__ and __LINE__. */
@@ -130,25 +133,27 @@ clib_mem_alloc_aligned (uword size, uword align)
/* Alias to stack allocator for naming consistency. */
#define clib_mem_alloc_stack(bytes) __builtin_alloca(bytes)
-always_inline uword clib_mem_is_heap_object (void * p)
+always_inline uword
+clib_mem_is_heap_object (void *p)
{
- void * heap = clib_mem_get_per_cpu_heap ();
- uword offset = (uword)p - (uword)heap;
- mheap_elt_t * e, * n;
+ void *heap = clib_mem_get_per_cpu_heap ();
+ uword offset = (uword) p - (uword) heap;
+ mheap_elt_t *e, *n;
if (offset >= vec_len (heap))
return 0;
e = mheap_elt_at_uoffset (heap, offset);
n = mheap_next_elt (e);
-
+
/* Check that heap forward and reverse pointers agree. */
return e->n_user_data == n->prev_n_user_data;
}
-always_inline void clib_mem_free (void * p)
+always_inline void
+clib_mem_free (void *p)
{
- u8 * heap = clib_mem_get_per_cpu_heap ();
+ u8 *heap = clib_mem_get_per_cpu_heap ();
/* Make sure object is in the correct heap. */
ASSERT (clib_mem_is_heap_object (p));
@@ -160,10 +165,11 @@ always_inline void clib_mem_free (void * p)
#endif
}
-always_inline void * clib_mem_realloc (void * p, uword new_size, uword old_size)
+always_inline void *
+clib_mem_realloc (void *p, uword new_size, uword old_size)
{
/* By default use alloc, copy and free to emulate realloc. */
- void * q = clib_mem_alloc (new_size);
+ void *q = clib_mem_alloc (new_size);
if (q)
{
uword copy_size;
@@ -177,20 +183,27 @@ always_inline void * clib_mem_realloc (void * p, uword new_size, uword old_size)
return q;
}
-always_inline uword clib_mem_size (void * p)
+always_inline uword
+clib_mem_size (void *p)
{
ASSERT (clib_mem_is_heap_object (p));
- mheap_elt_t * e = mheap_user_pointer_to_elt (p);
+ mheap_elt_t *e = mheap_user_pointer_to_elt (p);
return mheap_elt_data_bytes (e);
}
-always_inline void * clib_mem_get_heap (void)
-{ return clib_mem_get_per_cpu_heap (); }
+always_inline void *
+clib_mem_get_heap (void)
+{
+ return clib_mem_get_per_cpu_heap ();
+}
-always_inline void * clib_mem_set_heap (void * heap)
-{ return clib_mem_set_per_cpu_heap (heap); }
+always_inline void *
+clib_mem_set_heap (void *heap)
+{
+ return clib_mem_set_per_cpu_heap (heap);
+}
-void * clib_mem_init (void * heap, uword size);
+void *clib_mem_init (void *heap, uword size);
void clib_mem_exit (void);
@@ -200,7 +213,8 @@ void clib_mem_validate (void);
void clib_mem_trace (int enable);
-typedef struct {
+typedef struct
+{
/* Total number of objects allocated. */
uword object_count;
@@ -214,7 +228,7 @@ typedef struct {
/* Amount of free space returned to operating system. */
uword bytes_free_reclaimed;
-
+
/* For malloc which puts small objects in sbrk region and
large objects in mmap'ed regions. */
uword bytes_used_sbrk;
@@ -226,7 +240,7 @@ typedef struct {
void clib_mem_usage (clib_mem_usage_t * usage);
-u8 * format_clib_mem_usage (u8 * s, va_list * args);
+u8 *format_clib_mem_usage (u8 * s, va_list * args);
/* Include appropriate VM functions depending on whether
we are compiling for linux kernel, for Unix or standalone. */
@@ -242,6 +256,14 @@ u8 * format_clib_mem_usage (u8 * s, va_list * args);
#include <vppinfra/vm_standalone.h>
#endif
-#include <vppinfra/error.h> /* clib_panic */
+#include <vppinfra/error.h> /* clib_panic */
#endif /* _included_clib_mem_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
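The allocator wrappers re-indented above all operate on the calling CPU's mheap; a brief sketch of the alloc/check/free cycle (the size and alignment are illustrative, and the heap is assumed to have been set up via clib_mem_init):

  #include <vppinfra/mem.h>

  static void
  alloc_example (void)
  {
    /* clib_mem_alloc_aligned () returns 0 on failure rather than panicking. */
    void *p = clib_mem_alloc_aligned (128, 64);

    if (p)
      {
        ASSERT (clib_mem_is_heap_object (p));
        clib_mem_free (p);
      }
  }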
diff --git a/vppinfra/vppinfra/mem_mheap.c b/vppinfra/vppinfra/mem_mheap.c
index 00437b011c9..9b2af520ca6 100644
--- a/vppinfra/vppinfra/mem_mheap.c
+++ b/vppinfra/vppinfra/mem_mheap.c
@@ -43,11 +43,12 @@
#include <vppinfra/memcheck.h>
#include <vppinfra/valgrind.h>
-void * clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
+void *clib_per_cpu_mheaps[CLIB_MAX_MHEAPS];
-void clib_mem_exit (void)
+void
+clib_mem_exit (void)
{
- u8 * heap = clib_mem_get_per_cpu_heap ();
+ u8 *heap = clib_mem_get_per_cpu_heap ();
if (heap)
mheap_free (heap);
clib_mem_set_per_cpu_heap (0);
@@ -55,18 +56,19 @@ void clib_mem_exit (void)
/* Initialize CLIB heap based on memory/size given by user.
Set memory to 0 and CLIB will try to allocate its own heap. */
-void * clib_mem_init (void * memory, uword memory_size)
+void *
+clib_mem_init (void *memory, uword memory_size)
{
- u8 * heap;
+ u8 *heap;
if (memory || memory_size)
heap = mheap_alloc (memory, memory_size);
else
{
/* Allocate lots of address space since this will limit
- the amount of memory the program can allocate.
- In the kernel we're more conservative since some architectures
- (e.g. mips) have pretty small kernel virtual address spaces. */
+ the amount of memory the program can allocate.
+ In the kernel we're more conservative since some architectures
+ (e.g. mips) have pretty small kernel virtual address spaces. */
#ifdef __KERNEL__
#define MAX_VM_MEG 64
#else
@@ -96,34 +98,48 @@ void * clib_mem_init (void * memory, uword memory_size)
#ifdef CLIB_LINUX_KERNEL
#include <asm/page.h>
-uword clib_mem_get_page_size (void)
-{ return PAGE_SIZE; }
+uword
+clib_mem_get_page_size (void)
+{
+ return PAGE_SIZE;
+}
#endif
#ifdef CLIB_UNIX
-uword clib_mem_get_page_size (void)
-{ return getpagesize (); }
+uword
+clib_mem_get_page_size (void)
+{
+ return getpagesize ();
+}
#endif
/* Make a guess for standalone. */
#ifdef CLIB_STANDALONE
-uword clib_mem_get_page_size (void)
-{ return 4096; }
+uword
+clib_mem_get_page_size (void)
+{
+ return 4096;
+}
#endif
-u8 * format_clib_mem_usage (u8 * s, va_list * va)
+u8 *
+format_clib_mem_usage (u8 * s, va_list * va)
{
- int verbose = va_arg (*va, int);
- return format (s, "%U", format_mheap, clib_mem_get_heap (), verbose);
+ int verbose = va_arg (*va, int);
+ return format (s, "%U", format_mheap, clib_mem_get_heap (), verbose);
}
-void clib_mem_usage (clib_mem_usage_t * u)
-{ mheap_usage (clib_mem_get_heap (), u); }
+void
+clib_mem_usage (clib_mem_usage_t * u)
+{
+ mheap_usage (clib_mem_get_heap (), u);
+}
/* Call serial number for debugger breakpoints. */
uword clib_mem_validate_serial = 0;
-void clib_mem_validate (void)
+void
+clib_mem_validate (void)
{
if (MHEAP_HAVE_SMALL_OBJECT_CACHE)
clib_warning ("clib_mem_validate disabled (small object cache is ON)");
@@ -134,5 +150,16 @@ void clib_mem_validate (void)
}
}
-void clib_mem_trace (int enable)
-{ mheap_trace (clib_mem_get_heap (), enable); }
+void
+clib_mem_trace (int enable)
+{
+ mheap_trace (clib_mem_get_heap (), enable);
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
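As the comment in clib_mem_init () above notes, passing a null memory pointer makes vppinfra allocate its own heap; a typical bootstrap sketch (the 64 MB size is an arbitrary example, not from the patch):

  #include <vppinfra/mem.h>

  int
  main (int argc, char *argv[])
  {
    clib_mem_init (0, 64 << 20);  /* 0 => let vppinfra create the heap */

    /* ... application work using clib_mem_alloc () / clib_mem_free () ... */

    clib_mem_exit ();
    return 0;
  }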
diff --git a/vppinfra/vppinfra/memcheck.h b/vppinfra/vppinfra/memcheck.h
index fc50dabfbb0..44db3a8a6cb 100644
--- a/vppinfra/vppinfra/memcheck.h
+++ b/vppinfra/vppinfra/memcheck.h
@@ -22,16 +22,16 @@
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
@@ -53,7 +53,7 @@
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
- ----------------------------------------------------------------
+ ----------------------------------------------------------------
*/
@@ -71,35 +71,35 @@
#include "valgrind.h"
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
-typedef
- enum {
- VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE('M','C'),
- VG_USERREQ__MAKE_MEM_UNDEFINED,
- VG_USERREQ__MAKE_MEM_DEFINED,
- VG_USERREQ__DISCARD,
- VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
- VG_USERREQ__CHECK_MEM_IS_DEFINED,
- VG_USERREQ__DO_LEAK_CHECK,
- VG_USERREQ__COUNT_LEAKS,
+typedef enum
+{
+ VG_USERREQ__MAKE_MEM_NOACCESS = VG_USERREQ_TOOL_BASE ('M', 'C'),
+ VG_USERREQ__MAKE_MEM_UNDEFINED,
+ VG_USERREQ__MAKE_MEM_DEFINED,
+ VG_USERREQ__DISCARD,
+ VG_USERREQ__CHECK_MEM_IS_ADDRESSABLE,
+ VG_USERREQ__CHECK_MEM_IS_DEFINED,
+ VG_USERREQ__DO_LEAK_CHECK,
+ VG_USERREQ__COUNT_LEAKS,
- VG_USERREQ__GET_VBITS,
- VG_USERREQ__SET_VBITS,
+ VG_USERREQ__GET_VBITS,
+ VG_USERREQ__SET_VBITS,
- VG_USERREQ__CREATE_BLOCK,
+ VG_USERREQ__CREATE_BLOCK,
- VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
+ VG_USERREQ__MAKE_MEM_DEFINED_IF_ADDRESSABLE,
- /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
- VG_USERREQ__COUNT_LEAK_BLOCKS,
+ /* Not next to VG_USERREQ__COUNT_LEAKS because it was added later. */
+ VG_USERREQ__COUNT_LEAK_BLOCKS,
- /* This is just for memcheck's internal use - don't use it */
- _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
- = VG_USERREQ_TOOL_BASE('M','C') + 256
- } Vg_MemCheckClientRequest;
+ /* This is just for memcheck's internal use - don't use it */
+ _VG_USERREQ__MEMCHECK_RECORD_OVERLAP_ERROR
+ = VG_USERREQ_TOOL_BASE ('M', 'C') + 256
+} Vg_MemCheckClientRequest;
@@ -113,7 +113,7 @@ typedef
_qzz_addr, _qzz_len, 0, 0, 0); \
_qzz_res; \
}))
-
+
/* Similarly, mark memory at _qzz_addr as addressable but undefined
for _qzz_len bytes. */
#define VALGRIND_MAKE_MEM_UNDEFINED(_qzz_addr,_qzz_len) \
@@ -307,3 +307,11 @@ typedef
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/memcpy_avx.h b/vppinfra/vppinfra/memcpy_avx.h
index 0ec6032a0f6..e987d044b58 100644
--- a/vppinfra/vppinfra/memcpy_avx.h
+++ b/vppinfra/vppinfra/memcpy_avx.h
@@ -52,210 +52,245 @@
#include <x86intrin.h>
static inline void
-clib_mov16(u8 *dst, const u8 *src)
+clib_mov16 (u8 * dst, const u8 * src)
{
- __m128i xmm0;
+ __m128i xmm0;
- xmm0 = _mm_loadu_si128((const __m128i *)src);
- _mm_storeu_si128((__m128i *)dst, xmm0);
+ xmm0 = _mm_loadu_si128 ((const __m128i *) src);
+ _mm_storeu_si128 ((__m128i *) dst, xmm0);
}
static inline void
-clib_mov32(u8 *dst, const u8 *src)
+clib_mov32 (u8 * dst, const u8 * src)
{
- __m256i ymm0;
+ __m256i ymm0;
- ymm0 = _mm256_loadu_si256((const __m256i *)src);
- _mm256_storeu_si256((__m256i *)dst, ymm0);
+ ymm0 = _mm256_loadu_si256 ((const __m256i *) src);
+ _mm256_storeu_si256 ((__m256i *) dst, ymm0);
}
static inline void
-clib_mov64(u8 *dst, const u8 *src)
+clib_mov64 (u8 * dst, const u8 * src)
{
- clib_mov32((u8 *)dst + 0 * 32, (const u8 *)src + 0 * 32);
- clib_mov32((u8 *)dst + 1 * 32, (const u8 *)src + 1 * 32);
+ clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
+ clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}
static inline void
-clib_mov128(u8 *dst, const u8 *src)
+clib_mov128 (u8 * dst, const u8 * src)
{
- clib_mov64((u8 *)dst + 0 * 64, (const u8 *)src + 0 * 64);
- clib_mov64((u8 *)dst + 1 * 64, (const u8 *)src + 1 * 64);
+ clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
+ clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}
static inline void
-clib_mov256(u8 *dst, const u8 *src)
+clib_mov256 (u8 * dst, const u8 * src)
{
- clib_mov128((u8 *)dst + 0 * 128, (const u8 *)src + 0 * 128);
- clib_mov128((u8 *)dst + 1 * 128, (const u8 *)src + 1 * 128);
+ clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
+ clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}
static inline void
-clib_mov64blocks(u8 *dst, const u8 *src, size_t n)
+clib_mov64blocks (u8 * dst, const u8 * src, size_t n)
{
- __m256i ymm0, ymm1;
+ __m256i ymm0, ymm1;
- while (n >= 64) {
- ymm0 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 0 * 32));
- n -= 64;
- ymm1 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 1 * 32));
- src = (const u8 *)src + 64;
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 0 * 32), ymm0);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 1 * 32), ymm1);
- dst = (u8 *)dst + 64;
- }
+ while (n >= 64)
+ {
+ ymm0 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
+ n -= 64;
+ ymm1 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
+ src = (const u8 *) src + 64;
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
+ dst = (u8 *) dst + 64;
+ }
}
static inline void
-clib_mov256blocks(u8 *dst, const u8 *src, size_t n)
+clib_mov256blocks (u8 * dst, const u8 * src, size_t n)
{
- __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
+ __m256i ymm0, ymm1, ymm2, ymm3, ymm4, ymm5, ymm6, ymm7;
- while (n >= 256) {
- ymm0 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 0 * 32));
- n -= 256;
- ymm1 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 1 * 32));
- ymm2 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 2 * 32));
- ymm3 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 3 * 32));
- ymm4 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 4 * 32));
- ymm5 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 5 * 32));
- ymm6 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 6 * 32));
- ymm7 = _mm256_loadu_si256((const __m256i *)((const u8 *)src + 7 * 32));
- src = (const u8 *)src + 256;
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 0 * 32), ymm0);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 1 * 32), ymm1);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 2 * 32), ymm2);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 3 * 32), ymm3);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 4 * 32), ymm4);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 5 * 32), ymm5);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 6 * 32), ymm6);
- _mm256_storeu_si256((__m256i *)((u8 *)dst + 7 * 32), ymm7);
- dst = (u8 *)dst + 256;
- }
+ while (n >= 256)
+ {
+ ymm0 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 0 * 32));
+ n -= 256;
+ ymm1 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 1 * 32));
+ ymm2 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 2 * 32));
+ ymm3 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 3 * 32));
+ ymm4 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 4 * 32));
+ ymm5 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 5 * 32));
+ ymm6 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 6 * 32));
+ ymm7 =
+ _mm256_loadu_si256 ((const __m256i *) ((const u8 *) src + 7 * 32));
+ src = (const u8 *) src + 256;
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 0 * 32), ymm0);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 1 * 32), ymm1);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 2 * 32), ymm2);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 3 * 32), ymm3);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 4 * 32), ymm4);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 5 * 32), ymm5);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 6 * 32), ymm6);
+ _mm256_storeu_si256 ((__m256i *) ((u8 *) dst + 7 * 32), ymm7);
+ dst = (u8 *) dst + 256;
+ }
}
static inline void *
-clib_memcpy(void *dst, const void *src, size_t n)
+clib_memcpy (void *dst, const void *src, size_t n)
{
- uword dstu = (uword)dst;
- uword srcu = (uword)src;
- void *ret = dst;
- size_t dstofss;
- size_t bits;
+ uword dstu = (uword) dst;
+ uword srcu = (uword) src;
+ void *ret = dst;
+ size_t dstofss;
+ size_t bits;
- /**
+ /**
* Copy less than 16 bytes
*/
- if (n < 16) {
- if (n & 0x01) {
- *(u8 *)dstu = *(const u8 *)srcu;
- srcu = (uword)((const u8 *)srcu + 1);
- dstu = (uword)((u8 *)dstu + 1);
- }
- if (n & 0x02) {
- *(uint16_t *)dstu = *(const uint16_t *)srcu;
- srcu = (uword)((const uint16_t *)srcu + 1);
- dstu = (uword)((uint16_t *)dstu + 1);
- }
- if (n & 0x04) {
- *(uint32_t *)dstu = *(const uint32_t *)srcu;
- srcu = (uword)((const uint32_t *)srcu + 1);
- dstu = (uword)((uint32_t *)dstu + 1);
- }
- if (n & 0x08) {
- *(uint64_t *)dstu = *(const uint64_t *)srcu;
- }
- return ret;
- }
+ if (n < 16)
+ {
+ if (n & 0x01)
+ {
+ *(u8 *) dstu = *(const u8 *) srcu;
+ srcu = (uword) ((const u8 *) srcu + 1);
+ dstu = (uword) ((u8 *) dstu + 1);
+ }
+ if (n & 0x02)
+ {
+ *(uint16_t *) dstu = *(const uint16_t *) srcu;
+ srcu = (uword) ((const uint16_t *) srcu + 1);
+ dstu = (uword) ((uint16_t *) dstu + 1);
+ }
+ if (n & 0x04)
+ {
+ *(uint32_t *) dstu = *(const uint32_t *) srcu;
+ srcu = (uword) ((const uint32_t *) srcu + 1);
+ dstu = (uword) ((uint32_t *) dstu + 1);
+ }
+ if (n & 0x08)
+ {
+ *(uint64_t *) dstu = *(const uint64_t *) srcu;
+ }
+ return ret;
+ }
- /**
+ /**
* Fast way when copy size doesn't exceed 512 bytes
*/
- if (n <= 32) {
- clib_mov16((u8 *)dst, (const u8 *)src);
- clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
- return ret;
- }
- if (n <= 64) {
- clib_mov32((u8 *)dst, (const u8 *)src);
- clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
- return ret;
- }
- if (n <= 512) {
- if (n >= 256) {
- n -= 256;
- clib_mov256((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + 256;
- dst = (u8 *)dst + 256;
- }
- if (n >= 128) {
- n -= 128;
- clib_mov128((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + 128;
- dst = (u8 *)dst + 128;
- }
- if (n >= 64) {
- n -= 64;
- clib_mov64((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + 64;
- dst = (u8 *)dst + 64;
- }
-COPY_BLOCK_64_BACK31:
- if (n > 32) {
- clib_mov32((u8 *)dst, (const u8 *)src);
- clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
- return ret;
- }
- if (n > 0) {
- clib_mov32((u8 *)dst - 32 + n, (const u8 *)src - 32 + n);
- }
- return ret;
- }
+ if (n <= 32)
+ {
+ clib_mov16 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 64)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
+ return ret;
+ }
+ if (n <= 512)
+ {
+ if (n >= 256)
+ {
+ n -= 256;
+ clib_mov256 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 256;
+ dst = (u8 *) dst + 256;
+ }
+ if (n >= 128)
+ {
+ n -= 128;
+ clib_mov128 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 128;
+ dst = (u8 *) dst + 128;
+ }
+ if (n >= 64)
+ {
+ n -= 64;
+ clib_mov64 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 64;
+ dst = (u8 *) dst + 64;
+ }
+ COPY_BLOCK_64_BACK31:
+ if (n > 32)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
+ return ret;
+ }
+ if (n > 0)
+ {
+ clib_mov32 ((u8 *) dst - 32 + n, (const u8 *) src - 32 + n);
+ }
+ return ret;
+ }
- /**
+ /**
* Make store aligned when copy size exceeds 512 bytes
*/
- dstofss = (uword)dst & 0x1F;
- if (dstofss > 0) {
- dstofss = 32 - dstofss;
- n -= dstofss;
- clib_mov32((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + dstofss;
- dst = (u8 *)dst + dstofss;
- }
+ dstofss = (uword) dst & 0x1F;
+ if (dstofss > 0)
+ {
+ dstofss = 32 - dstofss;
+ n -= dstofss;
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + dstofss;
+ dst = (u8 *) dst + dstofss;
+ }
- /**
+ /**
* Copy 256-byte blocks.
* Use copy block function for better instruction order control,
* which is important when load is unaligned.
*/
- clib_mov256blocks((u8 *)dst, (const u8 *)src, n);
- bits = n;
- n = n & 255;
- bits -= n;
- src = (const u8 *)src + bits;
- dst = (u8 *)dst + bits;
+ clib_mov256blocks ((u8 *) dst, (const u8 *) src, n);
+ bits = n;
+ n = n & 255;
+ bits -= n;
+ src = (const u8 *) src + bits;
+ dst = (u8 *) dst + bits;
- /**
+ /**
* Copy 64-byte blocks.
* Use copy block function for better instruction order control,
* which is important when load is unaligned.
*/
- if (n >= 64) {
- clib_mov64blocks((u8 *)dst, (const u8 *)src, n);
- bits = n;
- n = n & 63;
- bits -= n;
- src = (const u8 *)src + bits;
- dst = (u8 *)dst + bits;
- }
+ if (n >= 64)
+ {
+ clib_mov64blocks ((u8 *) dst, (const u8 *) src, n);
+ bits = n;
+ n = n & 63;
+ bits -= n;
+ src = (const u8 *) src + bits;
+ dst = (u8 *) dst + bits;
+ }
- /**
+ /**
* Copy whatever left
*/
- goto COPY_BLOCK_64_BACK31;
+ goto COPY_BLOCK_64_BACK31;
}
#endif /* included_clib_mamcpy_avx_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
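Two notes on the AVX copy path reformatted above. First, the small-size branches rely on overlapping tail copies: with n = 40, clib_mov32 (dst, src) covers bytes 0-31 and clib_mov32 (dst - 32 + n, src - 32 + n) covers bytes 8-39, so any 33..64-byte copy needs only two 32-byte moves. Second, the function keeps the memcpy contract; a trivial usage fragment:

  u8 src_buf[1024], dst_buf[1024];

  /* same contract as memcpy (): copies n bytes, returns dst */
  clib_memcpy (dst_buf, src_buf, sizeof (src_buf));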
diff --git a/vppinfra/vppinfra/memcpy_sse3.h b/vppinfra/vppinfra/memcpy_sse3.h
index 12748f78b48..f61396c8922 100644
--- a/vppinfra/vppinfra/memcpy_sse3.h
+++ b/vppinfra/vppinfra/memcpy_sse3.h
@@ -52,40 +52,40 @@
#include <x86intrin.h>
static inline void
-clib_mov16(u8 *dst, const u8 *src)
+clib_mov16 (u8 * dst, const u8 * src)
{
- __m128i xmm0;
+ __m128i xmm0;
- xmm0 = _mm_loadu_si128((const __m128i *)src);
- _mm_storeu_si128((__m128i *)dst, xmm0);
+ xmm0 = _mm_loadu_si128 ((const __m128i *) src);
+ _mm_storeu_si128 ((__m128i *) dst, xmm0);
}
static inline void
-clib_mov32(u8 *dst, const u8 *src)
+clib_mov32 (u8 * dst, const u8 * src)
{
- clib_mov16((u8 *)dst + 0 * 16, (const u8 *)src + 0 * 16);
- clib_mov16((u8 *)dst + 1 * 16, (const u8 *)src + 1 * 16);
+ clib_mov16 ((u8 *) dst + 0 * 16, (const u8 *) src + 0 * 16);
+ clib_mov16 ((u8 *) dst + 1 * 16, (const u8 *) src + 1 * 16);
}
static inline void
-clib_mov64(u8 *dst, const u8 *src)
+clib_mov64 (u8 * dst, const u8 * src)
{
- clib_mov32((u8 *)dst + 0 * 32, (const u8 *)src + 0 * 32);
- clib_mov32((u8 *)dst + 1 * 32, (const u8 *)src + 1 * 32);
+ clib_mov32 ((u8 *) dst + 0 * 32, (const u8 *) src + 0 * 32);
+ clib_mov32 ((u8 *) dst + 1 * 32, (const u8 *) src + 1 * 32);
}
static inline void
-clib_mov128(u8 *dst, const u8 *src)
+clib_mov128 (u8 * dst, const u8 * src)
{
- clib_mov64((u8 *)dst + 0 * 64, (const u8 *)src + 0 * 64);
- clib_mov64((u8 *)dst + 1 * 64, (const u8 *)src + 1 * 64);
+ clib_mov64 ((u8 *) dst + 0 * 64, (const u8 *) src + 0 * 64);
+ clib_mov64 ((u8 *) dst + 1 * 64, (const u8 *) src + 1 * 64);
}
static inline void
-clib_mov256(u8 *dst, const u8 *src)
+clib_mov256 (u8 * dst, const u8 * src)
{
- clib_mov128((u8 *)dst + 0 * 128, (const u8 *)src + 0 * 128);
- clib_mov128((u8 *)dst + 1 * 128, (const u8 *)src + 1 * 128);
+ clib_mov128 ((u8 *) dst + 0 * 128, (const u8 *) src + 0 * 128);
+ clib_mov128 ((u8 *) dst + 1 * 128, (const u8 *) src + 1 * 128);
}
/**
@@ -183,101 +183,117 @@ clib_mov256(u8 *dst, const u8 *src)
})
static inline void *
-clib_memcpy(void *dst, const void *src, size_t n)
+clib_memcpy (void *dst, const void *src, size_t n)
{
- __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
- uword dstu = (uword)dst;
- uword srcu = (uword)src;
- void *ret = dst;
- size_t dstofss;
- size_t srcofs;
+ __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8;
+ uword dstu = (uword) dst;
+ uword srcu = (uword) src;
+ void *ret = dst;
+ size_t dstofss;
+ size_t srcofs;
/**
* Copy less than 16 bytes
*/
- if (n < 16) {
- if (n & 0x01) {
- *(u8 *)dstu = *(const u8 *)srcu;
- srcu = (uword)((const u8 *)srcu + 1);
- dstu = (uword)((u8 *)dstu + 1);
- }
- if (n & 0x02) {
- *(u16 *)dstu = *(const u16 *)srcu;
- srcu = (uword)((const u16 *)srcu + 1);
- dstu = (uword)((u16 *)dstu + 1);
- }
- if (n & 0x04) {
- *(u32 *)dstu = *(const u32 *)srcu;
- srcu = (uword)((const u32 *)srcu + 1);
- dstu = (uword)((u32 *)dstu + 1);
- }
- if (n & 0x08) {
- *(u64 *)dstu = *(const u64 *)srcu;
- }
- return ret;
+ if (n < 16)
+ {
+ if (n & 0x01)
+ {
+ *(u8 *) dstu = *(const u8 *) srcu;
+ srcu = (uword) ((const u8 *) srcu + 1);
+ dstu = (uword) ((u8 *) dstu + 1);
}
+ if (n & 0x02)
+ {
+ *(u16 *) dstu = *(const u16 *) srcu;
+ srcu = (uword) ((const u16 *) srcu + 1);
+ dstu = (uword) ((u16 *) dstu + 1);
+ }
+ if (n & 0x04)
+ {
+ *(u32 *) dstu = *(const u32 *) srcu;
+ srcu = (uword) ((const u32 *) srcu + 1);
+ dstu = (uword) ((u32 *) dstu + 1);
+ }
+ if (n & 0x08)
+ {
+ *(u64 *) dstu = *(const u64 *) srcu;
+ }
+ return ret;
+ }
/**
* Fast way when copy size doesn't exceed 512 bytes
*/
- if (n <= 32) {
- clib_mov16((u8 *)dst, (const u8 *)src);
- clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
- return ret;
+ if (n <= 32)
+ {
+ clib_mov16 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 48)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 64)
+ {
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst + 32, (const u8 *) src + 32);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
+ }
+ if (n <= 128)
+ {
+ goto COPY_BLOCK_128_BACK15;
+ }
+ if (n <= 512)
+ {
+ if (n >= 256)
+ {
+ n -= 256;
+ clib_mov128 ((u8 *) dst, (const u8 *) src);
+ clib_mov128 ((u8 *) dst + 128, (const u8 *) src + 128);
+ src = (const u8 *) src + 256;
+ dst = (u8 *) dst + 256;
}
- if (n <= 48) {
- clib_mov32((u8 *)dst, (const u8 *)src);
- clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
- return ret;
+ COPY_BLOCK_255_BACK15:
+ if (n >= 128)
+ {
+ n -= 128;
+ clib_mov128 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 128;
+ dst = (u8 *) dst + 128;
}
- if (n <= 64) {
- clib_mov32((u8 *)dst, (const u8 *)src);
- clib_mov16((u8 *)dst + 32, (const u8 *)src + 32);
- clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
- return ret;
+ COPY_BLOCK_128_BACK15:
+ if (n >= 64)
+ {
+ n -= 64;
+ clib_mov64 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 64;
+ dst = (u8 *) dst + 64;
}
- if (n <= 128) {
- goto COPY_BLOCK_128_BACK15;
+ COPY_BLOCK_64_BACK15:
+ if (n >= 32)
+ {
+ n -= 32;
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + 32;
+ dst = (u8 *) dst + 32;
}
- if (n <= 512) {
- if (n >= 256) {
- n -= 256;
- clib_mov128((u8 *)dst, (const u8 *)src);
- clib_mov128((u8 *)dst + 128, (const u8 *)src + 128);
- src = (const u8 *)src + 256;
- dst = (u8 *)dst + 256;
- }
-COPY_BLOCK_255_BACK15:
- if (n >= 128) {
- n -= 128;
- clib_mov128((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + 128;
- dst = (u8 *)dst + 128;
- }
-COPY_BLOCK_128_BACK15:
- if (n >= 64) {
- n -= 64;
- clib_mov64((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + 64;
- dst = (u8 *)dst + 64;
- }
-COPY_BLOCK_64_BACK15:
- if (n >= 32) {
- n -= 32;
- clib_mov32((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + 32;
- dst = (u8 *)dst + 32;
- }
- if (n > 16) {
- clib_mov16((u8 *)dst, (const u8 *)src);
- clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
- return ret;
- }
- if (n > 0) {
- clib_mov16((u8 *)dst - 16 + n, (const u8 *)src - 16 + n);
- }
- return ret;
+ if (n > 16)
+ {
+ clib_mov16 ((u8 *) dst, (const u8 *) src);
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ return ret;
}
+ if (n > 0)
+ {
+ clib_mov16 ((u8 *) dst - 16 + n, (const u8 *) src - 16 + n);
+ }
+ return ret;
+ }
/**
* Make store aligned when copy size exceeds 512 bytes,
@@ -285,41 +301,43 @@ COPY_BLOCK_64_BACK15:
* unaligned copy functions require up to 15 bytes
* backwards access.
*/
- dstofss = 16 - ((uword)dst & 0x0F) + 16;
- n -= dstofss;
- clib_mov32((u8 *)dst, (const u8 *)src);
- src = (const u8 *)src + dstofss;
- dst = (u8 *)dst + dstofss;
- srcofs = ((uword)src & 0x0F);
+ dstofss = 16 - ((uword) dst & 0x0F) + 16;
+ n -= dstofss;
+ clib_mov32 ((u8 *) dst, (const u8 *) src);
+ src = (const u8 *) src + dstofss;
+ dst = (u8 *) dst + dstofss;
+ srcofs = ((uword) src & 0x0F);
/**
* For aligned copy
*/
- if (srcofs == 0) {
+ if (srcofs == 0)
+ {
/**
* Copy 256-byte blocks
*/
- for (; n >= 256; n -= 256) {
- clib_mov256((u8 *)dst, (const u8 *)src);
- dst = (u8 *)dst + 256;
- src = (const u8 *)src + 256;
- }
+ for (; n >= 256; n -= 256)
+ {
+ clib_mov256 ((u8 *) dst, (const u8 *) src);
+ dst = (u8 *) dst + 256;
+ src = (const u8 *) src + 256;
+ }
/**
* Copy whatever left
*/
- goto COPY_BLOCK_255_BACK15;
- }
+ goto COPY_BLOCK_255_BACK15;
+ }
/**
* For copy with unaligned load
*/
- CLIB_MVUNALIGN_LEFT47(dst, src, n, srcofs);
+ CLIB_MVUNALIGN_LEFT47 (dst, src, n, srcofs);
/**
* Copy whatever left
*/
- goto COPY_BLOCK_64_BACK15;
+ goto COPY_BLOCK_64_BACK15;
}
@@ -328,3 +346,11 @@ COPY_BLOCK_64_BACK15:
#endif /* included_clib_memcpy_sse3_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/mhash.c b/vppinfra/vppinfra/mhash.c
index a7ff8587981..84517e191bd 100644
--- a/vppinfra/vppinfra/mhash.c
+++ b/vppinfra/vppinfra/mhash.c
@@ -38,7 +38,7 @@
#include <vppinfra/mhash.h>
always_inline u32
-load_partial_u32 (void * d, uword n)
+load_partial_u32 (void *d, uword n)
{
if (n == 4)
return ((u32 *) d)[0];
@@ -53,9 +53,9 @@ load_partial_u32 (void * d, uword n)
}
always_inline u32
-mhash_key_sum_inline (void * data, uword n_data_bytes, u32 seed)
+mhash_key_sum_inline (void *data, uword n_data_bytes, u32 seed)
{
- u32 * d32 = data;
+ u32 *d32 = data;
u32 a, b, c, n_left;
a = b = c = seed;
@@ -117,40 +117,38 @@ mhash_key_sum_inline (void * data, uword n_data_bytes, u32 seed)
}
foreach_mhash_key_size
-
#undef _
-
static uword
mhash_key_sum_c_string (hash_t * h, uword key)
{
- mhash_t * hv = uword_to_pointer (h->user, mhash_t *);
- void * k = mhash_key_to_mem (hv, key);
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k = mhash_key_to_mem (hv, key);
return mhash_key_sum_inline (k, strlen (k), hv->hash_seed);
}
static uword
mhash_key_equal_c_string (hash_t * h, uword key1, uword key2)
{
- mhash_t * hv = uword_to_pointer (h->user, mhash_t *);
- void * k1 = mhash_key_to_mem (hv, key1);
- void * k2 = mhash_key_to_mem (hv, key2);
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k1 = mhash_key_to_mem (hv, key1);
+ void *k2 = mhash_key_to_mem (hv, key2);
return strcmp (k1, k2) == 0;
}
static uword
mhash_key_sum_vec_string (hash_t * h, uword key)
{
- mhash_t * hv = uword_to_pointer (h->user, mhash_t *);
- void * k = mhash_key_to_mem (hv, key);
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k = mhash_key_to_mem (hv, key);
return mhash_key_sum_inline (k, vec_len (k), hv->hash_seed);
}
static uword
mhash_key_equal_vec_string (hash_t * h, uword key1, uword key2)
{
- mhash_t * hv = uword_to_pointer (h->user, mhash_t *);
- void * k1 = mhash_key_to_mem (hv, key1);
- void * k2 = mhash_key_to_mem (hv, key2);
+ mhash_t *hv = uword_to_pointer (h->user, mhash_t *);
+ void *k1 = mhash_key_to_mem (hv, key1);
+ void *k2 = mhash_key_to_mem (hv, key2);
return vec_len (k1) == vec_len (k2) && memcmp (k1, k2, vec_len (k1)) == 0;
}
@@ -161,17 +159,20 @@ mhash_key_equal_vec_string (hash_t * h, uword key1, uword key2)
always_inline void
mhash_sanitize_hash_user (mhash_t * mh)
{
- uword * hash = mh->hash;
- hash_t * h = hash_header (hash);
+ uword *hash = mh->hash;
+ hash_t *h = hash_header (hash);
h->user = pointer_to_uword (mh);
}
-void mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes)
+void
+mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes)
{
- static struct {
- hash_key_sum_function_t * key_sum;
- hash_key_equal_function_t * key_equal;
- } t[] = {
+ static struct
+ {
+ hash_key_sum_function_t *key_sum;
+ hash_key_equal_function_t *key_equal;
+ } t[] =
+ {
#define _(N_KEY_BYTES) \
[N_KEY_BYTES] = { \
.key_sum = mhash_key_sum_##N_KEY_BYTES, \
@@ -179,19 +180,14 @@ void mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes)
},
foreach_mhash_key_size
-
#undef _
-
- [MHASH_C_STRING_KEY] = {
- .key_sum = mhash_key_sum_c_string,
- .key_equal = mhash_key_equal_c_string,
- },
-
- [MHASH_VEC_STRING_KEY] = {
- .key_sum = mhash_key_sum_vec_string,
- .key_equal = mhash_key_equal_vec_string,
- },
- };
+ [MHASH_C_STRING_KEY] =
+ {
+ .key_sum = mhash_key_sum_c_string,.key_equal = mhash_key_equal_c_string,},
+ [MHASH_VEC_STRING_KEY] =
+ {
+ .key_sum = mhash_key_sum_vec_string,.key_equal =
+ mhash_key_equal_vec_string,},};
if (mhash_key_vector_is_heap (h))
heap_free (h->key_vector_or_heap);
@@ -212,27 +208,27 @@ void mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes)
#if 0
if (h->n_key_bytes > 0)
{
- vec_validate (h->key_tmp, h->n_key_bytes-1);
- _vec_len(h->key_tmp) = 0;
+ vec_validate (h->key_tmp, h->n_key_bytes - 1);
+ _vec_len (h->key_tmp) = 0;
}
#endif
ASSERT (n_key_bytes < ARRAY_LEN (t));
- h->hash = hash_create2 (/* elts */ 0,
+ h->hash = hash_create2 ( /* elts */ 0,
/* user */ pointer_to_uword (h),
/* value_bytes */ n_value_bytes,
- t[n_key_bytes].key_sum,
- t[n_key_bytes].key_equal,
+ t[n_key_bytes].key_sum, t[n_key_bytes].key_equal,
/* format pair/arg */
0, 0);
}
-static uword mhash_set_tmp_key (mhash_t * h, void * key)
+static uword
+mhash_set_tmp_key (mhash_t * h, void *key)
{
- u8 * key_tmp;
- int my_cpu = os_get_cpu_number();
+ u8 *key_tmp;
+ int my_cpu = os_get_cpu_number ();
- vec_validate(h->key_tmps, my_cpu);
+ vec_validate (h->key_tmps, my_cpu);
key_tmp = h->key_tmps[my_cpu];
vec_reset_length (key_tmp);
@@ -254,7 +250,8 @@ static uword mhash_set_tmp_key (mhash_t * h, void * key)
return ~0;
}
-hash_pair_t * mhash_get_pair (mhash_t * h, void * key)
+hash_pair_t *
+mhash_get_pair (mhash_t * h, void *key)
{
uword ikey;
mhash_sanitize_hash_user (h);
@@ -262,28 +259,32 @@ hash_pair_t * mhash_get_pair (mhash_t * h, void * key)
return hash_get_pair (h->hash, ikey);
}
-typedef struct {
+typedef struct
+{
u32 heap_handle;
/* Must conincide with vec_header. */
vec_header_t vec;
} mhash_string_key_t;
-uword mhash_set_mem (mhash_t * h, void * key, uword * new_value, uword * old_value)
+uword
+mhash_set_mem (mhash_t * h, void *key, uword * new_value, uword * old_value)
{
- u8 * k;
- uword ikey, i, l=0, n_key_bytes, old_n_elts, key_alloc_from_free_list = 0;
+ u8 *k;
+ uword ikey, i, l = 0, n_key_bytes, old_n_elts, key_alloc_from_free_list = 0;
mhash_sanitize_hash_user (h);
if (mhash_key_vector_is_heap (h))
{
- mhash_string_key_t * sk;
+ mhash_string_key_t *sk;
uword is_c_string = h->n_key_bytes == MHASH_C_STRING_KEY;
uword handle;
n_key_bytes = is_c_string ? (strlen (key) + 1) : vec_len (key);
- i = heap_alloc (h->key_vector_or_heap, n_key_bytes + sizeof (sk[0]), handle);
+ i =
+ heap_alloc (h->key_vector_or_heap, n_key_bytes + sizeof (sk[0]),
+ handle);
sk = (void *) (h->key_vector_or_heap + i);
sk->heap_handle = handle;
@@ -295,7 +296,8 @@ uword mhash_set_mem (mhash_t * h, void * key, uword * new_value, uword * old_val
}
else
{
- key_alloc_from_free_list = (l = vec_len (h->key_vector_free_indices)) > 0;
+ key_alloc_from_free_list = (l =
+ vec_len (h->key_vector_free_indices)) > 0;
if (key_alloc_from_free_list)
{
i = h->key_vector_free_indices[l - 1];
@@ -319,7 +321,7 @@ uword mhash_set_mem (mhash_t * h, void * key, uword * new_value, uword * old_val
/* If element already existed remove duplicate key. */
if (hash_elts (h->hash) == old_n_elts)
{
- hash_pair_t * p;
+ hash_pair_t *p;
/* Fetch old key for return value. */
p = hash_get_pair (h->hash, ikey);
@@ -328,7 +330,7 @@ uword mhash_set_mem (mhash_t * h, void * key, uword * new_value, uword * old_val
/* Remove duplicate key. */
if (mhash_key_vector_is_heap (h))
{
- mhash_string_key_t * sk;
+ mhash_string_key_t *sk;
sk = (void *) (h->key_vector_or_heap + i - sizeof (sk[0]));
heap_dealloc (h->key_vector_or_heap, sk->heap_handle);
}
@@ -347,16 +349,17 @@ uword mhash_set_mem (mhash_t * h, void * key, uword * new_value, uword * old_val
return ikey;
}
-uword mhash_unset (mhash_t * h, void * key, uword * old_value)
+uword
+mhash_unset (mhash_t * h, void *key, uword * old_value)
{
- hash_pair_t * p;
+ hash_pair_t *p;
uword i;
mhash_sanitize_hash_user (h);
i = mhash_set_tmp_key (h, key);
p = hash_get_pair (h->hash, i);
- if (! p)
+ if (!p)
return 0;
ASSERT (p->key != ~0);
@@ -364,7 +367,7 @@ uword mhash_unset (mhash_t * h, void * key, uword * old_value)
if (mhash_key_vector_is_heap (h))
{
- mhash_string_key_t * sk;
+ mhash_string_key_t *sk;
sk = (void *) (h->key_vector_or_heap + i) - sizeof (sk[0]);
heap_dealloc (h->key_vector_or_heap, sk->heap_handle);
}
@@ -375,11 +378,12 @@ uword mhash_unset (mhash_t * h, void * key, uword * old_value)
return 1;
}
-u8 * format_mhash_key (u8 * s, va_list * va)
+u8 *
+format_mhash_key (u8 * s, va_list * va)
{
- mhash_t * h = va_arg (*va, mhash_t *);
+ mhash_t *h = va_arg (*va, mhash_t *);
u32 ki = va_arg (*va, u32);
- void * k = mhash_key_to_mem (h, ki);
+ void *k = mhash_key_to_mem (h, ki);
if (mhash_key_vector_is_heap (h))
{
@@ -394,3 +398,11 @@ u8 * format_mhash_key (u8 * s, va_list * va)
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/mhash.h b/vppinfra/vppinfra/mhash.h
index 2938a8472d2..8ce8454b0a4 100644
--- a/vppinfra/vppinfra/mhash.h
+++ b/vppinfra/vppinfra/mhash.h
@@ -43,16 +43,17 @@
#include <vppinfra/heap.h>
/* Hash table plus vector of keys. */
-typedef struct {
+typedef struct
+{
/* Vector or heap used to store keys. Hash table stores keys as byte
offsets into this vector. */
- u8 * key_vector_or_heap;
+ u8 *key_vector_or_heap;
/* Byte offsets of free keys in vector (used to store free keys when
n_key_bytes > 1). */
- u32 * key_vector_free_indices;
+ u32 *key_vector_free_indices;
- u8 ** key_tmps;
+ u8 **key_tmps;
/* Possibly fixed size of key.
0 means keys are vectors of u8's.
@@ -65,73 +66,84 @@ typedef struct {
u32 hash_seed;
/* Hash table mapping key -> value. */
- uword * hash;
+ uword *hash;
/* Format function for keys. */
- format_function_t * format_key;
+ format_function_t *format_key;
} mhash_t;
void mhash_init (mhash_t * h, uword n_value_bytes, uword n_key_bytes);
always_inline void
mhash_init_c_string (mhash_t * h, uword n_value_bytes)
-{ mhash_init (h, n_value_bytes, MHASH_C_STRING_KEY); }
+{
+ mhash_init (h, n_value_bytes, MHASH_C_STRING_KEY);
+}
always_inline void
mhash_init_vec_string (mhash_t * h, uword n_value_bytes)
-{ mhash_init (h, n_value_bytes, MHASH_VEC_STRING_KEY); }
+{
+ mhash_init (h, n_value_bytes, MHASH_VEC_STRING_KEY);
+}
always_inline void *
mhash_key_to_mem (mhash_t * h, uword key)
{
if (key == ~0)
{
- u8 * key_tmp;
-
- int my_cpu = os_get_cpu_number();
- vec_validate(h->key_tmps, my_cpu);
+ u8 *key_tmp;
+
+ int my_cpu = os_get_cpu_number ();
+ vec_validate (h->key_tmps, my_cpu);
key_tmp = h->key_tmps[my_cpu];
return key_tmp;
}
return vec_elt_at_index (h->key_vector_or_heap, key);
}
-hash_pair_t * mhash_get_pair (mhash_t * h, void * key);
-uword mhash_set_mem (mhash_t * h, void * key, uword * new_value, uword * old_value);
-uword mhash_unset (mhash_t * h, void * key, uword * old_value);
+hash_pair_t *mhash_get_pair (mhash_t * h, void *key);
+uword mhash_set_mem (mhash_t * h, void *key, uword * new_value,
+ uword * old_value);
+uword mhash_unset (mhash_t * h, void *key, uword * old_value);
always_inline uword *
-mhash_get (mhash_t * h, void * key)
+mhash_get (mhash_t * h, void *key)
{
- hash_pair_t * p = mhash_get_pair (h, key);
+ hash_pair_t *p = mhash_get_pair (h, key);
return p ? &p->value[0] : 0;
}
always_inline uword
-mhash_set (mhash_t * h, void * key, uword new_value, uword * old_value)
-{ return mhash_set_mem (h, key, &new_value, old_value); }
+mhash_set (mhash_t * h, void *key, uword new_value, uword * old_value)
+{
+ return mhash_set_mem (h, key, &new_value, old_value);
+}
always_inline uword
mhash_unset_key (mhash_t * h, uword key, uword * old_value)
{
- void * k = mhash_key_to_mem (h, key);
+ void *k = mhash_key_to_mem (h, key);
return mhash_unset (h, k, old_value);
}
always_inline uword
mhash_value_bytes (mhash_t * m)
{
- hash_t * h = hash_header (m->hash);
+ hash_t *h = hash_header (m->hash);
return hash_value_bytes (h);
}
always_inline uword
mhash_elts (mhash_t * m)
-{ return hash_elts (m->hash); }
+{
+ return hash_elts (m->hash);
+}
always_inline uword
mhash_key_vector_is_heap (mhash_t * h)
-{ return h->n_key_bytes <= 1; }
+{
+ return h->n_key_bytes <= 1;
+}
always_inline void
mhash_free (mhash_t * h)
@@ -157,3 +169,11 @@ do { \
format_function_t format_mhash_key;
#endif /* included_clib_mhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
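A minimal sketch of the mhash API whose declarations were re-indented above, using a fixed-size 4-byte key (names and values are illustrative):

  #include <vppinfra/mhash.h>

  static void
  mhash_example (void)
  {
    mhash_t mh = { 0 };
    u32 key = 42;
    uword *value;

    mhash_init (&mh, /* n_value_bytes */ sizeof (uword),
                /* n_key_bytes */ sizeof (key));
    mhash_set (&mh, &key, /* new_value */ 17, /* old_value */ 0);

    value = mhash_get (&mh, &key);
    if (value)
      ASSERT (value[0] == 17);

    mhash_unset (&mh, &key, /* old_value */ 0);
    mhash_free (&mh);
  }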
diff --git a/vppinfra/vppinfra/mheap.c b/vppinfra/vppinfra/mheap.c
index 3bb2d037d0f..211cda600da 100644
--- a/vppinfra/vppinfra/mheap.c
+++ b/vppinfra/vppinfra/mheap.c
@@ -46,42 +46,44 @@
#include <vppinfra/elf_clib.h>
#endif
-static void mheap_get_trace (void * v, uword offset, uword size);
-static void mheap_put_trace (void * v, uword offset, uword size);
-static int mheap_trace_sort (const void * t1, const void * t2);
+static void mheap_get_trace (void *v, uword offset, uword size);
+static void mheap_put_trace (void *v, uword offset, uword size);
+static int mheap_trace_sort (const void *t1, const void *t2);
-always_inline void mheap_maybe_lock (void * v)
+always_inline void
+mheap_maybe_lock (void *v)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
if (v && (h->flags & MHEAP_FLAG_THREAD_SAFE))
{
- u32 my_cpu = os_get_cpu_number();
+ u32 my_cpu = os_get_cpu_number ();
if (h->owner_cpu == my_cpu)
- {
- h->recursion_count++;
- return;
- }
-
+ {
+ h->recursion_count++;
+ return;
+ }
+
while (__sync_lock_test_and_set (&h->lock, 1))
- ;
+ ;
h->owner_cpu = my_cpu;
h->recursion_count = 1;
}
}
-always_inline void mheap_maybe_unlock (void * v)
+always_inline void
+mheap_maybe_unlock (void *v)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
if (v && h->flags & MHEAP_FLAG_THREAD_SAFE)
{
- ASSERT(os_get_cpu_number() == h->owner_cpu);
+ ASSERT (os_get_cpu_number () == h->owner_cpu);
if (--h->recursion_count == 0)
- {
- h->owner_cpu = ~0;
- CLIB_MEMORY_BARRIER();
- h->lock = 0;
- }
+ {
+ h->owner_cpu = ~0;
+ CLIB_MEMORY_BARRIER ();
+ h->lock = 0;
+ }
}
}
@@ -96,14 +98,19 @@ user_data_size_to_bin_index (uword n_user_data_bytes)
n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
/* Round to words. */
- n_user_data_words = (round_pow2 (n_user_data_bytes, MHEAP_USER_DATA_WORD_BYTES)
- / MHEAP_USER_DATA_WORD_BYTES);
+ n_user_data_words =
+ (round_pow2 (n_user_data_bytes, MHEAP_USER_DATA_WORD_BYTES) /
+ MHEAP_USER_DATA_WORD_BYTES);
ASSERT (n_user_data_words > 0);
- small_bin = n_user_data_words - (MHEAP_MIN_USER_DATA_BYTES / MHEAP_USER_DATA_WORD_BYTES);
+ small_bin =
+ n_user_data_words -
+ (MHEAP_MIN_USER_DATA_BYTES / MHEAP_USER_DATA_WORD_BYTES);
ASSERT (small_bin >= 0);
- large_bin = MHEAP_N_SMALL_OBJECT_BINS + max_log2 (n_user_data_bytes) - MHEAP_LOG2_N_SMALL_OBJECT_BINS;
+ large_bin =
+ MHEAP_N_SMALL_OBJECT_BINS + max_log2 (n_user_data_bytes) -
+ MHEAP_LOG2_N_SMALL_OBJECT_BINS;
return small_bin < MHEAP_N_SMALL_OBJECT_BINS ? small_bin : large_bin;
}
@@ -115,20 +122,19 @@ mheap_elt_size_to_user_n_bytes (uword n_bytes)
return (n_bytes - STRUCT_OFFSET_OF (mheap_elt_t, user_data));
}
-always_inline uword __attribute__((unused))
+always_inline uword __attribute__ ((unused))
mheap_elt_size_to_user_n_words (uword n_bytes)
{
ASSERT (n_bytes % MHEAP_USER_DATA_WORD_BYTES == 0);
- return mheap_elt_size_to_user_n_bytes (n_bytes) / MHEAP_USER_DATA_WORD_BYTES;
+ return mheap_elt_size_to_user_n_bytes (n_bytes) /
+ MHEAP_USER_DATA_WORD_BYTES;
}
always_inline void
-mheap_elt_set_size (void * v,
- uword uoffset,
- uword n_user_data_bytes,
- uword is_free)
+mheap_elt_set_size (void *v,
+ uword uoffset, uword n_user_data_bytes, uword is_free)
{
- mheap_elt_t * e, * n;
+ mheap_elt_t *e, *n;
e = mheap_elt_at_uoffset (v, uoffset);
@@ -136,14 +142,16 @@ mheap_elt_set_size (void * v,
e->n_user_data = n_user_data_bytes / MHEAP_USER_DATA_WORD_BYTES;
e->is_free = is_free;
- ASSERT (e->prev_n_user_data * sizeof (e->user_data[0]) >= MHEAP_MIN_USER_DATA_BYTES);
+ ASSERT (e->prev_n_user_data * sizeof (e->user_data[0]) >=
+ MHEAP_MIN_USER_DATA_BYTES);
n = mheap_next_elt (e);
n->prev_n_user_data = e->n_user_data;
n->prev_is_free = is_free;
}
-always_inline void set_first_free_elt_offset (mheap_t * h, uword bin, uword uoffset)
+always_inline void
+set_first_free_elt_offset (mheap_t * h, uword bin, uword uoffset)
{
uword i0, i1;
@@ -160,11 +168,11 @@ always_inline void set_first_free_elt_offset (mheap_t * h, uword bin, uword uoff
}
always_inline void
-set_free_elt (void * v, uword uoffset, uword n_user_data_bytes)
+set_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
{
- mheap_t * h = mheap_header (v);
- mheap_elt_t * e = mheap_elt_at_uoffset (v, uoffset);
- mheap_elt_t * n = mheap_next_elt (e);
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *e = mheap_elt_at_uoffset (v, uoffset);
+ mheap_elt_t *n = mheap_next_elt (e);
uword bin = user_data_size_to_bin_index (n_user_data_bytes);
ASSERT (n->prev_is_free);
@@ -176,7 +184,7 @@ set_free_elt (void * v, uword uoffset, uword n_user_data_bytes)
/* Fill in next free elt's previous pointer. */
if (e->free_elt.next_uoffset != MHEAP_GROUNDED)
{
- mheap_elt_t * nf = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
+ mheap_elt_t *nf = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
ASSERT (nf->is_free);
nf->free_elt.prev_uoffset = uoffset;
}
@@ -185,17 +193,17 @@ set_free_elt (void * v, uword uoffset, uword n_user_data_bytes)
}
always_inline void
-new_free_elt (void * v, uword uoffset, uword n_user_data_bytes)
+new_free_elt (void *v, uword uoffset, uword n_user_data_bytes)
{
mheap_elt_set_size (v, uoffset, n_user_data_bytes, /* is_free */ 1);
set_free_elt (v, uoffset, n_user_data_bytes);
}
always_inline void
-remove_free_elt (void * v, mheap_elt_t * e, uword bin)
+remove_free_elt (void *v, mheap_elt_t * e, uword bin)
{
- mheap_t * h = mheap_header (v);
- mheap_elt_t * p, * n;
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *p, *n;
#if CLIB_VEC64 > 0
u64 no, po;
#else
@@ -208,7 +216,7 @@ remove_free_elt (void * v, mheap_elt_t * e, uword bin)
po = e->free_elt.prev_uoffset;
p = po != MHEAP_GROUNDED ? mheap_elt_at_uoffset (v, po) : 0;
- if (! p)
+ if (!p)
set_first_free_elt_offset (h, bin, no);
else
p->free_elt.next_uoffset = no;
@@ -218,7 +226,7 @@ remove_free_elt (void * v, mheap_elt_t * e, uword bin)
}
always_inline void
-remove_free_elt2 (void * v, mheap_elt_t * e)
+remove_free_elt2 (void *v, mheap_elt_t * e)
{
uword bin;
bin = user_data_size_to_bin_index (mheap_elt_data_bytes (e));
@@ -234,23 +242,26 @@ remove_free_elt2 (void * v, mheap_elt_t * e)
static uword mheap_page_size;
-static_always_inline uword mheap_page_round (uword addr)
-{ return (addr + mheap_page_size - 1) &~ (mheap_page_size - 1); }
+static_always_inline uword
+mheap_page_round (uword addr)
+{
+ return (addr + mheap_page_size - 1) & ~(mheap_page_size - 1);
+}
-static_always_inline uword mheap_page_truncate (uword addr)
-{ return addr &~ (mheap_page_size - 1); }
+static_always_inline uword
+mheap_page_truncate (uword addr)
+{
+ return addr & ~(mheap_page_size - 1);
+}
static_always_inline uword
-mheap_vm (void * v,
- uword flags,
- clib_address_t start_addr,
- uword size)
+mheap_vm (void *v, uword flags, clib_address_t start_addr, uword size)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
clib_address_t start_page, end_page, end_addr;
uword mapped_bytes;
- ASSERT (! (h->flags & MHEAP_FLAG_DISABLE_VM));
+ ASSERT (!(h->flags & MHEAP_FLAG_DISABLE_VM));
end_addr = start_addr + size;
@@ -276,9 +287,9 @@ mheap_vm (void * v,
}
static_always_inline uword
-mheap_vm_elt (void * v, uword flags, uword offset)
+mheap_vm_elt (void *v, uword flags, uword offset)
{
- mheap_elt_t * e;
+ mheap_elt_t *e;
clib_address_t start_addr, end_addr;
e = mheap_elt_at_uoffset (v, offset);
@@ -301,9 +312,9 @@ mheap_small_object_cache_mask (mheap_small_object_cache_t * c, uword bin)
ASSERT (bin < 256);
#define _(i) ((uword) u8x16_compare_byte_mask (u8x16_is_equal (b, c->bins.as_u8x16[i])) << (uword) ((i)*16))
- mask = _ (0) | _ (1);
+ mask = _(0) | _(1);
if (BITS (uword) > 32)
- mask |= _ (2) | _ (3);
+ mask |= _(2) | _(3);
#undef _
#endif
@@ -313,7 +324,7 @@ mheap_small_object_cache_mask (mheap_small_object_cache_t * c, uword bin)
always_inline uword
mheap_get_small_object (mheap_t * h, uword bin)
{
- mheap_small_object_cache_t * c = &h->small_object_cache;
+ mheap_small_object_cache_t *c = &h->small_object_cache;
uword mask = mheap_small_object_cache_mask (c, bin + 1);
uword offset = MHEAP_GROUNDED;
@@ -332,7 +343,7 @@ mheap_get_small_object (mheap_t * h, uword bin)
always_inline uword
mheap_put_small_object (mheap_t * h, uword bin, uword offset)
{
- mheap_small_object_cache_t * c = &h->small_object_cache;
+ mheap_small_object_cache_t *c = &h->small_object_cache;
uword free_mask = mheap_small_object_cache_mask (c, 0);
uword b = bin + 1;
uword i;
@@ -361,14 +372,13 @@ mheap_put_small_object (mheap_t * h, uword bin, uword offset)
}
static uword
-mheap_get_search_free_bin (void * v,
+mheap_get_search_free_bin (void *v,
uword bin,
uword * n_user_data_bytes_arg,
- uword align,
- uword align_offset)
+ uword align, uword align_offset)
{
- mheap_t * h = mheap_header (v);
- mheap_elt_t * e;
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *e;
/* Free object is at offset f0 ... f1;
      Allocated object is at offset o0 ... o1. */
@@ -404,12 +414,12 @@ mheap_get_search_free_bin (void * v,
f1 = f0 + this_object_n_user_data_bytes;
/* Place candidate object at end of free block and align as requested. */
- o0 = ((f1 - search_n_user_data_bytes) &~ (align - 1)) - align_offset;
+ o0 = ((f1 - search_n_user_data_bytes) & ~(align - 1)) - align_offset;
while (o0 < f0)
o0 += align;
/* Make sure that first free fragment is either empty or
- large enough to be valid. */
+ large enough to be valid. */
while (1)
{
lo_free_usize = o0 != f0 ? o0 - f0 - MHEAP_ELT_OVERHEAD_BYTES : 0;
@@ -433,7 +443,7 @@ mheap_get_search_free_bin (void * v,
e = mheap_elt_at_uoffset (v, e->free_elt.next_uoffset);
}
- found:
+found:
/* Free fragment at end. */
hi_free_usize = f1 != o1 ? f1 - o1 - MHEAP_ELT_OVERHEAD_BYTES : 0;
@@ -447,19 +457,19 @@ mheap_get_search_free_bin (void * v,
}
/* Need to make sure that relevant memory areas are mapped. */
- if (! (h->flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
{
- mheap_elt_t * f0_elt = mheap_elt_at_uoffset (v, f0);
- mheap_elt_t * f1_elt = mheap_elt_at_uoffset (v, f1);
- mheap_elt_t * o0_elt = mheap_elt_at_uoffset (v, o0);
- mheap_elt_t * o1_elt = mheap_elt_at_uoffset (v, o1);
+ mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
+ mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);
+ mheap_elt_t *o0_elt = mheap_elt_at_uoffset (v, o0);
+ mheap_elt_t *o1_elt = mheap_elt_at_uoffset (v, o1);
uword f0_page_start, f0_page_end;
uword o0_page_start, o0_page_end;
/* Free elt is mapped. Addresses after that may not be mapped. */
f0_page_start = mheap_page_round (pointer_to_uword (f0_elt->user_data));
- f0_page_end = mheap_page_truncate (pointer_to_uword (f1_elt));
+ f0_page_end = mheap_page_truncate (pointer_to_uword (f1_elt));
o0_page_start = mheap_page_truncate (pointer_to_uword (o0_elt));
o0_page_end = mheap_page_round (pointer_to_uword (o1_elt->user_data));
@@ -504,12 +514,11 @@ mheap_get_search_free_bin (void * v,
/* Search free lists for object with given size and alignment. */
static uword
-mheap_get_search_free_list (void * v,
+mheap_get_search_free_list (void *v,
uword * n_user_bytes_arg,
- uword align,
- uword align_offset)
+ uword align, uword align_offset)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
uword bin, n_user_bytes, i, bi;
n_user_bytes = *n_user_bytes_arg;
@@ -530,7 +539,8 @@ mheap_get_search_free_list (void * v,
}
}
- for (i = bin / BITS (uword); i < ARRAY_LEN (h->non_empty_free_elt_heads); i++)
+ for (i = bin / BITS (uword); i < ARRAY_LEN (h->non_empty_free_elt_heads);
+ i++)
{
uword non_empty_bin_mask = h->non_empty_free_elt_heads[i];
@@ -539,28 +549,39 @@ mheap_get_search_free_list (void * v,
non_empty_bin_mask &= ~pow2_mask (bin % BITS (uword));
/* Search each occupied free bin which is large enough. */
- foreach_set_bit (bi, non_empty_bin_mask, ({
- uword r = mheap_get_search_free_bin (v, bi + i * BITS (uword), n_user_bytes_arg, align, align_offset);
- if (r != MHEAP_GROUNDED)
- return r;
- }));
+ foreach_set_bit (bi, non_empty_bin_mask, (
+ {
+ uword r =
+ mheap_get_search_free_bin (v,
+ bi
+ +
+ i
+ *
+ BITS
+ (uword),
+ n_user_bytes_arg,
+ align,
+ align_offset);
+ if (r !=
+ MHEAP_GROUNDED) return
+ r;}
+ ));
}
return MHEAP_GROUNDED;
}
static never_inline void *
-mheap_get_extend_vector (void * v,
+mheap_get_extend_vector (void *v,
uword n_user_data_bytes,
uword align,
- uword align_offset,
- uword * offset_return)
+ uword align_offset, uword * offset_return)
{
/* Bounds of free and allocated objects (as above). */
uword f0, f1, o0, o1;
word free_size;
- mheap_t * h = mheap_header (v);
- mheap_elt_t * e;
+ mheap_t *h = mheap_header (v);
+ mheap_elt_t *e;
if (_vec_len (v) == 0)
{
@@ -585,7 +606,7 @@ mheap_get_extend_vector (void * v,
o1 = o0 + n_user_data_bytes;
f1 = o1 + MHEAP_ELT_OVERHEAD_BYTES;
-
+
ASSERT (v != 0);
h = mheap_header (v);
@@ -598,10 +619,10 @@ mheap_get_extend_vector (void * v,
_vec_len (v) = f1;
- if (! (h->flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
{
- mheap_elt_t * f0_elt = mheap_elt_at_uoffset (v, f0);
- mheap_elt_t * f1_elt = mheap_elt_at_uoffset (v, f1);
+ mheap_elt_t *f0_elt = mheap_elt_at_uoffset (v, f0);
+ mheap_elt_t *f1_elt = mheap_elt_at_uoffset (v, f1);
uword f0_page = mheap_page_round (pointer_to_uword (f0_elt->user_data));
uword f1_page = mheap_page_round (pointer_to_uword (f1_elt->user_data));
@@ -624,13 +645,12 @@ mheap_get_extend_vector (void * v,
return v;
}
-void * mheap_get_aligned (void * v,
- uword n_user_data_bytes,
- uword align,
- uword align_offset,
- uword * offset_return)
+void *
+mheap_get_aligned (void *v,
+ uword n_user_data_bytes,
+ uword align, uword align_offset, uword * offset_return)
{
- mheap_t * h;
+ mheap_t *h;
uword offset;
u64 cpu_times[2];
@@ -651,9 +671,11 @@ void * mheap_get_aligned (void * v,
/* Round requested size. */
n_user_data_bytes = clib_max (n_user_data_bytes, MHEAP_MIN_USER_DATA_BYTES);
- n_user_data_bytes = round_pow2 (n_user_data_bytes, STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
+ n_user_data_bytes =
+ round_pow2 (n_user_data_bytes,
+ STRUCT_SIZE_OF (mheap_elt_t, user_data[0]));
- if (! v)
+ if (!v)
v = mheap_alloc (0, 64 << 20);
mheap_maybe_lock (v);
@@ -664,14 +686,17 @@ void * mheap_get_aligned (void * v,
mheap_validate (v);
/* First search free lists for object. */
- offset = mheap_get_search_free_list (v, &n_user_data_bytes, align, align_offset);
+ offset =
+ mheap_get_search_free_list (v, &n_user_data_bytes, align, align_offset);
h = mheap_header (v);
/* If that fails allocate object at end of heap by extending vector. */
if (offset == MHEAP_GROUNDED && _vec_len (v) < h->max_size)
{
- v = mheap_get_extend_vector (v, n_user_data_bytes, align, align_offset, &offset);
+ v =
+ mheap_get_extend_vector (v, n_user_data_bytes, align, align_offset,
+ &offset);
h = mheap_header (v);
h->stats.n_vector_expands += offset != MHEAP_GROUNDED;
}
@@ -704,9 +729,10 @@ void * mheap_get_aligned (void * v,
return v;
}
-static void free_last_elt (void * v, mheap_elt_t * e)
+static void
+free_last_elt (void *v, mheap_elt_t * e)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
   /* Possibly delete preceding free element also. */
if (e->prev_is_free)
@@ -717,25 +743,26 @@ static void free_last_elt (void * v, mheap_elt_t * e)
if (e->prev_n_user_data == MHEAP_N_USER_DATA_INVALID)
{
- if (! (h->flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
mheap_vm_elt (v, MHEAP_VM_UNMAP, mheap_elt_uoffset (v, e));
_vec_len (v) = 0;
}
else
{
uword uo = mheap_elt_uoffset (v, e);
- if (! (h->flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
mheap_vm_elt (v, MHEAP_VM_UNMAP, uo);
e->n_user_data = MHEAP_N_USER_DATA_INVALID;
_vec_len (v) = uo;
}
}
-void mheap_put (void * v, uword uoffset)
+void
+mheap_put (void *v, uword uoffset)
{
- mheap_t * h;
+ mheap_t *h;
uword n_user_data_bytes, bin;
- mheap_elt_t * e, * n;
+ mheap_elt_t *e, *n;
uword trace_uoffset, trace_n_user_data_bytes;
u64 cpu_times[2];
@@ -761,11 +788,10 @@ void mheap_put (void * v, uword uoffset)
bin = user_data_size_to_bin_index (n_user_data_bytes);
if (MHEAP_HAVE_SMALL_OBJECT_CACHE
- && bin < 255
- && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE))
+ && bin < 255 && (h->flags & MHEAP_FLAG_SMALL_OBJECT_CACHE))
{
uoffset = mheap_put_small_object (h, bin, uoffset);
- if (uoffset == 0)
+ if (uoffset == 0)
goto done;
e = mheap_elt_at_uoffset (v, uoffset);
@@ -799,7 +825,7 @@ void mheap_put (void * v, uword uoffset)
if (e->prev_is_free)
{
- mheap_elt_t * p = mheap_prev_elt (e);
+ mheap_elt_t *p = mheap_prev_elt (e);
f0 = mheap_elt_uoffset (v, p);
remove_free_elt2 (v, p);
n_combine++;
@@ -807,7 +833,7 @@ void mheap_put (void * v, uword uoffset)
if (n->is_free)
{
- mheap_elt_t * m = mheap_next_elt (n);
+ mheap_elt_t *m = mheap_next_elt (n);
f1 = (void *) m - v;
remove_free_elt2 (v, n);
n_combine++;
@@ -819,11 +845,11 @@ void mheap_put (void * v, uword uoffset)
e->is_free = n->prev_is_free = 1;
set_free_elt (v, f0, f1 - f0);
- if (! (h->flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
mheap_vm_elt (v, MHEAP_VM_UNMAP, f0);
}
- done:
+done:
h = mheap_header (v);
if (h->flags & MHEAP_FLAG_TRACE)
@@ -845,20 +871,21 @@ void mheap_put (void * v, uword uoffset)
h->stats.n_clocks_put += cpu_times[1] - cpu_times[0];
}
-void * mheap_alloc_with_flags (void * memory, uword memory_size, uword flags)
+void *
+mheap_alloc_with_flags (void *memory, uword memory_size, uword flags)
{
- mheap_t * h;
- void * v;
+ mheap_t *h;
+ void *v;
uword size;
- if (! mheap_page_size)
+ if (!mheap_page_size)
mheap_page_size = clib_mem_get_page_size ();
- if (! memory)
+ if (!memory)
{
/* No memory given, try to VM allocate some. */
memory = clib_mem_vm_alloc (memory_size);
- if (! memory)
+ if (!memory)
return 0;
/* No memory region implies we have virtual memory. */
@@ -880,20 +907,21 @@ void * mheap_alloc_with_flags (void * memory, uword memory_size, uword flags)
h = uword_to_pointer (ah, void *);
v = mheap_vector (h);
- if (PREDICT_FALSE(memory + memory_size < v)) {
+ if (PREDICT_FALSE (memory + memory_size < v))
+ {
/*
* This will happen when the requested memory_size is too
* small to cope with the heap header and/or memory alignment.
*/
- clib_mem_vm_free(memory, memory_size);
+ clib_mem_vm_free (memory, memory_size);
return 0;
- }
+ }
size = memory + memory_size - v;
}
/* VM map header so we can use memory. */
- if (! (flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(flags & MHEAP_FLAG_DISABLE_VM))
clib_mem_vm_map (h, sizeof (h[0]));
/* Zero vector header: both heap header and vector length. */
@@ -907,21 +935,22 @@ void * mheap_alloc_with_flags (void * memory, uword memory_size, uword flags)
h->owner_cpu = ~0;
   /* Set flags based on those given, excluding built-in flags. */
- h->flags |= (flags &~ MHEAP_FLAG_TRACE);
+ h->flags |= (flags & ~MHEAP_FLAG_TRACE);
/* Unmap remainder of heap until we will be ready to use it. */
- if (! (h->flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
mheap_vm (v, MHEAP_VM_UNMAP | MHEAP_VM_ROUND_UP,
(clib_address_t) v, h->max_size);
/* Initialize free list heads to empty. */
- memset (h->first_free_elt_uoffset_by_bin, 0xFF,
- sizeof (h->first_free_elt_uoffset_by_bin));
+ memset (h->first_free_elt_uoffset_by_bin, 0xFF,
+ sizeof (h->first_free_elt_uoffset_by_bin));
return v;
}
-void * mheap_alloc (void * memory, uword size)
+void *
+mheap_alloc (void *memory, uword size)
{
uword flags = 0;
@@ -935,24 +964,27 @@ void * mheap_alloc (void * memory, uword size)
return mheap_alloc_with_flags (memory, size, flags);
}
-void * _mheap_free (void * v)
+void *
+_mheap_free (void *v)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
if (v)
- clib_mem_vm_free ((void *) h - h->vm_alloc_offset_from_header, h->vm_alloc_size);
-
+ clib_mem_vm_free ((void *) h - h->vm_alloc_offset_from_header,
+ h->vm_alloc_size);
+
return 0;
}
/* Call user's function with each object in heap. */
-void mheap_foreach (void * v,
- uword (* func) (void * arg, void * v, void * elt_data, uword elt_size),
- void * arg)
+void
+mheap_foreach (void *v,
+ uword (*func) (void *arg, void *v, void *elt_data,
+ uword elt_size), void *arg)
{
- mheap_elt_t * e;
- u8 * stack_heap, * clib_mem_mheap_save;
- u8 tmp_heap_memory[16*1024];
+ mheap_elt_t *e;
+ u8 *stack_heap, *clib_mem_mheap_save;
+ u8 tmp_heap_memory[16 * 1024];
mheap_maybe_lock (v);
@@ -974,13 +1006,12 @@ void mheap_foreach (void * v,
}
for (e = v;
- e->n_user_data != MHEAP_N_USER_DATA_INVALID;
- e = mheap_next_elt (e))
+ e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = mheap_next_elt (e))
{
- void * p = mheap_elt_data (v, e);
+ void *p = mheap_elt_data (v, e);
if (e->is_free)
continue;
- if ((* func) (arg, v, p, mheap_elt_data_bytes (e)))
+ if ((*func) (arg, v, p, mheap_elt_data_bytes (e)))
break;
}
@@ -988,30 +1019,34 @@ void mheap_foreach (void * v,
if (clib_mem_mheap_save)
clib_mem_set_heap (clib_mem_mheap_save);
- done:
+done:
mheap_maybe_unlock (v);
}
/* Bytes in mheap header overhead not including data bytes. */
always_inline uword
-mheap_bytes_overhead (void * v)
+mheap_bytes_overhead (void *v)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
return v ? sizeof (h[0]) + h->n_elts * sizeof (mheap_elt_t) : 0;
}
/* Total number of bytes including both data and overhead. */
-uword mheap_bytes (void * v)
-{ return mheap_bytes_overhead (v) + vec_bytes (v); }
+uword
+mheap_bytes (void *v)
+{
+ return mheap_bytes_overhead (v) + vec_bytes (v);
+}
-static void mheap_usage_no_lock (void * v, clib_mem_usage_t * usage)
+static void
+mheap_usage_no_lock (void *v, clib_mem_usage_t * usage)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
uword used = 0, free = 0, free_vm_unmapped = 0;
if (vec_len (v) > 0)
{
- mheap_elt_t * e;
+ mheap_elt_t *e;
for (e = v;
e->n_user_data != MHEAP_N_USER_DATA_INVALID;
@@ -1021,7 +1056,7 @@ static void mheap_usage_no_lock (void * v, clib_mem_usage_t * usage)
if (e->is_free)
{
free += size;
- if (! (h->flags & MHEAP_FLAG_DISABLE_VM))
+ if (!(h->flags & MHEAP_FLAG_DISABLE_VM))
free_vm_unmapped +=
mheap_vm_elt (v, MHEAP_VM_NOMAP, mheap_elt_uoffset (v, e));
}
@@ -1039,14 +1074,16 @@ static void mheap_usage_no_lock (void * v, clib_mem_usage_t * usage)
usage->bytes_free_reclaimed = free_vm_unmapped;
}
-void mheap_usage (void * v, clib_mem_usage_t * usage)
+void
+mheap_usage (void *v, clib_mem_usage_t * usage)
{
mheap_maybe_lock (v);
mheap_usage_no_lock (v, usage);
mheap_maybe_unlock (v);
}
-static u8 * format_mheap_byte_count (u8 * s, va_list * va)
+static u8 *
+format_mheap_byte_count (u8 * s, va_list * va)
{
uword n_bytes = va_arg (*va, uword);
if (n_bytes < 1024)
@@ -1056,9 +1093,10 @@ static u8 * format_mheap_byte_count (u8 * s, va_list * va)
}
/* Returns first corrupt heap element. */
-static mheap_elt_t * mheap_first_corrupt (void * v)
+static mheap_elt_t *
+mheap_first_corrupt (void *v)
{
- mheap_elt_t * e, * n;
+ mheap_elt_t *e, *n;
if (vec_len (v) == 0)
return 0;
@@ -1083,58 +1121,59 @@ static mheap_elt_t * mheap_first_corrupt (void * v)
return 0;
}
-static u8 * format_mheap_stats (u8 * s, va_list * va)
+static u8 *
+format_mheap_stats (u8 * s, va_list * va)
{
- mheap_t * h = va_arg (*va, mheap_t *);
- mheap_stats_t * st = &h->stats;
+ mheap_t *h = va_arg (*va, mheap_t *);
+ mheap_stats_t *st = &h->stats;
uword indent = format_get_indent (s);
- s = format (s, "alloc. from small object cache: %Ld hits %Ld attempts (%.2f%%) replacements %d",
- st->n_small_object_cache_hits,
- st->n_small_object_cache_attempts,
- (st->n_small_object_cache_attempts != 0
- ? 100. * (f64) st->n_small_object_cache_hits / (f64) st->n_small_object_cache_attempts
- : 0.),
- h->small_object_cache.replacement_index);
-
- s = format (s, "\n%Ualloc. from free-list: %Ld attempts, %Ld hits (%.2f%%), %Ld considered (per-attempt %.2f)",
- format_white_space, indent,
- st->free_list.n_search_attempts,
- st->free_list.n_objects_found,
- (st->free_list.n_search_attempts != 0
- ? 100. * (f64) st->free_list.n_objects_found / (f64) st->free_list.n_search_attempts
- : 0.),
- st->free_list.n_objects_searched,
- (st->free_list.n_search_attempts != 0
- ? (f64) st->free_list.n_objects_searched / (f64) st->free_list.n_search_attempts
- : 0.));
+ s =
+ format (s,
+ "alloc. from small object cache: %Ld hits %Ld attempts (%.2f%%) replacements %d",
+ st->n_small_object_cache_hits, st->n_small_object_cache_attempts,
+ (st->n_small_object_cache_attempts !=
+ 0 ? 100. * (f64) st->n_small_object_cache_hits /
+ (f64) st->n_small_object_cache_attempts : 0.),
+ h->small_object_cache.replacement_index);
+
+ s =
+ format (s,
+ "\n%Ualloc. from free-list: %Ld attempts, %Ld hits (%.2f%%), %Ld considered (per-attempt %.2f)",
+ format_white_space, indent, st->free_list.n_search_attempts,
+ st->free_list.n_objects_found,
+ (st->free_list.n_search_attempts !=
+ 0 ? 100. * (f64) st->free_list.n_objects_found /
+ (f64) st->free_list.n_search_attempts : 0.),
+ st->free_list.n_objects_searched,
+ (st->free_list.n_search_attempts !=
+ 0 ? (f64) st->free_list.n_objects_searched /
+ (f64) st->free_list.n_search_attempts : 0.));
s = format (s, "\n%Ualloc. from vector-expand: %Ld",
- format_white_space, indent,
- st->n_vector_expands);
+ format_white_space, indent, st->n_vector_expands);
s = format (s, "\n%Uallocs: %Ld %.2f clocks/call",
format_white_space, indent,
- st->n_gets,
- (f64) st->n_clocks_get / (f64) st->n_gets);
+ st->n_gets, (f64) st->n_clocks_get / (f64) st->n_gets);
s = format (s, "\n%Ufrees: %Ld %.2f clocks/call",
format_white_space, indent,
- st->n_puts,
- (f64) st->n_clocks_put / (f64) st->n_puts);
-
+ st->n_puts, (f64) st->n_clocks_put / (f64) st->n_puts);
+
return s;
}
-u8 * format_mheap (u8 * s, va_list * va)
+u8 *
+format_mheap (u8 * s, va_list * va)
{
- void * v = va_arg (*va, u8 *);
+ void *v = va_arg (*va, u8 *);
int verbose = va_arg (*va, int);
- mheap_t * h;
+ mheap_t *h;
uword i, size, indent;
clib_mem_usage_t usage;
- mheap_elt_t * first_corrupt;
+ mheap_elt_t *first_corrupt;
mheap_maybe_lock (v);
@@ -1144,13 +1183,14 @@ u8 * format_mheap (u8 * s, va_list * va)
indent = format_get_indent (s);
- s = format (s, "%d objects, %U of %U used, %U free, %U reclaimed, %U overhead",
- usage.object_count,
- format_mheap_byte_count, usage.bytes_used,
- format_mheap_byte_count, usage.bytes_total,
- format_mheap_byte_count, usage.bytes_free,
- format_mheap_byte_count, usage.bytes_free_reclaimed,
- format_mheap_byte_count, usage.bytes_overhead);
+ s =
+ format (s,
+ "%d objects, %U of %U used, %U free, %U reclaimed, %U overhead",
+ usage.object_count, format_mheap_byte_count, usage.bytes_used,
+ format_mheap_byte_count, usage.bytes_total,
+ format_mheap_byte_count, usage.bytes_free,
+ format_mheap_byte_count, usage.bytes_free_reclaimed,
+ format_mheap_byte_count, usage.bytes_overhead);
if (usage.bytes_max != ~0)
s = format (s, ", %U capacity", format_mheap_byte_count, usage.bytes_max);
@@ -1159,7 +1199,7 @@ u8 * format_mheap (u8 * s, va_list * va)
if (verbose > 1)
{
uword hist[MHEAP_N_BINS];
- mheap_elt_t * e;
+ mheap_elt_t *e;
uword i, n_hist;
memset (hist, 0, sizeof (hist));
@@ -1171,7 +1211,7 @@ u8 * format_mheap (u8 * s, va_list * va)
{
uword n_user_data_bytes = mheap_elt_data_bytes (e);
uword bin = user_data_size_to_bin_index (n_user_data_bytes);
- if (! e->is_free)
+ if (!e->is_free)
{
hist[bin] += 1;
n_hist += 1;
@@ -1188,21 +1228,20 @@ u8 * format_mheap (u8 * s, va_list * va)
continue;
s = format (s, "\n%U%12d%12wd%16.4f",
format_white_space, indent + 2,
- MHEAP_MIN_USER_DATA_BYTES + i * MHEAP_USER_DATA_WORD_BYTES,
- hist[i],
+ MHEAP_MIN_USER_DATA_BYTES +
+ i * MHEAP_USER_DATA_WORD_BYTES, hist[i],
(f64) hist[i] / (f64) n_hist);
}
}
if (verbose)
s = format (s, "\n%U%U",
- format_white_space, indent + 2,
- format_mheap_stats, h);
+ format_white_space, indent + 2, format_mheap_stats, h);
if ((h->flags & MHEAP_FLAG_TRACE) && vec_len (h->trace_main.traces) > 0)
{
/* Make a copy of traces since we'll be sorting them. */
- mheap_trace_t * t, * traces_copy;
+ mheap_trace_t *t, *traces_copy;
uword indent, total_objects_traced;
traces_copy = vec_dup (h->trace_main.traces);
@@ -1211,7 +1250,8 @@ u8 * format_mheap (u8 * s, va_list * va)
total_objects_traced = 0;
s = format (s, "\n");
- vec_foreach (t, traces_copy) {
+ vec_foreach (t, traces_copy)
+ {
/* Skip over free elements. */
if (t->n_allocations == 0)
continue;
@@ -1219,21 +1259,23 @@ u8 * format_mheap (u8 * s, va_list * va)
total_objects_traced += t->n_allocations;
/* When not verbose only report allocations of more than 1k. */
- if (! verbose && t->n_bytes < 1024)
- continue;
+ if (!verbose && t->n_bytes < 1024)
+ continue;
if (t == traces_copy)
- s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
- "Sample");
- s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations,
- t->offset + v);
+ s = format (s, "%=9s%=9s %=10s Traceback\n", "Bytes", "Count",
+ "Sample");
+ s = format (s, "%9d%9d %p", t->n_bytes, t->n_allocations,
+ t->offset + v);
indent = format_get_indent (s);
for (i = 0; i < ARRAY_LEN (t->callers) && t->callers[i]; i++)
{
if (i > 0)
s = format (s, "%U", format_white_space, indent);
#ifdef CLIB_UNIX
- s = format (s, " %U\n", format_clib_elf_symbol_with_address, t->callers[i]);
+ s =
+ format (s, " %U\n", format_clib_elf_symbol_with_address,
+ t->callers[i]);
#else
s = format (s, " %p\n", t->callers[i]);
#endif
@@ -1243,23 +1285,21 @@ u8 * format_mheap (u8 * s, va_list * va)
s = format (s, "%d total traced objects\n", total_objects_traced);
vec_free (traces_copy);
- }
+ }
first_corrupt = mheap_first_corrupt (v);
if (first_corrupt)
{
size = mheap_elt_data_bytes (first_corrupt);
s = format (s, "\n first corrupt object: %p, size %wd\n %U",
- first_corrupt,
- size,
- format_hex_bytes, first_corrupt, size);
+ first_corrupt, size, format_hex_bytes, first_corrupt, size);
}
/* FIXME. This output could be wrong in the unlikely case that format
uses the same mheap as we are currently inspecting. */
if (verbose > 1)
{
- mheap_elt_t * e;
+ mheap_elt_t *e;
uword i, o;
s = format (s, "\n");
@@ -1288,15 +1328,22 @@ u8 * format_mheap (u8 * s, va_list * va)
return s;
}
-void dmh (void * v)
-{ fformat (stderr, "%U", format_mheap, v, 1); }
+void
+dmh (void *v)
+{
+ fformat (stderr, "%U", format_mheap, v, 1);
+}
-static void mheap_validate_breakpoint ()
-{ os_panic (); }
+static void
+mheap_validate_breakpoint ()
+{
+ os_panic ();
+}
-void mheap_validate (void * v)
+void
+mheap_validate (void *v)
{
- mheap_t * h = mheap_header (v);
+ mheap_t *h = mheap_header (v);
uword i, s;
uword elt_count, elt_size;
@@ -1314,11 +1361,17 @@ void mheap_validate (void * v)
free_size_from_free_lists = free_count_from_free_lists = 0;
for (i = 0; i < ARRAY_LEN (h->first_free_elt_uoffset_by_bin); i++)
{
- mheap_elt_t * e, * n;
+ mheap_elt_t *e, *n;
uword is_first;
CHECK ((h->first_free_elt_uoffset_by_bin[i] != MHEAP_GROUNDED)
- == ((h->non_empty_free_elt_heads[i / BITS (uword)] & ((uword) 1 << (uword) (i % BITS (uword)))) != 0));
+ ==
+ ((h->non_empty_free_elt_heads[i /
+ BITS (uword)] & ((uword) 1 <<
+ (uword) (i %
+ BITS
+ (uword))))
+ != 0));
if (h->first_free_elt_uoffset_by_bin[i] == MHEAP_GROUNDED)
continue;
@@ -1365,7 +1418,7 @@ void mheap_validate (void * v)
{
if (h->small_object_cache.bins.as_u8[i] != 0)
{
- mheap_elt_t * e;
+ mheap_elt_t *e;
uword b = h->small_object_cache.bins.as_u8[i] - 1;
uword o = h->small_object_cache.offsets[i];
uword s;
@@ -1373,7 +1426,7 @@ void mheap_validate (void * v)
e = mheap_elt_at_uoffset (v, o);
/* Object must be allocated. */
- CHECK (! e->is_free);
+ CHECK (!e->is_free);
s = mheap_elt_data_bytes (e);
CHECK (user_data_size_to_bin_index (s) == b);
@@ -1384,18 +1437,18 @@ void mheap_validate (void * v)
}
{
- mheap_elt_t * e, * n;
+ mheap_elt_t *e, *n;
uword elt_free_size, elt_free_count;
elt_count = elt_size = elt_free_size = elt_free_count = 0;
- for (e = v;
- e->n_user_data != MHEAP_N_USER_DATA_INVALID;
- e = n)
+ for (e = v; e->n_user_data != MHEAP_N_USER_DATA_INVALID; e = n)
{
if (e->prev_n_user_data != MHEAP_N_USER_DATA_INVALID)
- CHECK (e->prev_n_user_data * sizeof (e->user_data[0]) >= MHEAP_MIN_USER_DATA_BYTES);
+ CHECK (e->prev_n_user_data * sizeof (e->user_data[0]) >=
+ MHEAP_MIN_USER_DATA_BYTES);
- CHECK (e->n_user_data * sizeof (e->user_data[0]) >= MHEAP_MIN_USER_DATA_BYTES);
+ CHECK (e->n_user_data * sizeof (e->user_data[0]) >=
+ MHEAP_MIN_USER_DATA_BYTES);
n = mheap_next_elt (e);
@@ -1412,21 +1465,20 @@ void mheap_validate (void * v)
}
/* Consecutive free objects should have been combined. */
- CHECK (! (e->prev_is_free && n->prev_is_free));
+ CHECK (!(e->prev_is_free && n->prev_is_free));
}
CHECK (free_count_from_free_lists == elt_free_count);
CHECK (free_size_from_free_lists == elt_free_size);
CHECK (elt_count == h->n_elts + elt_free_count + small_elt_free_count);
- CHECK (elt_size + (elt_count + 1) * MHEAP_ELT_OVERHEAD_BYTES == vec_len (v));
+ CHECK (elt_size + (elt_count + 1) * MHEAP_ELT_OVERHEAD_BYTES ==
+ vec_len (v));
}
{
- mheap_elt_t * e, * n;
+ mheap_elt_t *e, *n;
- for (e = v;
- e->n_user_data == MHEAP_N_USER_DATA_INVALID;
- e = n)
+ for (e = v; e->n_user_data == MHEAP_N_USER_DATA_INVALID; e = n)
{
n = mheap_next_elt (e);
CHECK (e->n_user_data == n->prev_n_user_data);
@@ -1440,21 +1492,22 @@ void mheap_validate (void * v)
h->validate_serial += 1;
}
-static void mheap_get_trace (void * v, uword offset, uword size)
+static void
+mheap_get_trace (void *v, uword offset, uword size)
{
- mheap_t * h;
- mheap_trace_main_t * tm;
- mheap_trace_t * t;
- uword i, n_callers, trace_index, * p;
+ mheap_t *h;
+ mheap_trace_main_t *tm;
+ mheap_trace_t *t;
+ uword i, n_callers, trace_index, *p;
mheap_trace_t trace;
/* Spurious Coverity warnings be gone. */
- memset(&trace, 0, sizeof(trace));
+ memset (&trace, 0, sizeof (trace));
n_callers = clib_backtrace (trace.callers, ARRAY_LEN (trace.callers),
/* Skip mheap_get_aligned's frame */ 1);
if (n_callers == 0)
- return;
+ return;
for (i = n_callers; i < ARRAY_LEN (trace.callers); i++)
trace.callers[i] = 0;
@@ -1462,8 +1515,9 @@ static void mheap_get_trace (void * v, uword offset, uword size)
h = mheap_header (v);
tm = &h->trace_main;
- if (! tm->trace_by_callers)
- tm->trace_by_callers = hash_create_mem (0, sizeof (trace.callers), sizeof (uword));
+ if (!tm->trace_by_callers)
+ tm->trace_by_callers =
+ hash_create_mem (0, sizeof (trace.callers), sizeof (uword));
p = hash_get_mem (tm->trace_by_callers, &trace.callers);
if (p)
@@ -1481,20 +1535,24 @@ static void mheap_get_trace (void * v, uword offset, uword size)
}
else
{
- mheap_trace_t * old_start = tm->traces;
- mheap_trace_t * old_end = vec_end (tm->traces);
+ mheap_trace_t *old_start = tm->traces;
+ mheap_trace_t *old_end = vec_end (tm->traces);
vec_add2 (tm->traces, t, 1);
- if (tm->traces != old_start) {
- hash_pair_t * p;
- mheap_trace_t * q;
- hash_foreach_pair (p, tm->trace_by_callers, ({
- q = uword_to_pointer (p->key, mheap_trace_t *);
- ASSERT (q >= old_start && q < old_end);
+ if (tm->traces != old_start)
+ {
+ hash_pair_t *p;
+ mheap_trace_t *q;
+ /* *INDENT-OFF* */
+ hash_foreach_pair (p, tm->trace_by_callers,
+ ({
+ q = uword_to_pointer (p->key, mheap_trace_t *);
+ ASSERT (q >= old_start && q < old_end);
p->key = pointer_to_uword (tm->traces + (q - old_start));
}));
- }
+ /* *INDENT-ON* */
+ }
trace_index = t - tm->traces;
}
@@ -1507,21 +1565,22 @@ static void mheap_get_trace (void * v, uword offset, uword size)
t->n_allocations += 1;
t->n_bytes += size;
- t->offset = offset; /* keep a sample to autopsy */
+ t->offset = offset; /* keep a sample to autopsy */
hash_set (tm->trace_index_by_offset, offset, t - tm->traces);
}
-static void mheap_put_trace (void * v, uword offset, uword size)
+static void
+mheap_put_trace (void *v, uword offset, uword size)
{
- mheap_t * h;
- mheap_trace_main_t * tm;
- mheap_trace_t * t;
- uword trace_index, * p;
+ mheap_t *h;
+ mheap_trace_main_t *tm;
+ mheap_trace_t *t;
+ uword trace_index, *p;
h = mheap_header (v);
tm = &h->trace_main;
p = hash_get (tm->trace_index_by_offset, offset);
- if (! p)
+ if (!p)
return;
trace_index = p[0];
@@ -1541,14 +1600,15 @@ static void mheap_put_trace (void * v, uword offset, uword size)
}
}
-static int mheap_trace_sort (const void * _t1, const void * _t2)
+static int
+mheap_trace_sort (const void *_t1, const void *_t2)
{
- const mheap_trace_t * t1 = _t1;
- const mheap_trace_t * t2 = _t2;
+ const mheap_trace_t *t1 = _t1;
+ const mheap_trace_t *t2 = _t2;
word cmp;
cmp = (word) t2->n_bytes - (word) t1->n_bytes;
- if (! cmp)
+ if (!cmp)
cmp = (word) t2->n_allocations - (word) t1->n_allocations;
return cmp;
}
@@ -1562,9 +1622,10 @@ mheap_trace_main_free (mheap_trace_main_t * tm)
hash_free (tm->trace_index_by_offset);
}
-void mheap_trace (void * v, int enable)
+void
+mheap_trace (void *v, int enable)
{
- mheap_t * h;
+ mheap_t *h;
h = mheap_header (v);
@@ -1578,3 +1639,11 @@ void mheap_trace (void * v, int enable)
h->flags &= ~MHEAP_FLAG_TRACE;
}
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
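
The mheap.c changes above are whitespace and style only; the allocator's contract is unchanged. As a usage sketch (not part of this patch; the function name sketch_mheap_usage and the sizes are illustrative), the reformatted entry points can be exercised roughly like this:

    #include <vppinfra/mheap.h>

    static void
    sketch_mheap_usage (void)
    {
      void *heap;
      uword offset;

      /* 0 => let mheap VM-allocate the backing memory itself. */
      heap = mheap_alloc (0, 64 << 20);

      /* The (possibly moved) heap vector and the object offset are returned;
         offset == ~0 means the allocation failed. */
      heap = mheap_get_aligned (heap, 128, /* align */ 16,
                                /* align_offset */ 0, &offset);
      if (offset != ~0)
        mheap_put (heap, offset);

      mheap_free (heap);
    }
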
diff --git a/vppinfra/vppinfra/mheap.h b/vppinfra/vppinfra/mheap.h
index a40c26cb60d..5b7cdfbaf8d 100644
--- a/vppinfra/vppinfra/mheap.h
+++ b/vppinfra/vppinfra/mheap.h
@@ -39,45 +39,56 @@
#define included_mheap_h
#include <vppinfra/vec.h>
-#include <vppinfra/error.h> /* clib_error_t */
-#include <vppinfra/mem.h> /* clib_mem_usage_t */
+#include <vppinfra/error.h> /* clib_error_t */
+#include <vppinfra/mem.h> /* clib_mem_usage_t */
#include <vppinfra/format.h> /* for unformat_input_t */
/* Allocate size bytes. New heap and offset are returned.
offset == ~0 means allocation failed. */
-always_inline void * mheap_get (void * v, uword size, uword * offset_return)
-{ return mheap_get_aligned (v, size, 0, 0, offset_return); }
+always_inline void *
+mheap_get (void *v, uword size, uword * offset_return)
+{
+ return mheap_get_aligned (v, size, 0, 0, offset_return);
+}
/* Create allocation heap of given size.
* The actual usable size is smaller than the requested size.
* memory_bytes must be greater than mheap_page_size + sizeof (mheap_t) + 16.
* Otherwise, allocation may fail and return 0.
*/
-void * mheap_alloc (void * memory, uword memory_bytes);
-void * mheap_alloc_with_flags (void * memory, uword memory_bytes, uword flags);
+void *mheap_alloc (void *memory, uword memory_bytes);
+void *mheap_alloc_with_flags (void *memory, uword memory_bytes, uword flags);
#define mheap_free(v) (v) = _mheap_free(v)
-void * _mheap_free (void * v);
+void *_mheap_free (void *v);
-void mheap_foreach (void * v,
- uword (* func) (void * arg, void * v, void * elt_data, uword elt_size),
- void * arg);
+void mheap_foreach (void *v,
+ uword (*func) (void *arg, void *v, void *elt_data,
+ uword elt_size), void *arg);
/* Format mheap data structures as string. */
-u8 * format_mheap (u8 * s, va_list * va);
+u8 *format_mheap (u8 * s, va_list * va);
/* Validate internal consistency. */
-void mheap_validate (void * h);
+void mheap_validate (void *h);
/* Query bytes used. */
-uword mheap_bytes (void * v);
+uword mheap_bytes (void *v);
-void mheap_usage (void * v, clib_mem_usage_t * usage);
+void mheap_usage (void *v, clib_mem_usage_t * usage);
 /* Enable/disable tracing. */
-void mheap_trace (void * v, int enable);
+void mheap_trace (void *v, int enable);
/* Test routine. */
int test_mheap_main (unformat_input_t * input);
#endif /* included_mheap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
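
The mheap_foreach prototype reformatted above takes a per-object callback; as the loop in mheap.c shows, returning nonzero from the callback stops the walk. A minimal sketch, assuming hypothetical names walk_ctx_t and count_live_objects:

    #include <vppinfra/mheap.h>

    typedef struct { uword n_objects, n_bytes; } walk_ctx_t;

    static uword
    count_live_objects (void *arg, void *heap, void *elt_data, uword elt_size)
    {
      walk_ctx_t *ctx = arg;
      ctx->n_objects += 1;
      ctx->n_bytes += elt_size;
      return 0;   /* 0 => keep walking; nonzero would stop the walk */
    }

    /* walk_ctx_t ctx = { 0 };
       mheap_foreach (heap, count_live_objects, &ctx); */
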
diff --git a/vppinfra/vppinfra/mheap_bootstrap.h b/vppinfra/vppinfra/mheap_bootstrap.h
index c6cff8e7b7d..4b21051bfcc 100644
--- a/vppinfra/vppinfra/mheap_bootstrap.h
+++ b/vppinfra/vppinfra/mheap_bootstrap.h
@@ -47,45 +47,47 @@
#include <vppinfra/vector.h>
/* Each element in heap is immediately followed by this struct. */
-typedef struct {
+typedef struct
+{
/* Number of mheap_size_t words of user data in previous object.
Used to find mheap_elt_t for previous object. */
#if CLIB_VEC64 > 0
- u64 prev_n_user_data : 63;
+ u64 prev_n_user_data:63;
 /* Used to mark end/start of doubly-linked list of mheap_elt_t's. */
#define MHEAP_N_USER_DATA_INVALID (0x7fffffffffffffffULL)
#define MHEAP_GROUNDED (~0ULL)
/* Set if previous object is free. */
- u64 prev_is_free : 1;
+ u64 prev_is_free:1;
/* Number of mheap_size_t words of user data that follow this object. */
- u64 n_user_data : 63;
+ u64 n_user_data:63;
/* Set if this object is on free list (and therefore following free_elt
is valid). */
- u64 is_free : 1;
+ u64 is_free:1;
#else
- u32 prev_n_user_data : 31;
+ u32 prev_n_user_data:31;
 /* Used to mark end/start of doubly-linked list of mheap_elt_t's. */
#define MHEAP_N_USER_DATA_INVALID (0x7fffffff)
#define MHEAP_GROUNDED (~0)
/* Set if previous object is free. */
- u32 prev_is_free : 1;
+ u32 prev_is_free:1;
/* Number of mheap_size_t words of user data that follow this object. */
- u32 n_user_data : 31;
+ u32 n_user_data:31;
/* Set if this object is on free list (and therefore following free_elt
is valid). */
- u32 is_free : 1;
+ u32 is_free:1;
#endif
-
- union {
+
+ union
+ {
#if CLIB_VEC64 > 0
/* For allocated objects: user data follows.
User data is allocated in units of typeof (user_data[0]). */
@@ -95,7 +97,8 @@ typedef struct {
~0 means end of doubly-linked list.
This is stored in user data (guaranteed to be at least 8 bytes)
but only for *free* objects. */
- struct {
+ struct
+ {
u64 next_uoffset, prev_uoffset;
} free_elt;
#else
@@ -107,7 +110,8 @@ typedef struct {
~0 means end of doubly-linked list.
This is stored in user data (guaranteed to be at least 8 bytes)
but only for *free* objects. */
- struct {
+ struct
+ {
u32 next_uoffset, prev_uoffset;
} free_elt;
#endif
@@ -123,7 +127,8 @@ typedef struct {
/* Number of byte in user data "words". */
#define MHEAP_USER_DATA_WORD_BYTES STRUCT_SIZE_OF (mheap_elt_t, user_data[0])
-typedef struct {
+typedef struct
+{
/* Address of callers: outer first, inner last. */
uword callers[12];
@@ -138,25 +143,26 @@ typedef struct {
u32 n_bytes;
/* Offset of this item */
- uword offset;
+ uword offset;
} mheap_trace_t;
-typedef struct {
- mheap_trace_t * traces;
+typedef struct
+{
+ mheap_trace_t *traces;
/* Indices of free traces. */
- u32 * trace_free_list;
+ u32 *trace_free_list;
/* Hash table mapping callers to trace index. */
- uword * trace_by_callers;
+ uword *trace_by_callers;
/* Hash table mapping mheap offset to trace index. */
- uword * trace_index_by_offset;
+ uword *trace_index_by_offset;
} mheap_trace_main_t;
/* Small object bin i is for objects with
- user_size > sizeof (mheap_elt_t) + sizeof (mheap_elt_t) * (i - 1)
- user_size <= sizeof (mheap_elt_t) + sizeof (mheap_size_t) * i. */
+ user_size > sizeof (mheap_elt_t) + sizeof (mheap_elt_t) * (i - 1)
+ user_size <= sizeof (mheap_elt_t) + sizeof (mheap_size_t) * i. */
#define MHEAP_LOG2_N_SMALL_OBJECT_BINS 8
#define MHEAP_N_SMALL_OBJECT_BINS (1 << MHEAP_LOG2_N_SMALL_OBJECT_BINS)
@@ -164,8 +170,10 @@ typedef struct {
(MHEAP_N_SMALL_OBJECT_BINS \
+ (STRUCT_BITS_OF (mheap_elt_t, user_data[0]) - MHEAP_LOG2_N_SMALL_OBJECT_BINS))
-typedef struct {
- struct {
+typedef struct
+{
+ struct
+ {
u64 n_search_attempts;
u64 n_objects_searched;
u64 n_objects_found;
@@ -193,8 +201,10 @@ typedef struct {
#endif
/* For objects with align == 4 and align_offset == 0 (e.g. vector strings). */
-typedef struct {
- union {
+typedef struct
+{
+ union
+ {
#ifdef CLIB_HAVE_VEC128
u8x16 as_u8x16[BITS (uword) / 16];
#endif
@@ -209,7 +219,8 @@ typedef struct {
} mheap_small_object_cache_t;
/* Vec header for heaps. */
-typedef struct {
+typedef struct
+{
/* User offsets for head of doubly-linked list of free objects of this size. */
#if CLIB_VEC64 > 0
u64 first_free_elt_uoffset_by_bin[MHEAP_N_BINS];
@@ -218,7 +229,8 @@ typedef struct {
#endif
/* Bitmap of non-empty free list bins. */
- uword non_empty_free_elt_heads[(MHEAP_N_BINS + BITS (uword) - 1) / BITS (uword)];
+ uword non_empty_free_elt_heads[(MHEAP_N_BINS + BITS (uword) - 1) /
+ BITS (uword)];
mheap_small_object_cache_t small_object_cache;
@@ -254,50 +266,75 @@ typedef struct {
mheap_stats_t stats;
} mheap_t;
-always_inline mheap_t * mheap_header (u8 * v)
-{ return vec_aligned_header (v, sizeof (mheap_t), 16); }
+always_inline mheap_t *
+mheap_header (u8 * v)
+{
+ return vec_aligned_header (v, sizeof (mheap_t), 16);
+}
-always_inline u8 * mheap_vector (mheap_t * h)
-{ return vec_aligned_header_end (h, sizeof (mheap_t), 16); }
+always_inline u8 *
+mheap_vector (mheap_t * h)
+{
+ return vec_aligned_header_end (h, sizeof (mheap_t), 16);
+}
-always_inline uword mheap_elt_uoffset (void * v, mheap_elt_t * e)
-{ return (uword)e->user_data - (uword)v; }
+always_inline uword
+mheap_elt_uoffset (void *v, mheap_elt_t * e)
+{
+ return (uword) e->user_data - (uword) v;
+}
-always_inline mheap_elt_t * mheap_user_pointer_to_elt (void *v)
-{ return v - STRUCT_OFFSET_OF (mheap_elt_t, user_data); }
+always_inline mheap_elt_t *
+mheap_user_pointer_to_elt (void *v)
+{
+ return v - STRUCT_OFFSET_OF (mheap_elt_t, user_data);
+}
 /* For debugging we keep track of offsets for valid objects.
    We make sure the user is not trying to free an object with an invalid offset. */
-always_inline uword mheap_offset_is_valid (void * v, uword uo)
-{ return uo >= MHEAP_ELT_OVERHEAD_BYTES && uo <= vec_len (v); }
+always_inline uword
+mheap_offset_is_valid (void *v, uword uo)
+{
+ return uo >= MHEAP_ELT_OVERHEAD_BYTES && uo <= vec_len (v);
+}
-always_inline mheap_elt_t * mheap_elt_at_uoffset (void * v, uword uo)
+always_inline mheap_elt_t *
+mheap_elt_at_uoffset (void *v, uword uo)
{
ASSERT (mheap_offset_is_valid (v, uo));
return (mheap_elt_t *) (v + uo - STRUCT_OFFSET_OF (mheap_elt_t, user_data));
}
-always_inline void * mheap_elt_data (void * v, mheap_elt_t * e)
-{ return v + mheap_elt_uoffset (v, e); }
+always_inline void *
+mheap_elt_data (void *v, mheap_elt_t * e)
+{
+ return v + mheap_elt_uoffset (v, e);
+}
-always_inline uword mheap_elt_data_bytes (mheap_elt_t * e)
-{ return e->n_user_data * sizeof (e->user_data[0]); }
+always_inline uword
+mheap_elt_data_bytes (mheap_elt_t * e)
+{
+ return e->n_user_data * sizeof (e->user_data[0]);
+}
-always_inline uword mheap_data_bytes (void * v, uword uo)
+always_inline uword
+mheap_data_bytes (void *v, uword uo)
{
- mheap_elt_t * e = mheap_elt_at_uoffset (v, uo);
+ mheap_elt_t *e = mheap_elt_at_uoffset (v, uo);
return mheap_elt_data_bytes (e);
}
#define mheap_len(v,d) (mheap_data_bytes((v),(void *) (d) - (void *) (v)) / sizeof ((d)[0]))
-always_inline mheap_elt_t * mheap_next_elt (mheap_elt_t * e)
+always_inline mheap_elt_t *
+mheap_next_elt (mheap_elt_t * e)
{
ASSERT (e->n_user_data < MHEAP_N_USER_DATA_INVALID);
return (mheap_elt_t *) (e->user_data + e->n_user_data);
}
-always_inline mheap_elt_t * mheap_prev_elt (mheap_elt_t * e)
+always_inline mheap_elt_t *
+mheap_prev_elt (mheap_elt_t * e)
{
ASSERT (e->prev_n_user_data < MHEAP_N_USER_DATA_INVALID);
return ((void *) e
@@ -307,17 +344,31 @@ always_inline mheap_elt_t * mheap_prev_elt (mheap_elt_t * e)
/* Exported operations. */
-always_inline uword mheap_elts (void * v)
-{ return v ? mheap_header (v)->n_elts : 0; }
+always_inline uword
+mheap_elts (void *v)
+{
+ return v ? mheap_header (v)->n_elts : 0;
+}
-always_inline uword mheap_max_size (void * v)
-{ return v ? mheap_header (v)->max_size : ~0; }
+always_inline uword
+mheap_max_size (void *v)
+{
+ return v ? mheap_header (v)->max_size : ~0;
+}
/* Free previously allocated offset. */
-void mheap_put (void * v, uword offset);
+void mheap_put (void *v, uword offset);
/* Allocate object from mheap. */
-void * mheap_get_aligned (void * v, uword size, uword align, uword align_offset,
- uword * offset_return);
+void *mheap_get_aligned (void *v, uword size, uword align, uword align_offset,
+ uword * offset_return);
#endif /* included_mem_mheap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
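
The element header reworked above records both its own size (n_user_data) and its predecessor's size (prev_n_user_data), so neighbouring elements are reached arithmetically rather than through stored pointers. A sketch of walking a heap this way, mirroring the loops in mheap.c (the function name sketch_walk_elements is illustrative only, and the heap is assumed valid and, if shared, already locked):

    #include <vppinfra/mheap.h>

    static void
    sketch_walk_elements (void *v)
    {
      mheap_elt_t *e;

      /* The first element sits at the start of the heap vector; the
         terminating element has n_user_data == MHEAP_N_USER_DATA_INVALID. */
      for (e = v; e->n_user_data != MHEAP_N_USER_DATA_INVALID;
           e = mheap_next_elt (e))
        {
          if (e->is_free)
            continue;
          /* Live object: user data starts at e->user_data and spans
             mheap_elt_data_bytes (e) bytes. */
        }
    }
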
diff --git a/vppinfra/vppinfra/mod_test_hash.c b/vppinfra/vppinfra/mod_test_hash.c
index dd13055dd4a..b3fa676d2e2 100644
--- a/vppinfra/vppinfra/mod_test_hash.c
+++ b/vppinfra/vppinfra/mod_test_hash.c
@@ -15,6 +15,13 @@
#include <vppinfra/linux_kernel_init.h>
#include <vppinfra/hash.h>
-CLIB_LINUX_KERNEL_MODULE ("test_hash",
- test_hash_main,
+CLIB_LINUX_KERNEL_MODULE ("test_hash", test_hash_main,
/* kernel-thread flags */ 0 & CLONE_KERNEL);
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/os.h b/vppinfra/vppinfra/os.h
index 124864f5578..a5c74f8ce72 100644
--- a/vppinfra/vppinfra/os.h
+++ b/vppinfra/vppinfra/os.h
@@ -62,3 +62,11 @@ uword os_get_ncpus (void);
#include <vppinfra/smp.h>
#endif /* included_os_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/pfhash.c b/vppinfra/vppinfra/pfhash.c
index 81a5f491bd5..3b9fa8f34aa 100644
--- a/vppinfra/vppinfra/pfhash.c
+++ b/vppinfra/vppinfra/pfhash.c
@@ -19,21 +19,30 @@
/* This is incredibly handy when debugging */
u32 vl (void *v) __attribute__ ((weak));
-u32 vl (void *v) { return vec_len(v); }
+u32
+vl (void *v)
+{
+ return vec_len (v);
+}
#if defined(CLIB_HAVE_VEC128) && ! defined (__ALTIVEC__)
-typedef struct {
- u8 * key[16];
+typedef struct
+{
+ u8 *key[16];
u64 value;
} pfhash_show_t;
-static int sh_compare (pfhash_show_t * sh0, pfhash_show_t * sh1)
-{ return ((i32)(sh0->value) - ((i32)sh1->value)); }
+static int
+sh_compare (pfhash_show_t * sh0, pfhash_show_t * sh1)
+{
+ return ((i32) (sh0->value) - ((i32) sh1->value));
+}
-u8 * format_pfhash (u8 * s, va_list * args)
+u8 *
+format_pfhash (u8 * s, va_list * args)
{
- pfhash_t * p = va_arg (*args, pfhash_t *);
+ pfhash_t *p = va_arg (*args, pfhash_t *);
int verbose = va_arg (*args, int);
if (p == 0 || p->overflow_hash == 0 || p->buckets == 0)
@@ -43,122 +52,131 @@ u8 * format_pfhash (u8 * s, va_list * args)
}
s = format (s, "Prefetch hash '%s'\n", p->name);
- s = format (s, " %d buckets, %u bucket overflows, %.1f%% bucket overflow \n",
- vec_len(p->buckets), p->overflow_count,
- 100.0*((f64)p->overflow_count)/((f64)vec_len(p->buckets)));
+ s =
+ format (s, " %d buckets, %u bucket overflows, %.1f%% bucket overflow \n",
+ vec_len (p->buckets), p->overflow_count,
+ 100.0 * ((f64) p->overflow_count) / ((f64) vec_len (p->buckets)));
if (p->nitems)
- s = format (s, " %u items, %u items in overflow, %.1f%% items in overflow\n",
- p->nitems, p->nitems_in_overflow,
- 100.0*((f64)p->nitems_in_overflow)/((f64)p->nitems));
+ s =
+ format (s,
+ " %u items, %u items in overflow, %.1f%% items in overflow\n",
+ p->nitems, p->nitems_in_overflow,
+ 100.0 * ((f64) p->nitems_in_overflow) / ((f64) p->nitems));
if (verbose)
{
- pfhash_show_t * shs = 0, * sh;
- hash_pair_t * hp;
+ pfhash_show_t *shs = 0, *sh;
+ hash_pair_t *hp;
int i, j;
for (i = 0; i < vec_len (p->buckets); i++)
- {
- pfhash_kv_t * kv;
- pfhash_kv_16_t * kv16;
- pfhash_kv_8_t * kv8;
- pfhash_kv_8v8_t * kv8v8;
- pfhash_kv_4_t * kv4;
-
- if (p->buckets[i] == 0 || p->buckets[i] == PFHASH_BUCKET_OVERFLOW)
- continue;
-
- kv = pool_elt_at_index (p->kvp, p->buckets[i]);
-
- switch (p->key_size)
- {
- case 16:
- kv16 = &kv->kv16;
- for (j = 0; j < 3; j++)
- {
- if (kv16->values[j] != (u32)~0)
- {
- vec_add2 (shs, sh, 1);
- clib_memcpy (sh->key, &kv16->kb.k_u32x4[j], p->key_size);
- sh->value = kv16->values[j];
- }
- }
- break;
- case 8:
- if (p->value_size == 4)
- {
- kv8 = &kv->kv8;
- for (j = 0; j < 5; j++)
- {
- if (kv8->values[j] != (u32)~0)
- {
- vec_add2 (shs, sh, 1);
- clib_memcpy (sh->key, &kv8->kb.k_u64[j], p->key_size);
- sh->value = kv8->values[j];
- }
- }
- }
- else
- {
- kv8v8 = &kv->kv8v8;
- for (j = 0; j < 4; j++)
- {
- if (kv8v8->values[j] != (u64)~0)
- {
- vec_add2 (shs, sh, 1);
- clib_memcpy (sh->key, &kv8v8->kb.k_u64[j], p->key_size);
- sh->value = kv8v8->values[j];
- }
- }
-
- }
- break;
- case 4:
- kv4 = &kv->kv4;
- for (j = 0; j < 8; j++)
- {
- if (kv4->values[j] != (u32)~0)
- {
- vec_add2 (shs, sh, 1);
- clib_memcpy (sh->key, &kv4->kb.kb[j], p->key_size);
- sh->value = kv4->values[j];
- }
- }
- break;
- }
- }
-
- hash_foreach_pair (hp, p->overflow_hash,
+ {
+ pfhash_kv_t *kv;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
+
+ if (p->buckets[i] == 0 || p->buckets[i] == PFHASH_BUCKET_OVERFLOW)
+ continue;
+
+ kv = pool_elt_at_index (p->kvp, p->buckets[i]);
+
+ switch (p->key_size)
+ {
+ case 16:
+ kv16 = &kv->kv16;
+ for (j = 0; j < 3; j++)
+ {
+ if (kv16->values[j] != (u32) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv16->kb.k_u32x4[j],
+ p->key_size);
+ sh->value = kv16->values[j];
+ }
+ }
+ break;
+ case 8:
+ if (p->value_size == 4)
+ {
+ kv8 = &kv->kv8;
+ for (j = 0; j < 5; j++)
+ {
+ if (kv8->values[j] != (u32) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv8->kb.k_u64[j],
+ p->key_size);
+ sh->value = kv8->values[j];
+ }
+ }
+ }
+ else
+ {
+ kv8v8 = &kv->kv8v8;
+ for (j = 0; j < 4; j++)
+ {
+ if (kv8v8->values[j] != (u64) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv8v8->kb.k_u64[j],
+ p->key_size);
+ sh->value = kv8v8->values[j];
+ }
+ }
+
+ }
+ break;
+ case 4:
+ kv4 = &kv->kv4;
+ for (j = 0; j < 8; j++)
+ {
+ if (kv4->values[j] != (u32) ~ 0)
+ {
+ vec_add2 (shs, sh, 1);
+ clib_memcpy (sh->key, &kv4->kb.kb[j], p->key_size);
+ sh->value = kv4->values[j];
+ }
+ }
+ break;
+ }
+ }
+
+ /* *INDENT-OFF* */
+ hash_foreach_pair (hp, p->overflow_hash,
({
vec_add2 (shs, sh, 1);
clib_memcpy (sh->key, (u8 *)hp->key, p->key_size);
sh->value = hp->value[0];
- }));
+ }));
+ /* *INDENT-ON* */
vec_sort_with_function (shs, sh_compare);
for (i = 0; i < vec_len (shs); i++)
- {
- sh = vec_elt_at_index (shs, i);
- s = format (s, " %U value %u\n", format_hex_bytes, sh->key,
- p->key_size, sh->value);
- }
+ {
+ sh = vec_elt_at_index (shs, i);
+ s = format (s, " %U value %u\n", format_hex_bytes, sh->key,
+ p->key_size, sh->value);
+ }
vec_free (shs);
}
return s;
}
-void abort(void);
+void abort (void);
-void pfhash_init (pfhash_t * p, char * name, u32 key_size, u32 value_size,
- u32 nbuckets)
+void
+pfhash_init (pfhash_t * p, char *name, u32 key_size, u32 value_size,
+ u32 nbuckets)
{
- pfhash_kv_t * kv;
+ pfhash_kv_t *kv;
memset (p, 0, sizeof (*p));
u32 key_bytes;
-
- switch(key_size)
+
+ switch (key_size)
{
case 4:
key_bytes = 4;
@@ -170,78 +188,80 @@ void pfhash_init (pfhash_t * p, char * name, u32 key_size, u32 value_size,
key_bytes = 16;
break;
default:
- ASSERT(0);
- abort();
+ ASSERT (0);
+ abort ();
}
- switch(value_size)
+ switch (value_size)
{
case 4:
case 8:
break;
- default:
- ASSERT(0);
- abort();
+ default:
+ ASSERT (0);
+ abort ();
}
p->name = format (0, "%s", name);
- vec_add1(p->name, 0);
+ vec_add1 (p->name, 0);
p->overflow_hash = hash_create_mem (0, key_bytes, sizeof (uword));
nbuckets = 1 << (max_log2 (nbuckets));
/* This sets the entire bucket array to zero */
- vec_validate (p->buckets, nbuckets-1);
+ vec_validate (p->buckets, nbuckets - 1);
p->key_size = key_size;
p->value_size = value_size;
- /*
- * Unset buckets implicitly point at the 0th pool elt.
+ /*
+ * Unset buckets implicitly point at the 0th pool elt.
* All search routines will return ~0 if they go there.
*/
pool_get_aligned (p->kvp, kv, 16);
memset (kv, 0xff, sizeof (*kv));
}
-static pfhash_kv_16_t * pfhash_get_kv_16 (pfhash_t * p, u32 bucket_contents,
- u32x4 * key, u32 *match_index)
+static pfhash_kv_16_t *
+pfhash_get_kv_16 (pfhash_t * p, u32 bucket_contents,
+ u32x4 * key, u32 * match_index)
{
u32x4 diff[3];
u32 is_equal[3];
pfhash_kv_16_t *kv = 0;
-
- *match_index = (u32)~0;
+
+ *match_index = (u32) ~ 0;
kv = &p->kvp[bucket_contents].kv16;
diff[0] = u32x4_sub (kv->kb.k_u32x4[0], key[0]);
diff[1] = u32x4_sub (kv->kb.k_u32x4[1], key[0]);
diff[2] = u32x4_sub (kv->kb.k_u32x4[2], key[0]);
-
+
is_equal[0] = u32x4_zero_byte_mask (diff[0]) == 0xffff;
is_equal[1] = u32x4_zero_byte_mask (diff[1]) == 0xffff;
is_equal[2] = u32x4_zero_byte_mask (diff[2]) == 0xffff;
-
+
if (is_equal[0])
*match_index = 0;
if (is_equal[1])
*match_index = 1;
if (is_equal[2])
*match_index = 2;
-
+
return kv;
}
-static pfhash_kv_8_t * pfhash_get_kv_8 (pfhash_t * p, u32 bucket_contents,
- u64 * key, u32 * match_index)
+static pfhash_kv_8_t *
+pfhash_get_kv_8 (pfhash_t * p, u32 bucket_contents,
+ u64 * key, u32 * match_index)
{
pfhash_kv_8_t *kv;
-
- *match_index = (u32)~0;
-
+
+ *match_index = (u32) ~ 0;
+
kv = &p->kvp[bucket_contents].kv8;
-
+
if (kv->kb.k_u64[0] == key[0])
*match_index = 0;
if (kv->kb.k_u64[1] == key[0])
@@ -252,20 +272,20 @@ static pfhash_kv_8_t * pfhash_get_kv_8 (pfhash_t * p, u32 bucket_contents,
*match_index = 3;
if (kv->kb.k_u64[4] == key[0])
*match_index = 4;
-
+
return kv;
}
-static pfhash_kv_8v8_t * pfhash_get_kv_8v8 (pfhash_t * p,
- u32 bucket_contents,
- u64 * key, u32 * match_index)
+static pfhash_kv_8v8_t *
+pfhash_get_kv_8v8 (pfhash_t * p,
+ u32 bucket_contents, u64 * key, u32 * match_index)
{
pfhash_kv_8v8_t *kv;
- *match_index = (u32)~0;
-
+ *match_index = (u32) ~ 0;
+
kv = &p->kvp[bucket_contents].kv8v8;
-
+
if (kv->kb.k_u64[0] == key[0])
*match_index = 0;
if (kv->kb.k_u64[1] == key[0])
@@ -278,87 +298,94 @@ static pfhash_kv_8v8_t * pfhash_get_kv_8v8 (pfhash_t * p,
return kv;
}
-static pfhash_kv_4_t * pfhash_get_kv_4 (pfhash_t * p, u32 bucket_contents,
- u32 * key, u32 * match_index)
+static pfhash_kv_4_t *
+pfhash_get_kv_4 (pfhash_t * p, u32 bucket_contents,
+ u32 * key, u32 * match_index)
{
u32x4 vector_key;
u32x4 is_equal[2];
u32 zbm[2], winner_index;
pfhash_kv_4_t *kv;
- *match_index = (u32)~0;
-
+ *match_index = (u32) ~ 0;
+
kv = &p->kvp[bucket_contents].kv4;
-
+
vector_key = u32x4_splat (key[0]);
-
+
is_equal[0] = u32x4_is_equal (kv->kb.k_u32x4[0], vector_key);
is_equal[1] = u32x4_is_equal (kv->kb.k_u32x4[1], vector_key);
zbm[0] = ~u32x4_zero_byte_mask (is_equal[0]) & 0xFFFF;
zbm[1] = ~u32x4_zero_byte_mask (is_equal[1]) & 0xFFFF;
-
- if (PREDICT_FALSE((zbm[0] == 0) &&(zbm[1] == 0)))
+
+ if (PREDICT_FALSE ((zbm[0] == 0) && (zbm[1] == 0)))
return kv;
-
- winner_index = min_log2 (zbm[0])>>2;
- winner_index = zbm[1] ? (4 + (min_log2 (zbm[1])>>2)) : winner_index;
-
+
+ winner_index = min_log2 (zbm[0]) >> 2;
+ winner_index = zbm[1] ? (4 + (min_log2 (zbm[1]) >> 2)) : winner_index;
+
*match_index = winner_index;
return kv;
}
-static pfhash_kv_t * pfhash_get_internal (pfhash_t * p, u32 bucket_contents,
- void * key, u32 *match_index)
+static pfhash_kv_t *
+pfhash_get_internal (pfhash_t * p, u32 bucket_contents,
+ void *key, u32 * match_index)
{
pfhash_kv_t *kv = 0;
switch (p->key_size)
{
case 16:
- kv = (pfhash_kv_t *) pfhash_get_kv_16 (p, bucket_contents, key, match_index);
+ kv =
+ (pfhash_kv_t *) pfhash_get_kv_16 (p, bucket_contents, key,
+ match_index);
break;
case 8:
if (p->value_size == 4)
- kv = (pfhash_kv_t *) pfhash_get_kv_8 (p, bucket_contents,
- key, match_index);
+ kv = (pfhash_kv_t *) pfhash_get_kv_8 (p, bucket_contents,
+ key, match_index);
else
- kv = (pfhash_kv_t *) pfhash_get_kv_8v8 (p, bucket_contents,
- key, match_index);
+ kv = (pfhash_kv_t *) pfhash_get_kv_8v8 (p, bucket_contents,
+ key, match_index);
break;
case 4:
- kv = (pfhash_kv_t *) pfhash_get_kv_4 (p, bucket_contents, key, match_index);
+ kv =
+ (pfhash_kv_t *) pfhash_get_kv_4 (p, bucket_contents, key,
+ match_index);
break;
default:
- ASSERT(0);
+ ASSERT (0);
}
return kv;
}
-u64 pfhash_get (pfhash_t * p, u32 bucket, void * key)
+u64
+pfhash_get (pfhash_t * p, u32 bucket, void *key)
{
pfhash_kv_t *kv;
- u32 match_index=~0;
- pfhash_kv_16_t * kv16;
- pfhash_kv_8_t * kv8;
- pfhash_kv_8v8_t * kv8v8;
- pfhash_kv_4_t * kv4;
+ u32 match_index = ~0;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
u32 bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
if (bucket_contents == PFHASH_BUCKET_OVERFLOW)
{
- uword * hp;
+ uword *hp;
hp = hash_get_mem (p->overflow_hash, key);
if (hp)
- return hp[0];
- return (u64)~0;
+ return hp[0];
+ return (u64) ~ 0;
}
kv = pfhash_get_internal (p, bucket_contents, key, &match_index);
- if (match_index == (u32)~0)
- return (u64)~0;
-
+ if (match_index == (u32) ~ 0)
+ return (u64) ~ 0;
+
kv16 = (void *) kv;
kv8 = (void *) kv;
kv4 = (void *) kv;
@@ -367,32 +394,33 @@ u64 pfhash_get (pfhash_t * p, u32 bucket, void * key)
switch (p->key_size)
{
case 16:
- return (kv16->values[match_index] == (u32)~0)
- ? (u64)~0 : (u64) kv16->values[match_index];
+ return (kv16->values[match_index] == (u32) ~ 0)
+ ? (u64) ~ 0 : (u64) kv16->values[match_index];
case 8:
if (p->value_size == 4)
- return (kv8->values[match_index] == (u32)~0)
- ? (u64)~0 : (u64) kv8->values[match_index];
+ return (kv8->values[match_index] == (u32) ~ 0)
+ ? (u64) ~ 0 : (u64) kv8->values[match_index];
else
- return kv8v8->values[match_index];
+ return kv8v8->values[match_index];
case 4:
- return (kv4->values[match_index] == (u32)~0)
- ? (u64)~0 : (u64) kv4->values[match_index];
+ return (kv4->values[match_index] == (u32) ~ 0)
+ ? (u64) ~ 0 : (u64) kv4->values[match_index];
default:
- ASSERT(0);
+ ASSERT (0);
}
- return (u64) ~0;
+ return (u64) ~ 0;
}
-void pfhash_set (pfhash_t * p, u32 bucket, void * key, void * value)
+void
+pfhash_set (pfhash_t * p, u32 bucket, void *key, void *value)
{
u32 bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
- u32 match_index = (u32)~0;
+ u32 match_index = (u32) ~ 0;
pfhash_kv_t *kv;
- pfhash_kv_16_t * kv16;
- pfhash_kv_8_t * kv8;
- pfhash_kv_8v8_t * kv8v8;
- pfhash_kv_4_t * kv4;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
int i;
u8 *kcopy;
@@ -401,12 +429,12 @@ void pfhash_set (pfhash_t * p, u32 bucket, void * key, void * value)
hash_pair_t *hp;
hp = hash_get_pair_mem (p->overflow_hash, key);
if (hp)
- {
- clib_warning ("replace value 0x%08x with value 0x%08x",
- hp->value[0], (u64) value);
- hp->value[0] = (u64) value;
- return;
- }
+ {
+ clib_warning ("replace value 0x%08x with value 0x%08x",
+ hp->value[0], (u64) value);
+ hp->value[0] = (u64) value;
+ return;
+ }
kcopy = clib_mem_alloc (p->key_size);
clib_memcpy (kcopy, key, p->key_size);
hash_set_mem (p->overflow_hash, kcopy, value);
@@ -424,57 +452,57 @@ void pfhash_set (pfhash_t * p, u32 bucket, void * key, void * value)
else
kv = pfhash_get_internal (p, bucket_contents, key, &match_index);
- kv16 = (void *)kv;
- kv8 = (void *)kv;
- kv8v8 = (void *)kv;
- kv4 = (void *)kv;
+ kv16 = (void *) kv;
+ kv8 = (void *) kv;
+ kv8v8 = (void *) kv;
+ kv4 = (void *) kv;
p->nitems++;
- if (match_index != (u32)~0)
+ if (match_index != (u32) ~ 0)
{
switch (p->key_size)
- {
- case 16:
- kv16->values[match_index] = (u32)(u64) value;
- return;
-
- case 8:
- if (p->value_size == 4)
- kv8->values[match_index] = (u32)(u64) value;
- else
- kv8v8->values[match_index] = (u64) value;
- return;
-
- case 4:
- kv4->values[match_index] = (u64) value;
- return;
-
- default:
- ASSERT(0);
- }
+ {
+ case 16:
+ kv16->values[match_index] = (u32) (u64) value;
+ return;
+
+ case 8:
+ if (p->value_size == 4)
+ kv8->values[match_index] = (u32) (u64) value;
+ else
+ kv8v8->values[match_index] = (u64) value;
+ return;
+
+ case 4:
+ kv4->values[match_index] = (u64) value;
+ return;
+
+ default:
+ ASSERT (0);
+ }
}
switch (p->key_size)
{
case 16:
for (i = 0; i < 3; i++)
- {
- if (kv16->values[i] == (u32)~0)
- {
- clib_memcpy (&kv16->kb.k_u32x4[i], key, p->key_size);
- kv16->values[i] = (u32)(u64) value;
- return;
- }
- }
+ {
+ if (kv16->values[i] == (u32) ~ 0)
+ {
+ clib_memcpy (&kv16->kb.k_u32x4[i], key, p->key_size);
+ kv16->values[i] = (u32) (u64) value;
+ return;
+ }
+ }
/* copy bucket contents to overflow hash tbl */
- for (i = 0; i < 3; i++)
- {
- kcopy = clib_mem_alloc (p->key_size);
- clib_memcpy (kcopy, &kv16->kb.k_u32x4[i], p->key_size);
- hash_set_mem (p->overflow_hash, kcopy, kv16->values[i]);
- p->nitems_in_overflow++;
- }
+ for (i = 0; i < 3; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv16->kb.k_u32x4[i], p->key_size);
+ hash_set_mem (p->overflow_hash, kcopy, kv16->values[i]);
+ p->nitems_in_overflow++;
+ }
/* Add new key to overflow */
kcopy = clib_mem_alloc (p->key_size);
clib_memcpy (kcopy, key, p->key_size);
@@ -486,46 +514,46 @@ void pfhash_set (pfhash_t * p, u32 bucket, void * key, void * value)
case 8:
if (p->value_size == 4)
- {
- for (i = 0; i < 5; i++)
- {
- if (kv8->values[i] == (u32)~0)
- {
- clib_memcpy (&kv8->kb.k_u64[i], key, 8);
- kv8->values[i] = (u32)(u64) value;
- return;
- }
- }
- /* copy bucket contents to overflow hash tbl */
- for (i = 0; i < 5; i++)
- {
- kcopy = clib_mem_alloc (p->key_size);
- clib_memcpy (kcopy, &kv8->kb.k_u64[i], 8);
- hash_set_mem (p->overflow_hash, kcopy, kv8->values[i]);
- p->nitems_in_overflow++;
- }
- }
+ {
+ for (i = 0; i < 5; i++)
+ {
+ if (kv8->values[i] == (u32) ~ 0)
+ {
+ clib_memcpy (&kv8->kb.k_u64[i], key, 8);
+ kv8->values[i] = (u32) (u64) value;
+ return;
+ }
+ }
+ /* copy bucket contents to overflow hash tbl */
+ for (i = 0; i < 5; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv8->kb.k_u64[i], 8);
+ hash_set_mem (p->overflow_hash, kcopy, kv8->values[i]);
+ p->nitems_in_overflow++;
+ }
+ }
else
- {
- for (i = 0; i < 4; i++)
- {
- if (kv8v8->values[i] == (u64)~0)
- {
- clib_memcpy (&kv8v8->kb.k_u64[i], key, 8);
- kv8v8->values[i] = (u64) value;
- return;
- }
- }
- /* copy bucket contents to overflow hash tbl */
- for (i = 0; i < 4; i++)
- {
- kcopy = clib_mem_alloc (p->key_size);
- clib_memcpy (kcopy, &kv8v8->kb.k_u64[i], 8);
- hash_set_mem (p->overflow_hash, kcopy, kv8v8->values[i]);
- p->nitems_in_overflow++;
- }
-
- }
+ {
+ for (i = 0; i < 4; i++)
+ {
+ if (kv8v8->values[i] == (u64) ~ 0)
+ {
+ clib_memcpy (&kv8v8->kb.k_u64[i], key, 8);
+ kv8v8->values[i] = (u64) value;
+ return;
+ }
+ }
+ /* copy bucket contents to overflow hash tbl */
+ for (i = 0; i < 4; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv8v8->kb.k_u64[i], 8);
+ hash_set_mem (p->overflow_hash, kcopy, kv8v8->values[i]);
+ p->nitems_in_overflow++;
+ }
+
+ }
/* Add new key to overflow */
kcopy = clib_mem_alloc (p->key_size);
clib_memcpy (kcopy, key, p->key_size);
@@ -537,22 +565,22 @@ void pfhash_set (pfhash_t * p, u32 bucket, void * key, void * value)
case 4:
for (i = 0; i < 8; i++)
- {
- if (kv4->values[i] == (u32)~0)
- {
- clib_memcpy (&kv4->kb.kb[i], key, 4);
- kv4->values[i] = (u32)(u64) value;
- return;
- }
- }
+ {
+ if (kv4->values[i] == (u32) ~ 0)
+ {
+ clib_memcpy (&kv4->kb.kb[i], key, 4);
+ kv4->values[i] = (u32) (u64) value;
+ return;
+ }
+ }
/* copy bucket contents to overflow hash tbl */
- for (i = 0; i < 8; i++)
- {
- kcopy = clib_mem_alloc (p->key_size);
- clib_memcpy (kcopy, &kv4->kb.kb[i], 4);
- hash_set_mem (p->overflow_hash, kcopy, kv4->values[i]);
- p->nitems_in_overflow++;
- }
+ for (i = 0; i < 8; i++)
+ {
+ kcopy = clib_mem_alloc (p->key_size);
+ clib_memcpy (kcopy, &kv4->kb.kb[i], 4);
+ hash_set_mem (p->overflow_hash, kcopy, kv4->values[i]);
+ p->nitems_in_overflow++;
+ }
/* Add new key to overflow */
kcopy = clib_mem_alloc (p->key_size);
clib_memcpy (kcopy, key, p->key_size);
@@ -563,83 +591,87 @@ void pfhash_set (pfhash_t * p, u32 bucket, void * key, void * value)
return;
default:
- ASSERT(0);
+ ASSERT (0);
}
}
-void pfhash_unset (pfhash_t * p, u32 bucket, void * key)
+void
+pfhash_unset (pfhash_t * p, u32 bucket, void *key)
{
u32 bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
- u32 match_index = (u32)~0;
+ u32 match_index = (u32) ~ 0;
pfhash_kv_t *kv;
- pfhash_kv_16_t * kv16;
- pfhash_kv_8_t * kv8;
- pfhash_kv_8v8_t * kv8v8;
- pfhash_kv_4_t * kv4;
- void * oldkey;
+ pfhash_kv_16_t *kv16;
+ pfhash_kv_8_t *kv8;
+ pfhash_kv_8v8_t *kv8v8;
+ pfhash_kv_4_t *kv4;
+ void *oldkey;
if (bucket_contents == PFHASH_BUCKET_OVERFLOW)
{
hash_pair_t *hp;
hp = hash_get_pair_mem (p->overflow_hash, key);
if (hp)
- {
- oldkey = (void *) hp->key;
- hash_unset_mem (p->overflow_hash, key);
- clib_mem_free (oldkey);
- p->nitems--;
- p->nitems_in_overflow--;
- }
+ {
+ oldkey = (void *) hp->key;
+ hash_unset_mem (p->overflow_hash, key);
+ clib_mem_free (oldkey);
+ p->nitems--;
+ p->nitems_in_overflow--;
+ }
return;
}
kv = pfhash_get_internal (p, bucket_contents, key, &match_index);
- if (match_index == (u32)~0)
+ if (match_index == (u32) ~ 0)
return;
-
+
p->nitems--;
- kv16 = (void *)kv;
- kv8 = (void *)kv;
- kv8v8 = (void *)kv;
- kv4 = (void *)kv;
+ kv16 = (void *) kv;
+ kv8 = (void *) kv;
+ kv8v8 = (void *) kv;
+ kv4 = (void *) kv;
switch (p->key_size)
{
case 16:
- kv16->values[match_index] = (u32)~0;
+ kv16->values[match_index] = (u32) ~ 0;
return;
case 8:
if (p->value_size == 4)
- kv8->values[match_index] = (u32)~0;
+ kv8->values[match_index] = (u32) ~ 0;
else
- kv8v8->values[match_index] = (u64)~0;
+ kv8v8->values[match_index] = (u64) ~ 0;
return;
case 4:
- kv4->values[match_index] = (u32)~0;
+ kv4->values[match_index] = (u32) ~ 0;
return;
default:
- ASSERT(0);
+ ASSERT (0);
}
}
-void pfhash_free (pfhash_t * p)
+void
+pfhash_free (pfhash_t * p)
{
hash_pair_t *hp;
int i;
- u8 ** keys = 0;
+ u8 **keys = 0;
vec_free (p->name);
pool_free (p->kvp);
- hash_foreach_pair (hp, p->overflow_hash,
+ /* *INDENT-OFF* */
+ hash_foreach_pair (hp, p->overflow_hash,
({
vec_add1 (keys, (u8 *)hp->key);
- }));
+ }));
+ /* *INDENT-ON* */
hash_free (p->overflow_hash);
for (i = 0; i < vec_len (keys); i++)
vec_free (keys[i]);
@@ -647,3 +679,11 @@ void pfhash_free (pfhash_t * p)
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/pfhash.h b/vppinfra/vppinfra/pfhash.h
index a2ca300f7c5..e054c668f3b 100644
--- a/vppinfra/vppinfra/pfhash.h
+++ b/vppinfra/vppinfra/pfhash.h
@@ -24,20 +24,24 @@
#if defined(CLIB_HAVE_VEC128) && ! defined (__ALTIVEC__)
-typedef struct {
+typedef struct
+{
/* 3 x 16 = 48 key bytes */
- union {
+ union
+ {
u32x4 k_u32x4[3];
- u64 k_u64[6];
+ u64 k_u64[6];
} kb;
/* 3 x 4 = 12 value bytes */
u32 values[3];
u32 pad;
} pfhash_kv_16_t;
-typedef struct {
+typedef struct
+{
/* 5 x 8 = 40 key bytes */
- union {
+ union
+ {
u64 k_u64[5];
} kb;
@@ -46,9 +50,11 @@ typedef struct {
u32 pad;
} pfhash_kv_8_t;
-typedef struct {
+typedef struct
+{
/* 4 x 8 = 32 key bytes */
- union {
+ union
+ {
u64 k_u64[4];
} kb;
@@ -56,9 +62,11 @@ typedef struct {
u64 values[4];
} pfhash_kv_8v8_t;
-typedef struct {
+typedef struct
+{
/* 8 x 4 = 32 key bytes */
- union {
+ union
+ {
u32x4 k_u32x4[2];
u32 kb[8];
} kb;
@@ -67,26 +75,28 @@ typedef struct {
u32 values[8];
} pfhash_kv_4_t;
-typedef union {
+typedef union
+{
pfhash_kv_16_t kv16;
pfhash_kv_8_t kv8;
pfhash_kv_8v8_t kv8v8;
pfhash_kv_4_t kv4;
} pfhash_kv_t;
-typedef struct {
+typedef struct
+{
/* Bucket vector */
- u32 * buckets;
+ u32 *buckets;
#define PFHASH_BUCKET_OVERFLOW (u32)~0
/* Pool of key/value pairs */
- pfhash_kv_t * kvp;
-
+ pfhash_kv_t *kvp;
+
/* overflow plain-o-hash */
- uword * overflow_hash;
+ uword *overflow_hash;
/* Pretty-print name */
- u8 * name;
+ u8 *name;
u32 key_size;
u32 value_size;
@@ -96,19 +106,23 @@ typedef struct {
u32 nitems_in_overflow;
} pfhash_t;
-void pfhash_init (pfhash_t * p, char * name, u32 key_size, u32 value_size,
- u32 nbuckets);
+void pfhash_init (pfhash_t * p, char *name, u32 key_size, u32 value_size,
+ u32 nbuckets);
void pfhash_free (pfhash_t * p);
-u64 pfhash_get (pfhash_t * p, u32 bucket, void * key);
-void pfhash_set (pfhash_t * p, u32 bucket, void * key, void * value);
-void pfhash_unset (pfhash_t * p, u32 bucket, void * key);
+u64 pfhash_get (pfhash_t * p, u32 bucket, void *key);
+void pfhash_set (pfhash_t * p, u32 bucket, void *key, void *value);
+void pfhash_unset (pfhash_t * p, u32 bucket, void *key);
format_function_t format_pfhash;
-static inline void pfhash_prefetch_bucket (pfhash_t * p, u32 bucket)
-{ CLIB_PREFETCH (&p->buckets[bucket], CLIB_CACHE_LINE_BYTES, LOAD); }
+static inline void
+pfhash_prefetch_bucket (pfhash_t * p, u32 bucket)
+{
+ CLIB_PREFETCH (&p->buckets[bucket], CLIB_CACHE_LINE_BYTES, LOAD);
+}
-static inline u32 pfhash_read_bucket_prefetch_kv (pfhash_t * p, u32 bucket)
+static inline u32
+pfhash_read_bucket_prefetch_kv (pfhash_t * p, u32 bucket)
{
u32 bucket_contents = p->buckets[bucket];
if (PREDICT_TRUE ((bucket_contents & PFHASH_BUCKET_OVERFLOW) == 0))
@@ -116,17 +130,17 @@ static inline u32 pfhash_read_bucket_prefetch_kv (pfhash_t * p, u32 bucket)
return bucket_contents;
}
-/*
+/*
* pfhash_search_kv_16
* See if the supplied 16-byte key matches one of three 16-byte (key,value) pairs.
* Return the indicated value, or ~0 if no match
- *
+ *
* Note: including the overflow test, the fast path is 35 instrs
* on x86_64. Elves will steal your keyboard in the middle of the night if
* you "improve" it without checking the generated code!
*/
-static inline u32 pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents,
- u32x4 * key)
+static inline u32
+pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents, u32x4 * key)
{
u32x4 diff0, diff1, diff2;
u32 is_equal0, is_equal1, is_equal2;
@@ -136,11 +150,11 @@ static inline u32 pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents,
if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
{
- uword * hp;
+ uword *hp;
hp = hash_get_mem (p->overflow_hash, key);
if (hp)
- return hp[0];
- return (u32)~0;
+ return hp[0];
+ return (u32) ~ 0;
}
kv = &p->kvp[bucket_contents].kv16;
@@ -148,7 +162,7 @@ static inline u32 pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents,
diff0 = u32x4_sub (kv->kb.k_u32x4[0], key[0]);
diff1 = u32x4_sub (kv->kb.k_u32x4[1], key[0]);
diff2 = u32x4_sub (kv->kb.k_u32x4[2], key[0]);
-
+
no_match = is_equal0 = (i16) u32x4_zero_byte_mask (diff0);
is_equal1 = (i16) u32x4_zero_byte_mask (diff1);
no_match |= is_equal1;
@@ -156,101 +170,107 @@ static inline u32 pfhash_search_kv_16 (pfhash_t * p, u32 bucket_contents,
no_match |= is_equal2;
/* If any of the three items matched, no_match will be zero after this line */
no_match = ~no_match;
-
- rv = (is_equal0 & kv->values[0])
- |(is_equal1 & kv->values[1])
- | (is_equal2 & kv->values[2])
- | no_match;
-
+
+ rv = (is_equal0 & kv->values[0])
+ | (is_equal1 & kv->values[1]) | (is_equal2 & kv->values[2]) | no_match;
+
return rv;
}
-static inline u32 pfhash_search_kv_8 (pfhash_t * p, u32 bucket_contents,
- u64 * key)
+static inline u32
+pfhash_search_kv_8 (pfhash_t * p, u32 bucket_contents, u64 * key)
{
pfhash_kv_8_t *kv;
- u32 rv = (u32)~0;
-
+ u32 rv = (u32) ~ 0;
+
if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
{
- uword * hp;
+ uword *hp;
hp = hash_get_mem (p->overflow_hash, key);
if (hp)
- return hp[0];
- return (u32)~0;
+ return hp[0];
+ return (u32) ~ 0;
}
-
+
kv = &p->kvp[bucket_contents].kv8;
-
+
rv = (kv->kb.k_u64[0] == key[0]) ? kv->values[0] : rv;
rv = (kv->kb.k_u64[1] == key[0]) ? kv->values[1] : rv;
rv = (kv->kb.k_u64[2] == key[0]) ? kv->values[2] : rv;
rv = (kv->kb.k_u64[3] == key[0]) ? kv->values[3] : rv;
rv = (kv->kb.k_u64[4] == key[0]) ? kv->values[4] : rv;
-
+
return rv;
}
-static inline u64 pfhash_search_kv_8v8 (pfhash_t * p, u32 bucket_contents,
- u64 * key)
+static inline u64
+pfhash_search_kv_8v8 (pfhash_t * p, u32 bucket_contents, u64 * key)
{
pfhash_kv_8v8_t *kv;
- u64 rv = (u64)~0;
-
+ u64 rv = (u64) ~ 0;
+
if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
{
- uword * hp;
+ uword *hp;
hp = hash_get_mem (p->overflow_hash, key);
if (hp)
- return hp[0];
- return (u64)~0;
+ return hp[0];
+ return (u64) ~ 0;
}
-
+
kv = &p->kvp[bucket_contents].kv8v8;
-
+
rv = (kv->kb.k_u64[0] == key[0]) ? kv->values[0] : rv;
rv = (kv->kb.k_u64[1] == key[0]) ? kv->values[1] : rv;
rv = (kv->kb.k_u64[2] == key[0]) ? kv->values[2] : rv;
rv = (kv->kb.k_u64[3] == key[0]) ? kv->values[3] : rv;
-
+
return rv;
}
-static inline u32 pfhash_search_kv_4 (pfhash_t * p, u32 bucket_contents,
- u32 * key)
+static inline u32
+pfhash_search_kv_4 (pfhash_t * p, u32 bucket_contents, u32 * key)
{
u32x4 vector_key;
u32x4 is_equal[2];
u32 zbm[2], winner_index;
pfhash_kv_4_t *kv;
-
+
if (PREDICT_FALSE (bucket_contents == PFHASH_BUCKET_OVERFLOW))
{
- uword * hp;
+ uword *hp;
hp = hash_get_mem (p->overflow_hash, key);
if (hp)
- return hp[0];
- return (u32)~0;
+ return hp[0];
+ return (u32) ~ 0;
}
-
+
kv = &p->kvp[bucket_contents].kv4;
-
+
vector_key = u32x4_splat (key[0]);
-
+
is_equal[0] = u32x4_is_equal (kv->kb.k_u32x4[0], vector_key);
is_equal[1] = u32x4_is_equal (kv->kb.k_u32x4[1], vector_key);
zbm[0] = ~u32x4_zero_byte_mask (is_equal[0]) & 0xFFFF;
zbm[1] = ~u32x4_zero_byte_mask (is_equal[1]) & 0xFFFF;
-
- if (PREDICT_FALSE((zbm[0] == 0) && (zbm[1] == 0)))
- return (u32)~0;
-
- winner_index = min_log2 (zbm[0])>>2;
- winner_index = zbm[1] ? (4 + (min_log2 (zbm[1])>>2)) : winner_index;
-
+
+ if (PREDICT_FALSE ((zbm[0] == 0) && (zbm[1] == 0)))
+ return (u32) ~ 0;
+
+ winner_index = min_log2 (zbm[0]) >> 2;
+ winner_index = zbm[1] ? (4 + (min_log2 (zbm[1]) >> 2)) : winner_index;
+
return kv->values[winner_index];
}
#endif /* CLIB_HAVE_VEC128 */
#endif /* included_clib_pfhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
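
For reference, a minimal usage sketch of the pfhash API declared above; this is an editorial illustration, not part of the patch. It assumes a CLIB_HAVE_VEC128 build (the whole API is compiled out otherwise); the function name, the example values, and the modulo bucket choice are placeholders, since callers normally derive the bucket from their own flow hash.

#include <vppinfra/pfhash.h>

/* Usage sketch: 8-byte keys, 4-byte values, caller-chosen bucket. */
static u64
pfhash_example (void)
{
  pfhash_t ph;
  u64 key = 0x0123456789abcdefULL;
  u32 nbuckets = 64;
  u32 bucket = (u32) (key % nbuckets);	/* placeholder bucket choice */
  u64 value;

  memset (&ph, 0, sizeof (ph));
  pfhash_init (&ph, "example", 8 /* key_size */ , 4 /* value_size */ ,
	       nbuckets);

  pfhash_set (&ph, bucket, &key, (void *) (u64) 42);
  value = pfhash_get (&ph, bucket, &key);	/* 42, or ~0 if absent */

  pfhash_unset (&ph, bucket, &key);
  pfhash_free (&ph);
  return value;
}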
diff --git a/vppinfra/vppinfra/phash.c b/vppinfra/vppinfra/phash.c
index a104e64e1be..14da522594a 100644
--- a/vppinfra/vppinfra/phash.c
+++ b/vppinfra/vppinfra/phash.c
@@ -53,13 +53,13 @@ those keys into a value in 0..n-1 with no collisions.
The perfect hash function first uses a normal hash function on the key
to determine (a,b) such that the pair (a,b) is distinct for all
keys, then it computes a^scramble[tab[b]] to get the final perfect hash.
-tab[] is an array of 1-byte values and scramble[] is a 256-term array of
-2-byte or 4-byte values. If there are n keys, the length of tab[] is a
+tab[] is an array of 1-byte values and scramble[] is a 256-term array of
+2-byte or 4-byte values. If there are n keys, the length of tab[] is a
power of two between n/3 and n.
-I found the idea of computing distinct (a,b) values in "Practical minimal
-perfect hash functions for large databases", Fox, Heath, Chen, and Daoud,
-Communications of the ACM, January 1992. They found the idea in Chichelli
+I found the idea of computing distinct (a,b) values in "Practical minimal
+perfect hash functions for large databases", Fox, Heath, Chen, and Daoud,
+Communications of the ACM, January 1992. They found the idea in Chichelli
(CACM Jan 1980). Beyond that, our methods differ.
The key is hashed to a pair (a,b) where a in 0..*alen*-1 and b in
@@ -91,11 +91,12 @@ determined a perfect hash for the whole set of keys.
#include <vppinfra/phash.h>
#include <vppinfra/random.h>
-static void init_keys_direct_u32 (phash_main_t * pm)
+static void
+init_keys_direct_u32 (phash_main_t * pm)
{
int n_keys_left, b_mask, a_shift;
u32 seed;
- phash_key_t * k;
+ phash_key_t *k;
seed = pm->hash_seed;
b_mask = (1 << pm->b_bits) - 1;
@@ -147,11 +148,12 @@ static void init_keys_direct_u32 (phash_main_t * pm)
}
}
-static void init_keys_direct_u64 (phash_main_t * pm)
+static void
+init_keys_direct_u64 (phash_main_t * pm)
{
int n_keys_left, b_mask, a_shift;
u64 seed;
- phash_key_t * k;
+ phash_key_t *k;
seed = pm->hash_seed;
b_mask = (1 << pm->b_bits) - 1;
@@ -203,11 +205,12 @@ static void init_keys_direct_u64 (phash_main_t * pm)
}
}
-static void init_keys_indirect_u32 (phash_main_t * pm)
+static void
+init_keys_indirect_u32 (phash_main_t * pm)
{
int n_keys_left, b_mask, a_shift;
u32 seed;
- phash_key_t * k;
+ phash_key_t *k;
seed = pm->hash_seed;
b_mask = (1 << pm->b_bits) - 1;
@@ -226,8 +229,12 @@ static void init_keys_indirect_u32 (phash_main_t * pm)
x0 = y0 = z0 = seed;
x1 = y1 = z1 = seed;
- x0 += xyz[0]; y0 += xyz[1]; z0 += xyz[2];
- x1 += xyz[3]; y1 += xyz[4]; z1 += xyz[5];
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+ x1 += xyz[3];
+ y1 += xyz[4];
+ z1 += xyz[5];
hash_mix32 (x0, y0, z0);
hash_mix32 (x1, y1, z1);
@@ -251,7 +258,9 @@ static void init_keys_indirect_u32 (phash_main_t * pm)
pm->key_seed1 (pm->private, k[0].key, &xyz);
x0 = y0 = z0 = seed;
- x0 += xyz[0]; y0 += xyz[1]; z0 += xyz[2];
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
hash_mix32 (x0, y0, z0);
@@ -265,11 +274,12 @@ static void init_keys_indirect_u32 (phash_main_t * pm)
}
}
-static void init_keys_indirect_u64 (phash_main_t * pm)
+static void
+init_keys_indirect_u64 (phash_main_t * pm)
{
int n_keys_left, b_mask, a_shift;
u64 seed;
- phash_key_t * k;
+ phash_key_t *k;
seed = pm->hash_seed;
b_mask = (1 << pm->b_bits) - 1;
@@ -288,8 +298,12 @@ static void init_keys_indirect_u64 (phash_main_t * pm)
x0 = y0 = z0 = seed;
x1 = y1 = z1 = seed;
- x0 += xyz[0]; y0 += xyz[1]; z0 += xyz[2];
- x1 += xyz[3]; y1 += xyz[4]; z1 += xyz[5];
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
+ x1 += xyz[3];
+ y1 += xyz[4];
+ z1 += xyz[5];
hash_mix64 (x0, y0, z0);
hash_mix64 (x1, y1, z1);
@@ -313,7 +327,9 @@ static void init_keys_indirect_u64 (phash_main_t * pm)
pm->key_seed1 (pm->private, k[0].key, &xyz);
x0 = y0 = z0 = seed;
- x0 += xyz[0]; y0 += xyz[1]; z0 += xyz[2];
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
hash_mix64 (x0, y0, z0);
@@ -327,15 +343,16 @@ static void init_keys_indirect_u64 (phash_main_t * pm)
}
}
-/*
+/*
* insert keys into table according to key->b
- * check if the initial hash might work
+ * check if the initial hash might work
*/
-static int init_tabb (phash_main_t * pm)
+static int
+init_tabb (phash_main_t * pm)
{
int no_collisions;
- phash_tabb_t * tb;
- phash_key_t * k, * l;
+ phash_tabb_t *tb;
+ phash_key_t *k, *l;
if (pm->key_seed1)
{
@@ -352,49 +369,49 @@ static int init_tabb (phash_main_t * pm)
init_keys_direct_u32 (pm);
}
- if (! pm->tabb)
+ if (!pm->tabb)
vec_resize (pm->tabb, 1 << pm->b_bits);
else
- vec_foreach (tb, pm->tabb)
- phash_tabb_free (tb);
-
+ vec_foreach (tb, pm->tabb) phash_tabb_free (tb);
+
/* Two keys with the same (a,b) guarantees a collision */
no_collisions = 1;
vec_foreach (k, pm->keys)
- {
- u32 i, * ki;
-
- tb = pm->tabb + k->b;
- ki = tb->keys;
- for (i = 0; i < vec_len (ki); i++)
- {
- l = pm->keys + ki[i];
- if (k->a == l->a)
- {
- /* Given keys are supposed to be unique. */
- if (pm->key_is_equal
- && pm->key_is_equal (pm->private, l->key, k->key))
- clib_error ("duplicate keys");
- no_collisions = 0;
- goto done;
- }
- }
+ {
+ u32 i, *ki;
+
+ tb = pm->tabb + k->b;
+ ki = tb->keys;
+ for (i = 0; i < vec_len (ki); i++)
+ {
+ l = pm->keys + ki[i];
+ if (k->a == l->a)
+ {
+ /* Given keys are supposed to be unique. */
+ if (pm->key_is_equal
+ && pm->key_is_equal (pm->private, l->key, k->key))
+ clib_error ("duplicate keys");
+ no_collisions = 0;
+ goto done;
+ }
+ }
- vec_add1 (tb->keys, k - pm->keys);
- }
+ vec_add1 (tb->keys, k - pm->keys);
+ }
- done:
+done:
return no_collisions;
}
/* Try to apply an augmenting list */
-static int apply (phash_main_t * pm, u32 tail, u32 rollback)
+static int
+apply (phash_main_t * pm, u32 tail, u32 rollback)
{
- phash_key_t * k;
- phash_tabb_t * pb;
- phash_tabq_t * q_child, * q_parent;
+ phash_key_t *k;
+ phash_tabb_t *pb;
+ phash_tabq_t *q_child, *q_parent;
u32 ki, i, hash, child, parent;
- u32 stabb; /* scramble[tab[b]] */
+ u32 stabb; /* scramble[tab[b]] */
int no_collision;
no_collision = 1;
@@ -436,7 +453,8 @@ static int apply (phash_main_t * pm, u32 tail, u32 rollback)
hash = k->a ^ stabb;
if (rollback)
{
- if (parent == 0) continue; /* root never had a hash */
+ if (parent == 0)
+ continue; /* root never had a hash */
}
else if (pm->tabh[hash] != ~0)
{
@@ -449,7 +467,7 @@ static int apply (phash_main_t * pm, u32 tail, u32 rollback)
}
}
- done:
+done:
return no_collision;
}
@@ -459,32 +477,34 @@ static int apply (phash_main_t * pm, u32 tail, u32 rollback)
augment(): Add item to the mapping.
Construct a spanning tree of *b*s with *item* as root, where each
-parent can have all its hashes changed (by some new val_b) with
+parent can have all its hashes changed (by some new val_b) with
at most one collision, and each child is the b of that collision.
I got this from Tarjan's "Data Structures and Network Algorithms". The
-path from *item* to a *b* that can be remapped with no collision is
-an "augmenting path". Change values of tab[b] along the path so that
+path from *item* to a *b* that can be remapped with no collision is
+an "augmenting path". Change values of tab[b] along the path so that
the unmapped key gets mapped and the unused hash value gets used.
-Assuming 1 key per b, if m out of n hash values are still unused,
-you should expect the transitive closure to cover n/m nodes before
+Assuming 1 key per b, if m out of n hash values are still unused,
+you should expect the transitive closure to cover n/m nodes before
an unused node is found. Sum(i=1..n)(n/i) is about nlogn, so expect
this approach to take about nlogn time to map all single-key b's.
-------------------------------------------------------------------------------
high_water: a value higher than any now in tabb[].water_b.
*/
-static int augment (phash_main_t * pm, u32 b_root, u32 high_water)
+static int
+augment (phash_main_t * pm, u32 b_root, u32 high_water)
{
- u32 q; /* current position walking through the queue */
- u32 tail; /* tail of the queue. 0 is the head of the queue. */
- phash_tabb_t * tb_parent, * tb_child, * tb_hit;
- phash_key_t * k_parent, * k_child;
- u32 v, v_limit; /* possible value for myb->val_b */
+ u32 q; /* current position walking through the queue */
+ u32 tail; /* tail of the queue. 0 is the head of the queue. */
+ phash_tabb_t *tb_parent, *tb_child, *tb_hit;
+ phash_key_t *k_parent, *k_child;
+ u32 v, v_limit; /* possible value for myb->val_b */
u32 i, ki, hash;
- v_limit = 1 << ((pm->flags & PHASH_FLAG_USE_SCRAMBLE) ? pm->s_bits : BITS (u8));
+ v_limit =
+ 1 << ((pm->flags & PHASH_FLAG_USE_SCRAMBLE) ? pm->s_bits : BITS (u8));
/* Initialize the root of the spanning tree. */
pm->tabq[0].b_q = b_root;
@@ -494,11 +514,10 @@ static int augment (phash_main_t * pm, u32 b_root, u32 high_water)
for (q = 0; q < tail; q++)
{
if ((pm->flags & PHASH_FLAG_FAST_MODE)
- && ! (pm->flags & PHASH_FLAG_MINIMAL)
- && q == 1)
- break; /* don't do transitive closure */
+ && !(pm->flags & PHASH_FLAG_MINIMAL) && q == 1)
+ break; /* don't do transitive closure */
- tb_parent = pm->tabb + pm->tabq[q].b_q; /* the b for this node */
+ tb_parent = pm->tabb + pm->tabq[q].b_q; /* the b for this node */
for (v = 0; v < v_limit; v++)
{
@@ -511,7 +530,7 @@ static int augment (phash_main_t * pm, u32 b_root, u32 high_water)
hash = k_parent->a ^ pm->scramble[v];
if (hash >= pm->hash_max)
- goto try_next_v; /* hash code out of bounds => we can't use this v */
+ goto try_next_v; /* hash code out of bounds => we can't use this v */
ki = pm->tabh[hash];
if (ki == ~0)
@@ -531,7 +550,7 @@ static int augment (phash_main_t * pm, u32 b_root, u32 high_water)
/* Remember this as child b. */
tb_child = tb_hit;
if (tb_hit->water_b == high_water)
- goto try_next_v; /* already explored */
+ goto try_next_v; /* already explored */
}
}
@@ -541,18 +560,18 @@ static int augment (phash_main_t * pm, u32 b_root, u32 high_water)
if (tb_child)
tb_child->water_b = high_water;
pm->tabq[tail].b_q = tb_child ? tb_child - pm->tabb : ~0;
- pm->tabq[tail].newval_q = v; /* how to make parent (myb) use this hash */
- pm->tabq[tail].oldval_q = tb_parent->val_b; /* need this for rollback */
+ pm->tabq[tail].newval_q = v; /* how to make parent (myb) use this hash */
+ pm->tabq[tail].oldval_q = tb_parent->val_b; /* need this for rollback */
pm->tabq[tail].parent_q = q;
++tail;
/* Found a v with no collisions? */
- if (! tb_child)
- {
+ if (!tb_child)
+ {
/* Try to apply the augmenting path. */
if (apply (pm, tail, /* rollback */ 0))
- return 1; /* success, item was added to the perfect hash */
- --tail; /* don't know how to handle such a child! */
+ return 1; /* success, item was added to the perfect hash */
+ --tail; /* don't know how to handle such a child! */
}
try_next_v:
@@ -563,27 +582,29 @@ static int augment (phash_main_t * pm, u32 b_root, u32 high_water)
}
-static phash_tabb_t * sort_tabb;
+static phash_tabb_t *sort_tabb;
-static int phash_tabb_compare (void *a1, void *a2)
+static int
+phash_tabb_compare (void *a1, void *a2)
{
- u32 *b1 = a1;
- u32 *b2 = a2;
- phash_tabb_t * tb1, * tb2;
+ u32 *b1 = a1;
+ u32 *b2 = a2;
+ phash_tabb_t *tb1, *tb2;
- tb1 = sort_tabb + b1[0];
- tb2 = sort_tabb + b2[0];
+ tb1 = sort_tabb + b1[0];
+ tb2 = sort_tabb + b2[0];
- return ((int) vec_len (tb2->keys) - (int) vec_len(tb1->keys));
+ return ((int) vec_len (tb2->keys) - (int) vec_len (tb1->keys));
}
/* find a mapping that makes this a perfect hash */
-static int perfect (phash_main_t * pm)
+static int
+perfect (phash_main_t * pm)
{
u32 i;
/* clear any state from previous attempts */
- if (vec_bytes(pm->tabh))
+ if (vec_bytes (pm->tabh))
memset (pm->tabh, ~0, vec_bytes (pm->tabh));
vec_validate (pm->tabb_sort, vec_len (pm->tabb) - 1);
@@ -597,7 +618,7 @@ static int perfect (phash_main_t * pm)
/* In descending order by number of keys, map all *b*s */
for (i = 0; i < vec_len (pm->tabb_sort); i++)
{
- if (! augment(pm, pm->tabb_sort[i], i + 1))
+ if (!augment (pm, pm->tabb_sort[i], i + 1))
return 0;
}
@@ -629,10 +650,10 @@ static int perfect (phash_main_t * pm)
* We want b_max as small as possible because it is the number of bytes in
* the huge array we must create for the perfect hash.
*
- * When nkey <= s_max*(5/8), b_max=s_max/4 works much more often with
+ * When nkey <= s_max*(5/8), b_max=s_max/4 works much more often with
* a_max=s_max/8 than with a_max=s_max/4. Above s_max*(5/8), b_max=s_max/4
* doesn't seem to care whether a_max=s_max/8 or a_max=s_max/4. I think it
- * has something to do with 5/8 = 1/8 * 5. For example examine 80000,
+ * has something to do with 5/8 = 1/8 * 5. For example examine 80000,
* 85000, and 90000 keys with different values of a_max. This only matters
* if we're doing a minimal perfect hash.
*
@@ -640,7 +661,8 @@ static int perfect (phash_main_t * pm)
* Bigger than that it must produce two integers, which increases the
* cost of the hash per character hashed.
*/
-static void guess_initial_parameters (phash_main_t * pm)
+static void
+guess_initial_parameters (phash_main_t * pm)
{
u32 s_bits, s_max, a_max, b_max, n_keys;
int is_minimal, is_fast_mode;
@@ -661,78 +683,95 @@ static void guess_initial_parameters (phash_main_t * pm)
case 0:
a_max = 1;
b_max = 1;
- case 1: case 2: case 3: case 4: case 5: case 6: case 7: case 8:
- /*
- * Was: a_max = is_minimal ? s_max / 2 : s_max;
- * However, we know that is_minimal must be true, so the
- * if-arm of the ternary expression is always executed.
- */
- a_max = s_max/2;
- b_max = s_max/2;
+ case 1:
+ case 2:
+ case 3:
+ case 4:
+ case 5:
+ case 6:
+ case 7:
+ case 8:
+ /*
+ * Was: a_max = is_minimal ? s_max / 2 : s_max;
+ * However, we know that is_minimal must be true, so the
+ * if-arm of the ternary expression is always executed.
+ */
+ a_max = s_max / 2;
+ b_max = s_max / 2;
break;
- case 9: case 10: case 11: case 12: case 13:
- case 14: case 15: case 16: case 17:
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16:
+ case 17:
if (is_fast_mode)
{
- a_max = s_max/2;
- b_max = s_max/4;
+ a_max = s_max / 2;
+ b_max = s_max / 4;
}
- else if (s_max/4 < b_max_use_scramble_threshold)
+ else if (s_max / 4 < b_max_use_scramble_threshold)
{
- if (n_keys <= s_max*0.52)
- a_max = b_max = s_max/8;
+ if (n_keys <= s_max * 0.52)
+ a_max = b_max = s_max / 8;
else
- a_max = b_max = s_max/4;
+ a_max = b_max = s_max / 4;
}
else
{
- a_max = ((n_keys <= s_max*(5.0/8.0)) ? s_max/8 :
- (n_keys <= s_max*(3.0/4.0)) ? s_max/4 : s_max/2);
- b_max = s_max/4; /* always give the small size a shot */
+ a_max = ((n_keys <= s_max * (5.0 / 8.0)) ? s_max / 8 :
+ (n_keys <=
+ s_max * (3.0 / 4.0)) ? s_max / 4 : s_max / 2);
+ b_max = s_max / 4; /* always give the small size a shot */
}
break;
case 18:
if (is_fast_mode)
- a_max = b_max = s_max/2;
+ a_max = b_max = s_max / 2;
else
{
- a_max = s_max/8; /* never require the multiword hash */
- b_max = (n_keys <= s_max*(5.0/8.0)) ? s_max/4 : s_max/2;
+ a_max = s_max / 8; /* never require the multiword hash */
+ b_max = (n_keys <= s_max * (5.0 / 8.0)) ? s_max / 4 : s_max / 2;
}
break;
case 19:
case 20:
- a_max = (n_keys <= s_max*(5.0/8.0)) ? s_max/8 : s_max/2;
- b_max = (n_keys <= s_max*(5.0/8.0)) ? s_max/4 : s_max/2;
+ a_max = (n_keys <= s_max * (5.0 / 8.0)) ? s_max / 8 : s_max / 2;
+ b_max = (n_keys <= s_max * (5.0 / 8.0)) ? s_max / 4 : s_max / 2;
break;
default:
/* Just find a hash as quick as possible.
We'll be thrashing virtual memory at this size. */
- a_max = b_max = s_max/2;
+ a_max = b_max = s_max / 2;
break;
}
}
else
{
/* Non-minimal perfect hash. */
- if (is_fast_mode && n_keys > s_max*0.8)
+ if (is_fast_mode && n_keys > s_max * 0.8)
{
s_max *= 2;
s_bits += 1;
}
- if (s_max/4 <= (1 << 14))
- b_max = ((n_keys <= s_max*0.56) ? s_max/32 :
- (n_keys <= s_max*0.74) ? s_max/16 : s_max/8);
+ if (s_max / 4 <= (1 << 14))
+ b_max = ((n_keys <= s_max * 0.56) ? s_max / 32 :
+ (n_keys <= s_max * 0.74) ? s_max / 16 : s_max / 8);
else
- b_max = ((n_keys <= s_max*0.6) ? s_max/16 :
- (n_keys <= s_max*0.8) ? s_max/8 : s_max/4);
+ b_max = ((n_keys <= s_max * 0.6) ? s_max / 16 :
+ (n_keys <= s_max * 0.8) ? s_max / 8 : s_max / 4);
- if (is_fast_mode && b_max < s_max/8)
- b_max = s_max/8;
+ if (is_fast_mode && b_max < s_max / 8)
+ b_max = s_max / 8;
- if (a_max < 1) a_max = 1;
- if (b_max < 1) b_max = 1;
+ if (a_max < 1)
+ a_max = 1;
+ if (b_max < 1)
+ b_max = 1;
}
ASSERT (s_max == (1 << s_bits));
@@ -747,17 +786,18 @@ static void guess_initial_parameters (phash_main_t * pm)
/* compute p(x), where p is a permutation of 0..(1<<nbits)-1 */
/* permute(0)=0. This is intended and useful. */
-always_inline u32 scramble_permute (u32 x, u32 nbits)
+always_inline u32
+scramble_permute (u32 x, u32 nbits)
{
int i;
- int mask = (1 << nbits) - 1;
- int const2 = 1+nbits/2;
- int const3 = 1+nbits/3;
- int const4 = 1+nbits/4;
- int const5 = 1+nbits/5;
+ int mask = (1 << nbits) - 1;
+ int const2 = 1 + nbits / 2;
+ int const3 = 1 + nbits / 3;
+ int const4 = 1 + nbits / 4;
+ int const5 = 1 + nbits / 5;
for (i = 0; i < 20; i++)
{
- x = (x + (x << const2)) & mask;
+ x = (x + (x << const2)) & mask;
x = (x ^ (x >> const3));
x = (x + (x << const4)) & mask;
x = (x ^ (x >> const5));
@@ -766,7 +806,8 @@ always_inline u32 scramble_permute (u32 x, u32 nbits)
}
/* initialize scramble[] with distinct random values in 0..smax-1 */
-static void scramble_init (phash_main_t * pm)
+static void
+scramble_init (phash_main_t * pm)
{
u32 i;
@@ -780,7 +821,7 @@ static void scramble_init (phash_main_t * pm)
clib_error_t *
phash_find_perfect_hash (phash_main_t * pm)
{
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
u32 max_a_bits, n_tries_this_a_b, want_minimal;
/* guess initial values for s_max, a_max and b_max */
@@ -788,7 +829,7 @@ phash_find_perfect_hash (phash_main_t * pm)
want_minimal = pm->flags & PHASH_FLAG_MINIMAL;
- new_s:
+new_s:
if (pm->b_bits == 0)
pm->a_bits = pm->s_bits;
@@ -805,7 +846,7 @@ phash_find_perfect_hash (phash_main_t * pm)
vec_validate_init_empty (pm->tabh, pm->hash_max - 1, ~0);
vec_free (pm->tabq);
vec_validate (pm->tabq, 1 << pm->b_bits);
-
+
/* Actually find the perfect hash */
n_tries_this_a_b = 0;
while (1)
@@ -850,9 +891,9 @@ phash_find_perfect_hash (phash_main_t * pm)
}
}
- done:
+done:
/* Construct mapping table for hash lookups. */
- if (! error)
+ if (!error)
{
u32 b, v;
@@ -865,7 +906,7 @@ phash_find_perfect_hash (phash_main_t * pm)
v = pm->tabb[b].val_b;
/* Apply scramble now for small enough value of b_bits. */
- if (! (pm->flags & PHASH_FLAG_USE_SCRAMBLE))
+ if (!(pm->flags & PHASH_FLAG_USE_SCRAMBLE))
v = pm->scramble[v];
pm->tab[b] = v;
@@ -879,7 +920,8 @@ phash_find_perfect_hash (phash_main_t * pm)
}
/* Slow hash computation for general keys. */
-uword phash_hash_slow (phash_main_t * pm, uword key)
+uword
+phash_hash_slow (phash_main_t * pm, uword key)
{
u32 a, b, v;
@@ -893,7 +935,9 @@ uword phash_hash_slow (phash_main_t * pm, uword key)
{
u64 xyz[3];
pm->key_seed1 (pm->private, key, &xyz);
- x0 += xyz[0]; y0 += xyz[1]; z0 += xyz[2];
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
}
else
x0 += key;
@@ -913,7 +957,9 @@ uword phash_hash_slow (phash_main_t * pm, uword key)
{
u32 xyz[3];
pm->key_seed1 (pm->private, key, &xyz);
- x0 += xyz[0]; y0 += xyz[1]; z0 += xyz[2];
+ x0 += xyz[0];
+ y0 += xyz[1];
+ z0 += xyz[2];
}
else
x0 += key;
@@ -934,30 +980,38 @@ uword phash_hash_slow (phash_main_t * pm, uword key)
clib_error_t *
phash_validate (phash_main_t * pm)
{
- phash_key_t * k;
- uword * unique_bitmap = 0;
- clib_error_t * error = 0;
+ phash_key_t *k;
+ uword *unique_bitmap = 0;
+ clib_error_t *error = 0;
vec_foreach (k, pm->keys)
- {
- uword h = phash_hash_slow (pm, k->key);
+ {
+ uword h = phash_hash_slow (pm, k->key);
- if (h >= pm->hash_max)
- {
- error = clib_error_return (0, "hash out of range %wd", h);
- goto done;
- }
+ if (h >= pm->hash_max)
+ {
+ error = clib_error_return (0, "hash out of range %wd", h);
+ goto done;
+ }
- if (clib_bitmap_get (unique_bitmap, h))
- {
- error = clib_error_return (0, "hash non-unique");
- goto done;
- }
+ if (clib_bitmap_get (unique_bitmap, h))
+ {
+ error = clib_error_return (0, "hash non-unique");
+ goto done;
+ }
- unique_bitmap = clib_bitmap_ori (unique_bitmap, h);
- }
+ unique_bitmap = clib_bitmap_ori (unique_bitmap, h);
+ }
- done:
+done:
clib_bitmap_free (unique_bitmap);
return error;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
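
As an editorial illustration of the two-level mapping described in the comment at the top of this file (not part of the patch): once tab[] and scramble[] are built, a lookup is one xor and two table reads. The helper name and parameter types below are schematic; in the library the scramble is folded into tab[] at build time unless PHASH_FLAG_USE_SCRAMBLE is set.

/* Final lookup step: (a, b) come from an ordinary hash of the key,
   tab[] is the small table indexed by b, scramble[] the 256-entry
   scramble array. */
static inline u32
perfect_hash_lookup (u32 a, u32 b, u32 * tab, u32 * scramble)
{
  return a ^ scramble[tab[b]];
}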
diff --git a/vppinfra/vppinfra/phash.h b/vppinfra/vppinfra/phash.h
index 33a35c23ffd..746a0fddfab 100644
--- a/vppinfra/vppinfra/phash.h
+++ b/vppinfra/vppinfra/phash.h
@@ -38,9 +38,10 @@
#ifndef included_phash_h
#define included_phash_h
-#include <vppinfra/hash.h> /* for Bob's mixing functions */
+#include <vppinfra/hash.h> /* for Bob's mixing functions */
-typedef struct {
+typedef struct
+{
/* Maybe either pointer to vector or inline word. */
uword key;
@@ -49,9 +50,10 @@ typedef struct {
} phash_key_t;
/* Table indexed by B. */
-typedef struct {
+typedef struct
+{
/* Vector of key indices with this same value of B. */
- u32 * keys;
+ u32 *keys;
/* hash=a^tabb[b].val_b */
u32 val_b;
@@ -82,11 +84,12 @@ typedef struct
u32 oldval_q;
} phash_tabq_t;
-typedef struct {
+typedef struct
+{
u8 a_bits, b_bits, s_bits, a_shift;
u32 b_mask;
- u32 * tab;
- u32 * scramble;
+ u32 *tab;
+ u32 *scramble;
/* Seed value for hash mixer. */
u64 hash_seed;
@@ -114,35 +117,35 @@ typedef struct {
u32 hash_max;
/* Vector of keys. */
- phash_key_t * keys;
+ phash_key_t *keys;
/* Used by callbacks to identify keys. */
- void * private;
+ void *private;
/* Key comparison callback. */
- int (* key_is_equal) (void * private, uword key1, uword key2);
+ int (*key_is_equal) (void *private, uword key1, uword key2);
/* Callback to reduce single key -> hash seeds. */
- void (* key_seed1) (void * private, uword key, void * seed);
+ void (*key_seed1) (void *private, uword key, void *seed);
/* Callback to reduce two key2 -> hash seeds. */
- void (* key_seed2) (void * private, uword key1, uword key2, void * seed);
+ void (*key_seed2) (void *private, uword key1, uword key2, void *seed);
/* Stuff used to compute perfect hash. */
u32 random_seed;
/* Stuff indexed by B. */
- phash_tabb_t * tabb;
+ phash_tabb_t *tabb;
/* Table of B ordered by number of keys in tabb[b]. */
- u32 * tabb_sort;
+ u32 *tabb_sort;
/* Unique key (or ~0 if none) for a given hash
H = A ^ scramble[tab[B].val_b]. */
- u32 * tabh;
+ u32 *tabh;
/* Stuff indexed by q. */
- phash_tabq_t * tabq;
+ phash_tabq_t *tabq;
/* Stats. */
u32 n_seed_trials, n_perfect_calls;
@@ -155,7 +158,7 @@ phash_main_free_working_memory (phash_main_t * pm)
vec_free (pm->tabq);
vec_free (pm->tabh);
vec_free (pm->tabb_sort);
- if (! (pm->flags & PHASH_FLAG_USE_SCRAMBLE))
+ if (!(pm->flags & PHASH_FLAG_USE_SCRAMBLE))
vec_free (pm->scramble);
}
@@ -172,12 +175,20 @@ phash_main_free (phash_main_t * pm)
uword phash_hash_slow (phash_main_t * pm, uword key);
/* Main routine to compute perfect hash. */
-clib_error_t * phash_find_perfect_hash (phash_main_t * pm);
+clib_error_t *phash_find_perfect_hash (phash_main_t * pm);
/* Validates that hash is indeed perfect. */
-clib_error_t * phash_validate (phash_main_t * pm);
+clib_error_t *phash_validate (phash_main_t * pm);
/* Unit test. */
int phash_test_main (unformat_input_t * input);
#endif /* included_phash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
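
A minimal end-to-end sketch of the phash API declared above, editorial and not part of the patch. Inline uword keys take the direct key-mixing path, so no callbacks are needed; ARRAY_LEN and vec_add2 are standard vppinfra helpers not shown in this patch, the key values are illustrative, and error handling is elided.

#include <vppinfra/phash.h>

/* Usage sketch: perfect-hash five inline uword keys and look one up. */
static uword
phash_example (void)
{
  phash_main_t pm = { 0 };
  clib_error_t *error;
  uword keys[] = { 3, 17, 42, 1234, 99999 };
  uword slot = ~0;
  int i;

  for (i = 0; i < ARRAY_LEN (keys); i++)
    {
      phash_key_t *k;
      vec_add2 (pm.keys, k, 1);
      k->key = keys[i];
    }

  pm.flags = PHASH_FLAG_MINIMAL;	/* hash_max == number of keys */

  error = phash_find_perfect_hash (&pm);
  if (!error)
    error = phash_validate (&pm);	/* every key maps to a unique slot */
  if (!error)
    slot = phash_hash_slow (&pm, 42);	/* in 0 .. pm.hash_max - 1 */

  phash_main_free (&pm);
  return slot;
}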
diff --git a/vppinfra/vppinfra/pipeline.h b/vppinfra/vppinfra/pipeline.h
index 93fed20d6a3..5a9799b455e 100644
--- a/vppinfra/vppinfra/pipeline.h
+++ b/vppinfra/vppinfra/pipeline.h
@@ -166,3 +166,11 @@ do { \
} while (0)
#endif /* included_clib_pipeline_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/pool.h b/vppinfra/vppinfra/pool.h
index 186a973107f..e1c89e0a5fa 100644
--- a/vppinfra/vppinfra/pool.h
+++ b/vppinfra/vppinfra/pool.h
@@ -49,12 +49,13 @@
#include <vppinfra/mheap.h>
-typedef struct {
+typedef struct
+{
/** Bitmap of indices of free objects. */
- uword * free_bitmap;
+ uword *free_bitmap;
/** Vector of free indices. One element for each set bit in bitmap. */
- u32 * free_indices;
+ u32 *free_indices;
} pool_header_t;
/** Align pool header so that pointers are naturally aligned. */
@@ -62,16 +63,20 @@ typedef struct {
vec_aligned_header_bytes (sizeof (pool_header_t), sizeof (void *))
/** Get pool header from user pool pointer */
-always_inline pool_header_t * pool_header (void * v)
-{ return vec_aligned_header (v, sizeof (pool_header_t), sizeof (void *)); }
+always_inline pool_header_t *
+pool_header (void *v)
+{
+ return vec_aligned_header (v, sizeof (pool_header_t), sizeof (void *));
+}
/** Validate a pool */
-always_inline void pool_validate (void * v)
+always_inline void
+pool_validate (void *v)
{
- pool_header_t * p = pool_header (v);
+ pool_header_t *p = pool_header (v);
uword i, n_free_bitmap;
- if (! v)
+ if (!v)
return;
n_free_bitmap = clib_bitmap_count_set_bits (p->free_bitmap);
@@ -80,9 +85,10 @@ always_inline void pool_validate (void * v)
ASSERT (clib_bitmap_get (p->free_bitmap, p->free_indices[i]) == 1);
}
-always_inline void pool_header_validate_index (void * v, uword index)
+always_inline void
+pool_header_validate_index (void *v, uword index)
{
- pool_header_t * p = pool_header (v);
+ pool_header_t *p = pool_header (v);
if (v)
vec_validate (p->free_bitmap, index / BITS (uword));
@@ -99,7 +105,8 @@ do { \
/** Number of active elements in a pool.
* @return Number of active elements in a pool
*/
-always_inline uword pool_elts (void * v)
+always_inline uword
+pool_elts (void *v)
{
uword ret = vec_len (v);
if (v)
@@ -121,11 +128,11 @@ always_inline uword pool_elts (void * v)
/** Memory usage of pool header. */
always_inline uword
-pool_header_bytes (void * v)
+pool_header_bytes (void *v)
{
- pool_header_t * p = pool_header (v);
+ pool_header_t *p = pool_header (v);
- if (! v)
+ if (!v)
return 0;
return vec_bytes (p->free_bitmap) + vec_bytes (p->free_indices);
@@ -139,17 +146,18 @@ pool_header_bytes (void * v)
/** Queries whether pool has at least N_FREE free elements. */
always_inline uword
-pool_free_elts (void * v)
+pool_free_elts (void *v)
{
- pool_header_t * p = pool_header (v);
+ pool_header_t *p = pool_header (v);
uword n_free = 0;
- if (v) {
- n_free += vec_len (p->free_indices);
+ if (v)
+ {
+ n_free += vec_len (p->free_indices);
- /* Space left at end of vector? */
- n_free += vec_capacity (v, sizeof (p[0])) - vec_len (v);
- }
+ /* Space left at end of vector? */
+ n_free += vec_capacity (v, sizeof (p[0])) - vec_len (v);
+ }
return n_free;
}
@@ -198,7 +206,7 @@ do { \
uword _pool_var (i) = (E) - (P); \
(_pool_var (i) < vec_len (P)) ? clib_bitmap_get (_pool_var (p)->free_bitmap, _pool_i) : 1; \
})
-
+
/** Use free bitmap to query whether given index is free */
#define pool_is_free_index(P,I) pool_is_free((P),(P)+(I))
@@ -239,10 +247,11 @@ do { \
#define pool_alloc(P,N) pool_alloc_aligned(P,N,0)
/** Low-level free pool operator (do not call directly). */
-always_inline void * _pool_free (void * v)
+always_inline void *
+_pool_free (void *v)
{
- pool_header_t * p = pool_header (v);
- if (! v)
+ pool_header_t *p = pool_header (v);
+ if (!v)
return v;
clib_bitmap_free (p->free_bitmap);
vec_free (p->free_indices);
@@ -260,7 +269,7 @@ always_inline void * _pool_free (void * v)
@param POOL pool to iterate across
@param BODY operation to perform
- Optimized version which assumes that BODY is smart enough to
+ Optimized version which assumes that BODY is smart enough to
process multiple (LOW,HI) chunks. See also pool_foreach().
*/
#define pool_foreach_region(LO,HI,POOL,BODY) \
@@ -355,7 +364,7 @@ do { \
@code
p = pool_base + index;
@endcode
- use of @c pool_elt_at_index is strongly suggested.
+ use of @c pool_elt_at_index is strongly suggested.
*/
#define pool_elt_at_index(p,i) \
({ \
@@ -386,3 +395,11 @@ do { \
}
#endif /* included_pool_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
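
A short editorial sketch (not part of the patch) of the point made in the pool_elt_at_index comment above: the element index is the stable handle, since pointers can move when the pool grows. pool_get and pool_put are the standard vppinfra pool macros, not shown in this hunk; the element struct and values are illustrative.

#include <vppinfra/pool.h>

typedef struct
{
  u32 value;
} example_elt_t;

/* Usage sketch: save indices, not pointers, and re-fetch via
   pool_elt_at_index(), which also validates the index. */
static void
pool_example (void)
{
  example_elt_t *pool = 0, *e;
  u32 index0;

  pool_get (pool, e);
  e->value = 10;
  index0 = e - pool;		/* save the element index */

  pool_get (pool, e);		/* may realloc and move the pool */
  e->value = 20;

  e = pool_elt_at_index (pool, index0);	/* validated lookup by index */
  ASSERT (e->value == 10);

  pool_put (pool, e);
  ASSERT (pool_elts (pool) == 1);

  pool_free (pool);
}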
diff --git a/vppinfra/vppinfra/ptclosure.c b/vppinfra/vppinfra/ptclosure.c
index 705af62c18a..cda873ef442 100644
--- a/vppinfra/vppinfra/ptclosure.c
+++ b/vppinfra/vppinfra/ptclosure.c
@@ -15,33 +15,35 @@
#include <vppinfra/ptclosure.h>
-u8 ** clib_ptclosure_alloc (int n)
+u8 **
+clib_ptclosure_alloc (int n)
{
- u8 ** rv = 0;
- u8 * row;
+ u8 **rv = 0;
+ u8 *row;
int i;
ASSERT (n > 0);
- vec_validate (rv, n-1);
+ vec_validate (rv, n - 1);
for (i = 0; i < n; i++)
{
row = 0;
- vec_validate (row, n-1);
-
+ vec_validate (row, n - 1);
+
rv[i] = row;
}
return rv;
}
-void clib_ptclosure_free (u8 ** ptc)
+void
+clib_ptclosure_free (u8 ** ptc)
{
- u8 * row;
+ u8 *row;
int n = vec_len (ptc);
int i;
ASSERT (n > 0);
-
+
for (i = 0; i < n; i++)
{
row = ptc[i];
@@ -50,14 +52,15 @@ void clib_ptclosure_free (u8 ** ptc)
vec_free (ptc);
}
-void clib_ptclosure_copy (u8 ** dst, u8 **src)
+void
+clib_ptclosure_copy (u8 ** dst, u8 ** src)
{
int i, n;
- u8 * src_row, * dst_row;
+ u8 *src_row, *dst_row;
n = vec_len (dst);
- for (i = 0; i < vec_len(dst); i++)
+ for (i = 0; i < vec_len (dst); i++)
{
src_row = src[i];
dst_row = dst[i];
@@ -67,13 +70,13 @@ void clib_ptclosure_copy (u8 ** dst, u8 **src)
/*
* compute the positive transitive closure
- * of a relation via Warshall's algorithm.
- *
+ * of a relation via Warshall's algorithm.
+ *
* Ref:
- * Warshall, Stephen (January 1962). "A theorem on Boolean matrices".
- * Journal of the ACM 9 (1): 11–12.
+ * Warshall, Stephen (January 1962). "A theorem on Boolean matrices".
+ * Journal of the ACM 9 (1): 11–12.
*
- * foo[i][j] = 1 means that item i
+ * foo[i][j] = 1 means that item i
* "bears the relation" to item j.
*
* For example: "item i must be before item j"
@@ -83,11 +86,12 @@ void clib_ptclosure_copy (u8 ** dst, u8 **src)
*
*/
-u8 ** clib_ptclosure (u8 ** orig)
+u8 **
+clib_ptclosure (u8 ** orig)
{
int i, j, k;
int n;
- u8 ** prev, ** cur;
+ u8 **prev, **cur;
n = vec_len (orig);
prev = clib_ptclosure_alloc (n);
@@ -98,16 +102,24 @@ u8 ** clib_ptclosure (u8 ** orig)
for (k = 0; k < n; k++)
{
for (i = 0; i < n; i++)
- {
- for (j = 0; j < n; j++)
- {
- cur[i][j] = prev[i][j] || (prev[i][k] && prev[k][j]);
- }
- }
+ {
+ for (j = 0; j < n; j++)
+ {
+ cur[i][j] = prev[i][j] || (prev[i][k] && prev[k][j]);
+ }
+ }
clib_ptclosure_copy (prev, cur);
}
clib_ptclosure_free (prev);
return cur;
}
-
+
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
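
An editorial sketch (not part of the patch) of the "item i must be before item j" relation mentioned in the Warshall comment above; the item count and constraints are illustrative.

#include <vppinfra/ptclosure.h>

/* Usage sketch: constraints 0->1 and 1->2; the transitive closure
   additionally yields the implied 0->2. */
static void
ptclosure_example (void)
{
  u8 **r, **closure;

  r = clib_ptclosure_alloc (3);
  r[0][1] = 1;			/* item 0 bears the relation to item 1 */
  r[1][2] = 1;			/* item 1 bears the relation to item 2 */

  closure = clib_ptclosure (r);
  ASSERT (closure[0][2] == 1);	/* transitively implied */

  clib_ptclosure_free (r);
  clib_ptclosure_free (closure);
}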
diff --git a/vppinfra/vppinfra/ptclosure.h b/vppinfra/vppinfra/ptclosure.h
index c3b71743703..ee1609a1250 100644
--- a/vppinfra/vppinfra/ptclosure.h
+++ b/vppinfra/vppinfra/ptclosure.h
@@ -19,14 +19,22 @@
#include <vppinfra/format.h>
#include <vppinfra/error.h>
-/*
- * set r[i][j] if item i "bears the relation to" item j
- *
+/*
+ * set r[i][j] if item i "bears the relation to" item j
+ *
*/
-u8 ** clib_ptclosure_alloc (int n);
+u8 **clib_ptclosure_alloc (int n);
void clib_ptclosure_free (u8 ** ptc);
-void clib_ptclosure_copy (u8 ** dst, u8 **src);
-u8 ** clib_ptclosure (u8 ** orig);
+void clib_ptclosure_copy (u8 ** dst, u8 ** src);
+u8 **clib_ptclosure (u8 ** orig);
#endif /* included_clib_ptclosure_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/qhash.c b/vppinfra/vppinfra/qhash.c
index 8629971b0aa..f4e38c4a1d7 100644
--- a/vppinfra/vppinfra/qhash.c
+++ b/vppinfra/vppinfra/qhash.c
@@ -40,9 +40,9 @@
#define QHASH_ALL_VALID ((1 << QHASH_KEYS_PER_BUCKET) - 1)
void *
-_qhash_resize (void * v, uword length, uword elt_bytes)
+_qhash_resize (void *v, uword length, uword elt_bytes)
{
- qhash_t * h;
+ qhash_t *h;
uword l;
l = clib_max (max_log2 (length), 2 + QHASH_LOG2_KEYS_PER_BUCKET);
@@ -50,16 +50,15 @@ _qhash_resize (void * v, uword length, uword elt_bytes)
/* Round up if less than 1/2 full. */
l += ((f64) length / (f64) (1 << l)) < .5;
- v = _vec_resize (0, 1 << l,
- elt_bytes << l,
- sizeof (h[0]),
+ v = _vec_resize (0, 1 << l, elt_bytes << l, sizeof (h[0]),
/* align */ sizeof (uword));
h = qhash_header (v);
h->n_elts = 0;
h->log2_hash_size = l;
- h->hash_keys = clib_mem_alloc_aligned_no_fail (sizeof (h->hash_keys[0]) << l,
- CLIB_CACHE_LINE_BYTES);
+ h->hash_keys =
+ clib_mem_alloc_aligned_no_fail (sizeof (h->hash_keys[0]) << l,
+ CLIB_CACHE_LINE_BYTES);
vec_resize (h->hash_key_valid_bitmap,
1 << (l - QHASH_LOG2_KEYS_PER_BUCKET));
memset (v, ~0, elt_bytes << l);
@@ -77,7 +76,8 @@ qhash_min_log2 (uword x)
return min_log2_table[x];
}
-static void qhash_min_log2_init ()
+static void
+qhash_min_log2_init ()
{
int i;
for (i = 0; i < 256; i++)
@@ -86,47 +86,48 @@ static void qhash_min_log2_init ()
always_inline uword
qhash_get_valid_elt_mask (qhash_t * h, uword i)
-{ return h->hash_key_valid_bitmap[i / QHASH_KEYS_PER_BUCKET]; }
+{
+ return h->hash_key_valid_bitmap[i / QHASH_KEYS_PER_BUCKET];
+}
always_inline void
qhash_set_valid_elt_mask (qhash_t * h, uword i, uword mask)
-{ h->hash_key_valid_bitmap[i / QHASH_KEYS_PER_BUCKET] = mask; }
+{
+ h->hash_key_valid_bitmap[i / QHASH_KEYS_PER_BUCKET] = mask;
+}
always_inline uword
-qhash_search_bucket (uword * hash_keys, uword search_key,
- uword m)
+qhash_search_bucket (uword * hash_keys, uword search_key, uword m)
{
uword t;
#define _(i) ((hash_keys[i] == search_key) << i)
- t = (_ (0) | _ (1) | _ (2) | _ (3));
+ t = (_(0) | _(1) | _(2) | _(3));
if (QHASH_KEYS_PER_BUCKET > 4)
- t |= (_ (4) | _ (5) | _ (6) | _ (7));
+ t |= (_(4) | _(5) | _(6) | _(7));
if (QHASH_KEYS_PER_BUCKET > 8)
- t |= (_ (8) | _ (9) | _ (10) | _ (11)
- | _ (12) | _ (13) | _ (14) | _ (15));
+ t |= (_(8) | _(9) | _(10) | _(11) | _(12) | _(13) | _(14) | _(15));
#undef _
return m & t;
}
/* Lookup multiple keys in the same hash table. */
void
-qhash_get_multiple (void * v,
+qhash_get_multiple (void *v,
uword * search_keys,
- uword n_search_keys,
- u32 * result_indices)
+ uword n_search_keys, u32 * result_indices)
{
- qhash_t * h = qhash_header (v);
- uword * k, * hash_keys;
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
uword n_left, bucket_mask;
- u32 * r;
+ u32 *r;
- if (! v)
+ if (!v)
{
memset (result_indices, ~0, sizeof (result_indices[0]) * n_search_keys);
return;
}
- bucket_mask = pow2_mask (h->log2_hash_size) &~ (QHASH_KEYS_PER_BUCKET - 1);
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
k = search_keys;
n_left = n_search_keys;
@@ -137,7 +138,7 @@ qhash_get_multiple (void * v,
{
u32 a0, b0, c0, bi0, valid0, match0;
u32 a1, b1, c1, bi1, valid1, match1;
- uword k0, k1, * h0, * h1;
+ uword k0, k1, *h0, *h1;
k0 = k[0];
k1 = k[1];
@@ -153,7 +154,7 @@ qhash_get_multiple (void * v,
b0 ^= k0 >> 32;
b1 ^= k1 >> 32;
#endif
-
+
hash_mix32_step_1 (a0, b0, c0);
hash_mix32_step_1 (a1, b1, c1);
hash_mix32_step_2 (a0, b0, c0);
@@ -182,16 +183,16 @@ qhash_get_multiple (void * v,
r += 2;
/* Full buckets trigger search of overflow hash. */
- if (PREDICT_FALSE (! match0 && valid0 == QHASH_ALL_VALID))
+ if (PREDICT_FALSE (!match0 && valid0 == QHASH_ALL_VALID))
{
- uword * p = hash_get (h->overflow_hash, k0);
+ uword *p = hash_get (h->overflow_hash, k0);
r[-2] = p ? p[0] : ~0;
}
/* Full buckets trigger search of overflow hash. */
- if (PREDICT_FALSE (! match1 && valid1 == QHASH_ALL_VALID))
+ if (PREDICT_FALSE (!match1 && valid1 == QHASH_ALL_VALID))
{
- uword * p = hash_get (h->overflow_hash, k1);
+ uword *p = hash_get (h->overflow_hash, k1);
r[-1] = p ? p[0] : ~0;
}
}
@@ -199,7 +200,7 @@ qhash_get_multiple (void * v,
while (n_left >= 1)
{
u32 a0, b0, c0, bi0, valid0, match0;
- uword k0, * h0;
+ uword k0, *h0;
k0 = k[0];
n_left -= 1;
@@ -229,9 +230,9 @@ qhash_get_multiple (void * v,
r += 1;
/* Full buckets trigger search of overflow hash. */
- if (PREDICT_FALSE (! match0 && valid0 == QHASH_ALL_VALID))
+ if (PREDICT_FALSE (!match0 && valid0 == QHASH_ALL_VALID))
{
- uword * p = hash_get (h->overflow_hash, k0);
+ uword *p = hash_get (h->overflow_hash, k0);
r[-1] = p ? p[0] : ~0;
}
}
@@ -240,20 +241,19 @@ qhash_get_multiple (void * v,
/* Lookup multiple keys in the same hash table.
Returns index of first matching key. */
u32
-qhash_get_first_match (void * v,
+qhash_get_first_match (void *v,
uword * search_keys,
- uword n_search_keys,
- uword * matching_key)
+ uword n_search_keys, uword * matching_key)
{
- qhash_t * h = qhash_header (v);
- uword * k, * hash_keys;
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
uword n_left, match_mask, bucket_mask;
- if (! v)
+ if (!v)
return ~0;
match_mask = 0;
- bucket_mask = pow2_mask (h->log2_hash_size) &~ (QHASH_KEYS_PER_BUCKET - 1);
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
k = search_keys;
n_left = n_search_keys;
@@ -262,7 +262,7 @@ qhash_get_first_match (void * v,
{
u32 a0, b0, c0, bi0, valid0;
u32 a1, b1, c1, bi1, valid1;
- uword k0, k1, * h0, * h1;
+ uword k0, k1, *h0, *h1;
k0 = k[0];
k1 = k[1];
@@ -278,7 +278,7 @@ qhash_get_first_match (void * v,
b0 ^= k0 >> 32;
b1 ^= k1 >> 32;
#endif
-
+
hash_mix32_step_1 (a0, b0, c0);
hash_mix32_step_1 (a1, b1, c1);
hash_mix32_step_2 (a0, b0, c0);
@@ -315,13 +315,13 @@ qhash_get_first_match (void * v,
if (PREDICT_FALSE (valid0 == QHASH_ALL_VALID
|| valid1 == QHASH_ALL_VALID))
{
- uword * p = 0;
+ uword *p = 0;
uword ki = k - 2 - search_keys;
if (valid0 == QHASH_ALL_VALID)
p = hash_get (h->overflow_hash, k0);
- if (! p && valid1 == QHASH_ALL_VALID)
+ if (!p && valid1 == QHASH_ALL_VALID)
{
p = hash_get (h->overflow_hash, k1);
ki++;
@@ -338,7 +338,7 @@ qhash_get_first_match (void * v,
while (n_left >= 1)
{
u32 a0, b0, c0, bi0, valid0;
- uword k0, * h0;
+ uword k0, *h0;
k0 = k[0];
n_left -= 1;
@@ -372,7 +372,7 @@ qhash_get_first_match (void * v,
/* Full buckets trigger search of overflow hash. */
if (PREDICT_FALSE (valid0 == QHASH_ALL_VALID))
{
- uword * p = hash_get (h->overflow_hash, k0);
+ uword *p = hash_get (h->overflow_hash, k0);
if (p)
{
*matching_key = (k - 1 - search_keys);
@@ -385,13 +385,11 @@ qhash_get_first_match (void * v,
}
static void *
-qhash_set_overflow (void * v, uword elt_bytes,
- uword key, uword bi,
- uword * n_elts,
- u32 * result)
+qhash_set_overflow (void *v, uword elt_bytes,
+ uword key, uword bi, uword * n_elts, u32 * result)
{
- qhash_t * h = qhash_header (v);
- uword * p = hash_get (h->overflow_hash, key);
+ qhash_t *h = qhash_header (v);
+ uword *p = hash_get (h->overflow_hash, key);
uword i;
bi /= QHASH_KEYS_PER_BUCKET;
@@ -417,11 +415,9 @@ qhash_set_overflow (void * v, uword elt_bytes,
if (i >= l)
{
uword dl = round_pow2 (1 + i - l, 8);
- v = _vec_resize (v, dl,
- (l + dl) * elt_bytes,
- sizeof (h[0]),
+ v = _vec_resize (v, dl, (l + dl) * elt_bytes, sizeof (h[0]),
/* align */ sizeof (uword));
- memset (v + l*elt_bytes, ~0, dl * elt_bytes);
+ memset (v + l * elt_bytes, ~0, dl * elt_bytes);
}
}
@@ -431,10 +427,10 @@ qhash_set_overflow (void * v, uword elt_bytes,
}
static uword
-qhash_unset_overflow (void * v, uword key, uword bi, uword * n_elts)
+qhash_unset_overflow (void *v, uword key, uword bi, uword * n_elts)
{
- qhash_t * h = qhash_header (v);
- uword * p = hash_get (h->overflow_hash, key);
+ qhash_t *h = qhash_header (v);
+ uword *p = hash_get (h->overflow_hash, key);
uword result;
bi /= QHASH_KEYS_PER_BUCKET;
@@ -458,19 +454,20 @@ qhash_unset_overflow (void * v, uword key, uword bi, uword * n_elts)
always_inline uword
qhash_find_free (uword i, uword valid_mask)
-{ return first_set (~valid_mask & pow2_mask (QHASH_KEYS_PER_BUCKET)); }
+{
+ return first_set (~valid_mask & pow2_mask (QHASH_KEYS_PER_BUCKET));
+}
void *
-_qhash_set_multiple (void * v,
+_qhash_set_multiple (void *v,
uword elt_bytes,
uword * search_keys,
- uword n_search_keys,
- u32 * result_indices)
+ uword n_search_keys, u32 * result_indices)
{
- qhash_t * h = qhash_header (v);
- uword * k, * hash_keys;
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
uword n_left, n_elts, bucket_mask;
- u32 * r;
+ u32 *r;
if (vec_len (v) < n_search_keys)
v = _qhash_resize (v, n_search_keys, elt_bytes);
@@ -483,7 +480,7 @@ _qhash_set_multiple (void * v,
ASSERT (v != 0);
- bucket_mask = pow2_mask (h->log2_hash_size) &~ (QHASH_KEYS_PER_BUCKET - 1);
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
hash_keys = h->hash_keys;
k = search_keys;
@@ -495,8 +492,8 @@ _qhash_set_multiple (void * v,
{
u32 a0, b0, c0, bi0, match0, valid0, free0;
u32 a1, b1, c1, bi1, match1, valid1, free1;
- uword k0, * h0;
- uword k1, * h1;
+ uword k0, *h0;
+ uword k1, *h1;
k0 = k[0];
k1 = k[1];
@@ -506,7 +503,7 @@ _qhash_set_multiple (void * v,
n_left -= 2;
k += 2;
-
+
a0 = a1 = h->hash_seeds[0];
b0 = b1 = h->hash_seeds[1];
c0 = c1 = h->hash_seeds[2];
@@ -554,7 +551,7 @@ _qhash_set_multiple (void * v,
h0 += qhash_min_log2 (match0);
h1 += qhash_min_log2 (match1);
- if (PREDICT_FALSE (! match0 || ! match1))
+ if (PREDICT_FALSE (!match0 || !match1))
goto slow_path2;
h0[0] = k0;
@@ -567,7 +564,7 @@ _qhash_set_multiple (void * v,
continue;
slow_path2:
- if (! match0)
+ if (!match0)
{
n_elts -= 1;
v = qhash_set_overflow (v, elt_bytes, k0, bi0, &n_elts, &r[0]);
@@ -578,7 +575,7 @@ _qhash_set_multiple (void * v,
r[0] = h0 - hash_keys;
qhash_set_valid_elt_mask (h, bi0, valid0);
}
- if (! match1)
+ if (!match1)
{
n_elts -= 1;
v = qhash_set_overflow (v, elt_bytes, k1, bi1, &n_elts, &r[1]);
@@ -595,12 +592,12 @@ _qhash_set_multiple (void * v,
while (n_left >= 1)
{
u32 a0, b0, c0, bi0, match0, valid0, free0;
- uword k0, * h0;
+ uword k0, *h0;
k0 = k[0];
n_left -= 1;
k += 1;
-
+
a0 = h->hash_seeds[0];
b0 = h->hash_seeds[1];
c0 = h->hash_seeds[2];
@@ -630,7 +627,7 @@ _qhash_set_multiple (void * v,
h0 += qhash_min_log2 (match0);
- if (PREDICT_FALSE (! match0))
+ if (PREDICT_FALSE (!match0))
goto slow_path1;
h0[0] = k0;
@@ -652,15 +649,15 @@ _qhash_set_multiple (void * v,
}
static uword
-unset_slow_path (void * v, uword elt_bytes,
+unset_slow_path (void *v, uword elt_bytes,
uword k0, uword bi0, uword valid0, uword match0,
uword * n_elts)
{
- qhash_t * h = qhash_header (v);
+ qhash_t *h = qhash_header (v);
uword i, j = 0, k, l, t = ~0;
- hash_pair_t * p, * found;
+ hash_pair_t *p, *found;
- if (! match0)
+ if (!match0)
{
if (valid0 == QHASH_ALL_VALID)
t = qhash_unset_overflow (v, k0, bi0, n_elts);
@@ -671,10 +668,10 @@ unset_slow_path (void * v, uword elt_bytes,
t = bi0 + qhash_min_log2 (match0);
if (valid0 == QHASH_ALL_VALID
- && i < vec_len (h->overflow_counts)
- && h->overflow_counts[i] > 0)
+ && i < vec_len (h->overflow_counts) && h->overflow_counts[i] > 0)
{
found = 0;
+ /* *INDENT-OFF* */
hash_foreach_pair (p, h->overflow_hash, ({
j = qhash_hash_mix (h, p->key) / QHASH_KEYS_PER_BUCKET;
if (j == i)
@@ -683,6 +680,7 @@ unset_slow_path (void * v, uword elt_bytes,
break;
}
}));
+ /* *INDENT-ON* */
ASSERT (found != 0);
ASSERT (j == i);
@@ -695,9 +693,7 @@ unset_slow_path (void * v, uword elt_bytes,
qhash_set_valid_elt_mask (h, bi0, valid0);
h->hash_keys[t] = k;
- clib_memswap (v + t*elt_bytes,
- v + l*elt_bytes,
- elt_bytes);
+ clib_memswap (v + t * elt_bytes, v + l * elt_bytes, elt_bytes);
t = l;
}
else
@@ -707,25 +703,24 @@ unset_slow_path (void * v, uword elt_bytes,
}
void
-_qhash_unset_multiple (void * v,
+_qhash_unset_multiple (void *v,
uword elt_bytes,
uword * search_keys,
- uword n_search_keys,
- u32 * result_indices)
+ uword n_search_keys, u32 * result_indices)
{
- qhash_t * h = qhash_header (v);
- uword * k, * hash_keys;
+ qhash_t *h = qhash_header (v);
+ uword *k, *hash_keys;
uword n_left, n_elts, bucket_mask;
- u32 * r;
+ u32 *r;
- if (! v)
+ if (!v)
{
uword i;
for (i = 0; i < n_search_keys; i++)
result_indices[i] = ~0;
}
- bucket_mask = pow2_mask (h->log2_hash_size) &~ (QHASH_KEYS_PER_BUCKET - 1);
+ bucket_mask = pow2_mask (h->log2_hash_size) & ~(QHASH_KEYS_PER_BUCKET - 1);
hash_keys = h->hash_keys;
k = search_keys;
@@ -737,8 +732,8 @@ _qhash_unset_multiple (void * v,
{
u32 a0, b0, c0, bi0, match0, valid0;
u32 a1, b1, c1, bi1, match1, valid1;
- uword k0, * h0;
- uword k1, * h1;
+ uword k0, *h0;
+ uword k1, *h1;
k0 = k[0];
k1 = k[1];
@@ -748,7 +743,7 @@ _qhash_unset_multiple (void * v,
n_left -= 2;
k += 2;
-
+
a0 = a1 = h->hash_seeds[0];
b0 = b1 = h->hash_seeds[1];
c0 = c1 = h->hash_seeds[2];
@@ -799,32 +794,30 @@ _qhash_unset_multiple (void * v,
continue;
slow_path2:
- r[0] = unset_slow_path (v, elt_bytes, k0, bi0, valid0, match0,
- &n_elts);
+ r[0] = unset_slow_path (v, elt_bytes, k0, bi0, valid0, match0, &n_elts);
if (bi0 == bi1)
{
/* Search again in same bucket to test new overflow element. */
valid1 = qhash_get_valid_elt_mask (h, bi0);
- if (! match1)
+ if (!match1)
{
match1 = qhash_search_bucket (h1, k1, valid1);
n_elts -= (match1 != 0);
}
}
- r[1] = unset_slow_path (v, elt_bytes, k1, bi1, valid1, match1,
- &n_elts);
+ r[1] = unset_slow_path (v, elt_bytes, k1, bi1, valid1, match1, &n_elts);
r += 2;
}
while (n_left >= 1)
{
u32 a0, b0, c0, bi0, match0, valid0;
- uword k0, * h0;
+ uword k0, *h0;
k0 = k[0];
n_left -= 1;
k += 1;
-
+
a0 = h->hash_seeds[0];
b0 = h->hash_seeds[1];
c0 = h->hash_seeds[2];
@@ -855,3 +848,11 @@ _qhash_unset_multiple (void * v,
h->n_elts = n_elts;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/qhash.h b/vppinfra/vppinfra/qhash.h
index f4ea9386b55..9dbbd971ade 100644
--- a/vppinfra/vppinfra/qhash.h
+++ b/vppinfra/vppinfra/qhash.h
@@ -42,7 +42,8 @@
#include <vppinfra/hash.h>
/* Word hash tables. */
-typedef struct {
+typedef struct
+{
/* Number of elements in hash. */
u32 n_elts;
@@ -52,26 +53,32 @@ typedef struct {
u32 hash_seeds[3];
/* Fall back CLIB hash for overflow in fixed sized buckets. */
- uword * overflow_hash;
+ uword *overflow_hash;
- u32 * overflow_counts, * overflow_free_indices;
+ u32 *overflow_counts, *overflow_free_indices;
- u8 * hash_key_valid_bitmap;
+ u8 *hash_key_valid_bitmap;
- uword * hash_keys;
+ uword *hash_keys;
} qhash_t;
always_inline qhash_t *
-qhash_header (void * v)
-{ return vec_header (v, sizeof (qhash_t)); }
+qhash_header (void *v)
+{
+ return vec_header (v, sizeof (qhash_t));
+}
always_inline uword
-qhash_elts (void * v)
-{ return v ? qhash_header (v)->n_elts : 0; }
+qhash_elts (void *v)
+{
+ return v ? qhash_header (v)->n_elts : 0;
+}
always_inline uword
-qhash_n_overflow (void * v)
-{ return v ? hash_elts (qhash_header (v)->overflow_hash) : 0; }
+qhash_n_overflow (void *v)
+{
+ return v ? hash_elts (qhash_header (v)->overflow_hash) : 0;
+}
#define QHASH_LOG2_KEYS_PER_BUCKET 2
#define QHASH_KEYS_PER_BUCKET (1 << QHASH_LOG2_KEYS_PER_BUCKET)
@@ -97,7 +104,6 @@ qhash_hash_mix (qhash_t * h, uword key)
#define qhash_resize(v,n) (v) = _qhash_resize ((v), (n), sizeof ((v)[0]))
-/* FIXME */
#define qhash_foreach(var,v,body)
#define qhash_set_multiple(v,keys,n,results) \
@@ -126,36 +132,38 @@ qhash_hash_mix (qhash_t * h, uword key)
_qhash_unset_k; \
})
-void *
-_qhash_resize (void * v, uword length, uword elt_bytes);
+void *_qhash_resize (void *v, uword length, uword elt_bytes);
/* Lookup multiple keys in the same hash table. */
void
-qhash_get_multiple (void * v,
+qhash_get_multiple (void *v,
uword * search_keys,
- uword n_search_keys,
- u32 * result_indices);
+ uword n_search_keys, u32 * result_indices);
/* Lookup multiple keys in the same hash table.
Returns index of first matching key. */
u32
-qhash_get_first_match (void * v,
+qhash_get_first_match (void *v,
uword * search_keys,
- uword n_search_keys,
- uword * matching_key);
+ uword n_search_keys, uword * matching_key);
/* Set/unset helper functions. */
-void *
-_qhash_set_multiple (void * v,
- uword elt_bytes,
- uword * search_keys,
- uword n_search_keys,
- u32 * result_indices);
+void *_qhash_set_multiple (void *v,
+ uword elt_bytes,
+ uword * search_keys,
+ uword n_search_keys, u32 * result_indices);
void
-_qhash_unset_multiple (void * v,
+_qhash_unset_multiple (void *v,
uword elt_bytes,
uword * search_keys,
- uword n_search_keys,
- u32 * result_indices);
+ uword n_search_keys, u32 * result_indices);
#endif /* included_qhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
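
For reference, a minimal usage sketch of the qhash API declared above; it is not part of the patch, and the slot-index semantics of the result array (an index into the value vector, ~0 for overflow/not-found) are assumptions based on the qhash.c code earlier in this diff. The function name below is illustrative only.

/* Hedged sketch: a qhash mapping uword keys to u32 values. */
#include <vppinfra/qhash.h>

static void
qhash_usage_sketch (void)
{
  u32 *values = 0;		/* value vector managed by qhash */
  uword keys[2] = { 10, 20 };
  u32 results[2];
  uword i;

  qhash_resize (values, 2);
  qhash_set_multiple (values, keys, 2, results);
  for (i = 0; i < 2; i++)
    if (results[i] != (u32) ~0)
      values[results[i]] = 100 * (u32) i;	/* store values in returned slots */

  qhash_get_multiple (values, keys, 2, results);
  /* results[i] now indexes the stored value, or ~0 if the key is absent. */
}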
diff --git a/vppinfra/vppinfra/qsort.c b/vppinfra/vppinfra/qsort.c
index b9732aaa29c..2faa5897eb2 100644
--- a/vppinfra/vppinfra/qsort.c
+++ b/vppinfra/vppinfra/qsort.c
@@ -33,17 +33,18 @@
* bytes. The MTHREShold is where we stop finding a better median.
*/
-#define THRESH 4 /* threshold for insertion */
-#define MTHRESH 6 /* threshold for median */
+#define THRESH 4 /* threshold for insertion */
+#define MTHRESH 6 /* threshold for median */
-typedef struct {
+typedef struct
+{
word qsz; /* size of each record */
word thresh; /* THRESHold in chars */
word mthresh; /* MTHRESHold in chars */
- int (*qcmp) (const void *, const void *); /* the comparison routine */
+ int (*qcmp) (const void *, const void *); /* the comparison routine */
} qst_t;
-static void qst (qst_t * q, char * base, char *max);
+static void qst (qst_t * q, char *base, char *max);
/*
* qqsort: First, set up some global parameters for qst to share.
@@ -52,7 +53,7 @@ static void qst (qst_t * q, char * base, char *max);
*/
void
-qsort (void * base, uword n, uword size,
+qsort (void *base, uword n, uword size,
int (*compar) (const void *, const void *))
{
char *i;
@@ -62,7 +63,7 @@ qsort (void * base, uword n, uword size,
char *min;
char c;
char *max;
- qst_t _q, * q = &_q;
+ qst_t _q, *q = &_q;
if (n <= 1)
return;
@@ -72,29 +73,35 @@ qsort (void * base, uword n, uword size,
q->thresh = q->qsz * THRESH;
q->mthresh = q->qsz * MTHRESH;
max = base + n * q->qsz;
- if (n >= THRESH) {
- qst(q, base, max);
- hi = base + q->thresh;
- } else {
- hi = max;
- }
+ if (n >= THRESH)
+ {
+ qst (q, base, max);
+ hi = base + q->thresh;
+ }
+ else
+ {
+ hi = max;
+ }
/*
* First put smallest element, which must be in the first THRESH, in the
* first position as a sentinel. This is done just by searching the
* first THRESH elements (or the first n if n < THRESH), finding the min,
* and swapping it into the first position.
*/
- for (j = lo = base; (lo += q->qsz) < hi;) {
- if ((*compar) (j, lo) > 0)
- j = lo;
- }
- if (j != base) { /* swap j into place */
- for (i = base, hi = base + q->qsz; i < hi;) {
- c = *j;
- *j++ = *i;
- *i++ = c;
+ for (j = lo = base; (lo += q->qsz) < hi;)
+ {
+ if ((*compar) (j, lo) > 0)
+ j = lo;
+ }
+ if (j != base)
+ { /* swap j into place */
+ for (i = base, hi = base + q->qsz; i < hi;)
+ {
+ c = *j;
+ *j++ = *i;
+ *i++ = c;
+ }
}
- }
/*
* With our sentinel in place, we now run the following hyper-fast
* insertion sort. For each remaining element, min, from [1] to [n-1],
@@ -102,17 +109,20 @@ qsort (void * base, uword n, uword size,
* the standard insertion sort shift on a character at a time basis for
* each element in the frob.
*/
- for (min = base; (hi = min += q->qsz) < max;) {
- while ((*q->qcmp) (hi -= q->qsz, min) > 0);
- if ((hi += q->qsz) != min) {
- for (lo = min + q->qsz; --lo >= min;) {
- c = *lo;
- for (i = j = lo; (j -= q->qsz) >= hi; i = j)
- *i = *j;
- *i = c;
- }
+ for (min = base; (hi = min += q->qsz) < max;)
+ {
+ while ((*q->qcmp) (hi -= q->qsz, min) > 0);
+ if ((hi += q->qsz) != min)
+ {
+ for (lo = min + q->qsz; --lo >= min;)
+ {
+ c = *lo;
+ for (i = j = lo; (j -= q->qsz) >= hi; i = j)
+ *i = *j;
+ *i = c;
+ }
+ }
}
- }
}
@@ -132,7 +142,7 @@ qsort (void * base, uword n, uword size,
*/
static void
-qst(qst_t *q, char *base, char *max)
+qst (qst_t * q, char *base, char *max)
{
char *i;
char *j;
@@ -140,91 +150,120 @@ qst(qst_t *q, char *base, char *max)
char *mid;
int ii;
char c;
- char *tmp;
- int lo;
- int hi;
+ char *tmp;
+ int lo;
+ int hi;
int qsz = q->qsz;
- lo = (int)(max - base); /* number of elements as chars */
- do {
- /*
- * At the top here, lo is the number of characters of elements in the
- * current partition. (Which should be max - base). Find the median
- * of the first, last, and middle element and make that the middle
- * element. Set j to largest of first and middle. If max is larger
- * than that guy, then it's that guy, else compare max with loser of
- * first and take larger. Things are set up to prefer the middle,
- * then the first in case of ties.
- */
- mid = i = base + qsz * ((unsigned) (lo / qsz) >> 1);
- if (lo >= q->mthresh) {
- j = ((*q->qcmp) ((jj = base), i) > 0 ? jj : i);
- if ((*q->qcmp) (j, (tmp = max - qsz)) > 0) {
- /* switch to first loser */
- j = (j == jj ? i : jj);
- if ((*q->qcmp) (j, tmp) < 0)
- j = tmp;
- }
- if (j != i) {
- ii = qsz;
- do {
- c = *i;
- *i++ = *j;
- *j++ = c;
- } while (--ii);
- }
- }
- /* Semi-standard quicksort partitioning/swapping */
- for (i = base, j = max - qsz;;) {
- while (i < mid && (*q->qcmp) (i, mid) <= 0)
- i += qsz;
- while (j > mid) {
- if ((*q->qcmp) (mid, j) <= 0) {
- j -= qsz;
- continue;
+ lo = (int) (max - base); /* number of elements as chars */
+ do
+ {
+ /*
+ * At the top here, lo is the number of characters of elements in the
+ * current partition. (Which should be max - base). Find the median
+ * of the first, last, and middle element and make that the middle
+ * element. Set j to largest of first and middle. If max is larger
+ * than that guy, then it's that guy, else compare max with loser of
+ * first and take larger. Things are set up to prefer the middle,
+ * then the first in case of ties.
+ */
+ mid = i = base + qsz * ((unsigned) (lo / qsz) >> 1);
+ if (lo >= q->mthresh)
+ {
+ j = ((*q->qcmp) ((jj = base), i) > 0 ? jj : i);
+ if ((*q->qcmp) (j, (tmp = max - qsz)) > 0)
+ {
+ /* switch to first loser */
+ j = (j == jj ? i : jj);
+ if ((*q->qcmp) (j, tmp) < 0)
+ j = tmp;
+ }
+ if (j != i)
+ {
+ ii = qsz;
+ do
+ {
+ c = *i;
+ *i++ = *j;
+ *j++ = c;
+ }
+ while (--ii);
+ }
}
- tmp = i + qsz; /* value of i after swap */
- if (i == mid) { /* j <-> mid, new mid is j */
- mid = jj = j;
- } else { /* i <-> j */
- jj = j;
- j -= qsz;
+ /* Semi-standard quicksort partitioning/swapping */
+ for (i = base, j = max - qsz;;)
+ {
+ while (i < mid && (*q->qcmp) (i, mid) <= 0)
+ i += qsz;
+ while (j > mid)
+ {
+ if ((*q->qcmp) (mid, j) <= 0)
+ {
+ j -= qsz;
+ continue;
+ }
+ tmp = i + qsz; /* value of i after swap */
+ if (i == mid)
+ { /* j <-> mid, new mid is j */
+ mid = jj = j;
+ }
+ else
+ { /* i <-> j */
+ jj = j;
+ j -= qsz;
+ }
+ goto swap;
+ }
+ if (i == mid)
+ {
+ break;
+ }
+ else
+ { /* i <-> mid, new mid is i */
+ jj = mid;
+ tmp = mid = i; /* value of i after swap */
+ j -= qsz;
+ }
+ swap:
+ ii = qsz;
+ do
+ {
+ c = *i;
+ *i++ = *jj;
+ *jj++ = c;
+ }
+ while (--ii);
+ i = tmp;
+ }
+ /*
+ * Look at sizes of the two partitions, do the smaller one first by
+ * recursion, then do the larger one by making sure lo is its size,
+ * base and max are updated correctly, and branching back. But only
+ * repeat (recursively or by branching) if the partition is of at
+ * least size THRESH.
+ */
+ i = (j = mid) + qsz;
+ if ((lo = (int) (j - base)) <= (hi = (int) (max - i)))
+ {
+ if (lo >= q->thresh)
+ qst (q, base, j);
+ base = i;
+ lo = hi;
+ }
+ else
+ {
+ if (hi >= q->thresh)
+ qst (q, i, max);
+ max = j;
}
- goto swap;
- }
- if (i == mid) {
- break;
- } else { /* i <-> mid, new mid is i */
- jj = mid;
- tmp = mid = i; /* value of i after swap */
- j -= qsz;
- }
- swap:
- ii = qsz;
- do {
- c = *i;
- *i++ = *jj;
- *jj++ = c;
- } while (--ii);
- i = tmp;
- }
- /*
- * Look at sizes of the two partitions, do the smaller one first by
- * recursion, then do the larger one by making sure lo is its size,
- * base and max are update correctly, and branching back. But only
- * repeat (recursively or by branching) if the partition is of at
- * least size THRESH.
- */
- i = (j = mid) + qsz;
- if ((lo = (int)(j - base)) <= (hi = (int)(max - i))) {
- if (lo >= q->thresh)
- qst(q, base, j);
- base = i;
- lo = hi;
- } else {
- if (hi >= q->thresh)
- qst(q, i, max);
- max = j;
}
- } while (lo >= q->thresh);
+ while (lo >= q->thresh);
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
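
For reference, a minimal sketch of calling the vppinfra qsort above with a user comparator; it is not part of the patch, and the helper names are illustrative only. The comparator follows the usual negative/zero/positive contract implied by the declaration.

/* Hedged sketch: sort a small u32 array with the qsort shown above. */
#include <vppinfra/clib.h>

static int
u32_compare (const void *a, const void *b)
{
  const u32 x = *(const u32 *) a;
  const u32 y = *(const u32 *) b;
  return x < y ? -1 : (x > y ? 1 : 0);
}

static void
qsort_usage_sketch (void)
{
  u32 data[] = { 5, 1, 4, 2, 3 };
  qsort (data, sizeof (data) / sizeof (data[0]), sizeof (data[0]),
	 u32_compare);
  /* data is now { 1, 2, 3, 4, 5 } */
}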
diff --git a/vppinfra/vppinfra/random.c b/vppinfra/vppinfra/random.c
index fb58680cb50..fa5bcc8c78a 100644
--- a/vppinfra/vppinfra/random.c
+++ b/vppinfra/vppinfra/random.c
@@ -41,3 +41,11 @@
Value can be overridden by platform code from e.g.
machine's clock count register. */
u32 standalone_random_default_seed = 1;
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/random.h b/vppinfra/vppinfra/random.h
index 1347c47c6f7..fad05898350 100644
--- a/vppinfra/vppinfra/random.h
+++ b/vppinfra/vppinfra/random.h
@@ -39,7 +39,7 @@
#define included_random_h
#include <vppinfra/clib.h>
-#include <vppinfra/vec.h> /* for vec_resize */
+#include <vppinfra/vec.h> /* for vec_resize */
#include <vppinfra/format.h> /* for unformat_input_t */
/** \file
@@ -48,12 +48,12 @@
This specific random number generator is described in
"Numerical Recipes in C", 2nd edition, page 284. If you need
random numbers with really excellent statistics, take a look
- at Chapter 7...
-
+ at Chapter 7...
+
By definition, a linear congruential random number generator
is of the form: rand[i+1] = a*rand[i] + c (mod m) for specific
- values of (a,c,m).
-
+ values of (a,c,m).
+
In this case, choose m = 2**32 and use the low-order 32-bits of
the 64-bit product a*N[i]. Knuth suggests the use of a=1664525,
H.W. Lewis has tested C=1013904223 extensively. This routine is
@@ -65,25 +65,33 @@
*/
/** \brief 32-bit random number generator */
-always_inline u32 random_u32 (u32 * seed)
+always_inline u32
+random_u32 (u32 * seed)
{
*seed = (1664525 * *seed) + 1013904223;
return *seed;
}
+
/* External test routine. */
int test_random_main (unformat_input_t * input);
/** \brief Maximum value returned by random_u32() */
-always_inline u32 random_u32_max (void)
-{ return 0xffffffff; }
+always_inline u32
+random_u32_max (void)
+{
+ return 0xffffffff;
+}
#ifdef CLIB_UNIX
#include <unistd.h> /* for getpid */
/** \brief Default random seed (unix/linux user-mode) */
-always_inline uword random_default_seed (void)
-{ return getpid (); }
+always_inline uword
+random_default_seed (void)
+{
+ return getpid ();
+}
#endif
@@ -92,16 +100,22 @@ always_inline uword random_default_seed (void)
#include <linux/sched.h> /* for jiffies */
/** \brief Default random seed (Linux kernel) */
-always_inline uword random_default_seed (void)
-{ return jiffies; }
+always_inline uword
+random_default_seed (void)
+{
+ return jiffies;
+}
#endif
#ifdef CLIB_STANDALONE
extern u32 standalone_random_default_seed;
-always_inline u32 random_default_seed (void)
-{ return standalone_random_default_seed; }
+always_inline u32
+random_default_seed (void)
+{
+ return standalone_random_default_seed;
+}
#endif
/** \brief 64-bit random number generator
@@ -130,8 +144,11 @@ random_uword (u32 * seed)
}
/** \brief Generate f64 random number in the interval [0,1] */
-always_inline f64 random_f64 (u32 * seed)
-{ return (f64) random_u32 (seed) / (f64) random_u32_max (); }
+always_inline f64
+random_f64 (u32 * seed)
+{
+ return (f64) random_u32 (seed) / (f64) random_u32_max ();
+}
/** \brief Generate random character vector
@@ -142,8 +159,8 @@ always_inline f64 random_f64 (u32 * seed)
always_inline u8 *
random_string (u32 * seed, uword len)
{
- u8 * alphabet = (u8 *) "abcdefghijklmnopqrstuvwxyz";
- u8 * s = 0;
+ u8 *alphabet = (u8 *) "abcdefghijklmnopqrstuvwxyz";
+ u8 *s = 0;
word i;
vec_resize (s, len);
@@ -154,3 +171,11 @@ random_string (u32 * seed, uword len)
}
#endif /* included_random_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
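
For reference, a minimal sketch of the generator declared above, assuming a unix user-mode build so that random_default_seed () returns getpid (); it is not part of the patch.

/* Hedged sketch: seed the LCG and draw a few values. */
#include <vppinfra/random.h>

static void
random_usage_sketch (void)
{
  u32 seed = random_default_seed ();	/* getpid () in unix user mode */
  u32 r = random_u32 (&seed);		/* seed <- 1664525*seed + 1013904223 (mod 2^32) */
  f64 f = random_f64 (&seed);		/* uniform in [0, 1] */
  u8 *s = random_string (&seed, 8);	/* vector of 8 lowercase letters */

  (void) r;
  (void) f;
  vec_free (s);
}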
diff --git a/vppinfra/vppinfra/random_buffer.c b/vppinfra/vppinfra/random_buffer.c
index 5bcb12345c1..df03698066c 100644
--- a/vppinfra/vppinfra/random_buffer.c
+++ b/vppinfra/vppinfra/random_buffer.c
@@ -38,9 +38,10 @@
#include <vppinfra/random_buffer.h>
/* Fill random buffer. */
-void clib_random_buffer_fill (clib_random_buffer_t * b, uword n_words)
+void
+clib_random_buffer_fill (clib_random_buffer_t * b, uword n_words)
{
- uword * w, n = n_words;
+ uword *w, n = n_words;
if (n < 256)
n = 256;
@@ -48,14 +49,17 @@ void clib_random_buffer_fill (clib_random_buffer_t * b, uword n_words)
n = round_pow2 (n, 2 << ISAAC_LOG2_SIZE);
vec_add2 (b->buffer, w, n);
- do {
- isaac2 (b->ctx, w);
- w += 2 * ISAAC_SIZE;
- n -= 2 * ISAAC_SIZE;
- } while (n > 0);
+ do
+ {
+ isaac2 (b->ctx, w);
+ w += 2 * ISAAC_SIZE;
+ n -= 2 * ISAAC_SIZE;
+ }
+ while (n > 0);
}
-void clib_random_buffer_init (clib_random_buffer_t * b, uword seed)
+void
+clib_random_buffer_init (clib_random_buffer_t * b, uword seed)
{
uword i, j;
@@ -72,3 +76,11 @@ void clib_random_buffer_init (clib_random_buffer_t * b, uword seed)
isaac_init (&b->ctx[i], s);
}
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/random_buffer.h b/vppinfra/vppinfra/random_buffer.h
index 23988e3f0f1..eb318548b0a 100644
--- a/vppinfra/vppinfra/random_buffer.h
+++ b/vppinfra/vppinfra/random_buffer.h
@@ -41,22 +41,25 @@
#include <vppinfra/clib.h>
#include <vppinfra/random_isaac.h>
-typedef struct {
+typedef struct
+{
/* Two parallel ISAAC contexts for speed. */
isaac_t ctx[2];
/* Random buffer. */
- uword * buffer;
+ uword *buffer;
/* Cache up to 1 word worth of bytes for random data
less than one word at a time. */
uword n_cached_bytes;
- union {
+ union
+ {
u8 cached_bytes[sizeof (uword)];
uword cached_word;
};
-} clib_random_buffer_t;
+}
+clib_random_buffer_t;
always_inline void
clib_random_buffer_free (clib_random_buffer_t * b)
@@ -105,3 +108,11 @@ clib_random_buffer_get_data (clib_random_buffer_t * b, uword n_bytes)
}
#endif /* included_clib_random_buffer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
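
For reference, a minimal sketch of the random-buffer API above; it is not part of the patch. It assumes clib_random_buffer_get_data () returns a pointer to n_bytes of random data valid until the next call, which matches the fill/cache logic in random_buffer.c but is not shown verbatim in this diff.

/* Hedged sketch: initialize, draw 16 random bytes, free. */
#include <vppinfra/random_buffer.h>

static void
random_buffer_usage_sketch (void)
{
  clib_random_buffer_t b;
  void *p;

  clib_random_buffer_init (&b, 0x12345678);
  p = clib_random_buffer_get_data (&b, 16);	/* refills from ISAAC as needed */
  (void) p;
  clib_random_buffer_free (&b);
}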
diff --git a/vppinfra/vppinfra/random_isaac.c b/vppinfra/vppinfra/random_isaac.c
index ad2c165da46..6f00fc32424 100644
--- a/vppinfra/vppinfra/random_isaac.c
+++ b/vppinfra/vppinfra/random_isaac.c
@@ -68,9 +68,10 @@
*(r++) = b = ind32(mm,y>>ISAAC_LOG2_SIZE) + x; \
}
-void isaac (isaac_t * ctx, uword * results)
+void
+isaac (isaac_t * ctx, uword * results)
{
- u32 a, b, c, x, y, * m, * mm, * m2, * r, * mend;
+ u32 a, b, c, x, y, *m, *mm, *m2, *r, *mend;
mm = ctx->memory;
r = results;
@@ -83,19 +84,19 @@ void isaac (isaac_t * ctx, uword * results)
m = mm;
while (m < mend)
{
- rngstep32 (a<<13, a, b, mm, m, m2, r, x, y);
- rngstep32 (a>>6 , a, b, mm, m, m2, r, x, y);
- rngstep32 (a<<2 , a, b, mm, m, m2, r, x, y);
- rngstep32 (a>>16, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a << 13, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 6, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a << 2, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 16, a, b, mm, m, m2, r, x, y);
}
m2 = mm;
while (m2 < mend)
{
- rngstep32 (a<<13, a, b, mm, m, m2, r, x, y);
- rngstep32 (a>>6 , a, b, mm, m, m2, r, x, y);
- rngstep32 (a<<2 , a, b, mm, m, m2, r, x, y);
- rngstep32 (a>>16, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a << 13, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 6, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a << 2, a, b, mm, m, m2, r, x, y);
+ rngstep32 (a >> 16, a, b, mm, m, m2, r, x, y);
}
ctx->a = a;
@@ -104,13 +105,15 @@ void isaac (isaac_t * ctx, uword * results)
}
/* Perform 2 isaac runs with different contexts simultaneously. */
-void isaac2 (isaac_t * ctx, uword * results)
+void
+isaac2 (isaac_t * ctx, uword * results)
{
#define _(n) \
u32 a##n, b##n, c##n, x##n, y##n, * m##n, * mm##n, * m2##n, * r##n, * mend##n
- _ (0); _ (1);
- (void)mend1; /* "set but unused variable" error on mend1 with gcc 4.9 */
+ _(0);
+ _(1);
+ (void) mend1; /* "set but unused variable" error on mend1 with gcc 4.9 */
#undef _
#define _(n) \
@@ -125,34 +128,35 @@ do { \
m##n = mm##n; \
} while (0)
- _ (0); _ (1);
+ _(0);
+ _(1);
#undef _
while (m0 < mend0)
{
- rngstep32 (a0<<13, a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1<<13, a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep32 (a0>>6 , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1>>6 , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep32 (a0<<2 , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1<<2 , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep32 (a0>>16, a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1>>16, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 << 13, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 13, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 6, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 6, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 << 2, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 2, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 16, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 16, a1, b1, mm1, m1, m21, r1, x1, y1);
}
m20 = mm0;
m21 = mm1;
while (m20 < mend0)
{
- rngstep32 (a0<<13, a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1<<13, a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep32 (a0>>6 , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1>>6 , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep32 (a0<<2 , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1<<2 , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep32 (a0>>16, a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep32 (a1>>16, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 << 13, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 13, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 6, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 6, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 << 2, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 << 2, a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep32 (a0 >> 16, a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep32 (a1 >> 16, a1, b1, mm1, m1, m21, r1, x1, y1);
}
ctx[0].a = a0;
@@ -175,39 +179,64 @@ do { \
h^=a>>9; c+=h; a+=b; \
}
-void isaac_init (isaac_t * ctx, uword * seeds)
+void
+isaac_init (isaac_t * ctx, uword * seeds)
{
- word i;
- u32 a, b, c, d, e, f, g, h, * m, * r;
-
- ctx->a = ctx->b = ctx->c = 0;
- m = ctx->memory;
- r = seeds;
-
- a = b = c = d = e = f = g = h = 0x9e3779b9; /* the golden ratio */
-
- for (i = 0; i < 4; ++i) /* scramble it */
- mix32(a,b,c,d,e,f,g,h);
-
- /* initialize using the contents of r[] as the seed */
- for (i=0; i<ISAAC_SIZE; i+=8)
- {
- a+=r[i ]; b+=r[i+1]; c+=r[i+2]; d+=r[i+3];
- e+=r[i+4]; f+=r[i+5]; g+=r[i+6]; h+=r[i+7];
- mix32(a,b,c,d,e,f,g,h);
- m[i ]=a; m[i+1]=b; m[i+2]=c; m[i+3]=d;
- m[i+4]=e; m[i+5]=f; m[i+6]=g; m[i+7]=h;
- }
-
- /* do a second pass to make all of the seed affect all of m */
- for (i=0; i<ISAAC_SIZE; i+=8)
- {
- a+=m[i ]; b+=m[i+1]; c+=m[i+2]; d+=m[i+3];
- e+=m[i+4]; f+=m[i+5]; g+=m[i+6]; h+=m[i+7];
- mix32(a,b,c,d,e,f,g,h);
- m[i ]=a; m[i+1]=b; m[i+2]=c; m[i+3]=d;
- m[i+4]=e; m[i+5]=f; m[i+6]=g; m[i+7]=h;
- }
+ word i;
+ u32 a, b, c, d, e, f, g, h, *m, *r;
+
+ ctx->a = ctx->b = ctx->c = 0;
+ m = ctx->memory;
+ r = seeds;
+
+ a = b = c = d = e = f = g = h = 0x9e3779b9; /* the golden ratio */
+
+ for (i = 0; i < 4; ++i) /* scramble it */
+ mix32 (a, b, c, d, e, f, g, h);
+
+ /* initialize using the contents of r[] as the seed */
+ for (i = 0; i < ISAAC_SIZE; i += 8)
+ {
+ a += r[i];
+ b += r[i + 1];
+ c += r[i + 2];
+ d += r[i + 3];
+ e += r[i + 4];
+ f += r[i + 5];
+ g += r[i + 6];
+ h += r[i + 7];
+ mix32 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
+ }
+
+ /* do a second pass to make all of the seed affect all of m */
+ for (i = 0; i < ISAAC_SIZE; i += 8)
+ {
+ a += m[i];
+ b += m[i + 1];
+ c += m[i + 2];
+ d += m[i + 3];
+ e += m[i + 4];
+ f += m[i + 5];
+ g += m[i + 6];
+ h += m[i + 7];
+ mix32 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
+ }
}
#endif /* uword_bits == 32 */
@@ -222,9 +251,10 @@ void isaac_init (isaac_t * ctx, uword * seeds)
*(r++) = b = ind64(mm,y>>ISAAC_LOG2_SIZE) + x; \
}
-void isaac (isaac_t * ctx, uword * results)
+void
+isaac (isaac_t * ctx, uword * results)
{
- u64 a, b, c, x, y, * m, * mm, * m2, * r, * mend;
+ u64 a, b, c, x, y, *m, *mm, *m2, *r, *mend;
mm = ctx->memory;
r = results;
@@ -237,19 +267,19 @@ void isaac (isaac_t * ctx, uword * results)
m = mm;
while (m < mend)
{
- rngstep64 (~(a^(a<<21)), a, b, mm, m, m2, r, x, y);
- rngstep64 ( a^(a>>5) , a, b, mm, m, m2, r, x, y);
- rngstep64 ( a^(a<<12) , a, b, mm, m, m2, r, x, y);
- rngstep64 ( a^(a>>33) , a, b, mm, m, m2, r, x, y);
+ rngstep64 (~(a ^ (a << 21)), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 5), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a << 12), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 33), a, b, mm, m, m2, r, x, y);
}
m2 = mm;
while (m2 < mend)
{
- rngstep64 (~(a^(a<<21)), a, b, mm, m, m2, r, x, y);
- rngstep64 ( a^(a>>5) , a, b, mm, m, m2, r, x, y);
- rngstep64 ( a^(a<<12) , a, b, mm, m, m2, r, x, y);
- rngstep64 ( a^(a>>33) , a, b, mm, m, m2, r, x, y);
+ rngstep64 (~(a ^ (a << 21)), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 5), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a << 12), a, b, mm, m, m2, r, x, y);
+ rngstep64 (a ^ (a >> 33), a, b, mm, m, m2, r, x, y);
}
ctx->a = a;
@@ -258,12 +288,14 @@ void isaac (isaac_t * ctx, uword * results)
}
/* Perform 2 isaac runs with different contexts simultaneously. */
-void isaac2 (isaac_t * ctx, uword * results)
+void
+isaac2 (isaac_t * ctx, uword * results)
{
#define _(n) \
u64 a##n, b##n, c##n, x##n, y##n, * m##n, * mm##n, * m2##n, * r##n, * mend##n
- _ (0); _ (1);
+ _(0);
+ _(1);
#undef _
@@ -279,7 +311,8 @@ do { \
m##n = mm##n; \
} while (0)
- _ (0); _ (1);
+ _(0);
+ _(1);
#undef _
@@ -287,28 +320,28 @@ do { \
while (m0 < mend0)
{
- rngstep64 (~(a0^(a0<<21)), a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 (~(a1^(a1<<21)), a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep64 ( a0^(a0>>5) , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 ( a1^(a1>>5) , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep64 ( a0^(a0<<12) , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 ( a1^(a1<<12) , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep64 ( a0^(a0>>33) , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 ( a1^(a1>>33) , a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (~(a0 ^ (a0 << 21)), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (~(a1 ^ (a1 << 21)), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 5), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 5), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 << 12), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 << 12), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 33), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 33), a1, b1, mm1, m1, m21, r1, x1, y1);
}
m20 = mm0;
m21 = mm1;
while (m20 < mend0)
{
- rngstep64 (~(a0^(a0<<21)), a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 (~(a1^(a1<<21)), a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep64 ( a0^(a0>>5) , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 ( a1^(a1>>5) , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep64 ( a0^(a0<<12) , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 ( a1^(a1<<12) , a1, b1, mm1, m1, m21, r1, x1, y1);
- rngstep64 ( a0^(a0>>33) , a0, b0, mm0, m0, m20, r0, x0, y0);
- rngstep64 ( a1^(a1>>33) , a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (~(a0 ^ (a0 << 21)), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (~(a1 ^ (a1 << 21)), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 5), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 5), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 << 12), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 << 12), a1, b1, mm1, m1, m21, r1, x1, y1);
+ rngstep64 (a0 ^ (a0 >> 33), a0, b0, mm0, m0, m20, r0, x0, y0);
+ rngstep64 (a1 ^ (a1 >> 33), a1, b1, mm1, m1, m21, r1, x1, y1);
}
ctx[0].a = a0;
@@ -331,38 +364,71 @@ do { \
h-=d; e^=g<<14; g+=h; \
}
-void isaac_init (isaac_t * ctx, uword * seeds)
+void
+isaac_init (isaac_t * ctx, uword * seeds)
{
word i;
- u64 a, b, c, d, e, f, g, h, * m, * r;
+ u64 a, b, c, d, e, f, g, h, *m, *r;
ctx->a = ctx->b = ctx->c = 0;
m = ctx->memory;
r = seeds;
- a = b = c = d = e = f = g = h = 0x9e3779b97f4a7c13LL; /* the golden ratio */
+ a = b = c = d = e = f = g = h = 0x9e3779b97f4a7c13LL; /* the golden ratio */
- for (i=0; i<4; ++i) /* scramble it */
- mix64(a,b,c,d,e,f,g,h);
+ for (i = 0; i < 4; ++i) /* scramble it */
+ mix64 (a, b, c, d, e, f, g, h);
- for (i=0; i<ISAAC_SIZE; i+=8) /* fill in mm[] with messy stuff */
+ for (i = 0; i < ISAAC_SIZE; i += 8) /* fill in mm[] with messy stuff */
{
- a+=r[i ]; b+=r[i+1]; c+=r[i+2]; d+=r[i+3];
- e+=r[i+4]; f+=r[i+5]; g+=r[i+6]; h+=r[i+7];
- mix64(a,b,c,d,e,f,g,h);
- m[i ]=a; m[i+1]=b; m[i+2]=c; m[i+3]=d;
- m[i+4]=e; m[i+5]=f; m[i+6]=g; m[i+7]=h;
+ a += r[i];
+ b += r[i + 1];
+ c += r[i + 2];
+ d += r[i + 3];
+ e += r[i + 4];
+ f += r[i + 5];
+ g += r[i + 6];
+ h += r[i + 7];
+ mix64 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
}
/* do a second pass to make all of the seed affect all of mm */
- for (i=0; i<ISAAC_SIZE; i+=8)
+ for (i = 0; i < ISAAC_SIZE; i += 8)
{
- a+=m[i ]; b+=m[i+1]; c+=m[i+2]; d+=m[i+3];
- e+=m[i+4]; f+=m[i+5]; g+=m[i+6]; h+=m[i+7];
- mix64(a,b,c,d,e,f,g,h);
- m[i ]=a; m[i+1]=b; m[i+2]=c; m[i+3]=d;
- m[i+4]=e; m[i+5]=f; m[i+6]=g; m[i+7]=h;
+ a += m[i];
+ b += m[i + 1];
+ c += m[i + 2];
+ d += m[i + 3];
+ e += m[i + 4];
+ f += m[i + 5];
+ g += m[i + 6];
+ h += m[i + 7];
+ mix64 (a, b, c, d, e, f, g, h);
+ m[i] = a;
+ m[i + 1] = b;
+ m[i + 2] = c;
+ m[i + 3] = d;
+ m[i + 4] = e;
+ m[i + 5] = f;
+ m[i + 6] = g;
+ m[i + 7] = h;
}
}
#endif /* uword_bits == 64 */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/random_isaac.h b/vppinfra/vppinfra/random_isaac.h
index d3a844c3e44..803fbd621e2 100644
--- a/vppinfra/vppinfra/random_isaac.h
+++ b/vppinfra/vppinfra/random_isaac.h
@@ -51,14 +51,15 @@
#ifndef included_random_isaac_h
#define included_random_isaac_h
-#include <vppinfra/clib.h> /* for u32/u64 */
+#include <vppinfra/clib.h> /* for u32/u64 */
#include <vppinfra/format.h> /* for unformat_input_t */
/* Bob recommends 8 for crypto, 4 for simulations */
#define ISAAC_LOG2_SIZE (4)
#define ISAAC_SIZE (1 << ISAAC_LOG2_SIZE)
-typedef struct {
+typedef struct
+{
uword memory[ISAAC_SIZE];
uword a, b, c;
} isaac_t;
@@ -70,3 +71,11 @@ void isaac_init (isaac_t * ctx, uword * results);
int test_isaac_main (unformat_input_t * input);
#endif /* included_random_isaac_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
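
For reference, a minimal sketch of driving one ISAAC context through the API declared above; it is not part of the patch. isaac_init () consumes ISAAC_SIZE seed words and isaac () produces ISAAC_SIZE result words, matching the loops in random_isaac.c.

/* Hedged sketch: seed a single ISAAC context and generate one batch. */
#include <vppinfra/random_isaac.h>

static void
isaac_usage_sketch (void)
{
  isaac_t ctx;
  uword seeds[ISAAC_SIZE];
  uword results[ISAAC_SIZE];
  uword i;

  for (i = 0; i < ISAAC_SIZE; i++)
    seeds[i] = i;		/* any seed material */

  isaac_init (&ctx, seeds);
  isaac (&ctx, results);	/* results now holds ISAAC_SIZE random words */
}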
diff --git a/vppinfra/vppinfra/serialize.c b/vppinfra/vppinfra/serialize.c
index 9c6987dd0e9..5d401a080c1 100644
--- a/vppinfra/vppinfra/serialize.c
+++ b/vppinfra/vppinfra/serialize.c
@@ -41,7 +41,8 @@
#include <vppinfra/pool.h>
#include <vppinfra/serialize.h>
-void serialize_64 (serialize_main_t * m, va_list * va)
+void
+serialize_64 (serialize_main_t * m, va_list * va)
{
u64 x = va_arg (*va, u64);
u32 lo, hi;
@@ -51,103 +52,132 @@ void serialize_64 (serialize_main_t * m, va_list * va)
serialize_integer (m, hi, sizeof (hi));
}
-void serialize_32 (serialize_main_t * m, va_list * va)
+void
+serialize_32 (serialize_main_t * m, va_list * va)
{
u32 x = va_arg (*va, u32);
serialize_integer (m, x, sizeof (x));
}
-void serialize_16 (serialize_main_t * m, va_list * va)
+void
+serialize_16 (serialize_main_t * m, va_list * va)
{
u32 x = va_arg (*va, u32);
serialize_integer (m, x, sizeof (u16));
}
-void serialize_8 (serialize_main_t * m, va_list * va)
+void
+serialize_8 (serialize_main_t * m, va_list * va)
{
u32 x = va_arg (*va, u32);
serialize_integer (m, x, sizeof (u8));
}
-void unserialize_64 (serialize_main_t * m, va_list * va)
+void
+unserialize_64 (serialize_main_t * m, va_list * va)
{
- u64 * x = va_arg (*va, u64 *);
+ u64 *x = va_arg (*va, u64 *);
u32 lo, hi;
unserialize_integer (m, &lo, sizeof (lo));
unserialize_integer (m, &hi, sizeof (hi));
*x = ((u64) hi << 32) | (u64) lo;
}
-void unserialize_32 (serialize_main_t * m, va_list * va)
+void
+unserialize_32 (serialize_main_t * m, va_list * va)
{
- u32 * x = va_arg (*va, u32 *);
+ u32 *x = va_arg (*va, u32 *);
unserialize_integer (m, x, sizeof (x[0]));
}
-void unserialize_16 (serialize_main_t * m, va_list * va)
+void
+unserialize_16 (serialize_main_t * m, va_list * va)
{
- u16 * x = va_arg (*va, u16 *);
+ u16 *x = va_arg (*va, u16 *);
u32 t;
unserialize_integer (m, &t, sizeof (x[0]));
x[0] = t;
}
-void unserialize_8 (serialize_main_t * m, va_list * va)
+void
+unserialize_8 (serialize_main_t * m, va_list * va)
{
- u8 * x = va_arg (*va, u8 *);
+ u8 *x = va_arg (*va, u8 *);
u32 t;
unserialize_integer (m, &t, sizeof (x[0]));
x[0] = t;
}
-void serialize_f64 (serialize_main_t * m, va_list * va)
+void
+serialize_f64 (serialize_main_t * m, va_list * va)
{
f64 x = va_arg (*va, f64);
- union { f64 f; u64 i; } y;
+ union
+ {
+ f64 f;
+ u64 i;
+ } y;
y.f = x;
serialize (m, serialize_64, y.i);
}
-void serialize_f32 (serialize_main_t * m, va_list * va)
+void
+serialize_f32 (serialize_main_t * m, va_list * va)
{
f32 x = va_arg (*va, f64);
- union { f32 f; u32 i; } y;
+ union
+ {
+ f32 f;
+ u32 i;
+ } y;
y.f = x;
serialize_integer (m, y.i, sizeof (y.i));
}
-void unserialize_f64 (serialize_main_t * m, va_list * va)
+void
+unserialize_f64 (serialize_main_t * m, va_list * va)
{
- f64 * x = va_arg (*va, f64 *);
- union { f64 f; u64 i; } y;
+ f64 *x = va_arg (*va, f64 *);
+ union
+ {
+ f64 f;
+ u64 i;
+ } y;
unserialize (m, unserialize_64, &y.i);
*x = y.f;
}
-void unserialize_f32 (serialize_main_t * m, va_list * va)
+void
+unserialize_f32 (serialize_main_t * m, va_list * va)
{
- f32 * x = va_arg (*va, f32 *);
- union { f32 f; u32 i; } y;
+ f32 *x = va_arg (*va, f32 *);
+ union
+ {
+ f32 f;
+ u32 i;
+ } y;
unserialize_integer (m, &y.i, sizeof (y.i));
*x = y.f;
}
-void serialize_cstring (serialize_main_t * m, char * s)
+void
+serialize_cstring (serialize_main_t * m, char *s)
{
u32 len = s ? strlen (s) : 0;
- void * p;
+ void *p;
serialize_likely_small_unsigned_integer (m, len);
- if (len > 0)
+ if (len > 0)
{
p = serialize_get (m, len);
clib_memcpy (p, s, len);
}
}
-void unserialize_cstring (serialize_main_t * m, char ** s)
+void
+unserialize_cstring (serialize_main_t * m, char **s)
{
- char * p, * r = 0;
+ char *p, *r = 0;
u32 len;
len = unserialize_likely_small_unsigned_integer (m);
@@ -162,7 +192,7 @@ void unserialize_cstring (serialize_main_t * m, char ** s)
r = vec_new (char, len + 1);
p = unserialize_get (m, len);
clib_memcpy (r, p, len);
-
+
/* Null terminate. */
r[len] = 0;
}
@@ -170,19 +200,21 @@ void unserialize_cstring (serialize_main_t * m, char ** s)
}
/* vec_serialize/vec_unserialize helper functions for basic vector types. */
-void serialize_vec_8 (serialize_main_t * m, va_list * va)
+void
+serialize_vec_8 (serialize_main_t * m, va_list * va)
{
- u8 * s = va_arg (*va, u8 *);
+ u8 *s = va_arg (*va, u8 *);
u32 n = va_arg (*va, u32);
- u8 * p = serialize_get (m, n * sizeof (u8));
+ u8 *p = serialize_get (m, n * sizeof (u8));
clib_memcpy (p, s, n * sizeof (u8));
}
-void unserialize_vec_8 (serialize_main_t * m, va_list * va)
+void
+unserialize_vec_8 (serialize_main_t * m, va_list * va)
{
- u8 * s = va_arg (*va, u8 *);
+ u8 *s = va_arg (*va, u8 *);
u32 n = va_arg (*va, u32);
- u8 * p = unserialize_get (m, n);
+ u8 *p = unserialize_get (m, n);
clib_memcpy (s, p, n);
}
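
For reference, a minimal round-trip sketch of the 64-bit helpers above, using the fixed-buffer open/close routines that appear further down in this diff; it is not part of the patch. serialize_64 writes the low then high 32 bits, and unserialize_64 reassembles them as ((u64) hi << 32) | lo.

/* Hedged sketch: serialize a u64 into a stack buffer and read it back. */
#include <vppinfra/serialize.h>

static void
serialize_u64_sketch (void)
{
  u8 buffer[64];
  serialize_main_t m;
  u64 in = 0x0123456789abcdefULL, out = 0;

  serialize_open_data (&m, buffer, sizeof (buffer));
  serialize (&m, serialize_64, in);	/* writes lo then hi 32 bits */
  serialize_close (&m);

  unserialize_open_data (&m, buffer, sizeof (buffer));
  unserialize (&m, unserialize_64, &out);	/* out == in */
  unserialize_close (&m);
}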
@@ -239,21 +271,22 @@ void unserialize_vec_8 (serialize_main_t * m, va_list * va)
} \
}
-_ (16);
-_ (32);
-_ (64);
+_(16);
+_(32);
+_(64);
#undef _
#define SERIALIZE_VECTOR_CHUNK_SIZE 64
-void serialize_vector (serialize_main_t * m, va_list * va)
+void
+serialize_vector (serialize_main_t * m, va_list * va)
{
- void * vec = va_arg (*va, void *);
+ void *vec = va_arg (*va, void *);
u32 elt_bytes = va_arg (*va, u32);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
u32 l = vec_len (vec);
- void * p = vec;
+ void *p = vec;
serialize_integer (m, l, sizeof (l));
@@ -268,21 +301,20 @@ void serialize_vector (serialize_main_t * m, va_list * va)
}
void *
-unserialize_vector_ha (serialize_main_t * m,
+unserialize_vector_ha (serialize_main_t * m,
u32 elt_bytes,
u32 header_bytes,
- u32 align,
- u32 max_length,
- serialize_function_t * f)
+ u32 align, u32 max_length, serialize_function_t * f)
{
- void * v, * p;
+ void *v, *p;
u32 l;
unserialize_integer (m, &l, sizeof (l));
if (l > max_length)
- serialize_error (&m->header, clib_error_create ("bad vector length %d", l));
- p = v = _vec_resize (0, l, (uword) l*elt_bytes, header_bytes,
- /* align */ align);
+ serialize_error (&m->header,
+ clib_error_create ("bad vector length %d", l));
+ p = v = _vec_resize (0, l, (uword) l * elt_bytes, header_bytes,
+ /* align */ align);
while (l != 0)
{
@@ -294,11 +326,12 @@ unserialize_vector_ha (serialize_main_t * m,
return v;
}
-void unserialize_aligned_vector (serialize_main_t * m, va_list * va)
+void
+unserialize_aligned_vector (serialize_main_t * m, va_list * va)
{
- void ** vec = va_arg (*va, void **);
+ void **vec = va_arg (*va, void **);
u32 elt_bytes = va_arg (*va, u32);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
u32 align = va_arg (*va, u32);
*vec = unserialize_vector_ha (m, elt_bytes,
@@ -308,11 +341,12 @@ void unserialize_aligned_vector (serialize_main_t * m, va_list * va)
f);
}
-void unserialize_vector (serialize_main_t * m, va_list * va)
+void
+unserialize_vector (serialize_main_t * m, va_list * va)
{
- void ** vec = va_arg (*va, void **);
+ void **vec = va_arg (*va, void **);
u32 elt_bytes = va_arg (*va, u32);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
*vec = unserialize_vector_ha (m, elt_bytes,
/* header_bytes */ 0,
@@ -321,7 +355,8 @@ void unserialize_vector (serialize_main_t * m, va_list * va)
f);
}
-void serialize_bitmap (serialize_main_t * m, uword * b)
+void
+serialize_bitmap (serialize_main_t * m, uword * b)
{
u32 l, i, n_u32s;
@@ -338,9 +373,10 @@ void serialize_bitmap (serialize_main_t * m, uword * b)
}
}
-uword * unserialize_bitmap (serialize_main_t * m)
+uword *
+unserialize_bitmap (serialize_main_t * m)
{
- uword * b = 0;
+ uword *b = 0;
u32 i, n_u32s;
unserialize_integer (m, &n_u32s, sizeof (n_u32s));
@@ -358,9 +394,9 @@ uword * unserialize_bitmap (serialize_main_t * m)
if (BITS (uword) == 64)
{
if ((i % 2) == 0)
- b[i/2] |= (u64) data << (u64) 0;
+ b[i / 2] |= (u64) data << (u64) 0;
else
- b[i/2] |= (u64) data << (u64) 32;
+ b[i / 2] |= (u64) data << (u64) 32;
}
else
{
@@ -371,13 +407,14 @@ uword * unserialize_bitmap (serialize_main_t * m)
return b;
}
-void serialize_pool (serialize_main_t * m, va_list * va)
+void
+serialize_pool (serialize_main_t * m, va_list * va)
{
- void * pool = va_arg (*va, void *);
+ void *pool = va_arg (*va, void *);
u32 elt_bytes = va_arg (*va, u32);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
u32 l, lo, hi;
- pool_header_t * p;
+ pool_header_t *p;
l = vec_len (pool);
serialize_integer (m, l, sizeof (u32));
@@ -390,17 +427,16 @@ void serialize_pool (serialize_main_t * m, va_list * va)
vec_serialize (m, p->free_indices, serialize_vec_32);
pool_foreach_region (lo, hi, pool,
- serialize (m, f, pool + lo*elt_bytes, hi - lo));
+ serialize (m, f, pool + lo * elt_bytes, hi - lo));
}
static void *
unserialize_pool_helper (serialize_main_t * m,
- u32 elt_bytes, u32 align,
- serialize_function_t * f)
+ u32 elt_bytes, u32 align, serialize_function_t * f)
{
- void * v;
+ void *v;
u32 i, l, lo, hi;
- pool_header_t * p;
+ pool_header_t *p;
unserialize_integer (m, &l, sizeof (l));
if (l == 0)
@@ -408,7 +444,7 @@ unserialize_pool_helper (serialize_main_t * m,
return 0;
}
- v = _vec_resize (0, l, (uword) l*elt_bytes, sizeof (p[0]), align);
+ v = _vec_resize (0, l, (uword) l * elt_bytes, sizeof (p[0]), align);
p = pool_header (v);
vec_unserialize (m, &p->free_indices, unserialize_vec_32);
@@ -419,31 +455,34 @@ unserialize_pool_helper (serialize_main_t * m,
p->free_bitmap = clib_bitmap_ori (p->free_bitmap, p->free_indices[i]);
pool_foreach_region (lo, hi, v,
- unserialize (m, f, v + lo*elt_bytes, hi - lo));
+ unserialize (m, f, v + lo * elt_bytes, hi - lo));
return v;
}
-void unserialize_pool (serialize_main_t * m, va_list * va)
+void
+unserialize_pool (serialize_main_t * m, va_list * va)
{
- void ** result = va_arg (*va, void **);
+ void **result = va_arg (*va, void **);
u32 elt_bytes = va_arg (*va, u32);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
*result = unserialize_pool_helper (m, elt_bytes, /* align */ 0, f);
}
-void unserialize_aligned_pool (serialize_main_t * m, va_list * va)
+void
+unserialize_aligned_pool (serialize_main_t * m, va_list * va)
{
- void ** result = va_arg (*va, void **);
+ void **result = va_arg (*va, void **);
u32 elt_bytes = va_arg (*va, u32);
u32 align = va_arg (*va, u32);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
*result = unserialize_pool_helper (m, elt_bytes, align, f);
}
-static void serialize_vec_heap_elt (serialize_main_t * m, va_list * va)
+static void
+serialize_vec_heap_elt (serialize_main_t * m, va_list * va)
{
- heap_elt_t * e = va_arg (*va, heap_elt_t *);
+ heap_elt_t *e = va_arg (*va, heap_elt_t *);
u32 i, n = va_arg (*va, u32);
for (i = 0; i < n; i++)
{
@@ -453,9 +492,10 @@ static void serialize_vec_heap_elt (serialize_main_t * m, va_list * va)
}
}
-static void unserialize_vec_heap_elt (serialize_main_t * m, va_list * va)
+static void
+unserialize_vec_heap_elt (serialize_main_t * m, va_list * va)
{
- heap_elt_t * e = va_arg (*va, heap_elt_t *);
+ heap_elt_t *e = va_arg (*va, heap_elt_t *);
u32 i, n = va_arg (*va, u32);
for (i = 0; i < n; i++)
{
@@ -465,12 +505,13 @@ static void unserialize_vec_heap_elt (serialize_main_t * m, va_list * va)
}
}
-void serialize_heap (serialize_main_t * m, va_list * va)
+void
+serialize_heap (serialize_main_t * m, va_list * va)
{
- void * heap = va_arg (*va, void *);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ void *heap = va_arg (*va, void *);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
u32 i, l;
- heap_header_t * h;
+ heap_header_t *h;
l = vec_len (heap);
serialize_integer (m, l, sizeof (u32));
@@ -496,14 +537,14 @@ void serialize_heap (serialize_main_t * m, va_list * va)
/* Serialize data in heap. */
{
- heap_elt_t * e, * end;
+ heap_elt_t *e, *end;
e = h->elts + h->head;
end = h->elts + h->tail;
while (1)
{
- if (! heap_is_free (e))
+ if (!heap_is_free (e))
{
- void * v = heap + heap_offset (e) * h->elt_bytes;
+ void *v = heap + heap_offset (e) * h->elt_bytes;
u32 n = heap_elt_size (heap, e);
serialize (m, f, v, n);
}
@@ -514,13 +555,14 @@ void serialize_heap (serialize_main_t * m, va_list * va)
}
}
-void unserialize_heap (serialize_main_t * m, va_list * va)
+void
+unserialize_heap (serialize_main_t * m, va_list * va)
{
- void ** result = va_arg (*va, void **);
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
+ void **result = va_arg (*va, void **);
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
u32 i, vl, fl;
heap_header_t h;
- void * heap;
+ void *heap;
unserialize_integer (m, &vl, sizeof (u32));
if (vl == 0)
@@ -547,12 +589,12 @@ void unserialize_heap (serialize_main_t * m, va_list * va)
/* Re-construct used elt bitmap. */
if (CLIB_DEBUG > 0)
{
- heap_elt_t * e;
+ heap_elt_t *e;
vec_foreach (e, h.elts)
- {
- if (! heap_is_free (e))
- h.used_elt_bitmap = clib_bitmap_ori (h.used_elt_bitmap, e - h.elts);
- }
+ {
+ if (!heap_is_free (e))
+ h.used_elt_bitmap = clib_bitmap_ori (h.used_elt_bitmap, e - h.elts);
+ }
}
heap = *result = _heap_new (vl, h.elt_bytes);
@@ -560,14 +602,14 @@ void unserialize_heap (serialize_main_t * m, va_list * va)
/* Unserialize data in heap. */
{
- heap_elt_t * e, * end;
+ heap_elt_t *e, *end;
e = h.elts + h.head;
end = h.elts + h.tail;
while (1)
{
- if (! heap_is_free (e))
+ if (!heap_is_free (e))
{
- void * v = heap + heap_offset (e) * h.elt_bytes;
+ void *v = heap + heap_offset (e) * h.elt_bytes;
u32 n = heap_elt_size (heap, e);
unserialize (m, f, v, n);
}
@@ -578,19 +620,20 @@ void unserialize_heap (serialize_main_t * m, va_list * va)
}
}
-void serialize_magic (serialize_main_t * m, void * magic, u32 magic_bytes)
+void
+serialize_magic (serialize_main_t * m, void *magic, u32 magic_bytes)
{
- void * p;
+ void *p;
serialize_integer (m, magic_bytes, sizeof (magic_bytes));
p = serialize_get (m, magic_bytes);
clib_memcpy (p, magic, magic_bytes);
}
-void unserialize_check_magic (serialize_main_t * m, void * magic,
- u32 magic_bytes)
+void
+unserialize_check_magic (serialize_main_t * m, void *magic, u32 magic_bytes)
{
u32 l;
- void * d;
+ void *d;
unserialize_integer (m, &l, sizeof (l));
if (l != magic_bytes)
@@ -606,9 +649,9 @@ void unserialize_check_magic (serialize_main_t * m, void * magic,
clib_error_t *
va_serialize (serialize_main_t * sm, va_list * va)
{
- serialize_main_header_t * m = &sm->header;
- serialize_function_t * f = va_arg (*va, serialize_function_t *);
- clib_error_t * error = 0;
+ serialize_main_header_t *m = &sm->header;
+ serialize_function_t *f = va_arg (*va, serialize_function_t *);
+ clib_error_t *error = 0;
m->recursion_level += 1;
if (m->recursion_level == 1)
@@ -616,8 +659,8 @@ va_serialize (serialize_main_t * sm, va_list * va)
uword r = clib_setjmp (&m->error_longjmp, 0);
error = uword_to_pointer (r, clib_error_t *);
}
-
- if (! error)
+
+ if (!error)
f (sm, va);
m->recursion_level -= 1;
@@ -627,7 +670,7 @@ va_serialize (serialize_main_t * sm, va_list * va)
clib_error_t *
serialize (serialize_main_t * m, ...)
{
- clib_error_t * error;
+ clib_error_t *error;
va_list va;
va_start (va, m);
@@ -639,7 +682,7 @@ serialize (serialize_main_t * m, ...)
clib_error_t *
unserialize (serialize_main_t * m, ...)
{
- clib_error_t * error;
+ clib_error_t *error;
va_list va;
va_start (va, m);
@@ -648,10 +691,10 @@ unserialize (serialize_main_t * m, ...)
return error;
}
-static void * serialize_write_not_inline (serialize_main_header_t * m,
- serialize_stream_t * s,
- uword n_bytes_to_write,
- uword flags)
+static void *
+serialize_write_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes_to_write, uword flags)
{
uword cur_bi, n_left_b, n_left_o;
@@ -661,35 +704,37 @@ static void * serialize_write_not_inline (serialize_main_header_t * m,
n_left_o = vec_len (s->overflow_buffer);
/* Prepend overflow buffer if present. */
- do {
- if (n_left_o > 0 && n_left_b > 0)
- {
- uword n = clib_min (n_left_b, n_left_o);
- clib_memcpy (s->buffer + cur_bi, s->overflow_buffer, n);
- cur_bi += n;
- n_left_b -= n;
- n_left_o -= n;
- if (n_left_o == 0)
- _vec_len (s->overflow_buffer) = 0;
- else
- vec_delete (s->overflow_buffer, n, 0);
- }
+ do
+ {
+ if (n_left_o > 0 && n_left_b > 0)
+ {
+ uword n = clib_min (n_left_b, n_left_o);
+ clib_memcpy (s->buffer + cur_bi, s->overflow_buffer, n);
+ cur_bi += n;
+ n_left_b -= n;
+ n_left_o -= n;
+ if (n_left_o == 0)
+ _vec_len (s->overflow_buffer) = 0;
+ else
+ vec_delete (s->overflow_buffer, n, 0);
+ }
- /* Call data function when buffer is complete. Data function should
- dispatch with current buffer and give us a new one to write more
- data into. */
- if (n_left_b == 0)
- {
- s->current_buffer_index = cur_bi;
- m->data_function (m, s);
- cur_bi = s->current_buffer_index;
- n_left_b = s->n_buffer_bytes - cur_bi;
- }
- } while (n_left_o > 0);
+ /* Call data function when buffer is complete. Data function should
+ dispatch with current buffer and give us a new one to write more
+ data into. */
+ if (n_left_b == 0)
+ {
+ s->current_buffer_index = cur_bi;
+ m->data_function (m, s);
+ cur_bi = s->current_buffer_index;
+ n_left_b = s->n_buffer_bytes - cur_bi;
+ }
+ }
+ while (n_left_o > 0);
if (n_left_o > 0 || n_left_b < n_bytes_to_write)
{
- u8 * r;
+ u8 *r;
vec_add2 (s->overflow_buffer, r, n_bytes_to_write);
return r;
}
@@ -700,10 +745,10 @@ static void * serialize_write_not_inline (serialize_main_header_t * m,
}
}
-static void * serialize_read_not_inline (serialize_main_header_t * m,
- serialize_stream_t * s,
- uword n_bytes_to_read,
- uword flags)
+static void *
+serialize_read_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes_to_read, uword flags)
{
uword cur_bi, cur_oi, n_left_b, n_left_o, n_left_to_read;
@@ -735,7 +780,7 @@ static void * serialize_read_not_inline (serialize_main_header_t * m,
uword n;
/* If we don't have enough data between overflow and normal buffer
- call read function. */
+ call read function. */
if (n_left_o + n_left_b < n_bytes_to_read)
{
/* Save any left over buffer in overflow vector. */
@@ -745,7 +790,7 @@ static void * serialize_read_not_inline (serialize_main_header_t * m,
n_left_o += n_left_b;
n_left_to_read -= n_left_b;
/* Advance buffer to end --- even if
- SERIALIZE_FLAG_NO_ADVANCE_CURRENT_BUFFER_INDEX is set. */
+ SERIALIZE_FLAG_NO_ADVANCE_CURRENT_BUFFER_INDEX is set. */
cur_bi = s->n_buffer_bytes;
n_left_b = 0;
}
@@ -759,17 +804,15 @@ static void * serialize_read_not_inline (serialize_main_header_t * m,
}
/* For first time through loop return if we have enough data
- in normal buffer and overflow vector is empty. */
+ in normal buffer and overflow vector is empty. */
if (n_left_o == 0
- && n_left_to_read == n_bytes_to_read
- && n_left_b >= n_left_to_read)
+ && n_left_to_read == n_bytes_to_read && n_left_b >= n_left_to_read)
{
s->current_buffer_index = cur_bi + n_bytes_to_read;
return s->buffer + cur_bi;
}
- if (! m->data_function
- || serialize_stream_is_end_of_stream (s))
+ if (!m->data_function || serialize_stream_is_end_of_stream (s))
{
/* This can happen for a peek at end of file.
Pad overflow buffer with 0s. */
@@ -788,22 +831,24 @@ static void * serialize_read_not_inline (serialize_main_header_t * m,
n_left_to_read -= n;
}
}
-
+
s->current_buffer_index = cur_bi;
s->current_overflow_index = cur_oi + n_bytes_to_read;
return vec_elt_at_index (s->overflow_buffer, cur_oi);
}
-void * serialize_read_write_not_inline (serialize_main_header_t * m,
- serialize_stream_t * s,
- uword n_bytes,
- uword flags)
+void *
+serialize_read_write_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes, uword flags)
{
- return (((flags & SERIALIZE_FLAG_IS_READ) ? serialize_read_not_inline : serialize_write_not_inline)
- (m, s, n_bytes, flags));
+ return (((flags & SERIALIZE_FLAG_IS_READ) ? serialize_read_not_inline :
+ serialize_write_not_inline) (m, s, n_bytes, flags));
}
-static void serialize_read_write_close (serialize_main_header_t * m, serialize_stream_t * s, uword flags)
+static void
+serialize_read_write_close (serialize_main_header_t * m,
+ serialize_stream_t * s, uword flags)
{
if (serialize_stream_is_end_of_stream (s))
return;
@@ -820,25 +865,37 @@ static void serialize_read_write_close (serialize_main_header_t * m, serialize_s
vec_free (s->overflow_buffer);
}
-void serialize_close (serialize_main_t * m)
-{ serialize_read_write_close (&m->header, &m->stream, SERIALIZE_FLAG_IS_WRITE); }
+void
+serialize_close (serialize_main_t * m)
+{
+ serialize_read_write_close (&m->header, &m->stream,
+ SERIALIZE_FLAG_IS_WRITE);
+}
-void unserialize_close (serialize_main_t * m)
-{ serialize_read_write_close (&m->header, &m->stream, SERIALIZE_FLAG_IS_READ); }
+void
+unserialize_close (serialize_main_t * m)
+{
+ serialize_read_write_close (&m->header, &m->stream, SERIALIZE_FLAG_IS_READ);
+}
-void serialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes)
+void
+serialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes)
{
memset (m, 0, sizeof (m[0]));
m->stream.buffer = data;
m->stream.n_buffer_bytes = n_data_bytes;
}
-void unserialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes)
-{ serialize_open_data (m, data, n_data_bytes); }
+void
+unserialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes)
+{
+ serialize_open_data (m, data, n_data_bytes);
+}
-static void serialize_vector_write (serialize_main_header_t * m, serialize_stream_t * s)
+static void
+serialize_vector_write (serialize_main_header_t * m, serialize_stream_t * s)
{
- if (! serialize_stream_is_end_of_stream (s))
+ if (!serialize_stream_is_end_of_stream (s))
{
/* Double buffer size. */
uword l = vec_len (s->buffer);
@@ -847,7 +904,8 @@ static void serialize_vector_write (serialize_main_header_t * m, serialize_strea
}
}
-void serialize_open_vector (serialize_main_t * m, u8 * vector)
+void
+serialize_open_vector (serialize_main_t * m, u8 * vector)
{
memset (m, 0, sizeof (m[0]));
m->header.data_function = serialize_vector_write;
@@ -855,11 +913,12 @@ void serialize_open_vector (serialize_main_t * m, u8 * vector)
m->stream.current_buffer_index = 0;
m->stream.n_buffer_bytes = vec_len (vector);
}
-
-void * serialize_close_vector (serialize_main_t * m)
+
+void *
+serialize_close_vector (serialize_main_t * m)
{
- serialize_stream_t * s = &m->stream;
- void * result;
+ serialize_stream_t *s = &m->stream;
+ void *result;
serialize_close (m); /* frees overflow buffer */
@@ -869,15 +928,13 @@ void * serialize_close_vector (serialize_main_t * m)
memset (m, 0, sizeof (m[0]));
return result;
}
-
+
void
serialize_multiple_1 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data)
+ void *data, uword data_stride, uword n_data)
{
- u8 * d = data;
- u8 * p;
+ u8 *d = data;
+ u8 *p;
uword n_left = n_data;
while (n_left >= 4)
@@ -906,21 +963,23 @@ serialize_multiple_1 (serialize_main_t * m,
void
serialize_multiple_2 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data)
+ void *data, uword data_stride, uword n_data)
{
- void * d = data;
- u16 * p;
+ void *d = data;
+ u16 *p;
uword n_left = n_data;
while (n_left >= 4)
{
p = serialize_get (m, 4 * sizeof (p[0]));
- clib_mem_unaligned (p + 0, u16) = clib_host_to_net_mem_u16 (d + 0 * data_stride);
- clib_mem_unaligned (p + 1, u16) = clib_host_to_net_mem_u16 (d + 1 * data_stride);
- clib_mem_unaligned (p + 2, u16) = clib_host_to_net_mem_u16 (d + 2 * data_stride);
- clib_mem_unaligned (p + 3, u16) = clib_host_to_net_mem_u16 (d + 3 * data_stride);
+ clib_mem_unaligned (p + 0, u16) =
+ clib_host_to_net_mem_u16 (d + 0 * data_stride);
+ clib_mem_unaligned (p + 1, u16) =
+ clib_host_to_net_mem_u16 (d + 1 * data_stride);
+ clib_mem_unaligned (p + 2, u16) =
+ clib_host_to_net_mem_u16 (d + 2 * data_stride);
+ clib_mem_unaligned (p + 3, u16) =
+ clib_host_to_net_mem_u16 (d + 3 * data_stride);
n_left -= 4;
d += 4 * data_stride;
}
@@ -930,7 +989,8 @@ serialize_multiple_2 (serialize_main_t * m,
p = serialize_get (m, n_left * sizeof (p[0]));
while (n_left > 0)
{
- clib_mem_unaligned (p + 0, u16) = clib_host_to_net_mem_u16 (d + 0 * data_stride);
+ clib_mem_unaligned (p + 0, u16) =
+ clib_host_to_net_mem_u16 (d + 0 * data_stride);
p += 1;
d += 1 * data_stride;
n_left -= 1;
@@ -940,21 +1000,23 @@ serialize_multiple_2 (serialize_main_t * m,
void
serialize_multiple_4 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data)
+ void *data, uword data_stride, uword n_data)
{
- void * d = data;
- u32 * p;
+ void *d = data;
+ u32 *p;
uword n_left = n_data;
while (n_left >= 4)
{
p = serialize_get (m, 4 * sizeof (p[0]));
- clib_mem_unaligned (p + 0, u32) = clib_host_to_net_mem_u32 (d + 0 * data_stride);
- clib_mem_unaligned (p + 1, u32) = clib_host_to_net_mem_u32 (d + 1 * data_stride);
- clib_mem_unaligned (p + 2, u32) = clib_host_to_net_mem_u32 (d + 2 * data_stride);
- clib_mem_unaligned (p + 3, u32) = clib_host_to_net_mem_u32 (d + 3 * data_stride);
+ clib_mem_unaligned (p + 0, u32) =
+ clib_host_to_net_mem_u32 (d + 0 * data_stride);
+ clib_mem_unaligned (p + 1, u32) =
+ clib_host_to_net_mem_u32 (d + 1 * data_stride);
+ clib_mem_unaligned (p + 2, u32) =
+ clib_host_to_net_mem_u32 (d + 2 * data_stride);
+ clib_mem_unaligned (p + 3, u32) =
+ clib_host_to_net_mem_u32 (d + 3 * data_stride);
n_left -= 4;
d += 4 * data_stride;
}
@@ -964,7 +1026,8 @@ serialize_multiple_4 (serialize_main_t * m,
p = serialize_get (m, n_left * sizeof (p[0]));
while (n_left > 0)
{
- clib_mem_unaligned (p + 0, u32) = clib_host_to_net_mem_u32 (d + 0 * data_stride);
+ clib_mem_unaligned (p + 0, u32) =
+ clib_host_to_net_mem_u32 (d + 0 * data_stride);
p += 1;
d += 1 * data_stride;
n_left -= 1;
@@ -974,12 +1037,10 @@ serialize_multiple_4 (serialize_main_t * m,
void
unserialize_multiple_1 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data)
+ void *data, uword data_stride, uword n_data)
{
- u8 * d = data;
- u8 * p;
+ u8 *d = data;
+ u8 *p;
uword n_left = n_data;
while (n_left >= 4)
@@ -1008,21 +1069,23 @@ unserialize_multiple_1 (serialize_main_t * m,
void
unserialize_multiple_2 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data)
+ void *data, uword data_stride, uword n_data)
{
- void * d = data;
- u16 * p;
+ void *d = data;
+ u16 *p;
uword n_left = n_data;
while (n_left >= 4)
{
p = unserialize_get (m, 4 * sizeof (p[0]));
- clib_mem_unaligned (d + 0 * data_stride, u16) = clib_net_to_host_mem_u16 (p + 0);
- clib_mem_unaligned (d + 1 * data_stride, u16) = clib_net_to_host_mem_u16 (p + 1);
- clib_mem_unaligned (d + 2 * data_stride, u16) = clib_net_to_host_mem_u16 (p + 2);
- clib_mem_unaligned (d + 3 * data_stride, u16) = clib_net_to_host_mem_u16 (p + 3);
+ clib_mem_unaligned (d + 0 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 0);
+ clib_mem_unaligned (d + 1 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 1);
+ clib_mem_unaligned (d + 2 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 2);
+ clib_mem_unaligned (d + 3 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 3);
n_left -= 4;
d += 4 * data_stride;
}
@@ -1032,7 +1095,8 @@ unserialize_multiple_2 (serialize_main_t * m,
p = unserialize_get (m, n_left * sizeof (p[0]));
while (n_left > 0)
{
- clib_mem_unaligned (d + 0 * data_stride, u16) = clib_net_to_host_mem_u16 (p + 0);
+ clib_mem_unaligned (d + 0 * data_stride, u16) =
+ clib_net_to_host_mem_u16 (p + 0);
p += 1;
d += 1 * data_stride;
n_left -= 1;
@@ -1042,21 +1106,23 @@ unserialize_multiple_2 (serialize_main_t * m,
void
unserialize_multiple_4 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data)
+ void *data, uword data_stride, uword n_data)
{
- void * d = data;
- u32 * p;
+ void *d = data;
+ u32 *p;
uword n_left = n_data;
while (n_left >= 4)
{
p = unserialize_get (m, 4 * sizeof (p[0]));
- clib_mem_unaligned (d + 0 * data_stride, u32) = clib_net_to_host_mem_u32 (p + 0);
- clib_mem_unaligned (d + 1 * data_stride, u32) = clib_net_to_host_mem_u32 (p + 1);
- clib_mem_unaligned (d + 2 * data_stride, u32) = clib_net_to_host_mem_u32 (p + 2);
- clib_mem_unaligned (d + 3 * data_stride, u32) = clib_net_to_host_mem_u32 (p + 3);
+ clib_mem_unaligned (d + 0 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 0);
+ clib_mem_unaligned (d + 1 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 1);
+ clib_mem_unaligned (d + 2 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 2);
+ clib_mem_unaligned (d + 3 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 3);
n_left -= 4;
d += 4 * data_stride;
}
@@ -1066,7 +1132,8 @@ unserialize_multiple_4 (serialize_main_t * m,
p = unserialize_get (m, n_left * sizeof (p[0]));
while (n_left > 0)
{
- clib_mem_unaligned (d + 0 * data_stride, u32) = clib_net_to_host_mem_u32 (p + 0);
+ clib_mem_unaligned (d + 0 * data_stride, u32) =
+ clib_net_to_host_mem_u32 (p + 0);
p += 1;
d += 1 * data_stride;
n_left -= 1;
@@ -1079,7 +1146,8 @@ unserialize_multiple_4 (serialize_main_t * m,
#include <unistd.h>
#include <fcntl.h>
-static void unix_file_write (serialize_main_header_t * m, serialize_stream_t * s)
+static void
+unix_file_write (serialize_main_header_t * m, serialize_stream_t * s)
{
int fd, n;
@@ -1087,7 +1155,7 @@ static void unix_file_write (serialize_main_header_t * m, serialize_stream_t * s
n = write (fd, s->buffer, s->current_buffer_index);
if (n < 0)
{
- if (! unix_error_is_fatal (errno))
+ if (!unix_error_is_fatal (errno))
n = 0;
else
serialize_error (m, clib_error_return_unix (0, "write"));
@@ -1099,7 +1167,8 @@ static void unix_file_write (serialize_main_header_t * m, serialize_stream_t * s
s->current_buffer_index = vec_len (s->buffer);
}
-static void unix_file_read (serialize_main_header_t * m, serialize_stream_t * s)
+static void
+unix_file_read (serialize_main_header_t * m, serialize_stream_t * s)
{
int fd, n;
@@ -1107,7 +1176,7 @@ static void unix_file_read (serialize_main_header_t * m, serialize_stream_t * s)
n = read (fd, s->buffer, vec_len (s->buffer));
if (n < 0)
{
- if (! unix_error_is_fatal (errno))
+ if (!unix_error_is_fatal (errno))
n = 0;
else
serialize_error (m, clib_error_return_unix (0, "read"));
@@ -1119,12 +1188,13 @@ static void unix_file_read (serialize_main_header_t * m, serialize_stream_t * s)
}
static void
-serialize_open_unix_file_descriptor_helper (serialize_main_t * m, int fd, uword is_read)
+serialize_open_unix_file_descriptor_helper (serialize_main_t * m, int fd,
+ uword is_read)
{
memset (m, 0, sizeof (m[0]));
vec_resize (m->stream.buffer, 4096);
-
- if (! is_read)
+
+ if (!is_read)
{
m->stream.n_buffer_bytes = vec_len (m->stream.buffer);
_vec_len (m->stream.buffer) = 0;
@@ -1134,14 +1204,21 @@ serialize_open_unix_file_descriptor_helper (serialize_main_t * m, int fd, uword
m->stream.data_function_opaque = fd;
}
-void serialize_open_unix_file_descriptor (serialize_main_t * m, int fd)
-{ serialize_open_unix_file_descriptor_helper (m, fd, /* is_read */ 0); }
+void
+serialize_open_unix_file_descriptor (serialize_main_t * m, int fd)
+{
+ serialize_open_unix_file_descriptor_helper (m, fd, /* is_read */ 0);
+}
-void unserialize_open_unix_file_descriptor (serialize_main_t * m, int fd)
-{ serialize_open_unix_file_descriptor_helper (m, fd, /* is_read */ 1); }
+void
+unserialize_open_unix_file_descriptor (serialize_main_t * m, int fd)
+{
+ serialize_open_unix_file_descriptor_helper (m, fd, /* is_read */ 1);
+}
static clib_error_t *
-serialize_open_unix_file_helper (serialize_main_t * m, char * file, uword is_read)
+serialize_open_unix_file_helper (serialize_main_t * m, char *file,
+ uword is_read)
{
int fd, mode;
@@ -1155,11 +1232,23 @@ serialize_open_unix_file_helper (serialize_main_t * m, char * file, uword is_rea
}
clib_error_t *
-serialize_open_unix_file (serialize_main_t * m, char * file)
-{ return serialize_open_unix_file_helper (m, file, /* is_read */ 0); }
+serialize_open_unix_file (serialize_main_t * m, char *file)
+{
+ return serialize_open_unix_file_helper (m, file, /* is_read */ 0);
+}
clib_error_t *
-unserialize_open_unix_file (serialize_main_t * m, char * file)
-{ return serialize_open_unix_file_helper (m, file, /* is_read */ 1); }
+unserialize_open_unix_file (serialize_main_t * m, char *file)
+{
+ return serialize_open_unix_file_helper (m, file, /* is_read */ 1);
+}
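
/* Editorial note, not part of this patch: illustrative use of the Unix
   file variants above.  The path and value are hypothetical and error
   handling is reduced to a single check. */
   serialize_main_t sm;
   clib_error_t *err = serialize_open_unix_file (&sm, "/tmp/example.bin");
   if (!err)
     {
       serialize_integer (&sm, 42, sizeof (u32));
       serialize_close (&sm);	/* flushes buffered data to the fd */
     }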
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/serialize.h b/vppinfra/vppinfra/serialize.h
index f049f792f62..6cc2372e6ab 100644
--- a/vppinfra/vppinfra/serialize.h
+++ b/vppinfra/vppinfra/serialize.h
@@ -50,9 +50,10 @@ struct serialize_stream_t;
typedef void (serialize_data_function_t) (struct serialize_main_header_t * h,
struct serialize_stream_t * s);
-typedef struct serialize_stream_t {
+typedef struct serialize_stream_t
+{
/* Current data buffer being serialized/unserialized. */
- u8 * buffer;
+ u8 *buffer;
/* Size of buffer in bytes. */
u32 n_buffer_bytes;
@@ -62,7 +63,7 @@ typedef struct serialize_stream_t {
/* Overflow buffer for when there is not enough room at the end of
buffer to hold serialized/unserialized data. */
- u8 * overflow_buffer;
+ u8 *overflow_buffer;
/* Current index in overflow buffer for reads. */
u32 current_overflow_index;
@@ -72,25 +73,31 @@ typedef struct serialize_stream_t {
uword data_function_opaque;
- u32 opaque[64 - 4 * sizeof (u32) - 1 * sizeof (uword) - 2 * sizeof (void *)];
+ u32 opaque[64 - 4 * sizeof (u32) - 1 * sizeof (uword) -
+ 2 * sizeof (void *)];
} serialize_stream_t;
always_inline void
serialize_stream_set_end_of_stream (serialize_stream_t * s)
-{ s->flags |= SERIALIZE_END_OF_STREAM; }
+{
+ s->flags |= SERIALIZE_END_OF_STREAM;
+}
always_inline uword
serialize_stream_is_end_of_stream (serialize_stream_t * s)
-{ return (s->flags & SERIALIZE_END_OF_STREAM) != 0; }
+{
+ return (s->flags & SERIALIZE_END_OF_STREAM) != 0;
+}
-typedef struct serialize_main_header_t {
+typedef struct serialize_main_header_t
+{
u32 recursion_level;
/* Data callback function and opaque data. */
- serialize_data_function_t * data_function;
+ serialize_data_function_t *data_function;
/* Error if signaled by data function. */
- clib_error_t * error;
+ clib_error_t *error;
/* Exit unwind point if error occurs. */
clib_longjmp_t error_longjmp;
@@ -98,14 +105,16 @@ typedef struct serialize_main_header_t {
always_inline void
serialize_error (serialize_main_header_t * m, clib_error_t * error)
-{ clib_longjmp (&m->error_longjmp, pointer_to_uword (error)); }
+{
+ clib_longjmp (&m->error_longjmp, pointer_to_uword (error));
+}
#define serialize_error_return(m,args...) \
serialize_error (&(m)->header, clib_error_return (0, args))
-void * serialize_read_write_not_inline (serialize_main_header_t * m,
- serialize_stream_t * s,
- uword n_bytes, uword flags);
+void *serialize_read_write_not_inline (serialize_main_header_t * m,
+ serialize_stream_t * s,
+ uword n_bytes, uword flags);
#define SERIALIZE_FLAG_IS_READ (1 << 0)
#define SERIALIZE_FLAG_IS_WRITE (1 << 1)
@@ -113,8 +122,7 @@ void * serialize_read_write_not_inline (serialize_main_header_t * m,
always_inline void *
serialize_stream_read_write (serialize_main_header_t * header,
serialize_stream_t * s,
- uword n_bytes,
- uword flags)
+ uword n_bytes, uword flags)
{
uword i, j, l;
@@ -133,38 +141,50 @@ serialize_stream_read_write (serialize_main_header_t * header,
}
}
-typedef struct {
+typedef struct
+{
serialize_main_header_t header;
serialize_stream_t stream;
} serialize_main_t;
always_inline void
serialize_set_end_of_stream (serialize_main_t * m)
-{ serialize_stream_set_end_of_stream (&m->stream); }
+{
+ serialize_stream_set_end_of_stream (&m->stream);
+}
always_inline uword
serialize_is_end_of_stream (serialize_main_t * m)
-{ return serialize_stream_is_end_of_stream (&m->stream); }
+{
+ return serialize_stream_is_end_of_stream (&m->stream);
+}
-typedef struct {
+typedef struct
+{
serialize_main_header_t header;
- serialize_stream_t * streams;
+ serialize_stream_t *streams;
} serialize_multiple_main_t;
typedef void (serialize_function_t) (serialize_main_t * m, va_list * va);
always_inline void *
unserialize_get (serialize_main_t * m, uword n_bytes)
-{ return serialize_stream_read_write (&m->header, &m->stream, n_bytes, SERIALIZE_FLAG_IS_READ); }
+{
+ return serialize_stream_read_write (&m->header, &m->stream, n_bytes,
+ SERIALIZE_FLAG_IS_READ);
+}
always_inline void *
serialize_get (serialize_main_t * m, uword n_bytes)
-{ return serialize_stream_read_write (&m->header, &m->stream, n_bytes, SERIALIZE_FLAG_IS_WRITE); }
+{
+ return serialize_stream_read_write (&m->header, &m->stream, n_bytes,
+ SERIALIZE_FLAG_IS_WRITE);
+}
always_inline void
serialize_integer (serialize_main_t * m, u64 x, u32 n_bytes)
{
- u8 * p = serialize_get (m, n_bytes);
+ u8 *p = serialize_get (m, n_bytes);
if (n_bytes == 1)
p[0] = x;
else if (n_bytes == 2)
@@ -178,9 +198,9 @@ serialize_integer (serialize_main_t * m, u64 x, u32 n_bytes)
}
always_inline void
-unserialize_integer (serialize_main_t * m, void * x, u32 n_bytes)
+unserialize_integer (serialize_main_t * m, void *x, u32 n_bytes)
{
- u8 * p = unserialize_get (m, n_bytes);
+ u8 *p = unserialize_get (m, n_bytes);
if (n_bytes == 1)
*(u8 *) x = p[0];
else if (n_bytes == 2)
@@ -198,13 +218,13 @@ always_inline void
serialize_likely_small_unsigned_integer (serialize_main_t * m, u64 x)
{
u64 r = x;
- u8 * p;
+ u8 *p;
/* Low bit set means it fits into 1 byte. */
if (r < (1 << 7))
{
p = serialize_get (m, 1);
- p[0] = 1 + 2*r;
+ p[0] = 1 + 2 * r;
return;
}
@@ -233,7 +253,7 @@ serialize_likely_small_unsigned_integer (serialize_main_t * m, u64 x)
always_inline u64
unserialize_likely_small_unsigned_integer (serialize_main_t * m)
{
- u8 * p = unserialize_get (m, 1);
+ u8 *p = unserialize_get (m, 1);
u64 r;
u32 y = p[0];
@@ -253,9 +273,8 @@ unserialize_likely_small_unsigned_integer (serialize_main_t * m)
{
p = unserialize_get (m, 3);
r += ((y / 8)
- + (p[0] << (5 + 8*0))
- + (p[1] << (5 + 8*1))
- + (p[2] << (5 + 8*2)));
+ + (p[0] << (5 + 8 * 0))
+ + (p[1] << (5 + 8 * 1)) + (p[2] << (5 + 8 * 2)));
return r;
}
@@ -269,7 +288,7 @@ unserialize_likely_small_unsigned_integer (serialize_main_t * m)
always_inline void
serialize_likely_small_signed_integer (serialize_main_t * m, i64 s)
{
- u64 u = s < 0 ? -(2*s + 1) : 2*s;
+ u64 u = s < 0 ? -(2 * s + 1) : 2 * s;
serialize_likely_small_unsigned_integer (m, u);
}
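
/* Editorial note, not part of this patch: a worked example of the
   variable-length encoding used by the two helpers above, derived from
   the expressions shown.  Unsigned: r = 5 < 2^7, so a single byte
   1 + 2*5 = 11 is written; on decode the low bit of 11 is set, giving
   back r = 11 / 2 = 5.  Signed: s = -3 maps to u = -(2*-3 + 1) = 5 and
   s = 3 maps to u = 2*3 = 6, so small negative and positive values both
   stay in the one-byte range. */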
@@ -283,42 +302,28 @@ unserialize_likely_small_signed_integer (serialize_main_t * m)
void
serialize_multiple_1 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data);
+ void *data, uword data_stride, uword n_data);
void
serialize_multiple_2 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data);
+ void *data, uword data_stride, uword n_data);
void
serialize_multiple_4 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data);
+ void *data, uword data_stride, uword n_data);
void
unserialize_multiple_1 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data);
+ void *data, uword data_stride, uword n_data);
void
unserialize_multiple_2 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data);
+ void *data, uword data_stride, uword n_data);
void
unserialize_multiple_4 (serialize_main_t * m,
- void * data,
- uword data_stride,
- uword n_data);
+ void *data, uword data_stride, uword n_data);
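
/* Editorial note, not part of this patch: these six routines walk memory
   with a caller-supplied stride, so a single field can be pulled out of an
   array of records.  A hypothetical sketch, assuming m is an open
   serialize_main_t pointer and my_rec_t is not a real type here: */
   typedef struct { u32 id; u8 other[12]; } my_rec_t;
   my_rec_t recs[8];
   /* serialize only the id field of each of the 8 records */
   serialize_multiple_4 (m, &recs[0].id, sizeof (my_rec_t), 8);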
always_inline void
serialize_multiple (serialize_main_t * m,
- void * data,
- uword n_data_bytes,
- uword data_stride,
- uword n_data)
+ void *data,
+ uword n_data_bytes, uword data_stride, uword n_data)
{
if (n_data_bytes == 1)
serialize_multiple_1 (m, data, data_stride, n_data);
@@ -332,10 +337,8 @@ serialize_multiple (serialize_main_t * m,
always_inline void
unserialize_multiple (serialize_main_t * m,
- void * data,
- uword n_data_bytes,
- uword data_stride,
- uword n_data)
+ void *data,
+ uword n_data_bytes, uword data_stride, uword n_data)
{
if (n_data_bytes == 1)
unserialize_multiple_1 (m, data, data_stride, n_data);
@@ -362,7 +365,8 @@ serialize_function_t serialize_vec_32, unserialize_vec_32;
serialize_function_t serialize_vec_64, unserialize_vec_64;
/* Serialize generic vectors. */
-serialize_function_t serialize_vector, unserialize_vector, unserialize_aligned_vector;
+serialize_function_t serialize_vector, unserialize_vector,
+ unserialize_aligned_vector;
#define vec_serialize(m,v,f) \
serialize ((m), serialize_vector, (v), sizeof ((v)[0]), (f))
@@ -374,7 +378,8 @@ serialize_function_t serialize_vector, unserialize_vector, unserialize_aligned_v
unserialize ((m), unserialize_aligned_vector, (v), sizeof ((*(v))[0]), (f))
/* Serialize pools. */
-serialize_function_t serialize_pool, unserialize_pool, unserialize_aligned_pool;
+serialize_function_t serialize_pool, unserialize_pool,
+ unserialize_aligned_pool;
#define pool_serialize(m,v,f) \
serialize ((m), serialize_pool, (v), sizeof ((v)[0]), (f))
@@ -389,39 +394,50 @@ serialize_function_t serialize_pool, unserialize_pool, unserialize_aligned_pool;
serialize_function_t serialize_heap, unserialize_heap;
void serialize_bitmap (serialize_main_t * m, uword * b);
-uword * unserialize_bitmap (serialize_main_t * m);
+uword *unserialize_bitmap (serialize_main_t * m);
-void serialize_cstring (serialize_main_t * m, char * string);
-void unserialize_cstring (serialize_main_t * m, char ** string);
+void serialize_cstring (serialize_main_t * m, char *string);
+void unserialize_cstring (serialize_main_t * m, char **string);
void serialize_close (serialize_main_t * m);
void unserialize_close (serialize_main_t * m);
-void serialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes);
-void unserialize_open_data (serialize_main_t * m, u8 * data, uword n_data_bytes);
+void serialize_open_data (serialize_main_t * m, u8 * data,
+ uword n_data_bytes);
+void unserialize_open_data (serialize_main_t * m, u8 * data,
+ uword n_data_bytes);
/* Starts serialization with expanding vector as buffer. */
void serialize_open_vector (serialize_main_t * m, u8 * vector);
/* Serialization is done: returns vector buffer to caller. */
-void * serialize_close_vector (serialize_main_t * m);
+void *serialize_close_vector (serialize_main_t * m);
void unserialize_open_vector (serialize_main_t * m, u8 * vector);
#ifdef CLIB_UNIX
-clib_error_t * serialize_open_unix_file (serialize_main_t * m, char * file);
-clib_error_t * unserialize_open_unix_file (serialize_main_t * m, char * file);
+clib_error_t *serialize_open_unix_file (serialize_main_t * m, char *file);
+clib_error_t *unserialize_open_unix_file (serialize_main_t * m, char *file);
void serialize_open_unix_file_descriptor (serialize_main_t * m, int fd);
void unserialize_open_unix_file_descriptor (serialize_main_t * m, int fd);
#endif /* CLIB_UNIX */
/* Main routines. */
-clib_error_t * serialize (serialize_main_t * m, ...);
-clib_error_t * unserialize (serialize_main_t * m, ...);
-clib_error_t * va_serialize (serialize_main_t * m, va_list * va);
+clib_error_t *serialize (serialize_main_t * m, ...);
+clib_error_t *unserialize (serialize_main_t * m, ...);
+clib_error_t *va_serialize (serialize_main_t * m, va_list * va);
-void serialize_magic (serialize_main_t * m, void * magic, u32 magic_bytes);
-void unserialize_check_magic (serialize_main_t * m, void * magic, u32 magic_bytes);
+void serialize_magic (serialize_main_t * m, void *magic, u32 magic_bytes);
+void unserialize_check_magic (serialize_main_t * m, void *magic,
+ u32 magic_bytes);
#endif /* included_clib_serialize_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/slist.c b/vppinfra/vppinfra/slist.c
index 435a026a462..892517bbb79 100644
--- a/vppinfra/vppinfra/slist.c
+++ b/vppinfra/vppinfra/slist.c
@@ -27,8 +27,8 @@
* is always on the "level-0" list. Since most elements are *only* on
* level 0, we keep the level 0 (and level 1) in the element. For those
* elements on more than two lists, we switch to a vector. Hence, the
- * "n" union in slib_slist_elt_t.
- *
+ * "n" union in slib_slist_elt_t.
+ *
* The low-order bit of elt->n.next0[0] is 1 for inlined next indices,
* 0 for vector indices (since the allocator always aligns to at least
* a 4-byte boundary). We can only represent 2e9 items, but since the
@@ -41,7 +41,7 @@
* User code is in charge of comparing a supplied key with
* the key component of a user pool element. The user tells this code
* to add or delete (opaque key, 32-bit integer) pairs to the skip-list.
- *
+ *
* The algorithm adds new elements to one or more lists.
* For levels greater than zero, the probability of a new element landing on
* a list is branching_factor**N. Branching_factor = 0.2 seems to work
@@ -49,9 +49,9 @@
*/
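
/* Editorial note, illustrative arithmetic not in the original comment:
   with branching_factor = 0.2 an element lands on level N with
   probability 0.2^N, so 1,000,000 level-0 entries give roughly
   200,000 on level 1, 40,000 on level 2 and 8,000 on level 3, which
   is what keeps searches approximately logarithmic. */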
clib_error_t *
-clib_slist_init (clib_slist_t *sp, f64 branching_factor,
- clib_slist_key_compare_function_t compare,
- format_function_t format_user_element)
+clib_slist_init (clib_slist_t * sp, f64 branching_factor,
+ clib_slist_key_compare_function_t compare,
+ format_function_t format_user_element)
{
clib_slist_elt_t *head;
memset (sp, 0, sizeof (sp[0]));
@@ -60,8 +60,8 @@ clib_slist_init (clib_slist_t *sp, f64 branching_factor,
sp->compare = compare;
sp->seed = 0xdeaddabe;
pool_get (sp->elts, head);
- vec_add1 (head->n.nexts, (u32)~0);
- head->user_pool_index = (u32)~0;
+ vec_add1 (head->n.nexts, (u32) ~ 0);
+ head->user_pool_index = (u32) ~ 0;
vec_validate (sp->path, 1);
vec_validate (sp->occupancy, 0);
@@ -72,23 +72,23 @@ clib_slist_init (clib_slist_t *sp, f64 branching_factor,
* slist_search_internal
*/
static inline clib_slist_search_result_t
-slist_search_internal (clib_slist_t *sp, void *key, int need_full_path)
+slist_search_internal (clib_slist_t * sp, void *key, int need_full_path)
{
int level, comp_result;
clib_slist_elt_t *search_elt, *head_elt;
sp->ncompares = 0;
- /*
- * index 0 is the magic listhead element which is
+ /*
+ * index 0 is the magic listhead element which is
* lexically lighter than / to the left of every element
*/
- search_elt = head_elt = pool_elt_at_index (sp->elts, 0);
+ search_elt = head_elt = pool_elt_at_index (sp->elts, 0);
- /*
+ /*
* Initial negotiating position, only the head_elt is
* lighter than the supplied key
*/
- memset (sp->path, 0, vec_len(head_elt->n.nexts) * sizeof (u32));
+ memset (sp->path, 0, vec_len (head_elt->n.nexts) * sizeof (u32));
/* Walk the fastest lane first */
level = vec_len (head_elt->n.nexts) - 1;
@@ -99,226 +99,238 @@ slist_search_internal (clib_slist_t *sp, void *key, int need_full_path)
u32 next_index_this_level;
clib_slist_elt_t *prefetch_elt;
- /*
+ /*
* Prefetching the next element at this level makes a measurable
* difference, but doesn't fix the dependent read stall problem
*/
- prefetch_elt = sp->elts +
- clib_slist_get_next_at_level (search_elt, level);
+ prefetch_elt = sp->elts +
+ clib_slist_get_next_at_level (search_elt, level);
- CLIB_PREFETCH(prefetch_elt, CLIB_CACHE_LINE_BYTES, READ);
+ CLIB_PREFETCH (prefetch_elt, CLIB_CACHE_LINE_BYTES, READ);
/* Compare the key with the current element */
comp_result = (search_elt == head_elt) ? 1 :
- sp->compare (key, search_elt->user_pool_index);
+ sp->compare (key, search_elt->user_pool_index);
sp->ncompares++;
/* key "lighter" than this element */
- if (comp_result < 0)
- {
- /*
- * Back up to previous item on this list
- * and search the next finer-grained list
- * starting there.
- */
- search_elt = pool_elt_at_index (sp->elts, sp->path [level]);
- next_list:
- if (level > 0)
- {
- level--;
- continue;
- }
- else
- {
- return CLIB_SLIST_NO_MATCH;
- }
- }
+ if (comp_result < 0)
+ {
+ /*
+ * Back up to previous item on this list
+ * and search the next finer-grained list
+ * starting there.
+ */
+ search_elt = pool_elt_at_index (sp->elts, sp->path[level]);
+ next_list:
+ if (level > 0)
+ {
+ level--;
+ continue;
+ }
+ else
+ {
+ return CLIB_SLIST_NO_MATCH;
+ }
+ }
/* Match */
- if (comp_result == 0)
- {
- /*
- * If we're trying to delete an element, we need to
- * track down all of the elements which point at it.
- * Otherwise, don't bother with it
- */
- if (need_full_path && level > 0)
- {
- search_elt = pool_elt_at_index (sp->elts, sp->path [level]);
- level--;
- continue;
- }
- level = vec_len(head_elt->n.nexts);
- sp->path[level] = search_elt - sp->elts;
- _vec_len (sp->path) = level + 1;
- return CLIB_SLIST_MATCH;
- }
- /*
+ if (comp_result == 0)
+ {
+ /*
+ * If we're trying to delete an element, we need to
+ * track down all of the elements which point at it.
+ * Otherwise, don't bother with it
+ */
+ if (need_full_path && level > 0)
+ {
+ search_elt = pool_elt_at_index (sp->elts, sp->path[level]);
+ level--;
+ continue;
+ }
+ level = vec_len (head_elt->n.nexts);
+ sp->path[level] = search_elt - sp->elts;
+ _vec_len (sp->path) = level + 1;
+ return CLIB_SLIST_MATCH;
+ }
+ /*
* comp_result positive, key is to the right of
* this element
- */
+ */
sp->path[level] = search_elt - sp->elts;
/* Out of list at this level? */
- next_index_this_level = clib_slist_get_next_at_level (search_elt, level);
- if (next_index_this_level == (u32)~0)
- goto next_list;
+ next_index_this_level =
+ clib_slist_get_next_at_level (search_elt, level);
+ if (next_index_this_level == (u32) ~ 0)
+ goto next_list;
/* No, try the next element */
search_elt = pool_elt_at_index (sp->elts, next_index_this_level);
}
- return 0; /* notreached */
+ return 0; /* notreached */
}
-u32 clib_slist_search (clib_slist_t *sp, void *key, u32 *ncompares)
+u32
+clib_slist_search (clib_slist_t * sp, void *key, u32 * ncompares)
{
clib_slist_search_result_t rv;
- rv = slist_search_internal (sp, key, 0 /* dont need full path */);
+ rv = slist_search_internal (sp, key, 0 /* dont need full path */ );
if (rv == CLIB_SLIST_MATCH)
{
clib_slist_elt_t *elt;
- elt = pool_elt_at_index (sp->elts, sp->path[vec_len(sp->path)-1]);
+ elt = pool_elt_at_index (sp->elts, sp->path[vec_len (sp->path) - 1]);
if (ncompares)
- *ncompares = sp->ncompares;
+ *ncompares = sp->ncompares;
return elt->user_pool_index;
}
- return (u32)~0;
+ return (u32) ~ 0;
}
-void clib_slist_add (clib_slist_t *sp, void *key, u32 user_pool_index)
+void
+clib_slist_add (clib_slist_t * sp, void *key, u32 user_pool_index)
{
clib_slist_elt_t *new_elt;
clib_slist_search_result_t search_result;
int level;
- search_result = slist_search_internal (sp, key,
- 0 /* don't need full path */);
+ search_result = slist_search_internal (sp, key,
+ 0 /* don't need full path */ );
/* Special case: key exists, just replace user_pool_index */
- if (PREDICT_FALSE(search_result == CLIB_SLIST_MATCH))
+ if (PREDICT_FALSE (search_result == CLIB_SLIST_MATCH))
{
clib_slist_elt_t *elt;
elt = pool_elt_at_index (sp->elts, sp->path[0]);
elt->user_pool_index = user_pool_index;
return;
}
-
+
pool_get (sp->elts, new_elt);
new_elt->n.nexts = 0;
new_elt->user_pool_index = user_pool_index;
/* sp->path lists elements to the left of key, by level */
- for (level = 0; level < vec_len(sp->path); level++)
+ for (level = 0; level < vec_len (sp->path); level++)
{
clib_slist_elt_t *prev_elt_this_level;
u32 prev_elt_next_index_this_level;
/* Add to list at the current level */
prev_elt_this_level = pool_elt_at_index (sp->elts, sp->path[level]);
- prev_elt_next_index_this_level = clib_slist_get_next_at_level
- (prev_elt_this_level, level);
-
- clib_slist_set_next_at_level (new_elt, prev_elt_next_index_this_level,
- level);
+ prev_elt_next_index_this_level = clib_slist_get_next_at_level
+ (prev_elt_this_level, level);
+
+ clib_slist_set_next_at_level (new_elt, prev_elt_next_index_this_level,
+ level);
clib_slist_set_next_at_level (prev_elt_this_level, new_elt - sp->elts,
- level);
+ level);
sp->occupancy[level]++;
-
+
/* Randomly add to the next-higher level */
if (random_f64 (&sp->seed) > sp->branching_factor)
- break;
+ break;
}
- {
+ {
/* Time to add a new ply? */
- clib_slist_elt_t *head_elt = pool_elt_at_index (sp->elts, 0);
- int top_level = vec_len(head_elt->n.nexts) - 1;
- if (((f64)sp->occupancy[top_level]) * sp->branching_factor > 1.0)
- {
- vec_add1 (sp->occupancy, 0);
- vec_add1 (head_elt->n.nexts, (u32)~0);
- /* full match case returns n+1 items */
- vec_validate (sp->path, vec_len(head_elt->n.nexts));
- }
- }
+ clib_slist_elt_t *head_elt = pool_elt_at_index (sp->elts, 0);
+ int top_level = vec_len (head_elt->n.nexts) - 1;
+ if (((f64) sp->occupancy[top_level]) * sp->branching_factor > 1.0)
+ {
+ vec_add1 (sp->occupancy, 0);
+ vec_add1 (head_elt->n.nexts, (u32) ~ 0);
+ /* full match case returns n+1 items */
+ vec_validate (sp->path, vec_len (head_elt->n.nexts));
+ }
+ }
}
clib_slist_search_result_t
-clib_slist_del (clib_slist_t *sp, void *key)
+clib_slist_del (clib_slist_t * sp, void *key)
{
clib_slist_search_result_t search_result;
clib_slist_elt_t *del_elt;
int level;
-
- search_result = slist_search_internal (sp, key, 1 /* need full path */);
- if (PREDICT_FALSE(search_result == CLIB_SLIST_NO_MATCH))
+ search_result = slist_search_internal (sp, key, 1 /* need full path */ );
+
+ if (PREDICT_FALSE (search_result == CLIB_SLIST_NO_MATCH))
return search_result;
- del_elt = pool_elt_at_index (sp->elts, sp->path[vec_len(sp->path)-1]);
- ASSERT(vec_len(sp->path) > 1);
-
- for (level = 0; level < vec_len (sp->path)-1; level++)
+ del_elt = pool_elt_at_index (sp->elts, sp->path[vec_len (sp->path) - 1]);
+ ASSERT (vec_len (sp->path) > 1);
+
+ for (level = 0; level < vec_len (sp->path) - 1; level++)
{
clib_slist_elt_t *path_elt;
u32 path_elt_next_index;
-
+
path_elt = pool_elt_at_index (sp->elts, sp->path[level]);
path_elt_next_index = clib_slist_get_next_at_level (path_elt, level);
-
+
/* Splice the item out of the list if it's adjacent to the victim */
if (path_elt_next_index == del_elt - sp->elts)
- {
- sp->occupancy[level]--;
- path_elt_next_index = clib_slist_get_next_at_level (del_elt, level);
- clib_slist_set_next_at_level (path_elt, path_elt_next_index, level);
- }
+ {
+ sp->occupancy[level]--;
+ path_elt_next_index = clib_slist_get_next_at_level (del_elt, level);
+ clib_slist_set_next_at_level (path_elt, path_elt_next_index, level);
+ }
}
/* If this element is on more than two lists it has a vector of nexts */
- if (! (del_elt->n.next0[0] & 1))
+ if (!(del_elt->n.next0[0] & 1))
vec_free (del_elt->n.nexts);
pool_put (sp->elts, del_elt);
return CLIB_SLIST_MATCH;
}
-u8 * format_slist (u8 * s, va_list *args)
+u8 *
+format_slist (u8 * s, va_list * args)
{
clib_slist_t *sl = va_arg (*args, clib_slist_t *);
int verbose = va_arg (*args, int);
int i;
clib_slist_elt_t *head_elt, *elt;
- s = format (s, "slist 0x%x, %u items, branching_factor %.2f\n", sl,
- sl->occupancy ? sl->occupancy[0] : 0, sl->branching_factor);
-
- if (pool_elts(sl->elts) == 0)
+ s = format (s, "slist 0x%x, %u items, branching_factor %.2f\n", sl,
+ sl->occupancy ? sl->occupancy[0] : 0, sl->branching_factor);
+
+ if (pool_elts (sl->elts) == 0)
return s;
-
+
head_elt = pool_elt_at_index (sl->elts, 0);
-
+
for (i = 0; i < vec_len (head_elt->n.nexts); i++)
{
- s = format (s, "level %d: %d elts\n", i,
- sl->occupancy ? sl->occupancy[i] : 0);
-
- if (verbose && head_elt->n.nexts[i] != (u32)~0)
- {
- elt = pool_elt_at_index (sl->elts, head_elt->n.nexts[i]);
- while (elt)
- {
- u32 next_index;
- s = format (s, "%U(%d) ", sl->format_user_element,
- elt->user_pool_index, elt - sl->elts);
- next_index = clib_slist_get_next_at_level (elt, i);
- ASSERT(next_index != 0x7fffffff);
- if (next_index == (u32)~0)
- break;
- else
- elt = pool_elt_at_index (sl->elts, next_index);
- }
- }
+ s = format (s, "level %d: %d elts\n", i,
+ sl->occupancy ? sl->occupancy[i] : 0);
+
+ if (verbose && head_elt->n.nexts[i] != (u32) ~ 0)
+ {
+ elt = pool_elt_at_index (sl->elts, head_elt->n.nexts[i]);
+ while (elt)
+ {
+ u32 next_index;
+ s = format (s, "%U(%d) ", sl->format_user_element,
+ elt->user_pool_index, elt - sl->elts);
+ next_index = clib_slist_get_next_at_level (elt, i);
+ ASSERT (next_index != 0x7fffffff);
+ if (next_index == (u32) ~ 0)
+ break;
+ else
+ elt = pool_elt_at_index (sl->elts, next_index);
+ }
+ }
s = format (s, "\n");
}
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/slist.h b/vppinfra/vppinfra/slist.h
index 1405aff0c38..a7c77e27c96 100644
--- a/vppinfra/vppinfra/slist.h
+++ b/vppinfra/vppinfra/slist.h
@@ -26,16 +26,19 @@
#include <vppinfra/cache.h>
typedef word (clib_slist_key_compare_function_t)
-(void *key, u32 elt_pool_index);
+ (void *key, u32 elt_pool_index);
-typedef enum {
+typedef enum
+{
CLIB_SLIST_MATCH = 0,
CLIB_SLIST_NO_MATCH
} clib_slist_search_result_t;
-typedef struct {
+typedef struct
+{
/* Vector of next elements. Every valid instance has at least one */
- union {
+ union
+ {
u32 next0[2];
u32 *nexts;
} n;
@@ -45,45 +48,45 @@ typedef struct {
/* $$$ pad to even divisor of cache line */
} clib_slist_elt_t;
-static inline u32 clib_slist_get_next_at_level (clib_slist_elt_t * elt,
- int level)
+static inline u32
+clib_slist_get_next_at_level (clib_slist_elt_t * elt, int level)
{
if (elt->n.next0[0] & 1)
{
ASSERT (level < 2);
if (level == 1)
- return elt->n.next0[1];
+ return elt->n.next0[1];
/* preserve ~0 (end of list) */
- return (elt->n.next0[0] == (u32)~0) ? elt->n.next0[0] :
- (elt->n.next0[0]>>1);
+ return (elt->n.next0[0] == (u32) ~ 0) ? elt->n.next0[0] :
+ (elt->n.next0[0] >> 1);
}
else
{
- ASSERT(level < vec_len (elt->n.nexts));
+ ASSERT (level < vec_len (elt->n.nexts));
return elt->n.nexts[level];
}
}
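
/* Editorial note, not part of this patch: a worked example of the
   low-order-bit encoding handled by this accessor and its setter below.
   Storing next index 5 at level 0 writes (5 << 1) | 1 = 11 into
   next0[0]; reading it back shifts 11 >> 1 = 5.  The end-of-list
   marker (u32)~0 is deliberately passed through unshifted. */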
-static inline void clib_slist_set_next_at_level (clib_slist_elt_t * elt,
- u32 index, int level)
+static inline void
+clib_slist_set_next_at_level (clib_slist_elt_t * elt, u32 index, int level)
{
u32 old_level0_value[2];
/* level0 and not a vector */
if (level < 2 && (elt->n.next0[0] == 0 || elt->n.next0[0] & 1))
{
if (level == 0)
- {
- elt->n.next0[0] = (index<<1) | 1;
- return;
- }
+ {
+ elt->n.next0[0] = (index << 1) | 1;
+ return;
+ }
elt->n.next0[1] = index;
return;
}
/* have to save old level0 values? */
if (elt->n.next0[0] & 1)
{
- old_level0_value[0] = (elt->n.next0[0] == (u32)~0) ?
- elt->n.next0[0] : elt->n.next0[0]>>1;
+ old_level0_value[0] = (elt->n.next0[0] == (u32) ~ 0) ?
+ elt->n.next0[0] : elt->n.next0[0] >> 1;
old_level0_value[1] = elt->n.next0[1];
elt->n.nexts = 0;
vec_add1 (elt->n.nexts, old_level0_value[0]);
@@ -94,7 +97,8 @@ static inline void clib_slist_set_next_at_level (clib_slist_elt_t * elt,
}
-typedef struct {
+typedef struct
+{
/* pool of skip-list elements */
clib_slist_elt_t *elts;
@@ -120,16 +124,22 @@ typedef struct {
u32 seed;
} clib_slist_t;
-clib_error_t *
-clib_slist_init (clib_slist_t *sp, f64 branching_factor,
- clib_slist_key_compare_function_t compare,
- format_function_t format_user_element);
+clib_error_t *clib_slist_init (clib_slist_t * sp, f64 branching_factor,
+ clib_slist_key_compare_function_t compare,
+ format_function_t format_user_element);
format_function_t format_slist;
-void clib_slist_add (clib_slist_t *sp, void *key, u32 user_pool_index);
-clib_slist_search_result_t
-clib_slist_del (clib_slist_t *sp, void *key);
-u32 clib_slist_search (clib_slist_t *sp, void *key, u32 *ncompares);
+void clib_slist_add (clib_slist_t * sp, void *key, u32 user_pool_index);
+clib_slist_search_result_t clib_slist_del (clib_slist_t * sp, void *key);
+u32 clib_slist_search (clib_slist_t * sp, void *key, u32 * ncompares);
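
/* Editorial note, not part of this patch: a hedged usage sketch for the
   API declared above.  my_elt_t, my_pool and the element at pool index 7
   are hypothetical; the compare callback simply orders elements by a u32
   key held in the user's pool. */
   static word
   my_compare (void *key, u32 elt_pool_index)
   {
     my_elt_t *e = pool_elt_at_index (my_pool, elt_pool_index);
     return (word) (*(u32 *) key) - (word) e->key;
   }

   clib_slist_t sl;
   u32 key = 42, index;
   clib_slist_init (&sl, 0.2, my_compare, 0 /* no element formatter */ );
   clib_slist_add (&sl, &key, 7 /* pool index of the matching element */ );
   index = clib_slist_search (&sl, &key, 0 /* ncompares not needed */ );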
#endif /* included_slist_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/smp.c b/vppinfra/vppinfra/smp.c
index 68b501d4bdc..8ac19960982 100644
--- a/vppinfra/vppinfra/smp.c
+++ b/vppinfra/vppinfra/smp.c
@@ -39,15 +39,18 @@
#include <vppinfra/mheap.h>
#include <vppinfra/os.h>
-void clib_smp_free (clib_smp_main_t * m)
+void
+clib_smp_free (clib_smp_main_t * m)
{
- clib_mem_vm_free (m->vm_base, (uword) ((1 + m->n_cpus) << m->log2_n_per_cpu_vm_bytes));
+ clib_mem_vm_free (m->vm_base,
+ (uword) ((1 + m->n_cpus) << m->log2_n_per_cpu_vm_bytes));
}
-static uword allocate_per_cpu_mheap (uword cpu)
+static uword
+allocate_per_cpu_mheap (uword cpu)
{
- clib_smp_main_t * m = &clib_smp_main;
- void * heap;
+ clib_smp_main_t *m = &clib_smp_main;
+ void *heap;
uword vm_size, stack_size, mheap_flags;
ASSERT (os_get_cpu_number () == cpu);
@@ -59,8 +62,7 @@ static uword allocate_per_cpu_mheap (uword cpu)
/* Heap extends up to start of stack. */
heap = mheap_alloc_with_flags (clib_smp_vm_base_for_cpu (m, cpu),
- vm_size - stack_size,
- mheap_flags);
+ vm_size - stack_size, mheap_flags);
clib_mem_set_heap (heap);
if (cpu == 0)
@@ -79,13 +81,15 @@ static uword allocate_per_cpu_mheap (uword cpu)
return 0;
}
-void clib_smp_init (void)
+void
+clib_smp_init (void)
{
- clib_smp_main_t * m = &clib_smp_main;
+ clib_smp_main_t *m = &clib_smp_main;
uword cpu;
- m->vm_base = clib_mem_vm_alloc ((uword) (m->n_cpus + 1) << m->log2_n_per_cpu_vm_bytes);
- if (! m->vm_base)
+ m->vm_base =
+ clib_mem_vm_alloc ((uword) (m->n_cpus + 1) << m->log2_n_per_cpu_vm_bytes);
+ if (!m->vm_base)
clib_error ("error allocating virtual memory");
for (cpu = 0; cpu < m->n_cpus; cpu++)
@@ -93,9 +97,10 @@ void clib_smp_init (void)
clib_smp_stack_top_for_cpu (m, cpu));
}
-void clib_smp_lock_init (clib_smp_lock_t ** pl)
+void
+clib_smp_lock_init (clib_smp_lock_t ** pl)
{
- clib_smp_lock_t * l;
+ clib_smp_lock_t *l;
uword i, n_bytes, n_fifo_elts;
/* No locking necessary if n_cpus <= 1.
@@ -124,17 +129,18 @@ void clib_smp_lock_init (clib_smp_lock_t ** pl)
*pl = l;
}
-void clib_smp_lock_free (clib_smp_lock_t ** pl)
+void
+clib_smp_lock_free (clib_smp_lock_t ** pl)
{
if (*pl)
clib_mem_free (*pl);
*pl = 0;
}
-void clib_smp_lock_slow_path (clib_smp_lock_t * l,
- uword my_cpu,
- clib_smp_lock_header_t h0,
- clib_smp_lock_type_t type)
+void
+clib_smp_lock_slow_path (clib_smp_lock_t * l,
+ uword my_cpu,
+ clib_smp_lock_header_t h0, clib_smp_lock_type_t type)
{
clib_smp_lock_header_t h1, h2, h3;
uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
@@ -163,7 +169,7 @@ void clib_smp_lock_slow_path (clib_smp_lock_t * l,
/* It is possible that if head and tail are both zero, CPU with lock would have unlocked lock. */
else if (type == CLIB_SMP_LOCK_TYPE_SPIN)
{
- while (! h2.writer_has_lock)
+ while (!h2.writer_has_lock)
{
ASSERT_AND_PANIC (h2.waiting_fifo.n_elts == 0);
h1 = h2;
@@ -185,7 +191,7 @@ void clib_smp_lock_slow_path (clib_smp_lock_t * l,
}
{
- clib_smp_lock_waiting_fifo_elt_t * w;
+ clib_smp_lock_waiting_fifo_elt_t *w;
w = l->waiting_fifo + my_tail;
@@ -193,8 +199,7 @@ void clib_smp_lock_slow_path (clib_smp_lock_t * l,
clib_smp_pause ();
w->wait_type = (is_reader
- ? CLIB_SMP_LOCK_WAIT_READER
- : CLIB_SMP_LOCK_WAIT_WRITER);
+ ? CLIB_SMP_LOCK_WAIT_READER : CLIB_SMP_LOCK_WAIT_WRITER);
/* Wait until CPU holding the lock grants us the lock. */
while (w->wait_type != CLIB_SMP_LOCK_WAIT_DONE)
@@ -204,18 +209,19 @@ void clib_smp_lock_slow_path (clib_smp_lock_t * l,
}
}
-void clib_smp_unlock_slow_path (clib_smp_lock_t * l,
- uword my_cpu,
- clib_smp_lock_header_t h0,
- clib_smp_lock_type_t type)
+void
+clib_smp_unlock_slow_path (clib_smp_lock_t * l,
+ uword my_cpu,
+ clib_smp_lock_header_t h0,
+ clib_smp_lock_type_t type)
{
clib_smp_lock_header_t h1, h2;
- clib_smp_lock_waiting_fifo_elt_t * head;
+ clib_smp_lock_waiting_fifo_elt_t *head;
clib_smp_lock_wait_type_t head_wait_type;
uword is_reader = type == CLIB_SMP_LOCK_TYPE_READER;
uword n_fifo_elts = l->n_waiting_fifo_elts;
uword head_index, must_wait_for_readers;
-
+
while (1)
{
/* Advance waiting fifo giving lock to first waiter. */
@@ -238,7 +244,8 @@ void clib_smp_unlock_slow_path (clib_smp_lock_t * l,
ASSERT_AND_PANIC (h1.writer_has_lock);
}
- while ((head_wait_type = head->wait_type) == CLIB_SMP_LOCK_WAIT_EMPTY)
+ while ((head_wait_type =
+ head->wait_type) == CLIB_SMP_LOCK_WAIT_EMPTY)
clib_smp_pause ();
/* Don't advance FIFO to writer unless all readers have unlocked. */
@@ -247,7 +254,7 @@ void clib_smp_unlock_slow_path (clib_smp_lock_t * l,
&& head_wait_type == CLIB_SMP_LOCK_WAIT_WRITER
&& h1.n_readers_with_lock != 0);
- if (! must_wait_for_readers)
+ if (!must_wait_for_readers)
{
head_index += 1;
h1.waiting_fifo.n_elts -= 1;
@@ -263,11 +270,14 @@ void clib_smp_unlock_slow_path (clib_smp_lock_t * l,
}
}
- h1.waiting_fifo.head_index = head_index == n_fifo_elts ? 0 : head_index;
+ h1.waiting_fifo.head_index =
+ head_index == n_fifo_elts ? 0 : head_index;
h1.request_cpu = my_cpu;
- ASSERT_AND_PANIC (h1.waiting_fifo.head_index >= 0 && h1.waiting_fifo.head_index < n_fifo_elts);
- ASSERT_AND_PANIC (h1.waiting_fifo.n_elts >= 0 && h1.waiting_fifo.n_elts <= n_fifo_elts);
+ ASSERT_AND_PANIC (h1.waiting_fifo.head_index >= 0
+ && h1.waiting_fifo.head_index < n_fifo_elts);
+ ASSERT_AND_PANIC (h1.waiting_fifo.n_elts >= 0
+ && h1.waiting_fifo.n_elts <= n_fifo_elts);
h2 = clib_smp_lock_set_header (l, h1, h0);
@@ -305,3 +315,11 @@ void clib_smp_unlock_slow_path (clib_smp_lock_t * l,
}
}
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/smp.h b/vppinfra/vppinfra/smp.h
index 7f0b93ccb91..7e703b3d6aa 100644
--- a/vppinfra/vppinfra/smp.h
+++ b/vppinfra/vppinfra/smp.h
@@ -39,7 +39,7 @@
#define included_clib_smp_h
#include <vppinfra/cache.h>
-#include <vppinfra/os.h> /* for os_panic */
+#include <vppinfra/os.h> /* for os_panic */
#define clib_smp_compare_and_swap(addr,new,old) __sync_val_compare_and_swap(addr,old,new)
#define clib_smp_swap(addr,new) __sync_lock_test_and_set(addr,new)
@@ -58,12 +58,24 @@
always_inline void
os_sched_yield (void)
-{ sched_yield (); }
+{
+ sched_yield ();
+}
#else
always_inline void
os_sched_yield (void)
-{ clib_smp_pause (); }
+{
+ clib_smp_pause ();
+}
#endif
#endif /* included_clib_smp_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/smp_fifo.c b/vppinfra/vppinfra/smp_fifo.c
index 863c2fd8068..bb74064d8f3 100644
--- a/vppinfra/vppinfra/smp_fifo.c
+++ b/vppinfra/vppinfra/smp_fifo.c
@@ -38,9 +38,10 @@
#include <vppinfra/smp_fifo.h>
#include <vppinfra/mem.h>
-clib_smp_fifo_t * clib_smp_fifo_init (uword max_n_elts, uword n_bytes_per_elt)
+clib_smp_fifo_t *
+clib_smp_fifo_init (uword max_n_elts, uword n_bytes_per_elt)
{
- clib_smp_fifo_t * f;
+ clib_smp_fifo_t *f;
uword n_bytes_per_elt_cache_aligned;
f = clib_mem_alloc_aligned (sizeof (f[0]), CLIB_CACHE_LINE_BYTES);
@@ -51,27 +52,40 @@ clib_smp_fifo_t * clib_smp_fifo_init (uword max_n_elts, uword n_bytes_per_elt)
f->log2_max_n_elts = max_log2 (max_n_elts);
f->max_n_elts_less_one = (1 << f->log2_max_n_elts) - 1;
- n_bytes_per_elt_cache_aligned = clib_smp_fifo_round_elt_bytes (n_bytes_per_elt);
- clib_exec_on_global_heap ({
- f->data = clib_mem_alloc_aligned (n_bytes_per_elt_cache_aligned << f->log2_max_n_elts,
- CLIB_CACHE_LINE_BYTES);
- });
+ n_bytes_per_elt_cache_aligned =
+ clib_smp_fifo_round_elt_bytes (n_bytes_per_elt);
+ clib_exec_on_global_heap (
+ {
+ f->data =
+ clib_mem_alloc_aligned
+ (n_bytes_per_elt_cache_aligned <<
+ f->log2_max_n_elts, CLIB_CACHE_LINE_BYTES);}
+ );
/* Zero all data and mark all elements free. */
{
uword i;
for (i = 0; i <= f->max_n_elts_less_one; i++)
{
- void * d = clib_smp_fifo_elt_at_index (f, n_bytes_per_elt, i);
- clib_smp_fifo_data_footer_t * t;
+ void *d = clib_smp_fifo_elt_at_index (f, n_bytes_per_elt, i);
+ clib_smp_fifo_data_footer_t *t;
memset (d, 0, n_bytes_per_elt_cache_aligned);
t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
- clib_smp_fifo_data_footer_set_state (t, CLIB_SMP_FIFO_DATA_STATE_free);
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_free);
}
}
return f;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/smp_fifo.h b/vppinfra/vppinfra/smp_fifo.h
index 2162c8b1e27..c74a77c8e9b 100644
--- a/vppinfra/vppinfra/smp_fifo.h
+++ b/vppinfra/vppinfra/smp_fifo.h
@@ -46,15 +46,17 @@
_ (write_done) \
_ (read_fetch)
-typedef enum {
+typedef enum
+{
#define _(f) CLIB_SMP_FIFO_DATA_STATE_##f,
foreach_clib_smp_fifo_data_state
#undef _
- CLIB_SMP_FIFO_N_DATA_STATE,
+ CLIB_SMP_FIFO_N_DATA_STATE,
} clib_smp_fifo_data_state_t;
/* Footer at end of each data element. */
-typedef struct {
+typedef struct
+{
/* Magic number marking valid footer plus state encoded in low bits. */
u32 magic_state;
} clib_smp_fifo_data_footer_t;
@@ -76,9 +78,12 @@ clib_smp_fifo_data_footer_get_state (clib_smp_fifo_data_footer_t * f)
always_inline void
clib_smp_fifo_data_footer_set_state (clib_smp_fifo_data_footer_t * f,
clib_smp_fifo_data_state_t s)
-{ f->magic_state = CLIB_SMP_DATA_FOOTER_MAGIC + s; }
+{
+ f->magic_state = CLIB_SMP_DATA_FOOTER_MAGIC + s;
+}
-typedef struct {
+typedef struct
+{
/* Read/write indices each on their own cache line.
Atomic incremented for each read/write. */
u32 read_index, write_index;
@@ -90,16 +95,18 @@ typedef struct {
u32 log2_max_n_elts;
/* Cache aligned data. */
- void * data;
+ void *data;
} clib_smp_fifo_t;
/* External functions. */
-clib_smp_fifo_t * clib_smp_fifo_init (uword max_n_elts, uword n_bytes_per_elt);
+clib_smp_fifo_t *clib_smp_fifo_init (uword max_n_elts, uword n_bytes_per_elt);
/* Elements are always cache-line sized; this is to avoid smp cache thrashing. */
always_inline uword
clib_smp_fifo_round_elt_bytes (uword n_bytes_per_elt)
-{ return round_pow2 (n_bytes_per_elt, CLIB_CACHE_LINE_BYTES); }
+{
+ return round_pow2 (n_bytes_per_elt, CLIB_CACHE_LINE_BYTES);
+}
always_inline uword
clib_smp_fifo_n_elts (clib_smp_fifo_t * f)
@@ -110,30 +117,32 @@ clib_smp_fifo_n_elts (clib_smp_fifo_t * f)
}
always_inline clib_smp_fifo_data_footer_t *
-clib_smp_fifo_get_data_footer (void * d, uword n_bytes_per_elt)
+clib_smp_fifo_get_data_footer (void *d, uword n_bytes_per_elt)
{
- clib_smp_fifo_data_footer_t * f;
+ clib_smp_fifo_data_footer_t *f;
f = d + clib_smp_fifo_round_elt_bytes (n_bytes_per_elt) - sizeof (f[0]);
return f;
}
always_inline void *
-clib_smp_fifo_elt_at_index (clib_smp_fifo_t * f, uword n_bytes_per_elt, uword i)
+clib_smp_fifo_elt_at_index (clib_smp_fifo_t * f, uword n_bytes_per_elt,
+ uword i)
{
uword n_bytes_per_elt_cache_aligned;
ASSERT (i <= f->max_n_elts_less_one);
- n_bytes_per_elt_cache_aligned = clib_smp_fifo_round_elt_bytes (n_bytes_per_elt);
-
+ n_bytes_per_elt_cache_aligned =
+ clib_smp_fifo_round_elt_bytes (n_bytes_per_elt);
+
return f->data + i * n_bytes_per_elt_cache_aligned;
}
always_inline void *
clib_smp_fifo_write_alloc (clib_smp_fifo_t * f, uword n_bytes_per_elt)
{
- void * d;
- clib_smp_fifo_data_footer_t * t;
+ void *d;
+ clib_smp_fifo_data_footer_t *t;
clib_smp_fifo_data_state_t s;
u32 wi0, wi1;
@@ -147,7 +156,9 @@ clib_smp_fifo_write_alloc (clib_smp_fifo_t * f, uword n_bytes_per_elt)
{
wi1 = wi0 + 1;
- d = clib_smp_fifo_elt_at_index (f, n_bytes_per_elt, wi0 & f->max_n_elts_less_one);
+ d =
+ clib_smp_fifo_elt_at_index (f, n_bytes_per_elt,
+ wi0 & f->max_n_elts_less_one);
t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
s = clib_smp_fifo_data_footer_get_state (t);
@@ -161,7 +172,8 @@ clib_smp_fifo_write_alloc (clib_smp_fifo_t * f, uword n_bytes_per_elt)
if (wi1 == wi0)
{
- clib_smp_fifo_data_footer_set_state (t, CLIB_SMP_FIFO_DATA_STATE_write_alloc);
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_write_alloc);
break;
}
@@ -173,9 +185,9 @@ clib_smp_fifo_write_alloc (clib_smp_fifo_t * f, uword n_bytes_per_elt)
}
always_inline void
-clib_smp_fifo_write_done (clib_smp_fifo_t * f, void * d, uword n_bytes_per_elt)
+clib_smp_fifo_write_done (clib_smp_fifo_t * f, void *d, uword n_bytes_per_elt)
{
- clib_smp_fifo_data_footer_t * t;
+ clib_smp_fifo_data_footer_t *t;
/* Flush out pending writes before we change state to write_done.
This will hold off readers until data is flushed. */
@@ -183,15 +195,17 @@ clib_smp_fifo_write_done (clib_smp_fifo_t * f, void * d, uword n_bytes_per_elt)
t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
- ASSERT (clib_smp_fifo_data_footer_get_state (t) == CLIB_SMP_FIFO_DATA_STATE_write_alloc);
- clib_smp_fifo_data_footer_set_state (t, CLIB_SMP_FIFO_DATA_STATE_write_done);
+ ASSERT (clib_smp_fifo_data_footer_get_state (t) ==
+ CLIB_SMP_FIFO_DATA_STATE_write_alloc);
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_write_done);
}
always_inline void *
clib_smp_fifo_read_fetch (clib_smp_fifo_t * f, uword n_bytes_per_elt)
{
- void * d;
- clib_smp_fifo_data_footer_t * t;
+ void *d;
+ clib_smp_fifo_data_footer_t *t;
clib_smp_fifo_data_state_t s;
u32 ri0, ri1;
@@ -205,7 +219,9 @@ clib_smp_fifo_read_fetch (clib_smp_fifo_t * f, uword n_bytes_per_elt)
{
ri1 = ri0 + 1;
- d = clib_smp_fifo_elt_at_index (f, n_bytes_per_elt, ri0 & f->max_n_elts_less_one);
+ d =
+ clib_smp_fifo_elt_at_index (f, n_bytes_per_elt,
+ ri0 & f->max_n_elts_less_one);
t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
s = clib_smp_fifo_data_footer_get_state (t);
@@ -218,7 +234,8 @@ clib_smp_fifo_read_fetch (clib_smp_fifo_t * f, uword n_bytes_per_elt)
ri1 = clib_smp_compare_and_swap (&f->read_index, ri1, ri0);
if (ri1 == ri0)
{
- clib_smp_fifo_data_footer_set_state (t, CLIB_SMP_FIFO_DATA_STATE_read_fetch);
+ clib_smp_fifo_data_footer_set_state (t,
+ CLIB_SMP_FIFO_DATA_STATE_read_fetch);
break;
}
@@ -229,13 +246,14 @@ clib_smp_fifo_read_fetch (clib_smp_fifo_t * f, uword n_bytes_per_elt)
}
always_inline void
-clib_smp_fifo_read_done (clib_smp_fifo_t * f, void * d, uword n_bytes_per_elt)
+clib_smp_fifo_read_done (clib_smp_fifo_t * f, void *d, uword n_bytes_per_elt)
{
- clib_smp_fifo_data_footer_t * t;
+ clib_smp_fifo_data_footer_t *t;
t = clib_smp_fifo_get_data_footer (d, n_bytes_per_elt);
- ASSERT (clib_smp_fifo_data_footer_get_state (t) == CLIB_SMP_FIFO_DATA_STATE_read_fetch);
+ ASSERT (clib_smp_fifo_data_footer_get_state (t) ==
+ CLIB_SMP_FIFO_DATA_STATE_read_fetch);
clib_smp_fifo_data_footer_set_state (t, CLIB_SMP_FIFO_DATA_STATE_free);
}
@@ -265,21 +283,31 @@ clib_smp_fifo_memcpy (uword * dst, uword * src, uword n_bytes)
}
always_inline void
-clib_smp_fifo_write_inline (clib_smp_fifo_t * f, void * elt_to_write, uword n_bytes_per_elt)
+clib_smp_fifo_write_inline (clib_smp_fifo_t * f, void *elt_to_write,
+ uword n_bytes_per_elt)
{
- uword * dst;
+ uword *dst;
dst = clib_smp_fifo_write_alloc (f, n_bytes_per_elt);
clib_smp_fifo_memcpy (dst, elt_to_write, n_bytes_per_elt);
clib_smp_fifo_write_done (f, dst, n_bytes_per_elt);
}
always_inline void
-clib_smp_fifo_read_inline (clib_smp_fifo_t * f, void * elt_to_read, uword n_bytes_per_elt)
+clib_smp_fifo_read_inline (clib_smp_fifo_t * f, void *elt_to_read,
+ uword n_bytes_per_elt)
{
- uword * src;
+ uword *src;
src = clib_smp_fifo_read_fetch (f, n_bytes_per_elt);
clib_smp_fifo_memcpy (elt_to_read, src, n_bytes_per_elt);
clib_smp_fifo_read_done (f, src, n_bytes_per_elt);
}
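
/* Editorial note, not part of this patch: one element flowing through the
   fifo using the inline copy helpers above.  msg_t is a hypothetical
   fixed-size element type, and the clib heap is assumed to be set up. */
   typedef struct { u32 a, b; } msg_t;
   clib_smp_fifo_t *f = clib_smp_fifo_init (64 /* max elts */, sizeof (msg_t));
   msg_t tx = { 1, 2 }, rx;
   clib_smp_fifo_write_inline (f, &tx, sizeof (msg_t));
   clib_smp_fifo_read_inline (f, &rx, sizeof (msg_t));	/* rx now equals tx */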
#endif /* included_clib_smp_vec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/socket.c b/vppinfra/vppinfra/socket.c
index 44ceb65984a..99b353fcd52 100644
--- a/vppinfra/vppinfra/socket.c
+++ b/vppinfra/vppinfra/socket.c
@@ -44,7 +44,7 @@
#include <unistd.h>
#include <stdio.h>
#include <fcntl.h>
-#include <string.h> /* strchr */
+#include <string.h> /* strchr */
#include <vppinfra/mem.h>
#include <vppinfra/vec.h>
@@ -52,7 +52,8 @@
#include <vppinfra/format.h>
#include <vppinfra/error.h>
-void clib_socket_tx_add_formatted (clib_socket_t * s, char * fmt, ...)
+void
+clib_socket_tx_add_formatted (clib_socket_t * s, char *fmt, ...)
{
va_list va;
va_start (va, fmt);
@@ -61,7 +62,8 @@ void clib_socket_tx_add_formatted (clib_socket_t * s, char * fmt, ...)
}
/* Return and bind to an unused port. */
-static word find_free_port (word sock)
+static word
+find_free_port (word sock)
{
word port;
@@ -69,7 +71,7 @@ static word find_free_port (word sock)
{
struct sockaddr_in a;
- memset(&a, 0, sizeof (a)); /* Warnings be gone */
+ memset (&a, 0, sizeof (a)); /* Warnings be gone */
a.sin_family = PF_INET;
a.sin_addr.s_addr = INADDR_ANY;
@@ -78,39 +80,37 @@ static word find_free_port (word sock)
if (bind (sock, (struct sockaddr *) &a, sizeof (a)) >= 0)
break;
}
-
+
return port < 1 << 16 ? port : -1;
}
/* Convert a config string to a struct sockaddr and length for use
with bind or connect. */
static clib_error_t *
-socket_config (char * config,
- void * addr,
- socklen_t * addr_len,
- u32 ip4_default_address)
+socket_config (char *config,
+ void *addr, socklen_t * addr_len, u32 ip4_default_address)
{
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
- if (! config)
+ if (!config)
config = "";
/* Anything that begins with a / is a local PF_LOCAL socket. */
if (config[0] == '/')
{
- struct sockaddr_un * su = addr;
+ struct sockaddr_un *su = addr;
su->sun_family = PF_LOCAL;
clib_memcpy (&su->sun_path, config,
- clib_min (sizeof (su->sun_path), 1 + strlen (config)));
+ clib_min (sizeof (su->sun_path), 1 + strlen (config)));
*addr_len = sizeof (su[0]);
}
/* Hostname or hostname:port or port. */
else
{
- char * host_name;
+ char *host_name;
int port = -1;
- struct sockaddr_in * sa = addr;
+ struct sockaddr_in *sa = addr;
host_name = 0;
port = -1;
@@ -145,7 +145,7 @@ socket_config (char * config,
struct in_addr host_addr;
/* Recognize localhost to avoid host lookup in most common case. */
- if (! strcmp (host_name, "localhost"))
+ if (!strcmp (host_name, "localhost"))
sa->sin_addr.s_addr = htonl (INADDR_LOOPBACK);
else if (inet_aton (host_name, &host_addr))
@@ -153,11 +153,12 @@ socket_config (char * config,
else if (host_name && strlen (host_name) > 0)
{
- struct hostent * host = gethostbyname (host_name);
- if (! host)
+ struct hostent *host = gethostbyname (host_name);
+ if (!host)
error = clib_error_return (0, "unknown host `%s'", config);
else
- clib_memcpy (&sa->sin_addr.s_addr, host->h_addr_list[0], host->h_length);
+ clib_memcpy (&sa->sin_addr.s_addr, host->h_addr_list[0],
+ host->h_length);
}
else
@@ -169,14 +170,14 @@ socket_config (char * config,
}
}
- done:
+done:
return error;
}
static clib_error_t *
default_socket_write (clib_socket_t * s)
{
- clib_error_t * err = 0;
+ clib_error_t *err = 0;
word written = 0;
word fd = 0;
word tx_len;
@@ -192,7 +193,7 @@ default_socket_write (clib_socket_t * s)
written = write (fd, s->tx_buffer, tx_len);
/* Ignore certain errors. */
- if (written < 0 && ! unix_error_is_fatal (errno))
+ if (written < 0 && !unix_error_is_fatal (errno))
written = 0;
/* A "real" error occurred. */
@@ -214,12 +215,12 @@ default_socket_write (clib_socket_t * s)
/* If a non-fatal error occurred AND
the buffer is full, then we must free it. */
- else if (written == 0 && tx_len > 64*1024)
+ else if (written == 0 && tx_len > 64 * 1024)
{
vec_free (s->tx_buffer);
}
- done:
+done:
return err;
}
@@ -227,7 +228,7 @@ static clib_error_t *
default_socket_read (clib_socket_t * sock, int n_bytes)
{
word fd, n_read;
- u8 * buf;
+ u8 *buf;
/* RX side of socket is down once end of file is reached. */
if (sock->flags & SOCKET_RX_END_OF_FILE)
@@ -243,54 +244,56 @@ default_socket_read (clib_socket_t * sock, int n_bytes)
n_read = 0;
/* Ignore certain errors. */
- if (! unix_error_is_fatal (errno))
+ if (!unix_error_is_fatal (errno))
goto non_fatal;
-
+
return clib_error_return_unix (0, "read %d bytes", n_bytes);
}
-
+
/* Other side closed the socket. */
if (n_read == 0)
sock->flags |= SOCKET_RX_END_OF_FILE;
- non_fatal:
+non_fatal:
_vec_len (sock->rx_buffer) += n_read - n_bytes;
return 0;
}
-static clib_error_t * default_socket_close (clib_socket_t * s)
+static clib_error_t *
+default_socket_close (clib_socket_t * s)
{
if (close (s->fd) < 0)
return clib_error_return_unix (0, "close");
return 0;
}
-static void socket_init_funcs (clib_socket_t * s)
+static void
+socket_init_funcs (clib_socket_t * s)
{
- if (! s->write_func)
+ if (!s->write_func)
s->write_func = default_socket_write;
- if (! s->read_func)
+ if (!s->read_func)
s->read_func = default_socket_read;
- if (! s->close_func)
+ if (!s->close_func)
s->close_func = default_socket_close;
}
clib_error_t *
clib_socket_init (clib_socket_t * s)
{
- union {
+ union
+ {
struct sockaddr sa;
struct sockaddr_un su;
} addr;
socklen_t addr_len = 0;
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
word port;
error = socket_config (s->config, &addr.sa, &addr_len,
(s->flags & SOCKET_IS_SERVER
- ? INADDR_LOOPBACK
- : INADDR_ANY));
+ ? INADDR_LOOPBACK : INADDR_ANY));
if (error)
goto done;
@@ -334,8 +337,7 @@ clib_socket_init (clib_socket_t * s)
clib_unix_warning ("setsockopt SO_REUSEADDR fails");
}
- if (need_bind
- && bind (s->fd, &addr.sa, addr_len) < 0)
+ if (need_bind && bind (s->fd, &addr.sa, addr_len) < 0)
{
error = clib_error_return_unix (0, "bind");
goto done;
@@ -357,8 +359,8 @@ clib_socket_init (clib_socket_t * s)
}
if (connect (s->fd, &addr.sa, addr_len) < 0
- && ! ((s->flags & SOCKET_NON_BLOCKING_CONNECT) &&
- errno == EINPROGRESS))
+ && !((s->flags & SOCKET_NON_BLOCKING_CONNECT) &&
+ errno == EINPROGRESS))
{
error = clib_error_return_unix (0, "connect");
goto done;
@@ -367,31 +369,32 @@ clib_socket_init (clib_socket_t * s)
return error;
- done:
+done:
if (s->fd > 0)
close (s->fd);
return error;
}
-clib_error_t * clib_socket_accept (clib_socket_t * server, clib_socket_t * client)
+clib_error_t *
+clib_socket_accept (clib_socket_t * server, clib_socket_t * client)
{
- clib_error_t * err = 0;
- socklen_t len = 0;
-
+ clib_error_t *err = 0;
+ socklen_t len = 0;
+
memset (client, 0, sizeof (client[0]));
/* Accept the new socket connection. */
client->fd = accept (server->fd, 0, 0);
- if (client->fd < 0)
+ if (client->fd < 0)
return clib_error_return_unix (0, "accept");
-
+
/* Set the new socket to be non-blocking. */
if (fcntl (client->fd, F_SETFL, O_NONBLOCK) < 0)
{
err = clib_error_return_unix (0, "fcntl O_NONBLOCK");
goto close_client;
}
-
+
/* Get peer info. */
len = sizeof (client->peer);
if (getpeername (client->fd, (struct sockaddr *) &client->peer, &len) < 0)
@@ -405,7 +408,15 @@ clib_error_t * clib_socket_accept (clib_socket_t * server, clib_socket_t * clien
socket_init_funcs (client);
return 0;
- close_client:
+close_client:
close (client->fd);
return err;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/socket.h b/vppinfra/vppinfra/socket.h
index a2d964afed9..08e22e7eb61 100644
--- a/vppinfra/vppinfra/socket.h
+++ b/vppinfra/vppinfra/socket.h
@@ -46,12 +46,13 @@
#include <vppinfra/error.h>
#include <vppinfra/format.h>
-typedef struct _socket_t {
+typedef struct _socket_t
+{
/* File descriptor. */
i32 fd;
/* Config string for socket HOST:PORT or just HOST. */
- char * config;
+ char *config;
u32 flags;
#define SOCKET_IS_SERVER (1 << 0)
@@ -62,59 +63,74 @@ typedef struct _socket_t {
#define SOCKET_RX_END_OF_FILE (1 << 2)
/* Transmit buffer. Holds data waiting to be written. */
- u8 * tx_buffer;
+ u8 *tx_buffer;
/* Receive buffer. Holds data read from socket. */
- u8 * rx_buffer;
+ u8 *rx_buffer;
/* Peer socket we are connected to. */
struct sockaddr_in peer;
- clib_error_t * (* write_func) (struct _socket_t * sock);
- clib_error_t * (* read_func) (struct _socket_t * sock, int min_bytes);
- clib_error_t * (* close_func) (struct _socket_t * sock);
- void * private_data;
+ clib_error_t *(*write_func) (struct _socket_t * sock);
+ clib_error_t *(*read_func) (struct _socket_t * sock, int min_bytes);
+ clib_error_t *(*close_func) (struct _socket_t * sock);
+ void *private_data;
} clib_socket_t;
/* socket config format is host:port.
Unspecified port causes a free one to be chosen starting
from IPPORT_USERRESERVED (5000). */
-clib_error_t *
-clib_socket_init (clib_socket_t * socket);
+clib_error_t *clib_socket_init (clib_socket_t * socket);
-clib_error_t * clib_socket_accept (clib_socket_t * server, clib_socket_t * client);
+clib_error_t *clib_socket_accept (clib_socket_t * server,
+ clib_socket_t * client);
-always_inline uword clib_socket_is_server (clib_socket_t * sock)
-{ return (sock->flags & SOCKET_IS_SERVER) != 0; }
+always_inline uword
+clib_socket_is_server (clib_socket_t * sock)
+{
+ return (sock->flags & SOCKET_IS_SERVER) != 0;
+}
-always_inline uword clib_socket_is_client (clib_socket_t * s)
-{ return ! clib_socket_is_server (s); }
+always_inline uword
+clib_socket_is_client (clib_socket_t * s)
+{
+ return !clib_socket_is_server (s);
+}
-always_inline int clib_socket_rx_end_of_file (clib_socket_t * s)
-{ return s->flags & SOCKET_RX_END_OF_FILE; }
+always_inline int
+clib_socket_rx_end_of_file (clib_socket_t * s)
+{
+ return s->flags & SOCKET_RX_END_OF_FILE;
+}
always_inline void *
clib_socket_tx_add (clib_socket_t * s, int n_bytes)
{
- u8 * result;
+ u8 *result;
vec_add2 (s->tx_buffer, result, n_bytes);
return result;
}
always_inline void
-clib_socket_tx_add_va_formatted (clib_socket_t * s, char * fmt, va_list * va)
-{ s->tx_buffer = va_format (s->tx_buffer, fmt, va); }
+clib_socket_tx_add_va_formatted (clib_socket_t * s, char *fmt, va_list * va)
+{
+ s->tx_buffer = va_format (s->tx_buffer, fmt, va);
+}
always_inline clib_error_t *
clib_socket_tx (clib_socket_t * s)
-{ return s->write_func (s); }
+{
+ return s->write_func (s);
+}
always_inline clib_error_t *
clib_socket_rx (clib_socket_t * s, int n_bytes)
-{ return s->read_func (s, n_bytes); }
+{
+ return s->read_func (s, n_bytes);
+}
always_inline void
-clib_socket_free (clib_socket_t *s)
+clib_socket_free (clib_socket_t * s)
{
vec_free (s->tx_buffer);
vec_free (s->rx_buffer);
@@ -124,14 +140,21 @@ clib_socket_free (clib_socket_t *s)
}
always_inline clib_error_t *
-clib_socket_close (clib_socket_t *sock)
+clib_socket_close (clib_socket_t * sock)
{
- clib_error_t * err;
- err = (* sock->close_func) (sock);
+ clib_error_t *err;
+ err = (*sock->close_func) (sock);
return err;
}
-void
-clib_socket_tx_add_formatted (clib_socket_t * s, char * fmt, ...);
+void clib_socket_tx_add_formatted (clib_socket_t * s, char *fmt, ...);
#endif /* _clib_included_socket_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
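
For reference, the clib_socket_t helpers touched in the two socket files above are
typically driven along the following lines. This is only an illustrative sketch, not
part of the patch: the function name example_send_line and the config value are
hypothetical, while the calls themselves (clib_socket_init, clib_socket_tx_add_formatted,
clib_socket_tx, clib_socket_close, clib_socket_free) are the ones declared in socket.h.

#include <vppinfra/socket.h>

/* Hypothetical helper: connect to "host:port" and send one formatted line.
   Error paths are simplified for brevity. */
static clib_error_t *
example_send_line (char *config)
{
  clib_socket_t _s = { 0 }, *s = &_s;
  clib_error_t *error;

  s->config = config;	/* e.g. "localhost:5000"; unspecified ports start at IPPORT_USERRESERVED */
  s->flags = 0;		/* no SOCKET_IS_SERVER flag, so this is a blocking client connect */

  error = clib_socket_init (s);
  if (error)
    return error;

  clib_socket_tx_add_formatted (s, "hello %d\n", 1);
  error = clib_socket_tx (s);

  if (!error)
    error = clib_socket_close (s);

  clib_socket_free (s);
  return error;
}
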
diff --git a/vppinfra/vppinfra/sparse_vec.h b/vppinfra/vppinfra/sparse_vec.h
index bf18ebd9e9b..ec8f0a1c4bf 100644
--- a/vppinfra/vppinfra/sparse_vec.h
+++ b/vppinfra/vppinfra/sparse_vec.h
@@ -43,21 +43,24 @@
/* Sparsely indexed vectors. Basic idea taken from Hacker's Delight.
Eliot added ranges. */
-typedef struct {
+typedef struct
+{
/* Bitmap one for each sparse index. */
- uword * is_member_bitmap;
+ uword *is_member_bitmap;
/* member_counts[i] = total number of members with j < i. */
- u16 * member_counts;
+ u16 *member_counts;
#define SPARSE_VEC_IS_RANGE (1 << 0)
#define SPARSE_VEC_IS_VALID_RANGE (1 << 1)
- u8 * range_flags;
+ u8 *range_flags;
} sparse_vec_header_t;
always_inline sparse_vec_header_t *
-sparse_vec_header (void * v)
-{ return vec_header (v, sizeof (sparse_vec_header_t)); }
+sparse_vec_header (void *v)
+{
+ return vec_header (v, sizeof (sparse_vec_header_t));
+}
/* Index 0 is always used to mark indices that are not valid in
sparse vector. For example, you look up V[0x1234] and 0x1234 is not
@@ -67,15 +70,15 @@ sparse_vec_header (void * v)
always_inline void *
sparse_vec_new (uword elt_bytes, uword sparse_index_bits)
{
- void * v;
- sparse_vec_header_t * h;
+ void *v;
+ sparse_vec_header_t *h;
word n;
ASSERT (sparse_index_bits <= 16);
v = _vec_resize (0,
/* length increment */ 8,
- /* data bytes */ 8*elt_bytes,
+ /* data bytes */ 8 * elt_bytes,
/* header bytes */ sizeof (h[0]),
/* data align */ 0);
@@ -95,12 +98,11 @@ sparse_vec_new (uword elt_bytes, uword sparse_index_bits)
}
always_inline uword
-sparse_vec_index_internal (void * v,
+sparse_vec_index_internal (void *v,
uword sparse_index,
- uword maybe_range,
- u32 * insert)
+ uword maybe_range, u32 * insert)
{
- sparse_vec_header_t * h;
+ sparse_vec_header_t *h;
uword i, b, d, w;
u8 is_member;
@@ -128,8 +130,8 @@ sparse_vec_index_internal (void * v,
if (insert)
{
- *insert = ! is_member;
- if (! is_member)
+ *insert = !is_member;
+ if (!is_member)
{
uword j;
w |= b;
@@ -147,19 +149,18 @@ sparse_vec_index_internal (void * v,
}
always_inline uword
-sparse_vec_index (void * v, uword sparse_index)
+sparse_vec_index (void *v, uword sparse_index)
{
return sparse_vec_index_internal (v, sparse_index,
/* maybe range */ 0,
/* insert? */ 0);
}
-
+
always_inline void
-sparse_vec_index2 (void * v,
- u32 si0, u32 si1,
- u32 * i0_return, u32 * i1_return)
+sparse_vec_index2 (void *v,
+ u32 si0, u32 si1, u32 * i0_return, u32 * i1_return)
{
- sparse_vec_header_t * h;
+ sparse_vec_header_t *h;
uword b0, b1, w0, w1, v0, v1;
u32 i0, i1, d0, d1;
u8 is_member0, is_member1;
@@ -189,7 +190,7 @@ sparse_vec_index2 (void * v,
d1 = h->member_counts[i1] + (v1 != 0);
/* Validate speculation. */
- if (PREDICT_FALSE (! is_pow2 (v0) || ! is_pow2 (v1)))
+ if (PREDICT_FALSE (!is_pow2 (v0) || !is_pow2 (v1)))
{
d0 += count_set_bits (v0) - (v0 != 0);
d1 += count_set_bits (v1) - (v1 != 0);
@@ -233,3 +234,11 @@ sparse_vec_index2 (void * v,
})
#endif /* included_sparse_vec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
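
The sparse vector lookups reformatted above are easiest to read with a small example
in mind. The sketch below is illustrative only (example_lookup is a made-up name); it
relies on the property documented above sparse_vec_new that dense index 0 is reserved
to mean "not a member".

#include <vppinfra/sparse_vec.h>

/* Hypothetical lookup in a sparse vector created with
   sparse_vec_new (sizeof (sv[0]), 16), i.e. 16 significant sparse index bits. */
static u16
example_lookup (u16 * sv, u32 sparse_index)
{
  uword i = sparse_vec_index (sv, sparse_index);

  /* Non-members map to dense index 0, so sv[0] acts as a dummy slot and the
     caller can treat a result of 0 as "absent". */
  return sv[i];
}
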
diff --git a/vppinfra/vppinfra/std-formats.c b/vppinfra/vppinfra/std-formats.c
index b47d8fb5aac..ac98f999f21 100644
--- a/vppinfra/vppinfra/std-formats.c
+++ b/vppinfra/vppinfra/std-formats.c
@@ -39,10 +39,11 @@
#include <ctype.h>
/* Format vectors. */
-u8 * format_vec32 (u8 * s, va_list * va)
+u8 *
+format_vec32 (u8 * s, va_list * va)
{
- u32 * v = va_arg (*va, u32 *);
- char * fmt = va_arg (*va, char *);
+ u32 *v = va_arg (*va, u32 *);
+ char *fmt = va_arg (*va, char *);
uword i;
for (i = 0; i < vec_len (v); i++)
{
@@ -53,10 +54,11 @@ u8 * format_vec32 (u8 * s, va_list * va)
return s;
}
-u8 * format_vec_uword (u8 * s, va_list * va)
+u8 *
+format_vec_uword (u8 * s, va_list * va)
{
- uword * v = va_arg (*va, uword *);
- char * fmt = va_arg (*va, char *);
+ uword *v = va_arg (*va, uword *);
+ char *fmt = va_arg (*va, char *);
uword i;
for (i = 0; i < vec_len (v); i++)
{
@@ -68,18 +70,20 @@ u8 * format_vec_uword (u8 * s, va_list * va)
}
/* Ascii buffer and length. */
-u8 * format_ascii_bytes (u8 * s, va_list * va)
+u8 *
+format_ascii_bytes (u8 * s, va_list * va)
{
- u8 * v = va_arg (*va, u8 *);
+ u8 *v = va_arg (*va, u8 *);
uword n_bytes = va_arg (*va, uword);
vec_add (s, v, n_bytes);
return s;
}
/* Format hex dump. */
-u8 * format_hex_bytes (u8 * s, va_list * va)
+u8 *
+format_hex_bytes (u8 * s, va_list * va)
{
- u8 * bytes = va_arg (*va, u8 *);
+ u8 *bytes = va_arg (*va, u8 *);
int n_bytes = va_arg (*va, int);
uword i;
@@ -92,12 +96,12 @@ u8 * format_hex_bytes (u8 * s, va_list * va)
for (i = 0; i < n_bytes; i++)
{
- if (! short_form && (i % 32) == 0)
+ if (!short_form && (i % 32) == 0)
s = format (s, "%08x: ", i);
s = format (s, "%02x", bytes[i]);
- if (! short_form && ((i + 1) % 32) == 0 && (i + 1) < n_bytes)
+ if (!short_form && ((i + 1) % 32) == 0 && (i + 1) < n_bytes)
s = format (s, "\n%U", format_white_space, indent);
}
@@ -105,7 +109,8 @@ u8 * format_hex_bytes (u8 * s, va_list * va)
}
/* Add variable number of spaces. */
-u8 * format_white_space (u8 * s, va_list * va)
+u8 *
+format_white_space (u8 * s, va_list * va)
{
uword n = va_arg (*va, uword);
while (n-- > 0)
@@ -113,17 +118,18 @@ u8 * format_white_space (u8 * s, va_list * va)
return s;
}
-u8 * format_time_interval (u8 * s, va_list * args)
+u8 *
+format_time_interval (u8 * s, va_list * args)
{
- u8 * fmt = va_arg (*args, u8 *);
+ u8 *fmt = va_arg (*args, u8 *);
f64 t = va_arg (*args, f64);
- u8 * f;
+ u8 *f;
const f64 seconds_per_minute = 60;
const f64 seconds_per_hour = 60 * seconds_per_minute;
const f64 seconds_per_day = 24 * seconds_per_hour;
uword days, hours, minutes, secs, msecs, usecs;
-
+
days = t / seconds_per_day;
t -= days * seconds_per_day;
@@ -136,13 +142,13 @@ u8 * format_time_interval (u8 * s, va_list * args)
secs = t;
t -= secs;
- msecs = 1e3*t;
- usecs = 1e6*t;
+ msecs = 1e3 * t;
+ usecs = 1e6 * t;
for (f = fmt; *f; f++)
{
uword what, c;
- char * what_fmt = "%d";
+ char *what_fmt = "%d";
switch (c = *f)
{
@@ -183,7 +189,8 @@ u8 * format_time_interval (u8 * s, va_list * args)
}
/* Unparse memory size e.g. 100, 100k, 100m, 100g. */
-u8 * format_memory_size (u8 * s, va_list * va)
+u8 *
+format_memory_size (u8 * s, va_list * va)
{
uword size = va_arg (*va, uword);
uword l, u, log_u;
@@ -211,20 +218,30 @@ u8 * format_memory_size (u8 * s, va_list * va)
}
/* Parse memory size e.g. 100, 100k, 100m, 100g. */
-uword unformat_memory_size (unformat_input_t * input, va_list * va)
+uword
+unformat_memory_size (unformat_input_t * input, va_list * va)
{
uword amount, shift, c;
- uword * result = va_arg (*va, uword *);
+ uword *result = va_arg (*va, uword *);
- if (! unformat (input, "%wd%_", &amount))
+ if (!unformat (input, "%wd%_", &amount))
return 0;
c = unformat_get_input (input);
switch (c)
{
- case 'k': case 'K': shift = 10; break;
- case 'm': case 'M': shift = 20; break;
- case 'g': case 'G': shift = 30; break;
+ case 'k':
+ case 'K':
+ shift = 10;
+ break;
+ case 'm':
+ case 'M':
+ shift = 20;
+ break;
+ case 'g':
+ case 'G':
+ shift = 30;
+ break;
default:
shift = 0;
unformat_put_input (input);
@@ -237,9 +254,10 @@ uword unformat_memory_size (unformat_input_t * input, va_list * va)
/* Format c identifier: e.g. a_name -> "a name".
Works for both vector names and null terminated c strings. */
-u8 * format_c_identifier (u8 * s, va_list * va)
+u8 *
+format_c_identifier (u8 * s, va_list * va)
{
- u8 * id = va_arg (*va, u8 *);
+ u8 *id = va_arg (*va, u8 *);
uword i, l;
l = ~0;
@@ -262,22 +280,22 @@ u8 * format_c_identifier (u8 * s, va_list * va)
u8 *
format_hexdump (u8 * s, va_list * args)
{
- u8 * data = va_arg (*args, u8 *);
+ u8 *data = va_arg (*args, u8 *);
uword len = va_arg (*args, uword);
- int i, index =0;
+ int i, index = 0;
const int line_len = 16;
- u8 * line_hex = 0;
- u8 * line_str = 0;
+ u8 *line_hex = 0;
+ u8 *line_str = 0;
uword indent = format_get_indent (s);
if (!len)
return s;
- for(i=0; i < len; i++)
+ for (i = 0; i < len; i++)
{
- line_hex = format (line_hex, "%02x ", data[i]);
+ line_hex = format (line_hex, "%02x ", data[i]);
line_str = format (line_str, "%c", isprint (data[i]) ? data[i] : '.');
- if (!( (i + 1) % line_len))
+ if (!((i + 1) % line_len))
{
s = format (s, "%U%05x: %v[%v]",
format_white_space, index ? indent : 0,
@@ -285,21 +303,28 @@ format_hexdump (u8 * s, va_list * args)
if (i < len - 1)
s = format (s, "\n");
index = i + 1;
- vec_reset_length(line_hex);
- vec_reset_length(line_str);
+ vec_reset_length (line_hex);
+ vec_reset_length (line_str);
}
}
while (i++ % line_len)
line_hex = format (line_hex, " ");
- if (vec_len(line_hex))
+ if (vec_len (line_hex))
s = format (s, "%U%05x: %v[%v]",
- format_white_space, indent,
- index, line_hex, line_str);
+ format_white_space, indent, index, line_hex, line_str);
- vec_free(line_hex);
- vec_free(line_str);
+ vec_free (line_hex);
+ vec_free (line_str);
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
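
The memory-size format/unformat pair cleaned up above is meant to be used through the
usual %U plumbing. A brief illustrative sketch follows (example_sizes is a made-up name,
and the declarations are assumed to come from <vppinfra/format.h>; the accepted suffixes
follow the comments in std-formats.c):

#include <vppinfra/format.h>

/* Hypothetical round trip: parse "size 100", "size 100k", "size 100m" or
   "size 100g" and echo it back in the same shorthand. */
static void
example_sizes (unformat_input_t * input)
{
  uword nbytes = 0;
  u8 *s = 0;

  if (unformat (input, "size %U", unformat_memory_size, &nbytes))
    s = format (s, "size is %U\n", format_memory_size, nbytes);

  vec_free (s);
}
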
diff --git a/vppinfra/vppinfra/string.c b/vppinfra/vppinfra/string.c
index 1f6923ca413..ba21e7b3490 100644
--- a/vppinfra/vppinfra/string.c
+++ b/vppinfra/vppinfra/string.c
@@ -39,7 +39,8 @@
#include <vppinfra/error.h>
/* Exchanges source and destination. */
-void clib_memswap (void * _a, void * _b, uword bytes)
+void
+clib_memswap (void *_a, void *_b, uword bytes)
{
uword pa = pointer_to_uword (_a);
uword pb = pointer_to_uword (_b);
@@ -64,21 +65,30 @@ void clib_memswap (void * _a, void * _b, uword bytes)
pa = pointer_to_uword (a); \
pb = pointer_to_uword (b); \
}
-
+
if (BITS (uword) == BITS (u64))
- _ (u64);
- _ (u32);
- _ (u16);
- _ (u8);
+ _(u64);
+ _(u32);
+ _(u16);
+ _(u8);
#undef _
ASSERT (bytes < 2);
if (bytes)
{
- u8 * a = uword_to_pointer (pa, u8 *);
- u8 * b = uword_to_pointer (pb, u8 *);
+ u8 *a = uword_to_pointer (pa, u8 *);
+ u8 *b = uword_to_pointer (pb, u8 *);
u8 a0 = a[0], b0 = b[0];
- a[0] = b0; b[0] = a0;
+ a[0] = b0;
+ b[0] = a0;
}
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/string.h b/vppinfra/vppinfra/string.h
index 8a2918f80c1..8d28375ec0a 100644
--- a/vppinfra/vppinfra/string.h
+++ b/vppinfra/vppinfra/string.h
@@ -38,7 +38,7 @@
#ifndef included_clib_string_h
#define included_clib_string_h
-#include <vppinfra/clib.h> /* for CLIB_LINUX_KERNEL */
+#include <vppinfra/clib.h> /* for CLIB_LINUX_KERNEL */
#ifdef CLIB_LINUX_KERNEL
#include <linux/string.h>
@@ -53,7 +53,7 @@
#endif
/* Exchanges source and destination. */
-void clib_memswap (void * _a, void * _b, uword bytes);
+void clib_memswap (void *_a, void *_b, uword bytes);
#if __AVX__
#include <vppinfra/memcpy_avx.h>
@@ -64,3 +64,11 @@ void clib_memswap (void * _a, void * _b, uword bytes);
#endif
#endif /* included_clib_string_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_bihash_template.c b/vppinfra/vppinfra/test_bihash_template.c
index c9ce7ee4721..5a5b63484aa 100644
--- a/vppinfra/vppinfra/test_bihash_template.c
+++ b/vppinfra/vppinfra/test_bihash_template.c
@@ -21,7 +21,8 @@
#include <vppinfra/bihash_template.c>
-typedef struct {
+typedef struct
+{
u32 seed;
u32 nbuckets;
u32 nitems;
@@ -29,56 +30,58 @@ typedef struct {
int careful_delete_tests;
int verbose;
int non_random_keys;
- uword * key_hash;
- u64 * keys;
- BVT(clib_bihash) hash;
+ uword *key_hash;
+ u64 *keys;
+ BVT (clib_bihash) hash;
clib_time_t clib_time;
- unformat_input_t * input;
-
+ unformat_input_t *input;
+
} test_main_t;
test_main_t test_main;
-uword vl (void * v)
+uword
+vl (void *v)
{
- return vec_len (v);
+ return vec_len (v);
}
-static clib_error_t * test_bihash (test_main_t * tm)
+static clib_error_t *
+test_bihash (test_main_t * tm)
{
int i, j;
- uword * p;
+ uword *p;
uword total_searches;
f64 before, delta;
- BVT(clib_bihash) * h;
- BVT(clib_bihash_kv) kv;
+ BVT (clib_bihash) * h;
+ BVT (clib_bihash_kv) kv;
h = &tm->hash;
- BV(clib_bihash_init) (h, "test", tm->nbuckets, 3ULL<<30);
-
- fformat (stdout, "Pick %lld unique %s keys...\n",
- tm->nitems, tm->non_random_keys ? "non-random" : "random");
+ BV (clib_bihash_init) (h, "test", tm->nbuckets, 3ULL << 30);
+
+ fformat (stdout, "Pick %lld unique %s keys...\n",
+ tm->nitems, tm->non_random_keys ? "non-random" : "random");
for (i = 0; i < tm->nitems; i++)
{
u64 rndkey;
if (tm->non_random_keys == 0)
- {
-
- again:
- rndkey = random_u64 (&tm->seed);
-
- p = hash_get (tm->key_hash, rndkey);
- if (p)
- goto again;
- }
+ {
+
+ again:
+ rndkey = random_u64 (&tm->seed);
+
+ p = hash_get (tm->key_hash, rndkey);
+ if (p)
+ goto again;
+ }
else
- rndkey = (u64)(i+1) << 16;
+ rndkey = (u64) (i + 1) << 16;
- hash_set (tm->key_hash, rndkey, i+1);
+ hash_set (tm->key_hash, rndkey, i + 1);
vec_add1 (tm->keys, rndkey);
}
@@ -86,20 +89,21 @@ static clib_error_t * test_bihash (test_main_t * tm)
for (i = 0; i < tm->nitems; i++)
{
kv.key = tm->keys[i];
- kv.value = i+1;
+ kv.value = i + 1;
- BV(clib_bihash_add_del) (h, &kv, 1 /* is_add */);
+ BV (clib_bihash_add_del) (h, &kv, 1 /* is_add */ );
if (tm->verbose > 1)
- {
- fformat (stdout, "--------------------\n");
- fformat (stdout, "After adding key %llu value %lld...\n",
- tm->keys[i], (u64)(i+1));
- fformat (stdout, "%U", BV(format_bihash), h, 2 /* very verbose */);
- }
+ {
+ fformat (stdout, "--------------------\n");
+ fformat (stdout, "After adding key %llu value %lld...\n",
+ tm->keys[i], (u64) (i + 1));
+ fformat (stdout, "%U", BV (format_bihash), h,
+ 2 /* very verbose */ );
+ }
}
- fformat (stdout, "%U", BV(format_bihash), h, 0 /* very verbose */);
+ fformat (stdout, "%U", BV (format_bihash), h, 0 /* very verbose */ );
fformat (stdout, "Search for items %d times...\n", tm->search_iter);
@@ -110,69 +114,69 @@ static clib_error_t * test_bihash (test_main_t * tm)
u64 hash1 = clib_xxhash (tm->keys[0]);
for (i = 0; i < tm->nitems; i++)
- {
- if (i < (tm->nitems - 3))
- {
- clib_bihash_bucket_t * b;
- BVT(clib_bihash_value) * v;
- u64 hash2 = clib_xxhash (tm->keys[i+3]);
- u32 bucket_index = hash2 & (h->nbuckets-1);
- b = &h->buckets[bucket_index];
- CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
-
- bucket_index = hash1 & (h->nbuckets-1);
- b = &h->buckets[bucket_index];
- v = BV(clib_bihash_get_value) (h, b->offset);
- hash1 >>= h->log2_nbuckets;
- hash1 = hash1 & ((1<<b->log2_pages)-1);
- v += hash1;
- CLIB_PREFETCH (v, CLIB_CACHE_LINE_BYTES, LOAD);
-
- hash1 = hash2;
- }
-
- kv.key = tm->keys[i];
- if (BV(clib_bihash_search) (h, &kv, &kv) < 0)
- clib_warning ("search for key %lld failed unexpectedly\n",
- tm->keys[i]);
- if (kv.value != (u64)(i+1))
- clib_warning ("search for key %lld returned %lld, not %lld\n",
- tm->keys, kv.value, (u64)(i+1));
- }
+ {
+ if (i < (tm->nitems - 3))
+ {
+ clib_bihash_bucket_t *b;
+ BVT (clib_bihash_value) * v;
+ u64 hash2 = clib_xxhash (tm->keys[i + 3]);
+ u32 bucket_index = hash2 & (h->nbuckets - 1);
+ b = &h->buckets[bucket_index];
+ CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ bucket_index = hash1 & (h->nbuckets - 1);
+ b = &h->buckets[bucket_index];
+ v = BV (clib_bihash_get_value) (h, b->offset);
+ hash1 >>= h->log2_nbuckets;
+ hash1 = hash1 & ((1 << b->log2_pages) - 1);
+ v += hash1;
+ CLIB_PREFETCH (v, CLIB_CACHE_LINE_BYTES, LOAD);
+
+ hash1 = hash2;
+ }
+
+ kv.key = tm->keys[i];
+ if (BV (clib_bihash_search) (h, &kv, &kv) < 0)
+ clib_warning ("search for key %lld failed unexpectedly\n",
+ tm->keys[i]);
+ if (kv.value != (u64) (i + 1))
+ clib_warning ("search for key %lld returned %lld, not %lld\n",
+ tm->keys, kv.value, (u64) (i + 1));
+ }
}
delta = clib_time_now (&tm->clib_time) - before;
- total_searches = (uword)tm->search_iter * (uword) tm->nitems;
+ total_searches = (uword) tm->search_iter * (uword) tm->nitems;
if (delta > 0)
fformat (stdout, "%.f searches per second\n",
- ((f64)total_searches) / delta);
+ ((f64) total_searches) / delta);
fformat (stdout, "%lld searches in %.6f seconds\n", total_searches, delta);
- fformat (stdout, "Standard E-hash search for items %d times...\n",
- tm->search_iter);
+ fformat (stdout, "Standard E-hash search for items %d times...\n",
+ tm->search_iter);
before = clib_time_now (&tm->clib_time);
for (j = 0; j < tm->search_iter; j++)
{
for (i = 0; i < tm->nitems; i++)
- {
- p = hash_get (tm->key_hash, tm->keys[i]);
- if (p == 0 || p[0] != (uword)(i+1))
- clib_warning ("ugh, couldn't find %lld\n", tm->keys[i]);
- }
+ {
+ p = hash_get (tm->key_hash, tm->keys[i]);
+ if (p == 0 || p[0] != (uword) (i + 1))
+ clib_warning ("ugh, couldn't find %lld\n", tm->keys[i]);
+ }
}
delta = clib_time_now (&tm->clib_time) - before;
- total_searches = (uword)tm->search_iter * (uword) tm->nitems;
+ total_searches = (uword) tm->search_iter * (uword) tm->nitems;
fformat (stdout, "%lld searches in %.6f seconds\n", total_searches, delta);
if (delta > 0)
fformat (stdout, "%.f searches per second\n",
- ((f64)total_searches) / delta);
+ ((f64) total_searches) / delta);
fformat (stdout, "Delete items...\n");
@@ -182,83 +186,83 @@ static clib_error_t * test_bihash (test_main_t * tm)
int rv;
kv.key = tm->keys[i];
- kv.value = (u64)(i+1);
- rv = BV(clib_bihash_add_del) (h, &kv, 0 /* is_add */);
+ kv.value = (u64) (i + 1);
+ rv = BV (clib_bihash_add_del) (h, &kv, 0 /* is_add */ );
if (rv < 0)
- clib_warning ("delete key %lld not ok but should be",
- tm->keys[i]);
+ clib_warning ("delete key %lld not ok but should be", tm->keys[i]);
if (tm->careful_delete_tests)
- {
- for (j = 0; j < tm->nitems; j++)
- {
- kv.key = tm->keys[j];
- rv = BV(clib_bihash_search) (h, &kv, &kv);
- if (j <= i && rv >= 0)
- {
- clib_warning
- ( "i %d j %d search ok but should not be, value %lld",
- i, j, kv.value);
- }
- if (j > i && rv < 0)
- {
- clib_warning ("i %d j %d search not ok but should be",
- i, j);
- }
- }
- }
+ {
+ for (j = 0; j < tm->nitems; j++)
+ {
+ kv.key = tm->keys[j];
+ rv = BV (clib_bihash_search) (h, &kv, &kv);
+ if (j <= i && rv >= 0)
+ {
+ clib_warning
+ ("i %d j %d search ok but should not be, value %lld",
+ i, j, kv.value);
+ }
+ if (j > i && rv < 0)
+ {
+ clib_warning ("i %d j %d search not ok but should be",
+ i, j);
+ }
+ }
+ }
}
fformat (stdout, "After deletions, should be empty...\n");
- fformat (stdout, "%U", BV(format_bihash), h, 0 /* very verbose */);
+ fformat (stdout, "%U", BV (format_bihash), h, 0 /* very verbose */ );
return 0;
}
-clib_error_t *
+clib_error_t *
test_bihash_main (test_main_t * tm)
{
- unformat_input_t * i = tm->input;
- clib_error_t * error;
+ unformat_input_t *i = tm->input;
+ clib_error_t *error;
while (unformat_check_input (i) != UNFORMAT_END_OF_INPUT)
{
if (unformat (i, "seed %u", &tm->seed))
- ;
+ ;
else if (unformat (i, "nbuckets %d", &tm->nbuckets))
- ;
+ ;
else if (unformat (i, "non-random-keys"))
- tm->non_random_keys = 1;
+ tm->non_random_keys = 1;
else if (unformat (i, "nitems %d", &tm->nitems))
- ;
+ ;
else if (unformat (i, "careful %d", &tm->careful_delete_tests))
- ;
+ ;
else if (unformat (i, "verbose %d", &tm->verbose))
- ;
+ ;
else if (unformat (i, "search %d", &tm->search_iter))
- ;
+ ;
else if (unformat (i, "verbose"))
- tm->verbose = 1;
+ tm->verbose = 1;
else
- return clib_error_return (0, "unknown input '%U'",
- format_unformat_error, i);
+ return clib_error_return (0, "unknown input '%U'",
+ format_unformat_error, i);
}
-
+
error = test_bihash (tm);
return error;
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
- clib_error_t * error;
- test_main_t * tm = &test_main;
+ clib_error_t *error;
+ test_main_t *tm = &test_main;
- clib_mem_init (0, 3ULL<<30);
+ clib_mem_init (0, 3ULL << 30);
tm->input = &i;
tm->seed = 0xdeaddabe;
@@ -283,3 +287,11 @@ int main (int argc, char * argv[])
return 0;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_dlist.c b/vppinfra/vppinfra/test_dlist.c
index 0f75281d1b7..c5535c854a6 100644
--- a/vppinfra/vppinfra/test_dlist.c
+++ b/vppinfra/vppinfra/test_dlist.c
@@ -15,21 +15,23 @@
#include <vppinfra/dlist.h>
-typedef struct {
- dlist_elt_t * test_pool;
+typedef struct
+{
+ dlist_elt_t *test_pool;
u32 head_index;
} test_main_t;
test_main_t test_main;
-int test_dlist_main (unformat_input_t * input)
+int
+test_dlist_main (unformat_input_t * input)
{
- test_main_t * tm = &test_main;
- dlist_elt_t * head, * elt;
+ test_main_t *tm = &test_main;
+ dlist_elt_t *head, *elt;
u32 elt_index, head_index;
u32 value;
int i;
-
+
pool_get (tm->test_pool, head);
head_index = head - tm->test_pool;
clib_dlist_init (tm->test_pool, head - tm->test_pool);
@@ -53,8 +55,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->next;
value = elt->value;
}
@@ -66,8 +67,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->prev;
value = elt->value;
}
@@ -88,8 +88,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->next;
value = elt->value;
}
@@ -101,8 +100,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->prev;
value = elt->value;
}
@@ -118,8 +116,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->next;
value = elt->value;
}
@@ -131,8 +128,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->prev;
value = elt->value;
}
@@ -143,7 +139,7 @@ int test_dlist_main (unformat_input_t * input)
elt = pool_elt_at_index (tm->test_pool, 2);
fformat (stdout, "removed index %d value %d\n", elt_index, elt->value);
-
+
fformat (stdout, "Dump forward links\n");
elt_index = head->next;
i = 1;
@@ -151,8 +147,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->next;
value = elt->value;
}
@@ -164,8 +159,7 @@ int test_dlist_main (unformat_input_t * input)
while (value != ~0)
{
elt = pool_elt_at_index (tm->test_pool, elt_index);
- fformat(stdout, "elt %d value %d\n",
- i++, elt->value);
+ fformat (stdout, "elt %d value %d\n", i++, elt->value);
elt_index = elt->prev;
value = elt->value;
}
@@ -174,12 +168,13 @@ int test_dlist_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
- clib_mem_init (0, 3ULL<<30);
+ clib_mem_init (0, 3ULL << 30);
unformat_init_command_line (&i, argv);
ret = test_dlist_main (&i);
@@ -188,3 +183,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_elf.c b/vppinfra/vppinfra/test_elf.c
index afdb4708a5c..84fe0776c33 100644
--- a/vppinfra/vppinfra/test_elf.c
+++ b/vppinfra/vppinfra/test_elf.c
@@ -45,26 +45,29 @@
#error "unix only"
#endif
-static clib_error_t * elf_set_interpreter (elf_main_t * em, char * interp)
+static clib_error_t *
+elf_set_interpreter (elf_main_t * em, char *interp)
{
- elf_segment_t * g;
- elf_section_t * s;
- clib_error_t * error;
+ elf_segment_t *g;
+ elf_section_t *s;
+ clib_error_t *error;
vec_foreach (g, em->segments)
- {
- if (g->header.type == ELF_SEGMENT_INTERP)
- break;
- }
+ {
+ if (g->header.type == ELF_SEGMENT_INTERP)
+ break;
+ }
if (g >= vec_end (em->segments))
return clib_error_return (0, "interpreter not found");
if (g->header.memory_size < 1 + strlen (interp))
- return clib_error_return (0, "given interpreter does not fit; must be less than %d bytes (`%s' given)",
+ return clib_error_return (0,
+ "given interpreter does not fit; must be less than %d bytes (`%s' given)",
g->header.memory_size, interp);
- error = elf_get_section_by_start_address (em, g->header.virtual_address, &s);
+ error =
+ elf_get_section_by_start_address (em, g->header.virtual_address, &s);
if (error)
return error;
@@ -78,22 +81,22 @@ static clib_error_t * elf_set_interpreter (elf_main_t * em, char * interp)
static void
delete_dynamic_rpath_entries_from_section (elf_main_t * em, elf_section_t * s)
{
- elf64_dynamic_entry_t * e;
- elf64_dynamic_entry_t * new_es = 0;
+ elf64_dynamic_entry_t *e;
+ elf64_dynamic_entry_t *new_es = 0;
vec_foreach (e, em->dynamic_entries)
- {
- switch (e->type)
- {
- case ELF_DYNAMIC_ENTRY_RPATH:
- case ELF_DYNAMIC_ENTRY_RUN_PATH:
- break;
+ {
+ switch (e->type)
+ {
+ case ELF_DYNAMIC_ENTRY_RPATH:
+ case ELF_DYNAMIC_ENTRY_RUN_PATH:
+ break;
- default:
- vec_add1 (new_es, e[0]);
- break;
- }
- }
+ default:
+ vec_add1 (new_es, e[0]);
+ break;
+ }
+ }
/* Pad so as to keep section size constant. */
{
@@ -107,38 +110,41 @@ delete_dynamic_rpath_entries_from_section (elf_main_t * em, elf_section_t * s)
elf_set_dynamic_entries (em);
}
-static void elf_delete_dynamic_rpath_entries (elf_main_t * em)
+static void
+elf_delete_dynamic_rpath_entries (elf_main_t * em)
{
- elf_section_t * s;
+ elf_section_t *s;
vec_foreach (s, em->sections)
- {
- switch (s->header.type)
- {
- case ELF_SECTION_DYNAMIC:
- delete_dynamic_rpath_entries_from_section (em, s);
- break;
+ {
+ switch (s->header.type)
+ {
+ case ELF_SECTION_DYNAMIC:
+ delete_dynamic_rpath_entries_from_section (em, s);
+ break;
- default:
- break;
- }
- }
+ default:
+ break;
+ }
+ }
}
-typedef struct {
+typedef struct
+{
elf_main_t elf_main;
- char * input_file;
- char * output_file;
- char * set_interpreter;
+ char *input_file;
+ char *output_file;
+ char *set_interpreter;
int verbose;
} elf_test_main_t;
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
- elf_test_main_t _tm, * tm = &_tm;
- elf_main_t * em = &tm->elf_main;
+ elf_test_main_t _tm, *tm = &_tm;
+ elf_main_t *em = &tm->elf_main;
unformat_input_t i;
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
memset (tm, 0, sizeof (tm[0]));
@@ -168,7 +174,7 @@ int main (int argc, char * argv[])
if (!tm->input_file)
{
- clib_warning("No input file! Using test_bihash_template");
+ clib_warning ("No input file! Using test_bihash_template");
tm->input_file = "test_bihash_template";
}
@@ -178,7 +184,7 @@ int main (int argc, char * argv[])
if (tm->set_interpreter)
{
- clib_error_t * error = elf_set_interpreter (em, tm->set_interpreter);
+ clib_error_t *error = elf_set_interpreter (em, tm->set_interpreter);
if (error)
goto done;
elf_delete_dynamic_rpath_entries (em);
@@ -192,7 +198,7 @@ int main (int argc, char * argv[])
elf_main_free (em);
- done:
+done:
if (error)
{
clib_error_report (error);
@@ -201,3 +207,11 @@ int main (int argc, char * argv[])
else
return 0;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_elog.c b/vppinfra/vppinfra/test_elog.c
index bb82c275354..89905adb4be 100644
--- a/vppinfra/vppinfra/test_elog.c
+++ b/vppinfra/vppinfra/test_elog.c
@@ -42,15 +42,16 @@
#include <vppinfra/serialize.h>
#include <vppinfra/unix.h>
-int test_elog_main (unformat_input_t * input)
+int
+test_elog_main (unformat_input_t * input)
{
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
u32 i, n_iter, seed, max_events;
- elog_main_t _em, * em = &_em;
+ elog_main_t _em, *em = &_em;
u32 verbose;
f64 min_sample_time;
- char * dump_file, * load_file, * merge_file, ** merge_files;
- u8 * tag, ** tags;
+ char *dump_file, *load_file, *merge_file, **merge_files;
+ u8 *tag, **tags;
n_iter = 100;
max_events = 100000;
@@ -72,7 +73,7 @@ int test_elog_main (unformat_input_t * input)
else if (unformat (input, "load %s", &load_file))
;
else if (unformat (input, "tag %s", &tag))
- vec_add1 (tags, tag);
+ vec_add1 (tags, tag);
else if (unformat (input, "merge %s", &merge_file))
vec_add1 (merge_files, merge_file);
@@ -100,20 +101,21 @@ int test_elog_main (unformat_input_t * input)
else if (merge_files)
{
uword i;
- elog_main_t * ems;
+ elog_main_t *ems;
vec_clone (ems, merge_files);
elog_init (em, max_events);
for (i = 0; i < vec_len (ems); i++)
{
- if ((error = elog_read_file (i == 0 ? em : &ems[i], merge_files[i])))
+ if ((error =
+ elog_read_file (i == 0 ? em : &ems[i], merge_files[i])))
goto done;
if (i > 0)
- {
- elog_merge (em, tags[0], &ems[i], tags[i]);
- tags[0] = 0;
- }
+ {
+ elog_merge (em, tags[0], &ems[i], tags[i]);
+ tags[0] = 0;
+ }
}
}
@@ -146,32 +148,30 @@ int test_elog_main (unformat_input_t * input)
}
{
- struct { u32 string_index; f32 f; } * d;
- ELOG_TYPE_DECLARE (e) = {
- .format = "fumble %s %.9f",
- .format_args = "t4f4",
- .n_enum_strings = 4,
- .enum_strings = {
- "string0",
- "string1",
- "string2",
- "string3",
- },
- };
+ struct
+ {
+ u32 string_index;
+ f32 f;
+ } *d;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "fumble %s %.9f",.format_args =
+ "t4f4",.n_enum_strings = 4,.enum_strings =
+ {
+ "string0", "string1", "string2", "string3",},};
d = ELOG_DATA (em, e);
d->string_index = sum & 3;
d->f = (sum & 0xff) / 128.;
}
-
+
{
- ELOG_TYPE_DECLARE (e) = {
- .format = "bar %d.%d.%d.%d",
- .format_args = "i1i1i1i1",
- };
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "bar %d.%d.%d.%d",.format_args = "i1i1i1i1",};
ELOG_TRACK (my_track);
- u8 * d = ELOG_TRACK_DATA (em, e, my_track);
+ u8 *d = ELOG_TRACK_DATA (em, e, my_track);
d[0] = i + 0;
d[1] = i + 1;
d[2] = i + 2;
@@ -179,12 +179,14 @@ int test_elog_main (unformat_input_t * input)
}
{
- ELOG_TYPE_DECLARE (e) = {
- .format = "bar `%s'",
- .format_args = "s20",
- };
- struct { char s[20]; } * d;
- u8 * v;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "bar `%s'",.format_args = "s20",};
+ struct
+ {
+ char s[20];
+ } *d;
+ u8 *v;
d = ELOG_DATA (em, e);
v = format (0, "foo %d%c", i, 0);
@@ -192,20 +194,24 @@ int test_elog_main (unformat_input_t * input)
}
{
- ELOG_TYPE_DECLARE (e) = {
- .format = "bar `%s'",
- .format_args = "T4",
- };
- struct { u32 offset; } * d;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "bar `%s'",.format_args = "T4",};
+ struct
+ {
+ u32 offset;
+ } *d;
d = ELOG_DATA (em, e);
d->offset = elog_string (em, "string table %d", i);
}
}
- do {
- t[1] = unix_time_now ();
- } while (t[1] - t[0] < min_sample_time);
+ do
+ {
+ t[1] = unix_time_now ();
+ }
+ while (t[1] - t[0] < min_sample_time);
}
#ifdef CLIB_UNIX
@@ -218,24 +224,24 @@ int test_elog_main (unformat_input_t * input)
if (verbose)
{
- elog_event_t * e, * es;
+ elog_event_t *e, *es;
es = elog_get_events (em);
vec_foreach (e, es)
- {
- clib_warning ("%18.9f: %12U %U\n", e->time,
- format_elog_track, em, e,
- format_elog_event, em, e);
- }
+ {
+ clib_warning ("%18.9f: %12U %U\n", e->time,
+ format_elog_track, em, e, format_elog_event, em, e);
+ }
}
- done:
+done:
if (error)
clib_error_report (error);
return 0;
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int r;
@@ -246,3 +252,11 @@ int main (int argc, char * argv [])
return r;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_fifo.c b/vppinfra/vppinfra/test_fifo.c
index 79155d23693..45392bc35eb 100644
--- a/vppinfra/vppinfra/test_fifo.c
+++ b/vppinfra/vppinfra/test_fifo.c
@@ -38,26 +38,30 @@
#include <vppinfra/fifo.h>
#include <vppinfra/random.h>
-typedef struct {
+typedef struct
+{
int a, b, c;
} A;
always_inline void
A_set (A * a, int k)
{
- a->a = 1*k;
- a->b = 2*k;
- a->c = 3*k;
+ a->a = 1 * k;
+ a->b = 2 * k;
+ a->c = 3 * k;
}
always_inline int
A_is_valid (A * a, int k)
-{ return a->a == 1*k && a->b == 2*k && a->c == 3*k; }
+{
+ return a->a == 1 * k && a->b == 2 * k && a->c == 3 * k;
+}
-int test_fifo_main (unformat_input_t * input)
+int
+test_fifo_main (unformat_input_t * input)
{
u32 n_added, n_removed, i, j, n_iter, seed, verbose;
- A * as = 0, * a;
+ A *as = 0, *a;
n_iter = 1000;
seed = random_default_seed ();
@@ -73,8 +77,7 @@ int test_fifo_main (unformat_input_t * input)
;
else
{
- clib_warning ("unknown input `%U'\n",
- format_unformat_error, input);
+ clib_warning ("unknown input `%U'\n", format_unformat_error, input);
return 1;
}
}
@@ -102,10 +105,12 @@ int test_fifo_main (unformat_input_t * input)
ASSERT (clib_fifo_elts (as) == n_added - n_removed);
j = 0;
+ /* *INDENT-OFF* */
clib_fifo_foreach (a, as, {
ASSERT (A_is_valid (a, n_removed + j));
j++;
});
+ /* *INDENT-ON* */
ASSERT (j == clib_fifo_elts (as));
}
@@ -116,7 +121,8 @@ int test_fifo_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int r;
@@ -128,3 +134,11 @@ int main (int argc, char * argv [])
return r;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_format.c b/vppinfra/vppinfra/test_format.c
index bee7c0e5ed6..cc95a00ef48 100644
--- a/vppinfra/vppinfra/test_format.c
+++ b/vppinfra/vppinfra/test_format.c
@@ -38,16 +38,18 @@
#include <vppinfra/format.h>
static int verbose;
-static u8 * test_vec;
+static u8 *test_vec;
-static u8 * format_test1 (u8 * s, va_list * va)
+static u8 *
+format_test1 (u8 * s, va_list * va)
{
uword x = va_arg (*va, uword);
f64 y = va_arg (*va, f64);
return format (s, "%12d %12f%12.4e", x, y, y);
}
-static int expectation (const char * exp, char * fmt, ...)
+static int
+expectation (const char *exp, char *fmt, ...)
{
int ret = 0;
@@ -56,8 +58,8 @@ static int expectation (const char * exp, char * fmt, ...)
test_vec = va_format (test_vec, fmt, &va);
va_end (va);
- vec_add1(test_vec, 0);
- if (strcmp(exp, (char *) test_vec))
+ vec_add1 (test_vec, 0);
+ if (strcmp (exp, (char *) test_vec))
{
fformat (stdout, "FAIL: %s (expected vs. result)\n\"%s\"\n\"%v\"\n",
fmt, exp, test_vec);
@@ -65,19 +67,20 @@ static int expectation (const char * exp, char * fmt, ...)
}
else if (verbose)
fformat (stdout, "PASS: %s\n", fmt);
- vec_delete (test_vec, vec_len(test_vec), 0);
+ vec_delete (test_vec, vec_len (test_vec), 0);
return ret;
}
-int test_format_main (unformat_input_t * input)
+int
+test_format_main (unformat_input_t * input)
{
int ret = 0;
- u8 * food = format (0, "food");
+ u8 *food = format (0, "food");
ret |= expectation ("foo", "foo");
ret |= expectation ("foo", "%s", "foo");
ret |= expectation ("9876", "%d", 9876);
- ret |= expectation ("-9876", "%wd", (word) -9876);
+ ret |= expectation ("-9876", "%wd", (word) - 9876);
ret |= expectation ("98765432", "%u", 98765432);
ret |= expectation ("1200ffee", "%x", 0x1200ffee);
ret |= expectation ("BABEBABE", "%X", 0xbabebabe);
@@ -102,32 +105,35 @@ int test_format_main (unformat_input_t * input)
return ret;
}
-typedef struct {
+typedef struct
+{
int a, b;
} foo_t;
-static u8 * format_foo (u8 * s, va_list * va)
+static u8 *
+format_foo (u8 * s, va_list * va)
{
- foo_t * foo = va_arg (*va, foo_t *);
+ foo_t *foo = va_arg (*va, foo_t *);
return format (s, "{a %d, b %d}", foo->a, foo->b);
}
-static uword unformat_foo (unformat_input_t * i, va_list * va)
+static uword
+unformat_foo (unformat_input_t * i, va_list * va)
{
- foo_t * foo = va_arg (*va, foo_t *);
+ foo_t *foo = va_arg (*va, foo_t *);
return unformat (i, "{%D,%D}",
- sizeof (foo->a), &foo->a,
- sizeof (foo->b), &foo->b);
+ sizeof (foo->a), &foo->a, sizeof (foo->b), &foo->b);
}
-int test_unformat_main (unformat_input_t * input)
+int
+test_unformat_main (unformat_input_t * input)
{
u32 v[8];
long l;
long long ll;
f64 f;
- u8 * s;
- foo_t foo = {.a = ~0, .b = ~0};
+ u8 *s;
+ foo_t foo = {.a = ~0,.b = ~0 };
v[0] = v[1] = 0;
@@ -157,12 +163,10 @@ int test_unformat_main (unformat_input_t * input)
&v[0], &v[1], &v[2], &v[3],
&v[4], &v[5], &v[6], &v[7]))
fformat (stdout, "got %d.%d.%d.%d -> %d.%d.%d.%d",
- v[0], v[1], v[2], v[3],
- v[4], v[5], v[6], v[7]);
+ v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
else
{
- clib_warning ("unknown input `%U'\n",
- format_unformat_error, input);
+ clib_warning ("unknown input `%U'\n", format_unformat_error, input);
return 1;
}
}
@@ -171,7 +175,8 @@ int test_unformat_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
@@ -184,3 +189,11 @@ int main (int argc, char * argv [])
return test_format_main (&i);
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_hash.c b/vppinfra/vppinfra/test_hash.c
index 52d0f73bd7c..94110ab68ad 100644
--- a/vppinfra/vppinfra/test_hash.c
+++ b/vppinfra/vppinfra/test_hash.c
@@ -57,7 +57,8 @@ static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-typedef struct {
+typedef struct
+{
int n_iterations;
int n_iterations_per_print;
@@ -78,46 +79,51 @@ typedef struct {
u32 seed;
} hash_test_t;
-static clib_error_t * hash_next_test (word * h)
+static clib_error_t *
+hash_next_test (word * h)
{
- hash_next_t hn = {0};
- hash_pair_t * p0, * p1;
- clib_error_t * error = 0;
+ hash_next_t hn = { 0 };
+ hash_pair_t *p0, *p1;
+ clib_error_t *error = 0;
+ /* *INDENT-OFF* */
hash_foreach_pair (p0, h, {
p1 = hash_next (h, &hn);
error = CLIB_ERROR_ASSERT (p0 == p1);
if (error)
break;
});
+ /* *INDENT-ON* */
- if (! error)
- error = CLIB_ERROR_ASSERT (! hash_next (h, &hn));
+ if (!error)
+ error = CLIB_ERROR_ASSERT (!hash_next (h, &hn));
return error;
}
-static u8 * test1_format (u8 * s, va_list * args)
+static u8 *
+test1_format (u8 * s, va_list * args)
{
- void * CLIB_UNUSED (user_arg) = va_arg (*args, void *);
- void * v = va_arg (*args, void *);
- hash_pair_t * p = va_arg (*args, hash_pair_t *);
- hash_t * h = hash_header (v);
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
return format (s, "0x%8U -> 0x%8U",
format_hex_bytes, &p->key, sizeof (p->key),
format_hex_bytes, &p->value[0], hash_value_bytes (h));
}
-static clib_error_t * test_word_key (hash_test_t * ht)
+static clib_error_t *
+test_word_key (hash_test_t * ht)
{
- word * h = 0;
+ word *h = 0;
word i, j;
- word * keys = 0, * vals = 0;
- uword * is_inserted = 0;
+ word *keys = 0, *vals = 0;
+ uword *is_inserted = 0;
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
vec_resize (keys, ht->n_pairs);
vec_resize (vals, vec_len (keys));
@@ -129,14 +135,16 @@ static clib_error_t * test_word_key (hash_test_t * ht)
hash_set_flags (h, HASH_FLAG_NO_AUTO_GROW | HASH_FLAG_NO_AUTO_SHRINK);
{
- uword * unique = 0;
+ uword *unique = 0;
u32 k;
for (i = 0; i < vec_len (keys); i++)
{
- do {
- k = random_u32 (&ht->seed) & 0xfffff;
- } while (clib_bitmap_get (unique, k));
+ do
+ {
+ k = random_u32 (&ht->seed) & 0xfffff;
+ }
+ while (clib_bitmap_get (unique, k));
unique = clib_bitmap_ori (unique, k);
keys[i] = k;
vals[i] = i;
@@ -158,20 +166,22 @@ static clib_error_t * test_word_key (hash_test_t * ht)
if (ht->n_iterations_per_print > 0
&& ((i + 1) % ht->n_iterations_per_print) == 0)
- if_verbose ("iteration %d\n %U", i + 1, format_hash, h, ht->verbose);
+ if_verbose ("iteration %d\n %U", i + 1, format_hash, h, ht->verbose);
if (ht->n_iterations_per_validate == 0
|| (i + 1) % ht->n_iterations_per_validate)
continue;
{
- hash_pair_t * p;
- uword ki;
+ hash_pair_t *p;
+ uword ki;
+ /* *INDENT-OFF* */
hash_foreach_pair (p, h, {
ki = p->value[0];
ASSERT (keys[ki] == p->key);
});
+ /* *INDENT-ON* */
}
clib_mem_validate ();
@@ -181,25 +191,28 @@ static clib_error_t * test_word_key (hash_test_t * ht)
for (j = 0; j < vec_len (keys); j++)
{
- uword * v;
+ uword *v;
v = hash_get (h, keys[j]);
- if ((error = CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) == (v != 0))))
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
goto done;
- if (v) {
- if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
- goto done;
- }
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
}
}
if ((error = hash_next_test (h)))
goto done;
- if_verbose ("%U", format_hash, h, ht->verbose);
+ if_verbose ("%U", format_hash, h, ht->verbose);
for (i = 0; i < vec_len (keys); i++)
{
- if (! clib_bitmap_get (is_inserted, i))
+ if (!clib_bitmap_get (is_inserted, i))
continue;
hash_unset (h, keys[i]);
@@ -216,64 +229,71 @@ static clib_error_t * test_word_key (hash_test_t * ht)
for (j = 0; j < vec_len (keys); j++)
{
- uword * v;
+ uword *v;
v = hash_get (h, keys[j]);
- if ((error = CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) == (v != 0))))
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
goto done;
- if (v) {
- if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
- goto done;
- }
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
}
}
- done:
+done:
hash_free (h);
vec_free (keys);
vec_free (vals);
clib_bitmap_free (is_inserted);
- if (verbose) fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
return error;
}
-static u8 * test2_format (u8 * s, va_list * args)
+static u8 *
+test2_format (u8 * s, va_list * args)
{
- void * CLIB_UNUSED (user_arg) = va_arg (*args, void *);
- void * v = va_arg (*args, void *);
- hash_pair_t * p = va_arg (*args, hash_pair_t *);
- hash_t * h = hash_header (v);
+ void *CLIB_UNUSED (user_arg) = va_arg (*args, void *);
+ void *v = va_arg (*args, void *);
+ hash_pair_t *p = va_arg (*args, hash_pair_t *);
+ hash_t *h = hash_header (v);
return format (s, "0x%8U <- %v",
format_hex_bytes, &p->value[0], hash_value_bytes (h),
p->key);
}
-static clib_error_t * test_string_key (hash_test_t * ht)
+static clib_error_t *
+test_string_key (hash_test_t * ht)
{
word i, j;
- u8 ** keys = 0;
- word * vals = 0;
- uword * is_inserted = 0;
+ u8 **keys = 0;
+ word *vals = 0;
+ uword *is_inserted = 0;
- word * h = 0;
+ word *h = 0;
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
vec_resize (keys, ht->n_pairs);
vec_resize (vals, vec_len (keys));
- h = hash_create_vec (ht->fixed_hash_size, sizeof (keys[0][0]), sizeof (uword));
+ h =
+ hash_create_vec (ht->fixed_hash_size, sizeof (keys[0][0]),
+ sizeof (uword));
hash_set_pair_format (h, test2_format, 0);
if (ht->fixed_hash_size)
hash_set_flags (h, HASH_FLAG_NO_AUTO_SHRINK | HASH_FLAG_NO_AUTO_GROW);
for (i = 0; i < vec_len (keys); i++)
{
- keys[i] = random_string (&ht->seed,
- 5 + (random_u32 (&ht->seed) & 0xf));
+ keys[i] = random_string (&ht->seed, 5 + (random_u32 (&ht->seed) & 0xf));
keys[i] = format (keys[i], "%x", i);
vals[i] = random_u32 (&ht->seed);
}
@@ -291,7 +311,7 @@ static clib_error_t * test_string_key (hash_test_t * ht)
if (ht->n_iterations_per_print > 0
&& ((i + 1) % ht->n_iterations_per_print) == 0)
- if_verbose ("iteration %d\n %U", i + 1, format_hash, h, ht->verbose);
+ if_verbose ("iteration %d\n %U", i + 1, format_hash, h, ht->verbose);
if (ht->n_iterations_per_validate == 0
|| (i + 1) % ht->n_iterations_per_validate)
@@ -304,25 +324,28 @@ static clib_error_t * test_string_key (hash_test_t * ht)
for (j = 0; j < vec_len (keys); j++)
{
- uword * v;
+ uword *v;
v = hash_get_mem (h, keys[j]);
- if ((error = CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) == (v != 0))))
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
goto done;
- if (v) {
- if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
- goto done;
- }
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
}
}
if ((error = hash_next_test (h)))
goto done;
- if_verbose ("%U", format_hash, h, ht->verbose);
+ if_verbose ("%U", format_hash, h, ht->verbose);
for (i = 0; i < vec_len (keys); i++)
{
- if (! clib_bitmap_get (is_inserted, i))
+ if (!clib_bitmap_get (is_inserted, i))
continue;
hash_unset_mem (h, keys[i]);
@@ -339,18 +362,21 @@ static clib_error_t * test_string_key (hash_test_t * ht)
for (j = 0; j < vec_len (keys); j++)
{
- uword * v;
+ uword *v;
v = hash_get_mem (h, keys[j]);
- if ((error = CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) == (v != 0))))
+ if ((error =
+ CLIB_ERROR_ASSERT (clib_bitmap_get (is_inserted, j) ==
+ (v != 0))))
goto done;
- if (v) {
- if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
- goto done;
- }
+ if (v)
+ {
+ if ((error = CLIB_ERROR_ASSERT (v[0] == vals[j])))
+ goto done;
+ }
}
}
- done:
+done:
hash_free (h);
vec_free (vals);
clib_bitmap_free (is_inserted);
@@ -358,16 +384,18 @@ static clib_error_t * test_string_key (hash_test_t * ht)
for (i = 0; i < vec_len (keys); i++)
vec_free (keys[i]);
vec_free (keys);
-
- if (verbose) fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
return error;
}
-int test_hash_main (unformat_input_t * input)
+int
+test_hash_main (unformat_input_t * input)
{
- hash_test_t _ht = {0}, * ht = &_ht;
- clib_error_t * error;
+ hash_test_t _ht = { 0 }, *ht = &_ht;
+ clib_error_t *error;
ht->n_iterations = 100;
ht->n_pairs = 10;
@@ -381,18 +409,18 @@ int test_hash_main (unformat_input_t * input)
&& 0 == unformat (input, "size %d", &ht->fixed_hash_size)
&& 0 == unformat (input, "seed %d", &ht->seed)
&& 0 == unformat (input, "verbose %=", &ht->verbose, 1)
- && 0 == unformat (input, "valid %d", &ht->n_iterations_per_validate))
+ && 0 == unformat (input, "valid %d",
+ &ht->n_iterations_per_validate))
{
clib_warning ("unknown input `%U'", format_unformat_error, input);
return 1;
}
}
- if (! ht->seed)
+ if (!ht->seed)
ht->seed = random_default_seed ();
- if_verbose ("testing %d iterations, seed %d",
- ht->n_iterations, ht->seed);
+ if_verbose ("testing %d iterations, seed %d", ht->n_iterations, ht->seed);
error = test_word_key (ht);
if (error)
@@ -406,7 +434,8 @@ int test_hash_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
@@ -419,3 +448,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_heap.c b/vppinfra/vppinfra/test_heap.c
index 79203880499..3d5171bf053 100644
--- a/vppinfra/vppinfra/test_heap.c
+++ b/vppinfra/vppinfra/test_heap.c
@@ -46,13 +46,14 @@ static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
word i, j, k, n, check_mask;
u32 seed;
- u32 * h = 0;
- uword * objects = 0;
- uword * handles = 0;
+ u32 *h = 0;
+ uword *objects = 0;
+ uword *handles = 0;
uword objects_used;
uword align, fixed_size;
@@ -79,12 +80,13 @@ int main (int argc, char * argv[])
if (argc > 4)
align = 1 << atoi (argv[4]);
- if_verbose ("testing %wd iterations seed %wd\n", n, seed);
+ if_verbose ("testing %wd iterations seed %wd\n", n, seed);
- if (verbose) fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
vec_resize (objects, 1000);
- if (vec_bytes(objects)) /* stupid warning be gone */
+ if (vec_bytes (objects)) /* stupid warning be gone */
memset (objects, ~0, vec_bytes (objects));
vec_resize (handles, vec_len (objects));
@@ -93,7 +95,7 @@ int main (int argc, char * argv[])
if (fixed_size)
{
uword max_len = 1024 * 1024;
- void * memory = clib_mem_alloc (max_len * sizeof (h[0]));
+ void *memory = clib_mem_alloc (max_len * sizeof (h[0]));
h = heap_create_from_memory (memory, max_len, sizeof (h[0]));
}
@@ -114,7 +116,7 @@ int main (int argc, char * argv[])
}
else
{
- u32 * data;
+ u32 *data;
uword size;
size = 1 + (random_u32 (&seed) % 100);
@@ -142,7 +144,7 @@ int main (int argc, char * argv[])
if (check_mask & 4)
{
/* Duplicate heap at each iteration. */
- u32 * h1 = heap_dup (h);
+ u32 *h1 = heap_dup (h);
heap_free (h);
h = h1;
}
@@ -153,23 +155,26 @@ int main (int argc, char * argv[])
for (j = 0; j < vec_len (objects); j++)
if (objects[j] != ~0)
{
- u32 * data = h + objects[j];
+ u32 *data = h + objects[j];
for (k = 0; k < heap_len (h, handles[j]); k++)
- ASSERT(data[k] == objects[j] + k);
+ ASSERT (data[k] == objects[j] + k);
}
}
}
- if (verbose) fformat (stderr, "%U\n", format_heap, h, 1);
+ if (verbose)
+ fformat (stderr, "%U\n", format_heap, h, 1);
{
- u32 * h1 = heap_dup (h);
- if (verbose) fformat (stderr, "%U\n", format_heap, h1, 1);
+ u32 *h1 = heap_dup (h);
+ if (verbose)
+ fformat (stderr, "%U\n", format_heap, h1, 1);
heap_free (h1);
}
heap_free (h);
- if (verbose) fformat (stderr, "%U\n", format_heap, h, 1);
+ if (verbose)
+ fformat (stderr, "%U\n", format_heap, h, 1);
ASSERT (objects_used == 0);
vec_free (objects);
@@ -178,7 +183,16 @@ int main (int argc, char * argv[])
if (fixed_size)
vec_free_h (h, sizeof (heap_header_t));
- if (verbose) fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
+ if (verbose)
+ fformat (stderr, "%U\n", format_clib_mem_usage, /* verbose */ 0);
return 0;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_longjmp.c b/vppinfra/vppinfra/test_longjmp.c
index 299de258c2a..2415c4f061c 100644
--- a/vppinfra/vppinfra/test_longjmp.c
+++ b/vppinfra/vppinfra/test_longjmp.c
@@ -35,7 +35,7 @@
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <vppinfra/clib.h>
+#include <vppinfra/clib.h>
#include <vppinfra/longjmp.h>
#include <vppinfra/format.h>
@@ -47,19 +47,22 @@ static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-static never_inline void f2 (clib_longjmp_t * env)
+static never_inline void
+f2 (clib_longjmp_t * env)
{
i++;
clib_longjmp (env, 1);
}
-static never_inline void f1 (clib_longjmp_t * env)
+static never_inline void
+f1 (clib_longjmp_t * env)
{
i++;
f2 (env);
}
-int test_longjmp_main (unformat_input_t * input)
+int
+test_longjmp_main (unformat_input_t * input)
{
clib_longjmp_t env;
@@ -76,7 +79,8 @@ int test_longjmp_main (unformat_input_t * input)
return 0;
}
-static uword f3 (uword arg)
+static uword
+f3 (uword arg)
{
uword i, j, array[10];
@@ -90,9 +94,10 @@ static uword f3 (uword arg)
return j;
}
-static void test_calljmp (unformat_input_t * input)
+static void
+test_calljmp (unformat_input_t * input)
{
- static u8 stack[32*1024] __attribute__((aligned(16)));
+ static u8 stack[32 * 1024] __attribute__ ((aligned (16)));
uword v;
v = clib_calljmp (f3, 0, stack + sizeof (stack));
@@ -101,7 +106,8 @@ static void test_calljmp (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int res;
@@ -113,3 +119,11 @@ int main (int argc, char * argv [])
return res;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_macros.c b/vppinfra/vppinfra/test_macros.c
index 038ad687673..de8f2c49fc1 100644
--- a/vppinfra/vppinfra/test_macros.c
+++ b/vppinfra/vppinfra/test_macros.c
@@ -18,20 +18,21 @@
macro_main_t macro_main;
-int test_macros_main (unformat_input_t * input)
+int
+test_macros_main (unformat_input_t * input)
{
- macro_main_t * mm = &macro_main;
+ macro_main_t *mm = &macro_main;
- clib_macro_init(mm);
+ clib_macro_init (mm);
- fformat (stdout, "hostname: %s\n",
- clib_macro_eval_dollar (mm, "hostname", 1 /* complain */));
+ fformat (stdout, "hostname: %s\n",
+ clib_macro_eval_dollar (mm, "hostname", 1 /* complain */ ));
clib_macro_set_value (mm, "foo", "this is foo which contains $(bar)");
clib_macro_set_value (mm, "bar", "bar");
fformat (stdout, "evaluate: %s\n",
- clib_macro_eval (mm, "returns '$(foo)'", 1 /* complain */));
+ clib_macro_eval (mm, "returns '$(foo)'", 1 /* complain */ ));
clib_macro_free (mm);
@@ -39,7 +40,8 @@ int test_macros_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
@@ -52,3 +54,11 @@ int main (int argc, char * argv[])
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_md5.c b/vppinfra/vppinfra/test_md5.c
index 9eb2d0fa811..4be6f964963 100644
--- a/vppinfra/vppinfra/test_md5.c
+++ b/vppinfra/vppinfra/test_md5.c
@@ -43,15 +43,16 @@
#include <fcntl.h>
#include <unistd.h>
-static clib_error_t * md5_test_suite (void);
+static clib_error_t *md5_test_suite (void);
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
int i;
if (argc == 1)
{
- clib_error_t * e;
+ clib_error_t *e;
e = md5_test_suite ();
if (e)
{
@@ -64,7 +65,7 @@ int main (int argc, char * argv[])
{
md5_context_t m;
u8 digest[16];
- u8 buffer[64*1024];
+ u8 buffer[64 * 1024];
int fd, n;
fd = open (argv[i], 0);
@@ -77,8 +78,7 @@ int main (int argc, char * argv[])
close (fd);
md5_finish (&m, digest);
fformat (stdout, "%U %s\n",
- format_hex_bytes, digest, sizeof (digest),
- argv[i]);
+ format_hex_bytes, digest, sizeof (digest), argv[i]);
}
return 0;
@@ -87,30 +87,33 @@ int main (int argc, char * argv[])
static clib_error_t *
md5_test_suite (void)
{
- typedef struct {
- char * input;
- char * output;
+ typedef struct
+ {
+ char *input;
+ char *output;
} md5_test_t;
static md5_test_t tests[] = {
- { .input = "",
- .output = "d41d8cd98f00b204e9800998ecf8427e", },
- { .input = "a",
- .output = "0cc175b9c0f1b6a831c399e269772661", },
- { .input = "abc",
- .output = "900150983cd24fb0d6963f7d28e17f72", },
- { .input = "message digest",
- .output = "f96b697d7cb7938d525a2f31aaf161d0", },
- { .input = "abcdefghijklmnopqrstuvwxyz",
- .output = "c3fcd3d76192e4007dfb496cca67e13b", },
- { .input = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
- .output = "d174ab98d277d9f5a5611c2c9f419d9f", },
- { .input = "12345678901234567890123456789012345678901234567890123456789012345678901234567890",
- .output = "57edf4a22be3c955ac49da2e2107b67a", },
+ {.input = "",
+ .output = "d41d8cd98f00b204e9800998ecf8427e",},
+ {.input = "a",
+ .output = "0cc175b9c0f1b6a831c399e269772661",},
+ {.input = "abc",
+ .output = "900150983cd24fb0d6963f7d28e17f72",},
+ {.input = "message digest",
+ .output = "f96b697d7cb7938d525a2f31aaf161d0",},
+ {.input = "abcdefghijklmnopqrstuvwxyz",
+ .output = "c3fcd3d76192e4007dfb496cca67e13b",},
+ {.input =
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
+ .output = "d174ab98d277d9f5a5611c2c9f419d9f",},
+ {.input =
+ "12345678901234567890123456789012345678901234567890123456789012345678901234567890",
+ .output = "57edf4a22be3c955ac49da2e2107b67a",},
};
int i;
- u8 * s;
+ u8 *s;
md5_context_t m;
u8 digest[16];
@@ -122,10 +125,17 @@ md5_test_suite (void)
s = format (0, "%U", format_hex_bytes, digest, sizeof (digest));
if (memcmp (s, tests[i].output, 2 * sizeof (digest)))
return clib_error_return
- (0,
- "%s -> %v expected %s", tests[i].input, s, tests[i].output);
+ (0, "%s -> %v expected %s", tests[i].input, s, tests[i].output);
vec_free (s);
}
return 0;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_mheap.c b/vppinfra/vppinfra/test_mheap.c
index 624294d3cf9..6bc36b89ac3 100644
--- a/vppinfra/vppinfra/test_mheap.c
+++ b/vppinfra/vppinfra/test_mheap.c
@@ -42,7 +42,7 @@
#ifdef CLIB_UNIX
#include <unistd.h>
#include <stdlib.h>
-#include <stdio.h> /* scanf */
+#include <stdio.h> /* scanf */
#endif
#include <vppinfra/mheap.h>
@@ -53,16 +53,17 @@ static int verbose = 0;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-int test_mheap_main (unformat_input_t * input)
+int
+test_mheap_main (unformat_input_t * input)
{
int i, j, k, n_iterations;
- void * h, * h_mem;
- uword * objects = 0;
+ void *h, *h_mem;
+ uword *objects = 0;
u32 objects_used, really_verbose, n_objects, max_object_size;
u32 check_mask, seed, trace, use_vm;
u32 print_every = 0;
- u32 * data;
- mheap_t * mh;
+ u32 *data;
+ mheap_t *mh;
/* Validation flags. */
check_mask = 0;
@@ -100,27 +101,26 @@ int test_mheap_main (unformat_input_t * input)
}
/* Zero seed means use default. */
- if (! seed)
+ if (!seed)
seed = random_default_seed ();
- if_verbose ("testing %d iterations, %d %saligned objects, max. size %d, seed %d",
- n_iterations,
- n_objects,
- (check_mask & CHECK_ALIGN) ? "randomly " : "un",
- max_object_size,
- seed);
+ if_verbose
+ ("testing %d iterations, %d %saligned objects, max. size %d, seed %d",
+ n_iterations, n_objects, (check_mask & CHECK_ALIGN) ? "randomly " : "un",
+ max_object_size, seed);
vec_resize (objects, n_objects);
- if (vec_bytes(objects)) /* stupid warning be gone */
- memset (objects, ~0, vec_bytes (objects));
+ if (vec_bytes (objects)) /* stupid warning be gone */
+ memset (objects, ~0, vec_bytes (objects));
objects_used = 0;
/* Allocate initial heap. */
{
- uword size = max_pow2 (2 * n_objects * max_object_size * sizeof (data[0]));
+ uword size =
+ max_pow2 (2 * n_objects * max_object_size * sizeof (data[0]));
h_mem = clib_mem_alloc (size);
- if (! h_mem)
+ if (!h_mem)
return 0;
h = mheap_alloc (h_mem, size);
@@ -166,7 +166,7 @@ int test_mheap_main (unformat_input_t * input)
align_offset = round_pow2 (random_u32 (&seed) & (align - 1),
sizeof (u32));
}
-
+
h = mheap_get_aligned (h, size, align, align_offset, &objects[j]);
if (align > 0)
@@ -180,7 +180,7 @@ int test_mheap_main (unformat_input_t * input)
{
uword len;
- data = (void *) h + objects[j];
+ data = (void *) h + objects[j];
len = mheap_len (h, data);
ASSERT (size <= mheap_data_bytes (h, objects[j]));
@@ -197,14 +197,15 @@ int test_mheap_main (unformat_input_t * input)
for (j = 0; j < vec_len (objects); j++)
if (objects[j] != ~0)
{
- u32 * data = h + objects[j];
+ u32 *data = h + objects[j];
uword len = data[0];
for (k = 1; k < len; k++)
ASSERT (data[k] == objects[j] + k);
}
}
if (print_every != 0 && i > 0 && (i % print_every) == 0)
- fformat (stderr, "iteration %d: %U\n", i, format_mheap, h, really_verbose);
+ fformat (stderr, "iteration %d: %U\n", i, format_mheap, h,
+ really_verbose);
}
if (verbose)
@@ -217,7 +218,8 @@ int test_mheap_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
@@ -230,3 +232,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_pfhash.c b/vppinfra/vppinfra/test_pfhash.c
index 3b2f290ef83..ddbdbb34be5 100644
--- a/vppinfra/vppinfra/test_pfhash.c
+++ b/vppinfra/vppinfra/test_pfhash.c
@@ -25,8 +25,8 @@ int verbose = 0;
always_inline u8 *
random_aligned_string (u32 * seed, uword len)
{
- u8 * alphabet = (u8 *) "abcdefghijklmnopqrstuvwxyz";
- u8 * s = 0;
+ u8 *alphabet = (u8 *) "abcdefghijklmnopqrstuvwxyz";
+ u8 *s = 0;
word i;
vec_resize_aligned (s, len, 16);
@@ -36,252 +36,262 @@ random_aligned_string (u32 * seed, uword len)
return s;
}
-void exit(int);
+void exit (int);
-int test_pfhash_main (unformat_input_t * input)
+int
+test_pfhash_main (unformat_input_t * input)
{
- u32 seed = 0xdeaddabe;
- int i, iter;
- u32 nkeys = 4;
- u32 niter = 1;
- u32 nbuckets = 1;
- u32 bucket;
- u32 sizes[3] = {16, 8, 4}, this_size, size;
- u8 ** keys = 0;
- pfhash_t _rec, * p = &_rec;
-
- while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
- {
- if (unformat (input, "seed %d", &seed))
- ;
- else if (unformat (input, "niter %d", &niter))
- ;
- else if (unformat (input, "nkeys %d", &nkeys))
- ;
- else if (unformat (input, "nbuckets %d", &nbuckets))
- ;
- else if (unformat (input, "verbose %d", &verbose))
- ;
- else if (unformat (input, "verbose"))
- verbose = 1;
- else
- clib_error ("unknown input `%U'", format_unformat_error, input);
- }
-
- vec_validate (keys, nkeys-1);
-
- for (i = 0; i < nkeys; i++)
- {
- int j, k;
-
- again:
- keys[i] = random_aligned_string (&seed, 16);
- for (j = 0; j < (i - 1); j++)
- {
- /* Make sure we don't have a dup key in the min key size */
- for (k = 0; k < 4; k++)
- {
- if (keys[i][k] != keys[j][k])
- goto check_next_key;
- }
- vec_free (keys[i]);
- goto again;
- check_next_key:
- ;
- }
- }
-
- /* test 8 byte key, 8 byte value case separately */
-
- for (size = 8; size < 9; size++)
- {
- this_size = 8;
-
- fformat (stdout, "%d-byte key 8 byte value test\n", this_size);
-
- pfhash_init (p, "test", 8 /* key size */ , 8 /* value size */,
- nbuckets+1);
-
- for (iter = 0; iter < niter; iter++)
- {
- bucket = 0;
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- pfhash_set (p, bucket, keys[i],
- (void *) (u64) 0x100000000ULL + i+1);
- }
-
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- if (pfhash_get (p, bucket, keys[i])
- != (u64) 0x100000000ULL + i+1)
- {
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- (void) pfhash_get (p, bucket , keys[i]);
- }
- }
-
- /* test inline functions */
- for (i = 0; i < nkeys; i++)
- {
- u32 bucket_contents;
- u64 value = 0xdeadbeef;
- bucket = (i % nbuckets)+1;
-
- pfhash_prefetch_bucket (p, bucket);
- bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
-
- value = pfhash_search_kv_8v8 (p, bucket_contents,
- (u64 *) keys[i]);
- if (value != (u64) 0x100000000ULL + i+1)
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- }
-
- if (verbose)
- fformat (stdout, "%U\n", format_pfhash, p, verbose > 1);
-
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- pfhash_unset (p, bucket, keys[i]);
- }
-
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- if (pfhash_get (p, bucket , keys[i]) != (u64)~0)
- {
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- (void) pfhash_get (p, bucket , keys[i]);
- }
- }
- /* test inline functions */
- for (i = 0; i < nkeys; i++)
- {
- u32 bucket_contents;
- u64 value = 0xdeadbeef;
- bucket = (i % nbuckets)+1;
-
- pfhash_prefetch_bucket (p, bucket);
- bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
-
- value = pfhash_search_kv_8v8 (p, bucket_contents,
- (u64 *) keys[i]);
-
- if (value != (u64)~0)
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- }
- }
- pfhash_free (p);
- }
-
- /* test other cases */
-
- for (size = 0; size < ARRAY_LEN(sizes); size++)
- {
- this_size = sizes[size];
-
- fformat (stdout, "%d-byte key test\n", this_size);
-
- pfhash_init (p, "test", this_size, 4 /* value size */, nbuckets+1);
-
- for (iter = 0; iter < niter; iter++)
- {
- bucket = 0;
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- pfhash_set (p, bucket, keys[i], (void *)(u64)i+1);
- }
-
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- if (pfhash_get (p, bucket , keys[i]) != i+1)
- {
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- (void) pfhash_get (p, bucket , keys[i]);
- }
- }
-
- /* test inline functions */
- for (i = 0; i < nkeys; i++)
- {
- u32 bucket_contents;
- u32 value = 0xdeadbeef;
- bucket = (i % nbuckets)+1;
-
- pfhash_prefetch_bucket (p, bucket);
- bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
- switch(p->key_size)
- {
- case 16:
- value = pfhash_search_kv_16 (p, bucket_contents, (u32x4 *) keys[i]);
- break;
- case 8:
- value = pfhash_search_kv_8 (p, bucket_contents, (u64 *) keys[i]);
- break;
- case 4:
- value = pfhash_search_kv_4 (p, bucket_contents, (u32 *) keys[i]);
- break;
- }
-
- if (value != (i+1))
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- }
-
- if (verbose)
- fformat (stdout, "%U\n", format_pfhash, p, verbose > 1);
-
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- pfhash_unset (p, bucket, keys[i]);
- }
-
- for (i = 0; i < nkeys; i++)
- {
- bucket = (i % nbuckets)+1;
- if (pfhash_get (p, bucket , keys[i]) != (u64)~0)
- {
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- (void) pfhash_get (p, bucket , keys[i]);
- }
- }
- /* test inline functions */
- for (i = 0; i < nkeys; i++)
- {
- u32 bucket_contents;
- u32 value = 0xdeadbeef;
- bucket = (i % nbuckets)+1;
-
- pfhash_prefetch_bucket (p, bucket);
- bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
- switch(p->key_size)
- {
- case 16:
- value = pfhash_search_kv_16 (p, bucket_contents, (u32x4 *) keys[i]);
- break;
- case 8:
- value = pfhash_search_kv_8 (p, bucket_contents, (u64 *) keys[i]);
- break;
- case 4:
- value = pfhash_search_kv_4 (p, bucket_contents, (u32 *) keys[i]);
- break;
- }
- if (value != (u32)~0)
- clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
- }
- }
- pfhash_free (p);
- }
-
- exit (0);
+ u32 seed = 0xdeaddabe;
+ int i, iter;
+ u32 nkeys = 4;
+ u32 niter = 1;
+ u32 nbuckets = 1;
+ u32 bucket;
+ u32 sizes[3] = { 16, 8, 4 }, this_size, size;
+ u8 **keys = 0;
+ pfhash_t _rec, *p = &_rec;
+
+ while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
+ {
+ if (unformat (input, "seed %d", &seed))
+ ;
+ else if (unformat (input, "niter %d", &niter))
+ ;
+ else if (unformat (input, "nkeys %d", &nkeys))
+ ;
+ else if (unformat (input, "nbuckets %d", &nbuckets))
+ ;
+ else if (unformat (input, "verbose %d", &verbose))
+ ;
+ else if (unformat (input, "verbose"))
+ verbose = 1;
+ else
+ clib_error ("unknown input `%U'", format_unformat_error, input);
+ }
+
+ vec_validate (keys, nkeys - 1);
+
+ for (i = 0; i < nkeys; i++)
+ {
+ int j, k;
+
+ again:
+ keys[i] = random_aligned_string (&seed, 16);
+ for (j = 0; j < (i - 1); j++)
+ {
+ /* Make sure we don't have a dup key in the min key size */
+ for (k = 0; k < 4; k++)
+ {
+ if (keys[i][k] != keys[j][k])
+ goto check_next_key;
+ }
+ vec_free (keys[i]);
+ goto again;
+ check_next_key:
+ ;
+ }
+ }
+
+ /* test 8 byte key, 8 byte value case separately */
+
+ for (size = 8; size < 9; size++)
+ {
+ this_size = 8;
+
+ fformat (stdout, "%d-byte key 8 byte value test\n", this_size);
+
+ pfhash_init (p, "test", 8 /* key size */ , 8 /* value size */ ,
+ nbuckets + 1);
+
+ for (iter = 0; iter < niter; iter++)
+ {
+ bucket = 0;
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ pfhash_set (p, bucket, keys[i],
+ (void *) (u64) 0x100000000ULL + i + 1);
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i])
+ != (u64) 0x100000000ULL + i + 1)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+
+ /* test inline functions */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u64 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+
+ value = pfhash_search_kv_8v8 (p, bucket_contents,
+ (u64 *) keys[i]);
+ if (value != (u64) 0x100000000ULL + i + 1)
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+
+ if (verbose)
+ fformat (stdout, "%U\n", format_pfhash, p, verbose > 1);
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ pfhash_unset (p, bucket, keys[i]);
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i]) != (u64) ~ 0)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+ /* test inline functions */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u64 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+
+ value = pfhash_search_kv_8v8 (p, bucket_contents,
+ (u64 *) keys[i]);
+
+ if (value != (u64) ~ 0)
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+ }
+ pfhash_free (p);
+ }
+
+ /* test other cases */
+
+ for (size = 0; size < ARRAY_LEN (sizes); size++)
+ {
+ this_size = sizes[size];
+
+ fformat (stdout, "%d-byte key test\n", this_size);
+
+ pfhash_init (p, "test", this_size, 4 /* value size */ , nbuckets + 1);
+
+ for (iter = 0; iter < niter; iter++)
+ {
+ bucket = 0;
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ pfhash_set (p, bucket, keys[i], (void *) (u64) i + 1);
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i]) != i + 1)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+
+ /* test inline functions */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u32 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+ switch (p->key_size)
+ {
+ case 16:
+ value =
+ pfhash_search_kv_16 (p, bucket_contents,
+ (u32x4 *) keys[i]);
+ break;
+ case 8:
+ value =
+ pfhash_search_kv_8 (p, bucket_contents, (u64 *) keys[i]);
+ break;
+ case 4:
+ value =
+ pfhash_search_kv_4 (p, bucket_contents, (u32 *) keys[i]);
+ break;
+ }
+
+ if (value != (i + 1))
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+
+ if (verbose)
+ fformat (stdout, "%U\n", format_pfhash, p, verbose > 1);
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ pfhash_unset (p, bucket, keys[i]);
+ }
+
+ for (i = 0; i < nkeys; i++)
+ {
+ bucket = (i % nbuckets) + 1;
+ if (pfhash_get (p, bucket, keys[i]) != (u64) ~ 0)
+ {
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ (void) pfhash_get (p, bucket, keys[i]);
+ }
+ }
+ /* test inline functions */
+ for (i = 0; i < nkeys; i++)
+ {
+ u32 bucket_contents;
+ u32 value = 0xdeadbeef;
+ bucket = (i % nbuckets) + 1;
+
+ pfhash_prefetch_bucket (p, bucket);
+ bucket_contents = pfhash_read_bucket_prefetch_kv (p, bucket);
+ switch (p->key_size)
+ {
+ case 16:
+ value =
+ pfhash_search_kv_16 (p, bucket_contents,
+ (u32x4 *) keys[i]);
+ break;
+ case 8:
+ value =
+ pfhash_search_kv_8 (p, bucket_contents, (u64 *) keys[i]);
+ break;
+ case 4:
+ value =
+ pfhash_search_kv_4 (p, bucket_contents, (u32 *) keys[i]);
+ break;
+ }
+ if (value != (u32) ~ 0)
+ clib_warning ("key %d bucket %d lookup FAIL\n", i, bucket);
+ }
+ }
+ pfhash_free (p);
+ }
+
+ exit (0);
}
#else
-int test_pfhash_main (unformat_input_t * input)
+int
+test_pfhash_main (unformat_input_t * input)
{
clib_warning ("MMX unit not available");
return 0;
@@ -289,7 +299,8 @@ int test_pfhash_main (unformat_input_t * input)
#endif
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
@@ -301,3 +312,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_phash.c b/vppinfra/vppinfra/test_phash.c
index 0d5a7132614..9ed2ac7b950 100644
--- a/vppinfra/vppinfra/test_phash.c
+++ b/vppinfra/vppinfra/test_phash.c
@@ -43,12 +43,13 @@ static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-int test_phash_main (unformat_input_t * input)
+int
+test_phash_main (unformat_input_t * input)
{
- phash_main_t _pm = {0}, * pm = &_pm;
+ phash_main_t _pm = { 0 }, *pm = &_pm;
int n_keys, random_keys;
u32 seed;
- clib_error_t * error;
+ clib_error_t *error;
random_keys = 1;
n_keys = 1000;
@@ -61,41 +62,44 @@ int test_phash_main (unformat_input_t * input)
&& 0 == unformat (input, "seed %d", &pm->random_seed)
&& 0 == unformat (input, "64-bit %|", &pm->flags, PHASH_FLAG_MIX64)
&& 0 == unformat (input, "32-bit %|", &pm->flags, PHASH_FLAG_MIX32)
- && 0 == unformat (input, "fast %|", &pm->flags, PHASH_FLAG_FAST_MODE)
- && 0 == unformat (input, "slow %|", &pm->flags, PHASH_FLAG_SLOW_MODE)
- && 0 == unformat (input, "minimal %|", &pm->flags, PHASH_FLAG_MINIMAL)
- && 0 == unformat (input, "non-minimal %|", &pm->flags, PHASH_FLAG_NON_MINIMAL))
+ && 0 == unformat (input, "fast %|", &pm->flags,
+ PHASH_FLAG_FAST_MODE)
+ && 0 == unformat (input, "slow %|", &pm->flags,
+ PHASH_FLAG_SLOW_MODE)
+ && 0 == unformat (input, "minimal %|", &pm->flags,
+ PHASH_FLAG_MINIMAL)
+ && 0 == unformat (input, "non-minimal %|", &pm->flags,
+ PHASH_FLAG_NON_MINIMAL))
clib_error ("unknown input `%U'", format_unformat_error, input);
}
- if (! pm->random_seed)
+ if (!pm->random_seed)
pm->random_seed = random_default_seed ();
- if_verbose ("%d %d-bit keys, random seed %d, %s mode, looking for %sminimal hash",
- n_keys,
- (pm->flags & PHASH_FLAG_MIX64) ? 64 : 32,
- pm->random_seed,
- (pm->flags & PHASH_FLAG_FAST_MODE) ? "fast" : "slow",
- (pm->flags & PHASH_FLAG_MINIMAL) ? "" : "non-");
+ if_verbose
+ ("%d %d-bit keys, random seed %d, %s mode, looking for %sminimal hash",
+ n_keys, (pm->flags & PHASH_FLAG_MIX64) ? 64 : 32, pm->random_seed,
+ (pm->flags & PHASH_FLAG_FAST_MODE) ? "fast" : "slow",
+ (pm->flags & PHASH_FLAG_MINIMAL) ? "" : "non-");
seed = pm->random_seed;
/* Initialize random keys. */
{
- phash_key_t * k;
+ phash_key_t *k;
vec_resize (pm->keys, n_keys);
vec_foreach (k, pm->keys)
- {
- k->key = k - pm->keys;
- if (random_keys)
- {
- if (pm->flags & PHASH_FLAG_MIX64)
- k->key = random_u64 (&seed);
- else
- k->key = random_u32 (&seed);
- }
- }
+ {
+ k->key = k - pm->keys;
+ if (random_keys)
+ {
+ if (pm->flags & PHASH_FLAG_MIX64)
+ k->key = random_u64 (&seed);
+ else
+ k->key = random_u32 (&seed);
+ }
+ }
}
error = phash_find_perfect_hash (pm);
@@ -106,9 +110,9 @@ int test_phash_main (unformat_input_t * input)
}
else
{
- if_verbose ("(%d,%d) (a,b) bits, %d seeds tried, %d tree walks",
- pm->a_bits, pm->b_bits,
- pm->n_seed_trials, pm->n_perfect_calls);
+ if_verbose ("(%d,%d) (a,b) bits, %d seeds tried, %d tree walks",
+ pm->a_bits, pm->b_bits,
+ pm->n_seed_trials, pm->n_perfect_calls);
error = phash_validate (pm);
if (error)
@@ -122,7 +126,8 @@ int test_phash_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int res;
@@ -134,3 +139,11 @@ int main (int argc, char * argv [])
return res;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_pool.c b/vppinfra/vppinfra/test_pool.c
index cb9996e9d63..67a5e50a38a 100644
--- a/vppinfra/vppinfra/test_pool.c
+++ b/vppinfra/vppinfra/test_pool.c
@@ -39,16 +39,17 @@
#include <vppinfra/pool.h>
#ifdef __KERNEL__
-# include <linux/unistd.h>
+#include <linux/unistd.h>
#else
-# include <unistd.h>
+#include <unistd.h>
#endif
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
int i, n, seed;
- int * p = 0, * e, j, * o = 0;
+ int *p = 0, *e, j, *o = 0;
n = atoi (argv[1]);
seed = getpid ();
@@ -75,3 +76,11 @@ int main (int argc, char * argv[])
vec_free (o);
return 0;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_pool_iterate.c b/vppinfra/vppinfra/test_pool_iterate.c
index fd04ee704b4..27ce4bb37b7 100644
--- a/vppinfra/vppinfra/test_pool_iterate.c
+++ b/vppinfra/vppinfra/test_pool_iterate.c
@@ -18,12 +18,13 @@
#include <vppinfra/pool.h>
#ifdef __KERNEL__
-# include <linux/unistd.h>
+#include <linux/unistd.h>
#else
-# include <unistd.h>
+#include <unistd.h>
#endif
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
int i;
uword next;
@@ -32,17 +33,27 @@ int main (int argc, char * argv[])
for (i = 0; i < 70; i++)
pool_get (tp, junk);
-
+
(void) junk; /* compiler warning */
pool_put_index (tp, 1);
pool_put_index (tp, 65);
next = ~0;
- do {
- next = pool_next_index (tp, next);
- fformat (stdout, "next index %d\n", next);
- } while (next != ~0);
+ do
+ {
+ next = pool_next_index (tp, next);
+ fformat (stdout, "next index %d\n", next);
+ }
+ while (next != ~0);
return 0;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_ptclosure.c b/vppinfra/vppinfra/test_ptclosure.c
index b5ac13f7de5..be7d51dfa7d 100644
--- a/vppinfra/vppinfra/test_ptclosure.c
+++ b/vppinfra/vppinfra/test_ptclosure.c
@@ -16,33 +16,36 @@
#include <vppinfra/ptclosure.h>
#include <vppinfra/hash.h>
-typedef struct {
- uword * index_by_name;
- u8 * items;
+typedef struct
+{
+ uword *index_by_name;
+ u8 *items;
} test_main_t;
test_main_t test_main;
-static char * items [] = {
- "d",
- "a",
- "b",
- "c",
+static char *items[] = {
+ "d",
+ "a",
+ "b",
+ "c",
};
-char * constraints [] = {
- "a,b",
- "b,c",
- "d,b",
- // "c,a", /* no partial order possible */
+char *constraints[] = {
+ "a,b",
+ "b,c",
+ "d,b",
+ // "c,a", /* no partial order possible */
};
-u32 vl(void *p)
+u32
+vl (void *p)
{
return vec_len (p);
}
-static void dump_closure (test_main_t * tm, char * s, u8 ** orig)
+static void
+dump_closure (test_main_t * tm, char *s, u8 ** orig)
{
int i, j;
@@ -50,14 +53,15 @@ static void dump_closure (test_main_t * tm, char * s, u8 ** orig)
for (i = 0; i < vec_len (orig); i++)
{
for (j = 0; j < vec_len (orig); j++)
- if (orig[i][j])
- {
- fformat (stdout, "%s <before> %s\n", items[i], items[j]);
- }
+ if (orig[i][j])
+ {
+ fformat (stdout, "%s <before> %s\n", items[i], items[j]);
+ }
}
}
-int comma_split (u8 *s, u8 **a, u8 **b)
+int
+comma_split (u8 * s, u8 ** a, u8 ** b)
{
*a = s;
@@ -69,27 +73,28 @@ int comma_split (u8 *s, u8 **a, u8 **b)
else
return 1;
- *b = (u8 *) (s+1);
+ *b = (u8 *) (s + 1);
return 0;
}
-int test_ptclosure_main (unformat_input_t * input)
+int
+test_ptclosure_main (unformat_input_t * input)
{
- test_main_t * tm = &test_main;
- u8 * item_name;
+ test_main_t *tm = &test_main;
+ u8 *item_name;
int i, j;
- u8 ** orig;
- u8 ** closure;
- u8 * a_name, * b_name;
+ u8 **orig;
+ u8 **closure;
+ u8 *a_name, *b_name;
int a_index, b_index;
- uword * p;
- u8 * this_constraint;
+ uword *p;
+ u8 *this_constraint;
int n;
- u32 * result = 0;
+ u32 *result = 0;
tm->index_by_name = hash_create_string (0, sizeof (uword));
- n = ARRAY_LEN(items);
+ n = ARRAY_LEN (items);
for (i = 0; i < n; i++)
{
@@ -99,63 +104,63 @@ int test_ptclosure_main (unformat_input_t * input)
orig = clib_ptclosure_alloc (n);
- for (i = 0; i < ARRAY_LEN(constraints); i++)
+ for (i = 0; i < ARRAY_LEN (constraints); i++)
{
this_constraint = format (0, "%s%c", constraints[i], 0);
-
+
if (comma_split (this_constraint, &a_name, &b_name))
- {
- clib_warning ("couldn't split '%s'", constraints[i]);
- return 1;
- }
-
+ {
+ clib_warning ("couldn't split '%s'", constraints[i]);
+ return 1;
+ }
+
p = hash_get_mem (tm->index_by_name, a_name);
if (p == 0)
- {
- clib_warning ("couldn't find '%s'", a_name);
- return 1;
- }
+ {
+ clib_warning ("couldn't find '%s'", a_name);
+ return 1;
+ }
a_index = p[0];
p = hash_get_mem (tm->index_by_name, b_name);
if (p == 0)
- {
- clib_warning ("couldn't find '%s'", b_name);
- return 1;
- }
+ {
+ clib_warning ("couldn't find '%s'", b_name);
+ return 1;
+ }
b_index = p[0];
orig[a_index][b_index] = 1;
vec_free (this_constraint);
}
-
+
dump_closure (tm, "original relation", orig);
closure = clib_ptclosure (orig);
dump_closure (tm, "closure", closure);
- /*
+ /*
* Output partial order
*/
- again:
+again:
for (i = 0; i < n; i++)
{
for (j = 0; j < n; j++)
- {
- if (closure[i][j])
- goto item_constrained;
- }
+ {
+ if (closure[i][j])
+ goto item_constrained;
+ }
/* Item i can be output */
vec_add1 (result, i);
{
- int k;
- for (k = 0; k < n; k++)
- closure [k][i] = 0;
- /* "Magic" a before a, to keep from ever outputting it again */
- closure [i][i] = 1;
- goto again;
+ int k;
+ for (k = 0; k < n; k++)
+ closure[k][i] = 0;
+ /* "Magic" a before a, to keep from ever outputting it again */
+ closure[i][i] = 1;
+ goto again;
}
item_constrained:
;
@@ -169,7 +174,7 @@ int test_ptclosure_main (unformat_input_t * input)
fformat (stdout, "Partial order:\n");
- for (i = vec_len(result)-1; i >= 0; i--)
+ for (i = vec_len (result) - 1; i >= 0; i--)
{
fformat (stdout, "%s\n", items[result[i]]);
}
@@ -182,12 +187,13 @@ int test_ptclosure_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
- clib_mem_init (0, 3ULL<<30);
+ clib_mem_init (0, 3ULL << 30);
unformat_init_command_line (&i, argv);
ret = test_ptclosure_main (&i);
@@ -196,3 +202,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_qhash.c b/vppinfra/vppinfra/test_qhash.c
index 8d0b8b7e47b..fdbf0bbebb0 100644
--- a/vppinfra/vppinfra/test_qhash.c
+++ b/vppinfra/vppinfra/test_qhash.c
@@ -18,24 +18,25 @@
#include <vppinfra/random.h>
#include <vppinfra/time.h>
-typedef struct {
+typedef struct
+{
u32 n_iter, seed, n_keys, n_hash_keys, verbose;
u32 max_vector;
- uword * hash;
+ uword *hash;
- uword * keys_in_hash_bitmap;
+ uword *keys_in_hash_bitmap;
- u32 * qhash;
+ u32 *qhash;
- uword * keys;
+ uword *keys;
- uword * lookup_keys;
- uword * lookup_key_indices;
- u32 * lookup_results;
+ uword *lookup_keys;
+ uword *lookup_key_indices;
+ u32 *lookup_results;
- u32 * get_multiple_results;
+ u32 *get_multiple_results;
clib_time_t time;
@@ -49,8 +50,8 @@ typedef struct {
clib_error_t *
test_qhash_main (unformat_input_t * input)
{
- clib_error_t * error = 0;
- test_qhash_main_t _tm, * tm = &_tm;
+ clib_error_t *error = 0;
+ test_qhash_main_t _tm, *tm = &_tm;
uword i, iter;
memset (tm, 0, sizeof (tm[0]));
@@ -81,7 +82,7 @@ test_qhash_main (unformat_input_t * input)
}
}
- if (! tm->seed)
+ if (!tm->seed)
tm->seed = random_default_seed ();
clib_time_init (&tm->time);
@@ -94,13 +95,13 @@ test_qhash_main (unformat_input_t * input)
for (i = 0; i < vec_len (tm->keys); i++)
tm->keys[i] = random_uword (&tm->seed);
- if (! tm->n_hash_keys)
+ if (!tm->n_hash_keys)
tm->n_hash_keys = 2 * max_pow2 (tm->n_keys);
tm->n_hash_keys = clib_max (tm->n_keys, tm->n_hash_keys);
qhash_resize (tm->qhash, tm->n_hash_keys);
{
- qhash_t * h = qhash_header (tm->qhash);
+ qhash_t *h = qhash_header (tm->qhash);
int i;
for (i = 0; i < ARRAY_LEN (h->hash_seeds); i++)
h->hash_seeds[i] = random_uword (&tm->seed);
@@ -112,7 +113,7 @@ test_qhash_main (unformat_input_t * input)
for (iter = 0; iter < tm->n_iter; iter++)
{
- uword * p, j, n, is_set;
+ uword *p, j, n, is_set;
n = tm->max_vector;
@@ -143,8 +144,7 @@ test_qhash_main (unformat_input_t * input)
else
tm->hash_unset_time += t[1] - t[0];
tm->keys_in_hash_bitmap
- = clib_bitmap_set (tm->keys_in_hash_bitmap, i,
- is_set);
+ = clib_bitmap_set (tm->keys_in_hash_bitmap, i, is_set);
j++;
}
}
@@ -191,7 +191,7 @@ test_qhash_main (unformat_input_t * input)
os_panic ();
{
- qhash_t * h;
+ qhash_t *h;
uword i, k, l, count;
h = qhash_header (tm->qhash);
@@ -210,13 +210,15 @@ test_qhash_main (unformat_input_t * input)
os_panic ();
{
- u32 * tmp = 0;
+ u32 *tmp = 0;
+ /* *INDENT-OFF* */
hash_foreach (k, l, h->overflow_hash, ({
j = qhash_hash_mix (h, k) / QHASH_KEYS_PER_BUCKET;
vec_validate (tmp, j);
tmp[j] += 1;
}));
+ /* *INDENT-ON* */
for (k = 0; k < vec_len (tmp); k++)
{
@@ -256,7 +258,7 @@ test_qhash_main (unformat_input_t * input)
{
if (p[0] != i)
os_panic ();
- if (* vec_elt_at_index (tm->qhash, r) != i)
+ if (*vec_elt_at_index (tm->qhash, r) != i)
os_panic ();
}
else
@@ -276,8 +278,7 @@ test_qhash_main (unformat_input_t * input)
fformat (stderr, "%d iter %.6e overflow, %.4f ave. elts\n",
tm->n_iter,
- tm->overflow_fraction / tm->n_iter,
- tm->ave_elts / tm->n_iter);
+ tm->overflow_fraction / tm->n_iter, tm->ave_elts / tm->n_iter);
tm->get_time /= tm->n_iter * vec_len (tm->keys);
tm->hash_get_time /= tm->n_iter * vec_len (tm->keys);
@@ -287,27 +288,28 @@ test_qhash_main (unformat_input_t * input)
tm->hash_set_time /= tm->set_count;
tm->hash_unset_time /= tm->unset_count;
- fformat (stderr, "get/set/unset clocks %.2e %.2e %.2e clib %.2e %.2e %.2e ratio %.2f %.2f %.2f\n",
+ fformat (stderr,
+ "get/set/unset clocks %.2e %.2e %.2e clib %.2e %.2e %.2e ratio %.2f %.2f %.2f\n",
tm->get_time * tm->time.clocks_per_second,
tm->set_time * tm->time.clocks_per_second,
tm->unset_time * tm->time.clocks_per_second,
tm->hash_get_time * tm->time.clocks_per_second,
tm->hash_set_time * tm->time.clocks_per_second,
tm->hash_unset_time * tm->time.clocks_per_second,
- tm->hash_get_time / tm->get_time,
- tm->hash_set_time / tm->set_time,
+ tm->hash_get_time / tm->get_time, tm->hash_set_time / tm->set_time,
tm->hash_unset_time / tm->unset_time);
-
- done:
+
+done:
return error;
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
- clib_error_t * error;
+ clib_error_t *error;
unformat_init_command_line (&i, argv);
error = test_qhash_main (&i);
@@ -321,3 +323,11 @@ int main (int argc, char * argv[])
return 0;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_random.c b/vppinfra/vppinfra/test_random.c
index 9cf07264449..49759eacb97 100644
--- a/vppinfra/vppinfra/test_random.c
+++ b/vppinfra/vppinfra/test_random.c
@@ -38,19 +38,19 @@
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>
-static u32 known_random_sequence[] =
-{
- 0x00000000, 0x3c6ef35f, 0x47502932, 0xd1ccf6e9,
- 0xaaf95334, 0x6252e503, 0x9f2ec686, 0x57fe6c2d,
- 0xa3d95fa8, 0x81fdbee7, 0x94f0af1a, 0xcbf633b1,
+static u32 known_random_sequence[] = {
+ 0x00000000, 0x3c6ef35f, 0x47502932, 0xd1ccf6e9,
+ 0xaaf95334, 0x6252e503, 0x9f2ec686, 0x57fe6c2d,
+ 0xa3d95fa8, 0x81fdbee7, 0x94f0af1a, 0xcbf633b1,
};
-int test_random_main (unformat_input_t * input)
+int
+test_random_main (unformat_input_t * input)
{
uword n_iterations;
uword i, repeat_count;
- uword * bitmap = 0;
+ uword *bitmap = 0;
uword print;
u32 seed;
u32 *seedp = &seed;
@@ -58,15 +58,15 @@ int test_random_main (unformat_input_t * input)
/* first, check known sequence from Numerical Recipes in C, 2nd ed.
page 284 */
seed = known_random_sequence[0];
- for (i = 0; i < ARRAY_LEN(known_random_sequence)-1; i++)
+ for (i = 0; i < ARRAY_LEN (known_random_sequence) - 1; i++)
{
u32 rv;
rv = random_u32 (seedp);
- if (rv != known_random_sequence[i+1])
- {
- fformat(stderr, "known sequence check FAILS at index %d", i+1);
- break;
- }
+ if (rv != known_random_sequence[i + 1])
+ {
+ fformat (stderr, "known sequence check FAILS at index %d", i + 1);
+ break;
+ }
}
clib_warning ("known sequence check passes");
@@ -83,7 +83,7 @@ int test_random_main (unformat_input_t * input)
clib_error ("unknown input `%U'", format_unformat_error, input);
}
- if (! seed)
+ if (!seed)
seed = random_default_seed ();
if (n_iterations == 0)
@@ -122,12 +122,13 @@ int test_random_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
- clib_mem_init (0, 3ULL<<30);
+ clib_mem_init (0, 3ULL << 30);
unformat_init_command_line (&i, argv);
ret = test_random_main (&i);
@@ -137,3 +138,11 @@ int main (int argc, char * argv[])
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_random_isaac.c b/vppinfra/vppinfra/test_random_isaac.c
index a73178616c8..337d30ddea0 100644
--- a/vppinfra/vppinfra/test_random_isaac.c
+++ b/vppinfra/vppinfra/test_random_isaac.c
@@ -44,16 +44,17 @@ static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-int test_isaac_main (unformat_input_t * input)
+int
+test_isaac_main (unformat_input_t * input)
{
uword n_iterations, seed;
uword i, repeat_count;
- uword * hash = 0;
+ uword *hash = 0;
uword print;
isaac_t ctx;
- uword results[ISAAC_SIZE] = {0};
+ uword results[ISAAC_SIZE] = { 0 };
uword n_results;
-
+
n_iterations = 1000;
seed = 0;
print = 1 << 24;
@@ -66,7 +67,7 @@ int test_isaac_main (unformat_input_t * input)
clib_error ("unknown input `%U'", format_unformat_error, input);
}
- if (! seed)
+ if (!seed)
seed = random_default_seed ();
results[0] = seed;
@@ -74,7 +75,7 @@ int test_isaac_main (unformat_input_t * input)
if (n_iterations == 0)
n_iterations = ~0;
- if_verbose ("%d iterations, seed %d\n", n_iterations, seed);
+ if_verbose ("%d iterations, seed %d\n", n_iterations, seed);
repeat_count = 0;
isaac_init (&ctx, results);
@@ -84,7 +85,7 @@ int test_isaac_main (unformat_input_t * input)
{
uword r = results[n_results++];
- if (! hash)
+ if (!hash)
hash = hash_create (0, /* value bytes */ 0);
if (hash_get (hash, r))
@@ -116,7 +117,8 @@ int test_isaac_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
@@ -130,3 +132,11 @@ int main (int argc, char * argv[])
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_serialize.c b/vppinfra/vppinfra/test_serialize.c
index 58dd32c8d6a..e00eec3268f 100644
--- a/vppinfra/vppinfra/test_serialize.c
+++ b/vppinfra/vppinfra/test_serialize.c
@@ -45,15 +45,17 @@
_ (u16, a16) \
_ (u32, a32)
-typedef struct {
+typedef struct
+{
#define _(t,f) t f;
foreach_my_vector_type
#undef _
} my_vector_type_t;
-static void serialize_my_vector_type_single (serialize_main_t * m, va_list * va)
+static void
+serialize_my_vector_type_single (serialize_main_t * m, va_list * va)
{
- my_vector_type_t * v = va_arg (*va, my_vector_type_t *);
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
u32 n = va_arg (*va, u32);
u32 i;
@@ -65,9 +67,10 @@ static void serialize_my_vector_type_single (serialize_main_t * m, va_list * va)
#undef _
}
-static void unserialize_my_vector_type_single (serialize_main_t * m, va_list * va)
+static void
+unserialize_my_vector_type_single (serialize_main_t * m, va_list * va)
{
- my_vector_type_t * v = va_arg (*va, my_vector_type_t *);
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
u32 n = va_arg (*va, u32);
u32 i;
@@ -79,9 +82,10 @@ static void unserialize_my_vector_type_single (serialize_main_t * m, va_list * v
}
}
-static void serialize_my_vector_type_multiple (serialize_main_t * m, va_list * va)
+static void
+serialize_my_vector_type_multiple (serialize_main_t * m, va_list * va)
{
- my_vector_type_t * v = va_arg (*va, my_vector_type_t *);
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
u32 n = va_arg (*va, u32);
#define _(t,f) \
@@ -97,9 +101,10 @@ static void serialize_my_vector_type_multiple (serialize_main_t * m, va_list * v
#undef _
}
-static void unserialize_my_vector_type_multiple (serialize_main_t * m, va_list * va)
+static void
+unserialize_my_vector_type_multiple (serialize_main_t * m, va_list * va)
{
- my_vector_type_t * v = va_arg (*va, my_vector_type_t *);
+ my_vector_type_t *v = va_arg (*va, my_vector_type_t *);
u32 n = va_arg (*va, u32);
#define _(t,f) \
@@ -115,27 +120,29 @@ static void unserialize_my_vector_type_multiple (serialize_main_t * m, va_list *
#undef _
}
-typedef struct {
+typedef struct
+{
u32 n_iter;
u32 seed;
u32 verbose;
u32 multiple;
u32 max_len;
- my_vector_type_t ** test_vectors;
+ my_vector_type_t **test_vectors;
- char * dump_file;
+ char *dump_file;
serialize_main_t serialize_main;
serialize_main_t unserialize_main;
} test_serialize_main_t;
-int test_serialize_main (unformat_input_t * input)
+int
+test_serialize_main (unformat_input_t * input)
{
- clib_error_t * error = 0;
- test_serialize_main_t _tm, * tm = &_tm;
- serialize_main_t * sm = &tm->serialize_main;
- serialize_main_t * um = &tm->unserialize_main;
+ clib_error_t *error = 0;
+ test_serialize_main_t _tm, *tm = &_tm;
+ serialize_main_t *sm = &tm->serialize_main;
+ serialize_main_t *um = &tm->unserialize_main;
uword i;
memset (tm, 0, sizeof (tm[0]));
@@ -172,7 +179,8 @@ int test_serialize_main (unformat_input_t * input)
if (tm->seed == 0)
tm->seed = random_default_seed ();
- clib_warning ("iter %d seed %d max-len %d", tm->n_iter, tm->seed, tm->max_len);
+ clib_warning ("iter %d seed %d max-len %d", tm->n_iter, tm->seed,
+ tm->max_len);
#ifdef CLIB_UNIX
if (tm->dump_file)
@@ -185,22 +193,24 @@ int test_serialize_main (unformat_input_t * input)
for (i = 0; i < tm->n_iter; i++)
{
uword l = 1 + (random_u32 (&tm->seed) % tm->max_len);
- my_vector_type_t * mv;
+ my_vector_type_t *mv;
vec_resize (tm->test_vectors[i], l);
vec_foreach (mv, tm->test_vectors[i])
- {
+ {
#define _(t,f) mv->f = random_u32 (&tm->seed) & pow2_mask (31);
- foreach_my_vector_type;
+ foreach_my_vector_type;
#undef _
- }
+ }
vec_serialize (sm, tm->test_vectors[i],
- tm->multiple ? serialize_my_vector_type_multiple : serialize_my_vector_type_single);
+ tm->multiple ? serialize_my_vector_type_multiple :
+ serialize_my_vector_type_single);
}
if (tm->verbose)
- clib_warning ("overflow vector max bytes %d", vec_max_len (sm->stream.overflow_buffer));
+ clib_warning ("overflow vector max bytes %d",
+ vec_max_len (sm->stream.overflow_buffer));
serialize_close (sm);
@@ -213,17 +223,18 @@ int test_serialize_main (unformat_input_t * input)
else
#endif
{
- u8 * v = serialize_close_vector (sm);
+ u8 *v = serialize_close_vector (sm);
unserialize_open_data (um, v, vec_len (v));
}
for (i = 0; i < tm->n_iter; i++)
{
- my_vector_type_t * mv0;
- my_vector_type_t * mv1;
+ my_vector_type_t *mv0;
+ my_vector_type_t *mv1;
vec_unserialize (um, &mv0,
- tm->multiple ? unserialize_my_vector_type_multiple : unserialize_my_vector_type_single);
+ tm->multiple ? unserialize_my_vector_type_multiple :
+ unserialize_my_vector_type_single);
mv1 = tm->test_vectors[i];
if (vec_len (mv0) != vec_len (mv1))
@@ -234,14 +245,15 @@ int test_serialize_main (unformat_input_t * input)
vec_free (mv0);
}
- done:
+done:
if (error)
clib_error_report (error);
return 0;
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int r;
@@ -252,3 +264,11 @@ int main (int argc, char * argv [])
return r;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_slist.c b/vppinfra/vppinfra/test_slist.c
index 7a021532530..3c3cbf73ca9 100644
--- a/vppinfra/vppinfra/test_slist.c
+++ b/vppinfra/vppinfra/test_slist.c
@@ -14,14 +14,15 @@
*/
#ifdef CLIB_UNIX
-# include <unistd.h>
-# include <stdlib.h>
-# include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
#endif
#include <vppinfra/slist.h>
-typedef struct {
+typedef struct
+{
u32 *random_pool;
u32 seed;
u32 iter;
@@ -36,80 +37,84 @@ test_main_t test_main;
_(2) \
_(4) \
_(3) \
-_(1)
+_(1)
-void run_test (test_main_t *tm)
+void
+run_test (test_main_t * tm)
{
int i;
- u32 * tv;
+ u32 *tv;
u32 ncompares;
u64 total_compares = 0;
- if (1) {
- /*
- * Add a bunch of random numbers to the skip-list,
- * sorting them.
- */
- for (i = 0; i < tm->iter; i++)
- {
- pool_get (tm->random_pool, tv);
- *tv = random_u32 (&tm->seed);
- clib_slist_add (&tm->slist, tv, tv - tm->random_pool);
- }
- /* make sure we can find each one */
- for (i = 0; i < tm->iter; i++)
- {
- u32 search_result;
- tv = pool_elt_at_index (tm->random_pool, i);
-
- search_result = clib_slist_search (&tm->slist, tv, &ncompares);
- ASSERT(search_result == i);
-
- total_compares +=ncompares;
- }
-
- fformat(stdout, "%.2f avg compares/search\n",
- (f64)total_compares / (f64)i);
-
- fformat(stdout, "%U\n", format_slist, &tm->slist,
- tm->iter < 1000 /* verbose */);
-
- /* delete half of them */
- for (i = tm->iter / 2; i < tm->iter ; i++)
- {
- tv = pool_elt_at_index (tm->random_pool, i);
- (void) clib_slist_del (&tm->slist, tv);
- }
-
- /* make sure we can find the set we should find, and no others */
- for (i = 0; i < tm->iter; i++)
- {
- u32 search_result;
- tv = pool_elt_at_index (tm->random_pool, i);
-
- search_result = clib_slist_search (&tm->slist, tv, &ncompares);
- if (i >= tm->iter/2)
- ASSERT(search_result == (u32)~0);
- else
- ASSERT(search_result == i);
-
- }
-
- fformat(stdout, "%U\n", format_slist, &tm->slist,
- tm->iter < 1000 /* verbose */);
-
- /* delete the rest */
- for (i = 0; i < tm->iter; i++)
- {
- tv = pool_elt_at_index (tm->random_pool, i);
-
- (void) clib_slist_del (&tm->slist, tv);
- }
-
- fformat(stdout, "%U\n", format_slist, &tm->slist,
- tm->iter < 1000 /* verbose */);
- } else {
+ if (1)
+ {
+ /*
+ * Add a bunch of random numbers to the skip-list,
+ * sorting them.
+ */
+ for (i = 0; i < tm->iter; i++)
+ {
+ pool_get (tm->random_pool, tv);
+ *tv = random_u32 (&tm->seed);
+ clib_slist_add (&tm->slist, tv, tv - tm->random_pool);
+ }
+ /* make sure we can find each one */
+ for (i = 0; i < tm->iter; i++)
+ {
+ u32 search_result;
+ tv = pool_elt_at_index (tm->random_pool, i);
+
+ search_result = clib_slist_search (&tm->slist, tv, &ncompares);
+ ASSERT (search_result == i);
+
+ total_compares += ncompares;
+ }
+
+ fformat (stdout, "%.2f avg compares/search\n",
+ (f64) total_compares / (f64) i);
+
+ fformat (stdout, "%U\n", format_slist, &tm->slist,
+ tm->iter < 1000 /* verbose */ );
+
+ /* delete half of them */
+ for (i = tm->iter / 2; i < tm->iter; i++)
+ {
+ tv = pool_elt_at_index (tm->random_pool, i);
+ (void) clib_slist_del (&tm->slist, tv);
+ }
+
+ /* make sure we can find the set we should find, and no others */
+ for (i = 0; i < tm->iter; i++)
+ {
+ u32 search_result;
+ tv = pool_elt_at_index (tm->random_pool, i);
+
+ search_result = clib_slist_search (&tm->slist, tv, &ncompares);
+ if (i >= tm->iter / 2)
+ ASSERT (search_result == (u32) ~ 0);
+ else
+ ASSERT (search_result == i);
+
+ }
+
+ fformat (stdout, "%U\n", format_slist, &tm->slist,
+ tm->iter < 1000 /* verbose */ );
+
+ /* delete the rest */
+ for (i = 0; i < tm->iter; i++)
+ {
+ tv = pool_elt_at_index (tm->random_pool, i);
+
+ (void) clib_slist_del (&tm->slist, tv);
+ }
+
+ fformat (stdout, "%U\n", format_slist, &tm->slist,
+ tm->iter < 1000 /* verbose */ );
+ }
+ else
+ {
#define _(n) \
do { \
@@ -118,16 +123,17 @@ void run_test (test_main_t *tm)
clib_slist_add (&tm->slist, tv, tv - tm->random_pool); \
fformat(stdout, "%U\n", format_slist, &tm->slist, 1 /* verbose */); \
} while (0);
- foreach_simple_test;
+ foreach_simple_test;
#undef _
- }
+ }
return;
}
-word test_compare (void *key, u32 elt_index)
+word
+test_compare (void *key, u32 elt_index)
{
- u32 * k = (u32 *)key;
+ u32 *k = (u32 *) key;
u32 elt = test_main.random_pool[elt_index];
if (*k < elt)
@@ -137,7 +143,8 @@ word test_compare (void *key, u32 elt_index)
return 0;
}
-u8 * test_format (u8 * s, va_list * args)
+u8 *
+test_format (u8 * s, va_list * args)
{
u32 elt_index = va_arg (*args, u32);
u32 elt = test_main.random_pool[elt_index];
@@ -145,14 +152,15 @@ u8 * test_format (u8 * s, va_list * args)
return format (s, "%u", elt);
}
-void initialize_slist (test_main_t *tm)
+void
+initialize_slist (test_main_t * tm)
{
clib_slist_init (&tm->slist, tm->branching_factor,
- test_compare,
- test_format);
+ test_compare, test_format);
}
-int test_slist_main (unformat_input_t *input)
+int
+test_slist_main (unformat_input_t * input)
{
test_main_t *tm = &test_main;
u32 tmp;
@@ -165,42 +173,43 @@ int test_slist_main (unformat_input_t *input)
while (unformat_check_input (input) != UNFORMAT_END_OF_INPUT)
{
if (unformat (input, "seed %d", &tm->seed))
- continue;
+ continue;
else if (unformat (input, "iter %d", &tm->iter))
- continue;
+ continue;
else if (unformat (input, "verbose"))
- tm->verbose = 1;
+ tm->verbose = 1;
else if (unformat (input, "branch %d", &tmp))
- {
- if (tmp > 0)
- tm->branching_factor = 1.0 / (f64) tmp;
- else
- fformat(stderr, "warning: branch = 0, ignored\n");
- }
+ {
+ if (tmp > 0)
+ tm->branching_factor = 1.0 / (f64) tmp;
+ else
+ fformat (stderr, "warning: branch = 0, ignored\n");
+ }
else
- {
- clib_error ("unknown input `%U'", format_unformat_error, input);
+ {
+ clib_error ("unknown input `%U'", format_unformat_error, input);
goto usage;
- }
+ }
}
initialize_slist (tm);
run_test (tm);
return 0;
- usage:
- fformat(stderr, "usage: test_slist seed <seed> iter <iter> [verbose]\n");
+usage:
+ fformat (stderr, "usage: test_slist seed <seed> iter <iter> [verbose]\n");
return 1;
-
+
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
- clib_mem_init (0, (u64)4<<30);
+ clib_mem_init (0, (u64) 4 << 30);
unformat_init_command_line (&i, argv);
ret = test_slist_main (&i);
@@ -209,3 +218,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_socket.c b/vppinfra/vppinfra/test_socket.c
index ae27ac9cd38..0b05467af80 100644
--- a/vppinfra/vppinfra/test_socket.c
+++ b/vppinfra/vppinfra/test_socket.c
@@ -42,11 +42,12 @@ static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-int test_socket_main (unformat_input_t * input)
+int
+test_socket_main (unformat_input_t * input)
{
- clib_socket_t _s = {0}, * s = &_s;
- char * config;
- clib_error_t * error;
+ clib_socket_t _s = { 0 }, *s = &_s;
+ char *config;
+ clib_error_t *error;
s->config = "localhost:22";
s->flags = SOCKET_IS_CLIENT;
@@ -73,7 +74,10 @@ int test_socket_main (unformat_input_t * input)
if (0)
{
- struct { int a, b; } * msg;
+ struct
+ {
+ int a, b;
+ } *msg;
msg = clib_socket_tx_add (s, sizeof (msg[0]));
msg->a = 99;
msg->b = 100;
@@ -94,20 +98,21 @@ int test_socket_main (unformat_input_t * input)
if (clib_socket_rx_end_of_file (s))
break;
- if_verbose ("%v", s->rx_buffer);
+ if_verbose ("%v", s->rx_buffer);
_vec_len (s->rx_buffer) = 0;
}
error = clib_socket_close (s);
- done:
+done:
if (error)
clib_error_report (error);
return 0;
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int r;
@@ -119,3 +124,11 @@ int main (int argc, char * argv [])
return r;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_time.c b/vppinfra/vppinfra/test_time.c
index 08c2e9e6349..63cfeac5b0a 100644
--- a/vppinfra/vppinfra/test_time.c
+++ b/vppinfra/vppinfra/test_time.c
@@ -37,13 +37,14 @@
#include <vppinfra/format.h>
#include <vppinfra/time.h>
-#include <vppinfra/math.h> /* for sqrt */
+#include <vppinfra/math.h> /* for sqrt */
static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-static int test_time_main (unformat_input_t * input)
+static int
+test_time_main (unformat_input_t * input)
{
f64 wait, error;
f64 t, tu[3], ave, rms;
@@ -57,28 +58,30 @@ static int test_time_main (unformat_input_t * input)
ave = rms = 0;
tu[0] = unix_time_now ();
tu[1] = unix_time_now ();
- for (i = 0; i < n; i++) {
- j = 0;
- t = clib_time_now (&c);
- while (clib_time_now (&c) < t + wait)
- j++;
- t = j;
- ave += t;
- rms += t*t;
- }
+ for (i = 0; i < n; i++)
+ {
+ j = 0;
+ t = clib_time_now (&c);
+ while (clib_time_now (&c) < t + wait)
+ j++;
+ t = j;
+ ave += t;
+ rms += t * t;
+ }
tu[2] = unix_time_now ();
ave /= n;
- rms = sqrt (rms/n - ave*ave);
+ rms = sqrt (rms / n - ave * ave);
- error = ((tu[2] - tu[1]) - 2 * (tu[1] - tu[0]) - n*wait) / n;
- if_verbose ("tested %d x %.6e sec waits, error %.6e loops %.6e +- %.6e\n",
- n, wait, error, ave, rms);
+ error = ((tu[2] - tu[1]) - 2 * (tu[1] - tu[0]) - n * wait) / n;
+ if_verbose ("tested %d x %.6e sec waits, error %.6e loops %.6e +- %.6e\n",
+ n, wait, error, ave, rms);
return 0;
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
@@ -91,3 +94,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/test_timing_wheel.c b/vppinfra/vppinfra/test_timing_wheel.c
index 81483027905..0ce15ad88cb 100644
--- a/vppinfra/vppinfra/test_timing_wheel.c
+++ b/vppinfra/vppinfra/test_timing_wheel.c
@@ -29,7 +29,8 @@
#define SQRT(a) sqrt(a)
#endif
-typedef struct {
+typedef struct
+{
uword n_iter;
u32 n_events;
@@ -42,7 +43,7 @@ typedef struct {
clib_time_t time;
timing_wheel_t timing_wheel;
- u64 * events;
+ u64 *events;
f64 max_time;
f64 wait_time;
@@ -54,32 +55,36 @@ typedef struct {
f64 time_next_status_update;
} test_timing_wheel_main_t;
-typedef struct {
+typedef struct
+{
f64 dt;
f64 fraction;
u64 count;
} test_timing_wheel_tmp_t;
-static void set_event (test_timing_wheel_main_t * tm, uword i)
+static void
+set_event (test_timing_wheel_main_t * tm, uword i)
{
- timing_wheel_t * w = &tm->timing_wheel;
+ timing_wheel_t *w = &tm->timing_wheel;
u64 cpu_time;
cpu_time = w->current_time_index << w->log2_clocks_per_bin;
if (tm->synthetic_time)
cpu_time += random_u32 (&tm->seed) % tm->n_iter;
else
- cpu_time += random_f64 (&tm->seed) * tm->max_time * tm->time.clocks_per_second;
+ cpu_time +=
+ random_f64 (&tm->seed) * tm->max_time * tm->time.clocks_per_second;
timing_wheel_insert (w, cpu_time, i);
timing_wheel_validate (w);
tm->events[i] = cpu_time;
}
-static int test_timing_wheel_tmp_cmp (void * a1, void * a2)
+static int
+test_timing_wheel_tmp_cmp (void *a1, void *a2)
{
- test_timing_wheel_tmp_t * f1 = a1;
- test_timing_wheel_tmp_t * f2 = a2;
+ test_timing_wheel_tmp_t *f1 = a1;
+ test_timing_wheel_tmp_t *f2 = a2;
return f1->dt < f2->dt ? -1 : (f1->dt > f2->dt ? +1 : 0);
}
@@ -87,9 +92,9 @@ static int test_timing_wheel_tmp_cmp (void * a1, void * a2)
clib_error_t *
test_timing_wheel_main (unformat_input_t * input)
{
- clib_error_t * error = 0;
- test_timing_wheel_main_t _tm, * tm = &_tm;
- timing_wheel_t * w = &tm->timing_wheel;
+ clib_error_t *error = 0;
+ test_timing_wheel_main_t _tm, *tm = &_tm;
+ timing_wheel_t *w = &tm->timing_wheel;
uword iter, i;
memset (tm, 0, sizeof (tm[0]));
@@ -110,7 +115,8 @@ test_timing_wheel_main (unformat_input_t * input)
;
else if (unformat (input, "events %d", &tm->n_events))
;
- else if (unformat (input, "elt-time-bits %d", &w->n_wheel_elt_time_bits))
+ else
+ if (unformat (input, "elt-time-bits %d", &w->n_wheel_elt_time_bits))
;
else if (unformat (input, "seed %d", &tm->seed))
;
@@ -140,7 +146,7 @@ test_timing_wheel_main (unformat_input_t * input)
}
}
- if (! tm->seed)
+ if (!tm->seed)
tm->seed = random_default_seed ();
clib_time_init (&tm->time);
@@ -166,20 +172,21 @@ test_timing_wheel_main (unformat_input_t * input)
set_event (tm, i);
{
- u32 * expired = 0;
+ u32 *expired = 0;
f64 ave_error = 0;
f64 rms_error = 0;
f64 max_error = 0, min_error = 1e30;
- u32 * error_hist = 0;
+ u32 *error_hist = 0;
uword n_expired = 0;
- uword * expired_bitmap[2] = {0};
+ uword *expired_bitmap[2] = { 0 };
uword n_events_in_wheel = vec_len (tm->events);
vec_resize (expired, 32);
vec_resize (error_hist, 1024);
tm->time_iterate_start = clib_time_now (&tm->time);
- tm->time_next_status_update = tm->time_iterate_start + tm->time_per_status_update;
+ tm->time_next_status_update =
+ tm->time_iterate_start + tm->time_per_status_update;
if (tm->total_iterate_time != 0)
tm->n_iter = ~0;
@@ -194,7 +201,8 @@ test_timing_wheel_main (unformat_input_t * input)
cpu_time = clib_cpu_time_now ();
_vec_len (expired) = 0;
- expired = timing_wheel_advance (w, cpu_time, expired, &min_next_time[0]);
+ expired =
+ timing_wheel_advance (w, cpu_time, expired, &min_next_time[0]);
timing_wheel_validate (w);
/* Update bitmap of expired events. */
@@ -204,8 +212,11 @@ test_timing_wheel_main (unformat_input_t * input)
{
uword is_expired;
- is_expired = (cpu_time >> w->log2_clocks_per_bin) >= (tm->events[i] >> w->log2_clocks_per_bin);
- expired_bitmap[0] = clib_bitmap_set (expired_bitmap[0], i, is_expired);
+ is_expired =
+ (cpu_time >> w->log2_clocks_per_bin) >=
+ (tm->events[i] >> w->log2_clocks_per_bin);
+ expired_bitmap[0] =
+ clib_bitmap_set (expired_bitmap[0], i, is_expired);
/* Validate min next time. */
if (is_expired)
@@ -241,7 +252,9 @@ test_timing_wheel_main (unformat_input_t * input)
if (fdt_cpu < min_error)
min_error = fdt_cpu;
- idt = (cpu_time >> w->log2_clocks_per_bin) - (tm->events[j] >> w->log2_clocks_per_bin);
+ idt =
+ (cpu_time >> w->log2_clocks_per_bin) -
+ (tm->events[j] >> w->log2_clocks_per_bin);
idt = zvec_signed_to_unsigned (idt);
vec_validate (error_hist, idt);
error_hist[idt] += 1;
@@ -258,11 +271,12 @@ test_timing_wheel_main (unformat_input_t * input)
min_next_time[1] = ~0;
for (i = 0; i < vec_len (tm->events); i++)
{
- if (! clib_bitmap_get (expired_bitmap[1], i))
+ if (!clib_bitmap_get (expired_bitmap[1], i))
min_next_time[1] = clib_min (min_next_time[1], tm->events[i]);
}
if (min_next_time[0] != min_next_time[1])
- clib_error ("min next time wrong 0x%Lx != 0x%Lx", min_next_time[0], min_next_time[1]);
+ clib_error ("min next time wrong 0x%Lx != 0x%Lx", min_next_time[0],
+ min_next_time[1]);
if (tm->time_per_status_update != 0
&& clib_time_now (&tm->time) >= tm->time_next_status_update)
@@ -276,8 +290,9 @@ test_timing_wheel_main (unformat_input_t * input)
rms = SQRT (rms_error / n_expired - ave * ave);
}
- clib_warning ("%12wd iter done %10wd expired; ave. error %.4e +- %.4e, range %.4e %.4e",
- iter, n_expired, ave, rms, min_error, max_error);
+ clib_warning
+ ("%12wd iter done %10wd expired; ave. error %.4e +- %.4e, range %.4e %.4e",
+ iter, n_expired, ave, rms, min_error, max_error);
}
if (tm->total_iterate_time != 0
@@ -293,22 +308,22 @@ test_timing_wheel_main (unformat_input_t * input)
{
uword j = expired[i];
set_event (tm, j);
- expired_bitmap[1] = clib_bitmap_andnoti (expired_bitmap[1], j);
+ expired_bitmap[1] =
+ clib_bitmap_andnoti (expired_bitmap[1], j);
}
n_events_in_wheel += vec_len (expired);
}
}
-
+
ave_error /= n_expired;
rms_error = SQRT (rms_error / n_expired - ave_error * ave_error);
- clib_warning ("%wd iter done %wd expired; ave. error %.4e +- %.4e, range %.4e %.4e",
- 1 + iter, n_expired,
- ave_error, rms_error,
- min_error, max_error);
+ clib_warning
+ ("%wd iter done %wd expired; ave. error %.4e +- %.4e, range %.4e %.4e",
+ 1 + iter, n_expired, ave_error, rms_error, min_error, max_error);
{
- test_timing_wheel_tmp_t * fs, * f;
+ test_timing_wheel_tmp_t *fs, *f;
f64 total_fraction;
fs = 0;
@@ -317,8 +332,9 @@ test_timing_wheel_main (unformat_input_t * input)
if (error_hist[i] == 0)
continue;
vec_add2 (fs, f, 1);
- f->dt = (((i64) zvec_unsigned_to_signed (i) << w->log2_clocks_per_bin)
- * tm->time.seconds_per_clock);
+ f->dt =
+ (((i64) zvec_unsigned_to_signed (i) << w->log2_clocks_per_bin) *
+ tm->time.seconds_per_clock);
f->fraction = (f64) error_hist[i] / (f64) n_expired;
f->count = error_hist[i];
}
@@ -327,27 +343,29 @@ test_timing_wheel_main (unformat_input_t * input)
total_fraction = 0;
vec_foreach (f, fs)
- {
- total_fraction += f->fraction;
- if (f == fs)
- fformat (stdout, "%=12s %=16s %=16s %s\n", "Error max", "Fraction", "Total", "Count");
- fformat (stdout, "%12.4e %16.4f%% %16.4f%% %Ld\n",
- f->dt, f->fraction * 100, total_fraction * 100, f->count);
- }
+ {
+ total_fraction += f->fraction;
+ if (f == fs)
+ fformat (stdout, "%=12s %=16s %=16s %s\n", "Error max", "Fraction",
+ "Total", "Count");
+ fformat (stdout, "%12.4e %16.4f%% %16.4f%% %Ld\n", f->dt,
+ f->fraction * 100, total_fraction * 100, f->count);
+ }
}
clib_warning ("%U", format_timing_wheel, w, /* verbose */ 1);
}
- done:
+done:
return error;
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
- clib_error_t * error;
+ clib_error_t *error;
unformat_init_command_line (&i, argv);
error = test_timing_wheel_main (&i);
@@ -361,3 +379,11 @@ int main (int argc, char * argv[])
return 0;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
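
Aside on the expiry checks exercised above: an event counts as due once the current time and the event time land in the same wheel bin (both shifted right by log2_clocks_per_bin), so the scheduling error measured by this test is bounded by one bin width. A minimal standalone sketch of that arithmetic, with illustrative names only:

#include <stdint.h>

/* Bin index of a timestamp on a wheel with 2^log2_clocks_per_bin
   CPU clocks per bin. */
static inline uint64_t
bin_index (uint64_t cpu_time, unsigned log2_clocks_per_bin)
{
  return cpu_time >> log2_clocks_per_bin;
}

/* An event is due once "now" has reached its bin; the residual error
   is therefore less than one bin width. */
static inline int
event_is_expired (uint64_t now, uint64_t event_time,
		  unsigned log2_clocks_per_bin)
{
  return bin_index (now, log2_clocks_per_bin) >=
    bin_index (event_time, log2_clocks_per_bin);
}
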
diff --git a/vppinfra/vppinfra/test_vec.c b/vppinfra/vppinfra/test_vec.c
index 80832bf52fd..f0497ac640e 100644
--- a/vppinfra/vppinfra/test_vec.c
+++ b/vppinfra/vppinfra/test_vec.c
@@ -37,13 +37,13 @@
*/
#ifdef CLIB_LINUX_KERNEL
-# include <linux/unistd.h>
+#include <linux/unistd.h>
#endif
#ifdef CLIB_UNIX
-# include <unistd.h>
-# include <stdlib.h>
-# include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
#endif
#include <vppinfra/clib.h>
@@ -62,10 +62,11 @@ static int verbose;
#define MAX_CHANGE 100
-typedef enum {
+typedef enum
+{
/* Values have to be sequential and start with 0. */
OP_IS_VEC_RESIZE = 0,
- OP_IS_VEC_ADD1,
+ OP_IS_VEC_ADD1,
OP_IS_VEC_ADD2,
OP_IS_VEC_ADD,
OP_IS_VEC_INSERT,
@@ -74,7 +75,7 @@ typedef enum {
OP_IS_VEC_DUP,
OP_IS_VEC_IS_EQUAL,
OP_IS_VEC_ZERO,
- OP_IS_VEC_SET,
+ OP_IS_VEC_SET,
OP_IS_VEC_VALIDATE,
OP_IS_VEC_FREE,
OP_IS_VEC_INIT,
@@ -94,31 +95,31 @@ typedef enum {
#define LAST_VEC_HDR_OP OP_IS_VEC_FREE_H
uword g_prob_ratio[] = {
- [OP_IS_VEC_RESIZE] = 5,
- [OP_IS_VEC_ADD1] = 5,
- [OP_IS_VEC_ADD2] = 5,
- [OP_IS_VEC_ADD] = 5,
- [OP_IS_VEC_INSERT] = 5,
- [OP_IS_VEC_INSERT_ELTS] = 5,
- [OP_IS_VEC_DELETE] = 30,
- [OP_IS_VEC_DUP] = 5,
- [OP_IS_VEC_IS_EQUAL] = 5,
- [OP_IS_VEC_ZERO] = 2,
- [OP_IS_VEC_SET] = 3,
- [OP_IS_VEC_VALIDATE] = 5,
- [OP_IS_VEC_FREE] = 5,
- [OP_IS_VEC_INIT] = 5,
- [OP_IS_VEC_CLONE] = 5,
- [OP_IS_VEC_APPEND] = 5,
- [OP_IS_VEC_PREPEND] = 5,
+ [OP_IS_VEC_RESIZE] = 5,
+ [OP_IS_VEC_ADD1] = 5,
+ [OP_IS_VEC_ADD2] = 5,
+ [OP_IS_VEC_ADD] = 5,
+ [OP_IS_VEC_INSERT] = 5,
+ [OP_IS_VEC_INSERT_ELTS] = 5,
+ [OP_IS_VEC_DELETE] = 30,
+ [OP_IS_VEC_DUP] = 5,
+ [OP_IS_VEC_IS_EQUAL] = 5,
+ [OP_IS_VEC_ZERO] = 2,
+ [OP_IS_VEC_SET] = 3,
+ [OP_IS_VEC_VALIDATE] = 5,
+ [OP_IS_VEC_FREE] = 5,
+ [OP_IS_VEC_INIT] = 5,
+ [OP_IS_VEC_CLONE] = 5,
+ [OP_IS_VEC_APPEND] = 5,
+ [OP_IS_VEC_PREPEND] = 5,
/* Operations on vectors with custom headers. */
- [OP_IS_VEC_INIT_H] = 5,
- [OP_IS_VEC_RESIZE_H] = 5,
- [OP_IS_VEC_FREE_H] = 5,
+ [OP_IS_VEC_INIT_H] = 5,
+ [OP_IS_VEC_RESIZE_H] = 5,
+ [OP_IS_VEC_FREE_H] = 5,
};
-op_t * g_prob;
-op_t * g_prob_wh;
+op_t *g_prob;
+op_t *g_prob_wh;
uword g_call_stats[OP_MAX];
@@ -126,28 +127,30 @@ uword g_call_stats[OP_MAX];
/* A structure for both vector headers and vector elements might be useful to
   uncover potential alignment issues. */
-typedef struct {
+typedef struct
+{
u8 field1[4];
- CLIB_PACKED (u32 field2);
+ CLIB_PACKED (u32 field2);
} hdr_t;
-typedef struct {
+typedef struct
+{
u8 field1[3];
- CLIB_PACKED (u32 field2);
+ CLIB_PACKED (u32 field2);
} elt_t;
-
#ifdef CLIB_UNIX
u32 g_seed = 0xdeadbabe;
uword g_verbose = 1;
#endif
-op_t * g_op_prob;
+op_t *g_op_prob;
uword g_set_verbose_at = ~0;
uword g_dump_period = ~0;
-static u8 * format_vec_op_type (u8 * s, va_list * args)
+static u8 *
+format_vec_op_type (u8 * s, va_list * args)
{
op_t op = va_arg (*args, int);
@@ -158,10 +161,25 @@ static u8 * format_vec_op_type (u8 * s, va_list * args)
s = format (s, "OP_IS_" #n); \
break;
- _(VEC_RESIZE); _(VEC_ADD1); _(VEC_ADD2); _(VEC_ADD); _(VEC_INSERT);
- _(VEC_INSERT_ELTS); _(VEC_DELETE); _(VEC_DUP); _(VEC_IS_EQUAL);
- _(VEC_ZERO); _(VEC_SET); _(VEC_VALIDATE); _(VEC_FREE); _(VEC_INIT);
- _(VEC_CLONE); _(VEC_APPEND); _(VEC_PREPEND); _(VEC_INIT_H); _(VEC_RESIZE_H);
+ _(VEC_RESIZE);
+ _(VEC_ADD1);
+ _(VEC_ADD2);
+ _(VEC_ADD);
+ _(VEC_INSERT);
+ _(VEC_INSERT_ELTS);
+ _(VEC_DELETE);
+ _(VEC_DUP);
+ _(VEC_IS_EQUAL);
+ _(VEC_ZERO);
+ _(VEC_SET);
+ _(VEC_VALIDATE);
+ _(VEC_FREE);
+ _(VEC_INIT);
+ _(VEC_CLONE);
+ _(VEC_APPEND);
+ _(VEC_PREPEND);
+ _(VEC_INIT_H);
+ _(VEC_RESIZE_H);
_(VEC_FREE_H);
default:
@@ -174,7 +192,8 @@ static u8 * format_vec_op_type (u8 * s, va_list * args)
return s;
}
-static void dump_call_stats (uword * stats)
+static void
+dump_call_stats (uword * stats)
{
uword i;
@@ -229,27 +248,30 @@ create_random_vec_wh (elt_type, len, 0, seed)
compute_mem_hash (_v(hh), _v(v), _v(n)); \
})
-static elt_t * validate_vec_free (elt_t * vec)
+static elt_t *
+validate_vec_free (elt_t * vec)
{
vec_free (vec);
ASSERT (vec == NULL);
return vec;
}
-static elt_t * validate_vec_free_h (elt_t * vec, uword hdr_bytes)
+static elt_t *
+validate_vec_free_h (elt_t * vec, uword hdr_bytes)
{
vec_free_h (vec, hdr_bytes);
ASSERT (vec == NULL);
return vec;
}
-static void validate_vec_hdr (elt_t * vec, uword hdr_bytes)
+static void
+validate_vec_hdr (elt_t * vec, uword hdr_bytes)
{
- u8 * hdr;
- u8 * hdr_end;
- vec_header_t * vh;
+ u8 *hdr;
+ u8 *hdr_end;
+ vec_header_t *vh;
- if (! vec)
+ if (!vec)
return;
vh = _vec_find (vec);
@@ -260,16 +282,17 @@ static void validate_vec_hdr (elt_t * vec, uword hdr_bytes)
ASSERT ((u8 *) vh - (u8 *) hdr >= hdr_bytes);
}
-static void validate_vec_len (elt_t * vec)
+static void
+validate_vec_len (elt_t * vec)
{
- u8 * ptr;
- u8 * end;
+ u8 *ptr;
+ u8 *end;
uword len;
uword bytes;
uword i;
- elt_t * elt;
+ elt_t *elt;
- if (! vec)
+ if (!vec)
return;
ptr = (u8 *) vec;
@@ -284,7 +307,7 @@ static void validate_vec_len (elt_t * vec)
/* XXX - TODO: confirm that auto-incrementing in vec_is_member() would not
have the expected result. */
- while (vec_is_member (vec, (__typeof__(vec[0]) *) ptr))
+ while (vec_is_member (vec, (__typeof__ (vec[0]) *) ptr))
{
ptr++;
i++;
@@ -295,18 +318,18 @@ static void validate_vec_len (elt_t * vec)
i = 0;
- vec_foreach (elt, vec)
- i++;
+ vec_foreach (elt, vec) i++;
ASSERT (i == len);
}
-static void validate_vec (elt_t * vec, uword hdr_bytes)
+static void
+validate_vec (elt_t * vec, uword hdr_bytes)
{
validate_vec_hdr (vec, hdr_bytes);
validate_vec_len (vec);
- if (! vec || vec_len (vec) == 0)
+ if (!vec || vec_len (vec) == 0)
{
VERBOSE3 ("Vector at %p has zero elements.\n\n", vec);
}
@@ -316,13 +339,14 @@ static void validate_vec (elt_t * vec, uword hdr_bytes)
VERBOSE3 ("Header: %U\n",
format_hex_bytes, vec_header (vec, sizeof (vec[0])),
sizeof (vec[0]));
-
+
VERBOSE3 ("%U\n\n",
format_hex_bytes, vec, vec_len (vec) * sizeof (vec[0]));
}
}
-static elt_t * validate_vec_resize (elt_t * vec, uword num_elts)
+static elt_t *
+validate_vec_resize (elt_t * vec, uword num_elts)
{
uword len1 = vec_len (vec);
uword len2;
@@ -337,12 +361,12 @@ static elt_t * validate_vec_resize (elt_t * vec, uword num_elts)
return vec;
}
-static elt_t * validate_vec_resize_h (elt_t * vec,
- uword num_elts, uword hdr_bytes)
+static elt_t *
+validate_vec_resize_h (elt_t * vec, uword num_elts, uword hdr_bytes)
{
uword len1, len2;
- u8 * end1, * end2;
- u8 * hdr = NULL;
+ u8 *end1, *end2;
+ u8 *hdr = NULL;
u8 hash, hdr_hash;
len1 = vec_len (vec);
@@ -376,13 +400,13 @@ static elt_t * validate_vec_resize_h (elt_t * vec,
return vec;
}
-static elt_t * generic_validate_vec_add (elt_t * vec,
- uword num_elts, uword is_add2)
+static elt_t *
+generic_validate_vec_add (elt_t * vec, uword num_elts, uword is_add2)
{
uword len1 = vec_len (vec);
uword len2;
u8 hash = compute_vec_hash (0, vec);
- elt_t * new;
+ elt_t *new;
if (is_add2)
{
@@ -391,12 +415,13 @@ static elt_t * generic_validate_vec_add (elt_t * vec,
else
{
new = create_random_vec (elt_t, num_elts, g_seed);
-
- VERBOSE3 ("%U\n", format_hex_bytes, new, vec_len (new) * sizeof (new[0]));
-
+
+ VERBOSE3 ("%U\n", format_hex_bytes, new,
+ vec_len (new) * sizeof (new[0]));
+
/* Add the hash value of the new elements to that of the old vector. */
hash = compute_vec_hash (hash, new);
-
+
if (num_elts == 1)
vec_add1 (vec, new[0]);
else if (num_elts > 1)
@@ -413,23 +438,26 @@ static elt_t * generic_validate_vec_add (elt_t * vec,
return vec;
}
-static elt_t * validate_vec_add1 (elt_t * vec)
+static elt_t *
+validate_vec_add1 (elt_t * vec)
{
return generic_validate_vec_add (vec, 1, 0);
}
-static elt_t * validate_vec_add2 (elt_t * vec, uword num_elts)
+static elt_t *
+validate_vec_add2 (elt_t * vec, uword num_elts)
{
return generic_validate_vec_add (vec, num_elts, 1);
}
-static elt_t * validate_vec_add (elt_t * vec, uword num_elts)
+static elt_t *
+validate_vec_add (elt_t * vec, uword num_elts)
{
return generic_validate_vec_add (vec, num_elts, 0);
}
-static elt_t * validate_vec_insert (elt_t * vec,
- uword num_elts, uword start_elt)
+static elt_t *
+validate_vec_insert (elt_t * vec, uword num_elts, uword start_elt)
{
uword len1 = vec_len (vec);
uword len2;
@@ -449,12 +477,12 @@ static elt_t * validate_vec_insert (elt_t * vec,
return vec;
}
-static elt_t * validate_vec_insert_elts (elt_t * vec,
- uword num_elts, uword start_elt)
+static elt_t *
+validate_vec_insert_elts (elt_t * vec, uword num_elts, uword start_elt)
{
uword len1 = vec_len (vec);
uword len2;
- elt_t * new;
+ elt_t *new;
u8 hash;
/* vec_insert_elts() would not handle it properly. */
@@ -462,13 +490,13 @@ static elt_t * validate_vec_insert_elts (elt_t * vec,
return vec;
new = create_random_vec (elt_t, num_elts, g_seed);
-
+
VERBOSE3 ("%U\n", format_hex_bytes, new, vec_len (new) * sizeof (new[0]));
-
+
/* Add the hash value of the new elements to that of the old vector. */
hash = compute_vec_hash (0, vec);
hash = compute_vec_hash (hash, new);
-
+
vec_insert_elts (vec, new, num_elts, start_elt);
len2 = vec_len (vec);
@@ -480,12 +508,12 @@ static elt_t * validate_vec_insert_elts (elt_t * vec,
return vec;
}
-static elt_t * validate_vec_delete (elt_t * vec,
- uword num_elts, uword start_elt)
+static elt_t *
+validate_vec_delete (elt_t * vec, uword num_elts, uword start_elt)
{
uword len1 = vec_len (vec);
uword len2;
- u8 * start;
+ u8 *start;
u8 hash;
u8 hash_del;
@@ -508,9 +536,10 @@ static elt_t * validate_vec_delete (elt_t * vec,
return vec;
}
-static elt_t * validate_vec_dup (elt_t * vec)
+static elt_t *
+validate_vec_dup (elt_t * vec)
{
- elt_t * new;
+ elt_t *new;
u8 hash;
hash = compute_vec_hash (0, vec);
@@ -522,10 +551,11 @@ static elt_t * validate_vec_dup (elt_t * vec)
return new;
}
-static elt_t * validate_vec_zero (elt_t * vec)
+static elt_t *
+validate_vec_zero (elt_t * vec)
{
- u8 * ptr;
- u8 * end;
+ u8 *ptr;
+ u8 *end;
vec_zero (vec);
@@ -543,9 +573,10 @@ static elt_t * validate_vec_zero (elt_t * vec)
return vec;
}
-static void validate_vec_is_equal (elt_t * vec)
+static void
+validate_vec_is_equal (elt_t * vec)
{
- elt_t * new = NULL;
+ elt_t *new = NULL;
if (vec_len (vec) <= 0)
return;
@@ -555,13 +586,14 @@ static void validate_vec_is_equal (elt_t * vec)
vec_free (new);
}
-static elt_t * validate_vec_set (elt_t * vec)
+static elt_t *
+validate_vec_set (elt_t * vec)
{
uword i;
uword len = vec_len (vec);
- elt_t * new;
+ elt_t *new;
- if (! vec)
+ if (!vec)
return NULL;
new = create_random_vec (elt_t, 1, g_seed);
@@ -578,12 +610,13 @@ static elt_t * validate_vec_set (elt_t * vec)
return vec;
}
-static elt_t * validate_vec_validate (elt_t * vec, uword index)
+static elt_t *
+validate_vec_validate (elt_t * vec, uword index)
{
uword len = vec_len (vec);
word num_new = index - len + 1;
- u8 * ptr;
- u8 * end;
+ u8 *ptr;
+ u8 *end;
u8 hash = compute_vec_hash (0, vec);
if (num_new < 0)
@@ -609,16 +642,17 @@ static elt_t * validate_vec_validate (elt_t * vec, uword index)
return vec;
}
-static elt_t * validate_vec_init (uword num_elts)
+static elt_t *
+validate_vec_init (uword num_elts)
{
- u8 * ptr;
- u8 * end;
+ u8 *ptr;
+ u8 *end;
uword len;
- elt_t * new;
+ elt_t *new;
new = vec_new (elt_t, num_elts);
len = vec_len (new);
-
+
ASSERT (len == num_elts);
ptr = (u8 *) new;
@@ -630,18 +664,19 @@ static elt_t * validate_vec_init (uword num_elts)
ASSERT (ptr[0] == 0);
ptr++;
}
-
+
validate_vec (new, 0);
return new;
}
-static elt_t * validate_vec_init_h (uword num_elts, uword hdr_bytes)
+static elt_t *
+validate_vec_init_h (uword num_elts, uword hdr_bytes)
{
uword i = 0;
- u8 * ptr;
- u8 * end;
+ u8 *ptr;
+ u8 *end;
uword len;
- elt_t * new;
+ elt_t *new;
new = vec_new_ha (elt_t, num_elts, hdr_bytes, 0);
len = vec_len (new);
@@ -675,9 +710,10 @@ static elt_t * validate_vec_init_h (uword num_elts, uword hdr_bytes)
}
/* XXX - I don't understand the purpose of the vec_clone() call. */
-static elt_t * validate_vec_clone (elt_t * vec)
+static elt_t *
+validate_vec_clone (elt_t * vec)
{
- elt_t * new;
+ elt_t *new;
vec_clone (new, vec);
@@ -687,19 +723,20 @@ static elt_t * validate_vec_clone (elt_t * vec)
return new;
}
-static elt_t * validate_vec_append (elt_t * vec)
+static elt_t *
+validate_vec_append (elt_t * vec)
{
- elt_t * new;
+ elt_t *new;
uword num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
uword len;
u8 hash = 0;
-
+
new = create_random_vec (elt_t, num_elts, g_seed);
len = vec_len (vec) + vec_len (new);
hash = compute_vec_hash (0, vec);
hash = compute_vec_hash (hash, new);
-
+
vec_append (vec, new);
vec_free (new);
@@ -709,19 +746,20 @@ static elt_t * validate_vec_append (elt_t * vec)
return vec;
}
-static elt_t * validate_vec_prepend (elt_t * vec)
+static elt_t *
+validate_vec_prepend (elt_t * vec)
{
- elt_t * new;
+ elt_t *new;
uword num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
uword len;
u8 hash = 0;
-
+
new = create_random_vec (elt_t, num_elts, g_seed);
len = vec_len (vec) + vec_len (new);
hash = compute_vec_hash (0, vec);
hash = compute_vec_hash (hash, new);
-
+
vec_prepend (vec, new);
vec_free (new);
@@ -731,15 +769,16 @@ static elt_t * validate_vec_prepend (elt_t * vec)
return vec;
}
-static void run_validator_wh (uword iter)
+static void
+run_validator_wh (uword iter)
{
- elt_t * vec;
+ elt_t *vec;
uword i;
uword op;
uword num_elts;
uword len;
uword dump_time;
- f64 time[3]; /* [0]: start, [1]: last, [2]: current */
+ f64 time[3]; /* [0]: start, [1]: last, [2]: current */
vec = create_random_vec_wh (elt_t, ~0, sizeof (hdr_t), g_seed);
validate_vec (vec, 0);
@@ -805,17 +844,18 @@ static void run_validator_wh (uword iter)
vec_free_h (vec, sizeof (hdr_t));
}
-static void run_validator (uword iter)
+static void
+run_validator (uword iter)
{
- elt_t * vec;
- elt_t * new;
+ elt_t *vec;
+ elt_t *new;
uword i;
uword op;
uword num_elts;
uword index;
uword len;
uword dump_time;
- f64 time[3]; /* [0]: start, [1]: last, [2]: current */
+ f64 time[3]; /* [0]: start, [1]: last, [2]: current */
vec = create_random_vec (elt_t, ~0, g_seed);
validate_vec (vec, 0);
@@ -864,7 +904,8 @@ static void run_validator (uword iter)
num_elts = bounded_random_u32 (&g_seed, 0, MAX_CHANGE);
index = bounded_random_u32 (&g_seed, 0,
(len > 0) ? (len - 1) : (0));
- VERBOSE2 ("vec_insert(), %d new elts, index %d.\n", num_elts, index);
+ VERBOSE2 ("vec_insert(), %d new elts, index %d.\n", num_elts,
+ index);
vec = validate_vec_insert (vec, num_elts, index);
break;
@@ -932,7 +973,7 @@ static void run_validator (uword iter)
new = validate_vec_clone (vec);
vec_free (new);
break;
-
+
case OP_IS_VEC_APPEND:
VERBOSE2 ("vec_append()\n");
vec = validate_vec_append (vec);
@@ -942,7 +983,7 @@ static void run_validator (uword iter)
VERBOSE2 ("vec_prepend()\n");
vec = validate_vec_prepend (vec);
break;
-
+
default:
ASSERT (0);
break;
@@ -970,12 +1011,13 @@ static void run_validator (uword iter)
vec_free (vec);
}
-static void prob_init (void)
+static void
+prob_init (void)
{
uword i, j, ratio, len, index;
/* Create the vector to implement the statistical profile:
- vec [ op1 op1 op1 op2 op3 op3 op3 op4 op4 .... ] */
+ vec [ op1 op1 op1 op2 op3 op3 op3 op4 op4 .... ] */
for (i = FIRST_VEC_OP; i <= LAST_VEC_OP; i++)
{
ratio = g_prob_ratio[i];
@@ -1012,19 +1054,21 @@ static void prob_init (void)
}
VERBOSE3 ("prob_vec, len %d\n%U\n", vec_len (g_prob),
- format_hex_bytes, g_prob, vec_len (g_prob) * sizeof (g_prob[0]));
+ format_hex_bytes, g_prob, vec_len (g_prob) * sizeof (g_prob[0]));
VERBOSE3 ("prob_vec_wh, len %d\n%U\n", vec_len (g_prob_wh),
- format_hex_bytes, g_prob_wh,
+ format_hex_bytes, g_prob_wh,
vec_len (g_prob_wh) * sizeof (g_prob_wh[0]));
}
-static void prob_free (void)
+static void
+prob_free (void)
{
vec_free (g_prob);
vec_free (g_prob_wh);
}
-int test_vec_main (unformat_input_t * input)
+int
+test_vec_main (unformat_input_t * input)
{
uword iter = 1000;
uword help = 0;
@@ -1038,7 +1082,7 @@ int test_vec_main (unformat_input_t * input)
&& 0 == unformat (input, "set %d", &g_set_verbose_at)
&& 0 == unformat (input, "dump %d", &g_dump_period)
&& 0 == unformat (input, "help %=", &help, 1)
- && 0 == unformat (input, "big %=", &big, 1))
+ && 0 == unformat (input, "big %=", &big, 1))
{
clib_error ("unknown input `%U'", format_unformat_error, input);
goto usage;
@@ -1047,8 +1091,8 @@ int test_vec_main (unformat_input_t * input)
if (big)
{
- u8 * bigboy = 0;
- u64 one_gig = (1<<30);
+ u8 *bigboy = 0;
+ u64 one_gig = (1 << 30);
u64 size;
u64 index;
@@ -1056,12 +1100,12 @@ int test_vec_main (unformat_input_t * input)
size = 5ULL * one_gig;
vec_validate (bigboy, size);
-
+
for (index = size; index >= 0; index--)
- bigboy[index] = index & 0xff;
+ bigboy[index] = index & 0xff;
return 0;
}
-
+
if (help)
goto usage;
@@ -1069,13 +1113,17 @@ int test_vec_main (unformat_input_t * input)
prob_init ();
run_validator (iter);
run_validator_wh (iter);
- if (verbose) dump_call_stats (g_call_stats);
+ if (verbose)
+ dump_call_stats (g_call_stats);
prob_free ();
- if (verbose) { memory_snap (); }
+ if (verbose)
+ {
+ memory_snap ();
+ }
return 0;
- usage:
+usage:
fformat (stdout, "Usage: test_vec iter <N> seed <N> verbose <N> "
"set <N> dump <N>\n");
if (help)
@@ -1085,12 +1133,13 @@ int test_vec_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
- mheap_alloc (0, (uword)10ULL<<30);
+ mheap_alloc (0, (uword) 10ULL << 30);
verbose = (argc > 1);
unformat_init_command_line (&i, argv);
@@ -1100,3 +1149,11 @@ int main (int argc, char * argv[])
return ret;
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
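
The profile built in prob_init() above is a plain weighted-choice table: each operation id is repeated in a flat array in proportion to its ratio, and a uniformly random index then selects operations with the intended relative frequencies. A standalone sketch using ordinary C arrays and rand() instead of vppinfra vectors and bounded_random_u32(); all names here are illustrative:

#include <stdlib.h>

enum { OP_A = 0, OP_B, OP_C, N_OPS };

/* Relative weights, analogous to g_prob_ratio[]. */
static const unsigned op_weight[N_OPS] = { 5, 30, 2 };

/* Build the flat profile array: OP_A repeated 5 times, OP_B 30 times,
   OP_C twice. */
static unsigned *
build_profile (unsigned *out_len)
{
  unsigned i, j, k = 0, len = 0;
  unsigned *profile;

  for (i = 0; i < N_OPS; i++)
    len += op_weight[i];
  profile = malloc (len * sizeof (profile[0]));
  if (!profile)
    abort ();
  for (i = 0; i < N_OPS; i++)
    for (j = 0; j < op_weight[i]; j++)
      profile[k++] = i;
  *out_len = len;
  return profile;
}

/* Pick an operation with probability proportional to its weight. */
static unsigned
pick_op (const unsigned *profile, unsigned len)
{
  return profile[(unsigned) rand () % len];
}
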
diff --git a/vppinfra/vppinfra/test_vec.h b/vppinfra/vppinfra/test_vec.h
index a8aef56d293..28e8e2a081d 100644
--- a/vppinfra/vppinfra/test_vec.h
+++ b/vppinfra/vppinfra/test_vec.h
@@ -48,20 +48,21 @@
extern uword g_verbose;
extern u32 g_seed;
-always_inline u8 * format_u32_binary (u8 * s, va_list * va)
+always_inline u8 *
+format_u32_binary (u8 * s, va_list * va)
{
- u32 val = va_arg (*va, u32);
- word i = 0;
-
- for (i = BITS (val) - 1; i >= 0; i--)
- {
- if (val & (1 << i))
- s = format (s, "1");
- else
- s = format (s, "0");
- }
-
- return s;
+ u32 val = va_arg (*va, u32);
+ word i = 0;
+
+ for (i = BITS (val) - 1; i >= 0; i--)
+ {
+ if (val & (1 << i))
+ s = format (s, "1");
+ else
+ s = format (s, "0");
+ }
+
+ return s;
}
#define VERBOSE1(fmt, args...) \
@@ -93,7 +94,8 @@ do { \
/* XXX - I get undefined symbol trying to call random_u32() <vppinfra/random.h> */
/* Simple random number generator with period 2^31 - 1. */
-static u32 my_random_u32 (u32 * seed_return)
+static u32
+my_random_u32 (u32 * seed_return)
{
/* Unlikely mask value to XOR into seed.
Otherwise small seed values would give
@@ -118,14 +120,15 @@ static u32 my_random_u32 (u32 * seed_return)
return result;
}
-static u32 bounded_random_u32 (u32 * seed, uword lo, uword hi)
+static u32
+bounded_random_u32 (u32 * seed, uword lo, uword hi)
{
if (lo == hi)
return lo;
ASSERT (lo < hi);
- return ((my_random_u32 (seed) % (hi - lo + ((hi != ~0) ? (1) : (0)))) + lo);
+ return ((my_random_u32 (seed) % (hi - lo + ((hi != ~0) ? (1) : (0)))) + lo);
}
#define fill_with_random_data(ptr, bytes, seed) \
@@ -184,15 +187,16 @@ uword_to_pointer (log2_align_up (pointer_to_uword (ptr), align), void *)
/* Allocates pointer to memory whose address is:
addr = <log2_align>-aligned address */
-always_inline void * alloc_aligned (uword size, uword log2_align, void ** ptr_to_free)
+always_inline void *
+alloc_aligned (uword size, uword log2_align, void **ptr_to_free)
{
- void * p;
-
+ void *p;
+
if (size <= 0)
return NULL;
-
+
p = (void *) clib_mem_alloc (size + (1 << log2_align) - 1);
-
+
if (ptr_to_free)
*ptr_to_free = p;
@@ -201,18 +205,20 @@ always_inline void * alloc_aligned (uword size, uword log2_align, void ** ptr_to
/* Allocates pointer to memory whose address is:
addr = MAX_LOG2_ALIGN-aligned address + <offset> */
-always_inline void * alloc_unaligned (uword size, uword offset, void ** ptr_to_free)
+always_inline void *
+alloc_unaligned (uword size, uword offset, void **ptr_to_free)
{
- void * p;
+ void *p;
if (size <= 0)
return NULL;
ASSERT (offset <= MAX_UNALIGN_OFFSET);
- p = alloc_aligned (size + (1 << MAX_LOG2_ALIGN), MAX_LOG2_ALIGN, ptr_to_free);
+ p =
+ alloc_aligned (size + (1 << MAX_LOG2_ALIGN), MAX_LOG2_ALIGN, ptr_to_free);
- if (! p)
+ if (!p)
return NULL;
return (void *) ((u8 *) p + (offset % MAX_UNALIGN_OFFSET));
@@ -227,3 +233,11 @@ do { \
#endif /* included_test_vec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
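
alloc_aligned()/alloc_unaligned() above rely on the usual over-allocate-then-round trick: request size plus the worst-case alignment slack, keep the raw pointer for freeing, and return an address inside the block that is either aligned or deliberately offset. A standalone sketch with standard malloc(), purely illustrative:

#include <stdint.h>
#include <stdlib.h>

/* Over-allocate, remember the raw pointer for free(), and return the
   first address aligned to (1 << log2_align) bytes inside the block. */
static void *
alloc_aligned_sketch (size_t size, unsigned log2_align, void **raw)
{
  size_t align = (size_t) 1 << log2_align;
  uintptr_t p = (uintptr_t) malloc (size + align - 1);

  if (raw)
    *raw = (void *) p;
  if (!p)
    return NULL;
  return (void *) ((p + align - 1) & ~(uintptr_t) (align - 1));
}

/* Deliberately misaligned pointer: an aligned base plus a small offset,
   useful for exercising alignment assumptions as test_vec.h does. */
static void *
alloc_unaligned_sketch (size_t size, size_t offset, unsigned log2_align,
			void **raw)
{
  uint8_t *p;

  offset &= ((size_t) 1 << log2_align) - 1;	/* stay within the slack */
  p = alloc_aligned_sketch (size + ((size_t) 1 << log2_align),
			    log2_align, raw);
  return p ? (void *) (p + offset) : NULL;
}
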
diff --git a/vppinfra/vppinfra/test_vhash.c b/vppinfra/vppinfra/test_vhash.c
index 8316cf76d2b..7293fdde86e 100644
--- a/vppinfra/vppinfra/test_vhash.c
+++ b/vppinfra/vppinfra/test_vhash.c
@@ -50,7 +50,8 @@
#ifdef CLIB_HAVE_VEC128
-typedef struct {
+typedef struct
+{
u32 n_iter;
u32 seed;
u32 verbose;
@@ -61,20 +62,21 @@ typedef struct {
u32 n_vectors_div_4;
u32 n_vectors_mod_4;
- u32 * keys;
- u32 * results;
+ u32 *keys;
+ u32 *results;
- u32 * vhash_get_key_indices;
- u32 * vhash_get_results;
+ u32 *vhash_get_key_indices;
+ u32 *vhash_get_results;
- u32 * vhash_key_indices;
- u32 * vhash_results;
+ u32 *vhash_key_indices;
+ u32 *vhash_results;
vhash_t vhash;
- uword ** key_hash;
+ uword **key_hash;
- struct {
+ struct
+ {
u64 n_clocks;
u64 n_vectors;
u64 n_calls;
@@ -82,9 +84,9 @@ typedef struct {
} test_vhash_main_t;
always_inline u32
-test_vhash_key_gather (void * _tm, u32 vi, u32 wi, u32 n_key_u32s)
+test_vhash_key_gather (void *_tm, u32 vi, u32 wi, u32 n_key_u32s)
{
- test_vhash_main_t * tm = _tm;
+ test_vhash_main_t *tm = _tm;
ASSERT (n_key_u32s == tm->n_key_u32);
ASSERT (wi < n_key_u32s);
vi = vec_elt (tm->vhash_key_indices, vi);
@@ -92,10 +94,10 @@ test_vhash_key_gather (void * _tm, u32 vi, u32 wi, u32 n_key_u32s)
}
always_inline u32x4
-test_vhash_4key_gather (void * _tm, u32 vi, u32 wi, u32 n_key_u32s)
+test_vhash_4key_gather (void *_tm, u32 vi, u32 wi, u32 n_key_u32s)
{
- test_vhash_main_t * tm = _tm;
- u32 * p;
+ test_vhash_main_t *tm = _tm;
+ u32 *p;
u32x4_union_t x;
ASSERT (n_key_u32s == tm->n_key_u32);
@@ -110,47 +112,41 @@ test_vhash_4key_gather (void * _tm, u32 vi, u32 wi, u32 n_key_u32s)
}
always_inline u32
-test_vhash_get_result (void * _tm,
- u32 vector_index,
- u32 result_index,
- u32 n_key_u32s)
+test_vhash_get_result (void *_tm,
+ u32 vector_index, u32 result_index, u32 n_key_u32s)
{
- test_vhash_main_t * tm = _tm;
- u32 * p = vec_elt_at_index (tm->vhash_results, vector_index);
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, vector_index);
p[0] = result_index;
return result_index;
}
always_inline u32x4
-test_vhash_get_4result (void * _tm,
- u32 vector_index,
- u32x4 results,
- u32 n_key_u32s)
+test_vhash_get_4result (void *_tm,
+ u32 vector_index, u32x4 results, u32 n_key_u32s)
{
- test_vhash_main_t * tm = _tm;
- u32 * p = vec_elt_at_index (tm->vhash_results, vector_index);
- *(u32x4 *)p = results;
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, vector_index);
+ *(u32x4 *) p = results;
return results;
}
always_inline u32
-test_vhash_set_result (void * _tm,
- u32 vector_index,
- u32 old_result,
- u32 n_key_u32s)
+test_vhash_set_result (void *_tm,
+ u32 vector_index, u32 old_result, u32 n_key_u32s)
{
- test_vhash_main_t * tm = _tm;
- u32 * p = vec_elt_at_index (tm->vhash_results, vector_index);
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, vector_index);
u32 new_result = p[0];
p[0] = old_result;
return new_result;
}
always_inline u32
-test_vhash_unset_result (void * _tm, u32 i, u32 old_result, u32 n_key_u32s)
+test_vhash_unset_result (void *_tm, u32 i, u32 old_result, u32 n_key_u32s)
{
- test_vhash_main_t * tm = _tm;
- u32 * p = vec_elt_at_index (tm->vhash_results, i);
+ test_vhash_main_t *tm = _tm;
+ u32 *p = vec_elt_at_index (tm->vhash_results, i);
p[0] = old_result;
return 0;
}
@@ -268,12 +264,12 @@ test_vhash_unset_result (void * _tm, u32 i, u32 old_result, u32 n_key_u32s)
tm, N_KEY_U32); \
})
-_ (1);
-_ (2);
-_ (3);
-_ (4);
-_ (5);
-_ (6);
+_(1);
+_(2);
+_(3);
+_(4);
+_(5);
+_(6);
#undef _
@@ -292,22 +288,21 @@ _ (6);
vhash_mix_stage (&tm->vhash, tm->n_vectors_div_4, N_KEY_U32); \
})
-_ (4);
-_ (5);
-_ (6);
+_(4);
+_(5);
+_(6);
#undef _
-typedef enum {
+typedef enum
+{
GET, SET, UNSET,
} test_vhash_op_t;
static void
test_vhash_op (test_vhash_main_t * tm,
u32 * key_indices,
- u32 * results,
- uword n_keys,
- test_vhash_op_t op)
+ u32 * results, uword n_keys, test_vhash_op_t op)
{
vhash_validate_sizes (&tm->vhash, tm->n_key_u32, n_keys);
@@ -349,9 +344,9 @@ test_vhash_op (test_vhash_main_t * tm,
test_vhash_unset_stage_##N_KEY_U32); \
break;
- _ (1);
- _ (2);
- _ (3);
+ _(1);
+ _(2);
+ _(3);
#undef _
@@ -383,9 +378,9 @@ test_vhash_op (test_vhash_main_t * tm,
test_vhash_unset_stage_##N_KEY_U32); \
break;
- _ (4);
- _ (5);
- _ (6);
+ _(4);
+ _(5);
+ _(6);
#undef _
}
@@ -425,9 +420,9 @@ test_vhash_op (test_vhash_main_t * tm,
test_vhash_unset_mod_stage_##N_KEY_U32); \
break;
- _ (1);
- _ (2);
- _ (3);
+ _(1);
+ _(2);
+ _(3);
#undef _
@@ -459,20 +454,21 @@ test_vhash_op (test_vhash_main_t * tm,
test_vhash_unset_mod_stage_##N_KEY_U32); \
break;
- _ (4);
- _ (5);
- _ (6);
+ _(4);
+ _(5);
+ _(6);
#undef _
}
}
}
-int test_vhash_main (unformat_input_t * input)
+int
+test_vhash_main (unformat_input_t * input)
{
- clib_error_t * error = 0;
- test_vhash_main_t _tm, * tm = &_tm;
- vhash_t * vh = &tm->vhash;
+ clib_error_t *error = 0;
+ test_vhash_main_t _tm, *tm = &_tm;
+ vhash_t *vh = &tm->vhash;
uword i, j;
memset (tm, 0, sizeof (tm[0]));
@@ -509,7 +505,8 @@ int test_vhash_main (unformat_input_t * input)
tm->seed = random_default_seed ();
clib_warning ("iter %d seed %d n-keys %d log2-size %d key-words %d",
- tm->n_iter, tm->seed, tm->n_keys, tm->log2_size, tm->n_key_u32);
+ tm->n_iter, tm->seed, tm->n_keys, tm->log2_size,
+ tm->n_key_u32);
{
u32 seeds[3];
@@ -523,31 +520,36 @@ int test_vhash_main (unformat_input_t * input)
for (i = j = 0; i < vec_len (tm->keys); i++, j++)
{
j = j == tm->n_key_u32 ? 0 : j;
- do {
- tm->keys[i] = random_u32 (&tm->seed);
- } while (hash_get (tm->key_hash[j], tm->keys[i]));
+ do
+ {
+ tm->keys[i] = random_u32 (&tm->seed);
+ }
+ while (hash_get (tm->key_hash[j], tm->keys[i]));
hash_set (tm->key_hash[j], tm->keys[i], 0);
}
vec_resize (tm->results, tm->n_keys);
for (i = 0; i < vec_len (tm->results); i++)
{
- do {
- tm->results[i] = random_u32 (&tm->seed);
- } while (tm->results[i] == ~0);
+ do
+ {
+ tm->results[i] = random_u32 (&tm->seed);
+ }
+ while (tm->results[i] == ~0);
}
- vec_resize_aligned (tm->vhash_get_results, tm->n_keys, CLIB_CACHE_LINE_BYTES);
+ vec_resize_aligned (tm->vhash_get_results, tm->n_keys,
+ CLIB_CACHE_LINE_BYTES);
vec_clone (tm->vhash_get_key_indices, tm->results);
for (i = 0; i < vec_len (tm->vhash_get_key_indices); i++)
tm->vhash_get_key_indices[i] = i;
{
- uword * is_set_bitmap = 0;
- uword * to_set_bitmap = 0;
- uword * to_unset_bitmap = 0;
- u32 * to_set = 0, * to_unset = 0;
- u32 * to_set_results = 0, * to_unset_results = 0;
+ uword *is_set_bitmap = 0;
+ uword *to_set_bitmap = 0;
+ uword *to_unset_bitmap = 0;
+ u32 *to_set = 0, *to_unset = 0;
+ u32 *to_set_results = 0, *to_unset_results = 0;
u64 t[2];
for (i = 0; i < tm->n_iter; i++)
@@ -557,28 +559,32 @@ int test_vhash_main (unformat_input_t * input)
vec_reset_length (to_set_results);
vec_reset_length (to_unset_results);
- do {
- to_set_bitmap = clib_bitmap_random (to_set_bitmap,
- tm->n_keys, &tm->seed);
- } while (clib_bitmap_is_zero (to_set_bitmap));
+ do
+ {
+ to_set_bitmap = clib_bitmap_random (to_set_bitmap,
+ tm->n_keys, &tm->seed);
+ }
+ while (clib_bitmap_is_zero (to_set_bitmap));
to_unset_bitmap = clib_bitmap_dup_and (to_set_bitmap, is_set_bitmap);
to_set_bitmap = clib_bitmap_andnot (to_set_bitmap, to_unset_bitmap);
+ /* *INDENT-OFF* */
clib_bitmap_foreach (j, to_set_bitmap, ({
vec_add1 (to_set, j);
vec_add1 (to_set_results, tm->results[j]);
}));
+ /* *INDENT-ON* */
+ /* *INDENT-OFF* */
clib_bitmap_foreach (j, to_unset_bitmap, ({
vec_add1 (to_unset, j);
vec_add1 (to_unset_results, 0xdeadbeef);
}));
+ /* *INDENT-ON* */
if (vec_len (to_set) > 0)
{
t[0] = clib_cpu_time_now ();
- test_vhash_op (tm, to_set, to_set_results,
- vec_len (to_set),
- SET);
+ test_vhash_op (tm, to_set, to_set_results, vec_len (to_set), SET);
t[1] = clib_cpu_time_now ();
tm->set_stats.n_clocks += t[1] - t[0];
tm->set_stats.n_vectors += vec_len (to_set);
@@ -589,8 +595,7 @@ int test_vhash_main (unformat_input_t * input)
t[0] = clib_cpu_time_now ();
test_vhash_op (tm, tm->vhash_get_key_indices,
tm->vhash_get_results,
- vec_len (tm->vhash_get_key_indices),
- GET);
+ vec_len (tm->vhash_get_key_indices), GET);
t[1] = clib_cpu_time_now ();
tm->get_stats.n_clocks += t[1] - t[0];
tm->get_stats.n_vectors += vec_len (tm->vhash_get_key_indices);
@@ -619,20 +624,19 @@ int test_vhash_main (unformat_input_t * input)
{
t[0] = clib_cpu_time_now ();
test_vhash_op (tm, to_unset, to_unset_results,
- vec_len (to_unset),
- UNSET);
+ vec_len (to_unset), UNSET);
t[1] = clib_cpu_time_now ();
tm->unset_stats.n_clocks += t[1] - t[0];
tm->unset_stats.n_vectors += vec_len (to_unset);
tm->unset_stats.n_calls += 1;
- is_set_bitmap = clib_bitmap_andnot (is_set_bitmap, to_unset_bitmap);
+ is_set_bitmap =
+ clib_bitmap_andnot (is_set_bitmap, to_unset_bitmap);
}
t[0] = clib_cpu_time_now ();
test_vhash_op (tm, tm->vhash_get_key_indices,
tm->vhash_get_results,
- vec_len (tm->vhash_get_key_indices),
- GET);
+ vec_len (tm->vhash_get_key_indices), GET);
t[1] = clib_cpu_time_now ();
tm->get_stats.n_clocks += t[1] - t[0];
tm->get_stats.n_vectors += vec_len (tm->vhash_get_key_indices);
@@ -662,8 +666,7 @@ int test_vhash_main (unformat_input_t * input)
test_vhash_op (tm, tm->vhash_get_key_indices,
tm->vhash_get_results,
- vec_len (tm->vhash_get_key_indices),
- GET);
+ vec_len (tm->vhash_get_key_indices), GET);
for (j = 0; j < vec_len (tm->vhash_get_results); j++)
{
@@ -691,22 +694,30 @@ int test_vhash_main (unformat_input_t * input)
clib_time_init (&ct);
clib_warning ("%.4e clocks/get %.4e gets/call %.4e gets/sec",
- (f64) tm->get_stats.n_clocks / (f64) tm->get_stats.n_vectors,
+ (f64) tm->get_stats.n_clocks /
+ (f64) tm->get_stats.n_vectors,
(f64) tm->get_stats.n_vectors / (f64) tm->get_stats.n_calls,
- (f64) tm->get_stats.n_vectors / (f64) (tm->get_stats.n_clocks * ct.seconds_per_clock));
+ (f64) tm->get_stats.n_vectors /
+ (f64) (tm->get_stats.n_clocks * ct.seconds_per_clock));
if (tm->set_stats.n_calls > 0)
clib_warning ("%.4e clocks/set %.4e sets/call %.4e sets/sec",
- (f64) tm->set_stats.n_clocks / (f64) tm->set_stats.n_vectors,
- (f64) tm->set_stats.n_vectors / (f64) tm->set_stats.n_calls,
- (f64) tm->set_stats.n_vectors / (f64) (tm->set_stats.n_clocks * ct.seconds_per_clock));
+ (f64) tm->set_stats.n_clocks /
+ (f64) tm->set_stats.n_vectors,
+ (f64) tm->set_stats.n_vectors /
+ (f64) tm->set_stats.n_calls,
+ (f64) tm->set_stats.n_vectors /
+ (f64) (tm->set_stats.n_clocks * ct.seconds_per_clock));
if (tm->unset_stats.n_calls > 0)
clib_warning ("%.4e clocks/unset %.4e unsets/call %.4e unsets/sec",
- (f64) tm->unset_stats.n_clocks / (f64) tm->unset_stats.n_vectors,
- (f64) tm->unset_stats.n_vectors / (f64) tm->unset_stats.n_calls,
- (f64) tm->unset_stats.n_vectors / (f64) (tm->unset_stats.n_clocks * ct.seconds_per_clock));
+ (f64) tm->unset_stats.n_clocks /
+ (f64) tm->unset_stats.n_vectors,
+ (f64) tm->unset_stats.n_vectors /
+ (f64) tm->unset_stats.n_calls,
+ (f64) tm->unset_stats.n_vectors /
+ (f64) (tm->unset_stats.n_clocks * ct.seconds_per_clock));
}
- done:
+done:
if (error)
clib_error_report (error);
return 0;
@@ -715,7 +726,8 @@ int test_vhash_main (unformat_input_t * input)
#endif /* CLIB_HAVE_VEC128 */
#ifndef CLIB_HAVE_VEC128
-int test_vhash_main (unformat_input_t * input)
+int
+test_vhash_main (unformat_input_t * input)
{
clib_error ("compiled without vector support");
return 0;
@@ -723,7 +735,8 @@ int test_vhash_main (unformat_input_t * input)
#endif
#ifdef CLIB_UNIX
-int main (int argc, char * argv [])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int r;
@@ -734,3 +747,11 @@ int main (int argc, char * argv [])
return r;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
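
The get/set/unset statistics gathered above reduce to a simple accumulator: wrap each batched call in two cycle-counter reads, add the delta and the element count, and report clocks per element at the end. A minimal sketch of that bookkeeping; cycles() is a placeholder standing in for clib_cpu_time_now():

#include <stdint.h>

/* Per-operation timing accumulator, as kept for get/set/unset above. */
typedef struct
{
  uint64_t n_clocks;		/* total CPU clocks spent in the operation */
  uint64_t n_vectors;		/* total elements processed */
  uint64_t n_calls;		/* number of batched calls */
} op_stats_t;

/* Placeholder for a cycle-counter read such as clib_cpu_time_now(). */
extern uint64_t cycles (void);

static inline void
record_batch (op_stats_t * s, uint64_t t_before, uint64_t t_after,
	      uint64_t n_elements)
{
  s->n_clocks += t_after - t_before;
  s->n_vectors += n_elements;
  s->n_calls += 1;
}

/* Clocks per element, the figure reported as "clocks/get" etc. above. */
static inline double
clocks_per_element (const op_stats_t * s)
{
  return s->n_vectors ? (double) s->n_clocks / (double) s->n_vectors : 0.0;
}
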
diff --git a/vppinfra/vppinfra/test_zvec.c b/vppinfra/vppinfra/test_zvec.c
index f66bdfbea3c..874fdefa4ad 100644
--- a/vppinfra/vppinfra/test_zvec.c
+++ b/vppinfra/vppinfra/test_zvec.c
@@ -43,12 +43,13 @@ static int verbose;
#define if_verbose(format,args...) \
if (verbose) { clib_warning(format, ## args); }
-int test_zvec_main (unformat_input_t * input)
+int
+test_zvec_main (unformat_input_t * input)
{
uword n_iterations;
uword i;
u32 seed;
-
+
n_iterations = 1024;
seed = 0;
@@ -59,7 +60,7 @@ int test_zvec_main (unformat_input_t * input)
clib_error ("unknown input `%U'", format_unformat_error, input);
}
- if_verbose ("%d iterations, seed %d\n", n_iterations, seed);
+ if_verbose ("%d iterations, seed %d\n", n_iterations, seed);
for (i = 0; i < n_iterations; i++)
{
@@ -91,7 +92,8 @@ int test_zvec_main (unformat_input_t * input)
}
#ifdef CLIB_UNIX
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
unformat_input_t i;
int ret;
@@ -105,3 +107,11 @@ int main (int argc, char * argv[])
}
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/time.c b/vppinfra/vppinfra/time.c
index 9af599ac10c..e62ee8636ac 100644
--- a/vppinfra/vppinfra/time.c
+++ b/vppinfra/vppinfra/time.c
@@ -45,9 +45,10 @@
#include <sys/time.h>
#include <fcntl.h>
-/* Not very accurate way of determining cpu clock frequency
+/* Not very accurate way of determining cpu clock frequency
for unix. Better to use /proc/cpuinfo on linux. */
-static f64 estimate_clock_frequency (f64 sample_time)
+static f64
+estimate_clock_frequency (f64 sample_time)
{
/* Round to nearest 100KHz. */
const f64 round_to_units = 100e5;
@@ -70,11 +71,12 @@ static f64 estimate_clock_frequency (f64 sample_time)
}
/* Fetch cpu frequency via parsing /proc/cpuinfo.
- Only works for Linux. */
-static f64 clock_frequency_from_proc_filesystem (void)
+ Only works for Linux. */
+static f64
+clock_frequency_from_proc_filesystem (void)
{
- f64 cpu_freq=1e9; /* better than 40... */
- f64 ppc_timebase=0; /* warnings be gone */
+ f64 cpu_freq = 1e9; /* better than 40... */
+ f64 ppc_timebase = 0; /* warnings be gone */
int fd;
unformat_input_t input;
@@ -82,7 +84,7 @@ static f64 clock_frequency_from_proc_filesystem (void)
#if defined(__aarch64__)
return 0.0;
#endif
-
+
cpu_freq = 0;
fd = open ("/proc/cpuinfo", 0);
if (fd < 0)
@@ -113,8 +115,9 @@ static f64 clock_frequency_from_proc_filesystem (void)
}
/* Fetch cpu frequency via reading /sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq
- Only works for Linux. */
-static f64 clock_frequency_from_sys_filesystem (void)
+ Only works for Linux. */
+static f64
+clock_frequency_from_sys_filesystem (void)
{
f64 cpu_freq;
int fd;
@@ -131,11 +134,12 @@ static f64 clock_frequency_from_sys_filesystem (void)
cpu_freq *= 1e3; /* measured in kHz */
unformat_free (&input);
close (fd);
- done:
+done:
return cpu_freq;
}
-f64 os_cpu_clock_frequency (void)
+f64
+os_cpu_clock_frequency (void)
{
f64 cpu_freq;
@@ -157,7 +161,8 @@ f64 os_cpu_clock_frequency (void)
#endif /* CLIB_UNIX */
/* Initialize time. */
-void clib_time_init (clib_time_t * c)
+void
+clib_time_init (clib_time_t * c)
{
memset (c, 0, sizeof (c[0]));
c->clocks_per_second = os_cpu_clock_frequency ();
@@ -172,7 +177,8 @@ void clib_time_init (clib_time_t * c)
c->init_cpu_time = c->last_verify_cpu_time = c->last_cpu_time;
}
-void clib_time_verify_frequency (clib_time_t * c)
+void
+clib_time_verify_frequency (clib_time_t * c)
{
f64 now_reference = unix_time_now ();
f64 dtr = now_reference - c->last_verify_reference_time;
@@ -183,13 +189,13 @@ void clib_time_verify_frequency (clib_time_t * c)
c->last_verify_cpu_time = c->last_cpu_time;
c->last_verify_reference_time = now_reference;
- /*
- * Is the reported reference interval non-positive,
- * or off by a factor of two - or 8 seconds - whichever is larger?
+ /*
+ * Is the reported reference interval non-positive,
+ * or off by a factor of two - or 8 seconds - whichever is larger?
* Someone reset the clock behind our back.
*/
- dtr_max = (f64)(2ULL<<c->log2_clocks_per_frequency_verify) /
- (f64)(1ULL<<c->log2_clocks_per_second);
+ dtr_max = (f64) (2ULL << c->log2_clocks_per_frequency_verify) /
+ (f64) (1ULL << c->log2_clocks_per_second);
dtr_max = dtr_max > 8.0 ? dtr_max : 8.0;
if (dtr <= 0.0 || dtr > dtr_max)
@@ -198,10 +204,19 @@ void clib_time_verify_frequency (clib_time_t * c)
return;
}
- c->clocks_per_second = flt_round_nearest ((f64) dtc / (dtr * round_units)) * round_units;
+ c->clocks_per_second =
+ flt_round_nearest ((f64) dtc / (dtr * round_units)) * round_units;
c->seconds_per_clock = 1 / c->clocks_per_second;
/* Double time between verifies; max at 64 secs ~ 1 minute. */
if (c->log2_clocks_per_frequency_verify < c->log2_clocks_per_second + 6)
c->log2_clocks_per_frequency_verify += 1;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
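
The frequency re-check in clib_time_verify_frequency() above boils down to dividing an elapsed cycle count by an elapsed wall-clock interval and rounding the result to a fixed granularity. A standalone sketch of that calculation; the helper declarations and the 100 kHz rounding unit are illustrative assumptions, not vppinfra APIs:

#include <math.h>
#include <stdint.h>

/* Placeholders standing in for clib_cpu_time_now() and unix_time_now(). */
extern uint64_t read_cycle_counter (void);
extern double wall_clock_seconds (void);

/* Re-estimate CPU clock frequency from two (cycle, wall-clock) samples,
   rounding to the nearest 100 kHz as an illustrative granularity.
   Usage sketch:
     uint64_t c0 = read_cycle_counter (); double t0 = wall_clock_seconds ();
     ... later ...
     double hz = estimate_clocks_per_second (c0, t0, read_cycle_counter (),
					     wall_clock_seconds ());       */
static double
estimate_clocks_per_second (uint64_t c0, double t0, uint64_t c1, double t1)
{
  const double round_to_units = 100e3;
  double dtc = (double) (c1 - c0);	/* elapsed CPU clocks */
  double dtr = t1 - t0;			/* elapsed reference seconds */

  if (dtr <= 0.0)
    return 0.0;				/* reference clock stepped; skip update */
  return round (dtc / (dtr * round_to_units)) * round_to_units;
}
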
diff --git a/vppinfra/vppinfra/time.h b/vppinfra/vppinfra/time.h
index ffafaf70b21..3b89cf789fe 100644
--- a/vppinfra/vppinfra/time.h
+++ b/vppinfra/vppinfra/time.h
@@ -40,7 +40,8 @@
#include <vppinfra/clib.h>
-typedef struct {
+typedef struct
+{
/* Total run time in clock cycles
since clib_time_init call. */
u64 total_cpu_time;
@@ -68,71 +69,74 @@ typedef struct {
/* Return CPU time stamp as 64bit number. */
#if defined(__x86_64__) || defined(i386)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 a, d;
- asm volatile ("rdtsc"
- : "=a" (a), "=d" (d));
+ asm volatile ("rdtsc":"=a" (a), "=d" (d));
return (u64) a + ((u64) d << (u64) 32);
}
#elif defined (__powerpc64__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u64 t;
- asm volatile ("mftb %0" : "=r" (t));
+ asm volatile ("mftb %0":"=r" (t));
return t;
}
#elif defined (__SPU__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
#ifdef _XLC
return spu_rdch (0x8);
#else
- return 0 /* __builtin_si_rdch (0x8) FIXME */;
+ return 0 /* __builtin_si_rdch (0x8) FIXME */ ;
#endif
}
#elif defined (__powerpc__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 hi1, hi2, lo;
- asm volatile (
- "1:\n"
- "mftbu %[hi1]\n"
- "mftb %[lo]\n"
- "mftbu %[hi2]\n"
- "cmpw %[hi1],%[hi2]\n"
- "bne 1b\n"
- : [hi1] "=r" (hi1), [hi2] "=r" (hi2), [lo] "=r" (lo));
+ asm volatile ("1:\n"
+ "mftbu %[hi1]\n"
+ "mftb %[lo]\n"
+ "mftbu %[hi2]\n"
+ "cmpw %[hi1],%[hi2]\n"
+ "bne 1b\n":[hi1] "=r" (hi1),[hi2] "=r" (hi2),[lo] "=r" (lo));
return (u64) lo + ((u64) hi2 << (u64) 32);
}
#elif defined (__arm__)
#if defined(__ARM_ARCH_8A__)
-always_inline u64 clib_cpu_time_now (void) /* We may run arm64 in aarch32 mode, to leverage 64bit counter */
+always_inline u64
+clib_cpu_time_now (void) /* We may run arm64 in aarch32 mode, to leverage 64bit counter */
{
u64 tsc;
- asm volatile("mrrc p15, 0, %Q0, %R0, c9" : "=r" (tsc));
+ asm volatile ("mrrc p15, 0, %Q0, %R0, c9":"=r" (tsc));
return tsc;
}
#elif defined(__ARM_ARCH_7A__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 tsc;
- asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (tsc));
- return (u64)tsc;
+ asm volatile ("mrc p15, 0, %0, c9, c13, 0":"=r" (tsc));
+ return (u64) tsc;
}
#else
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 lo;
- asm volatile ("mrc p15, 0, %[lo], c15, c12, 1"
- : [lo] "=r" (lo));
+ asm volatile ("mrc p15, 0, %[lo], c15, c12, 1":[lo] "=r" (lo));
return (u64) lo;
}
#endif
@@ -140,31 +144,34 @@ always_inline u64 clib_cpu_time_now (void)
#elif defined (__xtensa__)
/* Stub for now. */
-always_inline u64 clib_cpu_time_now (void)
-{ return 0; }
+always_inline u64
+clib_cpu_time_now (void)
+{
+ return 0;
+}
#elif defined (__TMS320C6X__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u32 l, h;
asm volatile (" dint\n"
" mvc .s2 TSCL,%0\n"
- " mvc .s2 TSCH,%1\n"
- " rint\n"
- : "=b" (l), "=b" (h));
+ " mvc .s2 TSCH,%1\n" " rint\n":"=b" (l), "=b" (h));
- return ((u64)h << 32) | l;
+ return ((u64) h << 32) | l;
}
#elif defined (__aarch64__)
-always_inline u64 clib_cpu_time_now (void)
+always_inline u64
+clib_cpu_time_now (void)
{
u64 tsc;
/* Works on Cavium ThunderX. Other platforms: YMMV */
- asm volatile("mrs %0, cntvct_el0" : "=r" (tsc));
+ asm volatile ("mrs %0, cntvct_el0":"=r" (tsc));
return tsc;
}
@@ -176,14 +183,17 @@ always_inline u64 clib_cpu_time_now (void)
void clib_time_verify_frequency (clib_time_t * c);
-always_inline f64 clib_time_now_internal (clib_time_t * c, u64 n)
+always_inline f64
+clib_time_now_internal (clib_time_t * c, u64 n)
{
u64 l = c->last_cpu_time;
u64 t = c->total_cpu_time;
t += n - l;
c->total_cpu_time = t;
c->last_cpu_time = n;
- if (PREDICT_FALSE ((c->last_cpu_time - c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
+ if (PREDICT_FALSE
+ ((c->last_cpu_time -
+ c->last_verify_cpu_time) >> c->log2_clocks_per_frequency_verify))
clib_time_verify_frequency (c);
return t * c->seconds_per_clock;
}
@@ -191,10 +201,11 @@ always_inline f64 clib_time_now_internal (clib_time_t * c, u64 n)
always_inline f64
clib_time_now (clib_time_t * c)
{
- return clib_time_now_internal (c, clib_cpu_time_now());
+ return clib_time_now_internal (c, clib_cpu_time_now ());
}
-always_inline void clib_cpu_time_wait (u64 dt)
+always_inline void
+clib_cpu_time_wait (u64 dt)
{
u64 t_end = clib_cpu_time_now () + dt;
while (clib_cpu_time_now () < t_end)
@@ -212,32 +223,36 @@ void clib_time_init (clib_time_t * c);
#include <sys/syscall.h>
/* Use 64bit floating point to represent time offset from epoch. */
-always_inline f64 unix_time_now (void)
+always_inline f64
+unix_time_now (void)
{
/* clock_gettime without indirect syscall uses GLIBC wrappers which
we don't want. Just the bare metal, please. */
struct timespec ts;
syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
- return ts.tv_sec + 1e-9*ts.tv_nsec;
+ return ts.tv_sec + 1e-9 * ts.tv_nsec;
}
/* As above but integer number of nano-seconds. */
-always_inline u64 unix_time_now_nsec (void)
+always_inline u64
+unix_time_now_nsec (void)
{
struct timespec ts;
syscall (SYS_clock_gettime, CLOCK_REALTIME, &ts);
- return 1e9*ts.tv_sec + ts.tv_nsec;
+ return 1e9 * ts.tv_sec + ts.tv_nsec;
}
-always_inline f64 unix_usage_now (void)
+always_inline f64
+unix_usage_now (void)
{
struct rusage u;
getrusage (RUSAGE_SELF, &u);
- return u.ru_utime.tv_sec + 1e-6*u.ru_utime.tv_usec
- + u.ru_stime.tv_sec + 1e-6*u.ru_stime.tv_usec;
+ return u.ru_utime.tv_sec + 1e-6 * u.ru_utime.tv_usec
+ + u.ru_stime.tv_sec + 1e-6 * u.ru_stime.tv_usec;
}
-always_inline void unix_sleep (f64 dt)
+always_inline void
+unix_sleep (f64 dt)
{
struct timespec t;
t.tv_sec = dt;
@@ -245,20 +260,39 @@ always_inline void unix_sleep (f64 dt)
nanosleep (&t, 0);
}
-#else /* ! CLIB_UNIX */
+#else /* ! CLIB_UNIX */
-always_inline f64 unix_time_now (void)
-{ return 0; }
+always_inline f64
+unix_time_now (void)
+{
+ return 0;
+}
-always_inline u64 unix_time_now_nsec (void)
-{ return 0; }
+always_inline u64
+unix_time_now_nsec (void)
+{
+ return 0;
+}
-always_inline f64 unix_usage_now (void)
-{ return 0; }
+always_inline f64
+unix_usage_now (void)
+{
+ return 0;
+}
-always_inline void unix_sleep (f64 dt)
-{ }
+always_inline void
+unix_sleep (f64 dt)
+{
+}
#endif
#endif /* included_time_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
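
For readers skimming the cycle-counter readers above, this is the core pattern on x86-64 together with the clocks-to-seconds conversion done by clib_time_now_internal(): read the TSC, accumulate the delta since the last read, and scale by seconds-per-clock. The struct and function names below are illustrative, and the periodic frequency re-verification is omitted:

#include <stdint.h>

typedef struct
{
  uint64_t last_cpu_time;	/* cycle count at previous reading */
  uint64_t total_cpu_time;	/* cycles accumulated since init */
  double seconds_per_clock;	/* 1.0 / measured clock frequency */
} cycle_clock_t;

/* x86-64 rdtsc, as in clib_cpu_time_now() above. */
static inline uint64_t
cycles_now (void)
{
  uint32_t lo, hi;
  asm volatile ("rdtsc" : "=a" (lo), "=d" (hi));
  return (uint64_t) lo | ((uint64_t) hi << 32);
}

/* Accumulate the cycle delta and convert to seconds, mirroring
   clib_time_now_internal() minus the frequency re-check. */
static inline double
seconds_now (cycle_clock_t * c)
{
  uint64_t n = cycles_now ();
  c->total_cpu_time += n - c->last_cpu_time;
  c->last_cpu_time = n;
  return (double) c->total_cpu_time * c->seconds_per_clock;
}
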
diff --git a/vppinfra/vppinfra/timer.c b/vppinfra/vppinfra/timer.c
index 2c71417f289..0221cb749a1 100644
--- a/vppinfra/vppinfra/timer.c
+++ b/vppinfra/vppinfra/timer.c
@@ -45,20 +45,22 @@
#include <vppinfra/timer.h>
#include <vppinfra/error.h>
-typedef struct {
+typedef struct
+{
f64 time;
- timer_func_t * func;
+ timer_func_t *func;
any arg;
} timer_callback_t;
/* Vector of currently unexpired timers. */
-static timer_callback_t * timers;
+static timer_callback_t *timers;
/* Convert time from 64bit floating format to struct timeval. */
-always_inline void f64_to_tv (f64 t, struct timeval * tv)
+always_inline void
+f64_to_tv (f64 t, struct timeval *tv)
{
tv->tv_sec = t;
- tv->tv_usec = 1e6*(t - tv->tv_sec);
+ tv->tv_usec = 1e6 * (t - tv->tv_sec);
while (tv->tv_usec >= 1000000)
{
tv->tv_usec -= 1000000;
@@ -67,16 +69,20 @@ always_inline void f64_to_tv (f64 t, struct timeval * tv)
}
/* Sort timers so that timer soonest to expire is at end. */
-static int timer_compare (const void * _a, const void * _b)
+static int
+timer_compare (const void *_a, const void *_b)
{
- const timer_callback_t * a = _a;
- const timer_callback_t * b = _b;
+ const timer_callback_t *a = _a;
+ const timer_callback_t *b = _b;
f64 dt = b->time - a->time;
return dt < 0 ? -1 : (dt > 0 ? +1 : 0);
}
-static inline void sort_timers (timer_callback_t * timers)
-{ qsort (timers, vec_len (timers), sizeof (timers[0]), timer_compare); }
+static inline void
+sort_timers (timer_callback_t * timers)
+{
+ qsort (timers, vec_len (timers), sizeof (timers[0]), timer_compare);
+}
#define TIMER_SIGNAL SIGALRM
@@ -89,11 +95,12 @@ static f64 time_resolution;
/* Interrupt handler. Call functions for all expired timers.
Set time for next timer interrupt. */
-static void timer_interrupt (int signum)
+static void
+timer_interrupt (int signum)
{
f64 now = unix_time_now ();
f64 dt;
- timer_callback_t * t;
+ timer_callback_t *t;
while (1)
{
@@ -101,7 +108,7 @@ static void timer_interrupt (int signum)
return;
/* Consider last (earliest) timer in reverse sorted
- vector of pending timers. */
+ vector of pending timers. */
t = vec_end (timers) - 1;
ASSERT (now >= 0 && finite (now));
@@ -110,11 +117,11 @@ static void timer_interrupt (int signum)
dt = t->time - now;
/* If timer is within threshold of going off
- call user's callback. */
+ call user's callback. */
if (dt <= time_resolution && finite (dt))
{
_vec_len (timers) -= 1;
- (* t->func) (t->arg, -dt);
+ (*t->func) (t->arg, -dt);
}
else
{
@@ -129,7 +136,8 @@ static void timer_interrupt (int signum)
}
}
-void timer_block (sigset_t * save)
+void
+timer_block (sigset_t * save)
{
sigset_t block_timer;
@@ -138,20 +146,24 @@ void timer_block (sigset_t * save)
sigprocmask (SIG_BLOCK, &block_timer, save);
}
-void timer_unblock (sigset_t * save)
-{ sigprocmask (SIG_SETMASK, save, 0); }
+void
+timer_unblock (sigset_t * save)
+{
+ sigprocmask (SIG_SETMASK, save, 0);
+}
/* Arrange for function to be called some time,
roughly equal to dt seconds, in the future. */
-void timer_call (timer_func_t * func, any arg, f64 dt)
+void
+timer_call (timer_func_t * func, any arg, f64 dt)
{
- timer_callback_t * t;
+ timer_callback_t *t;
sigset_t save;
/* Install signal handler on first call. */
static word signal_installed = 0;
- if (! signal_installed)
+ if (!signal_installed)
{
struct sigaction sa;
@@ -181,7 +193,7 @@ void timer_call (timer_func_t * func, any arg, f64 dt)
if (_vec_len (timers) > 1)
{
- reset_timer += t->time < (t-1)->time;
+ reset_timer += t->time < (t - 1)->time;
sort_timers (timers);
}
@@ -203,33 +215,38 @@ void timer_call (timer_func_t * func, any arg, f64 dt)
static f64 ave_delay = 0;
static word ave_delay_count = 0;
-always_inline update (f64 delay)
+always_inline
+update (f64 delay)
{
ave_delay += delay;
ave_delay_count += 1;
}
-typedef struct {
+typedef struct
+{
f64 time_requested, time_called;
} foo_t;
static f64 foo_base_time = 0;
-static foo_t * foos = 0;
+static foo_t *foos = 0;
-void foo (any arg, f64 delay)
+void
+foo (any arg, f64 delay)
{
foos[arg].time_called = unix_time_now () - foo_base_time;
update (delay);
}
-typedef struct {
+typedef struct
+{
word count;
word limit;
} bar_t;
-void bar (any arg, f64 delay)
+void
+bar (any arg, f64 delay)
{
- bar_t * b = (bar_t *) arg;
+ bar_t *b = (bar_t *) arg;
fformat (stdout, "bar %d delay %g\n", b->count++, delay);
@@ -238,11 +255,12 @@ void bar (any arg, f64 delay)
timer_call (bar, arg, random_f64 ());
}
-int main (int argc, char * argv[])
+int
+main (int argc, char *argv[])
{
word i, n = atoi (argv[1]);
word run_foo = argc > 2;
- bar_t b = {limit: 10};
+bar_t b = { limit:10 };
if (run_foo)
{
@@ -262,7 +280,7 @@ int main (int argc, char * argv[])
timer_call (foo, i, foos[i].time_requested);
}
else
- timer_call (bar, (any) &b, random_f64 ());
+ timer_call (bar, (any) & b, random_f64 ());
while (vec_len (timers) > 0)
sched_yield ();
@@ -280,11 +298,12 @@ int main (int argc, char * argv[])
if (dt > max)
max = dt;
ave += dt;
- rms += dt*dt;
+ rms += dt * dt;
}
ave /= n;
- rms = sqrt (rms / n - ave*ave);
- fformat (stdout, "error min %g max %g ave %g +- %g\n", min, max, ave, rms);
+ rms = sqrt (rms / n - ave * ave);
+ fformat (stdout, "error min %g max %g ave %g +- %g\n", min, max, ave,
+ rms);
}
fformat (stdout, "%d function calls, ave. timer delay %g secs\n",
@@ -293,3 +312,11 @@ int main (int argc, char * argv[])
return 0;
}
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
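
f64_to_tv() above converts a floating-point second count into a struct timeval, normalizing the microsecond field back into range. A minimal standalone equivalent, illustrative only:

#include <sys/time.h>

/* Split a floating-point second count into tv_sec/tv_usec,
   keeping tv_usec in [0, 1000000). */
static void
seconds_to_timeval (double t, struct timeval *tv)
{
  tv->tv_sec = (time_t) t;
  tv->tv_usec = (suseconds_t) (1e6 * (t - (double) tv->tv_sec));
  while (tv->tv_usec >= 1000000)
    {
      tv->tv_usec -= 1000000;
      tv->tv_sec += 1;
    }
}
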
diff --git a/vppinfra/vppinfra/timer.h b/vppinfra/vppinfra/timer.h
index d029d96eed9..764103f702d 100644
--- a/vppinfra/vppinfra/timer.h
+++ b/vppinfra/vppinfra/timer.h
@@ -36,3 +36,11 @@ extern void timer_block (sigset_t * save);
extern void timer_unblock (sigset_t * save);
#endif /* included_timer_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/timing_wheel.c b/vppinfra/vppinfra/timing_wheel.c
index 931a4595aa9..902140c9e06 100644
--- a/vppinfra/vppinfra/timing_wheel.c
+++ b/vppinfra/vppinfra/timing_wheel.c
@@ -18,7 +18,8 @@
#include <vppinfra/timing_wheel.h>
void
-timing_wheel_init (timing_wheel_t * w, u64 current_cpu_time, f64 cpu_clocks_per_second)
+timing_wheel_init (timing_wheel_t * w, u64 current_cpu_time,
+ f64 cpu_clocks_per_second)
{
if (w->max_sched_time <= w->min_sched_time)
{
@@ -27,8 +28,10 @@ timing_wheel_init (timing_wheel_t * w, u64 current_cpu_time, f64 cpu_clocks_per_
}
w->cpu_clocks_per_second = cpu_clocks_per_second;
- w->log2_clocks_per_bin = max_log2 (w->cpu_clocks_per_second * w->min_sched_time);
- w->log2_bins_per_wheel = max_log2 (w->cpu_clocks_per_second * w->max_sched_time);
+ w->log2_clocks_per_bin =
+ max_log2 (w->cpu_clocks_per_second * w->min_sched_time);
+ w->log2_bins_per_wheel =
+ max_log2 (w->cpu_clocks_per_second * w->max_sched_time);
w->log2_bins_per_wheel -= w->log2_clocks_per_bin;
w->log2_clocks_per_wheel = w->log2_bins_per_wheel + w->log2_clocks_per_bin;
w->bins_per_wheel = 1 << w->log2_bins_per_wheel;
@@ -37,16 +40,21 @@ timing_wheel_init (timing_wheel_t * w, u64 current_cpu_time, f64 cpu_clocks_per_
w->current_time_index = current_cpu_time >> w->log2_clocks_per_bin;
if (w->n_wheel_elt_time_bits <= 0 ||
- w->n_wheel_elt_time_bits >= STRUCT_BITS_OF (timing_wheel_elt_t, cpu_time_relative_to_base))
- w->n_wheel_elt_time_bits = STRUCT_BITS_OF (timing_wheel_elt_t, cpu_time_relative_to_base) - 1;
+ w->n_wheel_elt_time_bits >= STRUCT_BITS_OF (timing_wheel_elt_t,
+ cpu_time_relative_to_base))
+ w->n_wheel_elt_time_bits =
+ STRUCT_BITS_OF (timing_wheel_elt_t, cpu_time_relative_to_base) - 1;
w->cpu_time_base = current_cpu_time;
w->time_index_next_cpu_time_base_update
- = w->current_time_index + ((u64) 1 << (w->n_wheel_elt_time_bits - w->log2_clocks_per_bin));
+ =
+ w->current_time_index +
+ ((u64) 1 << (w->n_wheel_elt_time_bits - w->log2_clocks_per_bin));
}
always_inline uword
-get_level_and_relative_time (timing_wheel_t * w, u64 cpu_time, uword * rtime_result)
+get_level_and_relative_time (timing_wheel_t * w, u64 cpu_time,
+ uword * rtime_result)
{
u64 dt, rtime;
uword level_index;
@@ -71,17 +79,24 @@ get_level_and_relative_time (timing_wheel_t * w, u64 cpu_time, uword * rtime_res
always_inline uword
time_index_to_wheel_index (timing_wheel_t * w, uword level_index, u64 ti)
-{ return (ti >> (level_index * w->log2_bins_per_wheel)) & w->bins_per_wheel_mask; }
+{
+ return (ti >> (level_index * w->log2_bins_per_wheel)) &
+ w->bins_per_wheel_mask;
+}
/* Find current time on this level. */
always_inline uword
current_time_wheel_index (timing_wheel_t * w, uword level_index)
-{ return time_index_to_wheel_index (w, level_index, w->current_time_index); }
+{
+ return time_index_to_wheel_index (w, level_index, w->current_time_index);
+}
/* Circular wheel indexing. */
always_inline uword
wheel_add (timing_wheel_t * w, word x)
-{ return x & w->bins_per_wheel_mask; }
+{
+ return x & w->bins_per_wheel_mask;
+}
always_inline uword
rtime_to_wheel_index (timing_wheel_t * w, uword level_index, uword rtime)
@@ -93,11 +108,11 @@ rtime_to_wheel_index (timing_wheel_t * w, uword level_index, uword rtime)
static clib_error_t *
validate_level (timing_wheel_t * w, uword level_index, uword * n_elts)
{
- timing_wheel_level_t * level;
- timing_wheel_elt_t * e;
+ timing_wheel_level_t *level;
+ timing_wheel_elt_t *e;
uword wi;
- clib_error_t * error = 0;
-
+ clib_error_t *error = 0;
+
#define _(x) \
do { \
error = CLIB_ERROR_ASSERT (x); \
@@ -109,34 +124,35 @@ validate_level (timing_wheel_t * w, uword level_index, uword * n_elts)
for (wi = 0; wi < vec_len (level->elts); wi++)
{
/* Validate occupancy bitmap. */
- _ (clib_bitmap_get_no_check (level->occupancy_bitmap, wi) == (vec_len (level->elts[wi]) > 0));
+ _(clib_bitmap_get_no_check (level->occupancy_bitmap, wi) ==
+ (vec_len (level->elts[wi]) > 0));
*n_elts += vec_len (level->elts[wi]);
vec_foreach (e, level->elts[wi])
- {
- /* Validate time bin and level. */
- u64 e_time;
- uword e_ti, e_li, e_wi;
-
- e_time = e->cpu_time_relative_to_base + w->cpu_time_base;
- e_li = get_level_and_relative_time (w, e_time, &e_ti);
- e_wi = rtime_to_wheel_index (w, level_index, e_ti);
-
- if (e_li == level_index - 1)
- /* If this element was scheduled on the previous level
- it must be wrapped. */
- _ (e_ti + current_time_wheel_index (w, level_index - 1)
- >= w->bins_per_wheel);
- else
- {
- _ (e_li == level_index);
- if (e_li == 0)
- _ (e_wi == wi);
- else
- _ (e_wi == wi || e_wi + 1 == wi || e_wi - 1 == wi);
- }
- }
+ {
+ /* Validate time bin and level. */
+ u64 e_time;
+ uword e_ti, e_li, e_wi;
+
+ e_time = e->cpu_time_relative_to_base + w->cpu_time_base;
+ e_li = get_level_and_relative_time (w, e_time, &e_ti);
+ e_wi = rtime_to_wheel_index (w, level_index, e_ti);
+
+ if (e_li == level_index - 1)
+ /* If this element was scheduled on the previous level
+ it must be wrapped. */
+ _(e_ti + current_time_wheel_index (w, level_index - 1)
+ >= w->bins_per_wheel);
+ else
+ {
+ _(e_li == level_index);
+ if (e_li == 0)
+ _(e_wi == wi);
+ else
+ _(e_wi == wi || e_wi + 1 == wi || e_wi - 1 == wi);
+ }
+ }
}
#undef _
@@ -144,13 +160,14 @@ validate_level (timing_wheel_t * w, uword level_index, uword * n_elts)
return error;
}
-void timing_wheel_validate (timing_wheel_t * w)
+void
+timing_wheel_validate (timing_wheel_t * w)
{
uword l;
- clib_error_t * error = 0;
+ clib_error_t *error = 0;
uword n_elts;
- if (! w->validate)
+ if (!w->validate)
return;
n_elts = pool_elts (w->overflow_pool);
@@ -173,19 +190,17 @@ free_elt_vector (timing_wheel_t * w, timing_wheel_elt_t * ev)
}
static timing_wheel_elt_t *
-insert_helper (timing_wheel_t * w,
- uword level_index,
- uword rtime)
+insert_helper (timing_wheel_t * w, uword level_index, uword rtime)
{
- timing_wheel_level_t * level;
- timing_wheel_elt_t * e;
+ timing_wheel_level_t *level;
+ timing_wheel_elt_t *e;
uword wheel_index;
/* Circular buffer. */
vec_validate (w->levels, level_index);
level = vec_elt_at_index (w->levels, level_index);
- if (PREDICT_FALSE (! level->elts))
+ if (PREDICT_FALSE (!level->elts))
{
uword max = w->bins_per_wheel - 1;
clib_bitmap_validate (level->occupancy_bitmap, max);
@@ -194,10 +209,11 @@ insert_helper (timing_wheel_t * w,
wheel_index = rtime_to_wheel_index (w, level_index, rtime);
- level->occupancy_bitmap = clib_bitmap_ori (level->occupancy_bitmap, wheel_index);
+ level->occupancy_bitmap =
+ clib_bitmap_ori (level->occupancy_bitmap, wheel_index);
/* Allocate an elt vector from free list if there is one. */
- if (! level->elts[wheel_index] && vec_len (w->free_elt_vectors))
+ if (!level->elts[wheel_index] && vec_len (w->free_elt_vectors))
level->elts[wheel_index] = vec_pop (w->free_elt_vectors);
/* Add element to vector for this time bin. */
@@ -207,9 +223,11 @@ insert_helper (timing_wheel_t * w,
}
/* Insert user data on wheel at given CPU time stamp. */
-static void timing_wheel_insert_helper (timing_wheel_t * w, u64 insert_cpu_time, u32 user_data)
+static void
+timing_wheel_insert_helper (timing_wheel_t * w, u64 insert_cpu_time,
+ u32 user_data)
{
- timing_wheel_elt_t * e;
+ timing_wheel_elt_t *e;
u64 dt;
uword rtime, level_index;
@@ -225,7 +243,7 @@ static void timing_wheel_insert_helper (timing_wheel_t * w, u64 insert_cpu_time,
else
{
/* Time too far in the future: add to overflow vector. */
- timing_wheel_overflow_elt_t * oe;
+ timing_wheel_overflow_elt_t *oe;
pool_get (w->overflow_pool, oe);
oe->user_data = user_data;
oe->cpu_time = insert_cpu_time;
@@ -243,57 +261,62 @@ static timing_wheel_elt_t *
delete_user_data (timing_wheel_elt_t * elts, u32 user_data)
{
uword found_match;
- timing_wheel_elt_t * e, * new_elts;
+ timing_wheel_elt_t *e, *new_elts;
/* Quickly scan to see if there are any elements to delete
in this bucket. */
found_match = 0;
vec_foreach (e, elts)
- {
- found_match = e->user_data == user_data;
- if (found_match)
- break;
- }
- if (! found_match)
+ {
+ found_match = e->user_data == user_data;
+ if (found_match)
+ break;
+ }
+ if (!found_match)
return elts;
/* Re-scan to build vector of new elts with matching user_data deleted. */
new_elts = 0;
vec_foreach (e, elts)
- {
- if (e->user_data != user_data)
- vec_add1 (new_elts, e[0]);
- }
+ {
+ if (e->user_data != user_data)
+ vec_add1 (new_elts, e[0]);
+ }
vec_free (elts);
return new_elts;
}
/* Insert user data on wheel at given CPU time stamp. */
-void timing_wheel_insert (timing_wheel_t * w, u64 insert_cpu_time, u32 user_data)
+void
+timing_wheel_insert (timing_wheel_t * w, u64 insert_cpu_time, u32 user_data)
{
/* Remove previously deleted elements. */
if (elt_is_deleted (w, user_data))
{
- timing_wheel_level_t * l;
+ timing_wheel_level_t *l;
uword wi;
/* Delete elts with given user data so that stale events don't expire. */
vec_foreach (l, w->levels)
- {
+ {
+ /* *INDENT-OFF* */
clib_bitmap_foreach (wi, l->occupancy_bitmap, ({
l->elts[wi] = delete_user_data (l->elts[wi], user_data);
if (vec_len (l->elts[wi]) == 0)
l->occupancy_bitmap = clib_bitmap_andnoti (l->occupancy_bitmap, wi);
}));
- }
+ /* *INDENT-ON* */
+ }
{
- timing_wheel_overflow_elt_t * oe;
+ timing_wheel_overflow_elt_t *oe;
+ /* *INDENT-OFF* */
pool_foreach (oe, w->overflow_pool, ({
if (oe->user_data == user_data)
pool_put (w->overflow_pool, oe);
}));
+ /* *INDENT-ON* */
}
hash_unset (w->deleted_user_data_hash, user_data);
@@ -302,19 +325,22 @@ void timing_wheel_insert (timing_wheel_t * w, u64 insert_cpu_time, u32 user_data
timing_wheel_insert_helper (w, insert_cpu_time, user_data);
}
-void timing_wheel_delete (timing_wheel_t * w, u32 user_data)
+void
+timing_wheel_delete (timing_wheel_t * w, u32 user_data)
{
- if (! w->deleted_user_data_hash)
- w->deleted_user_data_hash = hash_create (/* capacity */ 0, /* value bytes */ 0);
+ if (!w->deleted_user_data_hash)
+ w->deleted_user_data_hash =
+ hash_create ( /* capacity */ 0, /* value bytes */ 0);
hash_set1 (w->deleted_user_data_hash, user_data);
}
/* Returns time of next expiring element. */
-u64 timing_wheel_next_expiring_elt_time (timing_wheel_t * w)
+u64
+timing_wheel_next_expiring_elt_time (timing_wheel_t * w)
{
- timing_wheel_level_t * l;
- timing_wheel_elt_t * e;
+ timing_wheel_level_t *l;
+ timing_wheel_elt_t *e;
uword li, wi, wi0;
u32 min_dt;
u64 min_t;
@@ -323,51 +349,56 @@ u64 timing_wheel_next_expiring_elt_time (timing_wheel_t * w)
min_dt = ~0;
min_t = ~0ULL;
vec_foreach (l, w->levels)
- {
- if (! l->occupancy_bitmap)
- continue;
-
- li = l - w->levels;
- wi0 = wi = current_time_wheel_index (w, li);
- wrapped = 0;
- while (1)
- {
- if (clib_bitmap_get_no_check (l->occupancy_bitmap, wi))
- {
- vec_foreach (e, l->elts[wi])
- min_dt = clib_min (min_dt, e->cpu_time_relative_to_base);
+ {
+ if (!l->occupancy_bitmap)
+ continue;
- if (wrapped && li + 1 < vec_len (w->levels))
- {
- uword wi1 = current_time_wheel_index (w, li + 1);
- if (l[1].occupancy_bitmap && clib_bitmap_get_no_check (l[1].occupancy_bitmap, wi1))
+ li = l - w->levels;
+ wi0 = wi = current_time_wheel_index (w, li);
+ wrapped = 0;
+ while (1)
+ {
+ if (clib_bitmap_get_no_check (l->occupancy_bitmap, wi))
+ {
+ vec_foreach (e, l->elts[wi])
+ min_dt = clib_min (min_dt, e->cpu_time_relative_to_base);
+
+ if (wrapped && li + 1 < vec_len (w->levels))
+ {
+ uword wi1 = current_time_wheel_index (w, li + 1);
+ if (l[1].occupancy_bitmap
+ && clib_bitmap_get_no_check (l[1].occupancy_bitmap, wi1))
+ {
+ vec_foreach (e, l[1].elts[wi1])
{
- vec_foreach (e, l[1].elts[wi1]) {
- min_dt = clib_min (min_dt, e->cpu_time_relative_to_base);
- }
+ min_dt =
+ clib_min (min_dt, e->cpu_time_relative_to_base);
}
- }
+ }
+ }
- min_t = w->cpu_time_base + min_dt;
- goto done;
- }
+ min_t = w->cpu_time_base + min_dt;
+ goto done;
+ }
- wi = wheel_add (w, wi + 1);
- if (wi == wi0)
- break;
+ wi = wheel_add (w, wi + 1);
+ if (wi == wi0)
+ break;
- wrapped = wi != wi + 1;
- }
- }
+ wrapped = wi != wi + 1;
+ }
+ }
{
- timing_wheel_overflow_elt_t * oe;
+ timing_wheel_overflow_elt_t *oe;
if (min_dt != ~0)
min_t = w->cpu_time_base + min_dt;
+ /* *INDENT-OFF* */
pool_foreach (oe, w->overflow_pool,
({ min_t = clib_min (min_t, oe->cpu_time); }));
+ /* *INDENT-ON* */
done:
return min_t;
@@ -383,7 +414,9 @@ insert_elt (timing_wheel_t * w, timing_wheel_elt_t * e)
always_inline u64
elt_cpu_time (timing_wheel_t * w, timing_wheel_elt_t * e)
-{ return w->cpu_time_base + e->cpu_time_relative_to_base; }
+{
+ return w->cpu_time_base + e->cpu_time_relative_to_base;
+}
always_inline void
validate_expired_elt (timing_wheel_t * w, timing_wheel_elt_t * e,
@@ -402,13 +435,11 @@ validate_expired_elt (timing_wheel_t * w, timing_wheel_elt_t * e,
static u32 *
expire_bin (timing_wheel_t * w,
uword level_index,
- uword wheel_index,
- u64 advance_cpu_time,
- u32 * expired_user_data)
+ uword wheel_index, u64 advance_cpu_time, u32 * expired_user_data)
{
- timing_wheel_level_t * level = vec_elt_at_index (w->levels, level_index);
- timing_wheel_elt_t * e;
- u32 * x;
+ timing_wheel_level_t *level = vec_elt_at_index (w->levels, level_index);
+ timing_wheel_elt_t *e;
+ u32 *x;
uword i, j, e_len;
e = vec_elt (level->elts, wheel_index);
@@ -421,7 +452,7 @@ expire_bin (timing_wheel_t * w,
x[j] = e[i].user_data;
/* Only advance if elt is not to be deleted. */
- j += ! elt_is_deleted (w, e[i].user_data);
+ j += !elt_is_deleted (w, e[i].user_data);
}
/* Adjust for deleted elts. */
@@ -440,8 +471,8 @@ expire_bin (timing_wheel_t * w,
static void
advance_cpu_time_base (timing_wheel_t * w, u32 * expired_user_data)
{
- timing_wheel_level_t * l;
- timing_wheel_elt_t * e;
+ timing_wheel_level_t *l;
+ timing_wheel_elt_t *e;
u64 delta;
w->stats.cpu_time_base_advances++;
@@ -450,8 +481,9 @@ advance_cpu_time_base (timing_wheel_t * w, u32 * expired_user_data)
w->time_index_next_cpu_time_base_update += delta >> w->log2_clocks_per_bin;
vec_foreach (l, w->levels)
- {
- uword wi;
+ {
+ uword wi;
+ /* *INDENT-OFF* */
clib_bitmap_foreach (wi, l->occupancy_bitmap, ({
vec_foreach (e, l->elts[wi])
{
@@ -461,11 +493,13 @@ advance_cpu_time_base (timing_wheel_t * w, u32 * expired_user_data)
e->cpu_time_relative_to_base -= delta;
}
}));
- }
+ /* *INDENT-ON* */
+ }
/* See which overflow elements fit now. */
{
- timing_wheel_overflow_elt_t * oe;
+ timing_wheel_overflow_elt_t *oe;
+ /* *INDENT-OFF* */
pool_foreach (oe, w->overflow_pool, ({
/* It fits now into 32 bits. */
if (0 == ((oe->cpu_time - w->cpu_time_base) >> BITS (e->cpu_time_relative_to_base)))
@@ -483,6 +517,7 @@ advance_cpu_time_base (timing_wheel_t * w, u32 * expired_user_data)
pool_put (w->overflow_pool, oe);
}
}));
+ /* *INDENT-ON* */
}
}
@@ -491,11 +526,10 @@ refill_level (timing_wheel_t * w,
uword level_index,
u64 advance_cpu_time,
uword from_wheel_index,
- uword to_wheel_index,
- u32 * expired_user_data)
+ uword to_wheel_index, u32 * expired_user_data)
{
- timing_wheel_level_t * level;
- timing_wheel_elt_t * to_insert = w->unexpired_elts_pending_insert;
+ timing_wheel_level_t *level;
+ timing_wheel_elt_t *to_insert = w->unexpired_elts_pending_insert;
u64 advance_time_index = advance_cpu_time >> w->log2_clocks_per_bin;
vec_validate (w->stats.refills, level_index);
@@ -505,32 +539,34 @@ refill_level (timing_wheel_t * w,
goto done;
level = vec_elt_at_index (w->levels, level_index + 1);
- if (! level->occupancy_bitmap)
+ if (!level->occupancy_bitmap)
goto done;
while (1)
{
- timing_wheel_elt_t * e, * es;
+ timing_wheel_elt_t *e, *es;
- if (clib_bitmap_get_no_check (level->occupancy_bitmap, from_wheel_index))
+ if (clib_bitmap_get_no_check
+ (level->occupancy_bitmap, from_wheel_index))
{
es = level->elts[from_wheel_index];
level->elts[from_wheel_index] = 0;
- clib_bitmap_set_no_check (level->occupancy_bitmap, from_wheel_index, 0);
+ clib_bitmap_set_no_check (level->occupancy_bitmap, from_wheel_index,
+ 0);
vec_foreach (e, es)
- {
- u64 e_time = elt_cpu_time (w, e);
- u64 ti = e_time >> w->log2_clocks_per_bin;
- if (ti <= advance_time_index)
- {
- validate_expired_elt (w, e, advance_cpu_time);
- if (! elt_is_deleted (w, e->user_data))
- vec_add1 (expired_user_data, e->user_data);
- }
- else
- vec_add1 (to_insert, e[0]);
- }
+ {
+ u64 e_time = elt_cpu_time (w, e);
+ u64 ti = e_time >> w->log2_clocks_per_bin;
+ if (ti <= advance_time_index)
+ {
+ validate_expired_elt (w, e, advance_cpu_time);
+ if (!elt_is_deleted (w, e->user_data))
+ vec_add1 (expired_user_data, e->user_data);
+ }
+ else
+ vec_add1 (to_insert, e[0]);
+ }
free_elt_vector (w, es);
}
@@ -541,17 +577,18 @@ refill_level (timing_wheel_t * w,
}
timing_wheel_validate (w);
- done:
+done:
w->unexpired_elts_pending_insert = to_insert;
return expired_user_data;
}
/* Advance wheel and return any expired user data in vector. */
u32 *
-timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time, u32 * expired_user_data,
+timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time,
+ u32 * expired_user_data,
u64 * next_expiring_element_cpu_time)
{
- timing_wheel_level_t * level;
+ timing_wheel_level_t *level;
uword level_index, advance_rtime, advance_level_index, advance_wheel_index;
uword n_expired_user_data_before;
u64 current_time_index, advance_time_index;
@@ -583,8 +620,7 @@ timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time, u32 * expired_us
expired_user_data = refill_level (w,
level_index,
advance_cpu_time,
- c, a,
- expired_user_data);
+ c, a, expired_user_data);
current_ti >>= w->log2_bins_per_wheel;
advance_ti >>= w->log2_bins_per_wheel;
level_index++;
@@ -592,8 +628,10 @@ timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time, u32 * expired_us
}
}
- advance_level_index = get_level_and_relative_time (w, advance_cpu_time, &advance_rtime);
- advance_wheel_index = rtime_to_wheel_index (w, advance_level_index, advance_rtime);
+ advance_level_index =
+ get_level_and_relative_time (w, advance_cpu_time, &advance_rtime);
+ advance_wheel_index =
+ rtime_to_wheel_index (w, advance_level_index, advance_rtime);
/* Empty all occupied bins for entire levels that we advance past. */
for (level_index = 0; level_index < advance_level_index; level_index++)
@@ -604,23 +642,26 @@ timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time, u32 * expired_us
break;
level = vec_elt_at_index (w->levels, level_index);
+ /* *INDENT-OFF* */
clib_bitmap_foreach (wi, level->occupancy_bitmap, ({
expired_user_data = expire_bin (w, level_index, wi, advance_cpu_time,
expired_user_data);
}));
+ /* *INDENT-ON* */
}
if (PREDICT_TRUE (level_index < vec_len (w->levels)))
{
uword wi;
level = vec_elt_at_index (w->levels, level_index);
- wi = current_time_wheel_index (w, level_index);
+ wi = current_time_wheel_index (w, level_index);
if (level->occupancy_bitmap)
while (1)
{
if (clib_bitmap_get_no_check (level->occupancy_bitmap, wi))
- expired_user_data = expire_bin (w, advance_level_index, wi, advance_cpu_time,
- expired_user_data);
+ expired_user_data =
+ expire_bin (w, advance_level_index, wi, advance_cpu_time,
+ expired_user_data);
if (wi == advance_wheel_index)
break;
@@ -634,14 +675,14 @@ timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time, u32 * expired_us
if (vec_len (w->unexpired_elts_pending_insert) > 0)
{
- timing_wheel_elt_t * e;
- vec_foreach (e, w->unexpired_elts_pending_insert)
- insert_elt (w, e);
+ timing_wheel_elt_t *e;
+ vec_foreach (e, w->unexpired_elts_pending_insert) insert_elt (w, e);
_vec_len (w->unexpired_elts_pending_insert) = 0;
}
/* Don't advance until necessary. */
- while (PREDICT_FALSE (advance_time_index >= w->time_index_next_cpu_time_base_update))
+ while (PREDICT_FALSE
+ (advance_time_index >= w->time_index_next_cpu_time_base_update))
advance_cpu_time_base (w, expired_user_data);
if (next_expiring_element_cpu_time)
@@ -664,16 +705,18 @@ timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time, u32 * expired_us
return expired_user_data;
}
-u8 * format_timing_wheel (u8 * s, va_list * va)
+u8 *
+format_timing_wheel (u8 * s, va_list * va)
{
- timing_wheel_t * w = va_arg (*va, timing_wheel_t *);
+ timing_wheel_t *w = va_arg (*va, timing_wheel_t *);
int verbose = va_arg (*va, int);
uword indent = format_get_indent (s);
s = format (s, "level 0: %.4e - %.4e secs, 2^%d - 2^%d clocks",
(f64) (1 << w->log2_clocks_per_bin) / w->cpu_clocks_per_second,
- (f64) (1 << w->log2_clocks_per_wheel) / w->cpu_clocks_per_second,
- w->log2_clocks_per_bin, w->log2_clocks_per_wheel);
+ (f64) (1 << w->log2_clocks_per_wheel) /
+ w->cpu_clocks_per_second, w->log2_clocks_per_bin,
+ w->log2_clocks_per_wheel);
if (verbose)
{
@@ -682,13 +725,25 @@ u8 * format_timing_wheel (u8 * s, va_list * va)
s = format (s, "\n%Utime base advances %Ld, every %.4e secs",
format_white_space, indent + 2,
w->stats.cpu_time_base_advances,
- (f64) ((u64) 1 << w->n_wheel_elt_time_bits) / w->cpu_clocks_per_second);
+ (f64) ((u64) 1 << w->n_wheel_elt_time_bits) /
+ w->cpu_clocks_per_second);
for (l = 0; l < vec_len (w->levels); l++)
s = format (s, "\n%Ulevel %d: refills %Ld",
format_white_space, indent + 2,
- l, l < vec_len (w->stats.refills) ? w->stats.refills[l] : (u64) 0);
+ l,
+ l <
+ vec_len (w->stats.refills) ? w->stats.
+ refills[l] : (u64) 0);
}
return s;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/timing_wheel.h b/vppinfra/vppinfra/timing_wheel.h
index d5cffdb93e6..7daea994248 100644
--- a/vppinfra/vppinfra/timing_wheel.h
+++ b/vppinfra/vppinfra/timing_wheel.h
@@ -17,7 +17,8 @@
#include <vppinfra/format.h>
-typedef struct {
+typedef struct
+{
/* Time of this element in units cpu clock ticks relative to time
base. 32 bits should be large enough for several kilo-seconds
to elapse before we have to re-set time base. */
@@ -28,7 +29,8 @@ typedef struct {
} timing_wheel_elt_t;
/* Overflow wheel elements where time does not fit into 32 bits. */
-typedef struct {
+typedef struct
+{
/* Absolute time of this element. */
u64 cpu_time;
@@ -38,23 +40,26 @@ typedef struct {
u32 pad;
} timing_wheel_overflow_elt_t;
-typedef struct {
+typedef struct
+{
/* 2^M bits: 1 means vector is non-zero else zero. */
- uword * occupancy_bitmap;
+ uword *occupancy_bitmap;
/* 2^M element table of element vectors, one for each time bin. */
- timing_wheel_elt_t ** elts;
+ timing_wheel_elt_t **elts;
} timing_wheel_level_t;
-typedef struct {
+typedef struct
+{
/* Vector of refill counts per level. */
- u64 * refills;
+ u64 *refills;
/* Number of times cpu time base was rescaled. */
u64 cpu_time_base_advances;
} timing_wheel_stats_t;
-typedef struct {
+typedef struct
+{
/* Each bin is a power of two clock ticks (N)
chosen so that 2^N >= min_sched_time. */
u8 log2_clocks_per_bin;
@@ -75,17 +80,17 @@ typedef struct {
/* 2^M - 1. */
u32 bins_per_wheel_mask;
- timing_wheel_level_t * levels;
+ timing_wheel_level_t *levels;
- timing_wheel_overflow_elt_t * overflow_pool;
+ timing_wheel_overflow_elt_t *overflow_pool;
/* Free list of element vector so we can recycle old allocated vectors. */
- timing_wheel_elt_t ** free_elt_vectors;
+ timing_wheel_elt_t **free_elt_vectors;
- timing_wheel_elt_t * unexpired_elts_pending_insert;
+ timing_wheel_elt_t *unexpired_elts_pending_insert;
/* Hash table of user data values which have been deleted but not yet re-inserted. */
- uword * deleted_user_data_hash;
+ uword *deleted_user_data_hash;
/* Enable validation for debugging. */
u32 validate;
@@ -117,7 +122,8 @@ void timing_wheel_init (timing_wheel_t * w,
u64 current_cpu_time, f64 cpu_clocks_per_second);
/* Insert user data on wheel at given CPU time stamp. */
-void timing_wheel_insert (timing_wheel_t * w, u64 insert_cpu_time, u32 user_data);
+void timing_wheel_insert (timing_wheel_t * w, u64 insert_cpu_time,
+ u32 user_data);
/* Delete user data from wheel (until it is again inserted). */
void timing_wheel_delete (timing_wheel_t * w, u32 user_data);
@@ -125,8 +131,9 @@ void timing_wheel_delete (timing_wheel_t * w, u32 user_data);
/* Advance wheel and return any expired user data in vector. If non-zero
min_next_expiring_element_cpu_time will return a cpu time stamp
before which there are guaranteed to be no elements in the current wheel. */
-u32 * timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time, u32 * expired_user_data,
- u64 * min_next_expiring_element_cpu_time);
+u32 *timing_wheel_advance (timing_wheel_t * w, u64 advance_cpu_time,
+ u32 * expired_user_data,
+ u64 * min_next_expiring_element_cpu_time);
/* Returns absolute time in clock cycles of next expiring element. */
u64 timing_wheel_next_expiring_elt_time (timing_wheel_t * w);
@@ -138,3 +145,11 @@ format_function_t format_timing_wheel;
void timing_wheel_validate (timing_wheel_t * w);
#endif /* included_clib_timing_wheel_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
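For orientation, a minimal usage sketch of the timing-wheel API declared above (illustrative only, not part of the patch). It assumes a clib_time_t clock source for cpu_clocks_per_second, and that a zero-initialized wheel lets timing_wheel_init pick default min/max scheduling times (the init code checks max_sched_time <= min_sched_time); real callers may configure those fields explicitly.

#include <vppinfra/time.h>
#include <vppinfra/timing_wheel.h>

static void
timing_wheel_example (void)
{
  timing_wheel_t w = { 0 };	/* zeroed: init chooses default sched times */
  clib_time_t clk;
  u32 *expired = 0;
  u64 next_expiring;

  clib_time_init (&clk);
  timing_wheel_init (&w, clib_cpu_time_now (), clk.clocks_per_second);

  /* Schedule user_data 1 to expire roughly 10 ms from now. */
  timing_wheel_insert (&w, clib_cpu_time_now ()
		       + (u64) (10e-3 * clk.clocks_per_second), 1);

  /* Expired user data accumulates in the caller-supplied vector. */
  while (vec_len (expired) == 0)
    expired = timing_wheel_advance (&w, clib_cpu_time_now (), expired,
				    &next_expiring);
  vec_free (expired);
}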
diff --git a/vppinfra/vppinfra/types.h b/vppinfra/vppinfra/types.h
index d5ad6ba18cd..f87bb48c910 100644
--- a/vppinfra/vppinfra/types.h
+++ b/vppinfra/vppinfra/types.h
@@ -51,7 +51,7 @@ typedef short i16;
#include <asm/types.h>
#define CLIB_AVOID_CLASH_WITH_LINUX_TYPES
-#else /* ! CLIB_LINUX_KERNEL */
+#else /* ! CLIB_LINUX_KERNEL */
typedef unsigned char u8;
typedef unsigned short u16;
@@ -128,8 +128,10 @@ typedef u32 clib_address_t;
pointers and machine words (but only when compiling with 64 bit
registers and 32 bit pointers). */
static inline __attribute__ ((always_inline)) uword
-pointer_to_uword (const void * p)
-{ return (uword) (clib_address_t) p; }
+pointer_to_uword (const void *p)
+{
+ return (uword) (clib_address_t) p;
+}
#define uword_to_pointer(u,type) ((type) (clib_address_t) (u))
@@ -162,3 +164,11 @@ typedef f64 fword;
} *) (addr))->_data)
#endif /* included_clib_types_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
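A tiny illustration (not part of the patch) of the pointer_to_uword / uword_to_pointer helpers above; unformat.c below uses the same pair to stash a file descriptor in fill_buffer_arg. The some_object variable is hypothetical.

#include <vppinfra/types.h>

static int some_object;		/* hypothetical */

static int
pointer_roundtrip_ok (void)
{
  void *p = &some_object;
  uword u = pointer_to_uword (p);	/* pointer -> machine word */
  void *q = uword_to_pointer (u, void *);	/* and back */
  return p == q;
}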
diff --git a/vppinfra/vppinfra/unformat.c b/vppinfra/vppinfra/unformat.c
index 8ccbf73d88c..22d282a078f 100644
--- a/vppinfra/vppinfra/unformat.c
+++ b/vppinfra/vppinfra/unformat.c
@@ -38,7 +38,8 @@
#include <vppinfra/format.h>
/* Call user's function to fill input buffer. */
-uword _unformat_fill_input (unformat_input_t * i)
+uword
+_unformat_fill_input (unformat_input_t * i)
{
uword l, first_mark;
@@ -74,8 +75,10 @@ is_white_space (uword c)
{
switch (c)
{
- case ' ': case '\t':
- case '\n': case '\r':
+ case ' ':
+ case '\t':
+ case '\n':
+ case '\r':
return 1;
default:
@@ -84,9 +87,10 @@ is_white_space (uword c)
}
/* Format function for dumping input stream. */
-u8 * format_unformat_error (u8 * s, va_list * va)
+u8 *
+format_unformat_error (u8 * s, va_list * va)
{
- unformat_input_t * i = va_arg (*va, unformat_input_t *);
+ unformat_input_t *i = va_arg (*va, unformat_input_t *);
uword l = vec_len (i->buffer);
/* Only show so much of the input buffer (it could be really large). */
@@ -95,7 +99,7 @@ u8 * format_unformat_error (u8 * s, va_list * va)
if (i->index < l)
{
uword n = l - i->index;
- u8 * p, * p_end;
+ u8 *p, *p_end;
p = i->buffer + i->index;
p_end = p + (n > n_max ? n_max : n);
@@ -111,10 +115,18 @@ u8 * format_unformat_error (u8 * s, va_list * va)
{
switch (*p)
{
- case '\r': vec_add (s, "\\r", 2); break;
- case '\n': vec_add (s, "\\n", 2); break;
- case '\t': vec_add (s, "\\t", 2); break;
- default: vec_add1 (s, *p); break;
+ case '\r':
+ vec_add (s, "\\r", 2);
+ break;
+ case '\n':
+ vec_add (s, "\\n", 2);
+ break;
+ case '\t':
+ vec_add (s, "\\t", 2);
+ break;
+ default:
+ vec_add1 (s, *p);
+ break;
}
p++;
}
@@ -127,10 +139,10 @@ u8 * format_unformat_error (u8 * s, va_list * va)
}
/* Print everything: not just error context. */
-u8 * format_unformat_input (u8 * s,
- va_list * va)
+u8 *
+format_unformat_input (u8 * s, va_list * va)
{
- unformat_input_t * i = va_arg (*va, unformat_input_t *);
+ unformat_input_t *i = va_arg (*va, unformat_input_t *);
uword l, n;
if (i->index == UNFORMAT_END_OF_INPUT)
@@ -147,8 +159,11 @@ u8 * format_unformat_input (u8 * s,
}
#if CLIB_DEBUG > 0
-void di (unformat_input_t * i)
-{ fformat (stderr, "%U\n", format_unformat_input, i); }
+void
+di (unformat_input_t * i)
+{
+ fformat (stderr, "%U\n", format_unformat_input, i);
+}
#endif
/* Parse delimited vector string. If string starts with { then string
@@ -157,11 +172,10 @@ void di (unformat_input_t * i)
static uword
unformat_string (unformat_input_t * input,
uword delimiter_character,
- uword format_character,
- va_list * va)
+ uword format_character, va_list * va)
{
- u8 ** string_return = va_arg (*va, u8 **);
- u8 * s = 0;
+ u8 **string_return = va_arg (*va, u8 **);
+ u8 *s = 0;
word paren = 0;
word is_paren_delimited = 0;
word backslash = 0;
@@ -212,7 +226,7 @@ unformat_string (unformat_input_t * input,
case '\t':
case '\n':
case '\r':
- if (! is_paren_delimited)
+ if (!is_paren_delimited)
{
unformat_put_input (input);
goto done;
@@ -220,7 +234,7 @@ unformat_string (unformat_input_t * input,
break;
default:
- if (! is_paren_delimited && c == delimiter_character)
+ if (!is_paren_delimited && c == delimiter_character)
{
unformat_put_input (input);
goto done;
@@ -231,17 +245,17 @@ unformat_string (unformat_input_t * input,
vec_add1 (s, c);
}
- done:
+done:
if (string_return)
{
/* Match the string { END-OF-INPUT as a single brace. */
if (c == UNFORMAT_END_OF_INPUT && vec_len (s) == 0 && paren == 1)
- vec_add1 (s, '{');
+ vec_add1 (s, '{');
/* Don't match null string. */
if (c == UNFORMAT_END_OF_INPUT && vec_len (s) == 0)
return 0;
-
+
/* Null terminate C string. */
if (format_character == 's')
vec_add1 (s, 0);
@@ -255,11 +269,10 @@ unformat_string (unformat_input_t * input,
}
uword
-unformat_hex_string (unformat_input_t * input,
- va_list * va)
+unformat_hex_string (unformat_input_t * input, va_list * va)
{
- u8 ** hexstring_return = va_arg (*va, u8 **);
- u8 * s;
+ u8 **hexstring_return = va_arg (*va, u8 **);
+ u8 *s;
uword n, d, c;
n = 0;
@@ -268,11 +281,11 @@ unformat_hex_string (unformat_input_t * input,
while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
{
if (c >= '0' && c <= '9')
- d = 16*d + c - '0';
+ d = 16 * d + c - '0';
else if (c >= 'a' && c <= 'f')
- d = 16*d + 10 + c - 'a';
+ d = 16 * d + 10 + c - 'a';
else if (c >= 'A' && c <= 'F')
- d = 16*d + 10 + c - 'A';
+ d = 16 * d + 10 + c - 'A';
else
{
unformat_put_input (input);
@@ -307,19 +320,18 @@ unformat_eof (unformat_input_t * input, va_list * va)
/* Parse a token containing given set of characters. */
uword
-unformat_token (unformat_input_t * input,
- va_list * va)
+unformat_token (unformat_input_t * input, va_list * va)
{
- u8 * token_chars = va_arg (*va, u8 *);
- u8 ** string_return = va_arg (*va, u8 **);
- u8 * s, map[256];
+ u8 *token_chars = va_arg (*va, u8 *);
+ u8 **string_return = va_arg (*va, u8 **);
+ u8 *s, map[256];
uword i, c;
- if (! token_chars)
+ if (!token_chars)
token_chars = (u8 *) "a-zA-Z0-9_";
memset (map, 0, sizeof (map));
- for (s = token_chars; *s; )
+ for (s = token_chars; *s;)
{
/* Parse range. */
if (s[0] < s[2] && s[1] == '-')
@@ -327,7 +339,7 @@ unformat_token (unformat_input_t * input,
for (i = s[0]; i <= s[2]; i++)
map[i] = 1;
s = s + 3;
- }
+ }
else
{
map[s[0]] = 1;
@@ -338,12 +350,12 @@ unformat_token (unformat_input_t * input,
s = 0;
while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
{
- if (! map[c])
+ if (!map[c])
{
unformat_put_input (input);
break;
}
-
+
vec_add1 (s, c);
}
@@ -356,10 +368,11 @@ unformat_token (unformat_input_t * input,
/* Unformat (parse) function which reads a %s string and converts it
to an unformat_input_t. */
-uword unformat_input (unformat_input_t * i, va_list * args)
+uword
+unformat_input (unformat_input_t * i, va_list * args)
{
- unformat_input_t * sub_input = va_arg (*args, unformat_input_t *);
- u8 * s;
+ unformat_input_t *sub_input = va_arg (*args, unformat_input_t *);
+ u8 *s;
if (unformat (i, "%v", &s))
{
@@ -371,13 +384,13 @@ uword unformat_input (unformat_input_t * i, va_list * args)
}
/* Parse a line ending with \n and return it. */
-uword unformat_line (unformat_input_t * i, va_list * va)
+uword
+unformat_line (unformat_input_t * i, va_list * va)
{
- u8 * line = 0, ** result = va_arg (*va, u8 **);
+ u8 *line = 0, **result = va_arg (*va, u8 **);
uword c;
- while ((c = unformat_get_input (i)) != '\n'
- && c != UNFORMAT_END_OF_INPUT)
+ while ((c = unformat_get_input (i)) != '\n' && c != UNFORMAT_END_OF_INPUT)
{
vec_add1 (line, c);
}
@@ -387,10 +400,11 @@ uword unformat_line (unformat_input_t * i, va_list * va)
}
/* Parse a line ending with \n and return it as an unformat_input_t. */
-uword unformat_line_input (unformat_input_t * i, va_list * va)
+uword
+unformat_line_input (unformat_input_t * i, va_list * va)
{
- unformat_input_t * result = va_arg (*va, unformat_input_t *);
- u8 * line;
+ unformat_input_t *result = va_arg (*va, unformat_input_t *);
+ u8 *line;
unformat_user (i, unformat_line, &line);
unformat_init_vector (result, line);
return 1;
@@ -402,10 +416,7 @@ uword unformat_line_input (unformat_input_t * i, va_list * va)
static uword
unformat_integer (unformat_input_t * input,
- va_list * va,
- uword base,
- uword is_signed,
- uword data_bytes)
+ va_list * va, uword base, uword is_signed, uword data_bytes)
{
uword c, digit;
uword value = 0;
@@ -437,7 +448,7 @@ unformat_integer (unformat_input_t * input,
goto put_input_done;
case '+':
- if (n_input > 0)
+ if (n_input > 0)
goto put_input_done;
sign = 0;
goto next_digit;
@@ -474,7 +485,7 @@ unformat_integer (unformat_input_t * input,
}
{
- uword new_value = base*value + digit;
+ uword new_value = base * value + digit;
/* Check for overflow. */
if (new_value < value)
@@ -487,36 +498,45 @@ unformat_integer (unformat_input_t * input,
n_input++;
}
- done:
+done:
if (sign)
value = -value;
if (n_digits > 0)
{
- void * v = va_arg (*va, void *);
+ void *v = va_arg (*va, void *);
if (data_bytes == ~0)
- data_bytes = sizeof (int);
+ data_bytes = sizeof (int);
switch (data_bytes)
{
- case 1: *(u8 *) v = value; break;
- case 2: *(u16 *) v = value; break;
- case 4: *(u32 *) v = value; break;
- case 8: *(u64 *) v = value; break;
+ case 1:
+ *(u8 *) v = value;
+ break;
+ case 2:
+ *(u16 *) v = value;
+ break;
+ case 4:
+ *(u32 *) v = value;
+ break;
+ case 8:
+ *(u64 *) v = value;
+ break;
default:
- goto error;
+ goto error;
}
return 1;
}
- error:
+error:
return 0;
}
/* Return x 10^n */
-static f64 times_power_of_ten (f64 x, int n)
+static f64
+times_power_of_ten (f64 x, int n)
{
if (n >= 0)
{
@@ -538,12 +558,11 @@ static f64 times_power_of_ten (f64 x, int n)
}
return x * t[-n];
}
-
+
}
static uword
-unformat_float (unformat_input_t * input,
- va_list * va)
+unformat_float (unformat_input_t * input, va_list * va)
{
uword c;
u64 values[3];
@@ -561,7 +580,7 @@ unformat_float (unformat_input_t * input,
{
case '-':
if (value_index == 2 && n_digits[2] == 0)
- /* sign of exponent: it's ok. */;
+ /* sign of exponent: it's ok. */ ;
else if (value_index < 2 && n_digits[0] > 0)
{
@@ -578,7 +597,7 @@ unformat_float (unformat_input_t * input,
case '+':
if (value_index == 2 && n_digits[2] == 0)
- /* sign of exponent: it's ok. */;
+ /* sign of exponent: it's ok. */ ;
else if (value_index < 2 && n_digits[0] > 0)
{
@@ -629,9 +648,9 @@ unformat_float (unformat_input_t * input,
n_input++;
}
- done:
+done:
{
- f64 f_values[2], * value_return;
+ f64 f_values[2], *value_return;
word expon;
/* Must have either whole or fraction digits. */
@@ -658,11 +677,12 @@ unformat_float (unformat_input_t * input,
return 1;
}
- error:
+error:
return 0;
}
-static char * match_input_with_format (unformat_input_t * input, char * f)
+static char *
+match_input_with_format (unformat_input_t * input, char *f)
{
uword cf, ci;
@@ -683,7 +703,8 @@ static char * match_input_with_format (unformat_input_t * input, char * f)
return f;
}
-static char * do_percent (unformat_input_t * input, va_list * va, char * f)
+static char *
+do_percent (unformat_input_t * input, va_list * va, char *f)
{
uword cf, n, data_bytes = ~0;
@@ -712,7 +733,7 @@ static char * do_percent (unformat_input_t * input, va_list * va, char * f)
data_bytes = sizeof (long);
}
break;
-
+
case 'L':
cf = *f++;
data_bytes = sizeof (long long);
@@ -762,7 +783,7 @@ static char * do_percent (unformat_input_t * input, va_list * va, char * f)
case 'U':
{
- unformat_function_t * f = va_arg (*va, unformat_function_t *);
+ unformat_function_t *f = va_arg (*va, unformat_function_t *);
n = f (input, va);
}
break;
@@ -770,7 +791,7 @@ static char * do_percent (unformat_input_t * input, va_list * va, char * f)
case '=':
case '|':
{
- int * var = va_arg (*va, int *);
+ int *var = va_arg (*va, int *);
uword val = va_arg (*va, int);
if (cf == '|')
@@ -784,14 +805,15 @@ static char * do_percent (unformat_input_t * input, va_list * va, char * f)
return n ? f : 0;
}
-uword unformat_skip_white_space (unformat_input_t * input)
+uword
+unformat_skip_white_space (unformat_input_t * input)
{
uword n = 0;
uword c;
while ((c = unformat_get_input (input)) != UNFORMAT_END_OF_INPUT)
{
- if (! is_white_space (c))
+ if (!is_white_space (c))
{
unformat_put_input (input);
break;
@@ -802,23 +824,24 @@ uword unformat_skip_white_space (unformat_input_t * input)
}
uword
-va_unformat (unformat_input_t * input, char * fmt, va_list * va)
+va_unformat (unformat_input_t * input, char *fmt, va_list * va)
{
- char * f;
+ char *f;
uword input_matches_format;
uword default_skip_input_white_space;
uword n_input_white_space_skipped;
uword last_non_white_space_match_percent;
uword last_non_white_space_match_format;
- vec_add1_aligned (input->buffer_marks, input->index, sizeof (input->buffer_marks[0]));
+ vec_add1_aligned (input->buffer_marks, input->index,
+ sizeof (input->buffer_marks[0]));
f = fmt;
default_skip_input_white_space = 1;
input_matches_format = 0;
last_non_white_space_match_percent = 0;
last_non_white_space_match_format = 0;
-
+
while (1)
{
char cf;
@@ -828,8 +851,8 @@ va_unformat (unformat_input_t * input, char * fmt, va_list * va)
is_percent = 0;
/* Always skip input white space at start of format string.
- Otherwise use default skip value which can be changed by %_
- (see below). */
+ Otherwise use default skip value which can be changed by %_
+ (see below). */
skip_input_white_space = f == fmt || default_skip_input_white_space;
/* Spaces in format request skipping input white space. */
@@ -848,23 +871,24 @@ va_unformat (unformat_input_t * input, char * fmt, va_list * va)
switch (*++f)
{
case '_':
- default_skip_input_white_space = !default_skip_input_white_space;
+ default_skip_input_white_space =
+ !default_skip_input_white_space;
f++;
/* For transition from skip to no-skip in middle of format
- string, skip input white space. For example, the following matches:
- fmt = "%_%d.%d%_->%_%d.%d%_"
- input "1.2 -> 3.4"
- Without this the space after -> does not get skipped. */
- if (! default_skip_input_white_space
- && ! (f == fmt + 2 || *f == 0))
+ string, skip input white space. For example, the following matches:
+ fmt = "%_%d.%d%_->%_%d.%d%_"
+ input "1.2 -> 3.4"
+ Without this the space after -> does not get skipped. */
+ if (!default_skip_input_white_space
+ && !(f == fmt + 2 || *f == 0))
unformat_skip_white_space (input);
continue;
- /* %% means match % */
+ /* %% means match % */
case '%':
break;
- /* % at end of format string. */
+ /* % at end of format string. */
case 0:
goto parse_fail;
@@ -887,8 +911,8 @@ va_unformat (unformat_input_t * input, char * fmt, va_list * va)
The last_non_white_space_match_percent is to make
"foo %d" match input "foo 10,bletch" with %d matching 10. */
if (skip_input_white_space
- && ! last_non_white_space_match_percent
- && ! last_non_white_space_match_format
+ && !last_non_white_space_match_percent
+ && !last_non_white_space_match_format
&& n_input_white_space_skipped == 0
&& input->index != UNFORMAT_END_OF_INPUT)
goto parse_fail;
@@ -907,14 +931,14 @@ va_unformat (unformat_input_t * input, char * fmt, va_list * va)
else if (is_percent)
{
- if (! (f = do_percent (input, va, f)))
+ if (!(f = do_percent (input, va, f)))
goto parse_fail;
}
else
{
- char * g = match_input_with_format (input, f);
- if (! g)
+ char *g = match_input_with_format (input, f);
+ if (!g)
goto parse_fail;
last_non_white_space_match_format = g > f;
f = g;
@@ -922,15 +946,15 @@ va_unformat (unformat_input_t * input, char * fmt, va_list * va)
}
input_matches_format = 1;
- parse_fail:
+parse_fail:
/* Rewind buffer marks. */
{
uword l = vec_len (input->buffer_marks);
/* If we did not match back up buffer to last mark. */
- if (! input_matches_format)
- input->index = input->buffer_marks[l-1];
+ if (!input_matches_format)
+ input->index = input->buffer_marks[l - 1];
_vec_len (input->buffer_marks) = l - 1;
}
@@ -939,7 +963,7 @@ va_unformat (unformat_input_t * input, char * fmt, va_list * va)
}
uword
-unformat (unformat_input_t * input, char * fmt, ...)
+unformat (unformat_input_t * input, char *fmt, ...)
{
va_list va;
uword result;
@@ -957,13 +981,14 @@ unformat_user (unformat_input_t * input, unformat_function_t * func, ...)
/* Save place in input buffer in case parse fails. */
l = vec_len (input->buffer_marks);
- vec_add1_aligned (input->buffer_marks, input->index, sizeof (input->buffer_marks[0]));
+ vec_add1_aligned (input->buffer_marks, input->index,
+ sizeof (input->buffer_marks[0]));
va_start (va, func);
result = func (input, &va);
va_end (va);
- if (! result)
+ if (!result)
input->index = input->buffer_marks[l];
_vec_len (input->buffer_marks) = l;
@@ -972,8 +997,8 @@ unformat_user (unformat_input_t * input, unformat_function_t * func, ...)
}
/* Setup for unformat of Unix style command line. */
-void unformat_init_command_line (unformat_input_t * input,
- char * argv[])
+void
+unformat_init_command_line (unformat_input_t * input, char *argv[])
{
uword i;
@@ -988,17 +1013,16 @@ void unformat_init_command_line (unformat_input_t * input,
}
}
-void unformat_init_string (unformat_input_t * input,
- char * string,
- int string_len)
+void
+unformat_init_string (unformat_input_t * input, char *string, int string_len)
{
unformat_init (input, 0, 0);
if (string_len > 0)
vec_add (input->buffer, string, string_len);
}
-void unformat_init_vector (unformat_input_t * input,
- u8 * vector_string)
+void
+unformat_init_vector (unformat_input_t * input, u8 * vector_string)
{
unformat_init (input, 0, 0);
input->buffer = vector_string;
@@ -1006,7 +1030,8 @@ void unformat_init_vector (unformat_input_t * input,
#ifdef CLIB_UNIX
-static uword unix_file_fill_buffer (unformat_input_t * input)
+static uword
+unix_file_fill_buffer (unformat_input_t * input)
{
int fd = pointer_to_uword (input->fill_buffer_arg);
uword l, n;
@@ -1023,17 +1048,18 @@ static uword unix_file_fill_buffer (unformat_input_t * input)
return input->index;
}
-void unformat_init_unix_file (unformat_input_t * input,
- int file_descriptor)
+void
+unformat_init_unix_file (unformat_input_t * input, int file_descriptor)
{
unformat_init (input, unix_file_fill_buffer,
uword_to_pointer (file_descriptor, void *));
}
/* Take input from Unix environment variable. */
-uword unformat_init_unix_env (unformat_input_t * input, char * var)
+uword
+unformat_init_unix_env (unformat_input_t * input, char *var)
{
- char * val = getenv (var);
+ char *val = getenv (var);
if (val)
unformat_init_string (input, val, strlen (val));
return val != 0;
@@ -1041,3 +1067,11 @@ uword unformat_init_unix_env (unformat_input_t * input, char * var)
#endif /* CLIB_UNIX */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
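A minimal sketch (illustrative only, not part of the patch) of driving the parser over a string, reusing the "%_" example quoted in the va_unformat comment above; %d stores into an int by default.

#include <string.h>
#include <vppinfra/format.h>

static int
unformat_example (void)
{
  unformat_input_t in;
  int a = 0, b = 0, c = 0, d = 0;
  char *s = "1.2 -> 3.4";

  unformat_init_string (&in, s, strlen (s));

  /* "%_" toggles default white-space skipping, as described above. */
  if (!unformat (&in, "%_%d.%d%_->%_%d.%d%_", &a, &b, &c, &d))
    return 0;

  return a == 1 && b == 2 && c == 3 && d == 4;
}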
diff --git a/vppinfra/vppinfra/unix-kelog.c b/vppinfra/vppinfra/unix-kelog.c
index 21ac9921083..88428ee8f2e 100644
--- a/vppinfra/vppinfra/unix-kelog.c
+++ b/vppinfra/vppinfra/unix-kelog.c
@@ -26,12 +26,12 @@
#include <time.h>
typedef enum
- {
- RUNNING = 0,
- WAKEUP,
- } sched_event_type_t;
+{
+ RUNNING = 0,
+ WAKEUP,
+} sched_event_type_t;
-typedef struct
+typedef struct
{
u32 cpu;
u8 *task;
@@ -40,7 +40,8 @@ typedef struct
sched_event_type_t type;
} sched_event_t;
-void kelog_init (elog_main_t * em, char * kernel_tracer, u32 n_events)
+void
+kelog_init (elog_main_t * em, char *kernel_tracer, u32 n_events)
{
int enable_fd, current_tracer_fd, data_fd;
int len;
@@ -54,7 +55,7 @@ void kelog_init (elog_main_t * em, char * kernel_tracer, u32 n_events)
ASSERT (kernel_tracer);
/*$$$$ fixme */
- n_events = 1<<18;
+ n_events = 1 << 18;
/* init first so we won't hurt ourselves if we bail */
elog_init (em, n_events);
@@ -66,24 +67,24 @@ void kelog_init (elog_main_t * em, char * kernel_tracer, u32 n_events)
return;
}
/* disable kernel tracing */
- if (write (enable_fd, "0\n", 2) != 2)
+ if (write (enable_fd, "0\n", 2) != 2)
{
clib_unix_warning ("disable tracing");
- close(enable_fd);
+ close (enable_fd);
return;
}
-
- /*
+
+ /*
* open + clear the data buffer.
* see .../linux/kernel/trace/trace.c:tracing_open()
*/
data_fd = open (trace_data, O_RDWR | O_TRUNC);
- if (data_fd < 0)
+ if (data_fd < 0)
{
clib_warning ("Couldn't open+clear %s", trace_data);
return;
}
- close(data_fd);
+ close (data_fd);
/* configure tracing */
current_tracer_fd = open (current_tracer, O_RDWR);
@@ -91,23 +92,23 @@ void kelog_init (elog_main_t * em, char * kernel_tracer, u32 n_events)
if (current_tracer_fd < 0)
{
clib_warning ("Couldn't open %s", current_tracer);
- close(enable_fd);
+ close (enable_fd);
return;
}
- len = strlen(kernel_tracer);
+ len = strlen (kernel_tracer);
- if (write (current_tracer_fd, kernel_tracer, len) != len)
+ if (write (current_tracer_fd, kernel_tracer, len) != len)
{
clib_unix_warning ("configure trace");
- close(current_tracer_fd);
- close(enable_fd);
+ close (current_tracer_fd);
+ close (enable_fd);
return;
}
-
- close(current_tracer_fd);
- /*
+ close (current_tracer_fd);
+
+ /*
* The kernel event log uses CLOCK_MONOTONIC timestamps,
* not CLOCK_REALTIME timestamps. These differ by a constant
* but the constant is not available in user mode.
@@ -116,33 +117,35 @@ void kelog_init (elog_main_t * em, char * kernel_tracer, u32 n_events)
clib_time_init (&em->cpu_timer);
em->init_time.cpu = em->cpu_timer.init_cpu_time;
syscall (SYS_clock_gettime, CLOCK_MONOTONIC, &ts);
-
+
/* enable kernel tracing */
- if (write (enable_fd, "1\n", 2) != 2)
+ if (write (enable_fd, "1\n", 2) != 2)
{
clib_unix_warning ("enable tracing");
- close(enable_fd);
+ close (enable_fd);
return;
}
- close(enable_fd);
+ close (enable_fd);
}
-u8 *format_sched_event (u8 * s, va_list * va)
+u8 *
+format_sched_event (u8 * s, va_list * va)
{
sched_event_t *e = va_arg (*va, sched_event_t *);
s = format (s, "cpu %d task %10s type %s timestamp %12.6f\n",
- e->cpu, e->task, e->type ? "WAKEUP " : "RUNNING", e->timestamp);
+ e->cpu, e->task, e->type ? "WAKEUP " : "RUNNING", e->timestamp);
return s;
}
-sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
+sched_event_t *
+parse_sched_switch_trace (u8 * tdata, u32 * index)
{
u8 *cp = tdata + *index;
- u8 *limit = tdata + vec_len(tdata);
+ u8 *limit = tdata + vec_len (tdata);
int colons;
static sched_event_t event;
sched_event_t *e = &event;
@@ -150,23 +153,23 @@ sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
u32 secs, usecs;
int i;
- again:
+again:
/* eat leading w/s */
while (cp < limit && (*cp == ' ' && *cp == '\t'))
cp++;
if (cp == limit)
return 0;
-
+
/* header line */
if (*cp == '#')
{
while (cp < limit && (*cp != '\n'))
- cp++;
+ cp++;
if (*cp == '\n')
- {
- cp++;
- goto again;
- }
+ {
+ cp++;
+ goto again;
+ }
clib_warning ("bugger 0");
return 0;
}
@@ -182,7 +185,7 @@ sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
clib_warning ("bugger 0.1");
return 0;
}
-
+
cp++;
while (cp < limit && (*cp == ' ' && *cp == '\t'))
cp++;
@@ -191,8 +194,8 @@ sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
clib_warning ("bugger 0.2");
return 0;
}
-
- secs = atoi(cp);
+
+ secs = atoi (cp);
while (cp < limit && (*cp != '.'))
cp++;
@@ -202,18 +205,18 @@ sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
clib_warning ("bugger 0.3");
return 0;
}
-
+
cp++;
usecs = atoi (cp);
- e->timestamp = ((f64)secs) + ((f64)usecs)*1e-6;
-
+ e->timestamp = ((f64) secs) + ((f64) usecs) * 1e-6;
+
/* eat up to third colon */
for (i = 0; i < 3; i++)
{
while (cp < limit && *cp != ':')
- cp++;
+ cp++;
cp++;
}
--cp;
@@ -240,16 +243,16 @@ sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
}
cp += 3;
- if (cp >= limit)
+ if (cp >= limit)
{
clib_warning ("bugger 4");
return 0;
}
-
+
e->cpu = atoi (cp);
cp += 4;
-
- if (cp >= limit)
+
+ if (cp >= limit)
{
clib_warning ("bugger 4");
return 0;
@@ -258,11 +261,11 @@ sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
cp++;
e->pid = atoi (cp);
-
+
for (i = 0; i < 2; i++)
{
while (cp < limit && *cp != ':')
- cp++;
+ cp++;
cp++;
}
--cp;
@@ -273,35 +276,36 @@ sched_event_t *parse_sched_switch_trace (u8 *tdata, u32 *index)
}
cp += 3;
- if (cp >= limit)
+ if (cp >= limit)
{
clib_warning ("bugger 6");
return 0;
}
while (cp < limit && (*cp != ' ' && *cp != '\n'))
{
- vec_add1(task_name, *cp);
+ vec_add1 (task_name, *cp);
cp++;
}
- vec_add1(task_name, 0);
+ vec_add1 (task_name, 0);
/* _vec_len() = 0 in caller */
e->task = task_name;
if (cp < limit)
- cp++;
+ cp++;
*index = cp - tdata;
return e;
}
-static u32 elog_id_for_pid (elog_main_t *em, u8 *name, u32 pid)
+static u32
+elog_id_for_pid (elog_main_t * em, u8 * name, u32 pid)
{
- uword * p, r;
- mhash_t * h = &em->string_table_hash;
+ uword *p, r;
+ mhash_t *h = &em->string_table_hash;
- if (! em->string_table_hash.hash)
+ if (!em->string_table_hash.hash)
mhash_init (h, sizeof (uword), sizeof (pid));
-
+
p = mhash_get (h, &pid);
if (p)
return p[0];
@@ -310,7 +314,8 @@ static u32 elog_id_for_pid (elog_main_t *em, u8 *name, u32 pid)
return r;
}
-void kelog_collect_sched_switch_trace (elog_main_t *em)
+void
+kelog_collect_sched_switch_trace (elog_main_t * em)
{
int enable_fd, data_fd;
char *trace_enable = "/debug/tracing/tracing_enabled";
@@ -323,7 +328,7 @@ void kelog_collect_sched_switch_trace (elog_main_t *em)
u64 nsec_to_add;
u32 index;
f64 clocks_per_sec;
-
+
enable_fd = open (trace_enable, O_RDWR);
if (enable_fd < 0)
{
@@ -331,13 +336,13 @@ void kelog_collect_sched_switch_trace (elog_main_t *em)
return;
}
/* disable kernel tracing */
- if (write (enable_fd, "0\n", 2) != 2)
+ if (write (enable_fd, "0\n", 2) != 2)
{
clib_unix_warning ("disable tracing");
- close(enable_fd);
+ close (enable_fd);
return;
}
- close(enable_fd);
+ close (enable_fd);
/* Read the trace data */
data_fd = open (trace_data, O_RDWR);
@@ -347,7 +352,7 @@ void kelog_collect_sched_switch_trace (elog_main_t *em)
return;
}
- /*
+ /*
* Extract trace into a vector. Note that seq_printf() [kernel]
* is not guaranteed to produce 4096 bytes at a time.
*/
@@ -356,17 +361,17 @@ void kelog_collect_sched_switch_trace (elog_main_t *em)
pos = 0;
while (1)
{
- bytes = read(data_fd, data+pos, 4096);
- if (bytes <= 0)
- break;
+ bytes = read (data_fd, data + pos, 4096);
+ if (bytes <= 0)
+ break;
total_bytes += bytes;
- _vec_len(data) = total_bytes;
+ _vec_len (data) = total_bytes;
- pos = vec_len(data);
- vec_validate(data, vec_len(data)+4095);
+ pos = vec_len (data);
+ vec_validate (data, vec_len (data) + 4095);
}
- vec_add1(data, 0);
+ vec_add1 (data, 0);
/* Synthesize events */
em->is_enabled = 1;
@@ -378,23 +383,33 @@ void kelog_collect_sched_switch_trace (elog_main_t *em)
fake_cpu_clock = evt->timestamp * em->cpu_timer.clocks_per_second;
{
- ELOG_TYPE_DECLARE (e) =
- {
- .format = "%d: %s %s",
- .format_args = "i4T4t4",
- .n_enum_strings = 2,
- .enum_strings = { "running", "wakeup", },
- };
- struct { u32 cpu, string_table_offset, which; } * ed;
-
- ed = elog_event_data_not_inline (em, &__ELOG_TYPE_VAR(e),
- &em->default_track,
- fake_cpu_clock);
- ed->cpu = evt->cpu;
- ed->string_table_offset = elog_id_for_pid (em, evt->task, evt->pid);
- ed->which = evt->type;
+ ELOG_TYPE_DECLARE (e) =
+ {
+ .format = "%d: %s %s",.format_args = "i4T4t4",.n_enum_strings =
+ 2,.enum_strings =
+ {
+ "running", "wakeup",}
+ ,};
+ struct
+ {
+ u32 cpu, string_table_offset, which;
+ } *ed;
+
+ ed = elog_event_data_not_inline (em, &__ELOG_TYPE_VAR (e),
+ &em->default_track, fake_cpu_clock);
+ ed->cpu = evt->cpu;
+ ed->string_table_offset = elog_id_for_pid (em, evt->task, evt->pid);
+ ed->which = evt->type;
}
- _vec_len(evt->task) = 0;
+ _vec_len (evt->task) = 0;
}
em->is_enabled = 0;
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/unix-misc.c b/vppinfra/vppinfra/unix-misc.c
index 4a654066c5b..2928369d52e 100644
--- a/vppinfra/vppinfra/unix-misc.c
+++ b/vppinfra/vppinfra/unix-misc.c
@@ -45,7 +45,8 @@
#include <fcntl.h>
#include <stdio.h> /* for sprintf */
-clib_error_t * unix_file_n_bytes (char * file, uword * result)
+clib_error_t *
+unix_file_n_bytes (char *file, uword * result)
{
struct stat s;
@@ -60,15 +61,16 @@ clib_error_t * unix_file_n_bytes (char * file, uword * result)
return /* no error */ 0;
}
-clib_error_t * unix_file_read_contents (char * file, u8 * result, uword n_bytes)
+clib_error_t *
+unix_file_read_contents (char *file, u8 * result, uword n_bytes)
{
int fd = -1;
uword n_done, n_left;
- clib_error_t * error = 0;
- u8 * v = result;
+ clib_error_t *error = 0;
+ u8 *v = result;
if ((fd = open (file, 0)) < 0)
- return clib_error_return_unix (0, "open `%s'", file);
+ return clib_error_return_unix (0, "open `%s'", file);
n_left = n_bytes;
n_done = 0;
@@ -91,21 +93,24 @@ clib_error_t * unix_file_read_contents (char * file, u8 * result, uword n_bytes)
if (n_left > 0)
{
- error = clib_error_return (0, " `%s' expected to read %wd bytes; read only %wd",
- file, n_bytes, n_bytes - n_left);
+ error =
+ clib_error_return (0,
+ " `%s' expected to read %wd bytes; read only %wd",
+ file, n_bytes, n_bytes - n_left);
goto done;
}
- done:
+done:
close (fd);
return error;
}
-clib_error_t * unix_file_contents (char * file, u8 ** result)
+clib_error_t *
+unix_file_contents (char *file, u8 ** result)
{
uword n_bytes;
- clib_error_t * error = 0;
- u8 * v;
+ clib_error_t *error = 0;
+ u8 *v;
if ((error = unix_file_n_bytes (file, &n_bytes)))
return error;
@@ -123,7 +128,8 @@ clib_error_t * unix_file_contents (char * file, u8 ** result)
return error;
}
-clib_error_t * unix_proc_file_contents (char * file, u8 ** result)
+clib_error_t *
+unix_proc_file_contents (char *file, u8 ** result)
{
u8 *rv = 0;
uword pos;
@@ -135,25 +141,25 @@ clib_error_t * unix_proc_file_contents (char * file, u8 ** result)
if (fd < 0)
return clib_error_return_unix (0, "open `%s'", file);
- vec_validate(rv, 4095);
+ vec_validate (rv, 4095);
pos = 0;
- while (1)
+ while (1)
{
- bytes = read(fd, rv+pos, 4096);
- if (bytes < 0)
- {
- close (fd);
- vec_free (rv);
- return clib_error_return_unix (0, "read '%s'", file);
- }
-
- if (bytes == 0)
- {
- _vec_len(rv) = pos;
- break;
- }
+ bytes = read (fd, rv + pos, 4096);
+ if (bytes < 0)
+ {
+ close (fd);
+ vec_free (rv);
+ return clib_error_return_unix (0, "read '%s'", file);
+ }
+
+ if (bytes == 0)
+ {
+ _vec_len (rv) = pos;
+ break;
+ }
pos += bytes;
- vec_validate(rv, pos+4095);
+ vec_validate (rv, pos + 4095);
}
*result = rv;
close (fd);
@@ -162,20 +168,28 @@ clib_error_t * unix_proc_file_contents (char * file, u8 ** result)
void os_panic (void) __attribute__ ((weak));
-void os_panic (void) { abort (); }
+void
+os_panic (void)
+{
+ abort ();
+}
void os_exit (int) __attribute__ ((weak));
-void os_exit (int code)
-{ exit (code); }
+void
+os_exit (int code)
+{
+ exit (code);
+}
void os_puts (u8 * string, uword string_length, uword is_error)
__attribute__ ((weak));
-void os_puts (u8 * string, uword string_length, uword is_error)
+void
+os_puts (u8 * string, uword string_length, uword is_error)
{
int cpu = os_get_cpu_number ();
- int ncpus = os_get_ncpus();
+ int ncpus = os_get_ncpus ();
char buf[64];
int fd = is_error ? 2 : 1;
struct iovec iovs[2];
@@ -183,7 +197,7 @@ void os_puts (u8 * string, uword string_length, uword is_error)
if (ncpus > 1)
{
- snprintf (buf, sizeof(buf), "%d: ", cpu);
+ snprintf (buf, sizeof (buf), "%d: ", cpu);
iovs[n_iovs].iov_base = buf;
iovs[n_iovs].iov_len = strlen (buf);
@@ -199,13 +213,30 @@ void os_puts (u8 * string, uword string_length, uword is_error)
}
void os_out_of_memory (void) __attribute__ ((weak));
-void os_out_of_memory (void)
-{ os_panic (); }
+void
+os_out_of_memory (void)
+{
+ os_panic ();
+}
uword os_get_cpu_number (void) __attribute__ ((weak));
-uword os_get_cpu_number (void)
-{ return 0; }
+uword
+os_get_cpu_number (void)
+{
+ return 0;
+}
uword os_get_ncpus (void) __attribute__ ((weak));
-uword os_get_ncpus (void)
-{ return 1; }
+uword
+os_get_ncpus (void)
+{
+ return 1;
+}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
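Because the os_* functions above are declared __attribute__ ((weak)), an embedding program can override them with strong definitions. A hedged sketch (not part of the patch; the thread-index variable is hypothetical, and the declarations are assumed to come from <vppinfra/os.h>):

#include <vppinfra/os.h>

static __thread uword my_thread_index;	/* hypothetical, set at thread start */

uword
os_get_cpu_number (void)
{
  return my_thread_index;
}

uword
os_get_ncpus (void)
{
  return 4;			/* e.g. worker threads configured by the application */
}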
diff --git a/vppinfra/vppinfra/unix.h b/vppinfra/vppinfra/unix.h
index 519276888a6..29114cfece6 100644
--- a/vppinfra/vppinfra/unix.h
+++ b/vppinfra/vppinfra/unix.h
@@ -41,16 +41,24 @@
#include <vppinfra/error.h>
/* Number of bytes in a Unix file. */
-clib_error_t * unix_file_n_bytes (char * file, uword * result);
+clib_error_t *unix_file_n_bytes (char *file, uword * result);
/* Read file contents into given buffer. */
-clib_error_t *
-unix_file_read_contents (char * file, u8 * result, uword n_bytes);
+clib_error_t *unix_file_read_contents (char *file, u8 * result,
+ uword n_bytes);
/* Read and return contents of Unix file. */
-clib_error_t * unix_file_contents (char * file, u8 ** result);
+clib_error_t *unix_file_contents (char *file, u8 ** result);
/* As above but for /proc file system on Linux. */
-clib_error_t * unix_proc_file_contents (char * file, u8 ** result);
+clib_error_t *unix_proc_file_contents (char *file, u8 ** result);
#endif /* included_clib_unix_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
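A short usage sketch of the helpers declared above (illustrative only, not part of the patch). It assumes clib_error_report from <vppinfra/error.h> for error handling; the dedicated /proc variant exists because /proc files typically report a zero size to stat(), so the regular stat-then-read path cannot size them up front.

#include <vppinfra/unix.h>
#include <vppinfra/error.h>
#include <vppinfra/format.h>

static void
show_proc_file (char *path)
{
  u8 *contents = 0;
  clib_error_t *error;

  error = unix_proc_file_contents (path, &contents);
  if (error)
    {
      clib_error_report (error);
      return;
    }

  fformat (stdout, "%s: %d bytes\n", path, vec_len (contents));
  vec_free (contents);
}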
diff --git a/vppinfra/vppinfra/valgrind.h b/vppinfra/vppinfra/valgrind.h
index 582a3ac538b..e74d7e828be 100644
--- a/vppinfra/vppinfra/valgrind.h
+++ b/vppinfra/vppinfra/valgrind.h
@@ -21,16 +21,16 @@
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
- 2. The origin of this software must not be misrepresented; you must
- not claim that you wrote the original software. If you use this
- software in a product, an acknowledgment in the product
+ 2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
documentation would be appreciated but is not required.
3. Altered source versions must be plainly marked as such, and must
not be misrepresented as being the original software.
- 4. The name of the author may not be used to endorse or promote
- products derived from this software without specific prior written
+ 4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
@@ -52,13 +52,13 @@
the terms of the GNU General Public License, version 2. See the
COPYING file in the source distribution for details.
- ----------------------------------------------------------------
+ ----------------------------------------------------------------
*/
/* This file is for inclusion into client (your!) code.
- You can use these macros to manipulate and query Valgrind's
+ You can use these macros to manipulate and query Valgrind's
execution inside your own programs.
The resulting executables will still run without Valgrind, just a
@@ -94,27 +94,27 @@
#if defined(_AIX) && defined(__64BIT__)
-# define PLAT_ppc64_aix5 1
+#define PLAT_ppc64_aix5 1
#elif defined(_AIX) && !defined(__64BIT__)
-# define PLAT_ppc32_aix5 1
+#define PLAT_ppc32_aix5 1
#elif defined(__APPLE__) && defined(__i386__)
-# define PLAT_x86_darwin 1
+#define PLAT_x86_darwin 1
#elif defined(__APPLE__) && defined(__x86_64__)
-# define PLAT_amd64_darwin 1
+#define PLAT_amd64_darwin 1
#elif defined(__i386__)
-# define PLAT_x86_linux 1
+#define PLAT_x86_linux 1
#elif defined(__x86_64__)
-# define PLAT_amd64_linux 1
+#define PLAT_amd64_linux 1
#elif defined(__powerpc__) && !defined(__powerpc64__)
-# define PLAT_ppc32_linux 1
+#define PLAT_ppc32_linux 1
#elif defined(__powerpc__) && defined(__powerpc64__)
-# define PLAT_ppc64_linux 1
+#define PLAT_ppc64_linux 1
#else
/* If we're not compiling for our target platform, don't generate
any inline asms. */
-# if !defined(NVALGRIND)
-# define NVALGRIND 1
-# endif
+#if !defined(NVALGRIND)
+#define NVALGRIND 1
+#endif
#endif
@@ -135,7 +135,7 @@
(_zzq_rlval) = (_zzq_default); \
}
-#else /* ! NVALGRIND */
+#else /* ! NVALGRIND */
/* The following defines the magic code sequences which the JITter
spots and handles magically. Don't look too closely at them as
@@ -150,8 +150,8 @@
this is executed not under Valgrind. Args are passed in a memory
block, and so there's no intrinsic limit to the number that could
be passed, but it's currently five.
-
- The macro args are:
+
+ The macro args are:
_zzq_rlval result lvalue
_zzq_default default value (result returned when running on real CPU)
_zzq_request request code
@@ -176,11 +176,11 @@
#if defined(PLAT_x86_linux) || defined(PLAT_x86_darwin)
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
+typedef struct
+{
+ unsigned int nraddr; /* where's the code? */
+}
+OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"roll $3, %%edi ; roll $13, %%edi\n\t" \
@@ -230,11 +230,11 @@ typedef
#if defined(PLAT_amd64_linux) || defined(PLAT_amd64_darwin)
-typedef
- struct {
- unsigned long long int nraddr; /* where's the code? */
- }
- OrigFn;
+typedef struct
+{
+ unsigned long long int nraddr; /* where's the code? */
+}
+OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rolq $3, %%rdi ; rolq $13, %%rdi\n\t" \
@@ -284,11 +284,11 @@ typedef
#if defined(PLAT_ppc32_linux)
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- }
- OrigFn;
+typedef struct
+{
+ unsigned int nraddr; /* where's the code? */
+}
+OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
@@ -344,12 +344,12 @@ typedef
#if defined(PLAT_ppc64_linux)
-typedef
- struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
- }
- OrigFn;
+typedef struct
+{
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+}
+OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
@@ -410,12 +410,12 @@ typedef
#if defined(PLAT_ppc32_aix5)
-typedef
- struct {
- unsigned int nraddr; /* where's the code? */
- unsigned int r2; /* what tocptr do we need? */
- }
- OrigFn;
+typedef struct
+{
+ unsigned int nraddr; /* where's the code? */
+ unsigned int r2; /* what tocptr do we need? */
+}
+OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rlwinm 0,0,3,0,0 ; rlwinm 0,0,13,0,0\n\t" \
@@ -482,12 +482,12 @@ typedef
#if defined(PLAT_ppc64_aix5)
-typedef
- struct {
- unsigned long long int nraddr; /* where's the code? */
- unsigned long long int r2; /* what tocptr do we need? */
- }
- OrigFn;
+typedef struct
+{
+ unsigned long long int nraddr; /* where's the code? */
+ unsigned long long int r2; /* what tocptr do we need? */
+}
+OrigFn;
#define __SPECIAL_INSTRUCTION_PREAMBLE \
"rotldi 0,0,3 ; rotldi 0,0,13\n\t" \
@@ -1501,7 +1501,7 @@ typedef
"r0", "r2", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10", \
"r11", "r12", "r13"
-/* These CALL_FN_ macros assume that on ppc32-linux,
+/* These CALL_FN_ macros assume that on ppc32-linux,
sizeof(unsigned long) == 4. */
#define CALL_FN_W_v(lval, orig) \
@@ -3601,58 +3601,58 @@ typedef
#define VG_IS_TOOL_USERREQ(a, b, v) \
(VG_USERREQ_TOOL_BASE(a,b) == ((v) & 0xffff0000))
-/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
+/* !! ABIWARNING !! ABIWARNING !! ABIWARNING !! ABIWARNING !!
This enum comprises an ABI exported by Valgrind to programs
which use client requests. DO NOT CHANGE THE ORDER OF THESE
ENTRIES, NOR DELETE ANY -- add new ones at the end. */
-typedef
- enum { VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
- VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
-
- /* These allow any function to be called from the simulated
- CPU but run on the real CPU. Nb: the first arg passed to
- the function is always the ThreadId of the running
- thread! So CLIENT_CALL0 actually requires a 1 arg
- function, etc. */
- VG_USERREQ__CLIENT_CALL0 = 0x1101,
- VG_USERREQ__CLIENT_CALL1 = 0x1102,
- VG_USERREQ__CLIENT_CALL2 = 0x1103,
- VG_USERREQ__CLIENT_CALL3 = 0x1104,
-
- /* Can be useful in regression testing suites -- eg. can
- send Valgrind's output to /dev/null and still count
- errors. */
- VG_USERREQ__COUNT_ERRORS = 0x1201,
-
- /* These are useful and can be interpreted by any tool that
- tracks malloc() et al, by using vg_replace_malloc.c. */
- VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
- VG_USERREQ__FREELIKE_BLOCK = 0x1302,
- /* Memory pool support. */
- VG_USERREQ__CREATE_MEMPOOL = 0x1303,
- VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
- VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
- VG_USERREQ__MEMPOOL_FREE = 0x1306,
- VG_USERREQ__MEMPOOL_TRIM = 0x1307,
- VG_USERREQ__MOVE_MEMPOOL = 0x1308,
- VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
- VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
-
- /* Allow printfs to valgrind log. */
- VG_USERREQ__PRINTF = 0x1401,
- VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
-
- /* Stack support. */
- VG_USERREQ__STACK_REGISTER = 0x1501,
- VG_USERREQ__STACK_DEREGISTER = 0x1502,
- VG_USERREQ__STACK_CHANGE = 0x1503,
-
- /* Wine support */
- VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601
- } Vg_ClientRequest;
+typedef enum
+{ VG_USERREQ__RUNNING_ON_VALGRIND = 0x1001,
+ VG_USERREQ__DISCARD_TRANSLATIONS = 0x1002,
+
+ /* These allow any function to be called from the simulated
+ CPU but run on the real CPU. Nb: the first arg passed to
+ the function is always the ThreadId of the running
+ thread! So CLIENT_CALL0 actually requires a 1 arg
+ function, etc. */
+ VG_USERREQ__CLIENT_CALL0 = 0x1101,
+ VG_USERREQ__CLIENT_CALL1 = 0x1102,
+ VG_USERREQ__CLIENT_CALL2 = 0x1103,
+ VG_USERREQ__CLIENT_CALL3 = 0x1104,
+
+ /* Can be useful in regression testing suites -- eg. can
+ send Valgrind's output to /dev/null and still count
+ errors. */
+ VG_USERREQ__COUNT_ERRORS = 0x1201,
+
+ /* These are useful and can be interpreted by any tool that
+ tracks malloc() et al, by using vg_replace_malloc.c. */
+ VG_USERREQ__MALLOCLIKE_BLOCK = 0x1301,
+ VG_USERREQ__FREELIKE_BLOCK = 0x1302,
+ /* Memory pool support. */
+ VG_USERREQ__CREATE_MEMPOOL = 0x1303,
+ VG_USERREQ__DESTROY_MEMPOOL = 0x1304,
+ VG_USERREQ__MEMPOOL_ALLOC = 0x1305,
+ VG_USERREQ__MEMPOOL_FREE = 0x1306,
+ VG_USERREQ__MEMPOOL_TRIM = 0x1307,
+ VG_USERREQ__MOVE_MEMPOOL = 0x1308,
+ VG_USERREQ__MEMPOOL_CHANGE = 0x1309,
+ VG_USERREQ__MEMPOOL_EXISTS = 0x130a,
+
+ /* Allow printfs to valgrind log. */
+ VG_USERREQ__PRINTF = 0x1401,
+ VG_USERREQ__PRINTF_BACKTRACE = 0x1402,
+
+ /* Stack support. */
+ VG_USERREQ__STACK_REGISTER = 0x1501,
+ VG_USERREQ__STACK_DEREGISTER = 0x1502,
+ VG_USERREQ__STACK_CHANGE = 0x1503,
+
+ /* Wine support */
+ VG_USERREQ__LOAD_PDB_DEBUGINFO = 0x1601
+} Vg_ClientRequest;
#if !defined(__GNUC__)
-# define __extension__ /* */
+#define __extension__ /* */
#endif
/* Returns the number of Valgrinds this code is running under. That
@@ -3687,41 +3687,41 @@ typedef
#if defined(NVALGRIND)
-# define VALGRIND_PRINTF(...)
-# define VALGRIND_PRINTF_BACKTRACE(...)
+#define VALGRIND_PRINTF(...)
+#define VALGRIND_PRINTF_BACKTRACE(...)
#else /* NVALGRIND */
/* Modern GCC will optimize the static routine out if unused,
and unused attribute will shut down warnings about it. */
-static int VALGRIND_PRINTF(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
+static int VALGRIND_PRINTF (const char *format, ...)
+ __attribute__ ((format (__printf__, 1, 2), __unused__));
static int
-VALGRIND_PRINTF(const char *format, ...)
+VALGRIND_PRINTF (const char *format, ...)
{
- unsigned long _qzz_res;
- va_list vargs;
- va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF,
- (unsigned long)format, (unsigned long)vargs,
- 0, 0, 0);
- va_end(vargs);
- return (int)_qzz_res;
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start (vargs, format);
+ VALGRIND_DO_CLIENT_REQUEST (_qzz_res, 0, VG_USERREQ__PRINTF,
+ (unsigned long) format, (unsigned long) vargs,
+ 0, 0, 0);
+ va_end (vargs);
+ return (int) _qzz_res;
}
-static int VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- __attribute__((format(__printf__, 1, 2), __unused__));
+static int VALGRIND_PRINTF_BACKTRACE (const char *format, ...)
+ __attribute__ ((format (__printf__, 1, 2), __unused__));
static int
-VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
+VALGRIND_PRINTF_BACKTRACE (const char *format, ...)
{
- unsigned long _qzz_res;
- va_list vargs;
- va_start(vargs, format);
- VALGRIND_DO_CLIENT_REQUEST(_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
- (unsigned long)format, (unsigned long)vargs,
- 0, 0, 0);
- va_end(vargs);
- return (int)_qzz_res;
+ unsigned long _qzz_res;
+ va_list vargs;
+ va_start (vargs, format);
+ VALGRIND_DO_CLIENT_REQUEST (_qzz_res, 0, VG_USERREQ__PRINTF_BACKTRACE,
+ (unsigned long) format, (unsigned long) vargs,
+ 0, 0, 0);
+ va_end (vargs);
+ return (int) _qzz_res;
}
#endif /* NVALGRIND */
@@ -3729,7 +3729,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
/* These requests allow control to move from the simulated CPU to the
real CPU, calling an arbitary function.
-
+
Note that the current ThreadId is inserted as the first argument.
So this call:
@@ -3834,7 +3834,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
- It marks the block as being addressable and undefined (if 'is_zeroed' is
not set), or addressable and defined (if 'is_zeroed' is set). This
controls how accesses to the block by the program are handled.
-
+
'addr' is the start of the usable block (ie. after any
redzone), 'sizeB' is its size. 'rzB' is the redzone size if the allocator
can apply redzones -- these are blocks of padding at the start and end of
@@ -3842,7 +3842,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
Valgrind will spot block overruns. `is_zeroed' indicates if the memory is
zeroed (or filled with another predictable value), as is the case for
calloc().
-
+
VALGRIND_MALLOCLIKE_BLOCK should be put immediately after the point where a
heap block -- that will be used by the client program -- is allocated.
It's best to put it at the outermost level of the allocator if possible;
@@ -3888,7 +3888,7 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
Note: there is currently no VALGRIND_REALLOCLIKE_BLOCK client request; it
has to be emulated with MALLOCLIKE/FREELIKE and memory copying.
-
+
Ignored if addr == 0.
*/
#define VALGRIND_MALLOCLIKE_BLOCK(addr, sizeB, rzB, is_zeroed) \
@@ -4019,4 +4019,12 @@ VALGRIND_PRINTF_BACKTRACE(const char *format, ...)
#undef PLAT_ppc32_aix5
#undef PLAT_ppc64_aix5
-#endif /* __VALGRIND_H */
+#endif /* __VALGRIND_H */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
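
The point of the client-request machinery reformatted above is that the same binary runs with or without Valgrind. A minimal sketch using the VALGRIND_PRINTF wrapper defined in this header (include path as in this tree; assumes nothing beyond what the header itself provides):

#include <vppinfra/valgrind.h>

static void
note_progress (int iteration)
{
  /* Appears in the Valgrind log only when running under Valgrind;
     otherwise the request is a harmless no-op, and with NVALGRIND
     defined the macro expands to nothing at all. */
  VALGRIND_PRINTF ("reached iteration %d\n", iteration);
}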
diff --git a/vppinfra/vppinfra/vec.c b/vppinfra/vppinfra/vec.c
index 912038d56eb..f711679b0a4 100644
--- a/vppinfra/vppinfra/vec.c
+++ b/vppinfra/vppinfra/vec.c
@@ -40,23 +40,25 @@
/* Vector resize operator. Called as needed by various macros such as
vec_add1() when we need to allocate memory. */
-void * vec_resize_allocate_memory (void * v,
- word length_increment,
- uword data_bytes,
- uword header_bytes,
- uword data_align)
+void *
+vec_resize_allocate_memory (void *v,
+ word length_increment,
+ uword data_bytes,
+ uword header_bytes, uword data_align)
{
- vec_header_t * vh = _vec_find (v);
+ vec_header_t *vh = _vec_find (v);
uword old_alloc_bytes, new_alloc_bytes;
- void * old, * new;
+ void *old, *new;
header_bytes = vec_header_bytes (header_bytes);
data_bytes += header_bytes;
- if (! v)
+ if (!v)
{
- new = clib_mem_alloc_aligned_at_offset (data_bytes, data_align, header_bytes);
+ new =
+ clib_mem_alloc_aligned_at_offset (data_bytes, data_align,
+ header_bytes);
data_bytes = clib_mem_size (new);
memset (new, 0, data_bytes);
v = new + header_bytes;
@@ -80,12 +82,15 @@ void * vec_resize_allocate_memory (void * v,
if (new_alloc_bytes < data_bytes)
new_alloc_bytes = data_bytes;
- new = clib_mem_alloc_aligned_at_offset (new_alloc_bytes, data_align, header_bytes);
+ new =
+ clib_mem_alloc_aligned_at_offset (new_alloc_bytes, data_align,
+ header_bytes);
/* FIXME fail gracefully. */
- if (! new)
- clib_panic ("vec_resize fails, length increment %d, data bytes %d, alignment %d",
- length_increment, data_bytes, data_align);
+ if (!new)
+ clib_panic
+ ("vec_resize fails, length increment %d, data bytes %d, alignment %d",
+ length_increment, data_bytes, data_align);
clib_memcpy (new, old, old_alloc_bytes);
clib_mem_free (old);
@@ -98,10 +103,13 @@ void * vec_resize_allocate_memory (void * v,
memset (v + old_alloc_bytes, 0, new_alloc_bytes - old_alloc_bytes);
return v + header_bytes;
-}
+}
-uword clib_mem_is_vec_h (void * v, uword header_bytes)
-{ return clib_mem_is_heap_object (vec_header (v, header_bytes)); }
+uword
+clib_mem_is_vec_h (void *v, uword header_bytes)
+{
+ return clib_mem_is_heap_object (vec_header (v, header_bytes));
+}
/** \cond */
@@ -109,16 +117,18 @@ uword clib_mem_is_vec_h (void * v, uword header_bytes)
#include <stdio.h>
-void main (int argc, char * argv[])
+void
+main (int argc, char *argv[])
{
word n = atoi (argv[1]);
- word i, * x = 0;
+ word i, *x = 0;
- typedef struct {
+ typedef struct
+ {
word x, y, z;
} FOO;
- FOO * foos = vec_init (FOO, 10), * f;
+ FOO *foos = vec_init (FOO, 10), *f;
vec_validate (foos, 100);
foos[100].x = 99;
@@ -128,7 +138,9 @@ void main (int argc, char * argv[])
{
vec_add1 (x, i);
vec_add2 (foos, f, 1);
- f->x = 2*i; f->y = 3*i; f->z = 4*i;
+ f->x = 2 * i;
+ f->y = 3 * i;
+ f->z = 4 * i;
}
{
@@ -149,3 +161,11 @@ void main (int argc, char * argv[])
}
#endif
/** \endcond */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
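
A usage sketch of the resize path above, assuming a process in which the clib heap is already initialized (as in any VPP binary): vec_add1() may fall through to vec_resize_allocate_memory(), which can move the vector, so callers always use the updated pointer.

#include <vppinfra/vec.h>

static u32 *
build_squares (u32 n)
{
  u32 *v = 0;			/* a NULL pointer is a valid zero-length vector */
  u32 i;

  for (i = 0; i < n; i++)
    vec_add1 (v, i * i);	/* may hit the slow resize path and move v */

  return v;			/* caller eventually calls vec_free (v) */
}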
diff --git a/vppinfra/vppinfra/vec.h b/vppinfra/vppinfra/vec.h
index 7017358f9b2..353a06b0530 100644
--- a/vppinfra/vppinfra/vec.h
+++ b/vppinfra/vppinfra/vec.h
@@ -38,8 +38,8 @@
#ifndef included_vec_h
#define included_vec_h
-#include <vppinfra/clib.h> /* word, etc */
-#include <vppinfra/mem.h> /* clib_mem_free */
+#include <vppinfra/clib.h> /* word, etc */
+#include <vppinfra/mem.h> /* clib_mem_free */
#include <vppinfra/string.h> /* memcpy, memmove */
#include <vppinfra/vec_bootstrap.h>
@@ -60,7 +60,7 @@
~~~~~~~~
The user pointer contains the address of vector element # 0. Null
- pointer vectors are valid and mean a zero length vector.
+ pointer vectors are valid and mean a zero length vector.
You can reset the length of an allocated vector to zero via the
vec_reset_length(v) macro, or by setting the vector length field to
@@ -81,10 +81,10 @@
and _h variants supporting non zero length vector headers.
The _ha variants support both.
- Standard programming error: memorize a pointer to the ith element
+ Standard programming error: memorize a pointer to the ith element
of a vector then expand it. Vectors expand by 3/2, so such code
may appear to work for a period of time. Memorize vector indices
- which are invariant.
+ which are invariant.
*/
/** \brief Low-level resize allocation function, usually not called directly
@@ -96,11 +96,10 @@
@param data_align alignment (may be zero)
@return v_prime pointer to resized vector, may or may not equal v
*/
-void * vec_resize_allocate_memory (void * v,
- word length_increment,
- uword data_bytes,
- uword header_bytes,
- uword data_align);
+void *vec_resize_allocate_memory (void *v,
+ word length_increment,
+ uword data_bytes,
+ uword header_bytes, uword data_align);
/** \brief Low-level vector resize function, usually not called directly
@@ -113,13 +112,11 @@ void * vec_resize_allocate_memory (void * v,
*/
always_inline void *
-_vec_resize (void * v,
+_vec_resize (void *v,
word length_increment,
- uword data_bytes,
- uword header_bytes,
- uword data_align)
+ uword data_bytes, uword header_bytes, uword data_align)
{
- vec_header_t * vh = _vec_find (v);
+ vec_header_t *vh = _vec_find (v);
uword new_data_bytes, aligned_header_bytes;
aligned_header_bytes = vec_header_bytes (header_bytes);
@@ -128,7 +125,7 @@ _vec_resize (void * v,
if (PREDICT_TRUE (v != 0))
{
- void * p = v - aligned_header_bytes;
+ void *p = v - aligned_header_bytes;
/* Vector header must start heap object. */
ASSERT (clib_mem_is_heap_object (p));
@@ -142,28 +139,33 @@ _vec_resize (void * v,
}
/* Slow path: call helper function. */
- return vec_resize_allocate_memory (v, length_increment, data_bytes, header_bytes,
- clib_max (sizeof (vec_header_t), data_align));
+ return vec_resize_allocate_memory (v, length_increment, data_bytes,
+ header_bytes,
+ clib_max (sizeof (vec_header_t),
+ data_align));
}
-/** \brief Predicate function, says whether the supplied vector is a clib heap
- object (general version).
+/** \brief Predicate function, says whether the supplied vector is a clib heap
+ object (general version).
@param v pointer to a vector
@param header_bytes vector header size in bytes (may be zero)
@return 0 or 1
-*/
-uword clib_mem_is_vec_h (void * v, uword header_bytes);
+*/
+uword clib_mem_is_vec_h (void *v, uword header_bytes);
-/** \brief Predicate function, says whether the supplied vector is a clib heap
+/** \brief Predicate function, says whether the supplied vector is a clib heap
object
@param v pointer to a vector
@return 0 or 1
-*/
-always_inline uword clib_mem_is_vec (void * v)
-{ return clib_mem_is_vec_h (v, 0); }
+*/
+always_inline uword
+clib_mem_is_vec (void *v)
+{
+ return clib_mem_is_vec_h (v, 0);
+}
/* Local variable naming macro (prevents collisions with other macro naming). */
#define _v(var) _vec_##var
@@ -211,7 +213,7 @@ do { \
#define vec_resize_aligned(V,N,A) vec_resize_ha(V,N,0,A)
-/** \brief Allocate space for N more elements
+/** \brief Allocate space for N more elements
@param V pointer to a vector
@param N number of elements to add
@@ -227,8 +229,8 @@ do { \
_vec_len (V) = _v(l); \
} while (0)
-/** \brief Allocate space for N more elements
- (no header, unspecified alignment)
+/** \brief Allocate space for N more elements
+ (no header, unspecified alignment)
@param V pointer to a vector
@param N number of elements to add
@@ -258,7 +260,7 @@ do { \
_vec_resize ((T *) 0, _v(n), _v(n) * sizeof (T), (H), (A)); \
})
-/** \brief Create new vector of given type and length
+/** \brief Create new vector of given type and length
(unspecified alignment, no header).
@param T type of elements in new vector
@@ -266,8 +268,8 @@ do { \
@return V new vector
*/
#define vec_new(T,N) vec_new_ha(T,N,0,0)
-/** \brief Create new vector of given type and length
- (alignment specified, no header).
+/** \brief Create new vector of given type and length
+ (alignment specified, no header).
@param T type of elements in new vector
@param N number of elements to add
@@ -291,7 +293,7 @@ do { \
} \
} while (0)
-/** \brief Free vector's memory (no header).
+/** \brief Free vector's memory (no header).
@param V pointer to a vector
@return V (value-result parameter, V=0)
*/
@@ -343,13 +345,13 @@ do { \
/** \brief Copy a vector, memcpy wrapper. Assumes sizeof(SRC[0]) ==
sizeof(DST[0])
- @param DST destination
+ @param DST destination
@param SRC source
*/
#define vec_copy(DST,SRC) clib_memcpy (DST, SRC, vec_len (DST) * \
sizeof ((DST)[0]))
-/** \brief Clone a vector. Make a new vector with the
+/** \brief Clone a vector. Make a new vector with the
same size as a given vector but possibly with a different type.
@param NEW_V pointer to new vector
@@ -364,7 +366,7 @@ do { \
/** \brief Make sure vector is long enough for given index (general version).
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param I vector index which will be valid upon return
@param H header size in bytes (may be zero)
@param A alignment (may be zero)
@@ -384,19 +386,19 @@ do { \
} \
} while (0)
-/** \brief Make sure vector is long enough for given index
+/** \brief Make sure vector is long enough for given index
(no header, unspecified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param I vector index which will be valid upon return
@return V (value-result macro parameter)
*/
#define vec_validate(V,I) vec_validate_ha(V,I,0,0)
-/** \brief Make sure vector is long enough for given index
+/** \brief Make sure vector is long enough for given index
(no header, specified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param I vector index which will be valid upon return
@param A alignment (may be zero)
@return V (value-result macro parameter)
@@ -404,10 +406,10 @@ do { \
#define vec_validate_aligned(V,I,A) vec_validate_ha(V,I,0,A)
-/** \brief Make sure vector is long enough for given index
+/** \brief Make sure vector is long enough for given index
and initialize empty space (general version)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param I vector index which will be valid upon return
@param INIT initial value (can be a complex expression!)
@param H header size in bytes (may be zero)
@@ -429,10 +431,10 @@ do { \
} \
} while (0)
-/** \brief Make sure vector is long enough for given index
+/** \brief Make sure vector is long enough for given index
and initialize empty space (no header, unspecified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param I vector index which will be valid upon return
@param INIT initial value (can be a complex expression!)
@param H header size in bytes (may be zero)
@@ -443,10 +445,10 @@ do { \
#define vec_validate_init_empty(V,I,INIT) \
vec_validate_init_empty_ha(V,I,INIT,0,0)
-/** \brief Make sure vector is long enough for given index
+/** \brief Make sure vector is long enough for given index
and initialize empty space (no header, alignment alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param I vector index which will be valid upon return
@param INIT initial value (can be a complex expression!)
@param H header size in bytes (may be zero)
@@ -456,7 +458,7 @@ do { \
#define vec_validate_init_empty_aligned(V,I,A) \
vec_validate_init_empty_ha(V,I,INIT,0,A)
-/** \brief Add 1 element to end of vector (general version).
+/** \brief Add 1 element to end of vector (general version).
@param V pointer to a vector
@param E element to add
@@ -471,7 +473,7 @@ do { \
(V)[_v(l)] = (E); \
} while (0)
-/** \brief Add 1 element to end of vector (unspecified alignment).
+/** \brief Add 1 element to end of vector (unspecified alignment).
@param V pointer to a vector
@param E element to add
@@ -479,7 +481,7 @@ do { \
*/
#define vec_add1(V,E) vec_add1_ha(V,E,0,0)
-/** \brief Add 1 element to end of vector (alignment specified).
+/** \brief Add 1 element to end of vector (alignment specified).
@param V pointer to a vector
@param E element to add
@@ -489,8 +491,8 @@ do { \
*/
#define vec_add1_aligned(V,E,A) vec_add1_ha(V,E,0,A)
-/** \brief Add N elements to end of vector V,
- return pointer to new elements in P. (general version)
+/** \brief Add N elements to end of vector V,
+ return pointer to new elements in P. (general version)
@param V pointer to a vector
@param P pointer to new vector element(s)
@@ -507,7 +509,7 @@ do { \
P = (V) + _v(l); \
} while (0)
-/** \brief Add N elements to end of vector V,
+/** \brief Add N elements to end of vector V,
return pointer to new elements in P. (no header, unspecified alignment)
@param V pointer to a vector
@@ -518,7 +520,7 @@ do { \
#define vec_add2(V,P,N) vec_add2_ha(V,P,N,0,0)
-/** \brief Add N elements to end of vector V,
+/** \brief Add N elements to end of vector V,
return pointer to new elements in P. (no header, alignment specified)
@param V pointer to a vector
@@ -530,7 +532,7 @@ do { \
#define vec_add2_aligned(V,P,N,A) vec_add2_ha(V,P,N,0,A)
-/** \brief Add N elements to end of vector V (general version)
+/** \brief Add N elements to end of vector V (general version)
@param V pointer to a vector
@param E pointer to element(s) to add
@@ -566,7 +568,7 @@ do { \
*/
#define vec_add_aligned(V,E,N,A) vec_add_ha(V,E,N,0,A)
-/** \brief Returns last element of a vector and decrements its length
+/** \brief Returns last element of a vector and decrements its length
@param V pointer to a vector
@return E element removed from the end of the vector
@@ -580,10 +582,10 @@ do { \
(V)[_v(l)]; \
})
-/** \brief Set E to the last element of a vector, decrement vector length
+/** \brief Set E to the last element of a vector, decrement vector length
@param V pointer to a vector
@param E pointer to the last vector element
- @return E element removed from the end of the vector
+ @return E element removed from the end of the vector
(value-result macro parameter
*/
@@ -594,10 +596,10 @@ do { \
_v(l) > 0; \
})
-/** \brief Insert N vector elements starting at element M,
- initialize new elements (general version).
+/** \brief Insert N vector elements starting at element M,
+ initialize new elements (general version).
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param N number of elements to insert
@param M insertion point
@param INIT initial value (can be a complex expression!)
@@ -621,10 +623,10 @@ do { \
memset ((V) + _v(m), INIT, _v(n) * sizeof ((V)[0])); \
} while (0)
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
initialize new elements to zero (general version)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param N number of elements to insert
@param M insertion point
@param H header size in bytes (may be zero)
@@ -633,20 +635,20 @@ do { \
*/
#define vec_insert_ha(V,N,M,H,A) vec_insert_init_empty_ha(V,N,M,0,H,A)
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
initialize new elements to zero (no header, unspecified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param N number of elements to insert
@param M insertion point
@return V (value-result macro parameter)
*/
#define vec_insert(V,N,M) vec_insert_ha(V,N,M,0,0)
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
initialize new elements to zero (no header, alignment specified)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param N number of elements to insert
@param M insertion point
@param A alignment (may be zero)
@@ -654,10 +656,10 @@ do { \
*/
#define vec_insert_aligned(V,N,M,A) vec_insert_ha(V,N,M,0,A)
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
initialize new elements (no header, unspecified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param N number of elements to insert
@param M insertion point
@param INIT initial value (can be a complex expression!)
@@ -668,10 +670,10 @@ do { \
vec_insert_init_empty_ha(V,N,M,INIT,0,0)
/* Resize vector by N elements starting from element M, initialize new elements to INIT (alignment specified, no header). */
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
initialize new elements (no header, specified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param N number of elements to insert
@param M insertion point
@param INIT initial value (can be a complex expression!)
@@ -681,10 +683,10 @@ do { \
#define vec_insert_init_empty_aligned(V,N,M,INIT,A) \
vec_insert_init_empty_ha(V,N,M,INIT,0,A)
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
insert given elements (general version)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param E element(s) to insert
@param N number of elements to insert
@param M insertion point
@@ -710,10 +712,10 @@ do { \
_v(n) * sizeof ((V)[0])); \
} while (0)
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
insert given elements (no header, unspecified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param E element(s) to insert
@param N number of elements to insert
@param M insertion point
@@ -721,10 +723,10 @@ do { \
*/
#define vec_insert_elts(V,E,N,M) vec_insert_elts_ha(V,E,N,M,0,0)
-/** \brief Insert N vector elements starting at element M,
+/** \brief Insert N vector elements starting at element M,
insert given elements (no header, specified alignment)
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param E element(s) to insert
@param N number of elements to insert
@param M insertion point
@@ -733,7 +735,7 @@ do { \
*/
#define vec_insert_elts_aligned(V,E,N,M,A) vec_insert_elts_ha(V,E,N,M,0,A)
-/** \brief Delete N elements starting at element M
+/** \brief Delete N elements starting at element M
@param V pointer to a vector
@param N number of elements to delete
@@ -773,7 +775,7 @@ do { \
@param V1 target vector
@param V2 vector to append
*/
-
+
#define vec_append(v1,v2) \
do { \
uword _v(l1) = vec_len (v1); \
@@ -789,7 +791,7 @@ do { \
@param V2 vector to append
@param align required alignment
*/
-
+
#define vec_append_aligned(v1,v2,align) \
do { \
uword _v(l1) = vec_len (v1); \
@@ -869,8 +871,8 @@ do { \
(vec_len (v1) == vec_len (v2) && ! memcmp ((v1), (v2), vec_len (v1) * sizeof ((v1)[0])))
/** \brief Compare two vectors (only applicable to vectors of signed numbers).
- Used in qsort compare functions.
-
+ Used in qsort compare functions.
+
@param v1 Pointer to a vector
@param v2 Pointer to a vector
@return -1, 0, +1
@@ -902,7 +904,7 @@ do { \
/** \brief Make a vector containing a NULL terminated c-string.
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@param S pointer to string buffer.
@param L string length (NOT including the terminating NULL; a la strlen())
*/
@@ -918,7 +920,7 @@ do { \
/** \brief Test whether a vector is a NULL terminated c-string.
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@return BOOLEAN indicating if the vector c-string is null terminated.
*/
#define vec_c_string_is_terminated(V) \
@@ -926,7 +928,7 @@ do { \
/** \brief (If necessary) NULL terminate a vector containing a c-string.
- @param V (possibly NULL) pointer to a vector.
+ @param V (possibly NULL) pointer to a vector.
@return V (value-result macro parameter)
*/
#define vec_terminate_c_string(V) \
@@ -941,3 +943,11 @@ do { \
#endif /* included_vec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
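
A short sketch of the "standard programming error" called out in the header comment above, assuming a vppinfra environment and a non-empty vector: pointers into a vector are invalidated when it expands, indices are not.

#include <vppinfra/vec.h>

static void
index_not_pointer (u32 * v)
{
  uword i = vec_len (v) - 1;	/* remember an index ... */
  u32 *p = &v[i];		/* ... not a pointer into the vector */

  vec_add1 (v, 42);		/* expansion by 3/2 may move the whole vector */

  v[i] += 1;			/* still correct: indices survive expansion */
  /* *p may now point into freed memory; that is the bug described above. */
  (void) p;
}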
diff --git a/vppinfra/vppinfra/vec_bootstrap.h b/vppinfra/vppinfra/vec_bootstrap.h
index fc99d3e3222..3b8c770744c 100644
--- a/vppinfra/vppinfra/vec_bootstrap.h
+++ b/vppinfra/vppinfra/vec_bootstrap.h
@@ -38,7 +38,7 @@
#ifndef included_clib_vec_bootstrap_h
#define included_clib_vec_bootstrap_h
-/** \file
+/** \file
Vector bootsrap header file
*/
@@ -48,11 +48,12 @@
/** \brief vector header structure
Bookeeping header preceding vector elements in memory.
- User header information may preceed standard vec header.
+ User header information may preceed standard vec header.
If you change u32 len -> u64 len, single vectors can
exceed 2**32 elements. Clib heaps are vectors. */
-typedef struct {
+typedef struct
+{
#if CLIB_VEC64 > 0
u64 len;
#else
@@ -64,7 +65,7 @@ typedef struct {
/** \brief Find the vector header
Given the user's pointer to a vector, find the corresponding
- vector header
+ vector header
@param v pointer to a vector
@return pointer to the vector's vector_header_t
@@ -76,27 +77,34 @@ typedef struct {
always_inline uword
vec_header_bytes (uword header_bytes)
-{ return round_pow2 (header_bytes + sizeof (vec_header_t), sizeof (vec_header_t)); }
+{
+ return round_pow2 (header_bytes + sizeof (vec_header_t),
+ sizeof (vec_header_t));
+}
/** \brief Find a user vector header
-
+
Finds the user header of a vector with unspecified alignment given
the user pointer to the vector.
*/
always_inline void *
-vec_header (void * v, uword header_bytes)
-{ return v - vec_header_bytes (header_bytes); }
+vec_header (void *v, uword header_bytes)
+{
+ return v - vec_header_bytes (header_bytes);
+}
/** \brief Find the end of user vector header
-
- Finds the end of the user header of a vector with unspecified
+
+ Finds the end of the user header of a vector with unspecified
alignment given the user pointer to the vector.
*/
always_inline void *
-vec_header_end (void * v, uword header_bytes)
-{ return v + vec_header_bytes (header_bytes); }
+vec_header_end (void *v, uword header_bytes)
+{
+ return v + vec_header_bytes (header_bytes);
+}
always_inline uword
vec_aligned_header_bytes (uword header_bytes, uword align)
@@ -105,31 +113,35 @@ vec_aligned_header_bytes (uword header_bytes, uword align)
}
always_inline void *
-vec_aligned_header (void * v, uword header_bytes, uword align)
-{ return v - vec_aligned_header_bytes (header_bytes, align); }
+vec_aligned_header (void *v, uword header_bytes, uword align)
+{
+ return v - vec_aligned_header_bytes (header_bytes, align);
+}
always_inline void *
-vec_aligned_header_end (void * v, uword header_bytes, uword align)
-{ return v + vec_aligned_header_bytes (header_bytes, align); }
+vec_aligned_header_end (void *v, uword header_bytes, uword align)
+{
+ return v + vec_aligned_header_bytes (header_bytes, align);
+}
/** \brief Number of elements in vector (lvalue-capable)
_vec_len (v) does not check for null, but can be used as a lvalue
- (e.g. _vec_len (v) = 99).
+ (e.g. _vec_len (v) = 99).
*/
#define _vec_len(v) (_vec_find(v)->len)
/** \brief Number of elements in vector (rvalue-only, NULL tolerant)
-
+
vec_len (v) checks for NULL, but cannot be used as an lvalue.
If in doubt, use vec_len...
*/
#define vec_len(v) ((v) ? _vec_len(v) : 0)
-/** \brief Reset vector length to zero
+/** \brief Reset vector length to zero
NULL-pointer tolerant
*/
@@ -179,3 +191,11 @@ for (var = vec_end (vec) - 1; var >= (vec); var--)
#define vec_foreach_index(var,v) for ((var) = 0; (var) < vec_len (v); (var)++)
#endif /* included_clib_vec_bootstrap_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
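
A minimal sketch of the vec_len / _vec_len distinction documented above, assuming vppinfra headers: vec_len() is NULL-tolerant and read-only, while _vec_len() is an lvalue but must only be used on a non-NULL vector.

#include <vppinfra/vec.h>

static void
truncate_to (u32 * v, uword n)
{
  if (vec_len (v) > n)		/* safe even when v == 0 */
    _vec_len (v) = n;		/* lvalue form: shrink in place, no realloc */
}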
diff --git a/vppinfra/vppinfra/vector.c b/vppinfra/vppinfra/vector.c
index f742e8e59ec..68b4fdc2088 100644
--- a/vppinfra/vppinfra/vector.c
+++ b/vppinfra/vppinfra/vector.c
@@ -44,3 +44,11 @@ u8 u32x4_compare_word_mask_table[256] = {
[0xff] = (1 << 0) | (1 << 1),
};
#endif
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/vector.h b/vppinfra/vppinfra/vector.h
index a6f4111e88c..6bea737d247 100644
--- a/vppinfra/vppinfra/vector.h
+++ b/vppinfra/vppinfra/vector.h
@@ -99,30 +99,30 @@ typedef f64 f64x2 _vector_size (16);
/* Vector word sized types. */
#ifndef CLIB_VECTOR_WORD_BITS
-# ifdef CLIB_HAVE_VEC128
-# define CLIB_VECTOR_WORD_BITS 128
-# else
-# define CLIB_VECTOR_WORD_BITS 64
-# endif
+#ifdef CLIB_HAVE_VEC128
+#define CLIB_VECTOR_WORD_BITS 128
+#else
+#define CLIB_VECTOR_WORD_BITS 64
+#endif
#endif /* CLIB_VECTOR_WORD_BITS */
/* Vector word sized types. */
#if CLIB_VECTOR_WORD_BITS == 128
-typedef i8 i8x _vector_size (16);
+typedef i8 i8x _vector_size (16);
typedef i16 i16x _vector_size (16);
typedef i32 i32x _vector_size (16);
typedef i64 i64x _vector_size (16);
-typedef u8 u8x _vector_size (16);
+typedef u8 u8x _vector_size (16);
typedef u16 u16x _vector_size (16);
typedef u32 u32x _vector_size (16);
typedef u64 u64x _vector_size (16);
#endif
#if CLIB_VECTOR_WORD_BITS == 64
-typedef i8 i8x _vector_size (8);
+typedef i8 i8x _vector_size (8);
typedef i16 i16x _vector_size (8);
typedef i32 i32x _vector_size (8);
typedef i64 i64x _vector_size (8);
-typedef u8 u8x _vector_size (8);
+typedef u8 u8x _vector_size (8);
typedef u16 u16x _vector_size (8);
typedef u32 u32x _vector_size (8);
typedef u64 u64x _vector_size (8);
@@ -142,14 +142,14 @@ typedef u64 u64x _vector_size (8);
t as_##t[VECTOR_WORD_TYPE_LEN (t)]; \
} t##x##_union_t;
-_ (u8);
-_ (u16);
-_ (u32);
-_ (u64);
-_ (i8);
-_ (i16);
-_ (i32);
-_ (i64);
+_(u8);
+_(u16);
+_(u32);
+_(u64);
+_(i8);
+_(i16);
+_(i32);
+_(i64);
#undef _
@@ -163,12 +163,12 @@ _ (i64);
t as_##t[n]; \
} t##x##n##_union_t; \
-_ (u8, 8);
-_ (u16, 4);
-_ (u32, 2);
-_ (i8, 8);
-_ (i16, 4);
-_ (i32, 2);
+_(u8, 8);
+_(u16, 4);
+_(u32, 2);
+_(i8, 8);
+_(i16, 4);
+_(i32, 2);
#undef _
@@ -182,16 +182,16 @@ _ (i32, 2);
t as_##t[n]; \
} t##x##n##_union_t; \
-_ (u8, 16);
-_ (u16, 8);
-_ (u32, 4);
-_ (u64, 2);
-_ (i8, 16);
-_ (i16, 8);
-_ (i32, 4);
-_ (i64, 2);
-_ (f32, 4);
-_ (f64, 2);
+_(u8, 16);
+_(u16, 8);
+_(u32, 4);
+_(u64, 2);
+_(i8, 16);
+_(i16, 8);
+_(i32, 4);
+_(i64, 2);
+_(f32, 4);
+_(f64, 2);
#undef _
@@ -205,14 +205,14 @@ _ (f64, 2);
t as_##t[n]; \
} t##x##n##_union_t; \
-_ (u8, 16);
-_ (u16, 8);
-_ (u32, 4);
-_ (u64, 2);
-_ (i8, 16);
-_ (i16, 8);
-_ (i32, 4);
-_ (i64, 2);
+_(u8, 16);
+_(u16, 8);
+_(u32, 4);
+_(u64, 2);
+_(i8, 16);
+_(i16, 8);
+_(i32, 4);
+_(i64, 2);
#undef _
@@ -235,3 +235,11 @@ _ (i64, 2);
#endif
#endif /* included_clib_vector_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
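
A sketch of how the *_union_t types generated above are typically used: the union lets scalar code read individual lanes of a vector register. Member names follow the as_<type> pattern produced by the macros; this assumes a 128-bit capable build (CLIB_HAVE_VEC128) and the vppinfra headers.

#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

#ifdef CLIB_HAVE_VEC128
static u32
sum_lanes (u32x4 x)
{
  u32x4_union_t u;
  u32 s = 0;
  int i;

  u.as_u32x4 = x;		/* store the vector into the union */
  for (i = 0; i < 4; i++)
    s += u.as_u32[i];		/* read the lanes back as scalars */
  return s;
}
#endif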
diff --git a/vppinfra/vppinfra/vector_altivec.h b/vppinfra/vppinfra/vector_altivec.h
index cc4f28adcdc..0e9de820ed8 100644
--- a/vppinfra/vppinfra/vector_altivec.h
+++ b/vppinfra/vppinfra/vector_altivec.h
@@ -69,16 +69,16 @@
return t##_##lr (x, j); \
}
-_ (u16x8, i16x8, shift_left, vslh);
-_ (u32x4, i32x4, shift_left, vslw);
-_ (u16x8, i16x8, shift_right, vsrh);
-_ (u32x4, i32x4, shift_right, vsrw);
-_ (i16x8, i16x8, shift_right, vsrah);
-_ (i32x4, i32x4, shift_right, vsraw);
-_ (u16x8, i16x8, rotate_left, vrlh);
-_ (i16x8, i16x8, rotate_left, vrlh);
-_ (u32x4, i32x4, rotate_left, vrlw);
-_ (i32x4, i32x4, rotate_left, vrlw);
+_(u16x8, i16x8, shift_left, vslh);
+_(u32x4, i32x4, shift_left, vslw);
+_(u16x8, i16x8, shift_right, vsrh);
+_(u32x4, i32x4, shift_right, vsrw);
+_(i16x8, i16x8, shift_right, vsrah);
+_(i32x4, i32x4, shift_right, vsraw);
+_(u16x8, i16x8, rotate_left, vrlh);
+_(i16x8, i16x8, rotate_left, vrlh);
+_(u32x4, i32x4, rotate_left, vrlw);
+_(i32x4, i32x4, rotate_left, vrlw);
#undef _
@@ -89,19 +89,17 @@ _ (i32x4, i32x4, rotate_left, vrlw);
return (t) __builtin_altivec_##f ((i32x4) x, n_bits); \
}
-_ (u32x4, u32, left, vslo)
-_ (i32x4, i32, left, vslo)
-_ (u32x4, u32, right, vsro)
-_ (i32x4, i32, right, vsro)
-_ (u16x8, u16, left, vslo)
-_ (i16x8, i16, left, vslo)
-_ (u16x8, u16, right, vsro)
-_ (i16x8, i16, right, vsro)
-
+_(u32x4, u32, left, vslo)
+_(i32x4, i32, left, vslo)
+_(u32x4, u32, right, vsro)
+_(i32x4, i32, right, vsro)
+_(u16x8, u16, left, vslo)
+_(i16x8, i16, left, vslo)
+_(u16x8, u16, right, vsro) _(i16x8, i16, right, vsro)
#undef _
-
-always_inline u32
-u32x4_get0 (u32x4 x)
+ always_inline
+ u32
+ u32x4_get0 (u32x4 x)
{
u32x4_union_t y;
y.as_u32x4 = x;
@@ -113,17 +111,14 @@ u32x4_get0 (u32x4 x)
always_inline t t##_interleave_##lh (t x, t y) \
{ return (t) __builtin_altivec_##f ((it) x, (it) y); }
-_ (u32x4, i32x4, lo, vmrglw)
-_ (i32x4, i32x4, lo, vmrglw)
-_ (u16x8, i16x8, lo, vmrglh)
-_ (i16x8, i16x8, lo, vmrglh)
-_ (u32x4, i32x4, hi, vmrghw)
-_ (i32x4, i32x4, hi, vmrghw)
-_ (u16x8, i16x8, hi, vmrghh)
-_ (i16x8, i16x8, hi, vmrghh)
-
+_(u32x4, i32x4, lo, vmrglw)
+_(i32x4, i32x4, lo, vmrglw)
+_(u16x8, i16x8, lo, vmrglh)
+_(i16x8, i16x8, lo, vmrglh)
+_(u32x4, i32x4, hi, vmrghw)
+_(i32x4, i32x4, hi, vmrghw)
+_(u16x8, i16x8, hi, vmrghh) _(i16x8, i16x8, hi, vmrghh)
#undef _
-
/* Unaligned loads/stores. */
#ifndef __cplusplus
#define _(t) \
@@ -131,19 +126,9 @@ _ (i16x8, i16x8, hi, vmrghh)
{ clib_mem_unaligned (a, t) = x; } \
always_inline t t##_load_unaligned (t * a) \
{ return clib_mem_unaligned (a, t); }
-
-_ (u8x16)
-_ (u16x8)
-_ (u32x4)
-_ (u64x2)
-_ (i8x16)
-_ (i16x8)
-_ (i32x4)
-_ (i64x2)
-
+ _(u8x16) _(u16x8) _(u32x4) _(u64x2) _(i8x16) _(i16x8) _(i32x4) _(i64x2)
#undef _
#endif
-
#define _signed_binop(n,m,f,g) \
/* Unsigned */ \
always_inline u##n##x##m \
@@ -154,26 +139,25 @@ _ (i64x2)
always_inline i##n##x##m \
i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
{ return (i##n##x##m) __builtin_altivec_##g ((i##n##x##m) x, (i##n##x##m) y); }
-
/* Compare operations. */
-_signed_binop (16, 8, is_equal, vcmpequh)
+ _signed_binop (16, 8, is_equal, vcmpequh)
_signed_binop (32, 4, is_equal, vcmpequw)
-
#undef _signed_binop
-
-always_inline u16x8 u16x8_is_zero (u16x8 x)
+ always_inline u16x8 u16x8_is_zero (u16x8 x)
{
- u16x8 zero = {0};
+ u16x8 zero = { 0 };
return u16x8_is_equal (x, zero);
}
-always_inline u32x4 u32x4_is_zero (u32x4 x)
+always_inline u32x4
+u32x4_is_zero (u32x4 x)
{
- u32x4 zero = {0};
+ u32x4 zero = { 0 };
return u32x4_is_equal (x, zero);
}
-always_inline u32 u32x4_zero_byte_mask (u32x4 x)
+always_inline u32
+u32x4_zero_byte_mask (u32x4 x)
{
u32x4 cmp = u32x4_is_zero (x);
u32x4 tmp = { 0x000f, 0x00f0, 0x0f00, 0xf000, };
@@ -184,3 +168,11 @@ always_inline u32 u32x4_zero_byte_mask (u32x4 x)
}
#endif /* included_vector_altivec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
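
One plain-C reading of the lane constants in u32x4_zero_byte_mask above, offered only as a sketch of the intent: the SIMD version builds the same result with a per-lane compare, yielding four mask bits for every 32-bit lane that is entirely zero.

#include <stdint.h>

static uint32_t
zero_byte_mask_scalar (const uint32_t x[4])
{
  /* lane constants as in the u32x4_zero_byte_mask body above */
  static const uint32_t lane_bits[4] = { 0x000f, 0x00f0, 0x0f00, 0xf000 };
  uint32_t mask = 0;
  int i;

  for (i = 0; i < 4; i++)
    if (x[i] == 0)
      mask |= lane_bits[i];	/* 4 mask bits per all-zero 32-bit lane */
  return mask;
}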
diff --git a/vppinfra/vppinfra/vector_funcs.h b/vppinfra/vppinfra/vector_funcs.h
index c020d9868c5..db09de0f04c 100644
--- a/vppinfra/vppinfra/vector_funcs.h
+++ b/vppinfra/vppinfra/vector_funcs.h
@@ -138,7 +138,7 @@ do { \
#define u8x_interleave u8x8_interleave
#define u16x_interleave u16x4_interleave
#define u32x_interleave u32x2_interleave
-#define u64x_interleave(a,b) /* do nothing */
+#define u64x_interleave(a,b) /* do nothing */
#endif
/* Vector word sized shifts. */
@@ -324,3 +324,11 @@ do { \
#undef _
#endif /* included_vector_funcs_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/vector_iwmmxt.h b/vppinfra/vppinfra/vector_iwmmxt.h
index 6096c828124..8e662045655 100644
--- a/vppinfra/vppinfra/vector_iwmmxt.h
+++ b/vppinfra/vppinfra/vector_iwmmxt.h
@@ -38,41 +38,62 @@
#ifndef included_vector_iwmmxt_h
#define included_vector_iwmmxt_h
-#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/error.h> /* for ASSERT */
/* 64 bit interleaves. */
-always_inline u8x8 u8x8_interleave_hi (u8x8 a, u8x8 b)
-{ return __builtin_arm_wunpckihb (a, b); }
+always_inline u8x8
+u8x8_interleave_hi (u8x8 a, u8x8 b)
+{
+ return __builtin_arm_wunpckihb (a, b);
+}
-always_inline u8x8 u8x8_interleave_lo (u8x8 a, u8x8 b)
-{ return __builtin_arm_wunpckilb (a, b); }
+always_inline u8x8
+u8x8_interleave_lo (u8x8 a, u8x8 b)
+{
+ return __builtin_arm_wunpckilb (a, b);
+}
-always_inline u16x4 u16x4_interleave_hi (u16x4 a, u16x4 b)
-{ return __builtin_arm_wunpckihh (a, b); }
+always_inline u16x4
+u16x4_interleave_hi (u16x4 a, u16x4 b)
+{
+ return __builtin_arm_wunpckihh (a, b);
+}
-always_inline u16x4 u16x4_interleave_lo (u16x4 a, u16x4 b)
-{ return __builtin_arm_wunpckilh (a, b); }
+always_inline u16x4
+u16x4_interleave_lo (u16x4 a, u16x4 b)
+{
+ return __builtin_arm_wunpckilh (a, b);
+}
-always_inline u32x2 u32x2_interleave_hi (u32x2 a, u32x2 b)
-{ return __builtin_arm_wunpckihw (a, b); }
+always_inline u32x2
+u32x2_interleave_hi (u32x2 a, u32x2 b)
+{
+ return __builtin_arm_wunpckihw (a, b);
+}
-always_inline u32x2 u32x2_interleave_lo (u32x2 a, u32x2 b)
-{ return __builtin_arm_wunpckilw (a, b); }
+always_inline u32x2
+u32x2_interleave_lo (u32x2 a, u32x2 b)
+{
+ return __builtin_arm_wunpckilw (a, b);
+}
-always_inline u32x2 u32x2_splat (u32 a)
+always_inline u32x2
+u32x2_splat (u32 a)
{
- u32x2 x = {a};
+ u32x2 x = { a };
x = u32x2_interleave_lo (x, x);
return x;
- }
+}
-always_inline u16x4 u16x4_splat (u16 a)
+always_inline u16x4
+u16x4_splat (u16 a)
{
u32 t = (u32) a | ((u32) a << 16);
return u32x2_splat (t);
}
-always_inline u8x8 u8x8_splat (u8 a)
+always_inline u8x8
+u8x8_splat (u8 a)
{
u32 t = (u32) a | ((u32) a << 8);
t |= t << 16;
@@ -98,30 +119,31 @@ always_inline u8x8 u8x8_splat (u8 a)
return y; \
}
-_ (u16x4, 1, shift_left, wsllhi)
-_ (u32x2, 1, shift_left, wsllwi)
-_ (u16x4, 1, shift_right, wsrlhi)
-_ (u32x2, 1, shift_right, wsrlwi)
-_ (i16x4, 1, shift_left, wsllhi)
-_ (i32x2, 1, shift_left, wsllwi)
-_ (i16x4, 1, shift_right, wsrahi)
-_ (i32x2, 1, shift_right, wsrawi)
-
+_(u16x4, 1, shift_left, wsllhi)
+_(u32x2, 1, shift_left, wsllwi)
+_(u16x4, 1, shift_right, wsrlhi)
+_(u32x2, 1, shift_right, wsrlwi)
+_(i16x4, 1, shift_left, wsllhi)
+_(i32x2, 1, shift_left, wsllwi)
+_(i16x4, 1, shift_right, wsrahi) _(i32x2, 1, shift_right, wsrawi)
/* Word shifts. */
-_ (u8x8, 8, word_shift_left, wslldi)
-_ (u16x4, 16, word_shift_left, wslldi)
-_ (u32x2, 32, word_shift_left, wslldi)
-_ (u8x8, 8, word_shift_right, wsrldi)
-_ (u16x4, 16, word_shift_right, wsrldi)
-_ (u32x2, 32, word_shift_right, wsrldi)
-_ (i8x8, 8, word_shift_left, wslldi)
-_ (i16x4, 16, word_shift_left, wslldi)
-_ (i32x2, 32, word_shift_left, wslldi)
-_ (i8x8, 8, word_shift_right, wsrldi)
-_ (i16x4, 16, word_shift_right, wsrldi)
-_ (i32x2, 32, word_shift_right, wsrldi)
-
+ _(u8x8, 8, word_shift_left, wslldi)
+_(u16x4, 16, word_shift_left, wslldi)
+_(u32x2, 32, word_shift_left, wslldi)
+_(u8x8, 8, word_shift_right, wsrldi)
+_(u16x4, 16, word_shift_right, wsrldi)
+_(u32x2, 32, word_shift_right, wsrldi)
+_(i8x8, 8, word_shift_left, wslldi)
+_(i16x4, 16, word_shift_left, wslldi)
+_(i32x2, 32, word_shift_left, wslldi)
+_(i8x8, 8, word_shift_right, wsrldi)
+_(i16x4, 16, word_shift_right, wsrldi) _(i32x2, 32, word_shift_right, wsrldi)
#undef _
-
-
#endif /* included_vector_iwmmxt_h */
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
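
The u8x8_splat / u16x4_splat definitions above build their broadcast by OR-ing shifted copies of the scalar into one 32-bit word before handing it to u32x2_splat. A plain-C sketch of just that replication step:

#include <stdint.h>

static uint32_t
replicate_u8 (uint8_t a)
{
  uint32_t t = (uint32_t) a | ((uint32_t) a << 8);
  t |= t << 16;			/* all four bytes of t now equal a */
  return t;
}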
diff --git a/vppinfra/vppinfra/vector_sse2.h b/vppinfra/vppinfra/vector_sse2.h
index 23254fb94ee..f782e8fd409 100644
--- a/vppinfra/vppinfra/vector_sse2.h
+++ b/vppinfra/vppinfra/vector_sse2.h
@@ -38,119 +38,189 @@
#ifndef included_vector_sse2_h
#define included_vector_sse2_h
-#include <vppinfra/error_bootstrap.h> /* for ASSERT */
+#include <vppinfra/error_bootstrap.h> /* for ASSERT */
#include <x86intrin.h>
/* 128 bit interleaves. */
-always_inline u8x16 u8x16_interleave_hi (u8x16 a, u8x16 b)
-{ return (u8x16) _mm_unpackhi_epi8 ((__m128i) a, (__m128i) b); }
+always_inline u8x16
+u8x16_interleave_hi (u8x16 a, u8x16 b)
+{
+ return (u8x16) _mm_unpackhi_epi8 ((__m128i) a, (__m128i) b);
+}
-always_inline u8x16 u8x16_interleave_lo (u8x16 a, u8x16 b)
-{ return (u8x16) _mm_unpacklo_epi8 ((__m128i) a, (__m128i) b); }
+always_inline u8x16
+u8x16_interleave_lo (u8x16 a, u8x16 b)
+{
+ return (u8x16) _mm_unpacklo_epi8 ((__m128i) a, (__m128i) b);
+}
-always_inline u16x8 u16x8_interleave_hi (u16x8 a, u16x8 b)
-{ return (u16x8) _mm_unpackhi_epi16 ((__m128i) a, (__m128i) b); }
+always_inline u16x8
+u16x8_interleave_hi (u16x8 a, u16x8 b)
+{
+ return (u16x8) _mm_unpackhi_epi16 ((__m128i) a, (__m128i) b);
+}
-always_inline u16x8 u16x8_interleave_lo (u16x8 a, u16x8 b)
-{ return (u16x8) _mm_unpacklo_epi16 ((__m128i) a, (__m128i) b); }
+always_inline u16x8
+u16x8_interleave_lo (u16x8 a, u16x8 b)
+{
+ return (u16x8) _mm_unpacklo_epi16 ((__m128i) a, (__m128i) b);
+}
-always_inline u32x4 u32x4_interleave_hi (u32x4 a, u32x4 b)
-{ return (u32x4) _mm_unpackhi_epi32 ((__m128i) a, (__m128i) b); }
+always_inline u32x4
+u32x4_interleave_hi (u32x4 a, u32x4 b)
+{
+ return (u32x4) _mm_unpackhi_epi32 ((__m128i) a, (__m128i) b);
+}
-always_inline u32x4 u32x4_interleave_lo (u32x4 a, u32x4 b)
-{ return (u32x4) _mm_unpacklo_epi32 ((__m128i) a, (__m128i) b); }
+always_inline u32x4
+u32x4_interleave_lo (u32x4 a, u32x4 b)
+{
+ return (u32x4) _mm_unpacklo_epi32 ((__m128i) a, (__m128i) b);
+}
-always_inline u64x2 u64x2_interleave_hi (u64x2 a, u64x2 b)
-{ return (u64x2) _mm_unpackhi_epi64 ((__m128i) a, (__m128i) b); }
+always_inline u64x2
+u64x2_interleave_hi (u64x2 a, u64x2 b)
+{
+ return (u64x2) _mm_unpackhi_epi64 ((__m128i) a, (__m128i) b);
+}
-always_inline u64x2 u64x2_interleave_lo (u64x2 a, u64x2 b)
-{ return (u64x2) _mm_unpacklo_epi64 ((__m128i) a, (__m128i) b); }
+always_inline u64x2
+u64x2_interleave_lo (u64x2 a, u64x2 b)
+{
+ return (u64x2) _mm_unpacklo_epi64 ((__m128i) a, (__m128i) b);
+}
/* 64 bit interleaves. */
-always_inline u8x8 u8x8_interleave_hi (u8x8 a, u8x8 b)
-{ return (u8x8) _m_punpckhbw ((__m64) a, (__m64) b); }
+always_inline u8x8
+u8x8_interleave_hi (u8x8 a, u8x8 b)
+{
+ return (u8x8) _m_punpckhbw ((__m64) a, (__m64) b);
+}
-always_inline u8x8 u8x8_interleave_lo (u8x8 a, u8x8 b)
-{ return (u8x8) _m_punpcklbw ((__m64) a, (__m64) b); }
+always_inline u8x8
+u8x8_interleave_lo (u8x8 a, u8x8 b)
+{
+ return (u8x8) _m_punpcklbw ((__m64) a, (__m64) b);
+}
-always_inline u16x4 u16x4_interleave_hi (u16x4 a, u16x4 b)
-{ return (u16x4) _m_punpckhwd ((__m64) a, (__m64) b); }
+always_inline u16x4
+u16x4_interleave_hi (u16x4 a, u16x4 b)
+{
+ return (u16x4) _m_punpckhwd ((__m64) a, (__m64) b);
+}
-always_inline u16x4 u16x4_interleave_lo (u16x4 a, u16x4 b)
-{ return (u16x4) _m_punpcklwd ((__m64) a, (__m64) b); }
+always_inline u16x4
+u16x4_interleave_lo (u16x4 a, u16x4 b)
+{
+ return (u16x4) _m_punpcklwd ((__m64) a, (__m64) b);
+}
-always_inline u32x2 u32x2_interleave_hi (u32x2 a, u32x2 b)
-{ return (u32x2) _m_punpckhdq ((__m64) a, (__m64) b); }
+always_inline u32x2
+u32x2_interleave_hi (u32x2 a, u32x2 b)
+{
+ return (u32x2) _m_punpckhdq ((__m64) a, (__m64) b);
+}
-always_inline u32x2 u32x2_interleave_lo (u32x2 a, u32x2 b)
-{ return (u32x2) _m_punpckldq ((__m64) a, (__m64) b); }
+always_inline u32x2
+u32x2_interleave_lo (u32x2 a, u32x2 b)
+{
+ return (u32x2) _m_punpckldq ((__m64) a, (__m64) b);
+}
/* 128 bit packs. */
-always_inline u8x16 u16x8_pack (u16x8 lo, u16x8 hi)
-{ return (u8x16) _mm_packus_epi16 ((__m128i) lo, (__m128i) hi); }
+always_inline u8x16
+u16x8_pack (u16x8 lo, u16x8 hi)
+{
+ return (u8x16) _mm_packus_epi16 ((__m128i) lo, (__m128i) hi);
+}
-always_inline i8x16 i16x8_pack (i16x8 lo, i16x8 hi)
-{ return (i8x16) _mm_packs_epi16 ((__m128i) lo, (__m128i) hi); }
+always_inline i8x16
+i16x8_pack (i16x8 lo, i16x8 hi)
+{
+ return (i8x16) _mm_packs_epi16 ((__m128i) lo, (__m128i) hi);
+}
-always_inline u16x8 u32x4_pack (u32x4 lo, u32x4 hi)
-{ return (u16x8) _mm_packs_epi32 ((__m128i) lo, (__m128i) hi); }
+always_inline u16x8
+u32x4_pack (u32x4 lo, u32x4 hi)
+{
+ return (u16x8) _mm_packs_epi32 ((__m128i) lo, (__m128i) hi);
+}
/* 64 bit packs. */
-always_inline u8x8 u16x4_pack (u16x4 lo, u16x4 hi)
-{ return (u8x8) _m_packuswb ((__m64) lo, (__m64) hi); }
+always_inline u8x8
+u16x4_pack (u16x4 lo, u16x4 hi)
+{
+ return (u8x8) _m_packuswb ((__m64) lo, (__m64) hi);
+}
-always_inline i8x8 i16x4_pack (i16x4 lo, i16x4 hi)
-{ return (i8x8) _m_packsswb ((__m64) lo, (__m64) hi); }
+always_inline i8x8
+i16x4_pack (i16x4 lo, i16x4 hi)
+{
+ return (i8x8) _m_packsswb ((__m64) lo, (__m64) hi);
+}
-always_inline u16x4 u32x2_pack (u32x2 lo, u32x2 hi)
-{ return (u16x4) _m_packssdw ((__m64) lo, (__m64) hi); }
+always_inline u16x4
+u32x2_pack (u32x2 lo, u32x2 hi)
+{
+ return (u16x4) _m_packssdw ((__m64) lo, (__m64) hi);
+}
-always_inline i16x4 i32x2_pack (i32x2 lo, i32x2 hi)
-{ return (i16x4) _m_packssdw ((__m64) lo, (__m64) hi); }
+always_inline i16x4
+i32x2_pack (i32x2 lo, i32x2 hi)
+{
+ return (i16x4) _m_packssdw ((__m64) lo, (__m64) hi);
+}
/* Splats: replicate scalar value into vector. */
-always_inline u64x2 u64x2_splat (u64 a)
+always_inline u64x2
+u64x2_splat (u64 a)
{
- u64x2 x = {a};
+ u64x2 x = { a };
x = u64x2_interleave_lo (x, x);
return x;
}
-always_inline u32x4 u32x4_splat (u32 a)
+always_inline u32x4
+u32x4_splat (u32 a)
{
- u32x4 x = {a};
+ u32x4 x = { a };
x = u32x4_interleave_lo (x, x);
x = (u32x4) u64x2_interleave_lo ((u64x2) x, (u64x2) x);
return x;
}
-always_inline u16x8 u16x8_splat (u16 a)
+always_inline u16x8
+u16x8_splat (u16 a)
{
u32 t = (u32) a | ((u32) a << 16);
return (u16x8) u32x4_splat (t);
}
-always_inline u8x16 u8x16_splat (u8 a)
+always_inline u8x16
+u8x16_splat (u8 a)
{
u32 t = (u32) a | ((u32) a << 8);
t |= t << 16;
return (u8x16) u16x8_splat (t);
}
-always_inline u32x2 u32x2_splat (u32 a)
+always_inline u32x2
+u32x2_splat (u32 a)
{
- u32x2 x = {a};
+ u32x2 x = { a };
x = u32x2_interleave_lo (x, x);
return x;
- }
+}
-always_inline u16x4 u16x4_splat (u16 a)
+always_inline u16x4
+u16x4_splat (u16 a)
{
u32 t = (u32) a | ((u32) a << 16);
return (u16x4) u32x2_splat (t);
}
-always_inline u8x8 u8x8_splat (u8 a)
+always_inline u8x8
+u8x8_splat (u8 a)
{
u32 t = (u32) a | ((u32) a << 8);
t |= t << 16;
@@ -166,17 +236,29 @@ always_inline u8x8 u8x8_splat (u8 a)
#define i8x8_splat u8x8_splat
#ifndef __ICC
-always_inline u64x2 u64x2_read_lo (u64x2 x, u64 * a)
-{ return (u64x2) _mm_loadl_pi ((__m128) x, (__m64 *) a); }
+always_inline u64x2
+u64x2_read_lo (u64x2 x, u64 * a)
+{
+ return (u64x2) _mm_loadl_pi ((__m128) x, (__m64 *) a);
+}
-always_inline u64x2 u64x2_read_hi (u64x2 x, u64 * a)
-{ return (u64x2) _mm_loadh_pi ((__m128) x, (__m64 *) a); }
+always_inline u64x2
+u64x2_read_hi (u64x2 x, u64 * a)
+{
+ return (u64x2) _mm_loadh_pi ((__m128) x, (__m64 *) a);
+}
-always_inline void u64x2_write_lo (u64x2 x, u64 * a)
-{ _mm_storel_pi ((__m64 *) a, (__m128) x); }
+always_inline void
+u64x2_write_lo (u64x2 x, u64 * a)
+{
+ _mm_storel_pi ((__m64 *) a, (__m128) x);
+}
-always_inline void u64x2_write_hi (u64x2 x, u64 * a)
-{ _mm_storeh_pi ((__m64 *) a, (__m128) x); }
+always_inline void
+u64x2_write_hi (u64x2 x, u64 * a)
+{
+ _mm_storeh_pi ((__m64 *) a, (__m128) x);
+}
#endif
/* Unaligned loads/stores. */
@@ -187,17 +269,8 @@ always_inline void u64x2_write_hi (u64x2 x, u64 * a)
always_inline t t##_load_unaligned (t * a) \
{ return (t) _mm_loadu_si128 ((__m128i *) a); }
-_ (u8x16)
-_ (u16x8)
-_ (u32x4)
-_ (u64x2)
-_ (i8x16)
-_ (i16x8)
-_ (i32x4)
-_ (i64x2)
-
+_(u8x16) _(u16x8) _(u32x4) _(u64x2) _(i8x16) _(i16x8) _(i32x4) _(i64x2)
#undef _
-
#define _signed_binop(n,m,f,g) \
/* Unsigned */ \
always_inline u##n##x##m \
@@ -208,36 +281,42 @@ _ (i64x2)
always_inline i##n##x##m \
i##n##x##m##_##f (i##n##x##m x, i##n##x##m y) \
{ return (i##n##x##m) _mm_##g##n ((__m128i) x, (__m128i) y); }
-
/* Addition/subtraction. */
-_signed_binop (8, 16, add, add_epi)
-_signed_binop (16, 8, add, add_epi)
-_signed_binop (32, 4, add, add_epi)
-_signed_binop (64, 2, add, add_epi)
-_signed_binop (8, 16, sub, sub_epi)
-_signed_binop (16, 8, sub, sub_epi)
-_signed_binop (32, 4, sub, sub_epi)
-_signed_binop (64, 2, sub, sub_epi)
-
+ _signed_binop (8, 16, add, add_epi)
+_signed_binop (16, 8, add, add_epi)
+_signed_binop (32, 4, add, add_epi)
+_signed_binop (64, 2, add, add_epi)
+_signed_binop (8, 16, sub, sub_epi)
+_signed_binop (16, 8, sub, sub_epi)
+_signed_binop (32, 4, sub, sub_epi) _signed_binop (64, 2, sub, sub_epi)
/* Addition/subtraction with saturation. */
-
-_signed_binop (8, 16, add_saturate, adds_epu)
+ _signed_binop (8, 16, add_saturate, adds_epu)
_signed_binop (16, 8, add_saturate, adds_epu)
_signed_binop (8, 16, sub_saturate, subs_epu)
_signed_binop (16, 8, sub_saturate, subs_epu)
-
/* Multiplication. */
-always_inline i16x8 i16x8_mul_lo (i16x8 x, i16x8 y)
-{ return (i16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y); }
+ always_inline i16x8 i16x8_mul_lo (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
+}
-always_inline u16x8 u16x8_mul_lo (u16x8 x, u16x8 y)
-{ return (u16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y); }
+always_inline u16x8
+u16x8_mul_lo (u16x8 x, u16x8 y)
+{
+ return (u16x8) _mm_mullo_epi16 ((__m128i) x, (__m128i) y);
+}
-always_inline i16x8 i16x8_mul_hi (i16x8 x, i16x8 y)
-{ return (i16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y); }
+always_inline i16x8
+i16x8_mul_hi (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y);
+}
-always_inline u16x8 u16x8_mul_hi (u16x8 x, u16x8 y)
-{ return (u16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y); }
+always_inline u16x8
+u16x8_mul_hi (u16x8 x, u16x8 y)
+{
+ return (u16x8) _mm_mulhi_epu16 ((__m128i) x, (__m128i) y);
+}
/* 128 bit shifts. */
@@ -248,44 +327,64 @@ always_inline u16x8 u16x8_mul_hi (u16x8 x, u16x8 y)
always_inline p##a##x##b p##a##x##b##_shift_##c (p##a##x##b x, p##a##x##b y) \
{ return (p##a##x##b) _mm_##f##_epi##a ((__m128i) x, (__m128i) y); }
- _ (u, 16, 8, left, sll)
- _ (u, 32, 4, left, sll)
- _ (u, 64, 2, left, sll)
- _ (u, 16, 8, right, srl)
- _ (u, 32, 4, right, srl)
- _ (u, 64, 2, right, srl)
- _ (i, 16, 8, left, sll)
- _ (i, 32, 4, left, sll)
- _ (i, 64, 2, left, sll)
- _ (i, 16, 8, right, sra)
- _ (i, 32, 4, right, sra)
-
+_(u, 16, 8, left, sll)
+_(u, 32, 4, left, sll)
+_(u, 64, 2, left, sll)
+_(u, 16, 8, right, srl)
+_(u, 32, 4, right, srl)
+_(u, 64, 2, right, srl)
+_(i, 16, 8, left, sll)
+_(i, 32, 4, left, sll)
+_(i, 64, 2, left, sll) _(i, 16, 8, right, sra) _(i, 32, 4, right, sra)
#undef _
-
/* 64 bit shifts. */
-always_inline u16x4 u16x4_shift_left (u16x4 x, u16x4 i)
-{ return (u16x4) _m_psllw ((__m64) x, (__m64) i); };
+ always_inline u16x4
+u16x4_shift_left (u16x4 x, u16x4 i)
+{
+ return (u16x4) _m_psllw ((__m64) x, (__m64) i);
+};
-always_inline u32x2 u32x2_shift_left (u32x2 x, u32x2 i)
-{ return (u32x2) _m_pslld ((__m64) x, (__m64) i); };
+always_inline u32x2
+u32x2_shift_left (u32x2 x, u32x2 i)
+{
+ return (u32x2) _m_pslld ((__m64) x, (__m64) i);
+};
-always_inline u16x4 u16x4_shift_right (u16x4 x, u16x4 i)
-{ return (u16x4) _m_psrlw ((__m64) x, (__m64) i); };
+always_inline u16x4
+u16x4_shift_right (u16x4 x, u16x4 i)
+{
+ return (u16x4) _m_psrlw ((__m64) x, (__m64) i);
+};
-always_inline u32x2 u32x2_shift_right (u32x2 x, u32x2 i)
-{ return (u32x2) _m_psrld ((__m64) x, (__m64) i); };
+always_inline u32x2
+u32x2_shift_right (u32x2 x, u32x2 i)
+{
+ return (u32x2) _m_psrld ((__m64) x, (__m64) i);
+};
-always_inline i16x4 i16x4_shift_left (i16x4 x, i16x4 i)
-{ return (i16x4) _m_psllw ((__m64) x, (__m64) i); };
+always_inline i16x4
+i16x4_shift_left (i16x4 x, i16x4 i)
+{
+ return (i16x4) _m_psllw ((__m64) x, (__m64) i);
+};
-always_inline i32x2 i32x2_shift_left (i32x2 x, i32x2 i)
-{ return (i32x2) _m_pslld ((__m64) x, (__m64) i); };
+always_inline i32x2
+i32x2_shift_left (i32x2 x, i32x2 i)
+{
+ return (i32x2) _m_pslld ((__m64) x, (__m64) i);
+};
-always_inline i16x4 i16x4_shift_right (i16x4 x, i16x4 i)
-{ return (i16x4) _m_psraw ((__m64) x, (__m64) i); };
+always_inline i16x4
+i16x4_shift_right (i16x4 x, i16x4 i)
+{
+ return (i16x4) _m_psraw ((__m64) x, (__m64) i);
+};
-always_inline i32x2 i32x2_shift_right (i32x2 x, i32x2 i)
-{ return (i32x2) _m_psrad ((__m64) x, (__m64) i); };
+always_inline i32x2
+i32x2_shift_right (i32x2 x, i32x2 i)
+{
+ return (i32x2) _m_psrad ((__m64) x, (__m64) i);
+};
#define u8x16_word_shift_left(a,n) (u8x16) _mm_slli_si128((__m128i) a, n)
#define u8x16_word_shift_right(a,n) (u8x16) _mm_srli_si128((__m128i) a, n)
@@ -340,12 +439,12 @@ always_inline i32x2 i32x2_shift_right (i32x2 x, i32x2 i)
| t##x##n##_shift_##lr2 (w, j - i)); \
}
-_ (u16, 8, left, right);
-_ (u16, 8, right, left);
-_ (u32, 4, left, right);
-_ (u32, 4, right, left);
-_ (u64, 2, left, right);
-_ (u64, 2, right, left);
+_(u16, 8, left, right);
+_(u16, 8, right, left);
+_(u32, 4, left, right);
+_(u32, 4, right, left);
+_(u64, 2, left, right);
+_(u64, 2, right, left);
#undef _
@@ -364,64 +463,91 @@ _ (u64, 2, right, left);
t##x##n##_word_rotate_##lr1 (t##x##n w0, int i) \
{ return t##x##n##_word_rotate2_##lr1 (w0, w0, i); }
-_ (u8, 16, left, right);
-_ (u8, 16, right, left);
-_ (u16, 8, left, right);
-_ (u16, 8, right, left);
-_ (u32, 4, left, right);
-_ (u32, 4, right, left);
-_ (u64, 2, left, right);
-_ (u64, 2, right, left);
+_(u8, 16, left, right);
+_(u8, 16, right, left);
+_(u16, 8, left, right);
+_(u16, 8, right, left);
+_(u32, 4, left, right);
+_(u32, 4, right, left);
+_(u64, 2, left, right);
+_(u64, 2, right, left);
#undef _
#endif
/* Compare operations. */
-always_inline u8x16 u8x16_is_equal (u8x16 x, u8x16 y)
-{ return (u8x16) _mm_cmpeq_epi8 ((__m128i) x, (__m128i) y); }
+always_inline u8x16
+u8x16_is_equal (u8x16 x, u8x16 y)
+{
+ return (u8x16) _mm_cmpeq_epi8 ((__m128i) x, (__m128i) y);
+}
-always_inline i8x16 i8x16_is_equal (i8x16 x, i8x16 y)
-{ return (i8x16) _mm_cmpeq_epi8 ((__m128i) x, (__m128i) y); }
+always_inline i8x16
+i8x16_is_equal (i8x16 x, i8x16 y)
+{
+ return (i8x16) _mm_cmpeq_epi8 ((__m128i) x, (__m128i) y);
+}
-always_inline u16x8 u16x8_is_equal (u16x8 x, u16x8 y)
-{ return (u16x8) _mm_cmpeq_epi16 ((__m128i) x, (__m128i) y); }
+always_inline u16x8
+u16x8_is_equal (u16x8 x, u16x8 y)
+{
+ return (u16x8) _mm_cmpeq_epi16 ((__m128i) x, (__m128i) y);
+}
-always_inline i16x8 i16x8_is_equal (i16x8 x, i16x8 y)
-{ return (i16x8) _mm_cmpeq_epi16 ((__m128i) x, (__m128i) y); }
+always_inline i16x8
+i16x8_is_equal (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_cmpeq_epi16 ((__m128i) x, (__m128i) y);
+}
-always_inline u32x4 u32x4_is_equal (u32x4 x, u32x4 y)
-{ return (u32x4) _mm_cmpeq_epi32 ((__m128i) x, (__m128i) y); }
+always_inline u32x4
+u32x4_is_equal (u32x4 x, u32x4 y)
+{
+ return (u32x4) _mm_cmpeq_epi32 ((__m128i) x, (__m128i) y);
+}
-always_inline i32x4 i32x4_is_equal (i32x4 x, i32x4 y)
-{ return (i32x4) _mm_cmpeq_epi32 ((__m128i) x, (__m128i) y); }
+always_inline i32x4
+i32x4_is_equal (i32x4 x, i32x4 y)
+{
+ return (i32x4) _mm_cmpeq_epi32 ((__m128i) x, (__m128i) y);
+}
always_inline u8x16
i8x16_is_greater (i8x16 x, i8x16 y)
-{ return (u8x16) _mm_cmpgt_epi8 ((__m128i) x, (__m128i) y); }
+{
+ return (u8x16) _mm_cmpgt_epi8 ((__m128i) x, (__m128i) y);
+}
always_inline u16x8
i16x8_is_greater (i16x8 x, i16x8 y)
-{ return (u16x8) _mm_cmpgt_epi16 ((__m128i) x, (__m128i) y); }
+{
+ return (u16x8) _mm_cmpgt_epi16 ((__m128i) x, (__m128i) y);
+}
always_inline u32x4
i32x4_is_greater (i32x4 x, i32x4 y)
-{ return (u32x4) _mm_cmpgt_epi32 ((__m128i) x, (__m128i) y); }
+{
+ return (u32x4) _mm_cmpgt_epi32 ((__m128i) x, (__m128i) y);
+}
-always_inline u8x16 u8x16_is_zero (u8x16 x)
+always_inline u8x16
+u8x16_is_zero (u8x16 x)
{
- u8x16 zero = {0};
+ u8x16 zero = { 0 };
return u8x16_is_equal (x, zero);
}
-always_inline u16x8 u16x8_is_zero (u16x8 x)
+always_inline u16x8
+u16x8_is_zero (u16x8 x)
{
- u16x8 zero = {0};
+ u16x8 zero = { 0 };
return u16x8_is_equal (x, zero);
}
-always_inline u32x4 u32x4_is_zero (u32x4 x)
+always_inline u32x4
+u32x4_is_zero (u32x4 x)
{
- u32x4 zero = {0};
+ u32x4 zero = { 0 };
return u32x4_is_equal (x, zero);
}
@@ -446,8 +572,7 @@ always_inline u32
u32x4_get0 (u32x4 x)
{
u32 result;
- asm volatile ("movd %[x], %[result]"
- : /* outputs */ [result] "=r" (result)
+ asm volatile ("movd %[x], %[result]": /* outputs */ [result] "=r" (result)
: /* inputs */ [x] "x" (x));
return result;
}
@@ -456,55 +581,69 @@ always_inline u32x4
u32x4_set0 (u32 x)
{
u32x4 result;
- asm volatile ("movd %[x], %[result]"
- : /* outputs */ [result] "=x" (result)
+ asm volatile ("movd %[x], %[result]": /* outputs */ [result] "=x" (result)
: /* inputs */ [x] "r" (x));
return result;
}
always_inline i32x4
i32x4_set0 (i32 x)
-{ return (i32x4) u32x4_set0 ((u32) x); }
+{
+ return (i32x4) u32x4_set0 ((u32) x);
+}
always_inline i32
i32x4_get0 (i32x4 x)
-{ return (i32) u32x4_get0 ((u32x4) x); }
+{
+ return (i32) u32x4_get0 ((u32x4) x);
+}
/* Converts all ones/zeros compare mask to bitmap. */
-always_inline u32 u8x16_compare_byte_mask (u8x16 x)
-{ return _mm_movemask_epi8 ((__m128i) x); }
+always_inline u32
+u8x16_compare_byte_mask (u8x16 x)
+{
+ return _mm_movemask_epi8 ((__m128i) x);
+}
extern u8 u32x4_compare_word_mask_table[256];
-always_inline u32 u32x4_compare_word_mask (u32x4 x)
+always_inline u32
+u32x4_compare_word_mask (u32x4 x)
{
u32 m = u8x16_compare_byte_mask ((u8x16) x);
return (u32x4_compare_word_mask_table[(m >> 0) & 0xff]
| (u32x4_compare_word_mask_table[(m >> 8) & 0xff] << 2));
}
-always_inline u32 u8x16_zero_byte_mask (u8x16 x)
+always_inline u32
+u8x16_zero_byte_mask (u8x16 x)
{
- u8x16 zero = {0};
+ u8x16 zero = { 0 };
return u8x16_compare_byte_mask (u8x16_is_equal (x, zero));
}
-always_inline u32 u16x8_zero_byte_mask (u16x8 x)
+always_inline u32
+u16x8_zero_byte_mask (u16x8 x)
{
- u16x8 zero = {0};
+ u16x8 zero = { 0 };
return u8x16_compare_byte_mask ((u8x16) u16x8_is_equal (x, zero));
}
-always_inline u32 u32x4_zero_byte_mask (u32x4 x)
+always_inline u32
+u32x4_zero_byte_mask (u32x4 x)
{
- u32x4 zero = {0};
+ u32x4 zero = { 0 };
return u8x16_compare_byte_mask ((u8x16) u32x4_is_equal (x, zero));
}
-always_inline u8x16 u8x16_max (u8x16 x, u8x16 y)
-{ return (u8x16) _mm_max_epu8 ((__m128i) x, (__m128i) y); }
+always_inline u8x16
+u8x16_max (u8x16 x, u8x16 y)
+{
+ return (u8x16) _mm_max_epu8 ((__m128i) x, (__m128i) y);
+}
-always_inline u32 u8x16_max_scalar (u8x16 x)
+always_inline u32
+u8x16_max_scalar (u8x16 x)
{
x = u8x16_max (x, u8x16_word_shift_right (x, 8));
x = u8x16_max (x, u8x16_word_shift_right (x, 4));
@@ -513,10 +652,14 @@ always_inline u32 u8x16_max_scalar (u8x16 x)
return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}
-always_inline u8x16 u8x16_min (u8x16 x, u8x16 y)
-{ return (u8x16) _mm_min_epu8 ((__m128i) x, (__m128i) y); }
+always_inline u8x16
+u8x16_min (u8x16 x, u8x16 y)
+{
+ return (u8x16) _mm_min_epu8 ((__m128i) x, (__m128i) y);
+}
-always_inline u8 u8x16_min_scalar (u8x16 x)
+always_inline u8
+u8x16_min_scalar (u8x16 x)
{
x = u8x16_min (x, u8x16_word_shift_right (x, 8));
x = u8x16_min (x, u8x16_word_shift_right (x, 4));
@@ -525,10 +668,14 @@ always_inline u8 u8x16_min_scalar (u8x16 x)
return _mm_extract_epi16 ((__m128i) x, 0) & 0xff;
}
-always_inline i16x8 i16x8_max (i16x8 x, i16x8 y)
-{ return (i16x8) _mm_max_epi16 ((__m128i) x, (__m128i) y); }
+always_inline i16x8
+i16x8_max (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_max_epi16 ((__m128i) x, (__m128i) y);
+}
-always_inline i16 i16x8_max_scalar (i16x8 x)
+always_inline i16
+i16x8_max_scalar (i16x8 x)
{
x = i16x8_max (x, i16x8_word_shift_right (x, 4));
x = i16x8_max (x, i16x8_word_shift_right (x, 2));
@@ -536,10 +683,14 @@ always_inline i16 i16x8_max_scalar (i16x8 x)
return _mm_extract_epi16 ((__m128i) x, 0);
}
-always_inline i16x8 i16x8_min (i16x8 x, i16x8 y)
-{ return (i16x8) _mm_min_epi16 ((__m128i) x, (__m128i) y); }
+always_inline i16x8
+i16x8_min (i16x8 x, i16x8 y)
+{
+ return (i16x8) _mm_min_epi16 ((__m128i) x, (__m128i) y);
+}
-always_inline i16 i16x8_min_scalar (i16x8 x)
+always_inline i16
+i16x8_min_scalar (i16x8 x)
{
x = i16x8_min (x, i16x8_word_shift_right (x, 4));
x = i16x8_min (x, i16x8_word_shift_right (x, 2));
@@ -550,3 +701,11 @@ always_inline i16 i16x8_min_scalar (i16x8 x)
#undef _signed_binop
#endif /* included_vector_sse2_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
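A minimal usage sketch of the reindented u8x16 wrappers above; not part of the patch. The <vppinfra/vector.h> include path is an assumption (vector_sse2.h is normally pulled in through it); the function names come straight from the hunks above.

/* sketch: count how many of 16 bytes equal a given value */
#include <vppinfra/clib.h>
#include <vppinfra/vector.h>

static u32
count_matching_bytes (u8x16 haystack, u8 needle)
{
  u8x16 n = u8x16_splat (needle);	/* broadcast needle into all 16 lanes */
  u8x16 eq = u8x16_is_equal (haystack, n);	/* all-ones lanes where equal */
  u32 mask = u8x16_compare_byte_mask (eq);	/* one bit per matching byte */
  return __builtin_popcount (mask);
}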
diff --git a/vppinfra/vppinfra/vhash.c b/vppinfra/vppinfra/vhash.c
index dbc1b364676..f9dac0d9ff1 100644
--- a/vppinfra/vppinfra/vhash.c
+++ b/vppinfra/vppinfra/vhash.c
@@ -41,7 +41,8 @@
/* Overflow search buckets have an extra u32x4 for saving key_hash data.
This makes it easier to refill main search bucket from overflow vector. */
-typedef struct {
+typedef struct
+{
/* 4 results for this bucket. */
u32x4_union_t result;
@@ -55,9 +56,7 @@ typedef struct {
always_inline void
set_overflow_result (vhash_overflow_search_bucket_t * b,
- u32 i,
- u32 result,
- u32 key_hash)
+ u32 i, u32 result, u32 key_hash)
{
b->result.as_u32[i] = result;
b->key_hash.as_u32[i] = key_hash;
@@ -65,8 +64,7 @@ set_overflow_result (vhash_overflow_search_bucket_t * b,
always_inline void
free_overflow_bucket (vhash_overflow_buckets_t * ob,
- vhash_overflow_search_bucket_t * b,
- u32 i)
+ vhash_overflow_search_bucket_t * b, u32 i)
{
u32 o = (u32x4_union_t *) b - ob->search_buckets;
ASSERT (o < vec_len (ob->search_buckets));
@@ -74,7 +72,8 @@ free_overflow_bucket (vhash_overflow_buckets_t * ob,
}
always_inline vhash_overflow_search_bucket_t *
-get_overflow_search_bucket (vhash_overflow_buckets_t * obs, u32 i, u32 n_key_u32s)
+get_overflow_search_bucket (vhash_overflow_buckets_t * obs, u32 i,
+ u32 n_key_u32s)
{
return ((vhash_overflow_search_bucket_t *)
vec_elt_at_index (obs->search_buckets, i));
@@ -82,7 +81,9 @@ get_overflow_search_bucket (vhash_overflow_buckets_t * obs, u32 i, u32 n_key_u32
always_inline vhash_overflow_search_bucket_t *
next_overflow_bucket (vhash_overflow_search_bucket_t * b, u32 n_key_u32s)
-{ return (vhash_overflow_search_bucket_t *) &b->key[n_key_u32s]; }
+{
+ return (vhash_overflow_search_bucket_t *) & b->key[n_key_u32s];
+}
#define foreach_vhash_overflow_bucket(b,ob,n_key_u32s) \
for ((b) = (vhash_overflow_search_bucket_t *) ob->search_buckets; \
@@ -90,63 +91,57 @@ next_overflow_bucket (vhash_overflow_search_bucket_t * b, u32 n_key_u32s)
b = next_overflow_bucket (b, n_key_u32s))
u32
-vhash_get_overflow (vhash_t * h,
- u32 key_hash,
- u32 vi,
- u32 n_key_u32s)
+vhash_get_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s)
{
- vhash_overflow_buckets_t * ob = vhash_get_overflow_buckets (h, key_hash);
- vhash_overflow_search_bucket_t * b;
+ vhash_overflow_buckets_t *ob = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *b;
u32 i, result = 0;
foreach_vhash_overflow_bucket (b, ob, n_key_u32s)
- {
- u32x4 r = b->result.as_u32x4;
-
- for (i = 0; i < n_key_u32s; i++)
- r &= vhash_bucket_compare (h, &b->key[0], i, vi);
+ {
+ u32x4 r = b->result.as_u32x4;
- result = vhash_merge_results (r);
- if (result)
- break;
- }
+ for (i = 0; i < n_key_u32s; i++)
+ r &= vhash_bucket_compare (h, &b->key[0], i, vi);
+
+ result = vhash_merge_results (r);
+ if (result)
+ break;
+ }
return result;
}
u32
vhash_set_overflow (vhash_t * h,
- u32 key_hash,
- u32 vi,
- u32 new_result,
- u32 n_key_u32s)
+ u32 key_hash, u32 vi, u32 new_result, u32 n_key_u32s)
{
- vhash_overflow_buckets_t * ob = vhash_get_overflow_buckets (h, key_hash);
- vhash_overflow_search_bucket_t * b;
+ vhash_overflow_buckets_t *ob = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *b;
u32 i_set, i, old_result;
foreach_vhash_overflow_bucket (b, ob, n_key_u32s)
- {
- u32x4 r;
+ {
+ u32x4 r;
- r = b->result.as_u32x4;
- for (i = 0; i < n_key_u32s; i++)
- r &= vhash_bucket_compare (h, &b->key[0], i, vi);
+ r = b->result.as_u32x4;
+ for (i = 0; i < n_key_u32s; i++)
+ r &= vhash_bucket_compare (h, &b->key[0], i, vi);
- old_result = vhash_merge_results (r);
- if (old_result)
- {
- i_set = vhash_non_empty_result_index (r);
- set_overflow_result (b, i_set, new_result, key_hash);
- return old_result;
- }
- }
+ old_result = vhash_merge_results (r);
+ if (old_result)
+ {
+ i_set = vhash_non_empty_result_index (r);
+ set_overflow_result (b, i_set, new_result, key_hash);
+ return old_result;
+ }
+ }
/* Check free list. */
if (vec_len (ob->free_indices) == 0)
{
/* Out of free overflow buckets. Resize. */
- u32 j, * p;
+ u32 j, *p;
i = vec_len (ob->search_buckets);
vec_resize_aligned (ob->search_buckets,
sizeof (b[0]) / sizeof (u32x4) + n_key_u32s,
@@ -176,41 +171,38 @@ vhash_set_overflow (vhash_t * h,
}
u32
-vhash_unset_overflow (vhash_t * h,
- u32 key_hash,
- u32 vi,
- u32 n_key_u32s)
+vhash_unset_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s)
{
- vhash_overflow_buckets_t * ob = vhash_get_overflow_buckets (h, key_hash);
- vhash_overflow_search_bucket_t * b;
+ vhash_overflow_buckets_t *ob = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *b;
u32 i_set, i, old_result;
foreach_vhash_overflow_bucket (b, ob, n_key_u32s)
- {
- u32x4 r;
+ {
+ u32x4 r;
- r = b->result.as_u32x4;
- for (i = 0; i < n_key_u32s; i++)
- r &= vhash_bucket_compare (h, &b->key[0], i, vi);
+ r = b->result.as_u32x4;
+ for (i = 0; i < n_key_u32s; i++)
+ r &= vhash_bucket_compare (h, &b->key[0], i, vi);
- old_result = vhash_merge_results (r);
- if (old_result)
- {
- i_set = vhash_non_empty_result_index (r);
+ old_result = vhash_merge_results (r);
+ if (old_result)
+ {
+ i_set = vhash_non_empty_result_index (r);
- /* Invalidate result and invert key hash so that this will
- never match since all keys in this overflow bucket have
- matching key hashs. */
- set_overflow_result (b, i_set, 0, ~key_hash);
+ /* Invalidate result and invert key hash so that this will
+ never match since all keys in this overflow bucket have
+	       matching key hashes. */
+ set_overflow_result (b, i_set, 0, ~key_hash);
- free_overflow_bucket (ob, b, i_set);
+ free_overflow_bucket (ob, b, i_set);
- ASSERT (ob->n_overflow > 0);
- ob->n_overflow--;
- h->n_elts--;
- return old_result;
- }
- }
+ ASSERT (ob->n_overflow > 0);
+ ob->n_overflow--;
+ h->n_elts--;
+ return old_result;
+ }
+ }
/* Could not find key. */
return 0;
@@ -219,39 +211,39 @@ vhash_unset_overflow (vhash_t * h,
void
vhash_unset_refill_from_overflow (vhash_t * h,
vhash_search_bucket_t * sb,
- u32 key_hash,
- u32 n_key_u32s)
+ u32 key_hash, u32 n_key_u32s)
{
- vhash_overflow_buckets_t * obs = vhash_get_overflow_buckets (h, key_hash);
- vhash_overflow_search_bucket_t * ob;
+ vhash_overflow_buckets_t *obs = vhash_get_overflow_buckets (h, key_hash);
+ vhash_overflow_search_bucket_t *ob;
u32 i, j, i_refill, bucket_mask = h->bucket_mask.as_u32[0];
/* Find overflow element with matching key hash. */
foreach_vhash_overflow_bucket (ob, obs, n_key_u32s)
- {
- for (i = 0; i < 4; i++)
- {
- if (! ob->result.as_u32[i])
- continue;
- if ((ob->key_hash.as_u32[i] & bucket_mask)
- != (key_hash & bucket_mask))
- continue;
-
- i_refill = vhash_empty_result_index (sb->result.as_u32x4);
- sb->result.as_u32[i_refill] = ob->result.as_u32[i];
- for (j = 0; j < n_key_u32s; j++)
- sb->key[j].as_u32[i_refill] = ob->key[j].as_u32[i];
- set_overflow_result (ob, i, 0, ~key_hash);
- free_overflow_bucket (obs, ob, i);
- return;
- }
- }
+ {
+ for (i = 0; i < 4; i++)
+ {
+ if (!ob->result.as_u32[i])
+ continue;
+ if ((ob->key_hash.as_u32[i] & bucket_mask)
+ != (key_hash & bucket_mask))
+ continue;
+
+ i_refill = vhash_empty_result_index (sb->result.as_u32x4);
+ sb->result.as_u32[i_refill] = ob->result.as_u32[i];
+ for (j = 0; j < n_key_u32s; j++)
+ sb->key[j].as_u32[i_refill] = ob->key[j].as_u32[i];
+ set_overflow_result (ob, i, 0, ~key_hash);
+ free_overflow_bucket (obs, ob, i);
+ return;
+ }
+ }
}
-void vhash_init (vhash_t * h, u32 log2_n_keys, u32 n_key_u32, u32 * hash_seeds)
+void
+vhash_init (vhash_t * h, u32 log2_n_keys, u32 n_key_u32, u32 * hash_seeds)
{
uword i, j, m;
- vhash_search_bucket_t * b;
+ vhash_search_bucket_t *b;
memset (h, 0, sizeof (h[0]));
@@ -260,7 +252,7 @@ void vhash_init (vhash_t * h, u32 log2_n_keys, u32 n_key_u32, u32 * hash_seeds)
h->log2_n_keys = log2_n_keys;
h->n_key_u32 = n_key_u32;
- m = pow2_mask (h->log2_n_keys) &~ 3;
+ m = pow2_mask (h->log2_n_keys) & ~3;
for (i = 0; i < VECTOR_WORD_TYPE_LEN (u32); i++)
h->bucket_mask.as_u32[i] = m;
@@ -277,16 +269,16 @@ void vhash_init (vhash_t * h, u32 log2_n_keys, u32 n_key_u32, u32 * hash_seeds)
}
static_always_inline u32
-vhash_main_key_gather (void * _vm, u32 vi, u32 wi, u32 n_key_u32)
+vhash_main_key_gather (void *_vm, u32 vi, u32 wi, u32 n_key_u32)
{
- vhash_main_t * vm = _vm;
+ vhash_main_t *vm = _vm;
return vec_elt (vm->keys, vi * n_key_u32 + wi);
}
static_always_inline u32x4
-vhash_main_4key_gather (void * _vm, u32 vi, u32 wi, u32 n_key_u32s)
+vhash_main_4key_gather (void *_vm, u32 vi, u32 wi, u32 n_key_u32s)
{
- vhash_main_t * vm = _vm;
+ vhash_main_t *vm = _vm;
u32x4_union_t x;
ASSERT (n_key_u32s == vm->n_key_u32);
@@ -300,28 +292,28 @@ vhash_main_4key_gather (void * _vm, u32 vi, u32 wi, u32 n_key_u32s)
}
static_always_inline u32
-vhash_main_set_result (void * _vm, u32 vi, u32 old_result, u32 n_key_u32)
+vhash_main_set_result (void *_vm, u32 vi, u32 old_result, u32 n_key_u32)
{
- vhash_main_t * vm = _vm;
- u32 * p = vec_elt_at_index (vm->results, vi);
+ vhash_main_t *vm = _vm;
+ u32 *p = vec_elt_at_index (vm->results, vi);
u32 new_result = p[0];
p[0] = old_result;
return new_result;
}
static_always_inline u32
-vhash_main_get_result (void * _vm, u32 vi, u32 old_result, u32 n_key_u32)
+vhash_main_get_result (void *_vm, u32 vi, u32 old_result, u32 n_key_u32)
{
- vhash_main_t * vm = _vm;
+ vhash_main_t *vm = _vm;
vec_elt (vm->results, vi) = old_result;
return old_result;
}
static_always_inline u32x4
-vhash_main_get_4result (void * _vm, u32 vi, u32x4 old_result, u32 n_key_u32)
+vhash_main_get_4result (void *_vm, u32 vi, u32x4 old_result, u32 n_key_u32)
{
- vhash_main_t * vm = _vm;
- u32x4 * p = (u32x4 *) vec_elt_at_index (vm->results, vi);
+ vhash_main_t *vm = _vm;
+ u32x4 *p = (u32x4 *) vec_elt_at_index (vm->results, vi);
p[0] = old_result;
return old_result;
}
@@ -439,12 +431,12 @@ vhash_main_get_4result (void * _vm, u32 vi, u32x4 old_result, u32 n_key_u32)
vm, N_KEY_U32); \
})
-_ (1);
-_ (2);
-_ (3);
-_ (4);
-_ (5);
-_ (6);
+_(1);
+_(2);
+_(3);
+_(4);
+_(5);
+_(6);
#undef _
@@ -463,13 +455,14 @@ _ (6);
vhash_mix_stage (vm->vhash, vm->n_vectors_div_4, N_KEY_U32); \
})
-_ (4);
-_ (5);
-_ (6);
+_(4);
+_(5);
+_(6);
#undef _
-typedef enum {
+typedef enum
+{
GET, SET, UNSET,
} vhash_main_op_t;
@@ -518,9 +511,9 @@ vhash_main_op (vhash_main_t * vm, vhash_main_op_t op)
vhash_main_unset_stage_##N_KEY_U32); \
break;
- _ (1);
- _ (2);
- _ (3);
+ _(1);
+ _(2);
+ _(3);
#undef _
@@ -552,9 +545,9 @@ vhash_main_op (vhash_main_t * vm, vhash_main_op_t op)
vhash_main_unset_stage_##N_KEY_U32); \
break;
- _ (4);
- _ (5);
- _ (6);
+ _(4);
+ _(5);
+ _(6);
#undef _
}
@@ -594,9 +587,9 @@ vhash_main_op (vhash_main_t * vm, vhash_main_op_t op)
vhash_main_unset_mod_stage_##N_KEY_U32); \
break;
- _ (1);
- _ (2);
- _ (3);
+ _(1);
+ _(2);
+ _(3);
#undef _
@@ -628,29 +621,40 @@ vhash_main_op (vhash_main_t * vm, vhash_main_op_t op)
vhash_main_unset_mod_stage_##N_KEY_U32); \
break;
- _ (4);
- _ (5);
- _ (6);
+ _(4);
+ _(5);
+ _(6);
#undef _
}
}
}
-void vhash_main_get (vhash_main_t * vm)
-{ vhash_main_op (vm, GET); }
+void
+vhash_main_get (vhash_main_t * vm)
+{
+ vhash_main_op (vm, GET);
+}
-void vhash_main_set (vhash_main_t * vm)
-{ vhash_main_op (vm, SET); }
+void
+vhash_main_set (vhash_main_t * vm)
+{
+ vhash_main_op (vm, SET);
+}
-void vhash_main_unset (vhash_main_t * vm)
-{ vhash_main_op (vm, UNSET); }
+void
+vhash_main_unset (vhash_main_t * vm)
+{
+ vhash_main_op (vm, UNSET);
+}
-u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index, u32 n_keys_this_call)
+u32
+vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index,
+ u32 n_keys_this_call)
{
- vhash_t * old = vr->old;
- vhash_main_t * vm = &vr->new;
- vhash_t * new = vm->vhash;
+ vhash_t *old = vr->old;
+ vhash_main_t *vm = &vr->new;
+ vhash_t *new = vm->vhash;
uword i, j, n_key_u32;
n_key_u32 = old->n_key_u32;
@@ -671,8 +675,9 @@ u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index, u32 n_keys_
{
for (i = vector_index; 0 == (i >> (old->log2_n_keys - 2)); i++)
{
- vhash_search_bucket_t * b = vhash_get_search_bucket_with_index (old, 4 * i, n_key_u32);
- u32 r, * k;
+ vhash_search_bucket_t *b =
+ vhash_get_search_bucket_with_index (old, 4 * i, n_key_u32);
+ u32 r, *k;
#define _(I) \
if ((r = b->result.as_u32[I]) != 0) \
@@ -683,10 +688,10 @@ u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index, u32 n_keys_
k[j] = b->key[j].as_u32[I]; \
}
- _ (0);
- _ (1);
- _ (2);
- _ (3);
+ _(0);
+ _(1);
+ _(2);
+ _(3);
#undef _
@@ -697,19 +702,18 @@ u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index, u32 n_keys_
}
}
}
-
+
/* Add overflow buckets. */
{
- vhash_overflow_buckets_t * ob;
- vhash_overflow_search_bucket_t * b;
+ vhash_overflow_buckets_t *ob;
+ vhash_overflow_search_bucket_t *b;
for (ob = old->overflow_buckets;
- ob < old->overflow_buckets + ARRAY_LEN (old->overflow_buckets);
- ob++)
+ ob < old->overflow_buckets + ARRAY_LEN (old->overflow_buckets); ob++)
{
foreach_vhash_overflow_bucket (b, ob, old->n_key_u32)
- {
- u32 r, * k;
+ {
+ u32 r, *k;
#define _(I) \
if ((r = b->result.as_u32[I]) != 0) \
@@ -720,13 +724,13 @@ u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index, u32 n_keys_
k[j] = b->key[j].as_u32[I]; \
}
- _ (0);
- _ (1);
- _ (2);
- _ (3);
+ _(0);
+ _(1);
+ _(2);
+ _(3);
#undef _
- }
+ }
}
}
@@ -736,7 +740,8 @@ u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index, u32 n_keys_
return ~0;
}
-void vhash_resize (vhash_t * old, u32 log2_n_keys)
+void
+vhash_resize (vhash_t * old, u32 log2_n_keys)
{
static vhash_resize_t vr;
vhash_t new;
@@ -757,3 +762,11 @@ void vhash_resize (vhash_t * old, u32 log2_n_keys)
}
#endif /* CLIB_HAVE_VEC128 */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/vhash.h b/vppinfra/vppinfra/vhash.h
index 62661b972dd..5ab42292001 100644
--- a/vppinfra/vppinfra/vhash.h
+++ b/vppinfra/vppinfra/vhash.h
@@ -47,18 +47,24 @@
#include <vppinfra/pipeline.h>
/* Gathers 32 bits worth of key with given index. */
-typedef u32 (vhash_key_function_t) (void * state, u32 vector_index, u32 key_word_index);
-typedef u32x4 (vhash_4key_function_t) (void * state, u32 vector_index, u32 key_word_index);
+typedef u32 (vhash_key_function_t) (void *state, u32 vector_index,
+ u32 key_word_index);
+typedef u32x4 (vhash_4key_function_t) (void *state, u32 vector_index,
+ u32 key_word_index);
/* Sets/gets result of hash lookup. */
-typedef u32 (vhash_result_function_t) (void * state, u32 vector_index, u32 result, u32 n_key_u32);
-typedef u32x4 (vhash_4result_function_t) (void * state, u32 vector_index, u32x4 results, u32 n_key_u32);
+typedef u32 (vhash_result_function_t) (void *state, u32 vector_index,
+ u32 result, u32 n_key_u32);
+typedef u32x4 (vhash_4result_function_t) (void *state, u32 vector_index,
+ u32x4 results, u32 n_key_u32);
-typedef struct {
+typedef struct
+{
u32x4_union_t hashed_key[3];
} vhash_hashed_key_t;
/* Search buckets are really this structure. */
-typedef struct {
+typedef struct
+{
/* 4 results for this bucket.
Zero is used to mark empty results. This means user can't use the result ~0
since user results differ from internal results stored in buckets by 1.
@@ -69,21 +75,23 @@ typedef struct {
u32x4_union_t key[0];
} vhash_search_bucket_t;
-typedef struct {
- u32x4_union_t * search_buckets;
+typedef struct
+{
+ u32x4_union_t *search_buckets;
/* Vector of bucket free indices. */
- u32 * free_indices;
+ u32 *free_indices;
/* Number of entries in this overflow bucket. */
u32 n_overflow;
} vhash_overflow_buckets_t;
-typedef struct {
+typedef struct
+{
/* 2^log2_n_keys keys grouped in groups of 4.
Each bucket contains 4 results plus 4 keys for a
total of (1 + n_key_u32) u32x4s. */
- u32x4_union_t * search_buckets;
+ u32x4_union_t *search_buckets;
/* When a bucket of 4 results/keys are full we search
the overflow. hash_key is used to select which overflow
@@ -111,11 +119,11 @@ typedef struct {
u32 log2_n_key_word_len_u32x;
/* Work space to store keys between pipeline stages. */
- u32x4_union_t * key_work_space;
+ u32x4_union_t *key_work_space;
/* Hash work space to store Jenkins hash values between
pipeline stages. */
- vhash_hashed_key_t * hash_work_space;
+ vhash_hashed_key_t *hash_work_space;
} vhash_t;
always_inline vhash_overflow_buckets_t *
@@ -191,7 +199,8 @@ vhash_validate_sizes (vhash_t * h, u32 n_key_u32, u32 n_vectors)
n = clib_max (n, 8);
h->log2_n_key_word_len_u32x = l = min_log2 (n);
- vec_validate_aligned (h->key_work_space, (n_key_u32 << l) - 1, CLIB_CACHE_LINE_BYTES);
+ vec_validate_aligned (h->key_work_space, (n_key_u32 << l) - 1,
+ CLIB_CACHE_LINE_BYTES);
vec_validate_aligned (h->hash_work_space, n - 1, CLIB_CACHE_LINE_BYTES);
}
@@ -200,8 +209,7 @@ vhash_gather_key_stage (vhash_t * h,
u32 vector_index,
u32 n_vectors,
vhash_key_function_t key_function,
- void * state,
- u32 n_key_u32s)
+ void *state, u32 n_key_u32s)
{
u32 i, j, vi;
@@ -210,8 +218,7 @@ vhash_gather_key_stage (vhash_t * h,
{
vi = vector_index * 4 + i;
for (j = 0; j < n_key_u32s; j++)
- vhash_set_key_word (h, j, vi,
- key_function (state, vi, j));
+ vhash_set_key_word (h, j, vi, key_function (state, vi, j));
}
}
@@ -219,8 +226,7 @@ always_inline void
vhash_gather_4key_stage (vhash_t * h,
u32 vector_index,
vhash_4key_function_t key_function,
- void * state,
- u32 n_key_u32s)
+ void *state, u32 n_key_u32s)
{
u32 j, vi;
vi = vector_index * 4;
@@ -229,9 +235,7 @@ vhash_gather_4key_stage (vhash_t * h,
}
always_inline void
-vhash_mix_stage (vhash_t * h,
- u32 vector_index,
- u32 n_key_u32s)
+vhash_mix_stage (vhash_t * h, u32 vector_index, u32 n_key_u32s)
{
i32 i, n_left;
u32x a, b, c;
@@ -244,18 +248,22 @@ vhash_mix_stage (vhash_t * h,
c = h->hash_seeds[2].as_u32x4;
for (i = 0, n_left = n_key_u32s - 3; n_left > 0; n_left -= 3, i += 3)
{
- a += vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 0), vector_index);
+ a +=
+ vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 0), vector_index);
if (n_left > 1)
- b += vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 1), vector_index);
+ b +=
+ vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 1), vector_index);
if (n_left > 2)
- c += vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 2), vector_index);
+ c +=
+ vhash_get_key_word_u32x (h, n_key_u32s - 1 - (i + 2), vector_index);
hash_v3_mix_u32x (a, b, c);
}
/* Save away a, b, c for later finalize. */
{
- vhash_hashed_key_t * hk = vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
hk->hashed_key[0].as_u32x4 = a;
hk->hashed_key[1].as_u32x4 = b;
hk->hashed_key[2].as_u32x4 = c;
@@ -267,7 +275,9 @@ vhash_get_search_bucket_with_index (vhash_t * h, u32 i, u32 n_key_u32s)
{
return ((vhash_search_bucket_t *)
vec_elt_at_index (h->search_buckets,
- (i / 4) * ((sizeof (vhash_search_bucket_t) / sizeof (u32x4)) + n_key_u32s)));
+ (i / 4) *
+ ((sizeof (vhash_search_bucket_t) /
+ sizeof (u32x4)) + n_key_u32s)));
}
always_inline vhash_search_bucket_t *
@@ -278,26 +288,27 @@ vhash_get_search_bucket (vhash_t * h, u32 key_hash, u32 n_key_u32s)
}
always_inline u32x4
-vhash_get_4_search_bucket_byte_offsets (vhash_t * h, u32x4 key_hash, u32 n_key_u32s)
+vhash_get_4_search_bucket_byte_offsets (vhash_t * h, u32x4 key_hash,
+ u32 n_key_u32s)
{
- vhash_search_bucket_t * b;
+ vhash_search_bucket_t *b;
u32 n_bytes_per_bucket = sizeof (b[0]) + n_key_u32s * sizeof (b->key[0]);
u32x4 r = key_hash & h->bucket_mask.as_u32x4;
/* Multiply with shifts and adds to get bucket byte offset. */
#define _(x) u32x4_ishift_left (r, (x) - 2)
if (n_bytes_per_bucket == (1 << 5))
- r = _ (5);
+ r = _(5);
else if (n_bytes_per_bucket == ((1 << 5) + (1 << 4)))
- r = _ (5) + _ (4);
+ r = _(5) + _(4);
else if (n_bytes_per_bucket == (1 << 6))
- r = _ (6);
+ r = _(6);
else if (n_bytes_per_bucket == ((1 << 6) + (1 << 4)))
- r = _ (6) + _ (4);
+ r = _(6) + _(4);
else if (n_bytes_per_bucket == ((1 << 6) + (1 << 5)))
- r = _ (6) + _ (5);
+ r = _(6) + _(5);
else if (n_bytes_per_bucket == ((1 << 6) + (1 << 5) + (1 << 4)))
- r = _ (6) + _ (5) + _ (4);
+ r = _(6) + _(5) + _(4);
else
ASSERT (0);
#undef _
@@ -305,13 +316,12 @@ vhash_get_4_search_bucket_byte_offsets (vhash_t * h, u32x4 key_hash, u32 n_key_u
}
always_inline void
-vhash_finalize_stage (vhash_t * h,
- u32 vector_index,
- u32 n_key_u32s)
+vhash_finalize_stage (vhash_t * h, u32 vector_index, u32 n_key_u32s)
{
i32 n_left;
u32x a, b, c;
- vhash_hashed_key_t * hk = vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
if (n_key_u32s <= 3)
{
@@ -343,7 +353,7 @@ vhash_finalize_stage (vhash_t * h,
/* Prefetch buckets. This costs a bit for small tables but saves
big for large ones. */
{
- vhash_search_bucket_t * b0, * b1, * b2, * b3;
+ vhash_search_bucket_t *b0, *b1, *b2, *b3;
u32x4_union_t kh;
kh.as_u32x4 = vhash_get_4_search_bucket_byte_offsets (h, c, n_key_u32s);
@@ -354,13 +364,17 @@ vhash_finalize_stage (vhash_t * h,
b2 = (void *) h->search_buckets + kh.as_u32[2];
b3 = (void *) h->search_buckets + kh.as_u32[3];
- CLIB_PREFETCH (b0, sizeof (b0[0]) + n_key_u32s * sizeof (b0->key[0]), READ);
- CLIB_PREFETCH (b1, sizeof (b1[0]) + n_key_u32s * sizeof (b1->key[0]), READ);
- CLIB_PREFETCH (b2, sizeof (b2[0]) + n_key_u32s * sizeof (b2->key[0]), READ);
- CLIB_PREFETCH (b3, sizeof (b3[0]) + n_key_u32s * sizeof (b3->key[0]), READ);
+ CLIB_PREFETCH (b0, sizeof (b0[0]) + n_key_u32s * sizeof (b0->key[0]),
+ READ);
+ CLIB_PREFETCH (b1, sizeof (b1[0]) + n_key_u32s * sizeof (b1->key[0]),
+ READ);
+ CLIB_PREFETCH (b2, sizeof (b2[0]) + n_key_u32s * sizeof (b2->key[0]),
+ READ);
+ CLIB_PREFETCH (b3, sizeof (b3[0]) + n_key_u32s * sizeof (b3->key[0]),
+ READ);
}
}
-
+
always_inline u32
vhash_merge_results (u32x4 r)
{
@@ -372,14 +386,16 @@ vhash_merge_results (u32x4 r)
/* Bucket is full if none of its 4 results are 0. */
always_inline u32
vhash_search_bucket_is_full (u32x4 r)
-{ return u32x4_zero_byte_mask (r) == 0; }
+{
+ return u32x4_zero_byte_mask (r) == 0;
+}
always_inline u32
vhash_non_empty_result_index (u32x4 x)
{
u32 empty_mask = u32x4_zero_byte_mask (x);
ASSERT (empty_mask != 0xffff);
- return min_log2 (0xffff &~ empty_mask) / 4;
+ return min_log2 (0xffff & ~empty_mask) / 4;
}
always_inline u32
@@ -392,12 +408,10 @@ vhash_empty_result_index (u32x4 x)
always_inline u32x4
vhash_bucket_compare (vhash_t * h,
- u32x4_union_t * bucket,
- u32 key_word_index,
- u32 vi)
+ u32x4_union_t * bucket, u32 key_word_index, u32 vi)
{
u32 k = vhash_get_key_word (h, key_word_index, vi);
- u32x4 x = {k, k, k, k};
+ u32x4 x = { k, k, k, k };
return u32x4_is_equal (bucket[key_word_index].as_u32x4, x);
}
@@ -415,22 +429,19 @@ do { \
cmp3 = u32x4_is_equal (b3->key[wi].as_u32x4, _k3); \
} while (0)
-u32 vhash_get_overflow (vhash_t * h,
- u32 key_hash,
- u32 vi,
- u32 n_key_u32s);
+u32 vhash_get_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s);
always_inline void
vhash_get_stage (vhash_t * h,
u32 vector_index,
u32 n_vectors,
vhash_result_function_t result_function,
- void * state,
- u32 n_key_u32s)
+ void *state, u32 n_key_u32s)
{
u32 i, j;
- vhash_hashed_key_t * hk = vec_elt_at_index (h->hash_work_space, vector_index);
- vhash_search_bucket_t * b;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b;
for (i = 0; i < n_vectors; i++)
{
@@ -446,10 +457,10 @@ vhash_get_stage (vhash_t * h,
r &= vhash_bucket_compare (h, &b->key[0], j, vi);
/* At this point only one of 4 results should be non-zero.
- So we can or all 4 together and get the valid result (if there is one). */
+ So we can or all 4 together and get the valid result (if there is one). */
result = vhash_merge_results (r);
- if (! result && vhash_search_bucket_is_full (r0))
+ if (!result && vhash_search_bucket_is_full (r0))
result = vhash_get_overflow (h, key_hash, vi, n_key_u32s);
result_function (state, vi, result - 1, n_key_u32s);
@@ -460,12 +471,12 @@ always_inline void
vhash_get_4_stage (vhash_t * h,
u32 vector_index,
vhash_4result_function_t result_function,
- void * state,
- u32 n_key_u32s)
+ void *state, u32 n_key_u32s)
{
u32 i, vi;
- vhash_hashed_key_t * hk = vec_elt_at_index (h->hash_work_space, vector_index);
- vhash_search_bucket_t * b0, * b1, * b2, * b3;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b0, *b1, *b2, *b3;
u32x4 r0, r1, r2, r3, r0_before, r1_before, r2_before, r3_before;
u32x4_union_t kh;
@@ -487,8 +498,7 @@ vhash_get_4_stage (vhash_t * h,
{
u32x4 c0, c1, c2, c3;
vhash_bucket_compare_4 (h, i, vector_index,
- b0, b1, b2, b3,
- c0, c1, c2, c3);
+ b0, b1, b2, b3, c0, c1, c2, c3);
r0 &= c0;
r1 &= c1;
r2 &= c2;
@@ -500,34 +510,36 @@ vhash_get_4_stage (vhash_t * h,
/* Gather together 4 results. */
{
u32x4_union_t r;
- u32x4 ones = {1,1,1,1};
+ u32x4 ones = { 1, 1, 1, 1 };
u32 not_found_mask;
r.as_u32x4 = r0 | r1 | r2 | r3;
not_found_mask = u32x4_zero_byte_mask (r.as_u32x4);
- not_found_mask &= ((vhash_search_bucket_is_full (r0_before) << (4*0))
- | (vhash_search_bucket_is_full (r1_before) << (4*1))
- | (vhash_search_bucket_is_full (r2_before) << (4*2))
- | (vhash_search_bucket_is_full (r3_before) << (4*3)));
+ not_found_mask &= ((vhash_search_bucket_is_full (r0_before) << (4 * 0))
+ | (vhash_search_bucket_is_full (r1_before) << (4 * 1))
+ | (vhash_search_bucket_is_full (r2_before) << (4 * 2))
+ | (vhash_search_bucket_is_full (r3_before) <<
+ (4 * 3)));
if (not_found_mask)
{
u32x4_union_t key_hash;
- key_hash.as_u32x4 = hk->hashed_key[2].as_u32x4 & h->bucket_mask.as_u32x4;
+ key_hash.as_u32x4 =
+ hk->hashed_key[2].as_u32x4 & h->bucket_mask.as_u32x4;
/* Slow path: one of the buckets may have been full and we need to search overflow. */
- if (not_found_mask & (1 << (4*0)))
+ if (not_found_mask & (1 << (4 * 0)))
r.as_u32[0] = vhash_get_overflow (h, key_hash.as_u32[0],
- vi + 0, n_key_u32s);
- if (not_found_mask & (1 << (4*1)))
+ vi + 0, n_key_u32s);
+ if (not_found_mask & (1 << (4 * 1)))
r.as_u32[1] = vhash_get_overflow (h, key_hash.as_u32[1],
- vi + 1, n_key_u32s);
- if (not_found_mask & (1 << (4*2)))
+ vi + 1, n_key_u32s);
+ if (not_found_mask & (1 << (4 * 2)))
r.as_u32[2] = vhash_get_overflow (h, key_hash.as_u32[2],
- vi + 2, n_key_u32s);
- if (not_found_mask & (1 << (4*3)))
+ vi + 2, n_key_u32s);
+ if (not_found_mask & (1 << (4 * 3)))
r.as_u32[3] = vhash_get_overflow (h, key_hash.as_u32[3],
- vi + 3, n_key_u32s);
+ vi + 3, n_key_u32s);
}
result_function (state, vi, r.as_u32x4 - ones, n_key_u32s);
@@ -536,22 +548,19 @@ vhash_get_4_stage (vhash_t * h,
u32
vhash_set_overflow (vhash_t * h,
- u32 key_hash,
- u32 vi,
- u32 new_result,
- u32 n_key_u32s);
+ u32 key_hash, u32 vi, u32 new_result, u32 n_key_u32s);
always_inline void
vhash_set_stage (vhash_t * h,
u32 vector_index,
u32 n_vectors,
vhash_result_function_t result_function,
- void * state,
- u32 n_key_u32s)
+ void *state, u32 n_key_u32s)
{
u32 i, j, n_new_elts = 0;
- vhash_hashed_key_t * hk = vec_elt_at_index (h->hash_work_space, vector_index);
- vhash_search_bucket_t * b;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b;
for (i = 0; i < n_vectors; i++)
{
@@ -571,17 +580,17 @@ vhash_set_stage (vhash_t * h,
r = r0 & cmp;
/* At this point only one of 4 results should be non-zero.
- So we can or all 4 together and get the valid result (if there is one). */
+ So we can or all 4 together and get the valid result (if there is one). */
old_result = vhash_merge_results (r);
- if (! old_result && vhash_search_bucket_is_full (r0))
+ if (!old_result && vhash_search_bucket_is_full (r0))
old_result = vhash_get_overflow (h, key_hash, vi, n_key_u32s);
/* Get new result; possibly do something with old result. */
new_result = result_function (state, vi, old_result - 1, n_key_u32s);
/* User cannot use ~0 as a hash result since a result of 0 is
- used to mark unused bucket entries. */
+ used to mark unused bucket entries. */
ASSERT (new_result + 1 != 0);
new_result += 1;
@@ -603,7 +612,8 @@ vhash_set_stage (vhash_t * h,
/* Rotate 4 bit valid mask so that key_hash corresponds to bit 0. */
i_set = key_hash & 3;
- valid_mask = ((valid_mask >> i_set) | (valid_mask << (4 - i_set))) & 0xf;
+ valid_mask =
+ ((valid_mask >> i_set) | (valid_mask << (4 - i_set))) & 0xf;
/* Insert into first empty position in bucket after key_hash. */
i_set = (i_set + h->find_first_zero_table[valid_mask]) & 3;
@@ -626,17 +636,12 @@ vhash_set_stage (vhash_t * h,
h->n_elts += n_new_elts;
}
-u32
-vhash_unset_overflow (vhash_t * h,
- u32 key_hash,
- u32 vi,
- u32 n_key_u32s);
+u32 vhash_unset_overflow (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s);
void
vhash_unset_refill_from_overflow (vhash_t * h,
vhash_search_bucket_t * b,
- u32 key_hash,
- u32 n_key_u32s);
+ u32 key_hash, u32 n_key_u32s);
/* Note: Eliot tried doing 4 unsets at once and could not get a speed up
and abandoned vhash_unset_4_stage. */
@@ -645,12 +650,12 @@ vhash_unset_stage (vhash_t * h,
u32 vector_index,
u32 n_vectors,
vhash_result_function_t result_function,
- void * state,
- u32 n_key_u32s)
+ void *state, u32 n_key_u32s)
{
u32 i, j, n_elts_unset = 0;
- vhash_hashed_key_t * hk = vec_elt_at_index (h->hash_work_space, vector_index);
- vhash_search_bucket_t * b;
+ vhash_hashed_key_t *hk =
+ vec_elt_at_index (h->hash_work_space, vector_index);
+ vhash_search_bucket_t *b;
for (i = 0; i < n_vectors; i++)
{
@@ -668,7 +673,7 @@ vhash_unset_stage (vhash_t * h,
r0 = b->result.as_u32x4;
/* At this point cmp is all ones where key matches and zero otherwise.
- So, this will invalidate results for matching key and do nothing otherwise. */
+ So, this will invalidate results for matching key and do nothing otherwise. */
b->result.as_u32x4 = r0 & ~cmp;
old_result = vhash_merge_results (r0 & cmp);
@@ -694,19 +699,23 @@ void vhash_init (vhash_t * h, u32 log2_n_keys, u32 n_key_u32,
void vhash_resize (vhash_t * old, u32 log2_n_keys);
-typedef struct {
- vhash_t * vhash;
+typedef struct
+{
+ vhash_t *vhash;
- union {
- struct {
- u32 * keys;
- u32 * results;
+ union
+ {
+ struct
+ {
+ u32 *keys;
+ u32 *results;
};
/* Vector layout for get keys. */
- struct {
- u32x4_union_t * get_keys;
- u32x4_union_t * get_results;
+ struct
+ {
+ u32x4_union_t *get_keys;
+ u32x4_union_t *get_results;
};
};
@@ -738,7 +747,7 @@ always_inline void
vhash_get_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
u32 value)
{
- u32x4_union_t * k = vec_elt_at_index (vm->get_keys, (vi / 4) * n_key_u32);
+ u32x4_union_t *k = vec_elt_at_index (vm->get_keys, (vi / 4) * n_key_u32);
ASSERT (wi < n_key_u32);
k[wi].as_u32[vi % 4] = value;
}
@@ -746,7 +755,7 @@ vhash_get_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
always_inline u32
vhash_get_fetch_result (vhash_main_t * vm, u32 vi)
{
- u32x4_union_t * r = vec_elt_at_index (vm->get_results, vi / 4);
+ u32x4_union_t *r = vec_elt_at_index (vm->get_results, vi / 4);
return r->as_u32[vi % 4];
}
@@ -770,7 +779,7 @@ always_inline void
vhash_set_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
u32 value)
{
- u32 * k = vec_elt_at_index (vm->keys, vi * n_key_u32);
+ u32 *k = vec_elt_at_index (vm->keys, vi * n_key_u32);
ASSERT (wi < n_key_u32);
k[wi] = value;
}
@@ -778,14 +787,14 @@ vhash_set_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
always_inline void
vhash_set_set_result (vhash_main_t * vm, u32 vi, u32 result)
{
- u32 * r = vec_elt_at_index (vm->results, vi);
+ u32 *r = vec_elt_at_index (vm->results, vi);
r[0] = result;
}
always_inline u32
vhash_set_fetch_old_result (vhash_main_t * vm, u32 vi)
{
- u32 * r = vec_elt_at_index (vm->results, vi);
+ u32 *r = vec_elt_at_index (vm->results, vi);
return r[0];
}
@@ -793,31 +802,49 @@ void vhash_main_set (vhash_main_t * vm);
always_inline u32
vhash_unset_alloc_keys (vhash_main_t * vm, u32 n_keys, u32 n_key_u32)
-{ return vhash_set_alloc_keys (vm, n_keys, n_key_u32); }
+{
+ return vhash_set_alloc_keys (vm, n_keys, n_key_u32);
+}
always_inline void
vhash_unset_set_key_word (vhash_main_t * vm, u32 vi, u32 wi, u32 n_key_u32,
- u32 value)
-{ vhash_set_set_key_word (vm, vi, wi, n_key_u32, value); }
+ u32 value)
+{
+ vhash_set_set_key_word (vm, vi, wi, n_key_u32, value);
+}
always_inline void
vhash_unset_set_result (vhash_main_t * vm, u32 vi, u32 result)
-{ vhash_set_set_result (vm, vi, result); }
+{
+ vhash_set_set_result (vm, vi, result);
+}
always_inline u32
vhash_unset_fetch_old_result (vhash_main_t * vm, u32 vi)
-{ return vhash_set_fetch_old_result (vm, vi); }
+{
+ return vhash_set_fetch_old_result (vm, vi);
+}
void vhash_main_unset (vhash_main_t * vm);
-typedef struct {
+typedef struct
+{
vhash_main_t new;
- vhash_t * old;
+ vhash_t *old;
} vhash_resize_t;
-u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index, u32 n_vectors);
+u32 vhash_resize_incremental (vhash_resize_t * vr, u32 vector_index,
+ u32 n_vectors);
#endif /* CLIB_HAVE_VEC128 */
#endif /* included_clib_vhash_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
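A condensed sketch of the lookup flow that vhash_get_stage() and vhash_get_overflow() above implement, with the pipeline plumbing stripped out; it only uses helpers visible in this patch and assumes a CLIB_HAVE_VEC128 build. Illustrative only, not part of the library.

/* sketch: main-bucket probe, overflow fall-back, and the +1/-1 result bias */
#include <vppinfra/vhash.h>

static u32
lookup_one_key_sketch (vhash_t * h, u32 key_hash, u32 vi, u32 n_key_u32s)
{
  vhash_search_bucket_t *b = vhash_get_search_bucket (h, key_hash, n_key_u32s);
  u32x4 r0 = b->result.as_u32x4;	/* 4 candidate results; 0 marks empty */
  u32x4 r = r0;
  u32 j, result;

  /* AND the per-word compare masks; only a slot whose whole key matches
     keeps its (nonzero) result. */
  for (j = 0; j < n_key_u32s; j++)
    r &= vhash_bucket_compare (h, &b->key[0], j, vi);

  result = vhash_merge_results (r);

  /* A full bucket with no match may have spilled into the overflow buckets. */
  if (!result && vhash_search_bucket_is_full (r0))
    result = vhash_get_overflow (h, key_hash, vi, n_key_u32s);

  /* Stored results are biased by +1 so that 0 can mark an unused entry;
     undo the bias before handing the result back. */
  return result - 1;
}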
diff --git a/vppinfra/vppinfra/vm_linux_kernel.h b/vppinfra/vppinfra/vm_linux_kernel.h
index 1c6ec0d04e9..fd9e6148e0a 100644
--- a/vppinfra/vppinfra/vm_linux_kernel.h
+++ b/vppinfra/vppinfra/vm_linux_kernel.h
@@ -40,19 +40,39 @@
#include <linux/vmalloc.h>
#include <linux/gfp.h> /* for GFP_* */
-#include <asm/pgtable.h> /* for PAGE_KERNEL */
+#include <asm/pgtable.h> /* for PAGE_KERNEL */
/* Allocate virtual address space. */
-always_inline void * clib_mem_vm_alloc (uword size)
-{ return vmalloc (size); }
+always_inline void *
+clib_mem_vm_alloc (uword size)
+{
+ return vmalloc (size);
+}
-always_inline void clib_mem_vm_free (void * addr, uword size)
-{ vfree (addr); }
+always_inline void
+clib_mem_vm_free (void *addr, uword size)
+{
+ vfree (addr);
+}
-always_inline void * clib_mem_vm_unmap (void * addr, uword size)
-{ return 0; }
+always_inline void *
+clib_mem_vm_unmap (void *addr, uword size)
+{
+ return 0;
+}
-always_inline void * clib_mem_vm_map (void * addr, uword size)
-{ return addr; }
+always_inline void *
+clib_mem_vm_map (void *addr, uword size)
+{
+ return addr;
+}
#endif /* included_vm_linux_kernel_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/vm_standalone.h b/vppinfra/vppinfra/vm_standalone.h
index 33dde4d0665..2cd431bc46c 100644
--- a/vppinfra/vppinfra/vm_standalone.h
+++ b/vppinfra/vppinfra/vm_standalone.h
@@ -40,16 +40,35 @@
/* Stubs for standalone "system" which has no VM support. */
-always_inline void * clib_mem_vm_alloc (uword size)
-{ return 0; }
+always_inline void *
+clib_mem_vm_alloc (uword size)
+{
+ return 0;
+}
-always_inline void clib_mem_vm_free (void * addr, uword size)
-{ }
+always_inline void
+clib_mem_vm_free (void *addr, uword size)
+{
+}
-always_inline void * clib_mem_vm_unmap (void * addr, uword size)
-{ return 0; }
+always_inline void *
+clib_mem_vm_unmap (void *addr, uword size)
+{
+ return 0;
+}
-always_inline void * clib_mem_vm_map (void * addr, uword size)
-{ return addr; }
+always_inline void *
+clib_mem_vm_map (void *addr, uword size)
+{
+ return addr;
+}
#endif /* included_vm_standalone_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/vm_unix.h b/vppinfra/vppinfra/vm_unix.h
index 80e0595a9da..07e865166e0 100644
--- a/vppinfra/vppinfra/vm_unix.h
+++ b/vppinfra/vppinfra/vm_unix.h
@@ -42,9 +42,10 @@
#include <sys/mman.h>
/* Allocate virtual address space. */
-always_inline void * clib_mem_vm_alloc (uword size)
+always_inline void *
+clib_mem_vm_alloc (uword size)
{
- void * mmap_addr;
+ void *mmap_addr;
uword flags = MAP_PRIVATE;
#ifdef MAP_ANONYMOUS
@@ -58,12 +59,16 @@ always_inline void * clib_mem_vm_alloc (uword size)
return mmap_addr;
}
-always_inline void clib_mem_vm_free (void * addr, uword size)
-{ munmap (addr, size); }
+always_inline void
+clib_mem_vm_free (void *addr, uword size)
+{
+ munmap (addr, size);
+}
-always_inline void * clib_mem_vm_unmap (void * addr, uword size)
+always_inline void *
+clib_mem_vm_unmap (void *addr, uword size)
{
- void * mmap_addr;
+ void *mmap_addr;
uword flags = MAP_PRIVATE | MAP_FIXED;
/* To unmap we "map" with no protection. If we actually called
@@ -77,9 +82,10 @@ always_inline void * clib_mem_vm_unmap (void * addr, uword size)
return mmap_addr;
}
-always_inline void * clib_mem_vm_map (void * addr, uword size)
+always_inline void *
+clib_mem_vm_map (void *addr, uword size)
{
- void * mmap_addr;
+ void *mmap_addr;
uword flags = MAP_PRIVATE | MAP_FIXED;
mmap_addr = mmap (addr, size, (PROT_READ | PROT_WRITE), flags, -1, 0);
@@ -90,3 +96,11 @@ always_inline void * clib_mem_vm_map (void * addr, uword size)
}
#endif /* included_vm_unix_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
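A short sketch of the clib_mem_vm_* wrappers above (Unix variant); not part of the patch. Including vm_unix.h directly, and treating a return of 0 as allocation failure, are assumptions -- these headers are normally pulled in indirectly.

/* sketch: reserve and release 1 MB of anonymous address space */
#include <vppinfra/clib.h>
#include <vppinfra/vm_unix.h>

static void
vm_round_trip_sketch (void)
{
  uword size = 1 << 20;
  void *base = clib_mem_vm_alloc (size);	/* anonymous, read/write mmap */
  if (base)			/* assumed: 0 signals failure */
    clib_mem_vm_free (base, size);	/* munmap */
}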
diff --git a/vppinfra/vppinfra/xxhash.h b/vppinfra/vppinfra/xxhash.h
index 09a640e46c4..ea1e21bf144 100644
--- a/vppinfra/vppinfra/xxhash.h
+++ b/vppinfra/vppinfra/xxhash.h
@@ -14,7 +14,7 @@
*/
/*
Original license for the code used to construct
- clib_xxhash(...).
+ clib_xxhash(...).
xxHash - Fast Hash algorithm
Copyright (C) 2012-2014, Yann Collet.
@@ -23,14 +23,14 @@
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
-
+
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
-
+
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
@@ -54,7 +54,8 @@
#define PRIME64_5 2870177450012600261ULL
#define XXH_rotl64(x,r) ((x << r) | (x >> (64 - r)))
-static inline u64 clib_xxhash (u64 key)
+static inline u64
+clib_xxhash (u64 key)
{
u64 k1, h64;
@@ -64,7 +65,7 @@ static inline u64 clib_xxhash (u64 key)
k1 = XXH_rotl64 (k1, 31);
k1 *= PRIME64_1;
h64 ^= k1;
- h64 = XXH_rotl64(h64,27) * PRIME64_1 + PRIME64_4;
+ h64 = XXH_rotl64 (h64, 27) * PRIME64_1 + PRIME64_4;
h64 ^= h64 >> 33;
h64 *= PRIME64_2;
@@ -75,3 +76,11 @@ static inline u64 clib_xxhash (u64 key)
}
#endif /* __included_xxhash_h__ */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
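A tiny sketch of clib_xxhash() as restyled above: hash one 64-bit key and fold it into a power-of-two bucket count. Pulling the u64 typedef from <vppinfra/clib.h> is an assumption.

/* sketch: single-u64 xxHash -> bucket index */
#include <vppinfra/clib.h>
#include <vppinfra/xxhash.h>
#include <stdio.h>

int
main (void)
{
  u64 key = 0x0123456789abcdefULL;
  u64 h = clib_xxhash (key);
  u64 bucket = h & ((1ULL << 10) - 1);	/* fold into 1024 buckets */
  printf ("hash %llx -> bucket %llu\n",
	  (unsigned long long) h, (unsigned long long) bucket);
  return 0;
}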
diff --git a/vppinfra/vppinfra/xy.h b/vppinfra/vppinfra/xy.h
index 571a2a13c3c..fb562161a62 100644
--- a/vppinfra/vppinfra/xy.h
+++ b/vppinfra/vppinfra/xy.h
@@ -46,3 +46,11 @@ typedef __complex__ int i32xy_t;
#define xy_y_unit_vector (1I)
#endif /* included_clib_xy_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
diff --git a/vppinfra/vppinfra/zvec.c b/vppinfra/vppinfra/zvec.c
index 5c591abc51d..d062e5f7db1 100644
--- a/vppinfra/vppinfra/zvec.c
+++ b/vppinfra/vppinfra/zvec.c
@@ -36,10 +36,10 @@
*/
#include <vppinfra/bitmap.h>
-#include <vppinfra/bitops.h> /* for next_with_same_number_of_set_bits */
-#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/bitops.h> /* for next_with_same_number_of_set_bits */
+#include <vppinfra/error.h> /* for ASSERT */
#include <vppinfra/mem.h>
-#include <vppinfra/os.h> /* for os_panic */
+#include <vppinfra/os.h> /* for os_panic */
#include <vppinfra/vec.h>
#include <vppinfra/zvec.h>
@@ -60,7 +60,8 @@
/* Decode given compressed data. Return number of compressed data
bits used. */
-uword zvec_decode (uword coding, uword zdata, uword * n_zdata_bits)
+uword
+zvec_decode (uword coding, uword zdata, uword * n_zdata_bits)
{
uword c, d, result, n_bits;
uword explicit_end, implicit_end;
@@ -71,7 +72,7 @@ uword zvec_decode (uword coding, uword zdata, uword * n_zdata_bits)
{
c = first_set (coding);
implicit_end = c == coding;
- explicit_end = (zdata & 1) &~ implicit_end;
+ explicit_end = (zdata & 1) & ~implicit_end;
d = (zdata >> explicit_end) & (c - 1);
if (explicit_end | implicit_end)
{
@@ -93,9 +94,7 @@ uword zvec_decode (uword coding, uword zdata, uword * n_zdata_bits)
}
uword
-zvec_encode (uword coding,
- uword data,
- uword * n_result_bits)
+zvec_encode (uword coding, uword data, uword * n_result_bits)
{
uword c, shift, result;
uword explicit_end, implicit_end;
@@ -112,11 +111,11 @@ zvec_encode (uword coding,
explicit_end = ((data & (c - 1)) == data);
if (explicit_end | implicit_end)
{
- uword t = explicit_end &~ implicit_end;
+ uword t = explicit_end & ~implicit_end;
result = ((data << t) | t) << shift;
*n_result_bits =
/* data bits */ (c == 0 ? BITS (uword) : min_log2 (c))
- /* shift bits */ + shift + t;
+ /* shift bits */ + shift + t;
return result;
}
data -= c;
@@ -130,16 +129,19 @@ zvec_encode (uword coding,
}
always_inline uword
-get_data (void * data, uword data_bytes, uword is_signed)
+get_data (void *data, uword data_bytes, uword is_signed)
{
if (data_bytes == 1)
return is_signed ? zvec_signed_to_unsigned (*(i8 *) data) : *(u8 *) data;
else if (data_bytes == 2)
- return is_signed ? zvec_signed_to_unsigned (*(i16 *) data) : *(u16 *) data;
+ return is_signed ? zvec_signed_to_unsigned (*(i16 *) data) : *(u16 *)
+ data;
else if (data_bytes == 4)
- return is_signed ? zvec_signed_to_unsigned (*(i32 *) data) : *(u32 *) data;
+ return is_signed ? zvec_signed_to_unsigned (*(i32 *) data) : *(u32 *)
+ data;
else if (data_bytes == 8)
- return is_signed ? zvec_signed_to_unsigned (*(i64 *) data) : *(u64 *) data;
+ return is_signed ? zvec_signed_to_unsigned (*(i64 *) data) : *(u64 *)
+ data;
else
{
os_panic ();
@@ -148,7 +150,7 @@ get_data (void * data, uword data_bytes, uword is_signed)
}
always_inline void
-put_data (void * data, uword data_bytes, uword is_signed, uword x)
+put_data (void *data, uword data_bytes, uword is_signed, uword x)
{
if (data_bytes == 1)
{
@@ -188,11 +190,9 @@ always_inline uword *
zvec_encode_inline (uword * zvec,
uword * zvec_n_bits,
uword coding,
- void * data,
+ void *data,
uword data_stride,
- uword n_data,
- uword data_bytes,
- uword is_signed)
+ uword n_data, uword data_bytes, uword is_signed)
{
uword i;
@@ -201,8 +201,8 @@ zvec_encode_inline (uword * zvec,
{
uword d0, z0, l0;
- d0 = get_data (data + 0*data_stride, data_bytes, is_signed);
- data += 1*data_stride;
+ d0 = get_data (data + 0 * data_stride, data_bytes, is_signed);
+ data += 1 * data_stride;
n_data -= 1;
z0 = zvec_encode (coding, d0, &l0);
@@ -229,14 +229,14 @@ zvec_encode_inline (uword * zvec,
/* is_signed */ IS_SIGNED); \
}
-_ (u8, /* is_signed */ 0);
-_ (u16, /* is_signed */ 0);
-_ (u32, /* is_signed */ 0);
-_ (u64, /* is_signed */ 0);
-_ (i8, /* is_signed */ 1);
-_ (i16, /* is_signed */ 1);
-_ (i32, /* is_signed */ 1);
-_ (i64, /* is_signed */ 1);
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
#undef _
@@ -252,11 +252,9 @@ always_inline void
zvec_decode_inline (uword * zvec,
uword * zvec_n_bits,
uword coding,
- void * data,
+ void *data,
uword data_stride,
- uword n_data,
- uword data_bytes,
- uword is_signed)
+ uword n_data, uword data_bytes, uword is_signed)
{
uword i, n_max;
@@ -269,8 +267,8 @@ zvec_decode_inline (uword * zvec,
z0 = clib_bitmap_get_multiple (zvec, i, n_max);
d0 = zvec_decode (coding, z0, &l0);
i += l0;
- put_data (data + 0*data_stride, data_bytes, is_signed, d0);
- data += 1*data_stride;
+ put_data (data + 0 * data_stride, data_bytes, is_signed, d0);
+ data += 1 * data_stride;
n_data -= 1;
}
*zvec_n_bits = i;
@@ -291,21 +289,20 @@ zvec_decode_inline (uword * zvec,
/* is_signed */ IS_SIGNED); \
}
-_ (u8, /* is_signed */ 0);
-_ (u16, /* is_signed */ 0);
-_ (u32, /* is_signed */ 0);
-_ (u64, /* is_signed */ 0);
-_ (i8, /* is_signed */ 1);
-_ (i16, /* is_signed */ 1);
-_ (i32, /* is_signed */ 1);
-_ (i64, /* is_signed */ 1);
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
#undef _
/* Compute number of bits needed to encode given histogram. */
-static uword zvec_coding_bits (uword coding,
- uword * histogram_counts,
- uword min_bits)
+static uword
+zvec_coding_bits (uword coding, uword * histogram_counts, uword min_bits)
{
uword n_type_bits, n_bits;
uword this_count, last_count, max_count_index;
@@ -327,7 +324,8 @@ static uword zvec_coding_bits (uword coding,
l = min_log2 (b);
i += b;
- this_count = histogram_counts[i > max_count_index ? max_count_index : i-1];
+ this_count =
+ histogram_counts[i > max_count_index ? max_count_index : i - 1];
/* No more data to encode? */
if (this_count == last_count)
@@ -352,7 +350,7 @@ static uword zvec_coding_bits (uword coding,
}
uword
-_zvec_coding_from_histogram (void * histogram,
+_zvec_coding_from_histogram (void *histogram,
uword histogram_len,
uword histogram_elt_count_offset,
uword histogram_elt_bytes,
@@ -362,8 +360,8 @@ _zvec_coding_from_histogram (void * histogram,
uword coding, min_coding;
uword min_coding_bits, coding_bits;
uword i, n_bits_set, total_count;
- uword * counts;
- zvec_histogram_count_t * h_count = histogram + histogram_elt_count_offset;
+ uword *counts;
+ zvec_histogram_count_t *h_count = histogram + histogram_elt_count_offset;
if (histogram_len < 1)
{
@@ -382,14 +380,17 @@ _zvec_coding_from_histogram (void * histogram,
zvec_histogram_count_t this_count = h_count[0];
total_count += this_count;
counts[i] = total_count;
- h_count = (zvec_histogram_count_t *) ((void *) h_count + histogram_elt_bytes);
+ h_count =
+ (zvec_histogram_count_t *) ((void *) h_count + histogram_elt_bytes);
}
min_coding = 0;
min_coding_bits = ~0;
{
- uword base_coding = max_value_to_encode != ~0 ? (1 + max_value_to_encode) : vec_len (counts);
+ uword base_coding =
+ max_value_to_encode !=
+ ~0 ? (1 + max_value_to_encode) : vec_len (counts);
uword max_coding = max_pow2 (2 * base_coding);
for (n_bits_set = 1; n_bits_set <= 8; n_bits_set++)
@@ -413,7 +414,8 @@ _zvec_coding_from_histogram (void * histogram,
coding_return->min_coding_bits = min_coding_bits;
coding_return->n_data = total_count;
coding_return->n_codes = vec_len (counts);
- coding_return->ave_coding_bits = (f64) min_coding_bits / (f64) total_count;
+ coding_return->ave_coding_bits =
+ (f64) min_coding_bits / (f64) total_count;
}
vec_free (counts);
@@ -421,9 +423,20 @@ _zvec_coding_from_histogram (void * histogram,
return min_coding;
}
-u8 * format_zvec_coding (u8 * s, va_list * args)
+u8 *
+format_zvec_coding (u8 * s, va_list * args)
{
- zvec_coding_info_t * c = va_arg (*args, zvec_coding_info_t *);
- return format (s, "zvec coding 0x%x, %d elts, %d codes, %d bits total, %.4f ave bits/code",
- c->coding, c->n_data, c->n_codes, c->min_coding_bits, c->ave_coding_bits);
+ zvec_coding_info_t *c = va_arg (*args, zvec_coding_info_t *);
+ return format (s,
+ "zvec coding 0x%x, %d elts, %d codes, %d bits total, %.4f ave bits/code",
+ c->coding, c->n_data, c->n_codes, c->min_coding_bits,
+ c->ave_coding_bits);
}
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
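[Editor's note] The `_ (u8, ...)` through `_ (i64, ...)` lines reindented above are X-macro invocations: a short-lived `_` macro (its body is only partly visible in the hunk context) stamps out one typed wrapper around `zvec_encode_inline` / `zvec_decode_inline` per element type, and `#undef _` then discards it. A minimal sketch of the same pattern, with a hypothetical `demo_encode_inline` standing in for the real inline worker and plain C typedefs in place of vppinfra's:

#include <stdio.h>

typedef unsigned char u8;
typedef signed char i8;

/* Hypothetical stand-in for zvec_encode_inline; not the vppinfra code. */
static unsigned long
demo_encode_inline (const void *data, unsigned long n_data,
                    unsigned long data_bytes, unsigned long is_signed)
{
  (void) data;
  return n_data * data_bytes + is_signed;   /* placeholder "work" */
}

/* Same X-macro shape as the zvec lists above: one typed wrapper per
   element type, then the macro is dropped with #undef. */
#define _(TYPE, IS_SIGNED)                                      \
  static unsigned long                                          \
  demo_encode_##TYPE (const TYPE * data, unsigned long n_data)  \
  {                                                             \
    return demo_encode_inline (data, n_data, sizeof (TYPE),     \
                               IS_SIGNED);                      \
  }

_ (u8, /* is_signed */ 0)
_ (i8, /* is_signed */ 1)
#undef _

int
main (void)
{
  u8 a[4] = { 1, 2, 3, 4 };
  i8 b[2] = { -1, 1 };
  printf ("%lu %lu\n", demo_encode_u8 (a, 4), demo_encode_i8 (b, 2));
  return 0;
}

Each new entry in the type list generates another wrapper with no further boilerplate, which is why the checkstyle pass touches every `_()` line in both the .c and .h lists at once.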
diff --git a/vppinfra/vppinfra/zvec.h b/vppinfra/vppinfra/zvec.h
index fbc88aa5b7b..7d35a3fe41f 100644
--- a/vppinfra/vppinfra/zvec.h
+++ b/vppinfra/vppinfra/zvec.h
@@ -39,7 +39,7 @@
#define included_zvec_h
#include <vppinfra/clib.h>
-#include <vppinfra/error.h> /* for ASSERT */
+#include <vppinfra/error.h> /* for ASSERT */
#include <vppinfra/format.h>
/* zvec: compressed vectors.
@@ -61,7 +61,8 @@
histogram of typical values.
*/
-typedef struct {
+typedef struct
+{
/* Smallest coding for given histogram of typical data. */
u32 coding;
@@ -94,7 +95,7 @@ typedef u32 zvec_histogram_count_t;
(zc))
uword
-_zvec_coding_from_histogram (void * _histogram,
+_zvec_coding_from_histogram (void *_histogram,
uword histogram_len,
uword histogram_elt_count_offset,
uword histogram_elt_bytes,
@@ -105,14 +106,14 @@ _zvec_coding_from_histogram (void * _histogram,
uword * zvec_encode_##TYPE (uword * zvec, uword * zvec_n_bits, uword coding, \
void * data, uword data_stride, uword n_data);
-_ (u8, /* is_signed */ 0);
-_ (u16, /* is_signed */ 0);
-_ (u32, /* is_signed */ 0);
-_ (u64, /* is_signed */ 0);
-_ (i8, /* is_signed */ 1);
-_ (i16, /* is_signed */ 1);
-_ (i32, /* is_signed */ 1);
-_ (i64, /* is_signed */ 1);
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
#undef _
@@ -124,14 +125,14 @@ _ (i64, /* is_signed */ 1);
uword data_stride, \
uword n_data)
-_ (u8, /* is_signed */ 0);
-_ (u16, /* is_signed */ 0);
-_ (u32, /* is_signed */ 0);
-_ (u64, /* is_signed */ 0);
-_ (i8, /* is_signed */ 1);
-_ (i16, /* is_signed */ 1);
-_ (i32, /* is_signed */ 1);
-_ (i64, /* is_signed */ 1);
+_(u8, /* is_signed */ 0);
+_(u16, /* is_signed */ 0);
+_(u32, /* is_signed */ 0);
+_(u64, /* is_signed */ 0);
+_(i8, /* is_signed */ 1);
+_(i16, /* is_signed */ 1);
+_(i32, /* is_signed */ 1);
+_(i64, /* is_signed */ 1);
#undef _
@@ -142,7 +143,7 @@ always_inline uword
zvec_signed_to_unsigned (word s)
{
uword a = s < 0;
- s = 2*s + a;
+ s = 2 * s + a;
return a ? -s : s;
}
@@ -155,3 +156,11 @@ zvec_unsigned_to_signed (uword u)
}
#endif /* included_zvec_h */
+
+/*
+ * fd.io coding-style-patch-verification: ON
+ *
+ * Local Variables:
+ * eval: (c-set-style "gnu")
+ * End:
+ */
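[Editor's note] The `zvec_signed_to_unsigned` hunk above is a zigzag-style fold of the sign into the low bit, so 0, -1, 1, -2, 2, ... map to 0, 1, 2, 3, 4, ... and small magnitudes of either sign stay cheap to code. A minimal standalone check of that mapping, assuming a derived inverse for `zvec_unsigned_to_signed` (whose body is not shown in the hunk) and local `word`/`uword` typedefs in place of vppinfra's:

#include <assert.h>
#include <stdio.h>

typedef long word;
typedef unsigned long uword;

/* Forward mapping as shown in the zvec.h hunk above:
   0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4, ... */
static uword
demo_signed_to_unsigned (word s)
{
  uword a = s < 0;
  s = 2 * s + a;
  return a ? -s : s;
}

/* Assumed inverse (not shown in the hunk): undo the zigzag fold. */
static word
demo_unsigned_to_signed (uword u)
{
  word s = u >> 1;
  return (u & 1) ? -(s + 1) : s;
}

int
main (void)
{
  word s;
  for (s = -4; s <= 4; s++)
    {
      uword u = demo_signed_to_unsigned (s);
      assert (demo_unsigned_to_signed (u) == s);
      printf ("%ld -> %lu\n", (long) s, (unsigned long) u);
    }
  return 0;
}

Running it prints the pairs from -4 -> 7 up to 4 -> 8, and the asserts confirm the round trip for every value in that range.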